hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7fd52ba1635643bf559ba3d3d8a3f0330d80983 | 5,576 | py | Python | userbot/modules/animasi1.py | Wiki28/WikixCilik | a7e8d684e34174001af3e69d1f00de4e98243abe | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/animasi1.py | Wiki28/WikixCilik | a7e8d684e34174001af3e69d1f00de4e98243abe | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/animasi1.py | Wiki28/WikixCilik | a7e8d684e34174001af3e69d1f00de4e98243abe | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | # Ported by @Pocongonlen
# From Pocong-Userbot <https://github.com/poocong/Pocong-Userbot>
# Recode by @greyyvbss
from time import sleep
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP
from userbot.utils import edit_or_reply, cilik_cmd
@cilik_cmd(pattern="hai(?: |$)(.*)")
async def _(event):
    """Animated greeting skit: repeatedly edits the reply message."""
    import asyncio  # local import: keeps this fix self-contained

    xx = await edit_or_reply(event, "**Hai , Assalamualaikum**")
    # time.sleep would block the entire client event loop inside an async
    # handler; asyncio.sleep yields control while waiting.
    await asyncio.sleep(1)
    await xx.edit("Kalian Nungguin aku gak??")
    await asyncio.sleep(1)
    await xx.edit("Ih ga mau🤢")
    await asyncio.sleep(1)
    await xx.edit("gasukaa😫")
    await asyncio.sleep(1)
    await xx.edit("__GELAYY__🤮")
@cilik_cmd(pattern="kntl(?: |$)(.*)")
async def _(event):
    """Animated insult skit that "types out" the last word letter by letter."""
    import asyncio  # non-blocking delay; time.sleep would stall the event loop

    xx = await edit_or_reply(event, "Tau kh kalian wahai tuan-tuan??")
    await asyncio.sleep(1)
    await xx.edit("se**KONT0L** **K0NTOL** nya si **K0NTOL**")
    await asyncio.sleep(1)
    await xx.edit("lebih **KONTOL** lagi")
    await asyncio.sleep(1)
    # Progressive edits simulate typing; no delay between them (as before).
    for text in (
        "kalian",
        "kalian **K**",
        "kalian **Ko**",
        "kalian **Kon**",
        "kalian **Kont**",
        "kalian **Konto**",
        "kalian **Kontol**",
    ):
        await xx.edit(text)
@cilik_cmd(pattern="pe(?: |$)(.*)")
async def _(event):
    """Animated "tough guy" taunt (same script as the `phe` command)."""
    import asyncio  # non-blocking delay; time.sleep would stall the event loop

    xx = await edit_or_reply(event, "**ga usah sok keras deh bg**")
    await asyncio.sleep(2)
    await xx.edit("**karena lu petinggi di tele**")
    await asyncio.sleep(1)
    await xx.edit("**atau karena title lu itu**")
    await asyncio.sleep(1)
    await xx.edit("**ga ngaruh di rl bg.**")
@cilik_cmd(pattern="phe(?: |$)(.*)")
async def _(event):
    """Animated "tough guy" taunt (duplicate of `pe`, different trigger)."""
    import asyncio  # non-blocking delay; time.sleep would stall the event loop

    xx = await edit_or_reply(event, "**ga usah sok keras deh bg**")
    await asyncio.sleep(2)
    await xx.edit("**karena lu petinggi di tele**")
    await asyncio.sleep(1)
    await xx.edit("**atau karena title lu itu**")
    await asyncio.sleep(1)
    await xx.edit("**ga ngaruh di rl bg**")
@cilik_cmd(pattern="alay(?: |$)(.*)")
async def _(event):
    """Animated mockery sequence, one line per second."""
    import asyncio  # non-blocking delay; time.sleep would stall the event loop

    xx = await edit_or_reply(event, "eh kamu, iya kamu")
    for text in (
        "**ALAY** bnget sih",
        "spam bot mulu",
        "baru jadi userbot ya?? xixixi",
        "pantes **NORAK**",
    ):
        await asyncio.sleep(1)
        await xx.edit(text)
@cilik_cmd(pattern="jawa(?: |$)(.*)")
async def _(event):
    """Animated "Javanese pride" compliment list, one line per second."""
    import asyncio  # non-blocking delay; time.sleep would stall the event loop

    xx = await edit_or_reply(event, "baik")
    # Same messages in the same order as before, one second apart each.
    for text in (
        "Tidak Sombong",
        "Ganteng",
        "Sopan",
        "Rajin",
        "Budiman",
        "Alim",
        "Berguna",
        "**Nguli Juga**",
        "Pemaaf",
        "Jujur",
        "Tidk Sombong",
        "Kaya",
        "Pokoknya Jawa Pro Dah",
        "Tidak Seperti Yang Lain",
        "Bersama Kuli Membangun Negri",
        "eh salah salah, \nBersama **Jawa** Membangun Negri",
    ):
        await asyncio.sleep(1)
        await xx.edit(text)
@cilik_cmd(pattern="erpe(?: |$)(.*)")
async def _(event):
    """Animated roleplay-account mockery, one line per second."""
    import asyncio  # non-blocking delay; time.sleep would stall the event loop

    xx = await edit_or_reply(event, "Hai, Kamu Anak Erpe Ya")
    for text in (
        "Kok Pake Muka Orang sih?",
        "Oh iya, Muka Anak Erpe Kan",
        "**BURIK - BURIK**",
        "Jadinya Pake Muka Orang",
        "Karena apaa ?",
        "**Karena, BURIK**",
        "Canda **BURIK**",
        "Lari Ada Plastik KePanasan",
    ):
        await asyncio.sleep(1)
        await xx.edit(text)
@cilik_cmd(pattern="lopyu(?: |$)(.*)")
async def _(event):
    """Animated love confession that "types out" the sentence letter by letter."""
    import asyncio  # non-blocking delay; time.sleep would stall the event loop

    xx = await edit_or_reply(event, "`Cuma Mau Bilang`")
    await asyncio.sleep(1)
    # Progressive edits simulate typing; no delay between them (as before).
    for text in (
        "`A`",
        "`Ak`",
        "`Aku`",
        "`Aku S`",
        "`Aku Sa`",
        "`Aku Say`",
        "`Aku Saya`",
        "`Aku Sayan`",
        "`Aku Sayang`",
        "`Aku Sayang K`",
        "`Aku Sayang Ka`",
        "`Aku Sayang Kam`",
        "`Aku Sayang Kamu`",
    ):
        await xx.edit(text)
    await asyncio.sleep(1)
    await xx.edit("`I LOVE YOU 💞`")
@cilik_cmd(pattern="dahlah(?: |$)(.*)")
async def _(event):
    """Two-line "just give up" gag."""
    import asyncio  # non-blocking delay; time.sleep would stall the event loop

    xx = await edit_or_reply(event, "**`Ayo Menyerah`**")
    await asyncio.sleep(2)
    await xx.edit("**`Ngapain Semangat`**")
@cilik_cmd(pattern="ehm(?: |$)(.*)")
async def _(event):
    """Animated "let's voice call" tease, two seconds per line."""
    import asyncio  # non-blocking delay; time.sleep would stall the event loop

    xx = await edit_or_reply(event, "Eh..")
    await asyncio.sleep(2)
    await xx.edit("Suara kamu ga jelas")
    await asyncio.sleep(2)
    await xx.edit("Kayanya kalau call pribadi lebih jelas")
    await asyncio.sleep(2)
    await xx.edit("Gamau nyoba?")
# Pocong Userbot
# This module was created purely out of boredom,
# so please don't complain about it.
CMD_HELP.update(
{
"animasi1": f"➢ **Plugin : **`animasi1`\
\n\n ┌✪ **Command :** `{cmd}hai`\
\n └✪ **Function : ** Cosplay Nissa Sablon\
\n\n ┌✪ **Command :** `{cmd}kntl`\
\n └✪ **Function : **Kalian kntl\
\n\n ┌✪ **Command :** `{cmd}alay`\
\n └✪ **Function : ** Lumayanlah Buat Nyindir\
\n\n ┌✪ **Command :** `{cmd}phe / {cmd}pe`\
\n └✪ **Function : ** Jagoan tele\
\n\n ┌✪ **Command :** `{cmd}ehm`\
\n └✪ **Function : ** Eum Biasalah cewe mau nya call mulu\
\n\n ┌✪ **Command :** `{cmd}lopyu`\
\n └✪ **Function : ** Nyatakan Cinta Ke Cewe Orng\
\n\n ┌✪ **Command :** `{cmd}dahlah`\
\n └✪ **Function : ** Cek Aja dh sndri\
\n\n ┌✪ **Command :** `{cmd}jawa`\
\n └✪ **Function : ** Jawa Pride Ni Bos.\
\n\n ┌✪ **Command :** `{cmd}erpe`\
\n └✪ **Function : ** Ngatain Bocah Erpe."
})
| 27.60396 | 71 | 0.578551 |
from time import sleep
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP
from userbot.utils import edit_or_reply, cilik_cmd
@cilik_cmd(pattern="hai(?: |$)(.*)")
async def _(event):
xx = await edit_or_reply(event, f"**Hai , Assalamualaikum**")
sleep(1)
await xx.edit("Kalian Nungguin aku gak??")
sleep(1)
await xx.edit("Ih ga mau🤢")
sleep(1)
await xx.edit("gasukaa😫")
sleep(1)
await xx.edit("__GELAYY__🤮")
@cilik_cmd(pattern="kntl(?: |$)(.*)")
async def _(event):
xx = await edit_or_reply(event, f"Tau kh kalian wahai tuan-tuan??")
sleep(1)
await xx.edit("se**KONT0L** **K0NTOL** nya si **K0NTOL**")
sleep(1)
await xx.edit("lebih **KONTOL** lagi")
sleep(1)
await xx.edit("kalian")
await xx.edit("kalian **K**")
await xx.edit("kalian **Ko**")
await xx.edit("kalian **Kon**")
await xx.edit("kalian **Kont**")
await xx.edit("kalian **Konto**")
await xx.edit("kalian **Kontol**")
@cilik_cmd(pattern="pe(?: |$)(.*)")
async def _(event):
xx = await edit_or_reply(event, f"**ga usah sok keras deh bg**")
sleep(2)
await xx.edit("**karena lu petinggi di tele**")
sleep(1)
await xx.edit("**atau karena title lu itu**")
sleep(1)
await xx.edit("**ga ngaruh di rl bg.**")
@cilik_cmd(pattern="phe(?: |$)(.*)")
async def _(event):
xx = await edit_or_reply(event, f"**ga usah sok keras deh bg**")
sleep(2)
await xx.edit("**karena lu petinggi di tele**")
sleep(1)
await xx.edit("**atau karena title lu itu**")
sleep(1)
await xx.edit("**ga ngaruh di rl bg**")
@cilik_cmd(pattern="alay(?: |$)(.*)")
async def _(event):
xx = await edit_or_reply(event, f"eh kamu, iya kamu")
sleep(1)
await xx.edit("**ALAY** bnget sih")
sleep(1)
await xx.edit("spam bot mulu")
sleep(1)
await xx.edit("baru jadi userbot ya?? xixixi")
sleep(1)
await xx.edit("pantes **NORAK**")
@cilik_cmd(pattern="jawa(?: |$)(.*)")
async def _(event):
xx = await edit_or_reply(event, f"baik")
sleep(1)
await xx.edit("Tidak Sombong")
sleep(1)
await xx.edit("Ganteng")
sleep(1)
await xx.edit("Sopan")
sleep(1)
await xx.edit("Rajin")
sleep(1)
await xx.edit("Budiman")
sleep(1)
await xx.edit("Alim")
sleep(1)
await xx.edit("Berguna")
sleep(1)
await xx.edit("**Nguli Juga**")
sleep(1)
await xx.edit("Pemaaf")
sleep(1)
await xx.edit("Jujur")
sleep(1)
await xx.edit("Tidk Sombong")
sleep(1)
await xx.edit("Kaya")
sleep(1)
await xx.edit("Pokoknya Jawa Pro Dah")
sleep(1)
await xx.edit("Tidak Seperti Yang Lain")
sleep(1)
await xx.edit("Bersama Kuli Membangun Negri")
sleep(1)
await xx.edit("eh salah salah, \nBersama **Jawa** Membangun Negri")
@cilik_cmd(pattern="erpe(?: |$)(.*)")
async def _(event):
xx = await edit_or_reply(event, f"Hai, Kamu Anak Erpe Ya")
sleep(1)
await xx.edit("Kok Pake Muka Orang sih?")
sleep(1)
await xx.edit("Oh iya, Muka Anak Erpe Kan")
sleep(1)
await xx.edit("**BURIK - BURIK**")
sleep(1)
await xx.edit("Jadinya Pake Muka Orang")
sleep(1)
await xx.edit("Karena apaa ?")
sleep(1)
await xx.edit("**Karena, BURIK**")
sleep(1)
await xx.edit("Canda **BURIK**")
sleep(1)
await xx.edit("Lari Ada Plastik KePanasan")
@cilik_cmd(pattern="lopyu(?: |$)(.*)")
async def _(event):
xx = await edit_or_reply(event, f"`Cuma Mau Bilang`")
sleep(1)
await xx.edit("`A`")
await xx.edit("`Ak`")
await xx.edit("`Aku`")
await xx.edit("`Aku S`")
await xx.edit("`Aku Sa`")
await xx.edit("`Aku Say`")
await xx.edit("`Aku Saya`")
await xx.edit("`Aku Sayan`")
await xx.edit("`Aku Sayang`")
await xx.edit("`Aku Sayang K`")
await xx.edit("`Aku Sayang Ka`")
await xx.edit("`Aku Sayang Kam`")
await xx.edit("`Aku Sayang Kamu`")
sleep(1)
await xx.edit("`I LOVE YOU 💞`")
@cilik_cmd(pattern="dahlah(?: |$)(.*)")
async def _(event):
xx = await edit_or_reply(event, f"**`Ayo Menyerah`**")
sleep(2)
await xx.edit("**`Ngapain Semangat`**")
@cilik_cmd(pattern="ehm(?: |$)(.*)")
async def _(event):
xx = await edit_or_reply(event, f"Eh..")
sleep(2)
await xx.edit("Suara kamu ga jelas")
sleep(2)
await xx.edit("Kayanya kalau call pribadi lebih jelas")
sleep(2)
await xx.edit("Gamau nyoba?")
CMD_HELP.update(
{
"animasi1": f"➢ **Plugin : **`animasi1`\
\n\n ┌✪ **Command :** `{cmd}hai`\
\n └✪ **Function : ** Cosplay Nissa Sablon\
\n\n ┌✪ **Command :** `{cmd}kntl`\
\n └✪ **Function : **Kalian kntl\
\n\n ┌✪ **Command :** `{cmd}alay`\
\n └✪ **Function : ** Lumayanlah Buat Nyindir\
\n\n ┌✪ **Command :** `{cmd}phe / {cmd}pe`\
\n └✪ **Function : ** Jagoan tele\
\n\n ┌✪ **Command :** `{cmd}ehm`\
\n └✪ **Function : ** Eum Biasalah cewe mau nya call mulu\
\n\n ┌✪ **Command :** `{cmd}lopyu`\
\n └✪ **Function : ** Nyatakan Cinta Ke Cewe Orng\
\n\n ┌✪ **Command :** `{cmd}dahlah`\
\n └✪ **Function : ** Cek Aja dh sndri\
\n\n ┌✪ **Command :** `{cmd}jawa`\
\n └✪ **Function : ** Jawa Pride Ni Bos.\
\n\n ┌✪ **Command :** `{cmd}erpe`\
\n └✪ **Function : ** Ngatain Bocah Erpe."
})
| true | true |
f7fd5350f744a4ed47ffca2551d7feba45a71b45 | 2,976 | py | Python | python/mxnet/kvstore/kvstore_server.py | mchoi8739/incubator-mxnet | cff583250479b31c394f568ffb835b720cb84dc4 | [
"Apache-2.0"
] | 211 | 2016-06-06T08:32:36.000Z | 2021-07-03T16:50:16.000Z | python/mxnet/kvstore/kvstore_server.py | mchoi8739/incubator-mxnet | cff583250479b31c394f568ffb835b720cb84dc4 | [
"Apache-2.0"
] | 82 | 2016-03-29T02:40:02.000Z | 2021-02-06T22:20:40.000Z | python/mxnet/kvstore/kvstore_server.py | mchoi8739/incubator-mxnet | cff583250479b31c394f568ffb835b720cb84dc4 | [
"Apache-2.0"
] | 58 | 2016-10-27T07:37:08.000Z | 2021-07-03T16:50:17.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
"""A server node for the key value store."""
import ctypes
import sys
import pickle
import logging
from ..base import _LIB, check_call
from .base import create
__all__ = ['KVStoreServer']
class KVStoreServer(object):
    """The key-value store server.

    Wraps a KVStore's native handle and drives the blocking server loop,
    dispatching commands sent by workers (currently only "set optimizer").
    """

    def __init__(self, kvstore):
        """Initialize a new KVStoreServer.

        Parameters
        ----------
        kvstore : KVStore
            The store whose native handle this server will run.
        """
        self.kvstore = kvstore
        self.handle = kvstore.handle
        # Logging is configured lazily on the first command, because
        # kvstore.rank is not known this early. (The attribute name keeps
        # its historical spelling in case external code inspects it.)
        self.init_logginig = False

    def _controller(self):
        """Return the server controller callback invoked by the native loop."""
        def server_controller(cmd_id, cmd_body, _):
            """Dispatch a single command received from a worker."""
            if not self.init_logginig:
                # the reason put the codes here is because we cannot get
                # kvstore.rank earlier
                head = '%(asctime)-15s Server[' + str(
                    self.kvstore.rank) + '] %(message)s'
                logging.basicConfig(level=logging.DEBUG, format=head)
                self.init_logginig = True
            if cmd_id == 0:
                # Command 0 carries a pickled optimizer object. The previous
                # ``try: ... except: raise`` wrapper only re-raised every
                # exception, so it was a no-op and has been removed.
                optimizer = pickle.loads(cmd_body)
                self.kvstore.set_optimizer(optimizer)
            else:
                print("server %d, unknown command (%d, %s)" % (
                    self.kvstore.rank, cmd_id, cmd_body))
        return server_controller

    def run(self):
        """Run the server, whose behavior is like.

        >>> while receive(x):
        ...     if is_command x: controller(x)
        ...     else if is_key_value x: updater(x)
        """
        _ctrl_proto = ctypes.CFUNCTYPE(None, ctypes.c_int, ctypes.c_char_p, ctypes.c_void_p)
        check_call(_LIB.MXKVStoreRunServer(self.handle, _ctrl_proto(self._controller()), None))
def _init_kvstore_server_module():
    """Start server/scheduler."""
    # Ask the native library whether this process is a worker; anything
    # else (server or scheduler) must enter the blocking server loop.
    role = ctypes.c_int()
    check_call(_LIB.MXKVStoreIsWorkerNode(ctypes.byref(role)))
    if role.value != 0:
        return
    server = KVStoreServer(create('dist'))
    server.run()
    sys.exit()


_init_kvstore_server_module()
| 34.206897 | 95 | 0.629704 |
import ctypes
import sys
import pickle
import logging
from ..base import _LIB, check_call
from .base import create
__all__ = ['KVStoreServer']
class KVStoreServer(object):
def __init__(self, kvstore):
self.kvstore = kvstore
self.handle = kvstore.handle
self.init_logginig = False
def _controller(self):
def server_controller(cmd_id, cmd_body, _):
if not self.init_logginig:
head = '%(asctime)-15s Server[' + str(
self.kvstore.rank) + '] %(message)s'
logging.basicConfig(level=logging.DEBUG, format=head)
self.init_logginig = True
if cmd_id == 0:
try:
optimizer = pickle.loads(cmd_body)
except:
raise
self.kvstore.set_optimizer(optimizer)
else:
print("server %d, unknown command (%d, %s)" % (
self.kvstore.rank, cmd_id, cmd_body))
return server_controller
def run(self):
_ctrl_proto = ctypes.CFUNCTYPE(None, ctypes.c_int, ctypes.c_char_p, ctypes.c_void_p)
check_call(_LIB.MXKVStoreRunServer(self.handle, _ctrl_proto(self._controller()), None))
def _init_kvstore_server_module():
is_worker = ctypes.c_int()
check_call(_LIB.MXKVStoreIsWorkerNode(ctypes.byref(is_worker)))
if is_worker.value == 0:
kvstore = create('dist')
server = KVStoreServer(kvstore)
server.run()
sys.exit()
_init_kvstore_server_module()
| true | true |
f7fd53dd051aa5265b5f35472858eecc129ca7ef | 2,968 | py | Python | src/openeo_grass_gis_driver/udf_lang_udf_type.py | metzm/openeo-grassgis-driver | 4831f1778921f78bf7fc7688393682a8dfe92a7a | [
"Apache-2.0"
] | null | null | null | src/openeo_grass_gis_driver/udf_lang_udf_type.py | metzm/openeo-grassgis-driver | 4831f1778921f78bf7fc7688393682a8dfe92a7a | [
"Apache-2.0"
] | null | null | null | src/openeo_grass_gis_driver/udf_lang_udf_type.py | metzm/openeo-grassgis-driver | 4831f1778921f78bf7fc7688393682a8dfe92a7a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from openeo_grass_gis_driver.actinia_processing.actinia_interface import ActiniaInterface
from flask import make_response, jsonify
from openeo_grass_gis_driver.process_graph_db import GraphDB
# from .actinia_processing import udf_reduce_time
# from flask_restful import Resource
from openeo_grass_gis_driver.authentication import ResourceBase
__license__ = "Apache License, Version 2.0"
__author__ = "Sören Gebbert"
__copyright__ = "Copyright 2018, Sören Gebbert, mundialis"
__maintainer__ = "Soeren Gebbert"
__email__ = "soerengebbert@googlemail.com"
# Registry of available UDF descriptions, keyed first by language and then by
# UDF process name. Currently empty because the udf_reduce_time import above
# is commented out.
python_udfs = dict(python={})
# python_udfs["python"][udf_reduce_time.PROCESS_NAME] = udf_reduce_time.DOC
# Example response payload for the swagger documentation (none available yet).
GET_UDF_TYPE_EXAMPLE = None
# Swagger/OpenAPI fragment describing the GET /udf/{lang}/{udf_type} endpoint;
# consumed by the API documentation generator, not by request handling.
GET_UDF_TYPE_DOC = {
    "summary": "Returns the process description of UDF schemas, which offer different possibilities how "
               "user-defined scripts can be applied to the data.",
    "tags": ["UDF"],
    "parameters": [
        {
            "name": "lang",
            "in": "path",
            "description": "Language identifier such as `R`",
            "type": "string",
            "enum": ["python", "R"],
            "required": True
        },
        {
            "name": "udf_type",
            "in": "path",
            "type": "string",
            "description": "The UDF types define how UDFs can be exposed to the data, how they can be parallelized, "
                           "and how the result schema should be structured.",
            "enum": ["apply_pixel", "apply_scene", "reduce_time", "reduce_space", "window_time", "window_space",
                     "window_spacetime", "aggregate_time", "aggregate_space", "aggregate_spacetime"],
            "required": True
        }
    ],
    "responses": {
        "200": {
            "description": "Process description",
            "schema": None,
            "examples": {"application/json": GET_UDF_TYPE_EXAMPLE}
        },
        "401": {"$ref": "#/responses/auth_required"},
        "403": {"$ref": "#/responses/access_denied"},
        "404": {"description": "UDF type with specified identifier is not available"},
        "501": {"description": "This API feature, language or UDF type is not supported by the back-end."},
        "503": {"$ref": "#/responses/unavailable"}
    }
}
class UdfType(ResourceBase):
    """REST resource exposing the description of a single UDF type."""

    def __init__(self):
        self.iface = ActiniaInterface()
        self.db = GraphDB()

    def get(self, lang, udf_type):
        """Return the UDF type description for ``lang``/``udf_type``.

        Responds with 404 when either the language or the UDF type is
        unknown; both failure cases produced the identical payload before,
        so the duplicated response construction is collapsed into one guard.
        """
        registry = python_udfs.get(lang, {})
        if udf_type not in registry:
            return make_response(
                jsonify({"description": "UDF type with "
                                        "specified identifier is not available"}), 404)
        return make_response(jsonify(registry[udf_type]), 200)
| 38.051282 | 117 | 0.605458 |
from openeo_grass_gis_driver.actinia_processing.actinia_interface import ActiniaInterface
from flask import make_response, jsonify
from openeo_grass_gis_driver.process_graph_db import GraphDB
from openeo_grass_gis_driver.authentication import ResourceBase
__license__ = "Apache License, Version 2.0"
__author__ = "Sören Gebbert"
__copyright__ = "Copyright 2018, Sören Gebbert, mundialis"
__maintainer__ = "Soeren Gebbert"
__email__ = "soerengebbert@googlemail.com"
python_udfs = dict(python={})
GET_UDF_TYPE_EXAMPLE = None
GET_UDF_TYPE_DOC = {
"summary": "Returns the process description of UDF schemas, which offer different possibilities how "
"user-defined scripts can be applied to the data.",
"tags": ["UDF"],
"parameters": [
{
"name": "lang",
"in": "path",
"description": "Language identifier such as `R`",
"type": "string",
"enum": ["python", "R"],
"required": True
},
{
"name": "udf_type",
"in": "path",
"type": "string",
"description": "The UDF types define how UDFs can be exposed to the data, how they can be parallelized, "
"and how the result schema should be structured.",
"enum": ["apply_pixel", "apply_scene", "reduce_time", "reduce_space", "window_time", "window_space",
"window_spacetime", "aggregate_time", "aggregate_space", "aggregate_spacetime"],
"required": True
}
],
"responses": {
"200": {
"description": "Process description",
"schema": None,
"examples": {"application/json": GET_UDF_TYPE_EXAMPLE}
},
"401": {"$ref": "#/responses/auth_required"},
"403": {"$ref": "#/responses/access_denied"},
"404": {"description": "UDF type with specified identifier is not available"},
"501": {"description": "This API feature, language or UDF type is not supported by the back-end."},
"503": {"$ref": "#/responses/unavailable"}
}
}
class UdfType(ResourceBase):
def __init__(self):
self.iface = ActiniaInterface()
self.db = GraphDB()
def get(self, lang, udf_type):
if lang not in python_udfs:
return make_response(jsonify({"description": "UDF type with "
"specified identifier is not available"}), 404)
if udf_type not in python_udfs[lang]:
return make_response(jsonify({"description": "UDF type with "
"specified identifier is not available"}), 404)
return make_response(jsonify(python_udfs[lang][udf_type]), 200)
| true | true |
f7fd5459c1ffdbaf4ef78ac96d89c51a0d4808d6 | 19,222 | py | Python | ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py | Syndra/Ambari-source | 717526b2bf3636622212b14de0d3d298a20c7370 | [
"Apache-2.0"
] | 1 | 2021-06-24T07:59:25.000Z | 2021-06-24T07:59:25.000Z | ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py | Syndra/Ambari-source | 717526b2bf3636622212b14de0d3d298a20c7370 | [
"Apache-2.0"
] | null | null | null | ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params.py | Syndra/Ambari-source | 717526b2bf3636622212b14de0d3d298a20c7370 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from functions import calc_xmn_from_xms
from functions import check_append_heap_property
from functions import trim_heap_property
from resource_management.core.logger import Logger
from resource_management import *
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.functions.expect import expect
from ambari_commons.ambari_metrics_helper import select_metric_collector_hosts_from_hostnames
import status_params
from ambari_commons import OSCheck
import ConfigParser
import os
if OSCheck.is_windows_family():
from params_windows import *
else:
from params_linux import *
# server configurations
# `config` holds the full command JSON the Ambari server sends for this host;
# every parameter below is derived from it.
config = Script.get_config()
# Scratch directory provided by the agent for temporary files during execution.
exec_tmp_dir = Script.get_tmp_dir()
def get_combined_memory_mb(value1, value2):
  """Add two heap-size values and return the sum as a megabyte string.

  Each input may be a plain number ("512") or carry a trailing 'm'/'M'
  ("512m"); the result always carries a trailing 'm'. Returns None when
  either value cannot be parsed (e.g. "1g") or is not a string.
  """
  try:
    part1 = int(value1.strip()[:-1]) if value1.lower().strip()[-1:] == 'm' else int(value1)
    part2 = int(value2.strip()[:-1]) if value2.lower().strip()[-1:] == 'm' else int(value2)
    return str(part1 + part2) + 'm'
  except (ValueError, AttributeError):
    # ValueError: non-numeric text; AttributeError: non-string input.
    # The previous bare ``except`` also swallowed KeyboardInterrupt etc.,
    # and was followed by an unreachable ``pass``.
    return None
#AMBARI_METRICS data
ams_pid_dir = status_params.ams_collector_pid_dir
# Operation mode chooses between a single embedded HBase and a distributed one.
is_ams_distributed = config['configurations']['ams-site']['timeline.metrics.service.operation.mode'] == 'distributed'
ams_collector_script = "/usr/sbin/ambari-metrics-collector"
ams_collector_pid_dir = status_params.ams_collector_pid_dir
ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
ams_collector_list = default("/clusterHostInfo/metrics_collector_hosts", [])
# More than one collector instance in embedded mode needs dedicated handling.
embedded_mode_multiple_instances = False
if not is_ams_distributed and len(ams_collector_list) > 1:
  embedded_mode_multiple_instances = True
set_instanceId = "false"
cluster_name = config["clusterName"]
# An externally managed collector overrides the cluster-discovered host list
# and forces instanceId tagging of emitted metrics.
if 'cluster-env' in config['configurations'] and \
    'metrics_collector_external_hosts' in config['configurations']['cluster-env']:
  ams_collector_hosts = config['configurations']['cluster-env']['metrics_collector_external_hosts']
  set_instanceId = "true"
else:
  ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
metric_collector_host = select_metric_collector_hosts_from_hostnames(ams_collector_hosts)
random_metric_collector_host = select_metric_collector_hosts_from_hostnames(ams_collector_hosts)
# Port resolution: an external collector port wins; otherwise it is parsed
# from the collector webapp address (host:port), falling back to 6188.
if 'cluster-env' in config['configurations'] and \
    'metrics_collector_external_port' in config['configurations']['cluster-env']:
  metric_collector_port = config['configurations']['cluster-env']['metrics_collector_external_port']
else:
  metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
  if metric_collector_web_address.find(':') != -1:
    metric_collector_port = metric_collector_web_address.split(':')[1]
  else:
    metric_collector_port = '6188'
failover_strategy_blacklisted_interval_seconds = default("/configurations/ams-env/failover_strategy_blacklisted_interval", "600")
failover_strategy = default("/configurations/ams-site/failover.strategy", "round-robin")
# The HTTPS policy toggles both the flag and the scheme used to reach the
# collector endpoint.
if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
  metric_collector_https_enabled = True
  metric_collector_protocol = 'https'
else:
  metric_collector_https_enabled = False
  metric_collector_protocol = 'http'
# Truststore settings used by sinks when talking to an HTTPS collector.
metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
metric_truststore_ca_certs='ca.pem'
# One truststore alias per collector host, in host-list order.
metric_truststore_alias_list = []
for host in ams_collector_hosts.split(","):
  # BUG FIX: the lookup path previously contained the literal text "{host}"
  # (missing interpolation), so the per-host alias was never found and the
  # host name was always used. Build the config path per host instead.
  metric_truststore_alias = default(
      "/configurations/ams-ssl-client/" + host + ".ssl.client.truststore.alias", None)
  if not metric_truststore_alias:
    # Fall back to the host name itself when no alias is configured.
    metric_truststore_alias = host
  metric_truststore_alias_list.append(metric_truststore_alias)
agent_cache_dir = config['hostLevelParams']['agentCacheDir']
service_package_folder = config['commandParams']['service_package_folder']
stack_name = default("/hostLevelParams/stack_name", None)
# Grafana dashboard search path, scanned in order: stack-specific bundle,
# then the default bundle, then dashboards contributed by custom services.
dashboards_dirs = []
# Stack specific
dashboards_dirs.append(os.path.join(agent_cache_dir, service_package_folder,
                                    'files', 'grafana-dashboards', stack_name))
# Default
dashboards_dirs.append(os.path.join(agent_cache_dir, service_package_folder,
                                    'files', 'grafana-dashboards', 'default'))
# Custom services
dashboards_dirs.append(os.path.join(agent_cache_dir, 'dashboards', 'grafana-dashboards'))
def get_grafana_dashboard_defs(dirs=None):
  """Collect Grafana dashboard definition files.

  Walks every directory in `dirs` (defaults to the module-level
  `dashboards_dirs` search path) and returns the paths of all files whose
  name contains 'grafana'. Non-existent directories are skipped.
  """
  if dirs is None:
    # Backward-compatible default: the stack/default/custom search path.
    dirs = dashboards_dirs
  dashboard_defs = []
  for dashboards_dir in dirs:
    if os.path.exists(dashboards_dir):
      for root, subdirs, files in os.walk(dashboards_dir):
        # 'file' shadowed the builtin in the original; renamed.
        for file_name in files:
          if 'grafana' in file_name:
            dashboard_defs.append(os.path.join(root, file_name))
  return dashboard_defs
# find ambari version for grafana dashboards
def get_ambari_version(agent_ini='/etc/ambari-agent/conf/ambari-agent.ini'):
  """Determine the ambari-agent version for the Grafana dashboards.

  Reads the agent data prefix from `agent_ini` and returns the stripped
  contents of the `version` file inside it, or None when the config or
  version file is missing/unreadable.
  """
  if not os.path.exists(agent_ini):
    return None
  try:
    ambari_agent_config = ConfigParser.RawConfigParser()
    ambari_agent_config.read(agent_ini)
    data_dir = ambari_agent_config.get('agent', 'prefix')
    ver_file = os.path.join(data_dir, 'version')
    # ``with`` guarantees the handle is closed even if read() fails; the
    # original leaked the file object on error and used Py2-only
    # ``except Exception, e`` syntax.
    with open(ver_file, "r") as f:
      return f.read().strip()
  except Exception as e:
    Logger.info('Unable to determine ambari version from version file.')
    Logger.debug('Exception: %s' % str(e))
    # No hostname script identified in the ambari agent conf
    return None
ams_collector_log_dir = config['configurations']['ams-env']['metrics_collector_log_dir']
ams_collector_conf_dir = "/etc/ambari-metrics-collector/conf"
ams_monitor_log_dir = config['configurations']['ams-env']['metrics_monitor_log_dir']
# Monitor (per-host metrics agent) install and runtime locations.
ams_monitor_dir = "/usr/lib/python2.6/site-packages/resource_monitoring"
ams_monitor_conf_dir = "/etc/ambari-metrics-monitor/conf"
ams_monitor_pid_dir = status_params.ams_monitor_pid_dir
ams_monitor_script = "/usr/sbin/ambari-metrics-monitor"
# Grafana component locations, admin credentials and TLS settings.
ams_grafana_script = "/usr/sbin/ambari-metrics-grafana"
ams_grafana_home_dir = '/usr/lib/ambari-metrics-grafana'
ams_grafana_log_dir = default("/configurations/ams-grafana-env/metrics_grafana_log_dir", '/var/log/ambari-metrics-grafana')
ams_grafana_pid_dir = status_params.ams_grafana_pid_dir
ams_grafana_conf_dir = '/etc/ambari-metrics-grafana/conf'
ams_grafana_data_dir = default("/configurations/ams-grafana-env/metrics_grafana_data_dir", '/var/lib/ambari-metrics-grafana')
ams_grafana_admin_user = config['configurations']['ams-grafana-env']['metrics_grafana_username']
ams_grafana_admin_pwd = config['configurations']['ams-grafana-env']['metrics_grafana_password']
# First Grafana host in the cluster, or None when Grafana is not deployed.
metrics_grafana_hosts = default('/clusterHostInfo/metrics_grafana_hosts', None)
ams_grafana_host = None
if metrics_grafana_hosts:
  ams_grafana_host = metrics_grafana_hosts[0]
ams_grafana_port = default("/configurations/ams-grafana-ini/port", 3000)
ams_grafana_protocol = default("/configurations/ams-grafana-ini/protocol", 'http')
ams_grafana_cert_file = default("/configurations/ams-grafana-ini/cert_file", '/etc/ambari-metrics/conf/ams-grafana.crt')
ams_grafana_cert_key = default("/configurations/ams-grafana-ini/cert_key", '/etc/ambari-metrics/conf/ams-grafana.key')
ams_grafana_ca_cert = default("/configurations/ams-grafana-ini/ca_cert", None)
ams_hbase_home_dir = "/usr/lib/ams-hbase/"
ams_hbase_init_check_enabled = default("/configurations/ams-site/timeline.metrics.hbase.init.check.enabled", True)
#hadoop params
hbase_excluded_hosts = config['commandParams']['excluded_hosts']
hbase_drain_only = config['commandParams']['mark_draining_only']
hbase_included_hosts = config['commandParams']['included_hosts']
hbase_user = status_params.hbase_user
smokeuser = config['configurations']['cluster-env']['smokeuser']
hbase_root_dir = config['configurations']['ams-hbase-site']['hbase.rootdir']
hbase_pid_dir = status_params.hbase_pid_dir
is_hbase_distributed = config['configurations']['ams-hbase-site']['hbase.cluster.distributed']
is_local_fs_rootdir = hbase_root_dir.startswith('file://')
# security is disabled for embedded mode, when HBase is backed by file
security_enabled = False if not is_hbase_distributed else config['configurations']['cluster-env']['security_enabled']
# this is "hadoop-metrics.properties" for 1.x stacks
metric_prop_file_name = "hadoop-metrics2-hbase.properties"
# not supporting 32 bit jdk.
java64_home = config['hostLevelParams']['java_home']
java_version = expect("/hostLevelParams/java_version", int)
metrics_collector_heapsize = default('/configurations/ams-env/metrics_collector_heapsize', "512")
metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
skip_disk_metrics_patterns = default("/configurations/ams-env/timeline.metrics.skip.disk.metrics.patterns", None)
hbase_log_dir = config['configurations']['ams-hbase-env']['hbase_log_dir']
hbase_classpath_additional = default("/configurations/ams-hbase-env/hbase_classpath_additional", None)
master_heapsize = config['configurations']['ams-hbase-env']['hbase_master_heapsize']
regionserver_heapsize = config['configurations']['ams-hbase-env']['hbase_regionserver_heapsize']
# Check if hbase java options already have appended "m". If Yes, remove the trailing m.
metrics_collector_heapsize = check_append_heap_property(str(metrics_collector_heapsize), "m")
master_heapsize = check_append_heap_property(str(master_heapsize), "m")
regionserver_heapsize = check_append_heap_property(str(regionserver_heapsize), "m")
regionserver_xmn_max = default('/configurations/ams-hbase-env/hbase_regionserver_xmn_max', None)
if regionserver_xmn_max:
regionserver_xmn_max = int(trim_heap_property(str(regionserver_xmn_max), "m"))
regionserver_xmn_percent = expect("/configurations/ams-hbase-env/hbase_regionserver_xmn_ratio", float)
regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize, regionserver_xmn_percent, regionserver_xmn_max)
else:
regionserver_xmn_size = config['configurations']['ams-hbase-env']['regionserver_xmn_size']
pass
hbase_master_xmn_size = config['configurations']['ams-hbase-env']['hbase_master_xmn_size']
hbase_master_maxperm_size = config['configurations']['ams-hbase-env']['hbase_master_maxperm_size']
# Check if hbase java options already have appended "m". If Yes, remove the trailing m.
hbase_master_maxperm_size = check_append_heap_property(str(hbase_master_maxperm_size), "m")
hbase_master_xmn_size = check_append_heap_property(str(hbase_master_xmn_size), "m")
regionserver_xmn_size = check_append_heap_property(str(regionserver_xmn_size), "m")
# Choose heap size for embedded mode as sum of master + regionserver
if not is_hbase_distributed:
hbase_heapsize = get_combined_memory_mb(master_heapsize, regionserver_heapsize)
if hbase_heapsize is None:
hbase_heapsize = master_heapsize
else:
hbase_heapsize = master_heapsize
max_open_files_limit = default("/configurations/ams-hbase-env/max_open_files_limit", "32768")
hostname = config["hostname"]
cluster_zookeeper_quorum_hosts = ",".join(config['clusterHostInfo']['zookeeper_hosts'])
if 'zoo.cfg' in config['configurations'] and 'clientPort' in config['configurations']['zoo.cfg']:
cluster_zookeeper_clientPort = config['configurations']['zoo.cfg']['clientPort']
else:
cluster_zookeeper_clientPort = '2181'
if not is_hbase_distributed:
zookeeper_quorum_hosts = hostname
zookeeper_clientPort = '61181'
else:
zookeeper_quorum_hosts = cluster_zookeeper_quorum_hosts
zookeeper_clientPort = cluster_zookeeper_clientPort
ams_checkpoint_dir = config['configurations']['ams-site']['timeline.metrics.aggregator.checkpoint.dir']
_hbase_tmp_dir = config['configurations']['ams-hbase-site']['hbase.tmp.dir']
hbase_tmp_dir = substitute_vars(_hbase_tmp_dir, config['configurations']['ams-hbase-site'])
_zookeeper_data_dir = config['configurations']['ams-hbase-site']['hbase.zookeeper.property.dataDir']
zookeeper_data_dir = substitute_vars(_zookeeper_data_dir, config['configurations']['ams-hbase-site'])
# TODO UPGRADE default, update site during upgrade
_local_dir_conf = default('/configurations/ams-hbase-site/hbase.local.dir', "${hbase.tmp.dir}/local")
local_dir = substitute_vars(_local_dir_conf, config['configurations']['ams-hbase-site'])
phoenix_max_global_mem_percent = default('/configurations/ams-site/phoenix.query.maxGlobalMemoryPercentage', '20')
phoenix_client_spool_dir = default('/configurations/ams-site/phoenix.spool.directory', '/tmp')
phoenix_server_spool_dir = default('/configurations/ams-hbase-site/phoenix.spool.directory', '/tmp')
# Substitute vars if present
phoenix_client_spool_dir = substitute_vars(phoenix_client_spool_dir, config['configurations']['ams-hbase-site'])
phoenix_server_spool_dir = substitute_vars(phoenix_server_spool_dir, config['configurations']['ams-hbase-site'])
client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf")
master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf")
regionserver_jaas_config_file = format("{hbase_conf_dir}/hbase_regionserver_jaas.conf")
rs_hosts = ["localhost"]
smoke_test_user = config['configurations']['cluster-env']['smokeuser']
smokeuser_permissions = "RWXCA"
service_check_data = functions.get_unique_id_and_date()
user_group = config['configurations']['cluster-env']["user_group"]
hadoop_user = "hadoop"
kinit_cmd = ""
if security_enabled:
_hostname_lowercase = config['hostname'].lower()
client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf")
smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
hbase_user_keytab = config['configurations']['ams-hbase-env']['hbase_user_keytab']
ams_collector_jaas_config_file = format("{hbase_conf_dir}/ams_collector_jaas.conf")
ams_collector_keytab_path = config['configurations']['ams-hbase-security-site']['hbase.myclient.keytab']
ams_collector_jaas_princ = config['configurations']['ams-hbase-security-site']['hbase.myclient.principal'].replace('_HOST',_hostname_lowercase)
ams_zookeeper_jaas_config_file = format("{hbase_conf_dir}/ams_zookeeper_jaas.conf")
ams_zookeeper_keytab = config['configurations']['ams-hbase-security-site']['ams.zookeeper.keytab']
ams_zookeeper_principal_name = config['configurations']['ams-hbase-security-site']['ams.zookeeper.principal'].replace('_HOST',_hostname_lowercase)
master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf")
master_keytab_path = config['configurations']['ams-hbase-security-site']['hbase.master.keytab.file']
master_jaas_princ = config['configurations']['ams-hbase-security-site']['hbase.master.kerberos.principal'].replace('_HOST',_hostname_lowercase)
regionserver_jaas_config_file = format("{hbase_conf_dir}/hbase_regionserver_jaas.conf")
regionserver_keytab_path = config['configurations']['ams-hbase-security-site']['hbase.regionserver.keytab.file']
regionserver_jaas_princ = config['configurations']['ams-hbase-security-site']['hbase.regionserver.kerberos.principal'].replace('_HOST',_hostname_lowercase)
#Ambari metrics log4j settings
ams_hbase_log_maxfilesize = default('configurations/ams-hbase-log4j/ams_hbase_log_maxfilesize',256)
ams_hbase_log_maxbackupindex = default('configurations/ams-hbase-log4j/ams_hbase_log_maxbackupindex',20)
ams_hbase_security_log_maxfilesize = default('configurations/ams-hbase-log4j/ams_hbase_security_log_maxfilesize',256)
ams_hbase_security_log_maxbackupindex = default('configurations/ams-hbase-log4j/ams_hbase_security_log_maxbackupindex',20)
ams_log_max_backup_size = default('configurations/ams-log4j/ams_log_max_backup_size',80)
ams_log_number_of_backup_files = default('configurations/ams-log4j/ams_log_number_of_backup_files',60)
#log4j.properties
if (('ams-hbase-log4j' in config['configurations']) and ('content' in config['configurations']['ams-hbase-log4j'])):
hbase_log4j_props = config['configurations']['ams-hbase-log4j']['content']
else:
hbase_log4j_props = None
if (('ams-log4j' in config['configurations']) and ('content' in config['configurations']['ams-log4j'])):
log4j_props = config['configurations']['ams-log4j']['content']
else:
log4j_props = None
hbase_env_sh_template = config['configurations']['ams-hbase-env']['content']
ams_env_sh_template = config['configurations']['ams-env']['content']
ams_grafana_env_sh_template = config['configurations']['ams-grafana-env']['content']
ams_grafana_ini_template = config['configurations']['ams-grafana-ini']['content']
hbase_staging_dir = default("/configurations/ams-hbase-site/hbase.bulkload.staging.dir", "/amshbase/staging")
skip_create_hbase_root_dir = default("/configurations/ams-site/timeline.metrics.skip.create.hbase.root.dir", False)
hbase_wal_dir = default("/configurations/ams-hbase-site/hbase.wal.dir", None)
if hbase_wal_dir and re.search("^file://|/", hbase_wal_dir): #If wal dir is on local file system, create it.
hbase_wal_dir = re.sub("^file://|/", "", hbase_wal_dir, count=1)
#for create_hdfs_directory
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
hdfs_site = config['configurations']['hdfs-site']
default_fs = config['configurations']['core-site']['fs.defaultFS']
import functools
#create partial functions with common arguments for every HdfsResource call
#to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code
HdfsResource = functools.partial(
HdfsResource,
user=hdfs_user,
hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
security_enabled = security_enabled,
keytab = hdfs_user_keytab,
kinit_path_local = kinit_path_local,
hadoop_bin_dir = hadoop_bin_dir,
hadoop_conf_dir = hadoop_conf_dir,
principal_name = hdfs_principal_name,
hdfs_site = hdfs_site,
default_fs = default_fs,
immutable_paths = get_not_managed_resources()
)
| 51.12234 | 157 | 0.79487 |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from functions import calc_xmn_from_xms
from functions import check_append_heap_property
from functions import trim_heap_property
from resource_management.core.logger import Logger
from resource_management import *
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.functions.expect import expect
from ambari_commons.ambari_metrics_helper import select_metric_collector_hosts_from_hostnames
import status_params
from ambari_commons import OSCheck
import ConfigParser
import os
if OSCheck.is_windows_family():
from params_windows import *
else:
from params_linux import *
# Command JSON for the current agent execution and the agent's temp dir.
config = Script.get_config()
exec_tmp_dir = Script.get_tmp_dir()
def get_combined_memory_mb(value1, value2):
    """Sum two heap-size strings and return the result in megabytes.

    Each argument may be a plain number ("512") or carry a trailing "m"
    ("512m"); whitespace is tolerated.  Returns e.g. "1536m", or None when
    either value cannot be parsed.
    """
    try:
        part1 = int(value1.strip()[:-1]) if value1.lower().strip()[-1:] == 'm' else int(value1)
        part2 = int(value2.strip()[:-1]) if value2.lower().strip()[-1:] == 'm' else int(value2)
        return str(part1 + part2) + 'm'
    # Only parse failures should yield None; a bare `except:` also swallowed
    # SystemExit/KeyboardInterrupt.
    except (AttributeError, TypeError, ValueError):
        return None
# --- Metrics collector topology --------------------------------------------
ams_pid_dir = status_params.ams_collector_pid_dir
is_ams_distributed = config['configurations']['ams-site']['timeline.metrics.service.operation.mode'] == 'distributed'
ams_collector_script = "/usr/sbin/ambari-metrics-collector"
ams_collector_pid_dir = status_params.ams_collector_pid_dir
ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
ams_collector_list = default("/clusterHostInfo/metrics_collector_hosts", [])
embedded_mode_multiple_instances = False
if not is_ams_distributed and len(ams_collector_list) > 1:
    # Multiple collectors in embedded mode is an unsupported layout; callers
    # use this flag to warn/abort.
    embedded_mode_multiple_instances = True
set_instanceId = "false"
cluster_name = config["clusterName"]
# External collector hosts (if configured) override the cluster-managed list.
if 'cluster-env' in config['configurations'] and \
        'metrics_collector_external_hosts' in config['configurations']['cluster-env']:
    ams_collector_hosts = config['configurations']['cluster-env']['metrics_collector_external_hosts']
    set_instanceId = "true"
else:
    ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", []))
metric_collector_host = select_metric_collector_hosts_from_hostnames(ams_collector_hosts)
random_metric_collector_host = select_metric_collector_hosts_from_hostnames(ams_collector_hosts)
# Collector port: explicit external port wins, otherwise parse it out of the
# webapp bind address (defaulting to 6188).
if 'cluster-env' in config['configurations'] and \
        'metrics_collector_external_port' in config['configurations']['cluster-env']:
    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_external_port']
else:
    metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
    if metric_collector_web_address.find(':') != -1:
        metric_collector_port = metric_collector_web_address.split(':')[1]
    else:
        metric_collector_port = '6188'
failover_strategy_blacklisted_interval_seconds = default("/configurations/ams-env/failover_strategy_blacklisted_interval", "600")
failover_strategy = default("/configurations/ams-site/failover.strategy", "round-robin")
if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY":
    metric_collector_https_enabled = True
    metric_collector_protocol = 'https'
else:
    metric_collector_https_enabled = False
    metric_collector_protocol = 'http'
metric_truststore_path = default("/configurations/ams-ssl-client/ssl.client.truststore.location", "")
metric_truststore_type = default("/configurations/ams-ssl-client/ssl.client.truststore.type", "")
metric_truststore_password = default("/configurations/ams-ssl-client/ssl.client.truststore.password", "")
metric_truststore_ca_certs = 'ca.pem'
metric_truststore_alias_list = []
for host in ams_collector_hosts.split(","):
    # NOTE(review): the lookup path contains a literal "{host}" — it looks
    # like it was meant to be substituted with the loop variable (e.g. via
    # format()); as written the same key is queried for every host, so the
    # per-host alias always falls back to the hostname.  Confirm intent.
    metric_truststore_alias = default("/configurations/ams-ssl-client/{host}.ssl.client.truststore.alias", None)
    if not metric_truststore_alias:
        metric_truststore_alias = host
    metric_truststore_alias_list.append(metric_truststore_alias)

# --- Grafana dashboard search path (stack-specific first, then defaults) ---
agent_cache_dir = config['hostLevelParams']['agentCacheDir']
service_package_folder = config['commandParams']['service_package_folder']
stack_name = default("/hostLevelParams/stack_name", None)
dashboards_dirs = []
dashboards_dirs.append(os.path.join(agent_cache_dir, service_package_folder,
                                    'files', 'grafana-dashboards', stack_name))
dashboards_dirs.append(os.path.join(agent_cache_dir, service_package_folder,
                                    'files', 'grafana-dashboards', 'default'))
dashboards_dirs.append(os.path.join(agent_cache_dir, 'dashboards', 'grafana-dashboards'))
def get_grafana_dashboard_defs():
    """Return paths of Grafana dashboard definition files.

    Walks every directory in the module-level ``dashboards_dirs`` search
    path (non-existent entries are skipped) and collects every file whose
    name contains 'grafana'.
    """
    found = []
    for base_dir in dashboards_dirs:
        if not os.path.exists(base_dir):
            continue
        for parent, _subdirs, filenames in os.walk(base_dir):
            found.extend(os.path.join(parent, name)
                         for name in filenames if 'grafana' in name)
    return found
def get_ambari_version():
    """Determine the installed Ambari version from the agent's data dir.

    Reads the agent 'prefix' directory from ambari-agent.ini and returns the
    stripped contents of its 'version' file, or None if the config file is
    missing or anything goes wrong while reading.
    """
    ambari_version = None
    AMBARI_AGENT_CONF = '/etc/ambari-agent/conf/ambari-agent.ini'
    ambari_agent_config = ConfigParser.RawConfigParser()
    if os.path.exists(AMBARI_AGENT_CONF):
        try:
            ambari_agent_config.read(AMBARI_AGENT_CONF)
            data_dir = ambari_agent_config.get('agent', 'prefix')
            ver_file = os.path.join(data_dir, 'version')
            # 'with' guarantees the file handle is closed even if read() fails
            # (the original leaked the handle on error).
            with open(ver_file, "r") as f:
                ambari_version = f.read().strip()
        # Best-effort: a missing/garbled version file is logged, not fatal.
        # 'except Exception as e' replaces the Python-2-only 'except X, e'.
        except Exception as e:
            Logger.info('Unable to determine ambari version from version file.')
            Logger.debug('Exception: %s' % str(e))
    return ambari_version
ams_collector_log_dir = config['configurations']['ams-env']['metrics_collector_log_dir']
ams_collector_conf_dir = "/etc/ambari-metrics-collector/conf"
ams_monitor_log_dir = config['configurations']['ams-env']['metrics_monitor_log_dir']
ams_monitor_dir = "/usr/lib/python2.6/site-packages/resource_monitoring"
ams_monitor_conf_dir = "/etc/ambari-metrics-monitor/conf"
ams_monitor_pid_dir = status_params.ams_monitor_pid_dir
ams_monitor_script = "/usr/sbin/ambari-metrics-monitor"
ams_grafana_script = "/usr/sbin/ambari-metrics-grafana"
ams_grafana_home_dir = '/usr/lib/ambari-metrics-grafana'
ams_grafana_log_dir = default("/configurations/ams-grafana-env/metrics_grafana_log_dir", '/var/log/ambari-metrics-grafana')
ams_grafana_pid_dir = status_params.ams_grafana_pid_dir
ams_grafana_conf_dir = '/etc/ambari-metrics-grafana/conf'
ams_grafana_data_dir = default("/configurations/ams-grafana-env/metrics_grafana_data_dir", '/var/lib/ambari-metrics-grafana')
ams_grafana_admin_user = config['configurations']['ams-grafana-env']['metrics_grafana_username']
ams_grafana_admin_pwd = config['configurations']['ams-grafana-env']['metrics_grafana_password']
metrics_grafana_hosts = default('/clusterHostInfo/metrics_grafana_hosts', None)
ams_grafana_host = None
if metrics_grafana_hosts:
ams_grafana_host = metrics_grafana_hosts[0]
ams_grafana_port = default("/configurations/ams-grafana-ini/port", 3000)
ams_grafana_protocol = default("/configurations/ams-grafana-ini/protocol", 'http')
ams_grafana_cert_file = default("/configurations/ams-grafana-ini/cert_file", '/etc/ambari-metrics/conf/ams-grafana.crt')
ams_grafana_cert_key = default("/configurations/ams-grafana-ini/cert_key", '/etc/ambari-metrics/conf/ams-grafana.key')
ams_grafana_ca_cert = default("/configurations/ams-grafana-ini/ca_cert", None)
ams_hbase_home_dir = "/usr/lib/ams-hbase/"
ams_hbase_init_check_enabled = default("/configurations/ams-site/timeline.metrics.hbase.init.check.enabled", True)
hbase_excluded_hosts = config['commandParams']['excluded_hosts']
hbase_drain_only = config['commandParams']['mark_draining_only']
hbase_included_hosts = config['commandParams']['included_hosts']
hbase_user = status_params.hbase_user
smokeuser = config['configurations']['cluster-env']['smokeuser']
hbase_root_dir = config['configurations']['ams-hbase-site']['hbase.rootdir']
hbase_pid_dir = status_params.hbase_pid_dir
is_hbase_distributed = config['configurations']['ams-hbase-site']['hbase.cluster.distributed']
is_local_fs_rootdir = hbase_root_dir.startswith('file://')
security_enabled = False if not is_hbase_distributed else config['configurations']['cluster-env']['security_enabled']
metric_prop_file_name = "hadoop-metrics2-hbase.properties"
java64_home = config['hostLevelParams']['java_home']
java_version = expect("/hostLevelParams/java_version", int)
metrics_collector_heapsize = default('/configurations/ams-env/metrics_collector_heapsize', "512")
metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10)
skip_disk_metrics_patterns = default("/configurations/ams-env/timeline.metrics.skip.disk.metrics.patterns", None)
hbase_log_dir = config['configurations']['ams-hbase-env']['hbase_log_dir']
hbase_classpath_additional = default("/configurations/ams-hbase-env/hbase_classpath_additional", None)
master_heapsize = config['configurations']['ams-hbase-env']['hbase_master_heapsize']
regionserver_heapsize = config['configurations']['ams-hbase-env']['hbase_regionserver_heapsize']
metrics_collector_heapsize = check_append_heap_property(str(metrics_collector_heapsize), "m")
master_heapsize = check_append_heap_property(str(master_heapsize), "m")
regionserver_heapsize = check_append_heap_property(str(regionserver_heapsize), "m")
regionserver_xmn_max = default('/configurations/ams-hbase-env/hbase_regionserver_xmn_max', None)
if regionserver_xmn_max:
regionserver_xmn_max = int(trim_heap_property(str(regionserver_xmn_max), "m"))
regionserver_xmn_percent = expect("/configurations/ams-hbase-env/hbase_regionserver_xmn_ratio", float)
regionserver_xmn_size = calc_xmn_from_xms(regionserver_heapsize, regionserver_xmn_percent, regionserver_xmn_max)
else:
regionserver_xmn_size = config['configurations']['ams-hbase-env']['regionserver_xmn_size']
pass
hbase_master_xmn_size = config['configurations']['ams-hbase-env']['hbase_master_xmn_size']
hbase_master_maxperm_size = config['configurations']['ams-hbase-env']['hbase_master_maxperm_size']
hbase_master_maxperm_size = check_append_heap_property(str(hbase_master_maxperm_size), "m")
hbase_master_xmn_size = check_append_heap_property(str(hbase_master_xmn_size), "m")
regionserver_xmn_size = check_append_heap_property(str(regionserver_xmn_size), "m")
if not is_hbase_distributed:
hbase_heapsize = get_combined_memory_mb(master_heapsize, regionserver_heapsize)
if hbase_heapsize is None:
hbase_heapsize = master_heapsize
else:
hbase_heapsize = master_heapsize
max_open_files_limit = default("/configurations/ams-hbase-env/max_open_files_limit", "32768")
hostname = config["hostname"]
cluster_zookeeper_quorum_hosts = ",".join(config['clusterHostInfo']['zookeeper_hosts'])
if 'zoo.cfg' in config['configurations'] and 'clientPort' in config['configurations']['zoo.cfg']:
cluster_zookeeper_clientPort = config['configurations']['zoo.cfg']['clientPort']
else:
cluster_zookeeper_clientPort = '2181'
if not is_hbase_distributed:
zookeeper_quorum_hosts = hostname
zookeeper_clientPort = '61181'
else:
zookeeper_quorum_hosts = cluster_zookeeper_quorum_hosts
zookeeper_clientPort = cluster_zookeeper_clientPort
ams_checkpoint_dir = config['configurations']['ams-site']['timeline.metrics.aggregator.checkpoint.dir']
_hbase_tmp_dir = config['configurations']['ams-hbase-site']['hbase.tmp.dir']
hbase_tmp_dir = substitute_vars(_hbase_tmp_dir, config['configurations']['ams-hbase-site'])
_zookeeper_data_dir = config['configurations']['ams-hbase-site']['hbase.zookeeper.property.dataDir']
zookeeper_data_dir = substitute_vars(_zookeeper_data_dir, config['configurations']['ams-hbase-site'])
_local_dir_conf = default('/configurations/ams-hbase-site/hbase.local.dir', "${hbase.tmp.dir}/local")
local_dir = substitute_vars(_local_dir_conf, config['configurations']['ams-hbase-site'])
phoenix_max_global_mem_percent = default('/configurations/ams-site/phoenix.query.maxGlobalMemoryPercentage', '20')
phoenix_client_spool_dir = default('/configurations/ams-site/phoenix.spool.directory', '/tmp')
phoenix_server_spool_dir = default('/configurations/ams-hbase-site/phoenix.spool.directory', '/tmp')
phoenix_client_spool_dir = substitute_vars(phoenix_client_spool_dir, config['configurations']['ams-hbase-site'])
phoenix_server_spool_dir = substitute_vars(phoenix_server_spool_dir, config['configurations']['ams-hbase-site'])
client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf")
master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf")
regionserver_jaas_config_file = format("{hbase_conf_dir}/hbase_regionserver_jaas.conf")
rs_hosts = ["localhost"]
smoke_test_user = config['configurations']['cluster-env']['smokeuser']
smokeuser_permissions = "RWXCA"
service_check_data = functions.get_unique_id_and_date()
user_group = config['configurations']['cluster-env']["user_group"]
hadoop_user = "hadoop"
kinit_cmd = ""
if security_enabled:
_hostname_lowercase = config['hostname'].lower()
client_jaas_config_file = format("{hbase_conf_dir}/hbase_client_jaas.conf")
smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
hbase_user_keytab = config['configurations']['ams-hbase-env']['hbase_user_keytab']
ams_collector_jaas_config_file = format("{hbase_conf_dir}/ams_collector_jaas.conf")
ams_collector_keytab_path = config['configurations']['ams-hbase-security-site']['hbase.myclient.keytab']
ams_collector_jaas_princ = config['configurations']['ams-hbase-security-site']['hbase.myclient.principal'].replace('_HOST',_hostname_lowercase)
ams_zookeeper_jaas_config_file = format("{hbase_conf_dir}/ams_zookeeper_jaas.conf")
ams_zookeeper_keytab = config['configurations']['ams-hbase-security-site']['ams.zookeeper.keytab']
ams_zookeeper_principal_name = config['configurations']['ams-hbase-security-site']['ams.zookeeper.principal'].replace('_HOST',_hostname_lowercase)
master_jaas_config_file = format("{hbase_conf_dir}/hbase_master_jaas.conf")
master_keytab_path = config['configurations']['ams-hbase-security-site']['hbase.master.keytab.file']
master_jaas_princ = config['configurations']['ams-hbase-security-site']['hbase.master.kerberos.principal'].replace('_HOST',_hostname_lowercase)
regionserver_jaas_config_file = format("{hbase_conf_dir}/hbase_regionserver_jaas.conf")
regionserver_keytab_path = config['configurations']['ams-hbase-security-site']['hbase.regionserver.keytab.file']
regionserver_jaas_princ = config['configurations']['ams-hbase-security-site']['hbase.regionserver.kerberos.principal'].replace('_HOST',_hostname_lowercase)
ams_hbase_log_maxfilesize = default('configurations/ams-hbase-log4j/ams_hbase_log_maxfilesize',256)
ams_hbase_log_maxbackupindex = default('configurations/ams-hbase-log4j/ams_hbase_log_maxbackupindex',20)
ams_hbase_security_log_maxfilesize = default('configurations/ams-hbase-log4j/ams_hbase_security_log_maxfilesize',256)
ams_hbase_security_log_maxbackupindex = default('configurations/ams-hbase-log4j/ams_hbase_security_log_maxbackupindex',20)
ams_log_max_backup_size = default('configurations/ams-log4j/ams_log_max_backup_size',80)
ams_log_number_of_backup_files = default('configurations/ams-log4j/ams_log_number_of_backup_files',60)
if (('ams-hbase-log4j' in config['configurations']) and ('content' in config['configurations']['ams-hbase-log4j'])):
hbase_log4j_props = config['configurations']['ams-hbase-log4j']['content']
else:
hbase_log4j_props = None
if (('ams-log4j' in config['configurations']) and ('content' in config['configurations']['ams-log4j'])):
log4j_props = config['configurations']['ams-log4j']['content']
else:
log4j_props = None
hbase_env_sh_template = config['configurations']['ams-hbase-env']['content']
ams_env_sh_template = config['configurations']['ams-env']['content']
ams_grafana_env_sh_template = config['configurations']['ams-grafana-env']['content']
ams_grafana_ini_template = config['configurations']['ams-grafana-ini']['content']
hbase_staging_dir = default("/configurations/ams-hbase-site/hbase.bulkload.staging.dir", "/amshbase/staging")
skip_create_hbase_root_dir = default("/configurations/ams-site/timeline.metrics.skip.create.hbase.root.dir", False)
hbase_wal_dir = default("/configurations/ams-hbase-site/hbase.wal.dir", None)
if hbase_wal_dir and re.search("^file://|/", hbase_wal_dir):
hbase_wal_dir = re.sub("^file://|/", "", hbase_wal_dir, count=1)
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name']
kinit_path_local = functions.get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
hdfs_site = config['configurations']['hdfs-site']
default_fs = config['configurations']['core-site']['fs.defaultFS']
import functools
HdfsResource = functools.partial(
HdfsResource,
user=hdfs_user,
hdfs_resource_ignore_file = "/var/lib/ambari-agent/data/.hdfs_resource_ignore",
security_enabled = security_enabled,
keytab = hdfs_user_keytab,
kinit_path_local = kinit_path_local,
hadoop_bin_dir = hadoop_bin_dir,
hadoop_conf_dir = hadoop_conf_dir,
principal_name = hdfs_principal_name,
hdfs_site = hdfs_site,
default_fs = default_fs,
immutable_paths = get_not_managed_resources()
)
| false | true |
f7fd5478e7454ef964bb747ed6c016a4f2cd7b63 | 4,112 | py | Python | preprocessing/vcc2018/feature_reader.py | unilight/cdvae-vc | 6470b0e587d40f6d1d91712a0dacef5ff8d661ce | [
"MIT"
] | 55 | 2019-07-08T09:40:50.000Z | 2021-12-20T15:30:58.000Z | preprocessing/vcc2018/feature_reader.py | yu-tsao/cdvae-vc | 6470b0e587d40f6d1d91712a0dacef5ff8d661ce | [
"MIT"
] | 7 | 2020-01-28T22:12:32.000Z | 2021-08-25T14:47:40.000Z | preprocessing/vcc2018/feature_reader.py | yu-tsao/cdvae-vc | 6470b0e587d40f6d1d91712a0dacef5ff8d661ce | [
"MIT"
] | 13 | 2019-07-09T00:37:14.000Z | 2021-12-27T06:34:14.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import argparse
import os
import sys
import numpy as np
import h5py
import logging
from scipy.io import wavfile
from sprocket.speech.synthesizer import Synthesizer
import tensorflow as tf
def Segment_feature_reader(
        file_pattern,
        feat_param,
        batch_size,
        crop_length,
        capacity=256,
        min_after_dequeue=128,
        num_threads=8,
):
    """Build a TF (1.x) input pipeline of shuffled, randomly cropped segments.

    Args:
        file_pattern: iterable of glob patterns for raw float32 feature files.
        feat_param: dict with 'fftl', 'mcep_dim', 'feat_dim' describing the
            per-frame feature layout [sp | mcc | ... | speaker-label].
        batch_size: segments per batch.
        crop_length: number of consecutive frames per random crop.
        capacity / min_after_dequeue / num_threads: tf.train.shuffle_batch
            queue parameters.

    Returns:
        A dict of batched tensors with keys 'sp', 'mcc', 'speaker'.
    """
    with tf.name_scope('InputSpectralFrame'):
        # Per-frame feature dimensions derived from the analysis parameters.
        SP_DIM = feat_param['fftl'] // 2 + 1
        MCC_DIM = feat_param['mcep_dim']
        FEAT_DIM = feat_param['feat_dim']
        # (removed: unused 'record_bytes' local)

        files = []
        for p in file_pattern:
            files.extend(tf.gfile.Glob(p))
        print('Found {} files'.format(len(files)))

        filename_queue = tf.train.string_input_producer(files)
        reader = tf.WholeFileReader()
        _, value = reader.read(filename_queue)
        value = tf.decode_raw(value, tf.float32)
        value = tf.reshape(value, [-1, FEAT_DIM,])
        # Take a random contiguous crop of `crop_length` frames per file read.
        values = tf.random_crop(value, [crop_length, FEAT_DIM])

        # WORLD features.
        sp = values[:, : SP_DIM]
        mcc = values[:, SP_DIM : SP_DIM + MCC_DIM]
        # Speaker label is stored as a float in the last column.
        speaker = tf.cast(values[:, -1], tf.int64)

        dictionary = {
            'sp': sp,
            'mcc': mcc,
            'speaker': speaker,
        }
        return tf.train.shuffle_batch(
            dictionary,
            batch_size,
            capacity=capacity,
            min_after_dequeue=min_after_dequeue,
            num_threads=num_threads,
        )
def Whole_feature_reader(filename, feat_param, dtype=np.float32):
    """Read one whole utterance of frame-aligned features from a raw file.

    The file holds `dtype` values laid out as [n_frames, feat_dim]; columns
    are, in order: sp, mcc, ap, f0, en_sp, en_mcc, ..., speaker (last col).

    Returns:
        dict with keys 'sp', 'mcc', 'ap', 'f0', 'en_sp', 'en_mcc' (float64,
        C-contiguous) and 'speaker' (int64).
    """
    sp_dim = feat_param['fftl'] // 2 + 1
    mcc_dim = feat_param['mcep_dim']
    feat_dim = feat_param['feat_dim']
    frames = np.fromfile(filename, dtype).astype(np.float64).reshape([-1, feat_dim])

    def _take(start, stop=None):
        # C-contiguous copy of one column (stop=None) or a column range.
        sel = frames[:, start] if stop is None else frames[:, start:stop]
        return sel.copy(order='C')

    ap_end = 2 * sp_dim + mcc_dim  # ap spans [sp_dim+mcc_dim, ap_end)
    return {
        'sp': _take(0, sp_dim),
        'mcc': _take(sp_dim, sp_dim + mcc_dim),
        'ap': _take(sp_dim + mcc_dim, ap_end),
        'f0': _take(ap_end),
        'en_sp': _take(ap_end + 1),
        'en_mcc': _take(ap_end + 2),
        'speaker': frames[:, -1].astype(np.int64),
    }
def main():
    """Feature reader & synthesis check.

    Usage:
        1. read original features
            feature_reader.py --file_pattern filename
        2. read f0 transformed features
            feature_reader.py --file_pattern filename --tarspk target_speaker
    """
    parser = argparse.ArgumentParser(
        description="test feature readers")
    parser.add_argument(
        "--file_pattern", default=None, type=str,
        help="the pattern of the testing feature file(s)")
    parser.add_argument(
        "--tarspk", default=None, type=str,
        help="the name of target speaker")
    parser.add_argument(
        "--wavname", default='test.wav', type=str,
        help="the name of output wav")
    args = parser.parse_args()

    # Analysis/synthesis parameter setting.  The key must be 'mcep_dim':
    # Whole_feature_reader reads feat_param['mcep_dim'] (the original dict
    # used 'mcc_dim' and raised KeyError).
    feat_param = {
        'fs': 22050,
        'shiftms': 5,
        'fftl': 1024,
        'mcep_alpha': 0.455,
        'sp_dim': 513,
        'mcep_dim': 34,
        'feat_dim': 513 + 34 + 513 + 3 + 39 + 1
    }

    # Load acoustic features and synthesize a check waveform.
    if os.path.exists(args.file_pattern):
        # Whole_feature_reader returns a dict; the original tuple-unpacked
        # eight values from it, which fails at runtime.
        feats = Whole_feature_reader(args.file_pattern, feat_param)
        # Prepend the mcc energy coefficient as the 0th cepstral dimension.
        en_mcc = np.expand_dims(feats['en_mcc'], 1)
        mcc = np.concatenate([en_mcc, feats['mcc']], axis=1)
        # NOTE(review): world_synthesis is not defined in this module —
        # presumably supplied by a sibling helper (e.g. wrapping
        # sprocket's Synthesizer); confirm before running this check.
        world_synthesis(args.wavname, feat_param, feats['f0'], mcc, feats['ap'])


if __name__ == "__main__":
    main()
| 29.163121 | 84 | 0.57393 |
from __future__ import division
import argparse
import os
import sys
import numpy as np
import h5py
import logging
from scipy.io import wavfile
from sprocket.speech.synthesizer import Synthesizer
import tensorflow as tf
def Segment_feature_reader(
    file_pattern,
    feat_param,
    batch_size,
    crop_length,
    capacity=256,
    min_after_dequeue=128,
    num_threads=8,
):
    """Build a TF1 queue-based input pipeline over raw binary feature files.

    Each file is read whole, decoded as float32, reshaped to
    [n_frames, feat_dim], randomly cropped to `crop_length` frames, and the
    crops are shuffle-batched. Uses deprecated TF1 queue runners
    (`string_input_producer` / `shuffle_batch`); a `tf.train.start_queue_runners`
    call is required by the caller.

    Args:
        file_pattern: iterable of glob patterns for the feature files.
        feat_param: dict with 'fftl', 'mcep_dim' and 'feat_dim'.
        batch_size: number of crops per batch.
        crop_length: number of frames per random crop.
        capacity / min_after_dequeue / num_threads: shuffle-queue tuning knobs.

    Returns:
        dict of batched tensors: 'sp' [batch, crop, sp_dim],
        'mcc' [batch, crop, mcep_dim], 'speaker' [batch, crop] (int64).
    """
    with tf.name_scope('InputSpectralFrame'):
        # Column widths of the spectral streams within a frame.
        SP_DIM = feat_param['fftl'] // 2 + 1
        MCC_DIM = feat_param['mcep_dim']
        FEAT_DIM = feat_param['feat_dim']
        record_bytes = FEAT_DIM * 4  # NOTE(review): unused — kept for reference
        files = []
        for p in file_pattern:
            files.extend(tf.gfile.Glob(p))
        print('Found {} files'.format(len(files)))
        filename_queue = tf.train.string_input_producer(files)

        # Read an entire file at once, then reinterpret the bytes as frames.
        reader = tf.WholeFileReader()
        _, value = reader.read(filename_queue)
        value = tf.decode_raw(value, tf.float32)
        value = tf.reshape(value, [-1, FEAT_DIM,])
        # Random temporal crop of `crop_length` consecutive frames.
        values = tf.random_crop(value, [crop_length, FEAT_DIM])
        sp = values[:, : SP_DIM]
        mcc = values[:, SP_DIM : SP_DIM + MCC_DIM]
        # Speaker id lives in the last column of every frame.
        speaker = tf.cast(values[:, -1], tf.int64)
        dictionary = {
            'sp': sp,
            'mcc': mcc,
            'speaker': speaker,
        }
        return tf.train.shuffle_batch(
            dictionary,
            batch_size,
            capacity=capacity,
            min_after_dequeue=min_after_dequeue,
            num_threads=num_threads,
        )
def Whole_feature_reader(filename, feat_param, dtype=np.float32):
    """Read a whole utterance of features from a raw binary file.

    The file holds `dtype` values laid out frame-by-frame with
    `feat_param['feat_dim']` columns per frame, ordered as
    [sp | mcc | ap | f0 | en_sp | en_mcc | ... | speaker].
    Returns a dict of per-stream float64 arrays ('speaker' is int64).
    """
    # Column widths of the spectral streams.
    SP_DIM = feat_param['fftl'] // 2 + 1
    MCC_DIM = feat_param['mcep_dim']
    FEAT_DIM = feat_param['feat_dim']
    values = np.fromfile(filename, dtype).astype(np.float64).reshape([-1, FEAT_DIM])
    # Slice each stream column-wise; copies are forced C-contiguous,
    # presumably for downstream (synthesis) consumers — TODO confirm.
    sp = values[:, : SP_DIM].copy(order='C')
    mcc = values[:, SP_DIM : SP_DIM + MCC_DIM].copy(order='C')
    ap = values[:, SP_DIM + MCC_DIM : SP_DIM * 2 + MCC_DIM].copy(order='C')
    f0 = values[:, SP_DIM * 2 + MCC_DIM].copy(order='C')
    en_sp = values[:, SP_DIM * 2 + MCC_DIM + 1].copy(order='C')
    en_mcc = values[:, SP_DIM * 2 + MCC_DIM + 2].copy(order='C')
    # Speaker id is stored in the last column of every frame.
    speaker = values[:, -1].astype(np.int64)
    dictionary = {
        'sp': sp,
        'mcc': mcc,
        'ap': ap,
        'f0': f0,
        'en_sp': en_sp,
        'en_mcc': en_mcc,
        'speaker': speaker,
    }
    return dictionary
def main():
    """Feature reader & synthesis check (reads features, synthesizes a wav)."""
    parser = argparse.ArgumentParser(
        description="test feature readers")
    parser.add_argument(
        "--file_pattern", default=None, type=str,
        help="the pattern of the testing feature file(s)")
    parser.add_argument(
        "--tarspk", default=None, type=str,
        help="the name of target speaker")
    parser.add_argument(
        "--wavname", default='test.wav', type=str,
        help="the name of output wav")
    args = parser.parse_args()

    # NOTE: Whole_feature_reader looks up 'mcep_dim'; the previous code stored
    # the mel-cepstrum dimension under 'mcc_dim', which raised a KeyError.
    feat_param = {
        'fs': 22050,
        'shiftms': 5,
        'fftl': 1024,
        'mcep_alpha': 0.455,
        'sp_dim': 513,
        'mcep_dim': 34,
        'feat_dim': 513 + 34 + 513 + 3 + 39 + 1
    }

    if os.path.exists(args.file_pattern):
        # Whole_feature_reader returns a dict; the previous code wrongly
        # tuple-unpacked it into 8 names (it only has 7 keys, and dict
        # iteration yields keys, not arrays).
        feats = Whole_feature_reader(args.file_pattern, feat_param)
        f0, mcc, ap = feats['f0'], feats['mcc'], feats['ap']
        # Re-attach the energy coefficient as the 0th mel-cepstral dimension.
        en_mcc = np.expand_dims(feats['en_mcc'], 1)
        mcc = np.concatenate([en_mcc, mcc], axis=1)
        # world_synthesis is defined elsewhere in this project — TODO confirm
        # its signature matches (wavname, feat_param, f0, mcc, ap).
        world_synthesis(args.wavname, feat_param, f0, mcc, ap)
if __name__ == "__main__":
    main()
| true | true |
f7fd54b9468d971a394bf3a23a022dda10d440ca | 130,636 | py | Python | src/transformers/models/big_bird/modeling_big_bird.py | theainerd/transformers | f7328de46dbeda4992a093a0501932bf0fc7b76f | [
"Apache-2.0"
] | 34 | 2021-07-05T02:44:31.000Z | 2022-03-28T14:39:57.000Z | src/transformers/models/big_bird/modeling_big_bird.py | theainerd/transformers | f7328de46dbeda4992a093a0501932bf0fc7b76f | [
"Apache-2.0"
] | 3 | 2021-07-22T15:49:44.000Z | 2022-03-19T08:46:27.000Z | src/transformers/models/big_bird/modeling_big_bird.py | theainerd/transformers | f7328de46dbeda4992a093a0501932bf0fc7b76f | [
"Apache-2.0"
] | 6 | 2021-07-05T02:44:32.000Z | 2022-02-14T10:10:13.000Z | # coding=utf-8
# Copyright 2021 Google Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch BigBird model. """
import math
import os
from dataclasses import dataclass
from typing import Optional, Tuple
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel, SequenceSummary, apply_chunking_to_forward
from ...utils import logging
from .configuration_big_bird import BigBirdConfig
logger = logging.get_logger(__name__)

# Default identifiers used by the auto-generated docstring examples.
_CHECKPOINT_FOR_DOC = "google/bigbird-roberta-base"
_CONFIG_FOR_DOC = "BigBirdConfig"
_TOKENIZER_FOR_DOC = "BigBirdTokenizer"

# Hub checkpoints known to ship compatible weights.
BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/bigbird-roberta-base",
    "google/bigbird-roberta-large",
    "google/bigbird-base-trivia-itc",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
]

# Mapping from TriviaQA TensorFlow checkpoint scope names to the BERT-style
# scope names expected by the weight loader; consumed by
# `load_tf_weights_in_big_bird` when `is_trivia_qa=True`.
_TRIVIA_QA_MAPPING = {
    "big_bird_attention": "attention/self",
    "output_layer_norm": "output/LayerNorm",
    "attention_output": "attention/output/dense",
    "output": "output/dense",
    "self_attention_layer_norm": "attention/output/LayerNorm",
    "intermediate": "intermediate/dense",
    "word_embeddings": "bert/embeddings/word_embeddings",
    "position_embedding": "bert/embeddings/position_embeddings",
    "type_embeddings": "bert/embeddings/token_type_embeddings",
    "embeddings": "bert/embeddings",
    "layer_normalization": "output/LayerNorm",
    "layer_norm": "LayerNorm",
    "trivia_qa_head": "qa_classifier",
    "dense": "intermediate/dense",
    "dense_1": "qa_outputs",
}
def load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=False):
    """Load tf checkpoints in a pytorch model.

    Reads either a regular BERT-style training checkpoint or (when
    ``is_trivia_qa=True``) a TriviaQA SavedModel, translates each TF variable
    name into the corresponding PyTorch parameter path, reshapes/transposes
    arrays where the layouts differ, and copies the values into ``model``
    in place. Returns the same ``model`` instance.
    """

    def load_tf_weights_bert(init_vars, tf_path):
        # Read every variable of a BERT-style checkpoint into name list +
        # name -> ndarray dict.
        names = []
        tf_weights = {}
        for name, shape in init_vars:
            array = tf.train.load_variable(tf_path, name)
            # The encoder-level LayerNorm is stored under embeddings in the
            # PyTorch module tree.
            name = name.replace("bert/encoder/LayerNorm", "bert/embeddings/LayerNorm")
            logger.info(f"Loading TF weight {name} with shape {shape}")
            names.append(name)
            tf_weights[name] = array
        return names, tf_weights

    def load_tf_weights_trivia_qa(init_vars):
        # Same as above, but for SavedModel variables whose scopes must be
        # rewritten via _TRIVIA_QA_MAPPING first.
        names = []
        tf_weights = {}
        for i, var in enumerate(init_vars):
            name_items = var.name.split("/")
            if "transformer_scaffold" in name_items[0]:
                layer_name_items = name_items[0].split("_")
                if len(layer_name_items) < 3:
                    layer_name_items += [0]
                name_items[0] = f"bert/encoder/layer_{layer_name_items[2]}"
            name = "/".join([_TRIVIA_QA_MAPPING[x] if x in _TRIVIA_QA_MAPPING else x for x in name_items])[
                :-2
            ]  # remove last :0 in variable
            if "self/attention/output" in name:
                name = name.replace("self/attention/output", "output")
            # The final two variables belong to the QA output head.
            if i >= len(init_vars) - 2:
                name = name.replace("intermediate", "output")
            logger.info(f"Loading TF weight {name} with shape {var.shape}")
            array = var.value().numpy()
            names.append(name)
            tf_weights[name] = array
        return names, tf_weights

    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.saved_model.load(tf_path).variables if is_trivia_qa else tf.train.list_variables(tf_path)
    assert len(init_vars) > 0, "Loaded trained variables cannot be empty."
    pt_names = list(model.state_dict().keys())
    if is_trivia_qa:
        names, tf_weights = load_tf_weights_trivia_qa(init_vars)
    else:
        names, tf_weights = load_tf_weights_bert(init_vars, tf_path)
    for txt_name in names:
        array = tf_weights[txt_name]
        name = txt_name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            logger.info(f"Skipping {'/'.join(name)}")
            continue
        # Walk the PyTorch module tree segment by segment, translating each
        # TF scope name to the matching attribute/index.
        pointer = model
        pt_name = []
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                # e.g. "layer_3" -> attribute "layer", index 3.
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
                pt_name.append("weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
                pt_name.append("bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
                pt_name.append("weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
                pt_name.append("classifier")
            elif scope_names[0] == "transform":
                pointer = getattr(pointer, "transform")
                pt_name.append("transform")
                if ("bias" in name) or ("kernel" in name):
                    pointer = getattr(pointer, "dense")
                    pt_name.append("dense")
                elif ("beta" in name) or ("gamma" in name):
                    pointer = getattr(pointer, "LayerNorm")
                    pt_name.append("LayerNorm")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                    pt_name.append(f"{scope_names[0]}")
                except AttributeError:
                    logger.info(f"Skipping {m_name}")
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
                pt_name.append(f"{num}")
        if m_name[-11:] == "_embeddings" or m_name == "embeddings":
            # Embedding tables map onto the module's .weight parameter.
            pointer = getattr(pointer, "weight")
            pt_name.append("weight")
        elif m_name == "kernel":
            # TF dense kernels are [in, out]; PyTorch expects [out, in].
            array = np.transpose(array)
        try:
            if len(array.shape) > len(pointer.shape) and math.prod(array.shape) == math.prod(pointer.shape):
                # Same element count but extra dims: TF stores per-head Q/K/V
                # and attention-output kernels as 3-D; fold them into the 2-D
                # PyTorch layout.
                if (
                    txt_name.endswith("attention/self/key/kernel")
                    or txt_name.endswith("attention/self/query/kernel")
                    or txt_name.endswith("attention/self/value/kernel")
                ):
                    array = array.transpose(1, 0, 2).reshape(pointer.shape)
                elif txt_name.endswith("attention/output/dense/kernel"):
                    array = array.transpose(0, 2, 1).reshape(pointer.shape)
                else:
                    array = array.reshape(pointer.shape)
            if pointer.shape != array.shape:
                raise ValueError(
                    f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched of {txt_name}."
                )
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        pt_weight_name = ".".join(pt_name)
        logger.info(f"Initialize PyTorch weight {pt_weight_name} from {txt_name}.")
        pointer.data = torch.from_numpy(array)
        # Bookkeeping: anything left in these collections was not matched.
        tf_weights.pop(txt_name, None)
        pt_names.remove(pt_weight_name)
    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}.")
    logger.info(f"Weights not initialized in PyTorch model: {', '.join(pt_names)}.")
    return model
class BigBirdEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
# Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
# End copy
self.rescale_embeddings = config.rescale_embeddings
self.hidden_size = config.hidden_size
def forward(
self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
if self.rescale_embeddings:
inputs_embeds = inputs_embeds * (self.hidden_size ** 0.5)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.dropout(embeddings)
embeddings = self.LayerNorm(embeddings)
return embeddings
class BigBirdSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.is_decoder = config.is_decoder
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_layer = past_key_value[0]
value_layer = past_key_value[1]
attention_mask = encoder_attention_mask
elif is_cross_attention:
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
elif past_key_value is not None:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_layer, value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in BigBirdModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = F.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
if self.is_decoder:
outputs = outputs + (past_key_value,)
return outputs
class BigBirdBlockSparseAttention(nn.Module):
def __init__(self, config, seed=None):
super().__init__()
self.max_seqlen = config.max_position_embeddings
self.seed = seed
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
f"heads {config.num_attention_heads}."
)
self.num_attention_heads = config.num_attention_heads
self.num_random_blocks = config.num_random_blocks
self.block_size = config.block_size
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
band_mask=None,
from_mask=None,
to_mask=None,
from_blocked_mask=None,
to_blocked_mask=None,
output_attentions=None,
):
# Currently this `class` can't be used in decoder.
batch_size, seqlen, _ = hidden_states.size()
to_seq_length = from_seq_length = seqlen
from_block_size = to_block_size = self.block_size
assert from_seq_length % from_block_size == 0, "Query sided sequence length must be multiple of block size"
assert to_seq_length % to_block_size == 0, "Key/Value sided sequence length must be multiple of block size"
query_layer = self.transpose_for_scores(self.query(hidden_states))
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
context_layer, attention_probs = self.bigbird_block_sparse_attention(
query_layer,
key_layer,
value_layer,
band_mask,
from_mask,
to_mask,
from_blocked_mask,
to_blocked_mask,
self.num_attention_heads,
self.num_random_blocks,
self.attention_head_size,
from_block_size,
to_block_size,
batch_size,
from_seq_length,
to_seq_length,
seed=self.seed,
plan_from_length=None,
plan_num_rand_blocks=None,
output_attentions=output_attentions,
)
context_layer = context_layer.contiguous().view(batch_size, from_seq_length, -1)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
@staticmethod
def torch_bmm_nd(inp_1, inp_2, ndim=None):
""" Fast nd matrix multiplication """
# faster replacement of torch.einsum ("bhqk,bhkd->bhqd")
return torch.bmm(inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:])).view(
inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 1])
)
@staticmethod
def torch_bmm_nd_transpose(inp_1, inp_2, ndim=None):
""" Fast nd matrix multiplication with transpose """
# faster replacement of torch.einsum (bhqd,bhkd->bhqk)
return torch.bmm(
inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:]).transpose(1, 2)
).view(inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 2]))
def bigbird_block_sparse_attention(
self,
query_layer,
key_layer,
value_layer,
band_mask,
from_mask,
to_mask,
from_blocked_mask,
to_blocked_mask,
n_heads,
n_rand_blocks,
attention_head_size,
from_block_size,
to_block_size,
batch_size,
from_seq_len,
to_seq_len,
seed,
plan_from_length,
plan_num_rand_blocks,
output_attentions,
):
# BigBird block-sparse attention as suggested in paper
# ITC:
# global tokens: 2 x block_size
# window tokens: 3 x block_size
# random tokens: num_rand_tokens x block_size
# ETC:
# global tokens: extra_globals_tokens + 2 x block_size
# window tokens: 3 x block_size
# random tokens: num_rand_tokens x block_size
# Note:
# 1) Currently, ETC is not supported.
# 2) Window size is fixed to 3 blocks & it can be changed only by
# changing `block_size`.
# 3) Number of global blocks are fixed (2 blocks here) & global tokens can be
# controlled only by `block_size`.
# attention is calculated separately for q[0], q[1], q[2:-2], q[-2], q[-1] in order to use special trick of shifting tokens (for calculating sliding attention)
# hence following code can be divided into 5 parts.
if from_seq_len // from_block_size != to_seq_len // to_block_size:
raise ValueError("Error the number of blocks needs to be same!")
rsqrt_d = 1 / math.sqrt(attention_head_size)
bsz = batch_size
# generate random attention and corresponding masks
np.random.seed(seed)
if from_seq_len in [1024, 3072, 4096]: # old plans used in paper
rand_attn = [
self._bigbird_block_rand_mask(
self.max_seqlen, self.max_seqlen, from_block_size, to_block_size, n_rand_blocks, last_idx=1024
)[: (from_seq_len // from_block_size - 2)]
for _ in range(n_heads)
]
else:
if plan_from_length is None:
plan_from_length, plan_num_rand_blocks = self._get_rand_attn_plan(
from_seq_len, from_block_size, n_rand_blocks
)
rand_attn = self._bigbird_block_rand_mask_with_head(
from_seq_length=from_seq_len,
to_seq_length=to_seq_len,
from_block_size=from_block_size,
to_block_size=to_block_size,
num_heads=n_heads,
plan_from_length=plan_from_length,
plan_num_rand_blocks=plan_num_rand_blocks,
)
rand_attn = np.stack(rand_attn, axis=0)
rand_attn = torch.tensor(rand_attn, device=query_layer.device, dtype=torch.long)
rand_attn.unsqueeze_(0)
rand_attn = torch.cat([rand_attn for _ in range(batch_size)], dim=0)
rand_mask = self._create_rand_mask_from_inputs(
from_blocked_mask, to_blocked_mask, rand_attn, n_heads, n_rand_blocks, bsz, from_seq_len, from_block_size
)
blocked_query_matrix = query_layer.view(bsz, n_heads, from_seq_len // from_block_size, from_block_size, -1)
blocked_key_matrix = key_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1)
blocked_value_matrix = value_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1)
# preparing block for randn attn
gathered_key = self.torch_gather_b2(blocked_key_matrix, rand_attn)
gathered_key = gathered_key.view(
bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1
) # [bsz, n_heads, to_seq_len//to_block_size-2, n_rand_blocks, to_block_size, -1]
gathered_value = self.torch_gather_b2(blocked_value_matrix, rand_attn)
gathered_value = gathered_value.view(
bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1
) # [bsz, n_heads, to_seq_len//to_block_size-2, n_rand_blocks, to_block_size, -1]
# 1st PART
# 1st block (global block) attention scores
# q[0] x (k[0], k[1], k[2], k[3], k[4] .... )
# [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len]
first_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 0], key_layer, ndim=4)
first_product = first_product * rsqrt_d
first_product += (1.0 - to_mask) * -10000.0
first_attn_weights = F.softmax(first_product, dim=-1) # [bsz, n_heads, from_block_size, to_seq_len]
# [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1]
first_context_layer = self.torch_bmm_nd(first_attn_weights, value_layer, ndim=4)
first_context_layer.unsqueeze_(2)
# 2nd PART
# 2nd block attention scores
# q[1] x (sliding_keys, random_keys, global_keys)
# sliding key blocks -> 2nd, 3rd blocks
# global key blocks -> 1st block
second_key_mat = torch.cat(
[
blocked_key_matrix[:, :, 0],
blocked_key_matrix[:, :, 1],
blocked_key_matrix[:, :, 2],
blocked_key_matrix[:, :, -1],
gathered_key[:, :, 0],
],
dim=2,
) # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1]
second_value_mat = torch.cat(
[
blocked_value_matrix[:, :, 0],
blocked_value_matrix[:, :, 1],
blocked_value_matrix[:, :, 2],
blocked_value_matrix[:, :, -1],
gathered_value[:, :, 0],
],
dim=2,
) # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1]
# [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]
second_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 1], second_key_mat, ndim=4)
second_seq_pad = torch.cat(
[
to_mask[:, :, :, : 3 * to_block_size],
to_mask[:, :, :, -to_block_size:],
first_context_layer.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]),
],
dim=3,
)
second_rand_pad = torch.cat(
[
first_context_layer.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]),
rand_mask[:, :, 0],
],
dim=3,
)
second_product = second_product * rsqrt_d
second_product += (1.0 - torch.minimum(second_seq_pad, second_rand_pad)) * -10000.0
second_attn_weights = F.softmax(
second_product, dim=-1
) # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]
# [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, -1]
second_context_layer = self.torch_bmm_nd(second_attn_weights, second_value_mat, ndim=4)
second_context_layer.unsqueeze_(2)
# 3rd PART
# Middle blocks attention scores
# q[-2:2] x (sliding_keys, random_keys, global_keys)
# sliding attn is calculated using special trick of shifting tokens as discussed in paper
# random keys are generated by taking random indices as per `rand_attn`
# global keys -> 1st & last block
exp_blocked_key_matrix = torch.cat(
[blocked_key_matrix[:, :, 1:-3], blocked_key_matrix[:, :, 2:-2], blocked_key_matrix[:, :, 3:-1]], dim=3
) # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]
exp_blocked_value_matrix = torch.cat(
[blocked_value_matrix[:, :, 1:-3], blocked_value_matrix[:, :, 2:-2], blocked_value_matrix[:, :, 3:-1]],
dim=3,
) # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]
middle_query_matrix = blocked_query_matrix[:, :, 2:-2]
# sliding attention scores for q[-2:2]
# [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [b, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]
inner_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, exp_blocked_key_matrix, ndim=5)
# ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, 3*to_block_size]
inner_band_product = inner_band_product * rsqrt_d
# randn attention scores for q[-2:2]
# [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1]
rand_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, gathered_key[:, :, 1:-1], ndim=5)
# ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size]
rand_band_product = rand_band_product * rsqrt_d
# Including 1st block (since it's global)
first_band_product = torch.einsum(
"bhlqd,bhkd->bhlqk", middle_query_matrix, blocked_key_matrix[:, :, 0]
) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size]
first_band_product = first_band_product * rsqrt_d
# Including last block (since it's global)
last_band_product = torch.einsum(
"bhlqd,bhkd->bhlqk", middle_query_matrix, blocked_key_matrix[:, :, -1]
) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size]
last_band_product = last_band_product * rsqrt_d
# masking padded tokens
inner_band_product += (1.0 - band_mask) * -10000.0
first_band_product += (1.0 - to_mask[:, :, :, :to_block_size].unsqueeze(3)) * -10000.0
last_band_product += (1.0 - to_mask[:, :, :, -to_block_size:].unsqueeze(3)) * -10000.0
rand_band_product += (1.0 - rand_mask[:, :, 1:-1]) * -10000.0
# completing attention scores matrix for all q[-2:2]
band_product = torch.cat(
[first_band_product, inner_band_product, rand_band_product, last_band_product], dim=-1
) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size]
# safely doing softmax since attention matrix is completed
attn_weights = F.softmax(
band_product, dim=-1
) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size]
# contibution of sliding keys
# [bsz, n_heads, m//from_block_size-4, from_block_size, 3*to_block_size] x [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1]
context_layer = self.torch_bmm_nd(
attn_weights[:, :, :, :, to_block_size : 4 * to_block_size], exp_blocked_value_matrix, ndim=5
)
# ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]
# adding contribution of random keys
# [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size] x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1]
context_layer += self.torch_bmm_nd(
attn_weights[:, :, :, :, 4 * to_block_size : -to_block_size], gathered_value[:, :, 1:-1], ndim=5
)
# ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]
# adding contribution of global keys
context_layer += torch.einsum(
"bhlqk,bhkd->bhlqd", attn_weights[:, :, :, :, :to_block_size], blocked_value_matrix[:, :, 0]
) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]
context_layer += torch.einsum(
"bhlqk,bhkd->bhlqd", attn_weights[:, :, :, :, -to_block_size:], blocked_value_matrix[:, :, -1]
) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1]
# 4th PART
# last 2nd token attention scores
# q[-2] x (sliding_keys, random_keys, global_keys)
# sliding key blocks -> last 3 blocks
# global key block -> 1st block
# random key block -> based on indices stored in `randn_attn`
second_last_key_mat = torch.cat(
[
blocked_key_matrix[:, :, 0],
blocked_key_matrix[:, :, -3],
blocked_key_matrix[:, :, -2],
blocked_key_matrix[:, :, -1],
gathered_key[:, :, -1],
],
dim=2,
) # [bsz, n_heads, (4+n_random_blocks)*to_block_size, -1]
second_last_value_mat = torch.cat(
[
blocked_value_matrix[:, :, 0],
blocked_value_matrix[:, :, -3],
blocked_value_matrix[:, :, -2],
blocked_value_matrix[:, :, -1],
gathered_value[:, :, -1],
],
dim=2,
) # [bsz, n_heads, (4+r)*to_block_size, -1]
# [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]
second_last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -2], second_last_key_mat, ndim=4)
second_last_seq_pad = torch.cat(
[
to_mask[:, :, :, :to_block_size],
to_mask[:, :, :, -3 * to_block_size :],
context_layer.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]),
],
dim=3,
)
second_last_rand_pad = torch.cat(
[
context_layer.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]),
rand_mask[:, :, -1],
],
dim=3,
)
second_last_product = second_last_product * rsqrt_d
second_last_product += (1.0 - torch.minimum(second_last_seq_pad, second_last_rand_pad)) * -10000.0
second_last_attn_weights = F.softmax(
second_last_product, dim=-1
) # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size]
# [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, -1]
second_last_context_layer = self.torch_bmm_nd(second_last_attn_weights, second_last_value_mat, ndim=4)
second_last_context_layer.unsqueeze_(2)
# 5th PART
# last block (global) attention scores
# q[-1] x (k[0], k[1], k[2], k[3], .... )
# [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len]
last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -1], key_layer, ndim=4)
last_product = last_product * rsqrt_d
last_product += (1.0 - to_mask) * -10000.0
last_attn_weights = F.softmax(last_product, dim=-1) # [bsz, n_heads, from_block_size, n]
# [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1]
last_context_layer = self.torch_bmm_nd(last_attn_weights, value_layer, ndim=4)
last_context_layer.unsqueeze_(2)
# combining representations of all tokens
context_layer = torch.cat(
[first_context_layer, second_context_layer, context_layer, second_last_context_layer, last_context_layer],
dim=2,
)
context_layer = context_layer.view((bsz, n_heads, from_seq_len, -1)) * from_mask
context_layer = torch.transpose(context_layer, 1, 2)
# this is just for visualizing; forward pass doesn't depend on following code
if output_attentions:
# TODO(PVP): need to verify if below code is correct
attention_probs = torch.zeros(
bsz, n_heads, from_seq_len, to_seq_len, dtype=torch.float, device=context_layer.device
)
# 1st query block
# corresponding to `first_context_layer`
attention_probs[:, :, :from_block_size, :] = first_attn_weights # all keys global
# 2nd query block
# corresponding to `second_context_layer`
attention_probs[:, :, from_block_size : 2 * from_block_size, : 3 * to_block_size] = second_attn_weights[
:, :, :, : 3 * to_block_size
] # 1st three key blocks (global + sliding)
attention_probs[:, :, from_block_size : 2 * from_block_size, -to_block_size:] = second_attn_weights[
:, :, :, 3 * to_block_size : 4 * to_block_size
] # last key block (global)
# random keys
for p1, i1, w1 in zip(range(bsz), rand_attn, second_attn_weights):
# p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch
for p2, i2, w2 in zip(range(n_heads), i1, w1):
# p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads
attn_probs_view = attention_probs.view(
bsz,
n_heads,
from_seq_len // from_block_size,
from_block_size,
to_seq_len // to_block_size,
to_block_size,
)
right_slice = w2[:, 4 * to_block_size :]
attn_probs_view[p1, p2, 1, :, i2[0]] = right_slice.view(
from_block_size, n_rand_blocks, to_block_size
)
# Middle query blocks
# corresponding to `context_layer`
# sliding keys
for q_idx in range(from_seq_len // from_block_size - 4):
attn_probs_view = attention_probs.view(
bsz,
n_heads,
from_seq_len // from_block_size,
from_block_size,
to_seq_len // to_block_size,
to_block_size,
)[:, :, 2:-2, :, 1:-1, :]
right_slice = attn_weights[:, :, q_idx, :, to_block_size : 4 * to_block_size]
attn_probs_view[:, :, q_idx, :, q_idx : q_idx + 3, :] = right_slice.view(
bsz, n_heads, from_block_size, 3, to_block_size
) # inner_band_product
# global keys (correspomding to 1st key block)
attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, :to_block_size] = attn_weights[
:, :, :, :, :to_block_size
].view(
bsz, n_heads, -1, to_block_size
) # first_band_product
# global keys (corresponding to last key block)
attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, -to_block_size:] = attn_weights[
:, :, :, :, -to_block_size:
].view(
bsz, n_heads, -1, to_block_size
) # last_band_product
# random keys
for p1, i1, w1 in zip(range(bsz), rand_attn, attn_weights):
# p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch
for p2, i2, w2 in zip(range(n_heads), i1, w1):
# p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads
for q_idx in range(1, len(i2) - 1):
attn_probs_view = attention_probs.view(
bsz,
n_heads,
from_seq_len // from_block_size,
from_block_size,
to_seq_len // to_block_size,
to_block_size,
)
right_slice = w2[q_idx - 1, :, 4 * to_block_size : -to_block_size]
attn_probs_view[p1, p2, q_idx + 1, :, i2[q_idx]] = right_slice.view(
from_block_size, n_rand_blocks, to_block_size
)
# Second-last query block
# corresponding to `second_last_context_layer`
attention_probs[:, :, -2 * from_block_size : -from_block_size, :to_block_size] = second_last_attn_weights[
:, :, :, :to_block_size
] # 1st key block (global)
attention_probs[
:, :, -2 * from_block_size : -from_block_size, -3 * to_block_size :
] = second_last_attn_weights[
:, :, :, to_block_size : 4 * to_block_size
] # last three blocks (global + sliding)
# random keys
for p1, i1, w1 in zip(range(bsz), rand_attn, second_last_attn_weights):
# p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch
for p2, i2, w2 in zip(range(n_heads), i1, w1):
# p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads
attn_probs_view = attention_probs.view(
bsz,
n_heads,
from_seq_len // from_block_size,
from_block_size,
to_seq_len // to_block_size,
to_block_size,
)
right_slice = w2[:, 4 * to_block_size :]
attn_probs_view[p1, p2, -2, :, i2[-1]] = right_slice.view(
from_block_size, n_rand_blocks, to_block_size
)
# last query block
# corresponding to `last_context_layer`
attention_probs[:, :, -from_block_size:, :] = last_attn_weights # all keys global
else:
attention_probs = None
return context_layer, attention_probs
@staticmethod
def torch_gather_b2(params, indices):
# this operation is equilvalent to tf.gather when batch_dims=2
if params.shape[:2] != indices.shape[:2]:
raise ValueError(
f"Make sure that the first two dimensions of params and indices are identical, \
but they are params: {params.shape[:2]} vs. indices: {params.shape[:2]}"
)
num_indices_to_gather = indices.shape[-2] * indices.shape[-1]
num_indices_to_pick_from = params.shape[2]
indices_shift = (
torch.arange(indices.shape[0] * indices.shape[1] * num_indices_to_gather, device=indices.device)
// num_indices_to_gather
* num_indices_to_pick_from
)
flattened_indices = indices.view(-1) + indices_shift
flattened_params = params.reshape(-1, params.shape[-2], params.shape[-1])
out_flattened = flattened_params.index_select(0, flattened_indices)
out = out_flattened.reshape(params.shape[:2] + (num_indices_to_gather,) + params.shape[3:])
return out
@staticmethod
def _create_rand_mask_from_inputs(
from_blocked_mask,
to_blocked_mask,
rand_attn,
num_attention_heads,
num_rand_blocks,
batch_size,
from_seq_length,
from_block_size,
):
"""
Create 3D attention mask from a 2D tensor mask.
Args:
from_blocked_mask: 2D Tensor of shape [batch_size,
from_seq_length//from_block_size, from_block_size].
to_blocked_mask: int32 Tensor of shape [batch_size,
to_seq_length//to_block_size, to_block_size].
rand_attn: [batch_size, num_attention_heads,
from_seq_length//from_block_size-2, num_rand_blocks]
num_attention_heads: int. Number of attention heads.
num_rand_blocks: int. Number of random chunks per row.
batch_size: int. Batch size for computation.
from_seq_length: int. length of from sequence.
from_block_size: int. size of block in from sequence.
Returns:
float Tensor of shape [batch_size, num_attention_heads, from_seq_length//from_block_size-2,
from_block_size, num_rand_blocks*to_block_size].
"""
num_windows = from_seq_length // from_block_size - 2
rand_mask = torch.stack([p1[i1.flatten()] for p1, i1 in zip(to_blocked_mask, rand_attn)])
rand_mask = rand_mask.view(batch_size, num_attention_heads, num_windows, num_rand_blocks * from_block_size)
rand_mask = torch.einsum("blq,bhlk->bhlqk", from_blocked_mask[:, 1:-1], rand_mask)
return rand_mask
@staticmethod
def _get_rand_attn_plan(from_seq_length, from_block_size, num_rand_blocks):
"""
Gives the plan of where to put random attention.
Args:
from_seq_length: int. length of from sequence.
from_block_size: int. size of block in from sequence.
num_rand_blocks: int. Number of random chunks per row.
Returns:
plan_from_length: ending location of from block plan_num_rand_blocks: number of random ending location for
each block
"""
plan_from_length = []
plan_num_rand_blocks = []
if (2 * num_rand_blocks + 5) < (from_seq_length // from_block_size):
plan_from_length.append(int((2 * num_rand_blocks + 5) * from_block_size))
plan_num_rand_blocks.append(num_rand_blocks)
plan_from_length.append(from_seq_length)
plan_num_rand_blocks.append(0)
elif (num_rand_blocks + 5) < (from_seq_length // from_block_size):
plan_from_length.append(int((num_rand_blocks + 5) * from_block_size))
plan_num_rand_blocks.append(num_rand_blocks // 2)
plan_from_length.append(from_seq_length)
plan_num_rand_blocks.append(num_rand_blocks - (num_rand_blocks // 2))
else:
plan_from_length.append(from_seq_length)
plan_num_rand_blocks.append(num_rand_blocks)
return plan_from_length, plan_num_rand_blocks
    @staticmethod
    def _bigbird_block_rand_mask(
        from_seq_length, to_seq_length, from_block_size, to_block_size, num_rand_blocks, last_idx=-1
    ):
        """
        Create adjacency list of random attention.
        Args:
            from_seq_length: int. length of from sequence.
            to_seq_length: int. length of to sequence.
            from_block_size: int. size of block in from sequence.
            to_block_size: int. size of block in to sequence.
            num_rand_blocks: int. Number of random chunks per row.
            last_idx: if -1 then num_rand_blocks blocks chosen anywhere in to sequence,
            if positive then num_rand_blocks blocks chosen only up to last_idx.
        Returns:
            adjacency list of size from_seq_length//from_block_size-2 by num_rand_blocks
        """
        # using this method when from_seq_length in [1024, 3072, 4096]
        assert (
            from_seq_length // from_block_size == to_seq_length // to_block_size
        ), "Error the number of blocks needs to be same!"
        # One row per non-global "from" block (the first and last blocks attend globally).
        rand_attn = np.zeros((from_seq_length // from_block_size - 2, num_rand_blocks), dtype=np.int32)
        # Candidate "to" blocks: everything except the two global end blocks.
        middle_seq = np.arange(1, to_seq_length // to_block_size - 1, dtype=np.int32)
        last = to_seq_length // to_block_size - 1
        if last_idx > (2 * to_block_size):
            # restrict the random candidates to blocks before last_idx
            last = (last_idx // to_block_size) - 1
        r = num_rand_blocks  # shorthand
        for i in range(1, from_seq_length // from_block_size - 1):
            # [start, end] delimits the sliding-window neighbourhood of block i that must be
            # excluded from the random candidates
            start = i - 2
            end = i
            if i == 1:
                rand_attn[i - 1, :] = np.random.permutation(middle_seq[2:last])[:r]
            elif i == 2:
                rand_attn[i - 1, :] = np.random.permutation(middle_seq[3:last])[:r]
            elif i == from_seq_length // from_block_size - 3:
                rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r]
            # Missing -3: should have been sliced till last-3
            elif i == from_seq_length // from_block_size - 2:
                rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r]
            # Missing -4: should have been sliced till last-4
            # NOTE: the two quirks above keep the window blocks eligible near the sequence end;
            # they are preserved deliberately (see in-code markers) — do not "fix" silently.
            else:
                if start > last:
                    start = last
                    rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r]
                elif (end + 1) == last:
                    rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r]
                else:
                    # choose from candidates strictly outside the window [start, end]
                    rand_attn[i - 1, :] = np.random.permutation(
                        np.concatenate((middle_seq[:start], middle_seq[end + 1 : last]))
                    )[:r]
        return rand_attn
    def _bigbird_block_rand_mask_with_head(
        self,
        from_seq_length,
        to_seq_length,
        from_block_size,
        to_block_size,
        num_heads,
        plan_from_length,
        plan_num_rand_blocks,
        window_block_left=1,
        window_block_right=1,
        global_block_top=1,
        global_block_bottom=1,
        global_block_left=1,
        global_block_right=1,
    ):
        """
        Create adjacency list of random attention.
        Args:
            from_seq_length: int. length of from sequence.
            to_seq_length: int. length of to sequence.
            from_block_size: int. size of block in from sequence.
            to_block_size: int. size of block in to sequence.
            num_heads: int. total number of heads.
            plan_from_length: list. plan from length where num_random_blocks are chosen from.
            plan_num_rand_blocks: list. number of rand blocks within the plan.
            window_block_left: int. number of blocks of window to left of a block.
            window_block_right: int. number of blocks of window to right of a block.
            global_block_top: int. number of blocks at the top.
            global_block_bottom: int. number of blocks at the bottom.
            global_block_left: int. Number of blocks globally used to the left.
            global_block_right: int. Number of blocks globally used to the right.
        Returns:
            adjacency list of size num_head where each element is of size from_seq_length//from_block_size-2 by
            num_rand_blocks
        """
        # using this method when from_seq_length not in [1024, 3072, 4096]
        assert (
            from_seq_length // from_block_size == to_seq_length // to_block_size
        ), "Error the number of blocks needs to be same!"
        assert from_seq_length in plan_from_length, "Error from sequence length not in plan!"
        # Total number of blocks in the mask
        num_blocks = from_seq_length // from_block_size
        # Number of blocks per plan stage (cumulative block boundaries)
        plan_block_length = np.array(plan_from_length) // from_block_size
        # till when to follow plan
        max_plan_idx = plan_from_length.index(from_seq_length)
        # Random attention adjacency list: one [num_blocks, total_rand_blocks] array per head
        rand_attn = [
            np.zeros((num_blocks, np.sum(plan_num_rand_blocks[: max_plan_idx + 1])), dtype=np.int32)
            for i in range(num_heads)
        ]
        # We will go iteratively over the plan blocks and pick random number of
        # attention blocks from the legally allowed blocks
        for plan_idx in range(max_plan_idx + 1):
            rnd_r_cnt = 0
            if plan_idx > 0:
                # set the row for all from_blocks starting from 0 to
                # plan_block_length[plan_idx-1]
                # column index starts from plan_block_length[plan_idx-1] and ends at
                # plan_block_length[plan_idx]
                if plan_num_rand_blocks[plan_idx] > 0:
                    # [rnd_r_cnt, curr_r_cnt) is this stage's column span in rand_attn
                    rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx]))
                    curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1]))
                    for blk_rw_idx in range(global_block_top, plan_block_length[plan_idx - 1]):
                        for h in range(num_heads):
                            rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(
                                block_id=blk_rw_idx,
                                to_start_block_id=plan_block_length[plan_idx - 1],
                                to_end_block_id=plan_block_length[plan_idx],
                                num_rand_blocks=plan_num_rand_blocks[plan_idx],
                                window_block_left=window_block_left,
                                window_block_right=window_block_right,
                                global_block_left=global_block_left,
                                global_block_right=global_block_right,
                            )
                # Back-fill earlier stages' columns for rows that belong to the current stage.
                for pl_id in range(plan_idx):
                    if plan_num_rand_blocks[pl_id] == 0:
                        continue
                    for blk_rw_idx in range(plan_block_length[plan_idx - 1], plan_block_length[plan_idx]):
                        rnd_r_cnt = 0
                        to_start_block_id = 0
                        if pl_id > 0:
                            rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:pl_id]))
                            to_start_block_id = plan_block_length[pl_id - 1]
                        curr_r_cnt = int(np.sum(plan_num_rand_blocks[: pl_id + 1]))
                        for h in range(num_heads):
                            rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(
                                block_id=blk_rw_idx,
                                to_start_block_id=to_start_block_id,
                                to_end_block_id=plan_block_length[pl_id],
                                num_rand_blocks=plan_num_rand_blocks[pl_id],
                                window_block_left=window_block_left,
                                window_block_right=window_block_right,
                                global_block_left=global_block_left,
                                global_block_right=global_block_right,
                            )
            if plan_num_rand_blocks[plan_idx] == 0:
                continue
            # Fill the current stage's columns for the current stage's own rows.
            curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1]))
            from_start_block_id = global_block_top
            to_start_block_id = 0
            if plan_idx > 0:
                rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx]))
                from_start_block_id = plan_block_length[plan_idx - 1]
                to_start_block_id = plan_block_length[plan_idx - 1]
            for blk_rw_idx in range(from_start_block_id, plan_block_length[plan_idx]):
                for h in range(num_heads):
                    rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(
                        block_id=blk_rw_idx,
                        to_start_block_id=to_start_block_id,
                        to_end_block_id=plan_block_length[plan_idx],
                        num_rand_blocks=plan_num_rand_blocks[plan_idx],
                        window_block_left=window_block_left,
                        window_block_right=window_block_right,
                        global_block_left=global_block_left,
                        global_block_right=global_block_right,
                    )
        # Drop the rows of the global top/bottom blocks — those blocks attend everywhere anyway.
        for nh in range(num_heads):
            rand_attn[nh] = rand_attn[nh][global_block_top : num_blocks - global_block_bottom, :]
        return rand_attn
@staticmethod
def _get_single_block_row_attention(
block_id,
to_start_block_id,
to_end_block_id,
num_rand_blocks,
window_block_left=1,
window_block_right=1,
global_block_left=1,
global_block_right=1,
):
"""
For a single row block get random row attention.
Args:
block_id: int. block id of row.
to_start_block_id: int. random attention coloum start id.
to_end_block_id: int. random attention coloum end id.
num_rand_blocks: int. number of random blocks to be selected.
window_block_left: int. number of blocks of window to left of a block.
window_block_right: int. number of blocks of window to right of a block.
global_block_left: int. Number of blocks globally used to the left.
global_block_right: int. Number of blocks globally used to the right.
Returns:
row containing the random attention vector of size num_rand_blocks.
"""
# list of to_blocks from which to choose random attention
to_block_list = np.arange(to_start_block_id, to_end_block_id, dtype=np.int32)
# permute the blocks
perm_block = np.random.permutation(to_block_list)
# illegal blocks for the current block id, using window
illegal_blocks = list(range(block_id - window_block_left, block_id + window_block_right + 1))
# Add blocks at the start and at the end
illegal_blocks.extend(list(range(global_block_left)))
illegal_blocks.extend(list(range(to_end_block_id - global_block_right, to_end_block_id)))
# The second from_block cannot choose random attention on second last to_block
if block_id == 1:
illegal_blocks.append(to_end_block_id - 2)
# The second last from_block cannot choose random attention on second to_block
if block_id == to_end_block_id - 2:
illegal_blocks.append(1)
selected_random_blokcs = []
for i in range(to_end_block_id - to_start_block_id):
if perm_block[i] not in illegal_blocks:
selected_random_blokcs.append(perm_block[i])
if len(selected_random_blokcs) == num_rand_blocks:
break
return np.array(selected_random_blokcs, dtype=np.int32)
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->BigBird
class BigBirdSelfOutput(nn.Module):
    """Projects attention output back to the hidden size, then applies dropout and a residual LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        # project and regularize, then add the residual before normalizing
        projected = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(projected + input_tensor)
class BigBirdAttention(nn.Module):
    """Pairs a self-attention module (full or block-sparse) with its output projection."""

    def __init__(self, config, seed=None):
        super().__init__()
        self.attention_type = config.attention_type
        self.config = config
        self.seed = seed
        if self.config.attention_type == "original_full":
            self.self = BigBirdSelfAttention(config)
        elif self.config.attention_type == "block_sparse":
            self.self = BigBirdBlockSparseAttention(config, seed)
        else:
            raise ValueError(
                f"attention_type can either be original_full or block_sparse, but is {self.config.attention_type}"
            )
        self.output = BigBirdSelfOutput(config)

    def set_attention_type(self, value: str):
        """Swap the underlying attention implementation in place, keeping the trained projections."""
        if value not in ["original_full", "block_sparse"]:
            raise ValueError(
                f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}"
            )
        if value == self.attention_type:
            # requested implementation is already active
            return
        self.attention_type = value
        if value == "original_full":
            replacement = BigBirdSelfAttention(self.config)
        else:
            replacement = BigBirdBlockSparseAttention(self.config, self.seed)
        # carry the trained query/key/value projections over to the replacement module
        replacement.query = self.self.query
        replacement.value = self.self.value
        replacement.key = self.self.key
        self.self = replacement
        self.attention_type = value
        if not self.training:
            self.self.eval()

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
        # block_sparse config
        band_mask=None,
        from_mask=None,
        to_mask=None,
        from_blocked_mask=None,
        to_blocked_mask=None,
    ):
        if self.attention_type == "original_full":
            attn_outputs = self.self(
                hidden_states,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                past_key_value,
                output_attentions,
            )
        else:
            # block-sparse attention has no cross-attention path
            assert (
                encoder_hidden_states is None
            ), "BigBird cannot be used as a decoder when config.attention_type != 'original_full'"
            attn_outputs = self.self(
                hidden_states, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, output_attentions
            )
        attention_output = self.output(attn_outputs[0], hidden_states)
        # re-attach any attention probabilities / cache entries from the inner module
        return (attention_output,) + attn_outputs[1:]
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->BigBird
class BigBirdIntermediate(nn.Module):
    """Feed-forward expansion: hidden_size -> intermediate_size followed by the activation function."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # config.hidden_act may be either a registered activation name or a callable
        act = config.hidden_act
        self.intermediate_act_fn = ACT2FN[act] if isinstance(act, str) else act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->BigBird
class BigBirdOutput(nn.Module):
    """Feed-forward contraction back to hidden_size with dropout and a residual LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        contracted = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(contracted + input_tensor)
class BigBirdLayer(nn.Module):
    """A single BigBird transformer layer: self-attention (full or block-sparse), optional
    cross-attention (decoder only), and a chunked feed-forward block."""

    def __init__(self, config, seed=None):
        super().__init__()
        self.config = config
        self.attention_type = config.attention_type
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        # feed-forward chunking splits along the sequence dimension
        self.seq_len_dim = 1
        # seed fixes this layer's random-block pattern for block-sparse attention
        self.attention = BigBirdAttention(config, seed=seed)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
            self.crossattention = BigBirdAttention(config)
        self.intermediate = BigBirdIntermediate(config)
        self.output = BigBirdOutput(config)

    def set_attention_type(self, value: str):
        # Switch both self- and (if present) cross-attention between the two implementations.
        if value not in ["original_full", "block_sparse"]:
            raise ValueError(
                f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}"
            )
        # attention type is already correctly set
        if value == self.attention_type:
            return
        self.attention_type = value
        self.attention.set_attention_type(value)
        if self.add_cross_attention:
            self.crossattention.set_attention_type(value)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        band_mask=None,
        from_mask=None,
        to_mask=None,
        blocked_encoder_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_value=self_attn_past_key_value,
            output_attentions=output_attentions,
            band_mask=band_mask,
            from_mask=from_mask,
            to_mask=to_mask,
            from_blocked_mask=blocked_encoder_mask,
            to_blocked_mask=blocked_encoder_mask,
        )
        attention_output = self_attention_outputs[0]

        # if decoder, the last output is tuple of self-attn cache
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        cross_attn_present_key_value = None
        if self.is_decoder and encoder_hidden_states is not None:
            if not hasattr(self, "crossattention"):
                raise ValueError(
                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with \
                    cross-attention layers by setting `config.add_cross_attention=True`"
                )
            # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights
            # add cross-attn cache to positions 3,4 of present_key_value tuple
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value

        # run the feed-forward block in sequence-dimension chunks to bound peak memory
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs

        # if decoder, return the attn key/values as the last output
        if self.is_decoder:
            outputs = outputs + (present_key_value,)

        return outputs

    def feed_forward_chunk(self, attention_output):
        # one feed-forward pass over a single chunk of the sequence
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
class BigBirdEncoder(nn.Module):
    """Stack of `BigBirdLayer`s.

    Each layer index doubles as that layer's sparse-attention seed, so different layers use
    different random-block patterns.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.attention_type = config.attention_type
        self.layer = nn.ModuleList(
            [BigBirdLayer(config, seed=layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )

    def set_attention_type(self, value: str):
        """Switch every layer between 'original_full' and 'block_sparse' attention."""
        if value not in ["original_full", "block_sparse"]:
            raise ValueError(
                f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}"
            )
        # attention type is already correctly set
        if value == self.attention_type:
            return
        self.attention_type = value
        for layer in self.layer:
            layer.set_attention_type(value)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        band_mask=None,
        from_mask=None,
        to_mask=None,
        blocked_encoder_mask=None,
        return_dict=True,
    ):
        """Run all layers; returns a `BaseModelOutputWithPastAndCrossAttentions` (or tuple)."""
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        next_decoder_cache = () if use_cache else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None

            if getattr(self.config, "gradient_checkpointing", False) and self.training:
                if use_cache:
                    # caching is incompatible with re-running the forward pass during backprop
                    # (fix: Logger.warn is a deprecated alias of Logger.warning)
                    logger.warning(
                        "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                        "`use_cache=False`..."
                    )
                    use_cache = False

                def create_custom_forward(module):
                    # close over per-layer constants; checkpoint() only forwards tensors
                    def custom_forward(*inputs):
                        return module(*inputs, past_key_value, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    band_mask,
                    from_mask,
                    to_mask,
                    blocked_encoder_mask,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    band_mask,
                    from_mask,
                    to_mask,
                    blocked_encoder_mask,
                    past_key_value,
                    output_attentions,
                )

            hidden_states = layer_outputs[0]
            if use_cache:
                # decoder layers return their key/value cache as the last element
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )
# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->BigBird
class BigBirdPredictionHeadTransform(nn.Module):
    """Dense -> activation -> LayerNorm transform applied before the LM decoder head."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        # config.hidden_act may be either a registered activation name or a callable
        act = config.hidden_act
        self.transform_act_fn = ACT2FN[act] if isinstance(act, str) else act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        return self.LayerNorm(self.transform_act_fn(self.dense(hidden_states)))
# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->BigBird
class BigBirdLMPredictionHead(nn.Module):
    """Masked-LM head: hidden-state transform followed by a vocab-size decoder with its own bias."""

    def __init__(self, config):
        super().__init__()
        self.transform = BigBirdPredictionHeadTransform(config)
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        return self.decoder(self.transform(hidden_states))
# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->BigBird
class BigBirdOnlyMLMHead(nn.Module):
    """Thin wrapper exposing only the masked-LM prediction head."""

    def __init__(self, config):
        super().__init__()
        self.predictions = BigBirdLMPredictionHead(config)

    def forward(self, sequence_output):
        return self.predictions(sequence_output)
# Copied from transformers.models.bert.modeling_bert.BertOnlyNSPHead with Bert->BigBird
class BigBirdOnlyNSPHead(nn.Module):
    """Next-sentence-prediction head: a binary classifier over the pooled output."""

    def __init__(self, config):
        super().__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        return self.seq_relationship(pooled_output)
# Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->BigBird
class BigBirdPreTrainingHeads(nn.Module):
    """Joint pre-training heads: masked-LM scores plus next-sentence relationship scores."""

    def __init__(self, config):
        super().__init__()
        self.predictions = BigBirdLMPredictionHead(config)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        # the two heads are independent; evaluate both and return them as a pair
        return self.predictions(sequence_output), self.seq_relationship(pooled_output)
class BigBirdPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = BigBirdConfig
    load_tf_weights = load_tf_weights_in_big_bird
    base_model_prefix = "bert"
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def _init_weights(self, module):
        """Initialize the weights of one submodule (dispatched over the module's type)."""
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                # keep the padding embedding at exactly zero
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
BIG_BIRD_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config (:class:`~transformers.BigBirdConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
BIG_BIRD_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.BigBirdTokenizer`. See
:func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`{0}`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@dataclass
class BigBirdForPreTrainingOutput(ModelOutput):
    """
    Output type of :class:`~transformers.BigBirdForPreTraining`.
    Args:
        loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`):
            Total loss as the sum of the masked language modeling loss and the next sequence prediction
            (classification) loss.
        prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        seq_relationship_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
            before SoftMax).
        hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
            sequence_length, sequence_length)`.
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """
    loss: Optional[torch.FloatTensor] = None
    prediction_logits: torch.FloatTensor = None
    seq_relationship_logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
@add_start_docstrings(
    "The bare BigBird Model transformer outputting raw hidden-states without any specific head on top.",
    BIG_BIRD_START_DOCSTRING,
)
class BigBirdModel(BigBirdPreTrainedModel):
    """
    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in `Attention is
    all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
    To behave as an decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
    set to :obj:`True`. To be used in a Seq2Seq model, the model needs to initialized with both :obj:`is_decoder`
    argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
    input to the forward pass.
    """
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.attention_type = self.config.attention_type
        self.config = config
        self.block_size = self.config.block_size
        self.embeddings = BigBirdEmbeddings(config)
        self.encoder = BigBirdEncoder(config)
        # Pooler (Linear + Tanh over the first token's hidden state) is optional; heads
        # that do not need a pooled representation pass add_pooling_layer=False.
        if add_pooling_layer:
            self.pooler = nn.Linear(config.hidden_size, config.hidden_size)
            self.activation = nn.Tanh()
        else:
            self.pooler = None
            self.activation = None
        # Cross-attention (decoder usage) is only implemented for full attention, so
        # force `original_full` in that configuration.
        if self.attention_type != "original_full" and config.add_cross_attention:
            logger.warning(
                "When using `BigBirdForCausalLM` as decoder, then `attention_type` must be `original_full`. Setting `attention_type=original_full`"
            )
            self.set_attention_type("original_full")
        self.init_weights()
    def get_input_embeddings(self):
        # Word-embedding matrix; the LM heads tie their output projection to this.
        return self.embeddings.word_embeddings
    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value
    def set_attention_type(self, value: str):
        """Switch the whole encoder between 'original_full' and 'block_sparse' attention."""
        if value not in ["original_full", "block_sparse"]:
            raise ValueError(
                f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}"
            )
        # attention type is already correctly set
        if value == self.attention_type:
            return
        self.attention_type = value
        self.encoder.set_attention_type(value)
    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Caching only makes sense in decoder mode (incremental generation).
        if self.config.is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            use_cache = False
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            batch_size, seq_length = input_shape
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size, seq_length = input_shape
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        # past_key_values_length
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
        if attention_mask is None:
            attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # in order to use block_sparse attention, sequence_length has to be at least
        # bigger than all global attentions: 2 * block_size
        # + sliding tokens: 3 * block_size
        # + random tokens: 2 * num_random_blocks * block_size
        max_tokens_to_attend = (5 + 2 * self.config.num_random_blocks) * self.config.block_size
        if self.attention_type == "block_sparse" and seq_length <= max_tokens_to_attend:
            # change attention_type from block_sparse to original_full
            sequence_length = input_ids.size(1) if input_ids is not None else inputs_embeds.size(1)
            logger.warning(
                "Attention type 'block_sparse' is not possible if sequence_length: "
                f"{sequence_length} <= num global tokens: 2 * config.block_size "
                "+ min. num sliding tokens: 3 * config.block_size "
                "+ config.num_random_blocks * config.block_size "
                "+ additional buffer: config.num_random_blocks * config.block_size "
                f"= {max_tokens_to_attend} with config.block_size "
                f"= {self.config.block_size}, config.num_random_blocks "
                f"= {self.config.num_random_blocks}."
                "Changing attention type to 'original_full'..."
            )
            self.set_attention_type("original_full")
        # Block-sparse attention needs the sequence length to be a multiple of the block
        # size, so pad all inputs on the right; `padding_len` is undone before returning.
        if self.attention_type == "block_sparse":
            (
                padding_len,
                input_ids,
                attention_mask,
                token_type_ids,
                position_ids,
                inputs_embeds,
            ) = self._pad_to_block_size(
                input_ids=input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                inputs_embeds=inputs_embeds,
                pad_token_id=self.config.pad_token_id,
            )
        else:
            padding_len = 0
        # The two attention modes consume different mask formats: block-sparse needs the
        # blocked/band/from/to masks, full attention needs the broadcastable extended mask.
        if self.attention_type == "block_sparse":
            blocked_encoder_mask, band_mask, from_mask, to_mask = self.create_masks_for_block_sparse_attn(
                attention_mask, self.block_size
            )
            extended_attention_mask = None
        elif self.attention_type == "original_full":
            blocked_encoder_mask = None
            band_mask = None
            from_mask = None
            to_mask = None
            # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
            # ourselves in which case we just need to make it broadcastable to all heads.
            extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
                attention_mask, input_shape, device
            )
        else:
            raise ValueError(
                f"attention_type can either be original_full or block_sparse, but is {self.attention_type}"
            )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_key_values_length,
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            band_mask=band_mask,
            from_mask=from_mask,
            to_mask=to_mask,
            blocked_encoder_mask=blocked_encoder_mask,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        # BertPooler equivalent: Linear + Tanh over the first token's final hidden state.
        pooler_output = self.activation(self.pooler(sequence_output[:, 0, :])) if (self.pooler is not None) else None
        # undo padding
        if padding_len > 0:
            # unpad `sequence_output` because the calling function is expecting a length == input_ids.size(1)
            sequence_output = sequence_output[:, :-padding_len]
        if not return_dict:
            return (sequence_output, pooler_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooler_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )
    @staticmethod
    def create_masks_for_block_sparse_attn(attention_mask: torch.Tensor, block_size: int):
        """Build the blocked / band / from / to masks consumed by block-sparse attention
        from a plain 2D padding mask of shape [batch_size, seq_length]."""
        batch_size, seq_length = attention_mask.size()
        assert (
            seq_length % block_size == 0
        ), f"Sequence length must be multiple of block size, but sequence length is {seq_length}, while block size is {block_size}."
        def create_band_mask_from_inputs(from_blocked_mask, to_blocked_mask):
            """
            Create 3D attention mask from a 2D tensor mask.
            Args:
                from_blocked_mask: 2D Tensor of shape [batch_size,
                from_seq_length//from_block_size, from_block_size].
                to_blocked_mask: int32 Tensor of shape [batch_size,
                to_seq_length//to_block_size, to_block_size].
            Returns:
                float Tensor of shape [batch_size, 1, from_seq_length//from_block_size-4, from_block_size,
                3*to_block_size].
            """
            exp_blocked_to_pad = torch.cat(
                [to_blocked_mask[:, 1:-3], to_blocked_mask[:, 2:-2], to_blocked_mask[:, 3:-1]], dim=2
            )
            band_mask = torch.einsum("blq,blk->blqk", from_blocked_mask[:, 2:-2], exp_blocked_to_pad)
            band_mask.unsqueeze_(1)
            return band_mask
        blocked_encoder_mask = attention_mask.view(batch_size, seq_length // block_size, block_size)
        band_mask = create_band_mask_from_inputs(blocked_encoder_mask, blocked_encoder_mask)
        from_mask = attention_mask.view(batch_size, 1, seq_length, 1)
        to_mask = attention_mask.view(batch_size, 1, 1, seq_length)
        return blocked_encoder_mask, band_mask, from_mask, to_mask
    def _pad_to_block_size(
        self,
        input_ids: torch.Tensor,
        attention_mask: torch.Tensor,
        token_type_ids: torch.Tensor,
        position_ids: torch.Tensor,
        inputs_embeds: torch.Tensor,
        pad_token_id: int,
    ):
        """A helper function to pad tokens and mask to work with implementation of BigBird block-sparse attention."""
        # padding
        block_size = self.config.block_size
        input_shape = input_ids.shape if input_ids is not None else inputs_embeds.shape
        batch_size, seq_len = input_shape[:2]
        padding_len = (block_size - seq_len % block_size) % block_size
        if padding_len > 0:
            logger.info(
                f"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of "
                f"`config.block_size`: {block_size}"
            )
            if input_ids is not None:
                input_ids = F.pad(input_ids, (0, padding_len), value=pad_token_id)
            if position_ids is not None:
                # pad with position_id = pad_token_id as in modeling_bigbird.BigBirdEmbeddings
                position_ids = F.pad(position_ids, (0, padding_len), value=pad_token_id)
            if inputs_embeds is not None:
                # Padding embeddings are computed by embedding pad tokens, so they match
                # what the model would have produced for real padding.
                input_ids_padding = inputs_embeds.new_full(
                    (batch_size, padding_len),
                    self.config.pad_token_id,
                    dtype=torch.long,
                )
                inputs_embeds_padding = self.embeddings(input_ids_padding)
                inputs_embeds = torch.cat([inputs_embeds, inputs_embeds_padding], dim=-2)
            attention_mask = F.pad(attention_mask, (0, padding_len), value=False)  # no attention on the padding tokens
            token_type_ids = F.pad(token_type_ids, (0, padding_len), value=0)  # pad with token_type_id = 0
        return padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds
class BigBirdForPreTraining(BigBirdPreTrainedModel):
    """BigBird with the two BERT-style pre-training heads on top: masked-LM prediction
    and next-sentence prediction over the pooled output."""
    def __init__(self, config):
        super().__init__(config)
        self.bert = BigBirdModel(config, add_pooling_layer=True)
        self.cls = BigBirdPreTrainingHeads(config)
        self.init_weights()
    def get_output_embeddings(self):
        # The decoder projection is tied to the input word embeddings.
        return self.cls.predictions.decoder
    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings
    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=BigBirdForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        next_sentence_label=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape ``(batch_size, sequence_length)``, `optional`):
            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
            config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
            (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
        next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):
            Labels for computing the next sequence prediction (classification) loss. If specified, nsp loss will be
            added to masked_lm loss. Input should be a sequence pair (see :obj:`input_ids` docstring) Indices should be
            in ``[0, 1]``:
            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.
        Returns:
        Example::
            >>> from transformers import BigBirdTokenizer, BigBirdForPreTraining
            >>> import torch
            >>> tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
            >>> model = BigBirdForPreTraining.from_pretrained('google/bigbird-roberta-base')
            >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
            >>> outputs = model(**inputs)
            >>> prediction_logits = outputs.prediction_logits
            >>> seq_relationship_logits = outputs.seq_relationship_logits
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output, pooled_output = outputs[:2]
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
        # MLM loss over all labeled tokens; the NSP loss is only added when both
        # `labels` and `next_sentence_label` are provided.
        total_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            total_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
        if next_sentence_label is not None and total_loss is not None:
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = total_loss + next_sentence_loss
        if not return_dict:
            output = (prediction_scores, seq_relationship_score) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output
        return BigBirdForPreTrainingOutput(
            loss=total_loss,
            prediction_logits=prediction_scores,
            seq_relationship_logits=seq_relationship_score,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings("""BigBird Model with a `language modeling` head on top. """, BIG_BIRD_START_DOCSTRING)
class BigBirdForMaskedLM(BigBirdPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        # MLM needs bidirectional context; decoder configs get causal masking instead.
        if config.is_decoder:
            logger.warning(
                "If you want to use `BigBirdForMaskedLM` make sure `config.is_decoder=False` for "
                "bi-directional self-attention."
            )
        self.bert = BigBirdModel(config)
        self.cls = BigBirdOnlyMLMHead(config)
        self.init_weights()
    def get_output_embeddings(self):
        # The decoder projection is tied to the input word embeddings.
        return self.cls.predictions.decoder
    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings
    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
            config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
            (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)
        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
    def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
        """Append a masked-out dummy PAD token so generation can predict one more position."""
        input_shape = input_ids.shape
        effective_batch_size = input_shape[0]
        # add a dummy token
        assert self.config.pad_token_id is not None, "The PAD token should be defined for generation"
        attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
        dummy_token = torch.full(
            (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
        )
        input_ids = torch.cat([input_ids, dummy_token], dim=1)
        return {"input_ids": input_ids, "attention_mask": attention_mask}
@add_start_docstrings(
    """BigBird Model with a `language modeling` head on top for CLM fine-tuning. """, BIG_BIRD_START_DOCSTRING
)
class BigBirdForCausalLM(BigBirdPreTrainedModel):
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    def __init__(self, config):
        super().__init__(config)
        if not config.is_decoder:
            logger.warning("If you want to use `BigBirdForCausalLM` as a standalone, add `is_decoder=True.`")
        self.bert = BigBirdModel(config)
        self.cls = BigBirdOnlyMLMHead(config)
        self.init_weights()
    def get_output_embeddings(self):
        # The decoder projection is tied to the input word embeddings.
        return self.cls.predictions.decoder
    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings
    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        labels=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are
            ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
        Returns:
        Example::
            >>> from transformers import BigBirdTokenizer, BigBirdForCausalLM, BigBirdConfig
            >>> import torch
            >>> tokenizer = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
            >>> config = BigBirdConfig.from_pretrained("google/bigbird-roberta-base")
            >>> config.is_decoder = True
            >>> model = BigBirdForCausalLM.from_pretrained('google/bigbird-roberta-base', config=config)
            >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
            >>> outputs = model(**inputs)
            >>> prediction_logits = outputs.logits
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)
        lm_loss = None
        if labels is not None:
            # we are doing next-token prediction; shift prediction scores and input ids by one
            shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((lm_loss,) + output) if lm_loss is not None else output
        return CausalLMOutputWithCrossAttentions(
            loss=lm_loss,
            logits=prediction_scores,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )
    def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
        """Assemble the forward kwargs for one incremental generation step."""
        input_shape = input_ids.shape
        # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)
        # cut decoder_input_ids if past is used
        if past is not None:
            input_ids = input_ids[:, -1:]
        return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past}
    def _reorder_cache(self, past, beam_idx):
        """Reorder cached key/value states to follow beam-search reordering of the batch."""
        reordered_past = ()
        for layer_past in past:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],
            )
        return reordered_past
class BigBirdClassificationHead(nn.Module):
    """Head for sentence-level classification tasks."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
        self.config = config

    def forward(self, features, **kwargs):
        """Map encoder ``features`` to classification logits via the first (<s>/[CLS]) token."""
        cls_state = features[:, 0, :]  # the first token summarizes the whole sequence
        cls_state = self.dense(self.dropout(cls_state))
        cls_state = self.dropout(ACT2FN[self.config.hidden_act](cls_state))
        return self.out_proj(cls_state)
@add_start_docstrings(
    """
    BigBird Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    BIG_BIRD_START_DOCSTRING,
)
class BigBirdForSequenceClassification(BigBirdPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BigBirdModel(config)
        self.classifier = BigBirdClassificationHead(config)
        self.init_weights()
    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
            config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
            If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # The classification head consumes the full sequence output and extracts the
        # first token itself (see BigBirdClassificationHead).
        sequence_output = outputs[0]
        logits = self.classifier(sequence_output)
        loss = None
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    BigBird Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RocStories/SWAG tasks.
    """,
    BIG_BIRD_START_DOCSTRING,
)
class BigBirdForMultipleChoice(BigBirdPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.bert = BigBirdModel(config)
        self.sequence_summary = SequenceSummary(config)
        self.classifier = nn.Linear(config.hidden_size, 1)

        self.init_weights()

    @add_start_docstrings_to_model_forward(
        BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
    )
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
            num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
            :obj:`input_ids` above)
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict
        num_choices = (input_ids if input_ids is not None else inputs_embeds).shape[1]

        def _merge_choice_dim(tensor):
            # (batch, choices, seq) -> (batch * choices, seq); passes None through.
            return None if tensor is None else tensor.view(-1, tensor.size(-1))

        input_ids = _merge_choice_dim(input_ids)
        attention_mask = _merge_choice_dim(attention_mask)
        token_type_ids = _merge_choice_dim(token_type_ids)
        position_ids = _merge_choice_dim(position_ids)
        if inputs_embeds is not None:
            # Embeddings carry a trailing hidden dimension, so merge explicitly.
            inputs_embeds = inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))

        encoder_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # Summarize each flattened sequence into one vector, score it, then
        # restore the (batch, choices) layout for the cross-entropy loss.
        pooled_output = self.sequence_summary(encoder_outputs[0])
        reshaped_logits = self.classifier(pooled_output).view(-1, num_choices)

        loss = None
        if labels is not None:
            loss = CrossEntropyLoss()(reshaped_logits, labels)

        if return_dict:
            return MultipleChoiceModelOutput(
                loss=loss,
                logits=reshaped_logits,
                hidden_states=encoder_outputs.hidden_states,
                attentions=encoder_outputs.attentions,
            )
        tuple_output = (reshaped_logits,) + encoder_outputs[2:]
        return tuple_output if loss is None else (loss,) + tuple_output
@add_start_docstrings(
    """
    BigBird Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    BIG_BIRD_START_DOCSTRING,
)
class BigBirdForTokenClassification(BigBirdPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.bert = BigBirdModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
            1]``.
        """
        if return_dict is None:
            return_dict = self.config.use_return_dict

        encoder_outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        token_states = self.dropout(encoder_outputs[0])
        logits = self.classifier(token_states)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            flat_logits = logits.view(-1, self.num_labels)
            flat_labels = labels.view(-1)
            if attention_mask is None:
                loss = loss_fct(flat_logits, flat_labels)
            else:
                # Score only real tokens: labels at masked-out (padding)
                # positions are replaced with the loss's ignore_index.
                keep = attention_mask.view(-1) == 1
                flat_labels = torch.where(
                    keep, flat_labels, torch.tensor(loss_fct.ignore_index).type_as(labels)
                )
                loss = loss_fct(flat_logits, flat_labels)

        if return_dict:
            return TokenClassifierOutput(
                loss=loss,
                logits=logits,
                hidden_states=encoder_outputs.hidden_states,
                attentions=encoder_outputs.attentions,
            )
        tuple_output = (logits,) + encoder_outputs[2:]
        return tuple_output if loss is None else (loss,) + tuple_output
class BigBirdForQuestionAnsweringHead(nn.Module):
    """Head for question answering tasks."""

    def __init__(self, config):
        super().__init__()
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.intermediate = BigBirdIntermediate(config)
        self.output = BigBirdOutput(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_output):
        # Feed-forward transform with a residual connection back onto the
        # encoder output, then project to the per-token span logits.
        states = self.intermediate(self.dropout(encoder_output))
        states = self.output(states, encoder_output)
        return self.qa_outputs(states)
@add_start_docstrings(
    """
    BigBird Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    BIG_BIRD_START_DOCSTRING,
)
class BigBirdForQuestionAnswering(BigBirdPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        # Span extraction predicts exactly two values per token: a start
        # logit and an end logit.
        config.num_labels = 2
        self.num_labels = config.num_labels
        # Used in `forward` to locate the question/context boundary.
        self.sep_token_id = config.sep_token_id

        self.bert = BigBirdModel(config, add_pooling_layer=False)
        self.qa_classifier = BigBirdForQuestionAnsweringHead(config)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="google/bigbird-base-trivia-itc",
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        question_lengths=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
            sequence are not taken into account for computing the loss.
        end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
            sequence are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        seqlen = input_ids.size(1) if input_ids is not None else inputs_embeds.size(1)

        if question_lengths is None and input_ids is not None:
            # assuming input_ids format: <cls> <question> <sep> context <sep>
            # argmax picks the index of the first <sep>; +1 makes the length
            # cover the <cls> .. <sep> prefix inclusively.
            question_lengths = torch.argmax(input_ids.eq(self.sep_token_id).int(), dim=-1) + 1
            question_lengths.unsqueeze_(1)

        logits_mask = None
        if question_lengths is not None:
            # setting lengths logits to `-infi`
            # Boolean (batch, seqlen) mask that is True on question tokens.
            logits_mask = self.prepare_question_mask(question_lengths, seqlen)
            if token_type_ids is None:
                # Segment ids: 0 for the question part, 1 for the context part.
                token_type_ids = (~logits_mask).long()
            logits_mask = logits_mask  # NOTE(review): no-op assignment — appears to be leftover; safe to drop.
            logits_mask.unsqueeze_(2)  # -> (batch, seqlen, 1), broadcasts over the two span logits

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        logits = self.qa_classifier(sequence_output)

        if logits_mask is not None:
            # removing question tokens from the competition
            # (large negative offset so question positions cannot win the span argmax)
            logits = logits - logits_mask * 1e6

        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split add a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            # (out-of-range positions are clamped onto `ignored_index`, which
            # CrossEntropyLoss is then told to skip)
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    @staticmethod
    def prepare_question_mask(q_lengths: torch.Tensor, maxlen: int):
        # q_lengths -> (bz, 1)
        mask = torch.arange(0, maxlen).to(q_lengths.device)
        mask.unsqueeze_(0)  # -> (1, maxlen)
        # Broadcast comparison: True where position index < question length,
        # i.e. a boolean (bz, maxlen) mask over the question tokens.
        mask = mask < q_lengths
        return mask
| 43.896505 | 213 | 0.635414 |
import math
import os
from dataclasses import dataclass
from typing import Optional, Tuple
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel, SequenceSummary, apply_chunking_to_forward
from ...utils import logging
from .configuration_big_bird import BigBirdConfig
logger = logging.get_logger(__name__)

# Identifiers substituted into the auto-generated code-sample docstrings.
_CHECKPOINT_FOR_DOC = "google/bigbird-roberta-base"
_CONFIG_FOR_DOC = "BigBirdConfig"
_TOKENIZER_FOR_DOC = "BigBirdTokenizer"

# Public checkpoints compatible with this architecture.
BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/bigbird-roberta-base",
    "google/bigbird-roberta-large",
    "google/bigbird-base-trivia-itc",
]

# Maps variable-name fragments found in the original TriviaQA TF checkpoint
# to the corresponding module paths of the PyTorch implementation; consumed
# by `load_tf_weights_in_big_bird` when `is_trivia_qa=True`.
_TRIVIA_QA_MAPPING = {
    "big_bird_attention": "attention/self",
    "output_layer_norm": "output/LayerNorm",
    "attention_output": "attention/output/dense",
    "output": "output/dense",
    "self_attention_layer_norm": "attention/output/LayerNorm",
    "intermediate": "intermediate/dense",
    "word_embeddings": "bert/embeddings/word_embeddings",
    "position_embedding": "bert/embeddings/position_embeddings",
    "type_embeddings": "bert/embeddings/token_type_embeddings",
    "embeddings": "bert/embeddings",
    "layer_normalization": "output/LayerNorm",
    "layer_norm": "LayerNorm",
    "trivia_qa_head": "qa_classifier",
    "dense": "intermediate/dense",
    "dense_1": "qa_outputs",
}
def load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=False):
    """Load TensorFlow BigBird weights into the PyTorch ``model``.

    Args:
        model: the target PyTorch BigBird model.
        tf_checkpoint_path: path to a TF 1.x checkpoint (default) or a TF
            SavedModel directory (TriviaQA variant).
        is_trivia_qa: when ``True``, read the checkpoint as the original
            TriviaQA SavedModel and translate its variable names via
            ``_TRIVIA_QA_MAPPING``.

    Returns:
        The ``model`` with its parameters overwritten in place.

    Raises:
        ImportError: if TensorFlow is not installed.
        ValueError: if a TF array cannot be matched to the shape of the
            corresponding PyTorch parameter.
    """

    def load_tf_weights_bert(init_vars, tf_path):
        # Read every variable of a TF 1.x checkpoint into numpy arrays,
        # normalizing the embeddings LayerNorm scope name on the way.
        names = []
        tf_weights = {}

        for name, shape in init_vars:
            array = tf.train.load_variable(tf_path, name)
            name = name.replace("bert/encoder/LayerNorm", "bert/embeddings/LayerNorm")
            logger.info(f"Loading TF weight {name} with shape {shape}")
            names.append(name)
            tf_weights[name] = array

        return names, tf_weights

    def load_tf_weights_trivia_qa(init_vars):
        # Translate SavedModel variable names of the original TriviaQA
        # implementation into the BERT-style scopes used by this model.
        names = []
        tf_weights = {}

        for i, var in enumerate(init_vars):
            name_items = var.name.split("/")

            if "transformer_scaffold" in name_items[0]:
                layer_name_items = name_items[0].split("_")
                if len(layer_name_items) < 3:
                    layer_name_items += [0]

                name_items[0] = f"bert/encoder/layer_{layer_name_items[2]}"

            # Map each path component and strip the trailing ":0" suffix.
            name = "/".join([_TRIVIA_QA_MAPPING[x] if x in _TRIVIA_QA_MAPPING else x for x in name_items])[
                :-2
            ]

            if "self/attention/output" in name:
                name = name.replace("self/attention/output", "output")

            # The last two variables belong to the QA head's output layer.
            if i >= len(init_vars) - 2:
                name = name.replace("intermediate", "output")

            logger.info(f"Loading TF weight {name} with shape {var.shape}")
            array = var.value().numpy()
            names.append(name)
            tf_weights[name] = array

        return names, tf_weights

    try:
        import re

        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")

    # Load weights from TF model
    init_vars = tf.saved_model.load(tf_path).variables if is_trivia_qa else tf.train.list_variables(tf_path)

    if len(init_vars) <= 0:
        raise ValueError("Loaded trained variables cannot be empty.")

    pt_names = list(model.state_dict().keys())

    if is_trivia_qa:
        names, tf_weights = load_tf_weights_trivia_qa(init_vars)
    else:
        names, tf_weights = load_tf_weights_bert(init_vars, tf_path)

    for txt_name in names:
        array = tf_weights[txt_name]
        name = txt_name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
        # which are not required for using pretrained model
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            logger.info(f"Skipping {'/'.join(name)}")
            continue
        # Walk the PyTorch module tree following the TF scope path.
        pointer = model
        pt_name = []
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
                pt_name.append("weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
                pt_name.append("bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
                pt_name.append("weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
                pt_name.append("classifier")
            elif scope_names[0] == "transform":
                pointer = getattr(pointer, "transform")
                pt_name.append("transform")
                if ("bias" in name) or ("kernel" in name):
                    pointer = getattr(pointer, "dense")
                    pt_name.append("dense")
                elif ("beta" in name) or ("gamma" in name):
                    pointer = getattr(pointer, "LayerNorm")
                    pt_name.append("LayerNorm")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                    pt_name.append(f"{scope_names[0]}")
                except AttributeError:
                    logger.info(f"Skipping {m_name}")
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
                pt_name.append(f"{num}")
        if m_name[-11:] == "_embeddings" or m_name == "embeddings":
            pointer = getattr(pointer, "weight")
            pt_name.append("weight")
        elif m_name == "kernel":
            array = np.transpose(array)
        try:
            if len(array.shape) > len(pointer.shape) and math.prod(array.shape) == math.prod(pointer.shape):
                # Fused attention kernels are stored 3-D in TF; fold them into
                # the 2-D layout of the PyTorch linear layers.
                if (
                    txt_name.endswith("attention/self/key/kernel")
                    or txt_name.endswith("attention/self/query/kernel")
                    or txt_name.endswith("attention/self/value/kernel")
                ):
                    array = array.transpose(1, 0, 2).reshape(pointer.shape)
                elif txt_name.endswith("attention/output/dense/kernel"):
                    array = array.transpose(0, 2, 1).reshape(pointer.shape)
                else:
                    array = array.reshape(pointer.shape)

            if pointer.shape != array.shape:
                raise ValueError(
                    f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched of {txt_name}."
                )
        # BUGFIX: the mismatch above raises ValueError, but this handler used to
        # catch AssertionError (a leftover from an assert-based check), so the
        # shape details were never appended before re-raising.
        except ValueError as e:
            e.args += (pointer.shape, array.shape)
            raise

        pt_weight_name = ".".join(pt_name)
        logger.info(f"Initialize PyTorch weight {pt_weight_name} from {txt_name}.")
        pointer.data = torch.from_numpy(array)
        tf_weights.pop(txt_name, None)
        pt_names.remove(pt_weight_name)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}.")
    logger.info(f"Weights not initialized in PyTorch model: {', '.join(pt_names)}.")
    return model
class BigBirdEmbeddings(nn.Module):
    """Builds the input representation from word, position and token-type embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        # position_ids (1, max_position_embeddings) is registered as a buffer
        # so it moves with the module and is serialized with it.
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")

        self.rescale_embeddings = config.rescale_embeddings
        self.hidden_size = config.hidden_size

    def forward(
        self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
    ):
        input_shape = input_ids.size() if input_ids is not None else inputs_embeds.size()[:-1]
        seq_length = input_shape[1]

        if position_ids is None:
            # Positions continue after any cached (past) tokens.
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]

        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        if self.rescale_embeddings:
            # Scale token embeddings by sqrt(hidden_size).
            inputs_embeds = inputs_embeds * (self.hidden_size ** 0.5)

        embeddings = inputs_embeds + self.token_type_embeddings(token_type_ids)
        embeddings = embeddings + self.position_embeddings(position_ids)

        # NOTE: dropout is applied *before* LayerNorm here — kept deliberately,
        # matching the original ordering of this implementation.
        return self.LayerNorm(self.dropout(embeddings))
class BigBirdSelfAttention(nn.Module):
    """Full (quadratic) multi-head self-attention, used for the "original_full"
    attention mode. Supports decoder-style key/value caching and
    cross-attention against encoder hidden states."""

    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
        self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
        self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.is_decoder = config.is_decoder

    def transpose_for_scores(self, x):
        # (batch, seq, all_head_size) -> (batch, num_heads, seq, head_size)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        mixed_query_layer = self.query(hidden_states)

        # Cross-attention when encoder states are supplied: keys/values come
        # from the encoder, and the encoder's attention mask takes over.
        is_cross_attention = encoder_hidden_states is not None

        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            # Cached uni-directional self-attention: append the new tokens'
            # projections to the cached key/value states along the seq dim.
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))

        query_layer = self.transpose_for_scores(mixed_query_layer)

        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_layer, value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask is (precomputed for all layers in BigBirdModel forward() function)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = F.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        # (batch, heads, seq, head_size) -> (batch, seq, all_head_size)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs
class BigBirdBlockSparseAttention(nn.Module):
    def __init__(self, config, seed=None):
        super().__init__()

        # Longest sequence length the random-block plan may be built for.
        self.max_seqlen = config.max_position_embeddings
        # Optional seed so the sampled random-attention pattern is reproducible.
        self.seed = seed

        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                f"heads {config.num_attention_heads}."
            )

        self.num_attention_heads = config.num_attention_heads
        self.num_random_blocks = config.num_random_blocks
        self.block_size = config.block_size

        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
        self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
        self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
    def forward(
        self,
        hidden_states,
        band_mask=None,
        from_mask=None,
        to_mask=None,
        from_blocked_mask=None,
        to_blocked_mask=None,
        output_attentions=None,
    ):
        """Project Q/K/V and run BigBird's block-sparse attention over them."""
        # Currently this `class` can't be used in decoder.

        batch_size, seqlen, _ = hidden_states.size()
        # Self-attention: queries and keys/values come from the same sequence.
        to_seq_length = from_seq_length = seqlen
        from_block_size = to_block_size = self.block_size

        assert from_seq_length % from_block_size == 0, "Query sided sequence length must be multiple of block size"
        assert to_seq_length % to_block_size == 0, "Key/Value sided sequence length must be multiple of block size"

        query_layer = self.transpose_for_scores(self.query(hidden_states))
        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))

        context_layer, attention_probs = self.bigbird_block_sparse_attention(
            query_layer,
            key_layer,
            value_layer,
            band_mask,
            from_mask,
            to_mask,
            from_blocked_mask,
            to_blocked_mask,
            self.num_attention_heads,
            self.num_random_blocks,
            self.attention_head_size,
            from_block_size,
            to_block_size,
            batch_size,
            from_seq_length,
            to_seq_length,
            seed=self.seed,
            plan_from_length=None,
            plan_num_rand_blocks=None,
            output_attentions=output_attentions,
        )

        # (batch, heads, seq, head_size) -> (batch, seq, hidden_size)
        context_layer = context_layer.contiguous().view(batch_size, from_seq_length, -1)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs
@staticmethod
def torch_bmm_nd(inp_1, inp_2, ndim=None):
return torch.bmm(inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:])).view(
inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 1])
)
@staticmethod
def torch_bmm_nd_transpose(inp_1, inp_2, ndim=None):
return torch.bmm(
inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:]).transpose(1, 2)
).view(inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 2]))
def bigbird_block_sparse_attention(
    self,
    query_layer,
    key_layer,
    value_layer,
    band_mask,
    from_mask,
    to_mask,
    from_blocked_mask,
    to_blocked_mask,
    n_heads,
    n_rand_blocks,
    attention_head_size,
    from_block_size,
    to_block_size,
    batch_size,
    from_seq_len,
    to_seq_len,
    seed,
    plan_from_length,
    plan_num_rand_blocks,
    output_attentions,
):
    """Compute BigBird block-sparse attention (global + sliding + random keys).

    Queries/keys/values are processed block-wise. The first and last query
    blocks attend to *all* keys (global); the second and second-last query
    blocks attend to the first three / last three, the opposite global block
    and their random blocks; all middle query blocks attend to a 3-block
    sliding band, the two global key blocks and ``n_rand_blocks`` randomly
    chosen key blocks. Masks are applied additively with ``-10000.0``.

    Returns ``(context_layer, attention_probs)`` where ``context_layer`` has
    shape ``(batch, from_seq_len, n_heads, head_dim)`` (after the final
    transpose) and ``attention_probs`` is a dense
    ``(batch, n_heads, from_seq_len, to_seq_len)`` reconstruction when
    ``output_attentions`` is set, otherwise ``None``.
    """
    if from_seq_len // from_block_size != to_seq_len // to_block_size:
        raise ValueError("Error the number of blocks needs to be same!")
    rsqrt_d = 1 / math.sqrt(attention_head_size)  # 1/sqrt(d) score scaling
    bsz = batch_size
    # Seeding makes the per-layer random block selection reproducible.
    np.random.seed(seed)
    if from_seq_len in [1024, 3072, 4096]:
        # Old plans used in the paper: one precomputed random plan per head,
        # truncated to the number of non-global query blocks.
        rand_attn = [
            self._bigbird_block_rand_mask(
                self.max_seqlen, self.max_seqlen, from_block_size, to_block_size, n_rand_blocks, last_idx=1024
            )[: (from_seq_len // from_block_size - 2)]
            for _ in range(n_heads)
        ]
    else:
        if plan_from_length is None:
            plan_from_length, plan_num_rand_blocks = self._get_rand_attn_plan(
                from_seq_len, from_block_size, n_rand_blocks
            )
        rand_attn = self._bigbird_block_rand_mask_with_head(
            from_seq_length=from_seq_len,
            to_seq_length=to_seq_len,
            from_block_size=from_block_size,
            to_block_size=to_block_size,
            num_heads=n_heads,
            plan_from_length=plan_from_length,
            plan_num_rand_blocks=plan_num_rand_blocks,
        )
    # Stack per-head plans and broadcast the same plan to every batch element.
    rand_attn = np.stack(rand_attn, axis=0)
    rand_attn = torch.tensor(rand_attn, device=query_layer.device, dtype=torch.long)
    rand_attn.unsqueeze_(0)
    rand_attn = torch.cat([rand_attn for _ in range(batch_size)], dim=0)
    rand_mask = self._create_rand_mask_from_inputs(
        from_blocked_mask, to_blocked_mask, rand_attn, n_heads, n_rand_blocks, bsz, from_seq_len, from_block_size
    )
    # Reshape q/k/v to (bsz, heads, n_blocks, block_size, head_dim).
    blocked_query_matrix = query_layer.view(bsz, n_heads, from_seq_len // from_block_size, from_block_size, -1)
    blocked_key_matrix = key_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1)
    blocked_value_matrix = value_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1)
    # Gather the randomly selected key/value blocks for each query block.
    gathered_key = self.torch_gather_b2(blocked_key_matrix, rand_attn)
    gathered_key = gathered_key.view(
        bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1
    )
    gathered_value = self.torch_gather_b2(blocked_value_matrix, rand_attn)
    gathered_value = gathered_value.view(
        bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1
    )
    # 1st PART: the first query block is global and attends to all keys.
    first_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 0], key_layer, ndim=4)
    first_product = first_product * rsqrt_d
    first_product += (1.0 - to_mask) * -10000.0  # additive mask on padded keys
    first_attn_weights = F.softmax(first_product, dim=-1)
    first_context_layer = self.torch_bmm_nd(first_attn_weights, value_layer, ndim=4)
    first_context_layer.unsqueeze_(2)
    # 2nd PART: the second query block attends to key blocks 0-2, the last
    # (global) key block, and its random key blocks.
    second_key_mat = torch.cat(
        [
            blocked_key_matrix[:, :, 0],
            blocked_key_matrix[:, :, 1],
            blocked_key_matrix[:, :, 2],
            blocked_key_matrix[:, :, -1],
            gathered_key[:, :, 0],
        ],
        dim=2,
    )
    second_value_mat = torch.cat(
        [
            blocked_value_matrix[:, :, 0],
            blocked_value_matrix[:, :, 1],
            blocked_value_matrix[:, :, 2],
            blocked_value_matrix[:, :, -1],
            gathered_value[:, :, 0],
        ],
        dim=2,
    )
    second_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 1], second_key_mat, ndim=4)
    # Sequence-padding mask for the concatenated key blocks (random blocks
    # get all-ones here; their masking comes from `second_rand_pad`).
    second_seq_pad = torch.cat(
        [
            to_mask[:, :, :, : 3 * to_block_size],
            to_mask[:, :, :, -to_block_size:],
            first_context_layer.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]),
        ],
        dim=3,
    )
    second_rand_pad = torch.cat(
        [
            first_context_layer.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]),
            rand_mask[:, :, 0],
        ],
        dim=3,
    )
    second_product = second_product * rsqrt_d
    # Combine the two masks; `minimum` keeps a position only if both allow it.
    second_product += (1.0 - torch.minimum(second_seq_pad, second_rand_pad)) * -10000.0
    second_attn_weights = F.softmax(
        second_product, dim=-1
    )
    second_context_layer = self.torch_bmm_nd(second_attn_weights, second_value_mat, ndim=4)
    second_context_layer.unsqueeze_(2)
    # 3rd PART: middle query blocks attend to a 3-block sliding band, their
    # random blocks, and the two global key blocks.
    exp_blocked_key_matrix = torch.cat(
        [blocked_key_matrix[:, :, 1:-3], blocked_key_matrix[:, :, 2:-2], blocked_key_matrix[:, :, 3:-1]], dim=3
    )
    exp_blocked_value_matrix = torch.cat(
        [blocked_value_matrix[:, :, 1:-3], blocked_value_matrix[:, :, 2:-2], blocked_value_matrix[:, :, 3:-1]],
        dim=3,
    )
    middle_query_matrix = blocked_query_matrix[:, :, 2:-2]
    # Sliding-band scores.
    inner_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, exp_blocked_key_matrix, ndim=5)
    inner_band_product = inner_band_product * rsqrt_d
    # Random-block scores.
    rand_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, gathered_key[:, :, 1:-1], ndim=5)
    rand_band_product = rand_band_product * rsqrt_d
    # Scores against the first (global) key block.
    first_band_product = torch.einsum(
        "bhlqd,bhkd->bhlqk", middle_query_matrix, blocked_key_matrix[:, :, 0]
    )  # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size]
    first_band_product = first_band_product * rsqrt_d
    # Including last block (since it's global)
    last_band_product = torch.einsum(
        "bhlqd,bhkd->bhlqk", middle_query_matrix, blocked_key_matrix[:, :, -1]
    )
    last_band_product = last_band_product * rsqrt_d
    # Mask padded tokens in each score group.
    inner_band_product += (1.0 - band_mask) * -10000.0
    first_band_product += (1.0 - to_mask[:, :, :, :to_block_size].unsqueeze(3)) * -10000.0
    last_band_product += (1.0 - to_mask[:, :, :, -to_block_size:].unsqueeze(3)) * -10000.0
    rand_band_product += (1.0 - rand_mask[:, :, 1:-1]) * -10000.0
    # Softmax over the concatenation [first | sliding | random | last].
    band_product = torch.cat(
        [first_band_product, inner_band_product, rand_band_product, last_band_product], dim=-1
    )
    attn_weights = F.softmax(
        band_product, dim=-1
    )
    # Contribution of sliding keys.
    context_layer = self.torch_bmm_nd(
        attn_weights[:, :, :, :, to_block_size : 4 * to_block_size], exp_blocked_value_matrix, ndim=5
    )
    # Plus contribution of random keys.
    context_layer += self.torch_bmm_nd(
        attn_weights[:, :, :, :, 4 * to_block_size : -to_block_size], gathered_value[:, :, 1:-1], ndim=5
    )
    # Plus contribution of the two global key blocks.
    context_layer += torch.einsum(
        "bhlqk,bhkd->bhlqd", attn_weights[:, :, :, :, :to_block_size], blocked_value_matrix[:, :, 0]
    )
    context_layer += torch.einsum(
        "bhlqk,bhkd->bhlqd", attn_weights[:, :, :, :, -to_block_size:], blocked_value_matrix[:, :, -1]
    )
    # 4th PART: second-last query block attends to the first (global) block,
    # the last three key blocks, and its random key blocks.
    second_last_key_mat = torch.cat(
        [
            blocked_key_matrix[:, :, 0],
            blocked_key_matrix[:, :, -3],
            blocked_key_matrix[:, :, -2],
            blocked_key_matrix[:, :, -1],
            gathered_key[:, :, -1],
        ],
        dim=2,
    )
    second_last_value_mat = torch.cat(
        [
            blocked_value_matrix[:, :, 0],
            blocked_value_matrix[:, :, -3],
            blocked_value_matrix[:, :, -2],
            blocked_value_matrix[:, :, -1],
            gathered_value[:, :, -1],
        ],
        dim=2,
    )
    second_last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -2], second_last_key_mat, ndim=4)
    second_last_seq_pad = torch.cat(
        [
            to_mask[:, :, :, :to_block_size],
            to_mask[:, :, :, -3 * to_block_size :],
            context_layer.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]),
        ],
        dim=3,
    )
    second_last_rand_pad = torch.cat(
        [
            context_layer.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]),
            rand_mask[:, :, -1],
        ],
        dim=3,
    )
    second_last_product = second_last_product * rsqrt_d
    second_last_product += (1.0 - torch.minimum(second_last_seq_pad, second_last_rand_pad)) * -10000.0
    second_last_attn_weights = F.softmax(
        second_last_product, dim=-1
    )
    second_last_context_layer = self.torch_bmm_nd(second_last_attn_weights, second_last_value_mat, ndim=4)
    second_last_context_layer.unsqueeze_(2)
    # 5th PART: the last query block is global and attends to all keys.
    last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -1], key_layer, ndim=4)
    last_product = last_product * rsqrt_d
    last_product += (1.0 - to_mask) * -10000.0
    last_attn_weights = F.softmax(last_product, dim=-1)
    last_context_layer = self.torch_bmm_nd(last_attn_weights, value_layer, ndim=4)
    last_context_layer.unsqueeze_(2)
    # Stitch the five parts back together along the block dimension, zero out
    # padded query positions, and move heads behind the sequence dimension.
    context_layer = torch.cat(
        [first_context_layer, second_context_layer, context_layer, second_last_context_layer, last_context_layer],
        dim=2,
    )
    context_layer = context_layer.view((bsz, n_heads, from_seq_len, -1)) * from_mask
    context_layer = torch.transpose(context_layer, 1, 2)
    if output_attentions:
        # TODO(PVP): need to verify if below code is correct
        # Scatter the sparse attention weights back into a dense
        # (bsz, heads, from_seq_len, to_seq_len) matrix for inspection.
        attention_probs = torch.zeros(
            bsz, n_heads, from_seq_len, to_seq_len, dtype=torch.float, device=context_layer.device
        )
        # 1st query block
        # corresponding to `first_context_layer`
        attention_probs[:, :, :from_block_size, :] = first_attn_weights  # all keys global
        # 2nd query block
        # corresponding to `second_context_layer`
        attention_probs[:, :, from_block_size : 2 * from_block_size, : 3 * to_block_size] = second_attn_weights[
            :, :, :, : 3 * to_block_size
        ]  # 1st three key blocks (global + sliding)
        attention_probs[:, :, from_block_size : 2 * from_block_size, -to_block_size:] = second_attn_weights[
            :, :, :, 3 * to_block_size : 4 * to_block_size
        ]  # last key block (global)
        # random keys
        for p1, i1, w1 in zip(range(bsz), rand_attn, second_attn_weights):
            # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch
            for p2, i2, w2 in zip(range(n_heads), i1, w1):
                # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads
                attn_probs_view = attention_probs.view(
                    bsz,
                    n_heads,
                    from_seq_len // from_block_size,
                    from_block_size,
                    to_seq_len // to_block_size,
                    to_block_size,
                )
                right_slice = w2[:, 4 * to_block_size :]
                attn_probs_view[p1, p2, 1, :, i2[0]] = right_slice.view(
                    from_block_size, n_rand_blocks, to_block_size
                )
        # Middle query blocks
        # corresponding to `context_layer`
        # sliding keys
        for q_idx in range(from_seq_len // from_block_size - 4):
            attn_probs_view = attention_probs.view(
                bsz,
                n_heads,
                from_seq_len // from_block_size,
                from_block_size,
                to_seq_len // to_block_size,
                to_block_size,
            )[:, :, 2:-2, :, 1:-1, :]
            right_slice = attn_weights[:, :, q_idx, :, to_block_size : 4 * to_block_size]
            attn_probs_view[:, :, q_idx, :, q_idx : q_idx + 3, :] = right_slice.view(
                bsz, n_heads, from_block_size, 3, to_block_size
            )  # inner_band_product
        # global keys (corresponding to 1st key block)
        attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, :to_block_size] = attn_weights[
            :, :, :, :, :to_block_size
        ].view(
            bsz, n_heads, -1, to_block_size
        )  # first_band_product
        # global keys (corresponding to last key block)
        attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, -to_block_size:] = attn_weights[
            :, :, :, :, -to_block_size:
        ].view(
            bsz, n_heads, -1, to_block_size
        )  # last_band_product
        # random keys
        for p1, i1, w1 in zip(range(bsz), rand_attn, attn_weights):
            # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch
            for p2, i2, w2 in zip(range(n_heads), i1, w1):
                # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads
                for q_idx in range(1, len(i2) - 1):
                    attn_probs_view = attention_probs.view(
                        bsz,
                        n_heads,
                        from_seq_len // from_block_size,
                        from_block_size,
                        to_seq_len // to_block_size,
                        to_block_size,
                    )
                    right_slice = w2[q_idx - 1, :, 4 * to_block_size : -to_block_size]
                    attn_probs_view[p1, p2, q_idx + 1, :, i2[q_idx]] = right_slice.view(
                        from_block_size, n_rand_blocks, to_block_size
                    )
        # Second-last query block
        # corresponding to `second_last_context_layer`
        attention_probs[:, :, -2 * from_block_size : -from_block_size, :to_block_size] = second_last_attn_weights[
            :, :, :, :to_block_size
        ]  # 1st key block (global)
        attention_probs[
            :, :, -2 * from_block_size : -from_block_size, -3 * to_block_size :
        ] = second_last_attn_weights[
            :, :, :, to_block_size : 4 * to_block_size
        ]  # last three blocks (global + sliding)
        # random keys
        for p1, i1, w1 in zip(range(bsz), rand_attn, second_last_attn_weights):
            # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch
            for p2, i2, w2 in zip(range(n_heads), i1, w1):
                # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads
                attn_probs_view = attention_probs.view(
                    bsz,
                    n_heads,
                    from_seq_len // from_block_size,
                    from_block_size,
                    to_seq_len // to_block_size,
                    to_block_size,
                )
                right_slice = w2[:, 4 * to_block_size :]
                attn_probs_view[p1, p2, -2, :, i2[-1]] = right_slice.view(
                    from_block_size, n_rand_blocks, to_block_size
                )
        # last query block
        # corresponding to `last_context_layer`
        attention_probs[:, :, -from_block_size:, :] = last_attn_weights  # all keys global
    else:
        attention_probs = None
    return context_layer, attention_probs
@staticmethod
def torch_gather_b2(params, indices):
# this operation is equilvalent to tf.gather when batch_dims=2
if params.shape[:2] != indices.shape[:2]:
raise ValueError(
f"Make sure that the first two dimensions of params and indices are identical, \
but they are params: {params.shape[:2]} vs. indices: {params.shape[:2]}"
)
num_indices_to_gather = indices.shape[-2] * indices.shape[-1]
num_indices_to_pick_from = params.shape[2]
indices_shift = (
torch.arange(indices.shape[0] * indices.shape[1] * num_indices_to_gather, device=indices.device)
// num_indices_to_gather
* num_indices_to_pick_from
)
flattened_indices = indices.view(-1) + indices_shift
flattened_params = params.reshape(-1, params.shape[-2], params.shape[-1])
out_flattened = flattened_params.index_select(0, flattened_indices)
out = out_flattened.reshape(params.shape[:2] + (num_indices_to_gather,) + params.shape[3:])
return out
@staticmethod
def _create_rand_mask_from_inputs(
from_blocked_mask,
to_blocked_mask,
rand_attn,
num_attention_heads,
num_rand_blocks,
batch_size,
from_seq_length,
from_block_size,
):
num_windows = from_seq_length // from_block_size - 2
rand_mask = torch.stack([p1[i1.flatten()] for p1, i1 in zip(to_blocked_mask, rand_attn)])
rand_mask = rand_mask.view(batch_size, num_attention_heads, num_windows, num_rand_blocks * from_block_size)
rand_mask = torch.einsum("blq,bhlk->bhlqk", from_blocked_mask[:, 1:-1], rand_mask)
return rand_mask
@staticmethod
def _get_rand_attn_plan(from_seq_length, from_block_size, num_rand_blocks):
plan_from_length = []
plan_num_rand_blocks = []
if (2 * num_rand_blocks + 5) < (from_seq_length // from_block_size):
plan_from_length.append(int((2 * num_rand_blocks + 5) * from_block_size))
plan_num_rand_blocks.append(num_rand_blocks)
plan_from_length.append(from_seq_length)
plan_num_rand_blocks.append(0)
elif (num_rand_blocks + 5) < (from_seq_length // from_block_size):
plan_from_length.append(int((num_rand_blocks + 5) * from_block_size))
plan_num_rand_blocks.append(num_rand_blocks // 2)
plan_from_length.append(from_seq_length)
plan_num_rand_blocks.append(num_rand_blocks - (num_rand_blocks // 2))
else:
plan_from_length.append(from_seq_length)
plan_num_rand_blocks.append(num_rand_blocks)
return plan_from_length, plan_num_rand_blocks
@staticmethod
def _bigbird_block_rand_mask(
    from_seq_length, to_seq_length, from_block_size, to_block_size, num_rand_blocks, last_idx=-1
):
    """Create a ``(from_blocks - 2, num_rand_blocks)`` matrix of random key-block ids.

    Row ``i - 1`` lists the key blocks that query block ``i`` attends to at
    random; the first and last query blocks are global and get no row. When
    ``last_idx > 0``, random blocks are only drawn from key blocks covering up
    to ``last_idx`` tokens.
    """
    # using this method when from_seq_length in [1024, 3072, 4096]
    assert (
        from_seq_length // from_block_size == to_seq_length // to_block_size
    ), "Error the number of blocks needs to be same!"
    rand_attn = np.zeros((from_seq_length // from_block_size - 2, num_rand_blocks), dtype=np.int32)
    # Candidate key blocks exclude the global first and last blocks.
    middle_seq = np.arange(1, to_seq_length // to_block_size - 1, dtype=np.int32)
    last = to_seq_length // to_block_size - 1
    if last_idx > (2 * to_block_size):
        last = (last_idx // to_block_size) - 1
    r = num_rand_blocks  # shorthand
    for i in range(1, from_seq_length // from_block_size - 1):
        # [start, end] is the sliding-window neighborhood of query block i,
        # which must not be picked as a random block in the general case.
        start = i - 2
        end = i
        if i == 1:
            rand_attn[i - 1, :] = np.random.permutation(middle_seq[2:last])[:r]
        elif i == 2:
            rand_attn[i - 1, :] = np.random.permutation(middle_seq[3:last])[:r]
        elif i == from_seq_length // from_block_size - 3:
            rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r]
        # Missing -3: should have been sliced till last-3 (quirk kept on
        # purpose — changing it would alter the published attention pattern).
        elif i == from_seq_length // from_block_size - 2:
            rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r]
        # Missing -4: should have been sliced till last-4 (same quirk as above).
        else:
            if start > last:
                start = last
                rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r]
            elif (end + 1) == last:
                rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r]
            else:
                # General case: permute all candidates outside the sliding
                # window [start, end] and keep the first r of them.
                rand_attn[i - 1, :] = np.random.permutation(
                    np.concatenate((middle_seq[:start], middle_seq[end + 1 : last]))
                )[:r]
    return rand_attn
def _bigbird_block_rand_mask_with_head(
    self,
    from_seq_length,
    to_seq_length,
    from_block_size,
    to_block_size,
    num_heads,
    plan_from_length,
    plan_num_rand_blocks,
    window_block_left=1,
    window_block_right=1,
    global_block_top=1,
    global_block_bottom=1,
    global_block_left=1,
    global_block_right=1,
):
    """Create per-head random-attention adjacency lists following a sampling plan.

    Returns a list of ``num_heads`` int32 arrays, each of shape
    ``(num_blocks - global_block_top - global_block_bottom, total_rand_blocks)``,
    where ``total_rand_blocks = sum(plan_num_rand_blocks[:max_plan_idx + 1])``.
    The plan (`plan_from_length` / `plan_num_rand_blocks`) controls how many
    random blocks are drawn from each key-block range.
    """
    # using this method when from_seq_length not in [1024, 3072, 4096]
    assert (
        from_seq_length // from_block_size == to_seq_length // to_block_size
    ), "Error the number of blocks needs to be same!"
    assert from_seq_length in plan_from_length, "Error from sequence length not in plan!"
    # Total number of blocks in the mask
    num_blocks = from_seq_length // from_block_size
    # Number of blocks per plan
    plan_block_length = np.array(plan_from_length) // from_block_size
    # till when to follow plan
    max_plan_idx = plan_from_length.index(from_seq_length)
    # Random attention adjacency list, one array per head
    rand_attn = [
        np.zeros((num_blocks, np.sum(plan_num_rand_blocks[: max_plan_idx + 1])), dtype=np.int32)
        for i in range(num_heads)
    ]
    # We will go iteratively over the plan blocks and pick random number of
    # Attention blocks from the legally allowed blocks
    for plan_idx in range(max_plan_idx + 1):
        rnd_r_cnt = 0
        if plan_idx > 0:
            # set the row for all from_blocks starting from 0 to
            # plan_block_length[plan_idx-1]
            # column index starts from plan_block_length[plan_idx-1] and ends at
            # plan_block_length[plan_idx]
            if plan_num_rand_blocks[plan_idx] > 0:
                rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx]))
                curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1]))
                for blk_rw_idx in range(global_block_top, plan_block_length[plan_idx - 1]):
                    for h in range(num_heads):
                        rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(
                            block_id=blk_rw_idx,
                            to_start_block_id=plan_block_length[plan_idx - 1],
                            to_end_block_id=plan_block_length[plan_idx],
                            num_rand_blocks=plan_num_rand_blocks[plan_idx],
                            window_block_left=window_block_left,
                            window_block_right=window_block_right,
                            global_block_left=global_block_left,
                            global_block_right=global_block_right,
                        )
            # Backfill earlier plan stages for the rows newly covered by this
            # plan segment.
            for pl_id in range(plan_idx):
                if plan_num_rand_blocks[pl_id] == 0:
                    continue
                for blk_rw_idx in range(plan_block_length[plan_idx - 1], plan_block_length[plan_idx]):
                    rnd_r_cnt = 0
                    to_start_block_id = 0
                    if pl_id > 0:
                        rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:pl_id]))
                        to_start_block_id = plan_block_length[pl_id - 1]
                    curr_r_cnt = int(np.sum(plan_num_rand_blocks[: pl_id + 1]))
                    for h in range(num_heads):
                        rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(
                            block_id=blk_rw_idx,
                            to_start_block_id=to_start_block_id,
                            to_end_block_id=plan_block_length[pl_id],
                            num_rand_blocks=plan_num_rand_blocks[pl_id],
                            window_block_left=window_block_left,
                            window_block_right=window_block_right,
                            global_block_left=global_block_left,
                            global_block_right=global_block_right,
                        )
        if plan_num_rand_blocks[plan_idx] == 0:
            continue
        # Fill the current plan segment's columns for rows inside the segment.
        curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1]))
        from_start_block_id = global_block_top
        to_start_block_id = 0
        if plan_idx > 0:
            rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx]))
            from_start_block_id = plan_block_length[plan_idx - 1]
            to_start_block_id = plan_block_length[plan_idx - 1]
        for blk_rw_idx in range(from_start_block_id, plan_block_length[plan_idx]):
            for h in range(num_heads):
                rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention(
                    block_id=blk_rw_idx,
                    to_start_block_id=to_start_block_id,
                    to_end_block_id=plan_block_length[plan_idx],
                    num_rand_blocks=plan_num_rand_blocks[plan_idx],
                    window_block_left=window_block_left,
                    window_block_right=window_block_right,
                    global_block_left=global_block_left,
                    global_block_right=global_block_right,
                )
    # Drop the rows of the global top/bottom query blocks — they attend to
    # everything, so they need no random blocks.
    for nh in range(num_heads):
        rand_attn[nh] = rand_attn[nh][global_block_top : num_blocks - global_block_bottom, :]
    return rand_attn
@staticmethod
def _get_single_block_row_attention(
block_id,
to_start_block_id,
to_end_block_id,
num_rand_blocks,
window_block_left=1,
window_block_right=1,
global_block_left=1,
global_block_right=1,
):
# list of to_blocks from which to choose random attention
to_block_list = np.arange(to_start_block_id, to_end_block_id, dtype=np.int32)
# permute the blocks
perm_block = np.random.permutation(to_block_list)
# illegal blocks for the current block id, using window
illegal_blocks = list(range(block_id - window_block_left, block_id + window_block_right + 1))
# Add blocks at the start and at the end
illegal_blocks.extend(list(range(global_block_left)))
illegal_blocks.extend(list(range(to_end_block_id - global_block_right, to_end_block_id)))
# The second from_block cannot choose random attention on second last to_block
if block_id == 1:
illegal_blocks.append(to_end_block_id - 2)
# The second last from_block cannot choose random attention on second to_block
if block_id == to_end_block_id - 2:
illegal_blocks.append(1)
selected_random_blokcs = []
for i in range(to_end_block_id - to_start_block_id):
if perm_block[i] not in illegal_blocks:
selected_random_blokcs.append(perm_block[i])
if len(selected_random_blokcs) == num_rand_blocks:
break
return np.array(selected_random_blokcs, dtype=np.int32)
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->BigBird
class BigBirdSelfOutput(nn.Module):
    """Post-attention projection: dense -> dropout -> residual add -> LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dense(hidden_states)
        projected = self.dropout(projected)
        return self.LayerNorm(projected + input_tensor)
class BigBirdAttention(nn.Module):
    """Self-attention (full or block-sparse, per config) plus output projection."""

    def __init__(self, config, seed=None):
        super().__init__()
        self.attention_type = config.attention_type
        self.config = config
        self.seed = seed
        if self.config.attention_type == "original_full":
            self.self = BigBirdSelfAttention(config)
        elif self.config.attention_type == "block_sparse":
            self.self = BigBirdBlockSparseAttention(config, seed)
        else:
            raise ValueError(
                f"attention_type can either be original_full or block_sparse, but is {self.config.attention_type}"
            )
        self.output = BigBirdSelfOutput(config)

    def set_attention_type(self, value: str):
        """Swap the underlying attention module while keeping q/k/v weights."""
        if value not in ["original_full", "block_sparse"]:
            raise ValueError(
                f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}"
            )
        if value == self.attention_type:
            # Requested type is already active; nothing to do.
            return
        self.attention_type = value
        # Build a fresh attention module of the requested flavor and move the
        # existing projection layers over so the swap preserves parameters.
        if value == "original_full":
            new_self = BigBirdSelfAttention(self.config)
        else:
            new_self = BigBirdBlockSparseAttention(self.config, self.seed)
        new_self.query = self.self.query
        new_self.value = self.self.value
        new_self.key = self.self.key
        self.self = new_self
        self.attention_type = value
        if not self.training:
            self.self.eval()

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
        # block_sparse config
        band_mask=None,
        from_mask=None,
        to_mask=None,
        from_blocked_mask=None,
        to_blocked_mask=None,
    ):
        if self.attention_type == "original_full":
            self_outputs = self.self(
                hidden_states,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                past_key_value,
                output_attentions,
            )
        else:
            assert (
                encoder_hidden_states is None
            ), "BigBird cannot be used as a decoder when config.attention_type != 'original_full'"
            self_outputs = self.self(
                hidden_states, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, output_attentions
            )
        attention_output = self.output(self_outputs[0], hidden_states)
        # Prepend the projected context; keep attention probs (if any) behind it.
        return (attention_output,) + self_outputs[1:]
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->BigBird
class BigBirdIntermediate(nn.Module):
    """Feed-forward expansion: dense projection followed by the activation."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        act = config.hidden_act
        # Accept either the name of an activation (looked up in ACT2FN) or a callable.
        self.intermediate_act_fn = ACT2FN[act] if isinstance(act, str) else act

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->BigBird
class BigBirdOutput(nn.Module):
    """Feed-forward contraction: dense -> dropout -> residual add -> LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        contracted = self.dropout(self.dense(hidden_states))
        return self.LayerNorm(contracted + input_tensor)
class BigBirdLayer(nn.Module):
    """One BigBird transformer layer.

    Combines (full or block-sparse) self-attention, optional cross-attention
    when configured as a decoder, and a feed-forward block applied with
    `apply_chunking_to_forward`.
    """

    def __init__(self, config, seed=None):
        super().__init__()
        self.config = config
        self.attention_type = config.attention_type
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        # Feed-forward chunking splits along the sequence dimension (dim 1).
        self.seq_len_dim = 1
        self.attention = BigBirdAttention(config, seed=seed)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
            self.crossattention = BigBirdAttention(config)
        self.intermediate = BigBirdIntermediate(config)
        self.output = BigBirdOutput(config)

    def set_attention_type(self, value: str):
        """Switch self- (and cross-) attention between full and block-sparse."""
        if value not in ["original_full", "block_sparse"]:
            raise ValueError(
                f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}"
            )
        # attention type is already correctly set
        if value == self.attention_type:
            return
        self.attention_type = value
        self.attention.set_attention_type(value)
        if self.add_cross_attention:
            self.crossattention.set_attention_type(value)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        band_mask=None,
        from_mask=None,
        to_mask=None,
        blocked_encoder_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_value=self_attn_past_key_value,
            output_attentions=output_attentions,
            band_mask=band_mask,
            from_mask=from_mask,
            to_mask=to_mask,
            from_blocked_mask=blocked_encoder_mask,
            to_blocked_mask=blocked_encoder_mask,
        )
        attention_output = self_attention_outputs[0]
        # if decoder, the last output is a tuple of self-attn cache
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights
        cross_attn_present_key_value = None
        if self.is_decoder and encoder_hidden_states is not None:
            if not hasattr(self, "crossattention"):
                raise ValueError(
                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with \
cross-attention layers by setting `config.add_cross_attention=True`"
                )
            # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights
            # add cross-attn cache to positions 3,4 of present_key_value tuple
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs
        # if decoder, return the attn key/values as the last output
        if self.is_decoder:
            outputs = outputs + (present_key_value,)
        return outputs

    def feed_forward_chunk(self, attention_output):
        # Called per sequence chunk by `apply_chunking_to_forward` to bound
        # peak memory of the feed-forward block.
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
class BigBirdEncoder(nn.Module):
    """Stack of `BigBirdLayer`s with optional caching, hidden-state/attention
    collection, and gradient checkpointing."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.attention_type = config.attention_type
        # Each layer gets its index as the seed for its random-attention plan.
        self.layer = nn.ModuleList(
            [BigBirdLayer(config, seed=layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )

    def set_attention_type(self, value: str):
        """Switch every layer between 'original_full' and 'block_sparse'."""
        if value not in ["original_full", "block_sparse"]:
            raise ValueError(
                f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}"
            )
        # attention type is already correctly set
        if value == self.attention_type:
            return
        self.attention_type = value
        for layer in self.layer:
            layer.set_attention_type(value)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        band_mask=None,
        from_mask=None,
        to_mask=None,
        blocked_encoder_mask=None,
        return_dict=True,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        next_decoder_cache = () if use_cache else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None
            if getattr(self.config, "gradient_checkpointing", False) and self.training:
                # Caching is incompatible with checkpointing (recomputation
                # would invalidate the cached key/values), so it is disabled.
                if use_cache:
                    # NOTE(review): `logger.warn` is deprecated in favor of
                    # `logger.warning` — left unchanged here.
                    logger.warn(
                        "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                        "`use_cache=False`..."
                    )
                    use_cache = False
                # Closure that forwards the non-tensor args (which
                # `torch.utils.checkpoint` cannot pass through directly).
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, past_key_value, output_attentions)
                    return custom_forward
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    band_mask,
                    from_mask,
                    to_mask,
                    blocked_encoder_mask,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    band_mask,
                    from_mask,
                    to_mask,
                    blocked_encoder_mask,
                    past_key_value,
                    output_attentions,
                )
            hidden_states = layer_outputs[0]
            if use_cache:
                # The layer appends its present key/values as the last output.
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            # Tuple output: drop entries that were not collected.
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )
# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->BigBird
class BigBirdPredictionHeadTransform(nn.Module):
    """Transforms encoder output before the LM decoder: dense -> activation -> LayerNorm."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        act = config.hidden_act
        # Accept either the name of an activation (looked up in ACT2FN) or a callable.
        self.transform_act_fn = ACT2FN[act] if isinstance(act, str) else act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states):
        return self.LayerNorm(self.transform_act_fn(self.dense(hidden_states)))
# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->BigBird
class BigBirdLMPredictionHead(nn.Module):
    """Masked-LM head: transform the hidden states, then project onto the vocabulary."""

    def __init__(self, config):
        super().__init__()
        self.transform = BigBirdPredictionHeadTransform(config)
        # The output weights are the same as the input embeddings (tied elsewhere),
        # but there is an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        # Link the two variables so the bias is correctly resized with `resize_token_embeddings`.
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        """Return vocabulary logits of shape ``(..., vocab_size)``."""
        return self.decoder(self.transform(hidden_states))
# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->BigBird
class BigBirdOnlyMLMHead(nn.Module):
    """Wrapper module exposing only the masked-LM prediction head."""

    def __init__(self, config):
        super().__init__()
        self.predictions = BigBirdLMPredictionHead(config)

    def forward(self, sequence_output):
        """Map encoder outputs to vocabulary prediction scores."""
        return self.predictions(sequence_output)
# Copied from transformers.models.bert.modeling_bert.BertOnlyNSPHead with Bert->BigBird
class BigBirdOnlyNSPHead(nn.Module):
    """Next-sentence-prediction head: a binary classifier over the pooled output."""

    def __init__(self, config):
        super().__init__()
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, pooled_output):
        """Return 2-way next-sentence logits for each example."""
        return self.seq_relationship(pooled_output)
# Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->BigBird
class BigBirdPreTrainingHeads(nn.Module):
    """Joint pre-training heads: masked-LM vocabulary scores plus next-sentence logits."""

    def __init__(self, config):
        super().__init__()
        self.predictions = BigBirdLMPredictionHead(config)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output, pooled_output):
        """Return ``(prediction_scores, seq_relationship_score)``."""
        return self.predictions(sequence_output), self.seq_relationship(pooled_output)
class BigBirdPreTrainedModel(PreTrainedModel):
    """Abstract base class handling weight initialization and pretrained checkpoint loading."""

    config_class = BigBirdConfig
    load_tf_weights = load_tf_weights_in_big_bird
    base_model_prefix = "bert"
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def _init_weights(self, module):
        """Initialize the weights of a single submodule."""
        if isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
            return
        # Slightly different from the TF version which uses truncated_normal for initialization
        # cf https://github.com/pytorch/pytorch/pull/5617
        if isinstance(module, (nn.Linear, nn.Embedding)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
        elif isinstance(module, nn.Embedding) and module.padding_idx is not None:
            module.weight.data[module.padding_idx].zero_()
BIG_BIRD_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config (:class:`~transformers.BigBirdConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
BIG_BIRD_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.BigBirdTokenizer`. See
:func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`{0}`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@dataclass
class BigBirdForPreTrainingOutput(ModelOutput):
    """Output type of :class:`BigBirdForPreTraining`."""

    # Combined masked-LM + next-sentence loss; only present when labels are provided.
    loss: Optional[torch.FloatTensor] = None
    # Language-modeling head logits over the vocabulary, shape (batch, seq_len, vocab_size).
    prediction_logits: torch.FloatTensor = None
    # Next-sentence classification logits, shape (batch, 2).
    seq_relationship_logits: torch.FloatTensor = None
    # Per-layer hidden states, returned when `output_hidden_states=True`.
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    # Per-layer attention probabilities, returned when `output_attentions=True`.
    attentions: Optional[Tuple[torch.FloatTensor]] = None
@add_start_docstrings(
    "The bare BigBird Model transformer outputting raw hidden-states without any specific head on top.",
    BIG_BIRD_START_DOCSTRING,
)
class BigBirdModel(BigBirdPreTrainedModel):
    """
    Bare BigBird encoder producing raw hidden states.

    Supports two attention modes (``config.attention_type``): ``"original_full"``
    (quadratic BERT-style attention) and ``"block_sparse"`` (BigBird sparse
    attention, which requires inputs padded to a multiple of ``config.block_size``).
    """

    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.attention_type = self.config.attention_type
        self.config = config
        self.block_size = self.config.block_size
        self.embeddings = BigBirdEmbeddings(config)
        self.encoder = BigBirdEncoder(config)
        if add_pooling_layer:
            # Pooler: linear + tanh over the first token's hidden state.
            self.pooler = nn.Linear(config.hidden_size, config.hidden_size)
            self.activation = nn.Tanh()
        else:
            self.pooler = None
            self.activation = None
        if self.attention_type != "original_full" and config.add_cross_attention:
            # Cross-attention (decoder usage) is only supported with full attention.
            logger.warning(
                "When using `BigBirdForCausalLM` as decoder, then `attention_type` must be `original_full`. Setting `attention_type=original_full`"
            )
            self.set_attention_type("original_full")
        self.init_weights()

    def get_input_embeddings(self):
        """Return the word-embedding table."""
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        """Replace the word-embedding table."""
        self.embeddings.word_embeddings = value

    def set_attention_type(self, value: str):
        """Switch between `original_full` and `block_sparse` attention (no-op if unchanged)."""
        if value not in ["original_full", "block_sparse"]:
            raise ValueError(
                f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}"
            )
        # attention type is already correctly set
        if value == self.attention_type:
            return
        self.attention_type = value
        self.encoder.set_attention_type(value)

    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """
        Run the embedding layer and the encoder; optionally pool the first token.

        Falls back to full attention when the sequence is too short for block-sparse
        attention, and pads inputs to a multiple of `config.block_size` otherwise.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if self.config.is_decoder:
            use_cache = use_cache if use_cache is not None else self.config.use_cache
        else:
            # The key/value cache only makes sense for decoder (causal) usage.
            use_cache = False
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
            batch_size, seq_length = input_shape
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size, seq_length = input_shape
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        # Length of already-cached keys/values (decoder incremental decoding).
        past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
        if attention_mask is None:
            attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # Minimum sequence length for which block-sparse attention is worthwhile:
        # global + sliding + random + buffer blocks, each of `block_size` tokens.
        max_tokens_to_attend = (5 + 2 * self.config.num_random_blocks) * self.config.block_size
        if self.attention_type == "block_sparse" and seq_length <= max_tokens_to_attend:
            # Sequence too short for sparsity to apply — permanently switch this
            # module instance to full attention.
            sequence_length = input_ids.size(1) if input_ids is not None else inputs_embeds.size(1)
            logger.warning(
                "Attention type 'block_sparse' is not possible if sequence_length: "
                f"{sequence_length} <= num global tokens: 2 * config.block_size "
                "+ min. num sliding tokens: 3 * config.block_size "
                "+ config.num_random_blocks * config.block_size "
                "+ additional buffer: config.num_random_blocks * config.block_size "
                f"= {max_tokens_to_attend} with config.block_size "
                f"= {self.config.block_size}, config.num_random_blocks "
                f"= {self.config.num_random_blocks}."
                "Changing attention type to 'original_full'..."
            )
            self.set_attention_type("original_full")
        if self.attention_type == "block_sparse":
            # Pad all input tensors on the right so seq_len % block_size == 0.
            (
                padding_len,
                input_ids,
                attention_mask,
                token_type_ids,
                position_ids,
                inputs_embeds,
            ) = self._pad_to_block_size(
                input_ids=input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                inputs_embeds=inputs_embeds,
                pad_token_id=self.config.pad_token_id,
            )
        else:
            padding_len = 0
        if self.attention_type == "block_sparse":
            blocked_encoder_mask, band_mask, from_mask, to_mask = self.create_masks_for_block_sparse_attn(
                attention_mask, self.block_size
            )
            extended_attention_mask = None
        elif self.attention_type == "original_full":
            blocked_encoder_mask = None
            band_mask = None
            from_mask = None
            to_mask = None
            # Broadcastable additive mask of shape (batch, 1, 1, seq) built by the base class.
            extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
                attention_mask, input_shape, device
            )
        else:
            raise ValueError(
                f"attention_type can either be original_full or block_sparse, but is {self.attention_type}"
            )
        # If a 2D or 3D attention mask is provided for the cross-attention,
        # make it broadcastable to (batch, heads, seq, seq).
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None
        # Prepare head mask if needed: 1.0 means keep the head.
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_key_values_length,
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            band_mask=band_mask,
            from_mask=from_mask,
            to_mask=to_mask,
            blocked_encoder_mask=blocked_encoder_mask,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        # Pool before un-padding: the first token is unaffected by right padding.
        pooler_output = self.activation(self.pooler(sequence_output[:, 0, :])) if (self.pooler is not None) else None
        # Strip the block-size padding added above so callers see the original length.
        if padding_len > 0:
            sequence_output = sequence_output[:, :-padding_len]
        if not return_dict:
            return (sequence_output, pooler_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooler_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )

    @staticmethod
    def create_masks_for_block_sparse_attn(attention_mask: torch.Tensor, block_size: int):
        """
        Derive the four masks used by block-sparse attention from a 2D padding mask:
        the block-partitioned mask, the sliding-window band mask, and the row/column
        broadcast masks. Requires seq_length to be a multiple of `block_size`.
        """
        batch_size, seq_length = attention_mask.size()
        assert (
            seq_length % block_size == 0
        ), f"Sequence length must be multiple of block size, but sequence length is {seq_length}, while block size is {block_size}."

        def create_band_mask_from_inputs(from_blocked_mask, to_blocked_mask):
            # 3-block sliding window: for each query block i (excluding the 2 global
            # blocks at each end), gather key blocks i-1, i, i+1 and outer-product
            # with the query block to get a (batch, 1, blocks, block, 3*block) mask.
            exp_blocked_to_pad = torch.cat(
                [to_blocked_mask[:, 1:-3], to_blocked_mask[:, 2:-2], to_blocked_mask[:, 3:-1]], dim=2
            )
            band_mask = torch.einsum("blq,blk->blqk", from_blocked_mask[:, 2:-2], exp_blocked_to_pad)
            band_mask.unsqueeze_(1)
            return band_mask

        blocked_encoder_mask = attention_mask.view(batch_size, seq_length // block_size, block_size)
        band_mask = create_band_mask_from_inputs(blocked_encoder_mask, blocked_encoder_mask)
        from_mask = attention_mask.view(batch_size, 1, seq_length, 1)
        to_mask = attention_mask.view(batch_size, 1, 1, seq_length)
        return blocked_encoder_mask, band_mask, from_mask, to_mask

    def _pad_to_block_size(
        self,
        input_ids: torch.Tensor,
        attention_mask: torch.Tensor,
        token_type_ids: torch.Tensor,
        position_ids: torch.Tensor,
        inputs_embeds: torch.Tensor,
        pad_token_id: int,
    ):
        """
        Right-pad all provided inputs so the sequence length is a multiple of
        `config.block_size`, as required by block-sparse attention. Returns the
        padding length followed by the (possibly padded) tensors.
        """
        block_size = self.config.block_size
        input_shape = input_ids.shape if input_ids is not None else inputs_embeds.shape
        batch_size, seq_len = input_shape[:2]
        padding_len = (block_size - seq_len % block_size) % block_size
        if padding_len > 0:
            logger.info(
                f"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of "
                f"`config.block_size`: {block_size}"
            )
            if input_ids is not None:
                input_ids = F.pad(input_ids, (0, padding_len), value=pad_token_id)
            if position_ids is not None:
                # NOTE(review): position_ids are padded with pad_token_id; the padded
                # region is masked out below, so these values are never attended to.
                position_ids = F.pad(position_ids, (0, padding_len), value=pad_token_id)
            if inputs_embeds is not None:
                # Embed pad tokens to extend inputs_embeds consistently.
                input_ids_padding = inputs_embeds.new_full(
                    (batch_size, padding_len),
                    self.config.pad_token_id,
                    dtype=torch.long,
                )
                inputs_embeds_padding = self.embeddings(input_ids_padding)
                inputs_embeds = torch.cat([inputs_embeds, inputs_embeds_padding], dim=-2)
            # Padded positions contribute no attention (mask False) and segment 0.
            attention_mask = F.pad(attention_mask, (0, padding_len), value=False)
            token_type_ids = F.pad(token_type_ids, (0, padding_len), value=0)
        return padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds
class BigBirdForPreTraining(BigBirdPreTrainedModel):
    """BigBird with both pre-training heads on top: masked-LM and next-sentence prediction."""

    def __init__(self, config):
        super().__init__(config)
        self.bert = BigBirdModel(config, add_pooling_layer=True)
        self.cls = BigBirdPreTrainingHeads(config)
        self.init_weights()

    def get_output_embeddings(self):
        # Expose the LM decoder so the framework can tie/resize it with the input embeddings.
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=BigBirdForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        next_sentence_label=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """
        labels: token targets for the masked-LM cross-entropy loss.
        next_sentence_label: 0/1 targets for the NSP head; note its loss is only
        added when `labels` is also provided (total_loss is computed first).

        Returns a :class:`BigBirdForPreTrainingOutput` (or tuple if not return_dict).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output, pooled_output = outputs[:2]
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
        total_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            total_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            if next_sentence_label is not None and total_loss is not None:
                next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
                total_loss = total_loss + next_sentence_loss
        if not return_dict:
            output = (prediction_scores, seq_relationship_score) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output
        return BigBirdForPreTrainingOutput(
            loss=total_loss,
            prediction_logits=prediction_scores,
            seq_relationship_logits=seq_relationship_score,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings("""BigBird Model with a `language modeling` head on top. """, BIG_BIRD_START_DOCSTRING)
class BigBirdForMaskedLM(BigBirdPreTrainedModel):
    """BigBird encoder with a masked-language-modeling head."""

    def __init__(self, config):
        super().__init__(config)
        if config.is_decoder:
            # MLM needs bi-directional attention; causal use belongs to BigBirdForCausalLM.
            logger.warning(
                "If you want to use `BigBirdForMaskedLM` make sure `config.is_decoder=False` for "
                "bi-directional self-attention."
            )
        self.bert = BigBirdModel(config)
        self.cls = BigBirdOnlyMLMHead(config)
        self.init_weights()

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """
        labels: token indices for the MLM cross-entropy loss; positions set to the
        loss function's ignore index are skipped by CrossEntropyLoss's default.
        Returns a :class:`MaskedLMOutput` (or tuple if not return_dict).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)
        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
        """Append one PAD token (with a zeroed mask slot) so generation can predict it."""
        input_shape = input_ids.shape
        effective_batch_size = input_shape[0]
        # add a dummy token
        assert self.config.pad_token_id is not None, "The PAD token should be defined for generation"
        attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
        dummy_token = torch.full(
            (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
        )
        input_ids = torch.cat([input_ids, dummy_token], dim=1)
        return {"input_ids": input_ids, "attention_mask": attention_mask}
@add_start_docstrings(
    """BigBird Model with a `language modeling` head on top for CLM fine-tuning. """, BIG_BIRD_START_DOCSTRING
)
class BigBirdForCausalLM(BigBirdPreTrainedModel):
    """BigBird as an autoregressive decoder with an LM head (causal language modeling)."""

    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)
        if not config.is_decoder:
            logger.warning("If you want to use `BigBirdForCausalLM` as a standalone, add `is_decoder=True.`")
        self.bert = BigBirdModel(config)
        self.cls = BigBirdOnlyMLMHead(config)
        self.init_weights()

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        labels=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """
        labels: next-token targets; logits are shifted left by one before the loss
        so that position i predicts token i+1.
        Returns a :class:`CausalLMOutputWithCrossAttentions` (or tuple if not return_dict).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        prediction_scores = self.cls(sequence_output)
        lm_loss = None
        if labels is not None:
            # we are doing next-token prediction; shift prediction scores and input ids by one
            shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
            labels = labels[:, 1:].contiguous()
            loss_fct = CrossEntropyLoss()
            lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((lm_loss,) + output) if lm_loss is not None else output
        return CausalLMOutputWithCrossAttentions(
            loss=lm_loss,
            logits=prediction_scores,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )

    def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
        """Build generation inputs; with a cache, only the last token needs to be fed."""
        input_shape = input_ids.shape
        # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)
        # cut decoder_input_ids if past is used
        if past is not None:
            input_ids = input_ids[:, -1:]
        return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past}

    def _reorder_cache(self, past, beam_idx):
        """Reorder cached keys/values along the batch dim to follow beam-search reordering."""
        reordered_past = ()
        for layer_past in past:
            reordered_past += (
                tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],
            )
        return reordered_past
class BigBirdClassificationHead(nn.Module):
    """Sentence-level classification head operating on the first ([CLS]-equivalent) token."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
        self.config = config

    def forward(self, features, **kwargs):
        """Return logits of shape ``(batch, num_labels)`` from ``features[:, 0, :]``."""
        # take <s> token (equiv. to [CLS]), then dropout -> dense -> act -> dropout -> project
        cls_state = self.dropout(features[:, 0, :])
        cls_state = ACT2FN[self.config.hidden_act](self.dense(cls_state))
        return self.out_proj(self.dropout(cls_state))
@add_start_docstrings(
    """
    BigBird Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    BIG_BIRD_START_DOCSTRING,
)
class BigBirdForSequenceClassification(BigBirdPreTrainedModel):
    """BigBird encoder with a first-token classification/regression head."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BigBirdModel(config)
        self.classifier = BigBirdClassificationHead(config)
        self.init_weights()

    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """
        labels: class indices (num_labels > 1, cross-entropy) or float targets
        (num_labels == 1, MSE regression).
        Returns a :class:`SequenceClassifierOutput` (or tuple if not return_dict).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        logits = self.classifier(sequence_output)
        loss = None
        if labels is not None:
            if self.num_labels == 1:
                #  We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    BigBird Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RocStories/SWAG tasks.
    """,
    BIG_BIRD_START_DOCSTRING,
)
class BigBirdForMultipleChoice(BigBirdPreTrainedModel):
    """BigBird with a per-choice scoring head; inputs are (batch, num_choices, seq_len)."""

    def __init__(self, config):
        super().__init__(config)
        self.bert = BigBirdModel(config)
        self.sequence_summary = SequenceSummary(config)
        self.classifier = nn.Linear(config.hidden_size, 1)
        self.init_weights()

    @add_start_docstrings_to_model_forward(
        BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
    )
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """
        labels: index of the correct choice in ``[0, num_choices-1]`` per example.
        Returns a :class:`MultipleChoiceModelOutput` (or tuple if not return_dict).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
        # Flatten the choice dimension so each choice runs through the encoder as
        # its own sequence: (batch * num_choices, seq_len).
        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        pooled_output = self.sequence_summary(sequence_output)
        logits = self.classifier(pooled_output)
        # Un-flatten back to one score column per choice.
        reshaped_logits = logits.view(-1, num_choices)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)
        if not return_dict:
            output = (reshaped_logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
@add_start_docstrings(
    """
    BigBird Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    BIG_BIRD_START_DOCSTRING,
)
class BigBirdForTokenClassification(BigBirdPreTrainedModel):
    """BigBird with a per-token classification head (e.g. NER)."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BigBirdModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """
        labels: per-token class indices; padded positions (attention_mask == 0)
        are replaced with the loss's ignore index so they do not contribute.
        Returns a :class:`TokenClassifierOutput` (or tuple if not return_dict).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)
        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            # Only keep the active parts of the loss (non-padded tokens).
            if attention_mask is not None:
                active_loss = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)
                active_labels = torch.where(
                    active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
                )
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
class BigBirdForQuestionAnsweringHead(nn.Module):
    """Head for question answering: an intermediate/output FFN block followed by a span classifier."""

    def __init__(self, config):
        super().__init__()
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.intermediate = BigBirdIntermediate(config)
        self.output = BigBirdOutput(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_output):
        """Return span logits; the output block uses `encoder_output` as its residual input."""
        dropped = self.dropout(encoder_output)
        ffn_state = self.output(self.intermediate(dropped), encoder_output)
        return self.qa_outputs(ffn_state)
@add_start_docstrings(
    """
    BigBird Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    BIG_BIRD_START_DOCSTRING,
)
class BigBirdForQuestionAnswering(BigBirdPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        # Extractive QA is always a 2-label problem: one logit per token for
        # span start and one for span end.
        config.num_labels = 2
        self.num_labels = config.num_labels
        self.sep_token_id = config.sep_token_id

        self.bert = BigBirdModel(config, add_pooling_layer=False)
        self.qa_classifier = BigBirdForQuestionAnsweringHead(config)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="google/bigbird-base-trivia-itc",
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        question_lengths=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """
        Runs the encoder and predicts start/end span logits over the context
        tokens.  ``question_lengths`` (one length per batch row) lets the head
        mask out the question part so spans are only predicted over the
        context; when omitted it is inferred from the first [SEP] token.
        ``start_positions``/``end_positions`` are the gold span labels used to
        compute the cross-entropy loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        seqlen = input_ids.size(1) if input_ids is not None else inputs_embeds.size(1)

        if question_lengths is None and input_ids is not None:
            # Infer the question length from the position of the first [SEP]
            # token (+1 so the [SEP] itself counts as part of the question).
            question_lengths = torch.argmax(input_ids.eq(self.sep_token_id).int(), dim=-1) + 1
            question_lengths.unsqueeze_(1)

        logits_mask = None
        if question_lengths is not None:
            # (batch, seqlen) bool mask, True on question tokens.
            logits_mask = self.prepare_question_mask(question_lengths, seqlen)
            if token_type_ids is None:
                # Question tokens -> segment 0, context tokens -> segment 1.
                token_type_ids = (~logits_mask).long()
            # NOTE(review): no-op assignment kept from upstream; later
            # transformers versions replace it with `logits_mask[:, 0] = False`
            # so the [CLS] position stays available — worth verifying.
            logits_mask = logits_mask
            logits_mask.unsqueeze_(2)

        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        logits = self.qa_classifier(sequence_output)

        if logits_mask is not None:
            # Push question-token logits towards -inf so they never win argmax.
            logits = logits - logits_mask * 1e6

        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, the labels may carry an extra dimension.
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # Positions outside the model inputs are clamped and then ignored
            # by the loss via ignore_index.
            ignored_index = start_logits.size(1)
            start_positions.clamp_(0, ignored_index)
            end_positions.clamp_(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            # Tuple output: (loss?, start_logits, end_logits, *extras).
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    @staticmethod
    def prepare_question_mask(q_lengths: torch.Tensor, maxlen: int):
        # q_lengths: (batch, 1).  Returns a (batch, maxlen) bool mask that is
        # True on positions belonging to the question (index < length).
        mask = torch.arange(0, maxlen).to(q_lengths.device)
        mask.unsqueeze_(0)  # -> (1, maxlen), broadcasts against q_lengths
        mask = mask < q_lengths
        return mask
| true | true |
f7fd55932dd9961e78482975451755e03c572f38 | 9,361 | py | Python | pavia_SdA.py | RichardScottOZ/deeplearn_hsi | f3c88e779d5a9a0afbdd3d41d3b08839c984bdf6 | [
"BSD-2-Clause"
] | 92 | 2016-03-05T23:33:13.000Z | 2022-01-12T11:44:16.000Z | pavia_SdA.py | RichardScottOZ/deeplearn_hsi | f3c88e779d5a9a0afbdd3d41d3b08839c984bdf6 | [
"BSD-2-Clause"
] | 4 | 2016-06-03T14:07:19.000Z | 2018-11-18T14:04:57.000Z | pavia_SdA.py | RichardScottOZ/deeplearn_hsi | f3c88e779d5a9a0afbdd3d41d3b08839c984bdf6 | [
"BSD-2-Clause"
] | 46 | 2016-05-25T13:59:30.000Z | 2022-02-08T12:10:33.000Z | import os
import sys
import time
import scipy.io as sio
import numpy
import scipy
import theano
import theano.tensor as T
from scipy.stats import t
from sklearn import svm
from theano.tensor.shared_randomstreams import RandomStreams
import PIL.Image
from SdA import SdA
from hsi_utils import *
# Mapping from class index (0 = unlabelled background, 1..9 = the nine
# Pavia University ground-truth classes) to an RGB colour used when
# rendering the predicted label map as an image.
cmap = numpy.asarray( [[0, 0, 0],
                       [192, 192, 192],
                       [0, 255, 0],
                       [0, 255, 255],
                       [0, 128, 0],
                       [255, 0, 255],
                       [165, 82, 41],
                       [128, 0, 128],
                       [255, 0, 0],
                       [255, 255, 0]], dtype='int32')

# load .mat files: the hyperspectral cube and the per-pixel ground truth
hsi_file = u'/home/hantek/data/hsi_data/pavia/PaviaU.mat'
gnd_file = u'/home/hantek/data/hsi_data/pavia/PaviaU_gt.mat'

data = sio.loadmat(hsi_file)
# Scale the raw cube to [0, 1]; shape is (width, height, spectral bands).
img = scale_to_unit_interval(data['paviaU'].astype(theano.config.floatX))
width = img.shape[0]
height = img.shape[1]
bands = img.shape[2]

data = sio.loadmat(gnd_file)
gnd_img = data['paviaU_gt'].astype(numpy.int32)

# extract supervised spectral data (train/valid/test Theano shared datasets)
datasets, _, _, _ = \
    prepare_data(hsi_img=img, gnd_img=gnd_img, merge=False,
                 window_size=7, n_principle=3, batch_size=100)

############################################################################
# build model: hyper-parameters for pre-training and fine-tuning
finetune_lr=0.05
pretraining_epochs=800
pretrain_lr=0.5
training_epochs=250000
batch_size=100
hidden_layers_sizes=[60, 60, 60, 60]
corruption_levels = [0., 0., 0., 0.]

print 'finetuning learning rate=', finetune_lr
print 'pretraining learning rate=', pretrain_lr
print 'pretraining epoches=', pretraining_epochs
print 'fine tuning epoches=', training_epochs
print 'batch size=', batch_size
print 'hidden layers sizes=', hidden_layers_sizes
print 'corruption levels=', corruption_levels

# compute number of minibatches for training, validation and testing
n_train_batches = datasets[0][0].get_value(borrow=True).shape[0]
n_train_batches /= batch_size

# numpy random generator (fixed seed for reproducibility)
numpy_rng = numpy.random.RandomState(89677)
print '... building the model'
# construct the stacked denoising autoencoder class
sda = SdA(numpy_rng=numpy_rng, n_ins=bands,
          hidden_layers_sizes=hidden_layers_sizes,
          n_outs=gnd_img.max())

#########################
# PRETRAINING THE MODEL #
#########################
print '... getting the pretraining functions'
pretraining_fns = sda.pretraining_functions(train_set_x=datasets[0][0],
                                            batch_size=batch_size)

print '... pre-training the model'
start_time = time.clock()
## Pre-train layer-wise: each dA layer is trained greedily on the output of
## the layer below it.
for i in xrange(sda.n_layers):
    # go through pretraining epochs
    for epoch in xrange(pretraining_epochs):
        # go through the training set
        c = []
        for batch_index in xrange(n_train_batches):
            c.append(pretraining_fns[i](index=batch_index,
                     corruption=corruption_levels[i],
                     lr=pretrain_lr))
        print 'Pre-training layer %i, epoch %d, cost ' % (i, epoch),
        print numpy.mean(c)

end_time = time.clock()
print >> sys.stderr, ('The pretraining code for file ' +
                      os.path.split(__file__)[1] +
                      ' ran for %.2fm' % ((end_time - start_time) / 60.))

########################
# FINETUNING THE MODEL #
########################
# get the training, validation and testing function for the model
print '... getting the finetuning functions'
train_fn, validate_model, test_model = sda.build_finetune_functions(
    datasets=datasets, batch_size=batch_size,
    learning_rate=finetune_lr)

print '... finetunning the model'
# go through this many minibatches before checking the network on the
# validation set; with this setting we validate every 1000 epochs
validation_frequency = 1000 * n_train_batches

best_params = None
best_validation_loss = numpy.inf
test_score = 0.
start_time = time.clock()

epoch = 0
while (epoch < training_epochs):
    epoch = epoch + 1
    for minibatch_index in xrange(n_train_batches):
        minibatch_avg_cost = train_fn(minibatch_index)
        iter = (epoch - 1) * n_train_batches + minibatch_index
        if (iter + 1) % validation_frequency == 0:
            validation_losses = validate_model()
            this_validation_loss = numpy.mean(validation_losses)
            print('epoch %i, minibatch %i/%i, validation error %f %%' % \
                  (epoch, minibatch_index + 1, n_train_batches,
                   this_validation_loss * 100.))
            # if we got the best validation score until now
            if this_validation_loss < best_validation_loss:
                # save best validation score and iteration number
                best_validation_loss = this_validation_loss
                best_iter = iter
                # test it on the test set
                test_losses = test_model()
                test_score = numpy.mean(test_losses)
                print((' epoch %i, minibatch %i/%i, test error of '
                       'best model %f %%') %
                      (epoch, minibatch_index + 1, n_train_batches,
                       test_score * 100.))

end_time = time.clock()
print(('Optimization complete with best validation score of %f %%,'
       'with test performance %f %%') %
      (best_validation_loss * 100., test_score * 100.))
print >> sys.stdout, ('The fine tuning code for file ' +
                      os.path.split(__file__)[1] +
                      ' ran for %.2fm' % ((end_time - start_time)
                                          / 60.))

############################################################################
# Save learnt filters and parameters, then render classification maps.
filename = 'pavia_l4sda_pt%d_ft%d_lrp%.4f_f%.4f_bs%d_hid%d' % \
    (pretraining_epochs, training_epochs, pretrain_lr, finetune_lr,
     batch_size, hidden_layers_sizes[0])

print '... getting filters'
# visualize the first-layer dA weights as a grid of image tiles
image = PIL.Image.fromarray(
    tile_raster_images(X=sda.dA_layers[0].W.get_value(borrow=True)[:100, :].T,
                       img_shape=(10, 10),
                       tile_shape=(10, hidden_layers_sizes[0]/10),
                       tile_spacing=(1, 1)))
image.save(filename + '_filters.png')

print '... saving parameters'
sda.save_params(filename + '_params.pkl')

print '... classifying test set with learnt model:'
pred_func = theano.function(inputs=[sda.x], outputs=sda.logLayer.y_pred)
pred_test = pred_func(datasets[2][0].get_value(borrow=True))
true_test = datasets[2][1].get_value(borrow=True)
true_valid = datasets[1][1].get_value(borrow=True)
true_train = datasets[0][1].get_value(borrow=True)
result_analysis(pred_test, true_train, true_valid, true_test)

print '... classifying the whole image with learnt model:'
start_time = time.clock()
# shift predictions by +1 so label 0 stays "unlabelled" in the colour map
y = pred_func(img.reshape(width*height, bands)) + 1
end_time = time.clock()
print 'finished, running time:%fm' % ((end_time-start_time) / 60.)

y_rgb = cmap[y, :]
y_image = y_rgb.reshape(width, height, 3)
scipy.misc.imsave(filename + '_wholeimg.png' , y_image)

############################################################################
# Student's t-test: compare the SdA against an RBF-SVM baseline using the
# kappa statistic over repeated random test splits.
print '... performing Student\'s t-test'
best_c = 10000.
best_g = 10.
svm_classifier = svm.SVC(C=best_c, gamma=best_g, kernel='rbf')
svm_classifier.fit(datasets[0][0].get_value(), datasets[0][1].get_value())
# pool the validation and test sets for the repeated random splits
data = [numpy.vstack((datasets[1][0].get_value(),
                      datasets[2][0].get_value())),
        numpy.hstack((datasets[1][1].get_value(),
                      datasets[2][1].get_value()))]
numpy_rng = numpy.random.RandomState(89677)
num_test = 10
print 'Total number of tests: %d' % num_test
k_sae = []
k_svm = []
for i in xrange(num_test):
    [_, _], [_, _], [test_x, test_y], _ = \
        train_valid_test(data, ratio=[0, 1, 1], batch_size=1,
                         random_state=numpy_rng.random_integers(1e10))
    test_y = test_y + 1  # fix the label scale problem
    pred_y = pred_func(test_x)
    cm = confusion_matrix(test_y, pred_y)
    # Cohen's kappa: (observed accuracy - chance accuracy) / (1 - chance)
    pr_a = cm.trace()*1.0 / test_y.size
    pr_e = ((cm.sum(axis=0)*1.0/test_y.size) * \
            (cm.sum(axis=1)*1.0/test_y.size)).sum()
    k_sae.append( (pr_a - pr_e) / (1 - pr_e) )
    pred_y = svm_classifier.predict(test_x)
    cm = confusion_matrix(test_y, pred_y)
    pr_a = cm.trace()*1.0 / test_y.size
    pr_e = ((cm.sum(axis=0)*1.0/test_y.size) * \
            (cm.sum(axis=1)*1.0/test_y.size)).sum()
    k_svm.append( (pr_a - pr_e) / (1 - pr_e) )

std_k_sae = numpy.std(k_sae)
std_k_svm = numpy.std(k_svm)
mean_k_sae = numpy.mean(k_sae)
mean_k_svm = numpy.mean(k_svm)
# NOTE(review): the denominator below looks non-standard for a two-sample
# t statistic (compare pooled std * sqrt(2/n)); kept as-is, worth verifying.
left = ( (mean_k_sae - mean_k_svm) * numpy.sqrt(num_test*2-2)) \
    / ( numpy.sqrt(2./num_test) * num_test * (std_k_sae**2 + std_k_svm**2) )
rv = t(num_test*2.0 - 2)
right = rv.ppf(0.95)
print '\tstd\t\tmean'
print 'k_sae\t%f\t%f' % (std_k_sae, mean_k_sae)
print 'k_svm\t%f\t%f' % (std_k_svm, mean_k_svm)
if left > right:
    print 'left = %f, right = %f, test PASSED.' % (left, right)
else:
    print 'left = %f, right = %f, test FAILED.' % (left, right)
| 37.294821 | 82 | 0.590428 | import os
import sys
import time
import scipy.io as sio
import numpy
import scipy
import theano
import theano.tensor as T
from scipy.stats import t
from sklearn import svm
from theano.tensor.shared_randomstreams import RandomStreams
import PIL.Image
from SdA import SdA
from hsi_utils import *
cmap = numpy.asarray( [[0, 0, 0],
[192, 192, 192],
[0, 255, 0],
[0, 255, 255],
[0, 128, 0],
[255, 0, 255],
[165, 82, 41],
[128, 0, 128],
[255, 0, 0],
[255, 255, 0]], dtype='int32')
hsi_file = u'/home/hantek/data/hsi_data/pavia/PaviaU.mat'
gnd_file = u'/home/hantek/data/hsi_data/pavia/PaviaU_gt.mat'
data = sio.loadmat(hsi_file)
img = scale_to_unit_interval(data['paviaU'].astype(theano.config.floatX))
width = img.shape[0]
height = img.shape[1]
bands = img.shape[2]
data = sio.loadmat(gnd_file)
gnd_img = data['paviaU_gt'].astype(numpy.int32)
datasets, _, _, _ = \
prepare_data(hsi_img=img, gnd_img=gnd_img, merge=False,
window_size=7, n_principle=3, batch_size=100)
| false | true |
f7fd56537e30c96a86303d1da2f4016aec414c6a | 8,264 | py | Python | RESSPyLab/sqp_solver.py | ioannis-vm/RESSPyLab | 306fc24d5f8ece8f2f2de274b56b80ba2019f605 | [
"MIT"
] | 7 | 2019-10-15T09:16:41.000Z | 2021-09-24T11:28:45.000Z | RESSPyLab/sqp_solver.py | ioannis-vm/RESSPyLab | 306fc24d5f8ece8f2f2de274b56b80ba2019f605 | [
"MIT"
] | 3 | 2020-10-22T14:27:22.000Z | 2021-11-15T17:46:49.000Z | RESSPyLab/sqp_solver.py | ioannis-vm/RESSPyLab | 306fc24d5f8ece8f2f2de274b56b80ba2019f605 | [
"MIT"
] | 6 | 2019-07-22T05:47:10.000Z | 2021-10-24T02:06:26.000Z | """@package sqp_linsearch
Abstract class for RESSPyLab SQP solvers.
"""
import numpy as np
class SqpSolver:
    def __init__(self, objective_function, constraint, dumper=None):
        """ Abstract class to define SQP solvers given the objective function and constraints.

        :param MatModelErrorNda objective_function: Provides the objective function, and gradient / Hessian of it.
        :param AugLagConstraint constraint: Provides the constraints, and gradients / Hessians of them.
        :param Dumper dumper: Used to output information to the screen and/or to a file.

        The problem to be solved is to minimize an objective function, f(x), subjected to some constraints:

            minimize f(x)
                x
            subjected to g_i(x) <= 0

        where g_i(x) are nonlinear (possibly linear) constraint functions, i = 1, 2, ..., m.  The SQP
        method solves this problem by linearizing the constraints and solving a quadratic model of
        f(x) at each iteration k:

            minimize q(x) = grad[f(x_k)]^T . x_k + 1/2 x_k^T . H_k . x_k
                x
            subjected to grad[g(x_k)]^T . x_k + g(x_k) <= 0,

        where H_k is an approximation of the Hessian of f(x_k).  For additional details see:

        References:
            [1] Bierlaire (2015) "Optimization: Principles and Algorithms"
            [2] Nocedal and Wright (2006) "Numerical optimization"
        """
        self.total_iterations = 0
        self.maximum_iterations = 3000
        # Default tolerance: square root of machine epsilon.
        self.precision = np.sqrt(np.finfo(float).eps)
        self.constraint = constraint
        self.objective_fun = objective_function
        # Always bind self.dumper (None when unused) so attribute access is safe.
        self.use_dumper = dumper is not None
        self.dumper = dumper
        # Used to let all parts of the solver be aware of the active constraints.
        self.active_constraints_index = 0
        self.active_constraints_set = False
        # Tags used for exit information.
        self.convergence_reached_tag = 1
        self.maximum_iterations_reached_tag = 2
        self.unknown_exit = 99
        return

    def reset_solver(self):
        """ Sets the internal iteration count and active constraints to their starting values. """
        self.total_iterations = 0
        self.active_constraints_index = 0
        self.active_constraints_set = False
        return

    def set_maximum_iterations(self, n):
        """ Sets the maximum iterations to n. """
        self.maximum_iterations = n
        return

    def set_tolerance(self, tol):
        """ Sets the precision to tol. """
        self.precision = tol
        return

    def merit_fun(self, x, c):
        """ Merit function used to ensure global convergence; implemented by subclasses. """
        raise Exception("Not implemented in {0}".format(self))

    def globalized_sqp(self, x_0, dual_x_0):
        """ Pure virtual method for solving the globalized SQP problem; implemented by subclasses. """
        raise Exception("Not implemented in {0}".format(self))

    def hess_xx_lagrangian(self, x, hess_f, dual_x):
        """ Returns the Hessian of the Lagrangian only with respect to xx.

        :param np.array x: (n, 1) Primal variables.
        :param np.array hess_f: (n, n) Hessian of the objective function.
        :param np.array dual_x: (m, 1) Dual variables.
        :return np.array: (n, n) Hessian of the Lagrangian wrt. xx.

        Note the returned Hessian is only the "upper left corner" of the Hessian of the problem.
        """
        hess = hess_f
        for i, hess_gi in enumerate(self.constraint.get_hessian(x)):
            hess = hess + dual_x[i] * hess_gi
        return hess

    def grad_lagrangian(self, x, grad_f, dual_x, constraint_array, active_constraints=None):
        """ Returns the gradient of the problem.

        :param np.array x: (n, 1) Primal variables.
        :param np.array grad_f: (n, 1) Gradient of the objective function.
        :param np.array dual_x: (m, 1) Dual variables.
        :param np.array constraint_array: (m, 1) Values of each of the constraints.
        :param np.array active_constraints: (m, 1) Bool values, True if active, False if inactive.
            If None, then all are assumed to be active.
        :return np.array: (n + p, 1) Gradient of the Lagrangian, p is the number of active constraints.
        """
        grad = grad_f
        for i, grad_gi in enumerate(self.constraint.get_gradient(x)):
            grad = grad + float(dual_x[i]) * grad_gi
        if active_constraints is None:
            ca_active = constraint_array
        else:
            # Don't consider constraint values that are inactive (i.e., g(x)_i <= 0).
            ca_active = (constraint_array[active_constraints]).reshape(-1, 1)
        if len(ca_active) != 0:
            # np.vstack instead of np.row_stack: the latter was removed in NumPy 2.0.
            grad = np.vstack((grad, ca_active))
        return grad

    def get_constraint_array(self, x):
        """ Returns the (m, 1) column vector of constraint values. """
        return np.array(self.constraint.get_g(x)).reshape((-1, 1))

    def get_constraint_gradient_array(self, x):
        """ Returns the column stack of the gradients of each constraint.

        :param np.array x: (n, 1) Primal variables.
        :return np.array: (n, m) Gradients of all the constraint functions.

        The returned array is equivalent to the transpose of the Jacobian of the constraint vector.
        """
        all_constraint_grads = self.constraint.get_gradient(x)
        # Multiply by 1. to force float copies, matching the historical behavior.
        return np.column_stack([1. * gi for gi in all_constraint_grads])

    def _exec_globalized_sqp(self, x_0, dual_x_0):
        """ Shared driver: sanitizes the inputs, runs globalized_sqp, and reports the result.

        :return list: [x, dual_x, convergence_criteria] at exit.
        """
        # Sanitize the inputs and make sure the arrays are column vectors.
        x_0 = np.asarray(x_0).reshape(-1, 1)
        dual_x_0 = np.asarray(dual_x_0).reshape(-1, 1)

        print ("Starting SQP minimization...")
        [x, dual_x, exit_info] = self.globalized_sqp(x_0, dual_x_0)
        conv_criteria = exit_info['val']
        print (exit_info['msg'])
        print ("Exiting with ||grad[L]|| = {0:e}".format(conv_criteria))
        print ("x = {0}".format(x.reshape(-1)))
        print ("dual_x = {0}".format(dual_x.reshape(-1)))
        return [x, dual_x, conv_criteria]

    def solve(self, x_0, dual_x_0):
        """ Returns the variables and dual variables that minimize the objective s.t. the constraints.

        :param np.array x_0: (n, 1) Initial guess at primal variables.
        :param np.array dual_x_0: (m, 1) Initial guess at dual variables, m is the number of constraints.
        :return list: [x, dual_x] solution to the optimization problem.
        """
        x, dual_x, _ = self._exec_globalized_sqp(x_0, dual_x_0)
        return [x, dual_x]

    def solve_return_conv(self, x_0, dual_x_0):
        """ Same as solve(), but also returns the convergence criteria at exit.

        :param np.array x_0: (n, 1) Initial guess at primal variables.
        :param np.array dual_x_0: (m, 1) Initial guess at dual variables, m is the number of constraints.
        :return list: [x, dual_x, convergence_criteria].
        """
        return self._exec_globalized_sqp(x_0, dual_x_0)
| 42.818653 | 119 | 0.639279 | import numpy as np
class SqpSolver:
def __init__(self, objective_function, constraint, dumper=None):
self.total_iterations = 0
self.maximum_iterations = 3000
self.precision = np.sqrt(np.finfo(float).eps)
self.constraint = constraint
self.objective_fun = objective_function
if dumper is None:
self.use_dumper = False
else:
self.use_dumper = True
self.dumper = dumper
self.active_constraints_index = 0
self.active_constraints_set = False
self.convergence_reached_tag = 1
self.maximum_iterations_reached_tag = 2
self.unknown_exit = 99
return
def reset_solver(self):
self.total_iterations = 0
self.active_constraints_index = 0
self.active_constraints_set = False
return
def set_maximum_iterations(self, n):
self.maximum_iterations = n
return
def set_tolerance(self, tol):
self.precision = tol
return
def merit_fun(self, x, c):
raise Exception("Not implemented in {0}".format(self))
def globalized_sqp(self, x_0, dual_x_0):
raise Exception("Not implemented in {0}".format(self))
def hess_xx_lagrangian(self, x, hess_f, dual_x):
hess = hess_f
constraint_hessians = self.constraint.get_hessian(x)
for i, hi in enumerate(constraint_hessians):
hess = hess + dual_x[i] * hi
return hess
def grad_lagrangian(self, x, grad_f, dual_x, constraint_array, active_constraints=None):
grad = grad_f
constraint_grads = self.constraint.get_gradient(x)
dual_2 = dual_x * 1.0
dual_2[np.logical_not(constraint_array)] = 0.
for i, gi in enumerate(constraint_grads):
grad = grad + float(dual_x[i]) * gi
if active_constraints is None:
ca_active = constraint_array
else:
ca_active = (constraint_array[active_constraints]).reshape(-1, 1)
if len(ca_active) != 0:
grad = np.row_stack((grad, ca_active))
return grad
def get_constraint_array(self, x):
return np.array(self.constraint.get_g(x)).reshape((-1, 1))
def get_constraint_gradient_array(self, x):
all_constraint_grads = self.constraint.get_gradient(x)
constraint_grads = 1. * all_constraint_grads[0]
for i in range(1, len(all_constraint_grads)):
constraint_grads = np.column_stack((constraint_grads, 1. * all_constraint_grads[i]))
return constraint_grads
def solve(self, x_0, dual_x_0):
# Sanitize the inputs
if type(x_0) is not np.ndarray or type(dual_x_0) is not np.ndarray:
x_0 = np.array(x_0)
dual_x_0 = np.array(dual_x_0)
# Make sure that the arrays are column vectors
x_0 = x_0.reshape(-1, 1)
dual_x_0 = dual_x_0.reshape(-1, 1)
print ("Starting SQP minimization...")
[x, dual_x, exit_info] = self.globalized_sqp(x_0, dual_x_0)
conv_criteria = exit_info['val']
print (exit_info['msg'])
print ("Exiting with ||grad[L]|| = {0:e}".format(conv_criteria))
print ("x = {0}".format(x.reshape(-1)))
print ("dual_x = {0}".format(dual_x.reshape(-1)))
return [x, dual_x]
def solve_return_conv(self, x_0, dual_x_0):
# Sanitize the inputs
if type(x_0) is not np.ndarray or type(dual_x_0) is not np.ndarray:
x_0 = np.array(x_0)
dual_x_0 = np.array(dual_x_0)
# Make sure that the arrays are column vectors
x_0 = x_0.reshape(-1, 1)
dual_x_0 = dual_x_0.reshape(-1, 1)
print ("Starting SQP minimization...")
[x, dual_x, exit_info] = self.globalized_sqp(x_0, dual_x_0)
convergence_criteria = exit_info['val']
print (exit_info['msg'])
print ("Exiting with ||grad[L]|| = {0:e}".format(convergence_criteria))
print ("x = {0}".format(x.reshape(-1)))
print ("dual_x = {0}".format(dual_x.reshape(-1)))
return [x, dual_x, convergence_criteria]
| true | true |
f7fd580bab329ba69aca4bcbee258d64b2851965 | 19,086 | py | Python | src/request.py | piwaniuk/critic | 28ed20bb8032d7cc5aa23de98da51e619fd84164 | [
"Apache-2.0"
] | 216 | 2015-01-05T12:48:10.000Z | 2022-03-08T00:12:23.000Z | src/request.py | piwaniuk/critic | 28ed20bb8032d7cc5aa23de98da51e619fd84164 | [
"Apache-2.0"
] | 55 | 2015-02-28T12:10:26.000Z | 2020-11-18T17:45:16.000Z | src/request.py | piwaniuk/critic | 28ed20bb8032d7cc5aa23de98da51e619fd84164 | [
"Apache-2.0"
] | 34 | 2015-05-02T15:15:10.000Z | 2020-06-15T19:20:37.000Z | # -*- mode: python; encoding: utf-8 -*-
#
# Copyright 2012 Jens Lindström, Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import re
import urllib
import urlparse
import httplib
import wsgiref.util
import base
import auth
import configuration
import dbutils
# Paths that may be accessed without authentication even when anonymous
# users are not allowed in general.
INSECURE_PATHS = {"login", "validatelogin", "createuser", "registeruser"}
def decodeURIComponent(text):
    """Decode %HH escape sequences (and '+' as space) in 'text' and return the result."""
    decoded = urllib.unquote_plus(text)
    return decoded
class NoDefault:
    """\
    Placeholder used to signal that a parameter has no default value.

    The class itself (not an instance) is the default value of the 'default'
    argument of Request.getParameter(); getParameter() compares against it
    with 'default is NoDefault' and raises MissingParameter when the
    parameter is absent.
    """
    pass
class HTTPResponse(Exception):
    """Base class for exceptions representing a complete HTTP response.

    Sub-classes adjust 'status', 'body' (a list of strings) and
    'content_type'; execute() pushes them onto the request object and
    returns the body for the WSGI layer.
    """

    def __init__(self, status):
        self.status = status
        self.content_type = "text/plain"
        self.body = []

    def execute(self, db, req):
        req.setStatus(self.status)
        # Only emit a Content-Type header when there is an actual body.
        if self.body:
            req.setContentType(self.content_type)
        req.start()
        return self.body
class NoContent(HTTPResponse):
    """204 No Content: success with an intentionally empty body."""

    def __init__(self):
        HTTPResponse.__init__(self, 204)
class NotModified(HTTPResponse):
    """304 Not Modified: the client's cached copy is still valid."""

    def __init__(self):
        HTTPResponse.__init__(self, 304)
class Forbidden(HTTPResponse):
    """403 Forbidden response carrying a plain-text message."""

    def __init__(self, message="Forbidden"):
        HTTPResponse.__init__(self, 403)
        self.body = [message]
class NotFound(HTTPResponse):
    """404 Not Found response carrying a plain-text message."""

    def __init__(self, message="Not found"):
        HTTPResponse.__init__(self, 404)
        self.body = [message]
class Redirect(HTTPResponse):
    """Base class for HTTP redirect (3xx) responses."""

    def __init__(self, status, location, no_cache=False):
        super(Redirect, self).__init__(status)
        self.location = location
        self.no_cache = no_cache

    def execute(self, db, req):
        from htmlutils import htmlify
        if req.allowRedirect(self.status):
            req.addResponseHeader("Location", self.location)
            # Fallback body with a clickable link for clients that do not
            # follow the Location header automatically.
            link_attribute = htmlify(self.location, attributeValue=True)
            link_text = htmlify(self.location)
            self.body = ["<p>Please go here: <a href=%s>%s</a>."
                         % (link_attribute, link_text)]
            self.content_type = "text/html"
        else:
            # The request method must not be blindly redirected.
            self.status = 403
            self.body = ["Cowardly refusing to redirect %s request."
                         % req.method]
        return super(Redirect, self).execute(db, req)
class Found(Redirect):
    """302 Found redirect."""

    def __init__(self, location):
        Redirect.__init__(self, 302, location)
class SeeOther(Redirect):
    """303 See Other redirect."""

    def __init__(self, location):
        Redirect.__init__(self, 303, location)
class MovedTemporarily(Redirect):
    """307 Temporary Redirect, optionally marked as uncacheable."""

    def __init__(self, location, no_cache=False):
        Redirect.__init__(self, 307, location, no_cache=no_cache)

    def execute(self, db, req):
        if self.no_cache:
            req.addResponseHeader("Cache-Control", "no-cache")
        return Redirect.execute(self, db, req)
class NeedLogin(MovedTemporarily):
    """Redirect to the login page, preserving the originally requested URL.

    'source' is either a Request (whose target URL is extracted) or anything
    convertible to a string.  When 'optional' is true the login page is told
    that logging in may be skipped.
    """

    def __init__(self, source, optional=False):
        if isinstance(source, Request):
            target = source.getTargetURL()
        else:
            target = str(source)
        location = "/login?target=%s" % urllib.quote(target)
        if optional:
            location += "&optional=yes"
        # Always uncacheable: the redirect depends on session state.
        super(NeedLogin, self).__init__(location, no_cache=True)
class RequestHTTPAuthentication(HTTPResponse):
    """401 response that asks the client for HTTP Basic credentials."""

    def __init__(self):
        HTTPResponse.__init__(self, 401)

    def execute(self, db, req):
        import page.utils
        message = page.utils.displayMessage(
            db, req, dbutils.User.makeAnonymous(),
            title="Authentication required",
            message=("You must provide valid HTTP authentication to access "
                     "this system."))
        self.body = str(message)
        self.content_type = "text/html"
        req.addResponseHeader("WWW-Authenticate", "Basic realm=\"Critic\"")
        return HTTPResponse.execute(self, db, req)
class DisplayMessage(base.Error):
    """\
    Utility exception raised by pages to display a simple message.
    """
    def __init__(self, title, body=None, review=None, html=False, status=200):
        # title:  heading of the message page
        # body:   optional message text; 'html' presumably marks it as
        #         pre-formatted HTML — confirm against the rendering code
        # review: optional review object to associate the message with
        # status: HTTP status code to respond with (default 200)
        self.title = title
        self.body = body
        self.review = review
        self.html = html
        self.status = status
class InvalidParameterValue(DisplayMessage):
    """Raised when a query parameter has an invalid value.

    Raised automatically by Request.getParameter() when the parameter's value
    cannot be converted as requested.  Responds with HTTP 400.
    """

    def __init__(self, name, value, expected):
        details = "Got '%s=%s', expected %s." % (name, value, expected)
        DisplayMessage.__init__(
            self, "Invalid URI Parameter Value!", details, status=400)
class MissingParameter(DisplayMessage):
    """Raised when a required query parameter is absent.

    Raised automatically by Request.getParameter() when the parameter is
    required (no default supplied) but missing.  Responds with HTTP 400.
    """

    def __init__(self, name):
        details = "Expected '%s' parameter." % name
        DisplayMessage.__init__(
            self, "Missing URI Parameter!", details, status=400)
class MissingWSGIRemoteUser(Exception):
    """\
    Exception raised if WSGI environ "REMOTE_USER" is missing.

    This happens when Critic is running in "host" authentication mode but no
    REMOTE_USER variable was present in the WSGI environ dict provided by the
    web server.
    """
    pass
class Request:
"""\
WSGI request wrapper class.
Pages and operations should typically only need to access request parameters
(via getParameter()) and headers (via getRequestHeader()), and set response
status (using setStatus()) if not "200 OK" and content-type (using
setContentType()) if not "text/html". The start() method must be called
before any content is returned to the WSGI layer, but this is taken care of
by the main request handling function (critic.py::main).
In the case of POST requests, the request body is retrieved using the read()
method.
Properties:
user -- user name from HTTP authentication
method -- HTTP method ("GET" or "POST", typically)
path -- URI path component, without leading forward slash
original_path -- same as 'path', unless the path is a short-hand for another
path, in which case 'path' is the resolved path
query -- URI query component
original_query == same as 'query', unless the path is a short-hand for
another path, in which case 'query' is typically extended
with parameters derived from the short-hand path
Primary methods:
getParameter(name, default, filter) -- get URI query parameter
getRequestHeader(name) -- get HTTP request header
getRequestHeaders(name) -- get all HTTP request headers
read() -- read HTTP request body
setStatus(code, message) -- set HTTP response status
setContentType(content_type) -- set Content-Type response header
addResponseHeader(name, value) -- add HTTP response header
Methods used by framework code:
start() -- call the WSGI layers start_response() callback
isStarted() -- check if start() has been called
getContentType() -- get response content type
"""
    def __init__(self, db, environ, start_response):
        """\
        Construct request wrapper.

        The environ and start_response arguments should be the arguments to the
        WSGI application object.
        """
        self.__db = db
        self.__environ = environ
        self.__start_response = start_response

        # Response state; filled in via setStatus()/setContentType()/
        # addResponseHeader() and committed by start().
        self.__status = None
        self.__content_type = None
        self.__response_headers = []
        self.__started = False

        # Request body bookkeeping: CONTENT_LENGTH may be absent or empty.
        content_length = environ.get("CONTENT_LENGTH")
        self.__request_body_length = int(content_length) if content_length else 0
        self.__request_body_read = 0

        # Prefer the original host header when running behind a reverse proxy.
        self.server_name = \
            self.getRequestHeader("X-Forwarded-Host") \
            or environ.get("SERVER_NAME") \
            or configuration.base.HOSTNAME
        self.method = environ.get("REQUEST_METHOD", "")
        # URI path component, without the leading forward slash.
        self.path = environ.get("PATH_INFO", "").lstrip("/")
        self.original_path = self.path
        self.query = environ.get("QUERY_STRING", "")
        # keep_blank_values=True so "?a=&b" style parameters are preserved.
        self.parsed_query = urlparse.parse_qs(self.query, keep_blank_values=True)
        self.original_query = self.query

        # Parse the Cookie header into a simple name -> value mapping.
        self.cookies = {}
        header = self.getRequestHeader("Cookie")
        if header:
            for cookie in map(str.strip, header.split(";")):
                name, _, value = cookie.partition("=")
                if name and value:
                    self.cookies[name] = value

        self.session_type = configuration.base.SESSION_TYPE
def updateQuery(self, items):
self.parsed_query.update(items)
self.query = urllib.urlencode(
sorted(self.parsed_query.items()), doseq=True)
    @property
    def user(self):
        # The user associated with the current database session.
        return self.__db.user
def getTargetURL(self):
target = "/" + self.path
if self.query:
target += "?" + self.query
return target
    def getRequestURI(self):
        # Full request URI reconstructed from the WSGI environ.
        return wsgiref.util.request_uri(self.__environ)
    def getEnvironment(self):
        # Raw WSGI environ dict; prefer the accessor methods when possible.
        return self.__environ
    def getParameter(self, name, default=NoDefault, filter=lambda value: value):
        """\
        Get URI query parameter.

        If the requested parameter was not present in the URI query component,
        the supplied default value is returned instead, or, if the supplied
        default value is the NoDefault class, a MissingParameter exception is
        raised.

        If a filter function is supplied, it is called with a single argument,
        the string value of the URI parameter, and its return value is returned
        from getParameter(). If the filter function raises an exception (other
        than DisplayMessage or sub-classes thereof) an InvalidParameterValue
        exception is raised. Note: the filter function is not applied to
        default values, meaning that the default value can be of a different
        type than actual parameter values.

        A parameter that occurs once yields its (filtered) value; a repeated
        parameter yields a list of (filtered) values.
        """
        # parse_qs maps each parameter name to a *list* of values.
        value = self.parsed_query.get(name)
        if value is None:
            if default is NoDefault:
                raise MissingParameter(name)
            return default
        def filter_value(value):
            try:
                return filter(value)
            # Deliberate application errors propagate untranslated.
            except (base.Error, auth.AccessDenied):
                raise
            except Exception:
                # Any other failure means the parameter value was malformed.
                if filter is int:
                    expected = "integer"
                else:
                    expected = "something else"
                raise InvalidParameterValue(name, value, expected)
        value = [filter_value(element) for element in value]
        # Unwrap single occurrences; repeated parameters stay a list.
        if len(value) == 1:
            return value[0]
        return value
def getParameters(self):
return { name: value[0] if len(value) == 1 else value
for name, value in self.parsed_query.items() }
def getRequestHeader(self, name, default=None):
"""\
Get HTTP request header by name.
The name is case-insensitive. If the request header was not present in
the request, the default value is returned (or None if no default value
is provided.) If the request header was present, its value is returned
as a string.
"""
return self.__environ.get("HTTP_" + name.upper().replace("-", "_"), default)
def getRequestHeaders(self):
"""\
Get a dictionary containing all HTTP request headers.
The header names are converted to all lower-case, and any underscores
('_') in the header name is replaced with a dash ('-'). The reason for
this name transformation is that the header names are already
transformed in the WSGI layer from their original form to all
upper-case, with dashes replaced by underscores, so the original name is
not available.
The returned dictionary is a copy of the underlying storage, so the
caller can modify it without the modifications having any side-effects.
"""
headers = {}
for name, value in self.__environ.items():
if name.startswith("HTTP_"):
headers[name[5:].lower().replace("_", "-")] = value
return headers
def getReferrer(self):
try: return self.getRequestHeader("Referer")
except: return "N/A"
    def read(self, bufsize=None):
        """\
        Return the HTTP request body, or an empty string if there is none.

        If `bufsize` is given, at most that many bytes are returned, and
        repeated calls continue where the previous one stopped.  Reads are
        capped by the request's declared Content-Length; with no (or zero)
        Content-Length nothing is read at all.
        """
        if self.__request_body_length:
            # Never read more than the declared body size minus what has
            # already been consumed.
            max_bufsize = self.__request_body_length - self.__request_body_read
            if bufsize is None:
                bufsize = max_bufsize
            else:
                bufsize = min(bufsize, max_bufsize)
        # No input stream, or nothing (left) to read.
        if "wsgi.input" not in self.__environ or not bufsize:
            return ""
        data = self.__environ["wsgi.input"].read(bufsize)
        self.__request_body_read += len(data)
        return data
def write(self, data):
"""
Write HTTP response body chunk.
"""
self.__write(data)
def setStatus(self, code, message=None):
"""\
Set the HTTP status code, and optionally the status message.
If the message argument is None, a default status message for the
specified HTTP status code is used. If the specified status code is not
one included in httplib.responses, an KeyError exception is raised.
If this method is not called, the HTTP status will be "200 OK".
This method must be called before the response is started. (This really
only matters for incremental pages that returns the response body in
chunks; they can't call this method once they've yielded the first body
chunk.)
"""
assert not self.__started, "Response already started!"
if message is None: message = httplib.responses[code]
self.__status = "%d %s" % (code, message)
def hasContentType(self):
return self.__content_type is not None
    def setContentType(self, content_type):
        """\
        Set the response content type (the "Content-Type" header).

        For "text/*" content types without an explicit "charset=X" addition,
        "; charset=utf-8" is appended automatically; other content types are
        stored as given.

        If this method is not called, start() defaults the content type to
        "text/plain" (stored as "text/plain; charset=utf-8").

        This function must be used rather than addResponseHeader() to set the
        Content-Type header, and must be called before the response is
        started.
        """
        assert not self.__started, "Response already started!"
        if content_type.startswith("text/") and "charset=" not in content_type: content_type += "; charset=utf-8"
        self.__content_type = content_type
def addResponseHeader(self, name, value):
"""\
Add HTTP response header.
Append a response header to the list of response headers passed to the
WSGI start_response() callback when the response is started.
Note: This function does not replace existing headers or merge headers
with the same name; calling code has to handle such things. No headers
(except Content-Type) are added automatically.
This function must not be used to add a Content-Type header, and must be
called before the response is started.
"""
assert not self.__started, "Response already started!"
assert name.lower() != "content-type", "Use Request.setContentType() instead!"
self.__response_headers.append((name, value))
def setCookie(self, name, value, secure=False):
if secure and configuration.base.ACCESS_SCHEME != "http":
modifier = "Secure"
else:
modifier = "HttpOnly"
self.addResponseHeader(
"Set-Cookie",
"%s=%s; Max-Age=31536000; Path=/; %s" % (name, value, modifier))
def deleteCookie(self, name):
if self.cookies.has_key(name):
self.addResponseHeader(
"Set-Cookie",
"%s=invalid; Path=/; Expires=Thursday 01-Jan-1970 00:00:00 GMT" % name)
    def start(self):
        """\
        Start the response by calling the WSGI start_response() callback.

        This function is called automatically by the main request handling
        function (critic.py::main) and should typically not be called from any
        other code.

        This function can be called multiple times; repeated calls do nothing.
        """
        if not self.__started:
            # Default to "200 OK" / "text/plain; charset=utf-8" when the
            # page set neither a status nor a content type.
            if self.__status is None:
                self.setStatus(200)
            if self.__content_type is None:
                self.setContentType("text/plain")
            # Content-Type is tracked separately and always emitted first.
            headers = [("Content-Type", self.__content_type)]
            headers.extend(self.__response_headers)
            # start_response() hands back the body write() callable.
            self.__write = self.__start_response(self.__status, headers)
            self.__started = True
def isStarted(self):
"""\
Check if the response has been started.
"""
return self.__started
def getContentType(self):
"""\
Return the currently set response content type.
The returned value includes the automatically added "charset=utf-8". If
the response hasn't been started yet, and setContentType() hasn't been
called, None is returned.
"""
return self.__content_type
def ensureSecure(self):
if configuration.base.ACCESS_SCHEME != "http":
current_url = self.getRequestURI()
secure_url = re.sub("^http:", "https:", current_url)
if current_url != secure_url:
raise MovedTemporarily(secure_url, True)
def requestHTTPAuthentication(self, realm="Critic"):
self.setStatus(401)
self.addResponseHeader("WWW-Authenticate", "Basic realm=\"%s\"" % realm)
self.start()
def allowRedirect(self, status):
"""Return true if it is safe to redirect this request"""
return self.method in ("GET", "HEAD") or status == 303
| 35.214022 | 136 | 0.640889 |
import re
import urllib
import urlparse
import httplib
import wsgiref.util
import base
import auth
import configuration
import dbutils
INSECURE_PATHS = set(["login", "validatelogin",
"createuser", "registeruser"])
def decodeURIComponent(text):
    """Decode a %-escaped URI component ('+' decodes to a space)."""
    return urllib.unquote_plus(text)
class NoDefault:
pass
class HTTPResponse(Exception):
    """
    Base class for exceptions that are rendered directly as an HTTP
    response with a given status code.
    """
    def __init__(self, status):
        # Sub-classes may append strings to `body` and override
        # `content_type`; by default the response has no body.
        self.status = status
        self.body = []
        self.content_type = "text/plain"
    def execute(self, db, req):
        """Emit this response on `req` and return the body chunks."""
        req.setStatus(self.status)
        if self.body:
            req.setContentType(self.content_type)
        req.start()
        return self.body
class NoContent(HTTPResponse):
def __init__(self):
super(NoContent, self).__init__(204)
class NotModified(HTTPResponse):
def __init__(self):
super(NotModified, self).__init__(304)
class Forbidden(HTTPResponse):
def __init__(self, message="Forbidden"):
super(Forbidden, self).__init__(403)
self.body = [message]
class NotFound(HTTPResponse):
def __init__(self, message="Not found"):
super(NotFound, self).__init__(404)
self.body = [message]
class Redirect(HTTPResponse):
def __init__(self, status, location, no_cache=False):
super(Redirect, self).__init__(status)
self.location = location
self.no_cache = no_cache
def execute(self, db, req):
from htmlutils import htmlify
if not req.allowRedirect(self.status):
self.status = 403
self.body = ["Cowardly refusing to redirect %s request."
% req.method]
else:
req.addResponseHeader("Location", self.location)
self.body = ["<p>Please go here: <a href=%s>%s</a>."
% (htmlify(self.location, attributeValue=True),
htmlify(self.location))]
self.content_type = "text/html"
return super(Redirect, self).execute(db, req)
class Found(Redirect):
def __init__(self, location):
super(Found, self).__init__(302, location)
class SeeOther(Redirect):
def __init__(self, location):
super(SeeOther, self).__init__(303, location)
class MovedTemporarily(Redirect):
def __init__(self, location, no_cache=False):
super(MovedTemporarily, self).__init__(307, location)
self.no_cache = no_cache
def execute(self, db, req):
if self.no_cache:
req.addResponseHeader("Cache-Control", "no-cache")
return super(MovedTemporarily, self).execute(db, req)
class NeedLogin(MovedTemporarily):
    """
    Temporary redirect to the login page, carrying a `target` parameter
    that points back at the page that required authentication.
    """
    def __init__(self, source, optional=False):
        # `source` may be the Request itself or anything convertible to a
        # target path string.
        if isinstance(source, Request):
            target = source.getTargetURL()
        else:
            target = str(source)
        location = "/login?target=%s" % urllib.quote(target)
        if optional:
            location += "&optional=yes"
        # Fixed: dropped the spurious `return` that was applied to the
        # superclass __init__ call; __init__ must not return a value, and
        # the old code merely returned None by accident.
        super(NeedLogin, self).__init__(location, no_cache=True)
class RequestHTTPAuthentication(HTTPResponse):
def __init__(self):
super(RequestHTTPAuthentication, self).__init__(401)
def execute(self, db, req):
import page.utils
self.body = str(page.utils.displayMessage(
db, req, dbutils.User.makeAnonymous(),
title="Authentication required",
message=("You must provide valid HTTP authentication to access "
"this system.")))
self.content_type = "text/html"
req.addResponseHeader("WWW-Authenticate", "Basic realm=\"Critic\"")
return super(RequestHTTPAuthentication, self).execute(db, req)
class DisplayMessage(base.Error):
def __init__(self, title, body=None, review=None, html=False, status=200):
self.title = title
self.body = body
self.review = review
self.html = html
self.status = status
class InvalidParameterValue(DisplayMessage):
def __init__(self, name, value, expected):
DisplayMessage.__init__(self, "Invalid URI Parameter Value!", "Got '%s=%s', expected %s." % (name, value, expected), status=400)
class MissingParameter(DisplayMessage):
def __init__(self, name):
DisplayMessage.__init__(self, "Missing URI Parameter!", "Expected '%s' parameter." % name, status=400)
class MissingWSGIRemoteUser(Exception):
pass
class Request:
def __init__(self, db, environ, start_response):
self.__db = db
self.__environ = environ
self.__start_response = start_response
self.__status = None
self.__content_type = None
self.__response_headers = []
self.__started = False
content_length = environ.get("CONTENT_LENGTH")
self.__request_body_length = int(content_length) if content_length else 0
self.__request_body_read = 0
self.server_name = \
self.getRequestHeader("X-Forwarded-Host") \
or environ.get("SERVER_NAME") \
or configuration.base.HOSTNAME
self.method = environ.get("REQUEST_METHOD", "")
self.path = environ.get("PATH_INFO", "").lstrip("/")
self.original_path = self.path
self.query = environ.get("QUERY_STRING", "")
self.parsed_query = urlparse.parse_qs(self.query, keep_blank_values=True)
self.original_query = self.query
self.cookies = {}
header = self.getRequestHeader("Cookie")
if header:
for cookie in map(str.strip, header.split(";")):
name, _, value = cookie.partition("=")
if name and value:
self.cookies[name] = value
self.session_type = configuration.base.SESSION_TYPE
def updateQuery(self, items):
self.parsed_query.update(items)
self.query = urllib.urlencode(
sorted(self.parsed_query.items()), doseq=True)
@property
def user(self):
return self.__db.user
def getTargetURL(self):
target = "/" + self.path
if self.query:
target += "?" + self.query
return target
def getRequestURI(self):
return wsgiref.util.request_uri(self.__environ)
def getEnvironment(self):
return self.__environ
def getParameter(self, name, default=NoDefault, filter=lambda value: value):
value = self.parsed_query.get(name)
if value is None:
if default is NoDefault:
raise MissingParameter(name)
return default
def filter_value(value):
try:
return filter(value)
except (base.Error, auth.AccessDenied):
raise
except Exception:
if filter is int:
expected = "integer"
else:
expected = "something else"
raise InvalidParameterValue(name, value, expected)
value = [filter_value(element) for element in value]
if len(value) == 1:
return value[0]
return value
def getParameters(self):
return { name: value[0] if len(value) == 1 else value
for name, value in self.parsed_query.items() }
def getRequestHeader(self, name, default=None):
return self.__environ.get("HTTP_" + name.upper().replace("-", "_"), default)
def getRequestHeaders(self):
headers = {}
for name, value in self.__environ.items():
if name.startswith("HTTP_"):
headers[name[5:].lower().replace("_", "-")] = value
return headers
def getReferrer(self):
try: return self.getRequestHeader("Referer")
except: return "N/A"
def read(self, bufsize=None):
if self.__request_body_length:
max_bufsize = self.__request_body_length - self.__request_body_read
if bufsize is None:
bufsize = max_bufsize
else:
bufsize = min(bufsize, max_bufsize)
if "wsgi.input" not in self.__environ or not bufsize:
return ""
data = self.__environ["wsgi.input"].read(bufsize)
self.__request_body_read += len(data)
return data
def write(self, data):
self.__write(data)
def setStatus(self, code, message=None):
assert not self.__started, "Response already started!"
if message is None: message = httplib.responses[code]
self.__status = "%d %s" % (code, message)
def hasContentType(self):
return self.__content_type is not None
def setContentType(self, content_type):
assert not self.__started, "Response already started!"
if content_type.startswith("text/") and "charset=" not in content_type: content_type += "; charset=utf-8"
self.__content_type = content_type
def addResponseHeader(self, name, value):
assert not self.__started, "Response already started!"
assert name.lower() != "content-type", "Use Request.setContentType() instead!"
self.__response_headers.append((name, value))
def setCookie(self, name, value, secure=False):
if secure and configuration.base.ACCESS_SCHEME != "http":
modifier = "Secure"
else:
modifier = "HttpOnly"
self.addResponseHeader(
"Set-Cookie",
"%s=%s; Max-Age=31536000; Path=/; %s" % (name, value, modifier))
def deleteCookie(self, name):
if self.cookies.has_key(name):
self.addResponseHeader(
"Set-Cookie",
"%s=invalid; Path=/; Expires=Thursday 01-Jan-1970 00:00:00 GMT" % name)
def start(self):
if not self.__started:
if self.__status is None:
self.setStatus(200)
if self.__content_type is None:
self.setContentType("text/plain")
headers = [("Content-Type", self.__content_type)]
headers.extend(self.__response_headers)
self.__write = self.__start_response(self.__status, headers)
self.__started = True
def isStarted(self):
return self.__started
def getContentType(self):
return self.__content_type
def ensureSecure(self):
if configuration.base.ACCESS_SCHEME != "http":
current_url = self.getRequestURI()
secure_url = re.sub("^http:", "https:", current_url)
if current_url != secure_url:
raise MovedTemporarily(secure_url, True)
def requestHTTPAuthentication(self, realm="Critic"):
self.setStatus(401)
self.addResponseHeader("WWW-Authenticate", "Basic realm=\"%s\"" % realm)
self.start()
def allowRedirect(self, status):
return self.method in ("GET", "HEAD") or status == 303
| true | true |
f7fd5887998a3b7f5618cb537e4c36393bdfdad6 | 1,482 | py | Python | authentication/socialaccount/providers/mailru/tests.py | vo0doO/pydj-persweb | efcd6b7090230f7c0b9ec056008f6d1d9e876ed9 | [
"CC0-1.0"
] | null | null | null | authentication/socialaccount/providers/mailru/tests.py | vo0doO/pydj-persweb | efcd6b7090230f7c0b9ec056008f6d1d9e876ed9 | [
"CC0-1.0"
] | 4 | 2020-05-06T17:22:00.000Z | 2021-12-13T20:43:30.000Z | authentication/socialaccount/providers/mailru/tests.py | vo0doO/pydj-persweb | efcd6b7090230f7c0b9ec056008f6d1d9e876ed9 | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from authentication.socialaccount.tests import OAuth2TestsMixin
from authentication.tests import MockedResponse, TestCase
from .provider import MailRuProvider
class MailRuTests(OAuth2TestsMixin, TestCase):
    """OAuth2 login tests for the Mail.Ru provider, using canned responses."""
    provider_id = MailRuProvider.id
    def get_mocked_response(self, verified_email=True):
        """Return a canned (HTTP 200) Mail.Ru user-info API response.

        `verified_email` is unused here; presumably part of the mixin's
        hook signature -- confirm against OAuth2TestsMixin.
        """
        return MockedResponse(200, """
[ { "uid": "15410773191172635989", "first_name": "Евгений", "last_name": "Маслов", "nick": "maslov", "email": "emaslov@mail.ru", "sex": 0, "birthday": "15.02.1980", "has_pic": 1, "pic": "http://avt.appsmail.ru/mail/emaslov/_avatar", "pic_small": "http://avt.appsmail.ru/mail/emaslov/_avatarsmall", "pic_big": "http://avt.appsmail.ru/mail/emaslov/_avatarbig", "link": "http://my.mail.ru/mail/emaslov/", "referer_type": "", "referer_id": "", "is_online": 1, "friends_count": 145, "is_verified": 1, "vip" : 0, "app_installed": 1, "location": { "country": { "name": "Россия", "id": "24" }, "city": { "name": "Москва", "id": "25" }, "region": { "name": "Москва", "id": "999999" } } }]""") # noqa
    def get_login_response_json(self, with_refresh_token=True):
        """Return a canned access-token response body."""
        # FIXME: This is not an actual response. I added this in order
        # to get the test suite going but did not verify to check the
        # exact response being returned.
        return '{"access_token": "testac", "uid": "weibo", "refresh_token": "testrf", "x_mailru_vid": "1"}' # noqa
| 67.363636 | 695 | 0.674089 |
from __future__ import absolute_import, unicode_literals
from authentication.socialaccount.tests import OAuth2TestsMixin
from authentication.tests import MockedResponse, TestCase
from .provider import MailRuProvider
class MailRuTests(OAuth2TestsMixin, TestCase):
provider_id = MailRuProvider.id
def get_mocked_response(self, verified_email=True):
return MockedResponse(200, """
[ { "uid": "15410773191172635989", "first_name": "Евгений", "last_name": "Маслов", "nick": "maslov", "email": "emaslov@mail.ru", "sex": 0, "birthday": "15.02.1980", "has_pic": 1, "pic": "http://avt.appsmail.ru/mail/emaslov/_avatar", "pic_small": "http://avt.appsmail.ru/mail/emaslov/_avatarsmall", "pic_big": "http://avt.appsmail.ru/mail/emaslov/_avatarbig", "link": "http://my.mail.ru/mail/emaslov/", "referer_type": "", "referer_id": "", "is_online": 1, "friends_count": 145, "is_verified": 1, "vip" : 0, "app_installed": 1, "location": { "country": { "name": "Россия", "id": "24" }, "city": { "name": "Москва", "id": "25" }, "region": { "name": "Москва", "id": "999999" } } }]""")
def get_login_response_json(self, with_refresh_token=True):
return '{"access_token": "testac", "uid": "weibo", "refresh_token": "testrf", "x_mailru_vid": "1"}'
| true | true |
f7fd596d6ee052f5f626bc50b739bace5907c7c5 | 11,222 | py | Python | 1. Knowledge/minesweeper/minesweeper.py | smpace/IntroductionToArtificialIntelligence | 3afb40b6fd6926e2745c0252b80c35f838532079 | [
"MIT"
] | null | null | null | 1. Knowledge/minesweeper/minesweeper.py | smpace/IntroductionToArtificialIntelligence | 3afb40b6fd6926e2745c0252b80c35f838532079 | [
"MIT"
] | null | null | null | 1. Knowledge/minesweeper/minesweeper.py | smpace/IntroductionToArtificialIntelligence | 3afb40b6fd6926e2745c0252b80c35f838532079 | [
"MIT"
] | null | null | null | import itertools
import random
import copy
class Minesweeper():
    """
    Board-level model of a Minesweeper game: mine placement, neighbor
    counts and the win condition.
    """

    def __init__(self, height=8, width=8, mines=8):
        # Board dimensions and the coordinates of every mine.
        self.height = height
        self.width = width
        self.mines = set()

        # A height x width grid of booleans; True marks a mine.
        self.board = [[False] * self.width for _ in range(self.height)]

        # Place mines at distinct random cells until we have enough.
        while len(self.mines) != mines:
            row = random.randrange(height)
            col = random.randrange(width)
            if not self.board[row][col]:
                self.mines.add((row, col))
                self.board[row][col] = True

        # The player starts having flagged nothing.
        self.mines_found = set()

    def print(self):
        """
        Prints a text-based representation of where mines are located.
        """
        border = "--" * self.width + "-"
        for row in range(self.height):
            print(border)
            for col in range(self.width):
                print("|X" if self.board[row][col] else "| ", end="")
            print("|")
        print(border)

    def is_mine(self, cell):
        """Return True if the (row, col) `cell` contains a mine."""
        row, col = cell
        return self.board[row][col]

    def nearby_mines(self, cell):
        """
        Return how many mines lie within one row and column of `cell`,
        not counting the cell itself.
        """
        row, col = cell
        neighborhood = (
            (i, j)
            for i in range(row - 1, row + 2)
            for j in range(col - 1, col + 2)
            if (i, j) != cell
        )
        return sum(
            1
            for i, j in neighborhood
            if 0 <= i < self.height and 0 <= j < self.width and self.board[i][j]
        )

    def won(self):
        """Return True once every mine has been flagged."""
        return self.mines_found == self.mines
class Sentence():
    """
    Logical statement about a Minesweeper game.

    A sentence consists of a set of board cells and a count of how many of
    those cells are mines.
    """

    def __init__(self, cells, count):
        self.cells = set(cells)
        self.count = count

    def __eq__(self, other):
        return self.cells == other.cells and self.count == other.count

    def __str__(self):
        return f"{self.cells} = {self.count}"

    def known_mines(self):
        """
        Return the set of all cells in self.cells known to be mines: all
        of them when the count equals the number of cells, otherwise none.
        """
        if self.count == len(self.cells):
            return self.cells
        return set()

    def known_safes(self):
        """
        Return the set of all cells in self.cells known to be safe: all of
        them when the count is zero, otherwise none.
        """
        if self.count == 0:
            return self.cells
        return set()

    def mark_mine(self, cell):
        """
        Update the sentence given that `cell` is known to be a mine: the
        cell carries no more information, so drop it and decrement the
        mine count.
        """
        if cell in self.cells:
            self.cells.remove(cell)
            self.count -= 1

    def mark_safe(self, cell):
        """
        Update the sentence given that `cell` is known to be safe; the
        count is unchanged since a safe cell was never counted as a mine.
        """
        if cell in self.cells:
            self.cells.remove(cell)

    def get_neighbors(self, cell, height, width):
        """
        Replace self.cells with the set of all in-bounds neighbors of
        `cell` on a height x width board.  Does not check whether a
        neighbor is known or already played, and does not touch the count.

        Fixed: only the 3x3 neighborhood is scanned, instead of iterating
        the entire board and filtering every cell (same result, O(1)
        instead of O(height * width)).
        """
        row, col = cell
        neighbors = set()
        for i in range(max(0, row - 1), min(height, row + 2)):
            for j in range(max(0, col - 1), min(width, col + 2)):
                if (i, j) != cell:
                    neighbors.add((i, j))
        self.cells = neighbors
class MinesweeperAI():
    """
    Minesweeper game player: tracks moves made, known safes/mines, and a
    knowledge base of Sentence objects it iterates to a fixed point.
    """
    def __init__(self, height=8, width=8):
        # Set initial height and width
        self.height = height
        self.width = width
        # Keep track of which cells have been clicked on
        self.moves_made = set()
        # Keep track of cells known to be safe or mines
        self.mines = set()
        self.safes = set()
        # List of sentences about the game known to be true
        self.knowledge = []
    def mark_mine(self, cell):
        """
        Marks a cell as a mine, and updates all knowledge
        to mark that cell as a mine as well.
        """
        self.mines.add(cell)
        for sentence in self.knowledge:
            sentence.mark_mine(cell)
    def mark_safe(self, cell):
        """
        Marks a cell as safe, and updates all knowledge
        to mark that cell as safe as well.
        """
        self.safes.add(cell)
        for sentence in self.knowledge:
            sentence.mark_safe(cell)
    def evaluate_knowledge(self):
        """
        Iterate the knowledge base to a fixed point: promote fully
        determined sentences to known safes/mines, drop empty sentences,
        and apply the subset inference rule.  Stops once a full pass
        leaves the knowledge base unchanged.
        """
        while True:
            # Snapshot of the KB to detect whether this pass changed it.
            # NOTE(review): a deepcopy per pass is expensive; a cheaper
            # change flag would likely suffice -- confirm before changing.
            knowledge_copy = copy.deepcopy(self.knowledge)
            # Identify if any new safe moves were found
            for sentence in self.knowledge:
                if sentence.known_safes() != set():
                    new_safes = []
                    for safe_cell in sentence.known_safes():
                        if safe_cell not in self.safes:
                            print("new safe cell", safe_cell)
                            new_safes.append(safe_cell)
                    # Collected first, then marked: mark_safe() mutates the
                    # very cell sets iterated above.
                    for cell in new_safes:
                        self.mark_safe(cell)
            # Identify if any new mines were found
            for sentence in self.knowledge:
                if sentence.known_mines() != set():
                    found_mines = []
                    for new_mine in sentence.known_mines():
                        if new_mine not in self.mines:
                            print("new mine found", new_mine)
                            found_mines.append(new_mine)
                    for mine in found_mines:
                        self.mark_mine(mine)
            # Drop sentences that no longer constrain any cells.
            # NOTE(review): removing from a list while iterating it skips
            # the element after each removal; leftovers are cleaned up on a
            # later pass of the enclosing while-loop.
            for sentence in self.knowledge:
                if sentence.cells == set():
                    self.knowledge.remove(sentence)
            # Subset inference: if A is a subset of B, then B - A contains
            # count(B) - count(A) mines.  Applied in place: B is reduced to
            # B - A, and A is emptied (removed on a later cleanup pass).
            for sentence in self.knowledge:
                for another_sentence in self.knowledge:
                    if sentence != another_sentence:
                        print("checking issubset")
                        if sentence.cells.issubset(another_sentence.cells):
                            print("inferred sentence being made")
                            another_sentence.cells.difference_update(sentence.cells)
                            another_sentence.count = another_sentence.count - sentence.count
                            sentence.cells = set()
                            sentence.count = 0
            # Fixed point reached: this pass changed nothing.
            if knowledge_copy == self.knowledge:
                break
    def add_knowledge(self, cell, count):
        """
        Called when the Minesweeper board tells us, for a given
        safe cell, how many neighboring cells have mines in them.

        This function:
            1) marks the cell as a move that has been made
            2) marks the cell as safe
            3) adds a new sentence to the AI's knowledge base
               based on the value of `cell` and `count`
            4) marks any additional cells as safe or as mines
               if it can be concluded from the knowledge base
            5) folds new inferences into the knowledge base
               (via evaluate_knowledge)
        """
        # Update knowledge to reflect move just made
        self.moves_made.add(cell)
        self.mark_safe(cell)
        # Remove cell from all sentences in KB, and remove sentence if empty
        for sentence in self.knowledge:
            if cell in sentence.cells:
                sentence.cells.remove(cell)
                if sentence.cells == set():
                    self.knowledge.remove(sentence)
        # Build a sentence over the played cell's neighbors...
        new_sentence = Sentence(cells=set(), count=count)
        new_sentence.get_neighbors(cell, self.height, self.width)
        # ...excluding cells whose status is already known.
        # NOTE(review): `count` is not decremented when known mines are
        # excluded here; presumably evaluate_knowledge()'s marking passes
        # rebalance it -- confirm before relying on the sentence's count.
        new_sentence.cells.difference_update(self.mines)
        new_sentence.cells.difference_update(self.safes)
        new_sentence.cells.difference_update(self.moves_made)
        # Give sentence to knowledge base
        self.knowledge.append(new_sentence)
        # Evaluate any updates or changes
        self.evaluate_knowledge()
    def make_safe_move(self):
        """
        Returns a safe cell to choose on the Minesweeper board.
        The move must be known to be safe, and not already a move
        that has been made.
        This function may use the knowledge in self.mines, self.safes
        and self.moves_made, but should not modify any of those values.
        """
        print("safe cells: ", self.safes)
        print("mines: ", self.mines)
        # Make new set of safe moves that have not been played
        practice_safe_sets = self.safes.difference(self.moves_made)  # ;)
        # Pick random safe cell, or None if no safe move is known
        if practice_safe_sets != set():
            return random.sample(practice_safe_sets, 1)[0]
        else:
            return None
    def make_random_move(self):
        """
        Returns a move to make on the Minesweeper board.
        Chooses randomly among cells that:
            1) have not already been chosen, and
            2) are not known to be mines
        Returns None when no such cell remains.
        """
        # Grab all cell locations
        available_cells = set()
        for row in range(0, self.height):
            for col in range(0, self.width):
                available_cells.add((row, col))
        # Remove locations with mines and made moves
        available_cells.difference_update(self.mines)
        available_cells.difference_update(self.moves_made)
        # Pick a random cell
        if available_cells != set():
            return random.sample(available_cells, 1)[0]
        else:
            return None
import random
import copy
class Minesweeper():
def __init__(self, height=8, width=8, mines=8):
self.height = height
self.width = width
self.mines = set()
self.board = []
for i in range(self.height):
row = []
for j in range(self.width):
row.append(False)
self.board.append(row)
while len(self.mines) != mines:
i = random.randrange(height)
j = random.randrange(width)
if not self.board[i][j]:
self.mines.add((i, j))
self.board[i][j] = True
self.mines_found = set()
def print(self):
for i in range(self.height):
print("--" * self.width + "-")
for j in range(self.width):
if self.board[i][j]:
print("|X", end="")
else:
print("| ", end="")
print("|")
print("--" * self.width + "-")
def is_mine(self, cell):
i, j = cell
return self.board[i][j]
def nearby_mines(self, cell):
count = 0
for i in range(cell[0] - 1, cell[0] + 2):
for j in range(cell[1] - 1, cell[1] + 2):
if (i, j) == cell:
continue
if 0 <= i < self.height and 0 <= j < self.width:
if self.board[i][j]:
count += 1
return count
def won(self):
return self.mines_found == self.mines
class Sentence():
def __init__(self, cells, count):
self.cells = set(cells)
self.count = count
def __eq__(self, other):
return self.cells == other.cells and self.count == other.count
def __str__(self):
return f"{self.cells} = {self.count}"
def known_mines(self):
if self.count == len(self.cells):
return self.cells
else:
return set()
def known_safes(self):
if self.count == 0:
return self.cells
else:
return set()
def mark_mine(self, cell):
if cell in self.cells:
self.cells.remove(cell)
self.count -= 1
def mark_safe(self, cell):
if cell in self.cells:
self.cells.remove(cell)
def get_neighbors(self, cell, height, width):
neighbors = set()
row = cell[0]
col = cell[1]
for i in range(0, height):
for j in range(0, width):
if (i == row - 1) or (i == row) or (i == row + 1):
if (j == col -1) or (j == col) or (j == col + 1):
if (i, j) != (row, col):
neighbors.add((i, j))
self.cells = neighbors
class MinesweeperAI():
def __init__(self, height=8, width=8):
self.height = height
self.width = width
self.moves_made = set()
self.mines = set()
self.safes = set()
self.knowledge = []
def mark_mine(self, cell):
self.mines.add(cell)
for sentence in self.knowledge:
sentence.mark_mine(cell)
def mark_safe(self, cell):
self.safes.add(cell)
for sentence in self.knowledge:
sentence.mark_safe(cell)
def evaluate_knowledge(self):
while True:
knowledge_copy = copy.deepcopy(self.knowledge)
for sentence in self.knowledge:
if sentence.known_safes() != set():
new_safes = []
for safe_cell in sentence.known_safes():
if safe_cell not in self.safes:
print("new safe cell", safe_cell)
new_safes.append(safe_cell)
for cell in new_safes:
self.mark_safe(cell)
for sentence in self.knowledge:
if sentence.known_mines() != set():
found_mines = []
for new_mine in sentence.known_mines():
if new_mine not in self.mines:
print("new mine found", new_mine)
found_mines.append(new_mine)
for mine in found_mines:
self.mark_mine(mine)
for sentence in self.knowledge:
if sentence.cells == set():
self.knowledge.remove(sentence)
for sentence in self.knowledge:
for another_sentence in self.knowledge:
if sentence != another_sentence:
print("checking issubset")
if sentence.cells.issubset(another_sentence.cells):
print("inferred sentence being made")
another_sentence.cells.difference_update(sentence.cells)
another_sentence.count = another_sentence.count - sentence.count
sentence.cells = set()
sentence.count = 0
if knowledge_copy == self.knowledge:
break
def add_knowledge(self, cell, count):
self.moves_made.add(cell)
self.mark_safe(cell)
for sentence in self.knowledge:
if cell in sentence.cells:
sentence.cells.remove(cell)
if sentence.cells == set():
self.knowledge.remove(sentence)
new_sentence = Sentence(cells=set(), count=count)
new_sentence.get_neighbors(cell, self.height, self.width)
new_sentence.cells.difference_update(self.mines)
new_sentence.cells.difference_update(self.safes)
new_sentence.cells.difference_update(self.moves_made)
self.knowledge.append(new_sentence)
self.evaluate_knowledge()
def make_safe_move(self):
print("safe cells: ", self.safes)
print("mines: ", self.mines)
practice_safe_sets = self.safes.difference(self.moves_made)
if practice_safe_sets != set():
return random.sample(practice_safe_sets, 1)[0]
else:
return None
def make_random_move(self):
available_cells = set()
for row in range(0, self.height):
for col in range(0, self.width):
available_cells.add((row, col))
available_cells.difference_update(self.mines)
available_cells.difference_update(self.moves_made)
if available_cells != set():
return random.sample(available_cells, 1)[0]
else:
return None | true | true |
f7fd59b151bf404ec4bd98e86694b245df82dc00 | 74 | py | Python | riki/tests/test_import.py | afenyvesi/riki | dfd6579b3400e8ebcad1c4a610902124fad8f302 | [
"MIT"
] | null | null | null | riki/tests/test_import.py | afenyvesi/riki | dfd6579b3400e8ebcad1c4a610902124fad8f302 | [
"MIT"
] | 1 | 2020-01-25T23:07:00.000Z | 2020-01-25T23:07:00.000Z | riki/tests/test_import.py | afenyvesi/riki | dfd6579b3400e8ebcad1c4a610902124fad8f302 | [
"MIT"
] | 2 | 2020-01-25T22:21:33.000Z | 2020-07-15T20:59:18.000Z | def test_import_version():
from riki import _version
assert True
| 14.8 | 29 | 0.72973 | def test_import_version():
from riki import _version
assert True
| true | true |
f7fd5a57714a93ffdd64a6ee4500241cd4cf6541 | 2,072 | py | Python | python/example_code/emr/emrfs-boto-step.py | gabehollombe-aws/aws-doc-sdk-examples | dfc0e06ebe1762ab127f3ef5f425507644c6a99c | [
"Apache-2.0"
] | 12 | 2020-07-28T01:20:15.000Z | 2021-12-10T10:52:49.000Z | python/example_code/emr/emrfs-boto-step.py | gabehollombe-aws/aws-doc-sdk-examples | dfc0e06ebe1762ab127f3ef5f425507644c6a99c | [
"Apache-2.0"
] | 5 | 2021-12-10T01:52:47.000Z | 2022-01-04T16:47:45.000Z | python/example_code/emr/emrfs-boto-step.py | gabehollombe-aws/aws-doc-sdk-examples | dfc0e06ebe1762ab127f3ef5f425507644c6a99c | [
"Apache-2.0"
] | 1 | 2021-10-04T23:39:14.000Z | 2021-10-04T23:39:14.000Z | #
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# snippet-sourcedescription:[emrfs-boto-step.py demonstrates how to add a step to an EMR cluster that adds objects in an Amazon S3 bucket to the default EMRFS metadata table.]
# snippet-service:[elasticmapreduce]
# snippet-keyword:[Python]
# snippet-sourcesyntax:[python]
# snippet-sourcesyntax:[python]
# snippet-keyword:[Amazon EMR]
# snippet-keyword:[Code Sample]
# snippet-sourcetype:[snippet]
# snippet-sourcedate:[2019-01-31]
# snippet-sourceauthor:[AWS]
# snippet-start:[emr.python.addstep.emrfs]
import boto3
from botocore.exceptions import ClientError
# Assign the ID of an existing cluster to the following variable
job_flow_id = 'CLUSTER_ID'
# Define a job flow step. Assign appropriate values as desired.
job_flow_step_01 = {
'Name': 'Example EMRFS Sync Step',
'ActionOnFailure': 'CONTINUE',
'HadoopJarStep': {
'Jar': 's3://elasticmapreduce/libs/script-runner/script-runner.jar',
'Args': [
'/usr/bin/emrfs',
'sync',
's3://elasticmapreduce/samples/cloudfront'
]
}
}
# Add the step(s)
emr_client = boto3.client('emr')
try:
response = emr_client.add_job_flow_steps(JobFlowId=job_flow_id,
Steps=[job_flow_step_01])
except ClientError as e:
print(e.response['Error']['Message'])
exit(1)
# Output the IDs of the added steps
print('Step IDs:')
for stepId in response['StepIds']:
print(stepId)
# snippet-end:[emr.python.addstep.emrfs]
| 34.533333 | 176 | 0.685811 |
import boto3
from botocore.exceptions import ClientError
job_flow_id = 'CLUSTER_ID'
job_flow_step_01 = {
'Name': 'Example EMRFS Sync Step',
'ActionOnFailure': 'CONTINUE',
'HadoopJarStep': {
'Jar': 's3://elasticmapreduce/libs/script-runner/script-runner.jar',
'Args': [
'/usr/bin/emrfs',
'sync',
's3://elasticmapreduce/samples/cloudfront'
]
}
}
emr_client = boto3.client('emr')
try:
response = emr_client.add_job_flow_steps(JobFlowId=job_flow_id,
Steps=[job_flow_step_01])
except ClientError as e:
print(e.response['Error']['Message'])
exit(1)
print('Step IDs:')
for stepId in response['StepIds']:
print(stepId)
| true | true |
f7fd5baff69a7e75a3c06084b48491cb0e7072d4 | 686 | py | Python | src/uff/linear_array.py | davidbradway/uff.py | 118001211018a4fc95d1dd7304ae6335bdf805f9 | [
"MIT"
] | 7 | 2021-11-16T17:27:54.000Z | 2021-12-25T18:09:35.000Z | src/uff/linear_array.py | davidbradway/uff.py | 118001211018a4fc95d1dd7304ae6335bdf805f9 | [
"MIT"
] | 6 | 2021-11-16T17:27:33.000Z | 2022-02-04T08:51:06.000Z | src/uff/linear_array.py | davidbradway/uff.py | 118001211018a4fc95d1dd7304ae6335bdf805f9 | [
"MIT"
] | 1 | 2021-11-16T19:26:36.000Z | 2021-11-16T19:26:36.000Z | from dataclasses import dataclass
from uff.probe import Probe
@dataclass
class LinearArray(Probe):
"""
Describes a linear array, made of identical elements, uniformly distributed on a line.
Attributes:
number_elements (int): Number of elements in the array
pitch (float): Distance between the acoustic ceneter of adyacent elements [m]
element_width (float): (Optional) Element size in the x-axis [m]
element_height (float): (Optional) Element size in the y-axis [m]
"""
def str_name(self):
return 'probe.linear_array'
number_elements: int
pitch: float
element_width: float
element_height: float
| 27.44 | 92 | 0.685131 | from dataclasses import dataclass
from uff.probe import Probe
@dataclass
class LinearArray(Probe):
def str_name(self):
return 'probe.linear_array'
number_elements: int
pitch: float
element_width: float
element_height: float
| true | true |
f7fd5bec504e17993ed33a1ea47575eb33eb8afb | 79,085 | py | Python | scipy/stats/tests/test_mstats_basic.py | jcharlong/scipy | 153467a9174b0c6f4b90ffeed5871e5018658108 | [
"BSD-3-Clause"
] | null | null | null | scipy/stats/tests/test_mstats_basic.py | jcharlong/scipy | 153467a9174b0c6f4b90ffeed5871e5018658108 | [
"BSD-3-Clause"
] | null | null | null | scipy/stats/tests/test_mstats_basic.py | jcharlong/scipy | 153467a9174b0c6f4b90ffeed5871e5018658108 | [
"BSD-3-Clause"
] | null | null | null | """
Tests for the stats.mstats module (support for masked arrays)
"""
import warnings
import platform
import numpy as np
from numpy import nan
import numpy.ma as ma
from numpy.ma import masked, nomask
import scipy.stats.mstats as mstats
from scipy import stats
from .common_tests import check_named_results
import pytest
from pytest import raises as assert_raises
from numpy.ma.testutils import (assert_equal, assert_almost_equal,
assert_array_almost_equal, assert_array_almost_equal_nulp, assert_,
assert_allclose, assert_array_equal)
from numpy.testing import suppress_warnings
from scipy.stats import mstats_basic
class TestMquantiles:
    """Tests for ``mstats.mquantiles``."""

    def test_mquantiles_limit_keyword(self):
        # Regression test for Trac ticket #867: entries outside ``limit``
        # (here the -999 sentinel values) must be excluded from the
        # quantile computation.
        data = np.array([[6., 7., 1.],
                         [47., 15., 2.],
                         [49., 36., 3.],
                         [15., 39., 4.],
                         [42., 40., -999.],
                         [41., 41., -999.],
                         [7., -999., -999.],
                         [39., -999., -999.],
                         [43., -999., -999.],
                         [40., -999., -999.],
                         [36., -999., -999.]])
        # Expected column-wise quartiles once the sentinel values
        # are clipped away by limit=(0, 50).
        expected = [[19.2, 14.6, 1.45],
                    [40.0, 37.5, 2.5],
                    [42.8, 40.05, 3.55]]
        computed = mstats.mquantiles(data, axis=0, limit=(0, 50))
        assert_almost_equal(computed, expected)
def check_equal_gmean(array_like, desired, axis=None, dtype=None, rtol=1e-7):
    """Assert that ``mstats.gmean`` of *array_like* equals *desired*.

    Checks the value to relative tolerance *rtol* and also that the
    result's dtype matches *dtype* (``None`` compares equal to the
    default float64 dtype).
    """
    # Note this doesn't test when axis is not specified
    actual = mstats.gmean(array_like, axis=axis, dtype=dtype)
    assert_allclose(actual, desired, rtol=rtol)
    assert_equal(actual.dtype, dtype)
def check_equal_hmean(array_like, desired, axis=None, dtype=None, rtol=1e-7):
    """Assert that ``stats.hmean`` of *array_like* equals *desired*.

    Checks the value to relative tolerance *rtol* and that the result's
    dtype matches *dtype* (``None`` compares equal to float64).
    """
    computed = stats.hmean(array_like, axis=axis, dtype=dtype)
    assert_allclose(computed, desired, rtol=rtol)
    assert_equal(computed.dtype, dtype)
class TestGeoMean:
    """Tests for the masked-array geometric mean (``mstats.gmean``).

    Masked entries are excluded from the computation, so a masked value
    behaves like a shorter sample.
    """

    def test_1d(self):
        # Plain (unmasked) 1-D sequence input.
        a = [1, 2, 3, 4]
        desired = np.power(1*2*3*4, 1./4.)
        check_equal_gmean(a, desired, rtol=1e-14)

    def test_1d_ma(self):
        # Test a 1d masked array
        a = ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
        desired = 45.2872868812
        check_equal_gmean(a, desired)

        # Masked last element: result equals gmean of [1, 2, 3].
        a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1])
        desired = np.power(1*2*3, 1./3.)
        check_equal_gmean(a, desired, rtol=1e-14)

    def test_1d_ma_value(self):
        # Test a 1d masked array with a masked value
        a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100],
                        mask=[0, 0, 0, 0, 0, 0, 0, 0, 0, 1])
        desired = 41.4716627439
        check_equal_gmean(a, desired)

    def test_1d_ma0(self):
        # Test a 1d masked array with zero element; the zero would make
        # log() emit a divide warning, which is suppressed here.
        a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 0])
        desired = 41.4716627439
        with np.errstate(divide='ignore'):
            check_equal_gmean(a, desired)

    def test_1d_ma_inf(self):
        # Test a 1d masked array with negative element; log() of a
        # negative value emits an invalid-value warning, suppressed here.
        a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, -1])
        desired = 41.4716627439
        with np.errstate(invalid='ignore'):
            check_equal_gmean(a, desired)

    @pytest.mark.skipif(not hasattr(np, 'float96'), reason='cannot find float96 so skipping')
    def test_1d_float96(self):
        # Extended-precision dtype is only available on some platforms.
        a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1])
        desired_dt = np.power(1*2*3, 1./3.).astype(np.float96)
        check_equal_gmean(a, desired_dt, dtype=np.float96, rtol=1e-14)

    def test_2d_ma(self):
        # Column-wise (axis=0): each column reduces to its single
        # unmasked value thanks to the mask layout.
        a = ma.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
                     mask=[[0, 0, 0, 0], [1, 0, 0, 1], [0, 1, 1, 0]])
        desired = np.array([1, 2, 3, 4])
        check_equal_gmean(a, desired, axis=0, rtol=1e-14)

        # Row-wise (axis=-1): gmean over each row's unmasked entries.
        desired = ma.array([np.power(1*2*3*4, 1./4.),
                            np.power(2*3, 1./2.),
                            np.power(1*4, 1./2.)])
        check_equal_gmean(a, desired, axis=-1, rtol=1e-14)

        # Test a 2d masked array
        a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
        desired = 52.8885199
        check_equal_gmean(np.ma.array(a), desired)
class TestHarMean:
    """Tests for the harmonic mean on masked arrays (``stats.hmean``)."""

    def test_1d(self):
        # Masked last element: result equals hmean of [1, 2, 3].
        a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1])
        desired = 3. / (1./1 + 1./2 + 1./3)
        check_equal_hmean(a, desired, rtol=1e-14)

        a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
        desired = 34.1417152147
        check_equal_hmean(a, desired)

        # Same data with the final value masked out.
        a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100],
                        mask=[0, 0, 0, 0, 0, 0, 0, 0, 0, 1])
        desired = 31.8137186141
        check_equal_hmean(a, desired)

    @pytest.mark.skipif(not hasattr(np, 'float96'), reason='cannot find float96 so skipping')
    def test_1d_float96(self):
        # Extended-precision dtype is only available on some platforms.
        a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1])
        desired_dt = np.asarray(3. / (1./1 + 1./2 + 1./3), dtype=np.float96)
        check_equal_hmean(a, desired_dt, dtype=np.float96)

    def test_2d(self):
        # Column-wise (axis=0): the mask leaves one value per column.
        a = ma.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
                     mask=[[0, 0, 0, 0], [1, 0, 0, 1], [0, 1, 1, 0]])
        desired = ma.array([1, 2, 3, 4])
        check_equal_hmean(a, desired, axis=0, rtol=1e-14)

        # Row-wise (axis=-1): hmean over each row's unmasked entries.
        desired = [4./(1/1.+1/2.+1/3.+1/4.), 2./(1/2.+1/3.), 2./(1/1.+1/4.)]
        check_equal_hmean(a, desired, axis=-1, rtol=1e-14)

        a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
        desired = 38.6696271841
        check_equal_hmean(np.ma.array(a), desired)
class TestRanking:
    """Tests for ``mstats.rankdata`` on masked arrays."""

    def test_ranking(self):
        # Ties receive the average of the ranks they span (e.g. the
        # three 1s get rank (2+3+4)/3 = 3).
        x = ma.array([0,1,1,1,2,3,4,5,5,6,])
        assert_almost_equal(mstats.rankdata(x),
                            [1,3,3,3,5,6,7,8.5,8.5,10])
        # Masked entries get rank 0 by default...
        x[[3,4]] = masked
        assert_almost_equal(mstats.rankdata(x),
                            [1,2.5,2.5,0,0,4,5,6.5,6.5,8])
        # ...or the average rank of missing values with use_missing=True.
        assert_almost_equal(mstats.rankdata(x, use_missing=True),
                            [1,2.5,2.5,4.5,4.5,4,5,6.5,6.5,8])
        x = ma.array([0,1,5,1,2,4,3,5,1,6,])
        assert_almost_equal(mstats.rankdata(x),
                            [1,3,8.5,3,5,7,6,8.5,3,10])
        # 2-D input: default ranks the flattened array; axis=1/axis=0
        # rank within rows/columns respectively.
        x = ma.array([[0,1,1,1,2], [3,4,5,5,6,]])
        assert_almost_equal(mstats.rankdata(x),
                            [[1,3,3,3,5], [6,7,8.5,8.5,10]])
        assert_almost_equal(mstats.rankdata(x, axis=1),
                            [[1,3,3,3,5], [1,2,3.5,3.5,5]])
        assert_almost_equal(mstats.rankdata(x,axis=0),
                            [[1,1,1,1,1], [2,2,2,2,2,]])
class TestCorr:
    """Tests for masked-array correlation functions: ``pearsonr``,
    ``spearmanr``, ``kendalltau`` (plain, seasonal and exact p-value
    variants) and ``pointbiserialr``.
    """

    def test_pearsonr(self):
        # Tests some computations of Pearson's r
        x = ma.arange(10)

        with warnings.catch_warnings():
            # The tests in this context are edge cases, with perfect
            # correlation or anticorrelation, or totally masked data.
            # None of these should trigger a RuntimeWarning.
            warnings.simplefilter("error", RuntimeWarning)

            assert_almost_equal(mstats.pearsonr(x, x)[0], 1.0)
            assert_almost_equal(mstats.pearsonr(x, x[::-1])[0], -1.0)

            # Fully masked input yields masked r and p-value.
            x = ma.array(x, mask=True)
            pr = mstats.pearsonr(x, x)
            assert_(pr[0] is masked)
            assert_(pr[1] is masked)

        x1 = ma.array([-1.0, 0.0, 1.0])
        y1 = ma.array([0, 0, 3])
        r, p = mstats.pearsonr(x1, y1)
        assert_almost_equal(r, np.sqrt(3)/2)
        assert_almost_equal(p, 1.0/3)

        # (x2, y2) have the same unmasked data as (x1, y1).
        mask = [False, False, False, True]
        x2 = ma.array([-1.0, 0.0, 1.0, 99.0], mask=mask)
        y2 = ma.array([0, 0, 3, -1], mask=mask)
        r, p = mstats.pearsonr(x2, y2)
        assert_almost_equal(r, np.sqrt(3)/2)
        assert_almost_equal(p, 1.0/3)

    def test_pearsonr_misaligned_mask(self):
        # An entry masked in either input must be dropped pairwise, so the
        # masked computation matches stats.pearsonr on the common subset.
        mx = np.ma.masked_array([1, 2, 3, 4, 5, 6], mask=[0, 1, 0, 0, 0, 0])
        my = np.ma.masked_array([9, 8, 7, 6, 5, 9], mask=[0, 0, 1, 0, 0, 0])
        x = np.array([1, 4, 5, 6])
        y = np.array([9, 6, 5, 9])
        mr, mp = mstats.pearsonr(mx, my)
        r, p = stats.pearsonr(x, y)
        assert_equal(mr, r)
        assert_equal(mp, p)

    def test_spearmanr(self):
        # Tests some computations of Spearman's rho
        (x, y) = ([5.05,6.75,3.21,2.66], [1.65,2.64,2.64,6.95])
        assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555)
        # NaNs converted to masked values must give the same rho.
        (x, y) = ([5.05,6.75,3.21,2.66,np.nan],[1.65,2.64,2.64,6.95,np.nan])
        (x, y) = (ma.fix_invalid(x), ma.fix_invalid(y))
        assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555)

        x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
             1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7]
        y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
             0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4]
        assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299)
        # Same data with a trailing NaN pair masked out.
        x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
             1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7, np.nan]
        y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
             0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4, np.nan]
        (x, y) = (ma.fix_invalid(x), ma.fix_invalid(y))
        assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299)

        # Next test is to make sure calculation uses sufficient precision.
        # The denominator's value is ~n^3 and used to be represented as an
        # int. 2000**3 > 2**32 so these arrays would cause overflow on
        # some machines.
        x = list(range(2000))
        y = list(range(2000))
        y[0], y[9] = y[9], y[0]
        y[10], y[434] = y[434], y[10]
        y[435], y[1509] = y[1509], y[435]
        # rho = 1 - 6 * (2 * (9^2 + 424^2 + 1074^2))/(2000 * (2000^2 - 1))
        #     = 1 - (1 / 500)
        #     = 0.998
        assert_almost_equal(mstats.spearmanr(x,y)[0], 0.998)

        # test for namedtuple attributes
        res = mstats.spearmanr(x, y)
        attributes = ('correlation', 'pvalue')
        check_named_results(res, attributes, ma=True)

    def test_spearmanr_alternative(self):
        # check against R
        # options(digits=16)
        # cor.test(c(2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
        #            1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7),
        #          c(22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
        #            0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4),
        #          alternative='two.sided', method='spearman')
        x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
             1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7]
        y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
             0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4]

        r_exp = 0.6887298747763864  # from cor.test

        # The statistic is identical for all alternatives; only the
        # p-value changes.
        r, p = mstats.spearmanr(x, y)
        assert_allclose(r, r_exp)
        assert_allclose(p, 0.004519192910756)

        r, p = mstats.spearmanr(x, y, alternative='greater')
        assert_allclose(r, r_exp)
        assert_allclose(p, 0.002259596455378)

        r, p = mstats.spearmanr(x, y, alternative='less')
        assert_allclose(r, r_exp)
        assert_allclose(p, 0.9977404035446)

        # intuitive test (with obvious positive correlation)
        n = 100
        x = np.linspace(0, 5, n)
        y = 0.1*x + np.random.rand(n)  # y is positively correlated w/ x

        stat1, p1 = mstats.spearmanr(x, y)

        stat2, p2 = mstats.spearmanr(x, y, alternative="greater")
        assert_allclose(p2, p1 / 2)  # positive correlation -> small p

        stat3, p3 = mstats.spearmanr(x, y, alternative="less")
        assert_allclose(p3, 1 - p1 / 2)  # positive correlation -> large p

        assert stat1 == stat2 == stat3

        with pytest.raises(ValueError, match="alternative must be 'less'..."):
            mstats.spearmanr(x, y, alternative="ekki-ekki")

    @pytest.mark.skipif(platform.machine() == 'ppc64le',
                        reason="fails/crashes on ppc64le")
    def test_kendalltau(self):
        # check case with with maximum disorder and p=1
        x = ma.array(np.array([9, 2, 5, 6]))
        y = ma.array(np.array([4, 7, 9, 11]))
        # Cross-check with exact result from R:
        # cor.test(x,y,method="kendall",exact=1)
        expected = [0.0, 1.0]
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)

        # simple case without ties
        x = ma.array(np.arange(10))
        y = ma.array(np.arange(10))
        # Cross-check with exact result from R:
        # cor.test(x,y,method="kendall",exact=1)
        expected = [1.0, 5.511463844797e-07]
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)

        # check exception in case of invalid method keyword
        assert_raises(ValueError, mstats.kendalltau, x, y, method='banana')

        # swap a couple of values
        b = y[1]
        y[1] = y[2]
        y[2] = b
        # Cross-check with exact result from R:
        # cor.test(x,y,method="kendall",exact=1)
        expected = [0.9555555555555556, 5.511463844797e-06]
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)

        # swap a couple more
        b = y[5]
        y[5] = y[6]
        y[6] = b
        # Cross-check with exact result from R:
        # cor.test(x,y,method="kendall",exact=1)
        expected = [0.9111111111111111, 2.976190476190e-05]
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)

        # same in opposite direction
        x = ma.array(np.arange(10))
        y = ma.array(np.arange(10)[::-1])
        # Cross-check with exact result from R:
        # cor.test(x,y,method="kendall",exact=1)
        expected = [-1.0, 5.511463844797e-07]
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)

        # swap a couple of values
        b = y[1]
        y[1] = y[2]
        y[2] = b
        # Cross-check with exact result from R:
        # cor.test(x,y,method="kendall",exact=1)
        expected = [-0.9555555555555556, 5.511463844797e-06]
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)

        # swap a couple more
        b = y[5]
        y[5] = y[6]
        y[6] = b
        # Cross-check with exact result from R:
        # cor.test(x,y,method="kendall",exact=1)
        expected = [-0.9111111111111111, 2.976190476190e-05]
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)

        # Tests some computations of Kendall's tau
        x = ma.fix_invalid([5.05, 6.75, 3.21, 2.66, np.nan])
        y = ma.fix_invalid([1.65, 26.5, -5.93, 7.96, np.nan])
        z = ma.fix_invalid([1.65, 2.64, 2.64, 6.95, np.nan])
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)),
                            [+0.3333333, 0.75])
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y, method='asymptotic')),
                            [+0.3333333, 0.4969059])
        assert_almost_equal(np.asarray(mstats.kendalltau(x, z)),
                            [-0.5477226, 0.2785987])
        #
        x = ma.fix_invalid([0, 0, 0, 0, 20, 20, 0, 60, 0, 20,
                            10, 10, 0, 40, 0, 20, 0, 0, 0, 0, 0, np.nan])
        y = ma.fix_invalid([0, 80, 80, 80, 10, 33, 60, 0, 67, 27,
                            25, 80, 80, 80, 80, 80, 80, 0, 10, 45, np.nan, 0])
        result = mstats.kendalltau(x, y)
        assert_almost_equal(np.asarray(result), [-0.1585188, 0.4128009])

        # test for namedtuple attributes
        attributes = ('correlation', 'pvalue')
        check_named_results(result, attributes, ma=True)

    @pytest.mark.skipif(platform.machine() == 'ppc64le',
                        reason="fails/crashes on ppc64le")
    @pytest.mark.slow
    def test_kendalltau_large(self):
        # make sure internal variable use correct precision with
        # larger arrays
        x = np.arange(2000, dtype=float)
        x = ma.masked_greater(x, 1995)
        y = np.arange(2000, dtype=float)
        y = np.concatenate((y[1000:], y[:1000]))
        assert_(np.isfinite(mstats.kendalltau(x, y)[1]))

    def test_kendalltau_seasonal(self):
        # Tests the seasonal Kendall tau.
        x = [[nan, nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
             [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
             [3, 2, 5, 6, 18, 4, 9, 1, 1, nan, 1, 1, nan],
             [nan, 6, 11, 4, 17, nan, 6, 1, 1, 2, 5, 1, 1]]
        x = ma.fix_invalid(x).T
        output = mstats.kendalltau_seasonal(x)
        assert_almost_equal(output['global p-value (indep)'], 0.008, 3)
        assert_almost_equal(output['seasonal p-value'].round(2),
                            [0.18,0.53,0.20,0.04])

    def test_kendall_p_exact_medium(self):
        # Test for the exact method with medium samples (some n >= 171)
        # expected values generated using SymPy
        expectations = {(100, 2393): 0.62822615287956040664,
                        (101, 2436): 0.60439525773513602669,
                        (170, 0): 2.755801935583541e-307,
                        (171, 0): 0.0,
                        (171, 1): 2.755801935583541e-307,
                        (172, 1): 0.0,
                        (200, 9797): 0.74753983745929675209,
                        (201, 9656): 0.40959218958120363618}
        for nc, expected in expectations.items():
            res = mstats_basic._kendall_p_exact(nc[0], nc[1])
            assert_almost_equal(res, expected)

    @pytest.mark.slow
    def test_kendall_p_exact_large(self):
        # Test for the exact method with large samples (n >= 171)
        # expected values generated using SymPy
        expectations = {(400, 38965): 0.48444283672113314099,
                        (401, 39516): 0.66363159823474837662,
                        (800, 156772): 0.42265448483120932055,
                        (801, 157849): 0.53437553412194416236,
                        (1600, 637472): 0.84200727400323538419,
                        (1601, 630304): 0.34465255088058593946}
        for nc, expected in expectations.items():
            res = mstats_basic._kendall_p_exact(nc[0], nc[1])
            assert_almost_equal(res, expected)

    def test_pointbiserial(self):
        # x is dichotomous; the trailing NaN pair is masked by pointbiserialr.
        x = [1,0,1,1,1,1,0,1,0,0,0,1,1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,1,0,
             0,0,0,0,1,-1]
        y = [14.8,13.8,12.4,10.1,7.1,6.1,5.8,4.6,4.3,3.5,3.3,3.2,3.0,
             2.8,2.8,2.5,2.4,2.3,2.1,1.7,1.7,1.5,1.3,1.3,1.2,1.2,1.1,
             0.8,0.7,0.6,0.5,0.2,0.2,0.1,np.nan]
        assert_almost_equal(mstats.pointbiserialr(x, y)[0], 0.36149, 5)

        # test for namedtuple attributes
        res = mstats.pointbiserialr(x, y)
        attributes = ('correlation', 'pvalue')
        check_named_results(res, attributes, ma=True)
class TestTrimming:
    """Tests for trimming and winsorization of masked arrays
    (``mstats.trim``, ``trimboth``, ``trimtail``, ``trimr``,
    ``trimmed_mean``, ``trimmed_stde``, ``winsorize``).
    """

    def test_trim(self):
        # No limits: nothing is trimmed.
        a = ma.arange(10)
        assert_equal(mstats.trim(a), [0,1,2,3,4,5,6,7,8,9])
        # Absolute limits, inclusive by default.
        a = ma.arange(10)
        assert_equal(mstats.trim(a,(2,8)), [None,None,2,3,4,5,6,7,8,None])
        a = ma.arange(10)
        assert_equal(mstats.trim(a,limits=(2,8),inclusive=(False,False)),
                     [None,None,None,3,4,5,6,7,None,None])
        # Relative limits trim proportions of the sample instead.
        a = ma.arange(10)
        assert_equal(mstats.trim(a,limits=(0.1,0.2),relative=True),
                     [None,1,2,3,4,5,6,7,None,None])

        # Pre-masked entries stay masked after trimming.
        a = ma.arange(12)
        a[[0,-1]] = a[5] = masked
        assert_equal(mstats.trim(a, (2,8)),
                     [None, None, 2, 3, 4, None, 6, 7, 8, None, None, None])

        # The resulting mask must be identical whether the array is
        # trimmed flattened (axis=None) or along either axis.
        x = ma.arange(100).reshape(10, 10)
        expected = [1]*10 + [0]*70 + [1]*20
        trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=None)
        assert_equal(trimx._mask.ravel(), expected)
        trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=0)
        assert_equal(trimx._mask.ravel(), expected)
        trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=-1)
        assert_equal(trimx._mask.T.ravel(), expected)

        # same as above, but with an extra masked row inserted
        x = ma.arange(110).reshape(11, 10)
        x[1] = masked
        expected = [1]*20 + [0]*70 + [1]*20
        trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=None)
        assert_equal(trimx._mask.ravel(), expected)
        trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=0)
        assert_equal(trimx._mask.ravel(), expected)
        trimx = mstats.trim(x.T, (0.1,0.2), relative=True, axis=-1)
        assert_equal(trimx.T._mask.ravel(), expected)

    def test_trim_old(self):
        # trimboth removes 20% from each end by default; trimtail only
        # trims the requested side.
        x = ma.arange(100)
        assert_equal(mstats.trimboth(x).count(), 60)
        assert_equal(mstats.trimtail(x,tail='r').count(), 80)
        # Already-masked values count toward the trimmed proportion.
        x[50:70] = masked
        trimx = mstats.trimboth(x)
        assert_equal(trimx.count(), 48)
        assert_equal(trimx._mask, [1]*16 + [0]*34 + [1]*20 + [0]*14 + [1]*16)
        x._mask = nomask
        x.shape = (10,10)
        assert_equal(mstats.trimboth(x).count(), 60)
        assert_equal(mstats.trimtail(x).count(), 80)

    def test_trimr(self):
        # With exclusive bounds the boundary elements are trimmed too.
        x = ma.arange(10)
        result = mstats.trimr(x, limits=(0.15, 0.14), inclusive=(False, False))
        expected = ma.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
                            mask=[1, 1, 0, 0, 0, 0, 0, 0, 0, 1])
        assert_equal(result, expected)
        assert_equal(result.mask, expected.mask)

    def test_trimmedmean(self):
        # Reference values computed externally for this classic dataset.
        data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
                         296,299,306,376,428,515,666,1310,2611])
        assert_almost_equal(mstats.trimmed_mean(data,0.1), 343, 0)
        assert_almost_equal(mstats.trimmed_mean(data,(0.1,0.1)), 343, 0)
        assert_almost_equal(mstats.trimmed_mean(data,(0.2,0.2)), 283, 0)

    def test_trimmed_stde(self):
        # A scalar proportion must behave like a symmetric tuple.
        data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
                         296,299,306,376,428,515,666,1310,2611])
        assert_almost_equal(mstats.trimmed_stde(data,(0.2,0.2)), 56.13193, 5)
        assert_almost_equal(mstats.trimmed_stde(data,0.2), 56.13193, 5)

    def test_winsorization(self):
        data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
                         296,299,306,376,428,515,666,1310,2611])
        assert_almost_equal(mstats.winsorize(data,(0.2,0.2)).var(ddof=1),
                            21551.4, 1)
        # Exclusive limits clip more values, shrinking the variance.
        assert_almost_equal(
            mstats.winsorize(data, (0.2,0.2),(False,False)).var(ddof=1),
            11887.3, 1)
        # Winsorizing must not alter the mask of already-masked data.
        data[5] = masked
        winsorized = mstats.winsorize(data)
        assert_equal(winsorized.mask, data.mask)

    def test_winsorization_nan(self):
        data = ma.array([np.nan, np.nan, 0, 1, 2])
        assert_raises(ValueError, mstats.winsorize, data, (0.05, 0.05),
                      nan_policy='raise')
        # Testing propagate (default behavior)
        assert_equal(mstats.winsorize(data, (0.4, 0.4)),
                     ma.array([2, 2, 2, 2, 2]))
        assert_equal(mstats.winsorize(data, (0.8, 0.8)),
                     ma.array([np.nan, np.nan, np.nan, np.nan, np.nan]))
        # With nan_policy='omit' the NaNs are excluded from clipping.
        assert_equal(mstats.winsorize(data, (0.4, 0.4), nan_policy='omit'),
                     ma.array([np.nan, np.nan, 2, 2, 2]))
        assert_equal(mstats.winsorize(data, (0.8, 0.8), nan_policy='omit'),
                     ma.array([np.nan, np.nan, 2, 2, 2]))
class TestMoments:
    """Tests for masked-array moment statistics: ``moment``,
    ``variation``, ``skew``, ``kurtosis`` and ``mode``.
    """
    # Comparison numbers are found using R v.1.5.1
    # note that length(testcase) = 4
    # testmathworks comes from documentation for the
    # Statistics Toolbox for Matlab and can be found at both
    # https://www.mathworks.com/help/stats/kurtosis.html
    # https://www.mathworks.com/help/stats/skewness.html
    # Note that both test cases came from here.
    testcase = [1,2,3,4]
    # fix_invalid masks the trailing NaN, leaving 5 valid values.
    testmathworks = ma.fix_invalid([1.165, 0.6268, 0.0751, 0.3516, -0.6965,
                                    np.nan])
    # 5x5 masked fixture; the fourth row is fully masked, so statistics
    # along it come out masked.
    testcase_2d = ma.array(
        np.array([[0.05245846, 0.50344235, 0.86589117, 0.36936353, 0.46961149],
                  [0.11574073, 0.31299969, 0.45925772, 0.72618805, 0.75194407],
                  [0.67696689, 0.91878127, 0.09769044, 0.04645137, 0.37615733],
                  [0.05903624, 0.29908861, 0.34088298, 0.66216337, 0.83160998],
                  [0.64619526, 0.94894632, 0.27855892, 0.0706151, 0.39962917]]),
        mask=np.array([[True, False, False, True, False],
                       [True, True, True, False, True],
                       [False, False, False, False, False],
                       [True, True, True, True, True],
                       [False, False, True, False, False]], dtype=bool))

    def _assert_equal(self, actual, expect, *, shape=None, dtype=None):
        # Helper: compare values and, when given, broadcast shape and
        # enforce the expected dtype of *actual*.
        expect = np.asarray(expect)
        if shape is not None:
            expect = np.broadcast_to(expect, shape)
        assert_array_equal(actual, expect)
        if dtype is None:
            dtype = expect.dtype
        assert actual.dtype == dtype

    def test_moment(self):
        # Central moments of [1, 2, 3, 4]: odd moments vanish by symmetry.
        y = mstats.moment(self.testcase,1)
        assert_almost_equal(y,0.0,10)
        y = mstats.moment(self.testcase,2)
        assert_almost_equal(y,1.25)
        y = mstats.moment(self.testcase,3)
        assert_almost_equal(y,0.0)
        y = mstats.moment(self.testcase,4)
        assert_almost_equal(y,2.5625)

        # check array_like input for moment
        y = mstats.moment(self.testcase, [1, 2, 3, 4])
        assert_allclose(y, [0, 1.25, 0, 2.5625])

        # check moment input consists only of integers
        y = mstats.moment(self.testcase, 0.0)
        assert_allclose(y, 1.0)
        assert_raises(ValueError, mstats.moment, self.testcase, 1.2)
        y = mstats.moment(self.testcase, [1.0, 2, 3, 4.0])
        assert_allclose(y, [0, 1.25, 0, 2.5625])

        # test empty input
        y = mstats.moment([])
        self._assert_equal(y, np.nan, dtype=np.float64)
        y = mstats.moment(np.array([], dtype=np.float32))
        self._assert_equal(y, np.nan, dtype=np.float32)
        y = mstats.moment(np.zeros((1, 0)), axis=0)
        self._assert_equal(y, [], shape=(0,), dtype=np.float64)
        y = mstats.moment([[]], axis=1)
        self._assert_equal(y, np.nan, shape=(1,), dtype=np.float64)
        y = mstats.moment([[]], moment=[0, 1], axis=0)
        self._assert_equal(y, [], shape=(2, 0))

        x = np.arange(10.)
        x[9] = np.nan
        assert_equal(mstats.moment(x, 2), ma.masked)  # NaN value is ignored

    def test_variation(self):
        # Coefficient of variation: std/mean.
        y = mstats.variation(self.testcase)
        assert_almost_equal(y,0.44721359549996, 10)

    def test_variation_ddof(self):
        # test variation with delta degrees of freedom
        # regression test for gh-13341
        a = np.array([1, 2, 3, 4, 5])
        y = mstats.variation(a, ddof=1)
        assert_almost_equal(y, 0.5270462766947299)

    def test_skewness(self):
        # Biased (default) vs unbiased skewness estimates.
        y = mstats.skew(self.testmathworks)
        assert_almost_equal(y,-0.29322304336607,10)
        y = mstats.skew(self.testmathworks,bias=0)
        assert_almost_equal(y,-0.437111105023940,10)
        y = mstats.skew(self.testcase)
        assert_almost_equal(y,0.0,10)

    def test_kurtosis(self):
        # Set flags for axis = 0 and fisher=0 (Pearson's definition of kurtosis
        # for compatibility with Matlab)
        y = mstats.kurtosis(self.testmathworks, 0, fisher=0, bias=1)
        assert_almost_equal(y, 2.1658856802973, 10)
        # Note that MATLAB has confusing docs for the following case
        # kurtosis(x,0) gives an unbiased estimate of Pearson's skewness
        # kurtosis(x) gives a biased estimate of Fisher's skewness (Pearson-3)
        # The MATLAB docs imply that both should give Fisher's
        y = mstats.kurtosis(self.testmathworks, fisher=0, bias=0)
        assert_almost_equal(y, 3.663542721189047, 10)
        y = mstats.kurtosis(self.testcase, 0, 0)
        assert_almost_equal(y, 1.64)

        # test that kurtosis works on multidimensional masked arrays
        correct_2d = ma.array(np.array([-1.5, -3., -1.47247052385, 0.,
                                        -1.26979517952]),
                              mask=np.array([False, False, False, True,
                                             False], dtype=bool))
        assert_array_almost_equal(mstats.kurtosis(self.testcase_2d, 1),
                                  correct_2d)
        for i, row in enumerate(self.testcase_2d):
            assert_almost_equal(mstats.kurtosis(row), correct_2d[i])

        correct_2d_bias_corrected = ma.array(
            np.array([-1.5, -3., -1.88988209538, 0., -0.5234638463918877]),
            mask=np.array([False, False, False, True, False], dtype=bool))
        assert_array_almost_equal(mstats.kurtosis(self.testcase_2d, 1,
                                                  bias=False),
                                  correct_2d_bias_corrected)
        for i, row in enumerate(self.testcase_2d):
            assert_almost_equal(mstats.kurtosis(row, bias=False),
                                correct_2d_bias_corrected[i])

        # Check consistency between stats and mstats implementations
        assert_array_almost_equal_nulp(mstats.kurtosis(self.testcase_2d[2, :]),
                                       stats.kurtosis(self.testcase_2d[2, :]),
                                       nulp=4)

    def test_mode(self):
        # mode returns a (modes, counts) pair; masked entries are
        # excluded from the counting.
        a1 = [0,0,0,1,1,1,2,3,3,3,3,4,5,6,7]
        a2 = np.reshape(a1, (3,5))
        a3 = np.array([1,2,3,4,5,6])
        a4 = np.reshape(a3, (3,2))
        ma1 = ma.masked_where(ma.array(a1) > 2, a1)
        ma2 = ma.masked_where(a2 > 2, a2)
        ma3 = ma.masked_where(a3 < 2, a3)
        ma4 = ma.masked_where(ma.array(a4) < 2, a4)
        assert_equal(mstats.mode(a1, axis=None), (3,4))
        assert_equal(mstats.mode(a1, axis=0), (3,4))
        assert_equal(mstats.mode(ma1, axis=None), (0,3))
        assert_equal(mstats.mode(a2, axis=None), (3,4))
        assert_equal(mstats.mode(ma2, axis=None), (0,3))
        assert_equal(mstats.mode(a3, axis=None), (1,1))
        assert_equal(mstats.mode(ma3, axis=None), (2,1))
        assert_equal(mstats.mode(a2, axis=0), ([[0,0,0,1,1]], [[1,1,1,1,1]]))
        assert_equal(mstats.mode(ma2, axis=0), ([[0,0,0,1,1]], [[1,1,1,1,1]]))
        assert_equal(mstats.mode(a2, axis=-1), ([[0],[3],[3]], [[3],[3],[1]]))
        assert_equal(mstats.mode(ma2, axis=-1), ([[0],[1],[0]], [[3],[1],[0]]))
        assert_equal(mstats.mode(ma4, axis=0), ([[3,2]], [[1,1]]))
        assert_equal(mstats.mode(ma4, axis=-1), ([[2],[3],[5]], [[1],[1],[1]]))

        a1_res = mstats.mode(a1, axis=None)

        # test for namedtuple attributes
        attributes = ('mode', 'count')
        check_named_results(a1_res, attributes, ma=True)

    def test_mode_modifies_input(self):
        # regression test for gh-6428: mode(..., axis=None) may not modify
        # the input array
        im = np.zeros((100, 100))
        im[:50, :] += 1
        im[:, :50] += 1
        cp = im.copy()
        mstats.mode(im, None)
        assert_equal(im, cp)
class TestPercentile:
    """Tests for ``mstats.scoreatpercentile``."""

    def setup_method(self):
        # Fixtures kept for parity with the plain-array stats tests.
        self.a1 = [3, 4, 5, 10, -3, -5, 6]
        self.a2 = [3, -6, -2, 8, 7, 4, 2, 1]
        self.a3 = [3., 4, 5, 10, -3, -5, -6, 7.0]

    def test_percentile(self):
        # Evenly spaced values 0.0, 0.5, ..., 3.5.
        values = np.arange(8) * 0.5
        for pct, expected in [(0, 0.), (100, 3.5), (50, 1.75)]:
            assert_equal(mstats.scoreatpercentile(values, pct), expected)

    def test_2D(self):
        # For 2-D input the percentile is taken per column.
        grid = ma.array([[1, 1, 1],
                         [1, 1, 1],
                         [4, 4, 3],
                         [1, 1, 1],
                         [1, 1, 1]])
        assert_equal(mstats.scoreatpercentile(grid, 50), [1, 1, 1])
class TestVariability:
    """ Comparison numbers are found using R v.1.5.1
        note that length(testcase) = 4
    """
    # fix_invalid masks the trailing NaN, leaving [1, 2, 3, 4] valid.
    testcase = ma.fix_invalid([1,2,3,4,np.nan])

    def test_sem(self):
        # This is not in R, so used: sqrt(var(testcase)*3/4) / sqrt(3)
        result = mstats.sem(self.testcase)
        assert_almost_equal(result, 0.6454972244)
        # Consistency of ddof handling: rescaling the ddof=0 estimate
        # must reproduce the ddof=2 estimate.
        count = self.testcase.count()
        rescaled = mstats.sem(self.testcase, ddof=0) * np.sqrt(count/(count-2))
        assert_allclose(rescaled, mstats.sem(self.testcase, ddof=2))

    def test_zmap(self):
        # This is not in R, so tested by using:
        # (testcase[i]-mean(testcase,axis=0)) / sqrt(var(testcase)*3/4)
        scores = mstats.zmap(self.testcase, self.testcase)
        expected_unmasked = ([-1.3416407864999, -0.44721359549996,
                              0.44721359549996, 1.3416407864999])
        assert_array_almost_equal(expected_unmasked,
                                  scores.data[~scores.mask], decimal=12)

    def test_zscore(self):
        # This is not in R, so tested by using:
        # (testcase[i]-mean(testcase,axis=0)) / sqrt(var(testcase)*3/4)
        scores = mstats.zscore(self.testcase)
        expected = ma.fix_invalid([-1.3416407864999, -0.44721359549996,
                                   0.44721359549996, 1.3416407864999, np.nan])
        assert_almost_equal(expected, scores, decimal=12)
class TestMisc:
    """Tests for assorted mstats routines: obrientransform, ks_2samp and
    friedmanchisquare."""
    def test_obrientransform(self):
        # Transformed values are checked against precomputed reference
        # values, rounded to 4 decimals.
        args = [[5]*5+[6]*11+[7]*9+[8]*3+[9]*2+[10]*2,
                [6]+[7]*2+[8]*4+[9]*9+[10]*16]
        result = [5*[3.1828]+11*[0.5591]+9*[0.0344]+3*[1.6086]+2*[5.2817]+2*[11.0538],
                  [10.4352]+2*[4.8599]+4*[1.3836]+9*[0.0061]+16*[0.7277]]
        assert_almost_equal(np.round(mstats.obrientransform(*args).T, 4),
                            result, 4)
    def test_ks_2samp(self):
        # Two-sample KS test on masked seasonal data: default two-sided
        # alternative plus the 'g' (greater) and 'l' (less) alternatives.
        x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
             [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
             [3, 2, 5, 6, 18, 4, 9, 1, 1, nan, 1, 1, nan],
             [nan, 6, 11, 4, 17, nan, 6, 1, 1, 2, 5, 1, 1]]
        x = ma.fix_invalid(x).T
        (winter, spring, summer, fall) = x.T
        assert_almost_equal(np.round(mstats.ks_2samp(winter, spring), 4),
                            (0.1818, 0.9628))
        assert_almost_equal(np.round(mstats.ks_2samp(winter, spring, 'g'), 4),
                            (0.1469, 0.6886))
        assert_almost_equal(np.round(mstats.ks_2samp(winter, spring, 'l'), 4),
                            (0.1818, 0.6011))
    def test_friedmanchisq(self):
        # No missing values
        args = ([9.0,9.5,5.0,7.5,9.5,7.5,8.0,7.0,8.5,6.0],
                [7.0,6.5,7.0,7.5,5.0,8.0,6.0,6.5,7.0,7.0],
                [6.0,8.0,4.0,6.0,7.0,6.5,6.0,4.0,6.5,3.0])
        result = mstats.friedmanchisquare(*args)
        assert_almost_equal(result[0], 10.4737, 4)
        assert_almost_equal(result[1], 0.005317, 6)
        # Missing values
        x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
             [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
             [3, 2, 5, 6, 18, 4, 9, 1, 1,nan, 1, 1,nan],
             [nan, 6, 11, 4, 17,nan, 6, 1, 1, 2, 5, 1, 1]]
        x = ma.fix_invalid(x)
        result = mstats.friedmanchisquare(*x)
        assert_almost_equal(result[0], 2.0156, 4)
        assert_almost_equal(result[1], 0.5692, 4)
        # test for namedtuple attributes
        attributes = ('statistic', 'pvalue')
        check_named_results(result, attributes, ma=True)
def test_regress_simple():
    # Regression test for #1273: regress a line with sinusoidal noise.
    x = np.linspace(0, 100, 100)
    y = 0.2 * x + 10 + np.sin(np.linspace(0, 20, 100))

    result = mstats.linregress(x, y)

    # The result is a LinregressResult namedtuple with the expected fields,
    # plus the extra `intercept_stderr` attribute.
    lr = stats._stats_mstats_common.LinregressResult
    assert_(isinstance(result, lr))
    check_named_results(result,
                        ('slope', 'intercept', 'rvalue', 'pvalue', 'stderr'),
                        ma=True)
    assert 'intercept_stderr' in dir(result)

    # Slope, intercept and their standard errors are estimated correctly.
    assert_almost_equal(result.slope, 0.19644990055858422)
    assert_almost_equal(result.intercept, 10.211269918932341)
    assert_almost_equal(result.stderr, 0.002395781449783862)
    assert_almost_equal(result.intercept_stderr, 0.13866936078570702)
def test_theilslopes():
    # Basic slope and intercept.
    slope, intercept, lower, upper = mstats.theilslopes([0, 1, 1])
    assert_almost_equal(slope, 0.5)
    assert_almost_equal(intercept, 0.5)

    # Masked entries must be ignored by the estimator.
    masked_y = np.ma.array([0, 1, 100, 1], mask=[False, False, True, False])
    slope, intercept, lower, upper = mstats.theilslopes(masked_y)
    assert_almost_equal(slope, 1. / 3)
    assert_almost_equal(intercept, 2. / 3)

    # Confidence intervals from the example in Sen (1968).
    x = [1, 2, 3, 4, 10, 12, 18]
    y = [9, 15, 19, 20, 45, 55, 78]
    slope, intercept, lower, upper = mstats.theilslopes(y, x, 0.07)
    assert_almost_equal(slope, 4)
    assert_almost_equal(upper, 4.38, decimal=2)
    assert_almost_equal(lower, 3.71, decimal=2)
def test_siegelslopes():
    """Tests for mstats.siegelslopes (robust repeated-median line fit)."""
    # method should be exact for a straight line
    y = 2 * np.arange(10) + 0.5
    assert_equal(mstats.siegelslopes(y), (2.0, 0.5))
    assert_equal(mstats.siegelslopes(y, method='separate'), (2.0, 0.5))
    x = 2 * np.arange(10)
    y = 5 * x - 3.0
    assert_equal(mstats.siegelslopes(y, x), (5.0, -3.0))
    assert_equal(mstats.siegelslopes(y, x, method='separate'), (5.0, -3.0))
    # method is robust to outliers: breakdown point of 50%
    y[:4] = 1000
    assert_equal(mstats.siegelslopes(y, x), (5.0, -3.0))
    # if there are no outliers, results should be comparable to linregress
    x = np.arange(10)
    y = -2.3 + 0.3*x + stats.norm.rvs(size=10, random_state=231)
    slope_ols, intercept_ols, _, _, _ = stats.linregress(x, y)
    slope, intercept = mstats.siegelslopes(y, x)
    assert_allclose(slope, slope_ols, rtol=0.1)
    assert_allclose(intercept, intercept_ols, rtol=0.1)
    slope, intercept = mstats.siegelslopes(y, x, method='separate')
    assert_allclose(slope, slope_ols, rtol=0.1)
    assert_allclose(intercept, intercept_ols, rtol=0.1)
def test_plotting_positions():
    # Regression test for #1256: with alpha=beta=0 the positions
    # are (i+1)/(n+1).
    positions = mstats.plotting_positions(np.arange(3), 0, 0)
    assert_array_almost_equal(positions.data, np.array([0.25, 0.5, 0.75]))
class TestNormalitytests():
def test_vs_nonmasked(self):
x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
assert_array_almost_equal(mstats.normaltest(x),
stats.normaltest(x))
assert_array_almost_equal(mstats.skewtest(x),
stats.skewtest(x))
assert_array_almost_equal(mstats.kurtosistest(x),
stats.kurtosistest(x))
funcs = [stats.normaltest, stats.skewtest, stats.kurtosistest]
mfuncs = [mstats.normaltest, mstats.skewtest, mstats.kurtosistest]
x = [1, 2, 3, 4]
for func, mfunc in zip(funcs, mfuncs):
assert_raises(ValueError, func, x)
assert_raises(ValueError, mfunc, x)
def test_axis_None(self):
# Test axis=None (equal to axis=0 for 1-D input)
x = np.array((-2,-1,0,1,2,3)*4)**2
assert_allclose(mstats.normaltest(x, axis=None), mstats.normaltest(x))
assert_allclose(mstats.skewtest(x, axis=None), mstats.skewtest(x))
assert_allclose(mstats.kurtosistest(x, axis=None),
mstats.kurtosistest(x))
def test_maskedarray_input(self):
# Add some masked values, test result doesn't change
x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
xm = np.ma.array(np.r_[np.inf, x, 10],
mask=np.r_[True, [False] * x.size, True])
assert_allclose(mstats.normaltest(xm), stats.normaltest(x))
assert_allclose(mstats.skewtest(xm), stats.skewtest(x))
assert_allclose(mstats.kurtosistest(xm), stats.kurtosistest(x))
def test_nd_input(self):
x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
x_2d = np.vstack([x] * 2).T
for func in [mstats.normaltest, mstats.skewtest, mstats.kurtosistest]:
res_1d = func(x)
res_2d = func(x_2d)
assert_allclose(res_2d[0], [res_1d[0]] * 2)
assert_allclose(res_2d[1], [res_1d[1]] * 2)
def test_normaltest_result_attributes(self):
x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
res = mstats.normaltest(x)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, ma=True)
def test_kurtosistest_result_attributes(self):
x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
res = mstats.kurtosistest(x)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, ma=True)
def regression_test_9033(self):
# x cleary non-normal but power of negtative denom needs
# to be handled correctly to reject normality
counts = [128, 0, 58, 7, 0, 41, 16, 0, 0, 167]
x = np.hstack([np.full(c, i) for i, c in enumerate(counts)])
assert_equal(mstats.kurtosistest(x)[1] < 0.01, True)
@pytest.mark.parametrize("test", ["skewtest", "kurtosistest"])
@pytest.mark.parametrize("alternative", ["less", "greater"])
def test_alternative(self, test, alternative):
x = stats.norm.rvs(loc=10, scale=2.5, size=30, random_state=123)
stats_test = getattr(stats, test)
mstats_test = getattr(mstats, test)
z_ex, p_ex = stats_test(x, alternative=alternative)
z, p = mstats_test(x, alternative=alternative)
assert_allclose(z, z_ex, atol=1e-12)
assert_allclose(p, p_ex, atol=1e-12)
# test with masked arrays
x[1:5] = np.nan
x = np.ma.masked_array(x, mask=np.isnan(x))
z_ex, p_ex = stats_test(x.compressed(), alternative=alternative)
z, p = mstats_test(x, alternative=alternative)
assert_allclose(z, z_ex, atol=1e-12)
assert_allclose(p, p_ex, atol=1e-12)
def test_bad_alternative(self):
x = stats.norm.rvs(size=20, random_state=123)
msg = r"alternative must be 'less', 'greater' or 'two-sided'"
with pytest.raises(ValueError, match=msg):
mstats.skewtest(x, alternative='error')
with pytest.raises(ValueError, match=msg):
mstats.kurtosistest(x, alternative='error')
class TestFOneway():
    def test_result_attributes(self):
        # f_oneway returns a namedtuple with 'statistic' and 'pvalue'.
        group1 = np.array([655, 788], dtype=np.uint16)
        group2 = np.array([789, 772], dtype=np.uint16)
        result = mstats.f_oneway(group1, group2)
        check_named_results(result, ('statistic', 'pvalue'), ma=True)
class TestMannwhitneyu():
# data from gh-1428
x = np.array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 2., 1., 1., 2.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 3., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1.])
y = np.array([1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1., 1.,
2., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 1., 1., 3.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1.,
1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2.,
2., 1., 1., 2., 1., 1., 2., 1., 2., 1., 1., 1., 1., 2.,
2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 2., 1., 1., 1., 1., 1., 2., 2., 2., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
2., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 2., 1., 1.,
1., 1., 1., 1.])
def test_result_attributes(self):
res = mstats.mannwhitneyu(self.x, self.y)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, ma=True)
def test_against_stats(self):
# gh-4641 reported that stats.mannwhitneyu returned half the p-value
# of mstats.mannwhitneyu. Default alternative of stats.mannwhitneyu
# is now two-sided, so they match.
res1 = mstats.mannwhitneyu(self.x, self.y)
res2 = stats.mannwhitneyu(self.x, self.y)
assert res1.statistic == res2.statistic
assert_allclose(res1.pvalue, res2.pvalue)
class TestKruskal():
    def test_result_attributes(self):
        # kruskal returns a namedtuple with 'statistic' and 'pvalue'.
        sample1 = [1, 3, 5, 7, 9]
        sample2 = [2, 4, 6, 8, 10]
        result = mstats.kruskal(sample1, sample2)
        check_named_results(result, ('statistic', 'pvalue'), ma=True)
# TODO: for all ttest functions, add tests with masked array inputs
class TestTtest_rel():
def test_vs_nonmasked(self):
np.random.seed(1234567)
outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
# 1-D inputs
res1 = stats.ttest_rel(outcome[:, 0], outcome[:, 1])
res2 = mstats.ttest_rel(outcome[:, 0], outcome[:, 1])
assert_allclose(res1, res2)
# 2-D inputs
res1 = stats.ttest_rel(outcome[:, 0], outcome[:, 1], axis=None)
res2 = mstats.ttest_rel(outcome[:, 0], outcome[:, 1], axis=None)
assert_allclose(res1, res2)
res1 = stats.ttest_rel(outcome[:, :2], outcome[:, 2:], axis=0)
res2 = mstats.ttest_rel(outcome[:, :2], outcome[:, 2:], axis=0)
assert_allclose(res1, res2)
# Check default is axis=0
res3 = mstats.ttest_rel(outcome[:, :2], outcome[:, 2:])
assert_allclose(res2, res3)
def test_fully_masked(self):
np.random.seed(1234567)
outcome = ma.masked_array(np.random.randn(3, 2),
mask=[[1, 1, 1], [0, 0, 0]])
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in absolute")
for pair in [(outcome[:, 0], outcome[:, 1]), ([np.nan, np.nan], [1.0, 2.0])]:
t, p = mstats.ttest_rel(*pair)
assert_array_equal(t, (np.nan, np.nan))
assert_array_equal(p, (np.nan, np.nan))
def test_result_attributes(self):
np.random.seed(1234567)
outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
res = mstats.ttest_rel(outcome[:, 0], outcome[:, 1])
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, ma=True)
def test_invalid_input_size(self):
assert_raises(ValueError, mstats.ttest_rel,
np.arange(10), np.arange(11))
x = np.arange(24)
assert_raises(ValueError, mstats.ttest_rel,
x.reshape(2, 3, 4), x.reshape(2, 4, 3), axis=1)
assert_raises(ValueError, mstats.ttest_rel,
x.reshape(2, 3, 4), x.reshape(2, 4, 3), axis=2)
def test_empty(self):
res1 = mstats.ttest_rel([], [])
assert_(np.all(np.isnan(res1)))
def test_zero_division(self):
t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1])
assert_equal((np.abs(t), p), (np.inf, 0))
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in absolute")
t, p = mstats.ttest_ind([0, 0, 0], [0, 0, 0])
assert_array_equal(t, np.array([np.nan, np.nan]))
assert_array_equal(p, np.array([np.nan, np.nan]))
def test_bad_alternative(self):
msg = r"alternative must be 'less', 'greater' or 'two-sided'"
with pytest.raises(ValueError, match=msg):
mstats.ttest_ind([1, 2, 3], [4, 5, 6], alternative='foo')
@pytest.mark.parametrize("alternative", ["less", "greater"])
def test_alternative(self, alternative):
x = stats.norm.rvs(loc=10, scale=5, size=25, random_state=42)
y = stats.norm.rvs(loc=8, scale=2, size=25, random_state=42)
t_ex, p_ex = stats.ttest_rel(x, y, alternative=alternative)
t, p = mstats.ttest_rel(x, y, alternative=alternative)
assert_allclose(t, t_ex, rtol=1e-14)
assert_allclose(p, p_ex, rtol=1e-14)
# test with masked arrays
x[1:10] = np.nan
y[1:10] = np.nan
x = np.ma.masked_array(x, mask=np.isnan(x))
y = np.ma.masked_array(y, mask=np.isnan(y))
t, p = mstats.ttest_rel(x, y, alternative=alternative)
t_ex, p_ex = stats.ttest_rel(x.compressed(), y.compressed(),
alternative=alternative)
assert_allclose(t, t_ex, rtol=1e-14)
assert_allclose(p, p_ex, rtol=1e-14)
class TestTtest_ind():
    """Tests for mstats.ttest_ind (independent two-sample t-test)."""
    def test_vs_nonmasked(self):
        # mstats.ttest_ind agrees with stats.ttest_ind on unmasked data.
        np.random.seed(1234567)
        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
        # 1-D inputs
        res1 = stats.ttest_ind(outcome[:, 0], outcome[:, 1])
        res2 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1])
        assert_allclose(res1, res2)
        # 2-D inputs
        res1 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], axis=None)
        res2 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], axis=None)
        assert_allclose(res1, res2)
        res1 = stats.ttest_ind(outcome[:, :2], outcome[:, 2:], axis=0)
        res2 = mstats.ttest_ind(outcome[:, :2], outcome[:, 2:], axis=0)
        assert_allclose(res1, res2)
        # Check default is axis=0
        res3 = mstats.ttest_ind(outcome[:, :2], outcome[:, 2:])
        assert_allclose(res2, res3)
        # Check equal_var
        res4 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=True)
        res5 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=True)
        assert_allclose(res4, res5)
        res4 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=False)
        res5 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=False)
        assert_allclose(res4, res5)
    def test_fully_masked(self):
        # Fully masked / all-nan inputs yield (nan, nan).
        np.random.seed(1234567)
        outcome = ma.masked_array(np.random.randn(3, 2), mask=[[1, 1, 1], [0, 0, 0]])
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in absolute")
            for pair in [(outcome[:, 0], outcome[:, 1]), ([np.nan, np.nan], [1.0, 2.0])]:
                t, p = mstats.ttest_ind(*pair)
                assert_array_equal(t, (np.nan, np.nan))
                assert_array_equal(p, (np.nan, np.nan))
    def test_result_attributes(self):
        # The result is a namedtuple with 'statistic' and 'pvalue'.
        np.random.seed(1234567)
        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
        res = mstats.ttest_ind(outcome[:, 0], outcome[:, 1])
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)
    def test_empty(self):
        # Empty inputs yield nan results rather than raising.
        res1 = mstats.ttest_ind([], [])
        assert_(np.all(np.isnan(res1)))
    def test_zero_division(self):
        # Zero variance in one sample gives an infinite statistic; zero
        # variance in both samples gives nan.
        t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1])
        assert_equal((np.abs(t), p), (np.inf, 0))
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in absolute")
            t, p = mstats.ttest_ind([0, 0, 0], [0, 0, 0])
            assert_array_equal(t, (np.nan, np.nan))
            assert_array_equal(p, (np.nan, np.nan))
        t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1], equal_var=False)
        assert_equal((np.abs(t), p), (np.inf, 0))
        assert_array_equal(mstats.ttest_ind([0, 0, 0], [0, 0, 0],
                                            equal_var=False), (np.nan, np.nan))
    def test_bad_alternative(self):
        # An unknown `alternative` raises ValueError.
        msg = r"alternative must be 'less', 'greater' or 'two-sided'"
        with pytest.raises(ValueError, match=msg):
            mstats.ttest_ind([1, 2, 3], [4, 5, 6], alternative='foo')
    @pytest.mark.parametrize("alternative", ["less", "greater"])
    def test_alternative(self, alternative):
        # One-sided alternatives agree with stats.ttest_ind, with and
        # without masked values.
        x = stats.norm.rvs(loc=10, scale=2, size=100, random_state=123)
        y = stats.norm.rvs(loc=8, scale=2, size=100, random_state=123)
        t_ex, p_ex = stats.ttest_ind(x, y, alternative=alternative)
        t, p = mstats.ttest_ind(x, y, alternative=alternative)
        assert_allclose(t, t_ex, rtol=1e-14)
        assert_allclose(p, p_ex, rtol=1e-14)
        # test with masked arrays
        x[1:10] = np.nan
        y[80:90] = np.nan
        x = np.ma.masked_array(x, mask=np.isnan(x))
        y = np.ma.masked_array(y, mask=np.isnan(y))
        t_ex, p_ex = stats.ttest_ind(x.compressed(), y.compressed(),
                                     alternative=alternative)
        t, p = mstats.ttest_ind(x, y, alternative=alternative)
        assert_allclose(t, t_ex, rtol=1e-14)
        assert_allclose(p, p_ex, rtol=1e-14)
class TestTtest_1samp():
    """Tests for mstats.ttest_1samp (one-sample t-test)."""
    def test_vs_nonmasked(self):
        # mstats.ttest_1samp agrees with stats.ttest_1samp on unmasked data.
        np.random.seed(1234567)
        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
        # 1-D inputs
        res1 = stats.ttest_1samp(outcome[:, 0], 1)
        res2 = mstats.ttest_1samp(outcome[:, 0], 1)
        assert_allclose(res1, res2)
        # 2-D inputs
        res1 = stats.ttest_1samp(outcome[:, 0], outcome[:, 1], axis=None)
        res2 = mstats.ttest_1samp(outcome[:, 0], outcome[:, 1], axis=None)
        assert_allclose(res1, res2)
        res1 = stats.ttest_1samp(outcome[:, :2], outcome[:, 2:], axis=0)
        res2 = mstats.ttest_1samp(outcome[:, :2], outcome[:, 2:], axis=0)
        assert_allclose(res1, res2, atol=1e-15)
        # Check default is axis=0
        res3 = mstats.ttest_1samp(outcome[:, :2], outcome[:, 2:])
        assert_allclose(res2, res3)
    def test_fully_masked(self):
        # A fully masked (or all-nan) sample yields (nan, nan).
        np.random.seed(1234567)
        outcome = ma.masked_array(np.random.randn(3), mask=[1, 1, 1])
        expected = (np.nan, np.nan)
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in absolute")
            for pair in [((np.nan, np.nan), 0.0), (outcome, 0.0)]:
                t, p = mstats.ttest_1samp(*pair)
                assert_array_equal(p, expected)
                assert_array_equal(t, expected)
    def test_result_attributes(self):
        # The result is a namedtuple with 'statistic' and 'pvalue'.
        np.random.seed(1234567)
        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
        res = mstats.ttest_1samp(outcome[:, 0], 1)
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)
    def test_empty(self):
        # Empty input yields nan results rather than raising.
        res1 = mstats.ttest_1samp([], 1)
        assert_(np.all(np.isnan(res1)))
    def test_zero_division(self):
        # Zero sample variance: infinite statistic when the sample mean
        # differs from the tested mean, nan when it matches.
        t, p = mstats.ttest_1samp([0, 0, 0], 1)
        assert_equal((np.abs(t), p), (np.inf, 0))
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in absolute")
            t, p = mstats.ttest_1samp([0, 0, 0], 0)
            assert_(np.isnan(t))
            assert_array_equal(p, (np.nan, np.nan))
    def test_bad_alternative(self):
        # An unknown `alternative` raises ValueError.
        msg = r"alternative must be 'less', 'greater' or 'two-sided'"
        with pytest.raises(ValueError, match=msg):
            mstats.ttest_1samp([1, 2, 3], 4, alternative='foo')
    @pytest.mark.parametrize("alternative", ["less", "greater"])
    def test_alternative(self, alternative):
        # One-sided alternatives agree with stats.ttest_1samp, with and
        # without masked values.
        x = stats.norm.rvs(loc=10, scale=2, size=100, random_state=123)
        t_ex, p_ex = stats.ttest_1samp(x, 9, alternative=alternative)
        t, p = mstats.ttest_1samp(x, 9, alternative=alternative)
        assert_allclose(t, t_ex, rtol=1e-14)
        assert_allclose(p, p_ex, rtol=1e-14)
        # test with masked arrays
        x[1:10] = np.nan
        x = np.ma.masked_array(x, mask=np.isnan(x))
        t_ex, p_ex = stats.ttest_1samp(x.compressed(), 9,
                                       alternative=alternative)
        t, p = mstats.ttest_1samp(x, 9, alternative=alternative)
        assert_allclose(t, t_ex, rtol=1e-14)
        assert_allclose(p, p_ex, rtol=1e-14)
class TestDescribe:
    """
    Tests for mstats.describe.

    Note that there are also tests for `mstats.describe` in the
    class TestCompareWithStats.
    """
    def test_basic_with_axis(self):
        # Basic check; also a regression test for gh-7303.
        data = np.ma.masked_array([[0, 1, 2, 3, 4, 9],
                                   [5, 5, 0, 9, 3, 3]],
                                  mask=[[0, 0, 0, 0, 0, 1],
                                        [0, 0, 1, 1, 0, 0]])
        result = mstats.describe(data, axis=1)
        # Masked entries are excluded from the counts and statistics.
        assert_equal(result.nobs, [5, 4])
        row_min, row_max = result.minmax
        assert_equal(row_min, [0, 3])
        assert_equal(row_max, [4, 5])
        assert_equal(result.mean, [2.0, 4.0])
        assert_equal(result.variance, [2.0, 1.0])
        assert_equal(result.skewness, [0.0, 0.0])
        assert_allclose(result.kurtosis, [-1.3, -2.0])
class TestCompareWithStats:
"""
Class to compare mstats results with stats results.
It is in general assumed that scipy.stats is at a more mature stage than
stats.mstats. If a routine in mstats results in similar results like in
scipy.stats, this is considered also as a proper validation of scipy.mstats
routine.
Different sample sizes are used for testing, as some problems between stats
and mstats are dependent on sample size.
Author: Alexander Loew
NOTE that some tests fail. This might be caused by
a) actual differences or bugs between stats and mstats
b) numerical inaccuracies
c) different definitions of routine interfaces
These failures need to be checked. Current workaround is to have disabled these tests,
but issuing reports on scipy-dev
"""
def get_n(self):
""" Returns list of sample sizes to be used for comparison. """
return [1000, 100, 10, 5]
def generate_xy_sample(self, n):
# This routine generates numpy arrays and corresponding masked arrays
# with the same data, but additional masked values
np.random.seed(1234567)
x = np.random.randn(n)
y = x + np.random.randn(n)
xm = np.full(len(x) + 5, 1e16)
ym = np.full(len(y) + 5, 1e16)
xm[0:len(x)] = x
ym[0:len(y)] = y
mask = xm > 9e15
xm = np.ma.array(xm, mask=mask)
ym = np.ma.array(ym, mask=mask)
return x, y, xm, ym
def generate_xy_sample2D(self, n, nx):
x = np.full((n, nx), np.nan)
y = np.full((n, nx), np.nan)
xm = np.full((n+5, nx), np.nan)
ym = np.full((n+5, nx), np.nan)
for i in range(nx):
x[:, i], y[:, i], dx, dy = self.generate_xy_sample(n)
xm[0:n, :] = x[0:n]
ym[0:n, :] = y[0:n]
xm = np.ma.array(xm, mask=np.isnan(xm))
ym = np.ma.array(ym, mask=np.isnan(ym))
return x, y, xm, ym
def test_linregress(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
result1 = stats.linregress(x, y)
result2 = stats.mstats.linregress(xm, ym)
assert_allclose(np.asarray(result1), np.asarray(result2))
def test_pearsonr(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r, p = stats.pearsonr(x, y)
rm, pm = stats.mstats.pearsonr(xm, ym)
assert_almost_equal(r, rm, decimal=14)
assert_almost_equal(p, pm, decimal=14)
def test_spearmanr(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r, p = stats.spearmanr(x, y)
rm, pm = stats.mstats.spearmanr(xm, ym)
assert_almost_equal(r, rm, 14)
assert_almost_equal(p, pm, 14)
def test_spearmanr_backcompat_useties(self):
# A regression test to ensure we don't break backwards compat
# more than we have to (see gh-9204).
x = np.arange(6)
assert_raises(ValueError, mstats.spearmanr, x, x, False)
def test_gmean(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.gmean(abs(x))
rm = stats.mstats.gmean(abs(xm))
assert_allclose(r, rm, rtol=1e-13)
r = stats.gmean(abs(y))
rm = stats.mstats.gmean(abs(ym))
assert_allclose(r, rm, rtol=1e-13)
def test_hmean(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.hmean(abs(x))
rm = stats.mstats.hmean(abs(xm))
assert_almost_equal(r, rm, 10)
r = stats.hmean(abs(y))
rm = stats.mstats.hmean(abs(ym))
assert_almost_equal(r, rm, 10)
def test_skew(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.skew(x)
rm = stats.mstats.skew(xm)
assert_almost_equal(r, rm, 10)
r = stats.skew(y)
rm = stats.mstats.skew(ym)
assert_almost_equal(r, rm, 10)
def test_moment(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.moment(x)
rm = stats.mstats.moment(xm)
assert_almost_equal(r, rm, 10)
r = stats.moment(y)
rm = stats.mstats.moment(ym)
assert_almost_equal(r, rm, 10)
def test_zscore(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
# reference solution
zx = (x - x.mean()) / x.std()
zy = (y - y.mean()) / y.std()
# validate stats
assert_allclose(stats.zscore(x), zx, rtol=1e-10)
assert_allclose(stats.zscore(y), zy, rtol=1e-10)
# compare stats and mstats
assert_allclose(stats.zscore(x), stats.mstats.zscore(xm[0:len(x)]),
rtol=1e-10)
assert_allclose(stats.zscore(y), stats.mstats.zscore(ym[0:len(y)]),
rtol=1e-10)
def test_kurtosis(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.kurtosis(x)
rm = stats.mstats.kurtosis(xm)
assert_almost_equal(r, rm, 10)
r = stats.kurtosis(y)
rm = stats.mstats.kurtosis(ym)
assert_almost_equal(r, rm, 10)
def test_sem(self):
# example from stats.sem doc
a = np.arange(20).reshape(5, 4)
am = np.ma.array(a)
r = stats.sem(a, ddof=1)
rm = stats.mstats.sem(am, ddof=1)
assert_allclose(r, 2.82842712, atol=1e-5)
assert_allclose(rm, 2.82842712, atol=1e-5)
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.mstats.sem(xm, axis=None, ddof=0),
stats.sem(x, axis=None, ddof=0), decimal=13)
assert_almost_equal(stats.mstats.sem(ym, axis=None, ddof=0),
stats.sem(y, axis=None, ddof=0), decimal=13)
assert_almost_equal(stats.mstats.sem(xm, axis=None, ddof=1),
stats.sem(x, axis=None, ddof=1), decimal=13)
assert_almost_equal(stats.mstats.sem(ym, axis=None, ddof=1),
stats.sem(y, axis=None, ddof=1), decimal=13)
def test_describe(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.describe(x, ddof=1)
rm = stats.mstats.describe(xm, ddof=1)
for ii in range(6):
assert_almost_equal(np.asarray(r[ii]),
np.asarray(rm[ii]),
decimal=12)
def test_describe_result_attributes(self):
actual = mstats.describe(np.arange(5))
attributes = ('nobs', 'minmax', 'mean', 'variance', 'skewness',
'kurtosis')
check_named_results(actual, attributes, ma=True)
def test_rankdata(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.rankdata(x)
rm = stats.mstats.rankdata(x)
assert_allclose(r, rm)
def test_tmean(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.tmean(x),stats.mstats.tmean(xm), 14)
assert_almost_equal(stats.tmean(y),stats.mstats.tmean(ym), 14)
def test_tmax(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.tmax(x,2.),
stats.mstats.tmax(xm,2.), 10)
assert_almost_equal(stats.tmax(y,2.),
stats.mstats.tmax(ym,2.), 10)
assert_almost_equal(stats.tmax(x, upperlimit=3.),
stats.mstats.tmax(xm, upperlimit=3.), 10)
assert_almost_equal(stats.tmax(y, upperlimit=3.),
stats.mstats.tmax(ym, upperlimit=3.), 10)
def test_tmin(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_equal(stats.tmin(x), stats.mstats.tmin(xm))
assert_equal(stats.tmin(y), stats.mstats.tmin(ym))
assert_almost_equal(stats.tmin(x, lowerlimit=-1.),
stats.mstats.tmin(xm, lowerlimit=-1.), 10)
assert_almost_equal(stats.tmin(y, lowerlimit=-1.),
stats.mstats.tmin(ym, lowerlimit=-1.), 10)
def test_zmap(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
z = stats.zmap(x, y)
zm = stats.mstats.zmap(xm, ym)
assert_allclose(z, zm[0:len(z)], atol=1e-10)
def test_variation(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.variation(x), stats.mstats.variation(xm),
decimal=12)
assert_almost_equal(stats.variation(y), stats.mstats.variation(ym),
decimal=12)
def test_tvar(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.tvar(x), stats.mstats.tvar(xm),
decimal=12)
assert_almost_equal(stats.tvar(y), stats.mstats.tvar(ym),
decimal=12)
def test_trimboth(self):
a = np.arange(20)
b = stats.trimboth(a, 0.1)
bm = stats.mstats.trimboth(a, 0.1)
assert_allclose(np.sort(b), bm.data[~bm.mask])
def test_tsem(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
assert_almost_equal(stats.tsem(x), stats.mstats.tsem(xm),
decimal=14)
assert_almost_equal(stats.tsem(y), stats.mstats.tsem(ym),
decimal=14)
assert_almost_equal(stats.tsem(x, limits=(-2., 2.)),
stats.mstats.tsem(xm, limits=(-2., 2.)),
decimal=14)
def test_skewtest(self):
# this test is for 1D data
for n in self.get_n():
if n > 8:
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.skewtest(x)
rm = stats.mstats.skewtest(xm)
assert_allclose(r, rm)
def test_skewtest_result_attributes(self):
x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
res = mstats.skewtest(x)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, ma=True)
def test_skewtest_2D_notmasked(self):
# a normal ndarray is passed to the masked function
x = np.random.random((20, 2)) * 20.
r = stats.skewtest(x)
rm = stats.mstats.skewtest(x)
assert_allclose(np.asarray(r), np.asarray(rm))
def test_skewtest_2D_WithMask(self):
nx = 2
for n in self.get_n():
if n > 8:
x, y, xm, ym = self.generate_xy_sample2D(n, nx)
r = stats.skewtest(x)
rm = stats.mstats.skewtest(xm)
assert_equal(r[0][0], rm[0][0])
assert_equal(r[0][1], rm[0][1])
def test_normaltest(self):
with np.errstate(over='raise'), suppress_warnings() as sup:
sup.filter(UserWarning, "kurtosistest only valid for n>=20")
for n in self.get_n():
if n > 8:
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.normaltest(x)
rm = stats.mstats.normaltest(xm)
assert_allclose(np.asarray(r), np.asarray(rm))
def test_find_repeats(self):
x = np.asarray([1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 4]).astype('float')
tmp = np.asarray([1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5]).astype('float')
mask = (tmp == 5.)
xm = np.ma.array(tmp, mask=mask)
x_orig, xm_orig = x.copy(), xm.copy()
r = stats.find_repeats(x)
rm = stats.mstats.find_repeats(xm)
assert_equal(r, rm)
assert_equal(x, x_orig)
assert_equal(xm, xm_orig)
# This crazy behavior is expected by count_tied_groups, but is not
# in the docstring...
_, counts = stats.mstats.find_repeats([])
assert_equal(counts, np.array(0, dtype=np.intp))
def test_kendalltau(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.kendalltau(x, y)
rm = stats.mstats.kendalltau(xm, ym)
assert_almost_equal(r[0], rm[0], decimal=10)
assert_almost_equal(r[1], rm[1], decimal=7)
def test_obrientransform(self):
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
r = stats.obrientransform(x)
rm = stats.mstats.obrientransform(xm)
assert_almost_equal(r.T, rm[0:len(x)])
def test_ks_1samp(self):
"""Checks that mstats.ks_1samp and stats.ks_1samp agree on masked arrays."""
for mode in ['auto', 'exact', 'asymp']:
with suppress_warnings() as sup:
for alternative in ['less', 'greater', 'two-sided']:
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
res1 = stats.ks_1samp(x, stats.norm.cdf, alternative=alternative, mode=mode)
res2 = stats.mstats.ks_1samp(xm, stats.norm.cdf, alternative=alternative, mode=mode)
assert_equal(np.asarray(res1), np.asarray(res2))
res3 = stats.ks_1samp(xm, stats.norm.cdf, alternative=alternative, mode=mode)
assert_equal(np.asarray(res1), np.asarray(res3))
def test_kstest_1samp(self):
"""Checks that 1-sample mstats.kstest and stats.kstest agree on masked arrays."""
for mode in ['auto', 'exact', 'asymp']:
with suppress_warnings() as sup:
for alternative in ['less', 'greater', 'two-sided']:
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
res1 = stats.kstest(x, 'norm', alternative=alternative, mode=mode)
res2 = stats.mstats.kstest(xm, 'norm', alternative=alternative, mode=mode)
assert_equal(np.asarray(res1), np.asarray(res2))
res3 = stats.kstest(xm, 'norm', alternative=alternative, mode=mode)
assert_equal(np.asarray(res1), np.asarray(res3))
def test_ks_2samp(self):
"""Checks that mstats.ks_2samp and stats.ks_2samp agree on masked arrays.
gh-8431"""
for mode in ['auto', 'exact', 'asymp']:
with suppress_warnings() as sup:
if mode in ['auto', 'exact']:
sup.filter(RuntimeWarning,
"ks_2samp: Exact calculation unsuccessful. Switching to mode=asymp.")
for alternative in ['less', 'greater', 'two-sided']:
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
res1 = stats.ks_2samp(x, y, alternative=alternative, mode=mode)
res2 = stats.mstats.ks_2samp(xm, ym, alternative=alternative, mode=mode)
assert_equal(np.asarray(res1), np.asarray(res2))
res3 = stats.ks_2samp(xm, y, alternative=alternative, mode=mode)
assert_equal(np.asarray(res1), np.asarray(res3))
def test_kstest_2samp(self):
"""Checks that 2-sample mstats.kstest and stats.kstest agree on masked arrays."""
for mode in ['auto', 'exact', 'asymp']:
with suppress_warnings() as sup:
if mode in ['auto', 'exact']:
sup.filter(RuntimeWarning,
"ks_2samp: Exact calculation unsuccessful. Switching to mode=asymp.")
for alternative in ['less', 'greater', 'two-sided']:
for n in self.get_n():
x, y, xm, ym = self.generate_xy_sample(n)
res1 = stats.kstest(x, y, alternative=alternative, mode=mode)
res2 = stats.mstats.kstest(xm, ym, alternative=alternative, mode=mode)
assert_equal(np.asarray(res1), np.asarray(res2))
res3 = stats.kstest(xm, y, alternative=alternative, mode=mode)
assert_equal(np.asarray(res1), np.asarray(res3))
def test_nametuples_agree(self):
result = stats.kstest([1, 2], [3, 4])
assert_(isinstance(result, stats.stats.KstestResult))
result2 = stats.stats.Ks_2sampResult(result.statistic, result.pvalue)
assert_(isinstance(result2, stats.stats.Ks_2sampResult))
assert_equal(result, result2)
class TestBrunnerMunzel:
    """Tests for mstats.brunnermunzel.

    Reference statistics and p-values were computed with R's ``lawstat``
    package; NaN entries in the data become masked values.
    """
    # Data from (Lumley, 1996)
    X = np.ma.masked_invalid([1, 2, 1, 1, 1, np.nan, 1, 1,
                              1, 1, 1, 2, 4, 1, 1, np.nan])
    Y = np.ma.masked_invalid([3, 3, 4, 3, np.nan, 1, 2, 3, 1, 1, 5, 4])
    # Decimal places to which reference values are checked.
    significant = 14
    def test_brunnermunzel_one_sided(self):
        # Results are compared with R's lawstat package.
        u1, p1 = mstats.brunnermunzel(self.X, self.Y, alternative='less')
        u2, p2 = mstats.brunnermunzel(self.Y, self.X, alternative='greater')
        u3, p3 = mstats.brunnermunzel(self.X, self.Y, alternative='greater')
        u4, p4 = mstats.brunnermunzel(self.Y, self.X, alternative='less')
        # Swapping the samples while flipping the alternative must give
        # the same p-value and a negated statistic.
        assert_almost_equal(p1, p2, decimal=self.significant)
        assert_almost_equal(p3, p4, decimal=self.significant)
        assert_(p1 != p3)
        assert_almost_equal(u1, 3.1374674823029505,
                            decimal=self.significant)
        assert_almost_equal(u2, -3.1374674823029505,
                            decimal=self.significant)
        assert_almost_equal(u3, 3.1374674823029505,
                            decimal=self.significant)
        assert_almost_equal(u4, -3.1374674823029505,
                            decimal=self.significant)
        assert_almost_equal(p1, 0.0028931043330757342,
                            decimal=self.significant)
        assert_almost_equal(p3, 0.99710689566692423,
                            decimal=self.significant)
    def test_brunnermunzel_two_sided(self):
        # Results are compared with R's lawstat package.
        u1, p1 = mstats.brunnermunzel(self.X, self.Y, alternative='two-sided')
        u2, p2 = mstats.brunnermunzel(self.Y, self.X, alternative='two-sided')
        assert_almost_equal(p1, p2, decimal=self.significant)
        assert_almost_equal(u1, 3.1374674823029505,
                            decimal=self.significant)
        assert_almost_equal(u2, -3.1374674823029505,
                            decimal=self.significant)
        assert_almost_equal(p1, 0.0057862086661515377,
                            decimal=self.significant)
    def test_brunnermunzel_default(self):
        # The default value for alternative is two-sided
        u1, p1 = mstats.brunnermunzel(self.X, self.Y)
        u2, p2 = mstats.brunnermunzel(self.Y, self.X)
        assert_almost_equal(p1, p2, decimal=self.significant)
        assert_almost_equal(u1, 3.1374674823029505,
                            decimal=self.significant)
        assert_almost_equal(u2, -3.1374674823029505,
                            decimal=self.significant)
        assert_almost_equal(p1, 0.0057862086661515377,
                            decimal=self.significant)
    def test_brunnermunzel_alternative_error(self):
        # An unknown `alternative` keyword must raise ValueError.
        alternative = "error"
        distribution = "t"
        assert_(alternative not in ["two-sided", "greater", "less"])
        assert_raises(ValueError,
                      mstats.brunnermunzel,
                      self.X,
                      self.Y,
                      alternative,
                      distribution)
    def test_brunnermunzel_distribution_norm(self):
        u1, p1 = mstats.brunnermunzel(self.X, self.Y, distribution="normal")
        u2, p2 = mstats.brunnermunzel(self.Y, self.X, distribution="normal")
        assert_almost_equal(p1, p2, decimal=self.significant)
        assert_almost_equal(u1, 3.1374674823029505,
                            decimal=self.significant)
        assert_almost_equal(u2, -3.1374674823029505,
                            decimal=self.significant)
        assert_almost_equal(p1, 0.0017041417600383024,
                            decimal=self.significant)
    def test_brunnermunzel_distribution_error(self):
        # An unknown `distribution` keyword must raise ValueError.
        alternative = "two-sided"
        distribution = "error"
        # BUG FIX: this sanity check previously tested `alternative`,
        # which never validated that `distribution` is actually invalid.
        assert_(distribution not in ["t", "normal"])
        assert_raises(ValueError,
                      mstats.brunnermunzel,
                      self.X,
                      self.Y,
                      alternative,
                      distribution)
    def test_brunnermunzel_empty_imput(self):
        # Empty input (either side or both) yields NaN statistic and p-value.
        u1, p1 = mstats.brunnermunzel(self.X, [])
        u2, p2 = mstats.brunnermunzel([], self.Y)
        u3, p3 = mstats.brunnermunzel([], [])
        assert_(np.isnan(u1))
        assert_(np.isnan(p1))
        assert_(np.isnan(u2))
        assert_(np.isnan(p2))
        assert_(np.isnan(u3))
        assert_(np.isnan(p3))
| 42.223705 | 108 | 0.551925 | import warnings
import platform
import numpy as np
from numpy import nan
import numpy.ma as ma
from numpy.ma import masked, nomask
import scipy.stats.mstats as mstats
from scipy import stats
from .common_tests import check_named_results
import pytest
from pytest import raises as assert_raises
from numpy.ma.testutils import (assert_equal, assert_almost_equal,
assert_array_almost_equal, assert_array_almost_equal_nulp, assert_,
assert_allclose, assert_array_equal)
from numpy.testing import suppress_warnings
from scipy.stats import mstats_basic
class TestMquantiles:
    """Tests for mstats.mquantiles."""
    def test_mquantiles_limit_keyword(self):
        """The ``limit`` keyword must exclude out-of-range sentinel values."""
        observations = np.array([[6., 7., 1.],
                                 [47., 15., 2.],
                                 [49., 36., 3.],
                                 [15., 39., 4.],
                                 [42., 40., -999.],
                                 [41., 41., -999.],
                                 [7., -999., -999.],
                                 [39., -999., -999.],
                                 [43., -999., -999.],
                                 [40., -999., -999.],
                                 [36., -999., -999.]])
        expected = [[19.2, 14.6, 1.45],
                    [40.0, 37.5, 2.5],
                    [42.8, 40.05, 3.55]]
        # -999. entries fall outside (0, 50) and must be ignored.
        computed = mstats.mquantiles(observations, axis=0, limit=(0, 50))
        assert_almost_equal(computed, expected)
def check_equal_gmean(array_like, desired, axis=None, dtype=None, rtol=1e-7):
    """Compute mstats.gmean of *array_like* and check value and dtype."""
    computed = mstats.gmean(array_like, axis=axis, dtype=dtype)
    assert_allclose(computed, desired, rtol=rtol)
    # dtype=None compares equal to the default float64 dtype.
    assert_equal(computed.dtype, dtype)
def check_equal_hmean(array_like, desired, axis=None, dtype=None, rtol=1e-7):
    """Compute stats.hmean of *array_like* and check value and dtype."""
    computed = stats.hmean(array_like, axis=axis, dtype=dtype)
    assert_allclose(computed, desired, rtol=rtol)
    # dtype=None compares equal to the default float64 dtype.
    assert_equal(computed.dtype, dtype)
class TestGeoMean:
    """Geometric-mean tests over plain, masked, zero/negative and 2-D inputs."""
    def test_1d(self):
        a = [1, 2, 3, 4]
        desired = np.power(1*2*3*4, 1./4.)
        check_equal_gmean(a, desired, rtol=1e-14)
    def test_1d_ma(self):
        # Test a 1d masked array
        a = ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
        desired = 45.2872868812
        check_equal_gmean(a, desired)
        a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1])
        desired = np.power(1*2*3, 1./3.)
        check_equal_gmean(a, desired, rtol=1e-14)
    def test_1d_ma_value(self):
        # Test a 1d masked array with a masked value
        a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], mask=[0, 0, 0, 0, 0, 0, 0, 0, 0, 1])
        desired = 41.4716627439
        check_equal_gmean(a, desired)
    def test_1d_ma0(self):
        # Test a 1d masked array with zero element
        a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 0])
        desired = 41.4716627439
        # log(0) raises a divide warning; the zero entry is effectively masked.
        with np.errstate(divide='ignore'):
            check_equal_gmean(a, desired)
    def test_1d_ma_inf(self):
        # Test a 1d masked array with negative element
        a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, -1])
        desired = 41.4716627439
        # log of a negative number is invalid; suppress that warning.
        with np.errstate(invalid='ignore'):
            check_equal_gmean(a, desired)
    @pytest.mark.skipif(not hasattr(np, 'float96'), reason='cannot find float96 so skipping')
    def test_1d_float96(self):
        a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1])
        desired_dt = np.power(1*2*3, 1./3.).astype(np.float96)
        check_equal_gmean(a, desired_dt, dtype=np.float96, rtol=1e-14)
    def test_2d_ma(self):
        a = ma.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
                     mask=[[0, 0, 0, 0], [1, 0, 0, 1], [0, 1, 1, 0]])
        desired = np.array([1, 2, 3, 4])
        check_equal_gmean(a, desired, axis=0, rtol=1e-14)
        desired = ma.array([np.power(1*2*3*4, 1./4.),
                            np.power(2*3, 1./2.),
                            np.power(1*4, 1./2.)])
        check_equal_gmean(a, desired, axis=-1, rtol=1e-14)
        # Test a 2d masked array
        a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
        desired = 52.8885199
        check_equal_gmean(np.ma.array(a), desired)
class TestHarMean:
    """Harmonic-mean tests over masked 1-D and 2-D inputs."""
    def test_1d(self):
        a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1])
        desired = 3. / (1./1 + 1./2 + 1./3)
        check_equal_hmean(a, desired, rtol=1e-14)
        a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])
        desired = 34.1417152147
        check_equal_hmean(a, desired)
        a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100],
                        mask=[0, 0, 0, 0, 0, 0, 0, 0, 0, 1])
        desired = 31.8137186141
        check_equal_hmean(a, desired)
    @pytest.mark.skipif(not hasattr(np, 'float96'), reason='cannot find float96 so skipping')
    def test_1d_float96(self):
        a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1])
        desired_dt = np.asarray(3. / (1./1 + 1./2 + 1./3), dtype=np.float96)
        check_equal_hmean(a, desired_dt, dtype=np.float96)
    def test_2d(self):
        a = ma.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
                     mask=[[0, 0, 0, 0], [1, 0, 0, 1], [0, 1, 1, 0]])
        desired = ma.array([1, 2, 3, 4])
        check_equal_hmean(a, desired, axis=0, rtol=1e-14)
        desired = [4./(1/1.+1/2.+1/3.+1/4.), 2./(1/2.+1/3.), 2./(1/1.+1/4.)]
        check_equal_hmean(a, desired, axis=-1, rtol=1e-14)
        a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]
        desired = 38.6696271841
        check_equal_hmean(np.ma.array(a), desired)
class TestRanking:
    """Tests for mstats.rankdata: ties, masked entries and axis handling."""
    def test_ranking(self):
        x = ma.array([0,1,1,1,2,3,4,5,5,6,])
        assert_almost_equal(mstats.rankdata(x),
                            [1,3,3,3,5,6,7,8.5,8.5,10])
        x[[3,4]] = masked
        # Masked entries rank 0 by default...
        assert_almost_equal(mstats.rankdata(x),
                            [1,2.5,2.5,0,0,4,5,6.5,6.5,8])
        # ...but get the average of the missing ranks with use_missing=True.
        assert_almost_equal(mstats.rankdata(x, use_missing=True),
                            [1,2.5,2.5,4.5,4.5,4,5,6.5,6.5,8])
        x = ma.array([0,1,5,1,2,4,3,5,1,6,])
        assert_almost_equal(mstats.rankdata(x),
                            [1,3,8.5,3,5,7,6,8.5,3,10])
        x = ma.array([[0,1,1,1,2], [3,4,5,5,6,]])
        assert_almost_equal(mstats.rankdata(x),
                            [[1,3,3,3,5], [6,7,8.5,8.5,10]])
        assert_almost_equal(mstats.rankdata(x, axis=1),
                            [[1,3,3,3,5], [1,2,3.5,3.5,5]])
        assert_almost_equal(mstats.rankdata(x,axis=0),
                            [[1,1,1,1,1], [2,2,2,2,2,]])
class TestCorr:
    """Correlation tests for mstats: pearsonr, spearmanr, kendalltau,
    kendalltau_seasonal and pointbiserialr, mostly against R references."""
    def test_pearsonr(self):
        # Tests some computations of Pearson's r
        x = ma.arange(10)
        with warnings.catch_warnings():
            # Perfect (anti-)correlation and fully masked data are edge
            # cases that must not trigger a RuntimeWarning.
            warnings.simplefilter("error", RuntimeWarning)
            assert_almost_equal(mstats.pearsonr(x, x)[0], 1.0)
            assert_almost_equal(mstats.pearsonr(x, x[::-1])[0], -1.0)
            x = ma.array(x, mask=True)
            pr = mstats.pearsonr(x, x)
            assert_(pr[0] is masked)
            assert_(pr[1] is masked)
        x1 = ma.array([-1.0, 0.0, 1.0])
        y1 = ma.array([0, 0, 3])
        r, p = mstats.pearsonr(x1, y1)
        assert_almost_equal(r, np.sqrt(3)/2)
        assert_almost_equal(p, 1.0/3)
        # (x2, y2) have the same unmasked data as (x1, y1).
        mask = [False, False, False, True]
        x2 = ma.array([-1.0, 0.0, 1.0, 99.0], mask=mask)
        y2 = ma.array([0, 0, 3, -1], mask=mask)
        r, p = mstats.pearsonr(x2, y2)
        assert_almost_equal(r, np.sqrt(3)/2)
        assert_almost_equal(p, 1.0/3)
    def test_pearsonr_misaligned_mask(self):
        # Misaligned masks: only rows unmasked in BOTH arrays are used.
        mx = np.ma.masked_array([1, 2, 3, 4, 5, 6], mask=[0, 1, 0, 0, 0, 0])
        my = np.ma.masked_array([9, 8, 7, 6, 5, 9], mask=[0, 0, 1, 0, 0, 0])
        x = np.array([1, 4, 5, 6])
        y = np.array([9, 6, 5, 9])
        mr, mp = mstats.pearsonr(mx, my)
        r, p = stats.pearsonr(x, y)
        assert_equal(mr, r)
        assert_equal(mp, p)
    def test_spearmanr(self):
        # Tests some computations of Spearman's rho
        (x, y) = ([5.05,6.75,3.21,2.66], [1.65,2.64,2.64,6.95])
        assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555)
        (x, y) = ([5.05,6.75,3.21,2.66,np.nan],[1.65,2.64,2.64,6.95,np.nan])
        (x, y) = (ma.fix_invalid(x), ma.fix_invalid(y))
        assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555)
        x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
             1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7]
        y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
             0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4]
        assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299)
        x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
             1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7, np.nan]
        y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
             0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4, np.nan]
        (x, y) = (ma.fix_invalid(x), ma.fix_invalid(y))
        assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299)
        # Next test is to make sure calculation uses sufficient precision.
        # The denominator's value is ~n^3 and used to be represented as an
        # int, which could overflow for large n; 2000**3 > 2**32.
        x = list(range(2000))
        y = list(range(2000))
        y[0], y[9] = y[9], y[0]
        y[10], y[434] = y[434], y[10]
        y[435], y[1509] = y[1509], y[435]
        assert_almost_equal(mstats.spearmanr(x,y)[0], 0.998)
        # test for namedtuple attributes
        res = mstats.spearmanr(x, y)
        attributes = ('correlation', 'pvalue')
        check_named_results(res, attributes, ma=True)
    def test_spearmanr_alternative(self):
        # One- and two-sided p-values checked against R's cor.test.
        x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,
             1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7]
        y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,
             0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4]
        r_exp = 0.6887298747763864
        r, p = mstats.spearmanr(x, y)
        assert_allclose(r, r_exp)
        assert_allclose(p, 0.004519192910756)
        r, p = mstats.spearmanr(x, y, alternative='greater')
        assert_allclose(r, r_exp)
        assert_allclose(p, 0.002259596455378)
        r, p = mstats.spearmanr(x, y, alternative='less')
        assert_allclose(r, r_exp)
        assert_allclose(p, 0.9977404035446)
        # For positively correlated data the one-sided p-values should be
        # p/2 ('greater') and 1 - p/2 ('less'), with an unchanged statistic.
        n = 100
        x = np.linspace(0, 5, n)
        y = 0.1*x + np.random.rand(n)
        stat1, p1 = mstats.spearmanr(x, y)
        stat2, p2 = mstats.spearmanr(x, y, alternative="greater")
        assert_allclose(p2, p1 / 2)
        stat3, p3 = mstats.spearmanr(x, y, alternative="less")
        assert_allclose(p3, 1 - p1 / 2)
        assert stat1 == stat2 == stat3
        with pytest.raises(ValueError, match="alternative must be 'less'..."):
            mstats.spearmanr(x, y, alternative="ekki-ekki")
    @pytest.mark.skipif(platform.machine() == 'ppc64le',
                        reason="fails/crashes on ppc64le")
    def test_kendalltau(self):
        # case with maximum disorder and tau = 0, p = 1
        x = ma.array(np.array([9, 2, 5, 6]))
        y = ma.array(np.array([4, 7, 9, 11]))
        expected = [0.0, 1.0]
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
        # simple case without ties
        x = ma.array(np.arange(10))
        y = ma.array(np.arange(10))
        expected = [1.0, 5.511463844797e-07]
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
        # an invalid method keyword must raise
        assert_raises(ValueError, mstats.kendalltau, x, y, method='banana')
        # swap a couple of values and re-check
        b = y[1]
        y[1] = y[2]
        y[2] = b
        expected = [0.9555555555555556, 5.511463844797e-06]
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
        # swap a couple more
        b = y[5]
        y[5] = y[6]
        y[6] = b
        expected = [0.9111111111111111, 2.976190476190e-05]
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
        # same in reverse order: tau flips sign, p-values unchanged
        x = ma.array(np.arange(10))
        y = ma.array(np.arange(10)[::-1])
        expected = [-1.0, 5.511463844797e-07]
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
        b = y[1]
        y[1] = y[2]
        y[2] = b
        expected = [-0.9555555555555556, 5.511463844797e-06]
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
        b = y[5]
        y[5] = y[6]
        y[6] = b
        expected = [-0.9111111111111111, 2.976190476190e-05]
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)
        # with some ties and masked (NaN) entries
        x = ma.fix_invalid([5.05, 6.75, 3.21, 2.66, np.nan])
        y = ma.fix_invalid([1.65, 26.5, -5.93, 7.96, np.nan])
        z = ma.fix_invalid([1.65, 2.64, 2.64, 6.95, np.nan])
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y)),
                            [+0.3333333, 0.75])
        assert_almost_equal(np.asarray(mstats.kendalltau(x, y, method='asymptotic')),
                            [+0.3333333, 0.4969059])
        assert_almost_equal(np.asarray(mstats.kendalltau(x, z)),
                            [-0.5477226, 0.2785987])
        #
        x = ma.fix_invalid([0, 0, 0, 0, 20, 20, 0, 60, 0, 20,
                            10, 10, 0, 40, 0, 20, 0, 0, 0, 0, 0, np.nan])
        y = ma.fix_invalid([0, 80, 80, 80, 10, 33, 60, 0, 67, 27,
                            25, 80, 80, 80, 80, 80, 80, 0, 10, 45, np.nan, 0])
        result = mstats.kendalltau(x, y)
        assert_almost_equal(np.asarray(result), [-0.1585188, 0.4128009])
        # test for namedtuple attributes
        attributes = ('correlation', 'pvalue')
        check_named_results(result, attributes, ma=True)
    @pytest.mark.skipif(platform.machine() == 'ppc64le',
                        reason="fails/crashes on ppc64le")
    @pytest.mark.slow
    def test_kendalltau_large(self):
        # make sure internal variable use correct precision with
        # larger arrays
        x = np.arange(2000, dtype=float)
        x = ma.masked_greater(x, 1995)
        y = np.arange(2000, dtype=float)
        y = np.concatenate((y[1000:], y[:1000]))
        assert_(np.isfinite(mstats.kendalltau(x, y)[1]))
    def test_kendalltau_seasonal(self):
        # Tests the seasonal Kendall tau.
        x = [[nan, nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
             [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
             [3, 2, 5, 6, 18, 4, 9, 1, 1, nan, 1, 1, nan],
             [nan, 6, 11, 4, 17, nan, 6, 1, 1, 2, 5, 1, 1]]
        x = ma.fix_invalid(x).T
        output = mstats.kendalltau_seasonal(x)
        assert_almost_equal(output['global p-value (indep)'], 0.008, 3)
        assert_almost_equal(output['seasonal p-value'].round(2),
                            [0.18,0.53,0.20,0.04])
    def test_kendall_p_exact_medium(self):
        # Test for the exact method with medium samples (some n >= 171)
        # expected values generated using SymPy
        expectations = {(100, 2393): 0.62822615287956040664,
                        (101, 2436): 0.60439525773513602669,
                        (170, 0): 2.755801935583541e-307,
                        (171, 0): 0.0,
                        (171, 1): 2.755801935583541e-307,
                        (172, 1): 0.0,
                        (200, 9797): 0.74753983745929675209,
                        (201, 9656): 0.40959218958120363618}
        for nc, expected in expectations.items():
            res = mstats_basic._kendall_p_exact(nc[0], nc[1])
            assert_almost_equal(res, expected)
    @pytest.mark.slow
    def test_kendall_p_exact_large(self):
        # Test for the exact method with large samples (n >= 171)
        # expected values generated using SymPy
        expectations = {(400, 38965): 0.48444283672113314099,
                        (401, 39516): 0.66363159823474837662,
                        (800, 156772): 0.42265448483120932055,
                        (801, 157849): 0.53437553412194416236,
                        (1600, 637472): 0.84200727400323538419,
                        (1601, 630304): 0.34465255088058593946}
        for nc, expected in expectations.items():
            res = mstats_basic._kendall_p_exact(nc[0], nc[1])
            assert_almost_equal(res, expected)
    def test_pointbiserial(self):
        # Point-biserial r for a binary x (with one masked NaN pair).
        x = [1,0,1,1,1,1,0,1,0,0,0,1,1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,1,0,
             0,0,0,0,1,-1]
        y = [14.8,13.8,12.4,10.1,7.1,6.1,5.8,4.6,4.3,3.5,3.3,3.2,3.0,
             2.8,2.8,2.5,2.4,2.3,2.1,1.7,1.7,1.5,1.3,1.3,1.2,1.2,1.1,
             0.8,0.7,0.6,0.5,0.2,0.2,0.1,np.nan]
        assert_almost_equal(mstats.pointbiserialr(x, y)[0], 0.36149, 5)
        # test for namedtuple attributes
        res = mstats.pointbiserialr(x, y)
        attributes = ('correlation', 'pvalue')
        check_named_results(res, attributes, ma=True)
class TestTrimming:
    """Tests for trim/trimboth/trimtail/trimr, trimmed statistics and
    winsorization on masked arrays."""
    def test_trim(self):
        # no limits: nothing is trimmed
        a = ma.arange(10)
        assert_equal(mstats.trim(a), [0,1,2,3,4,5,6,7,8,9])
        a = ma.arange(10)
        assert_equal(mstats.trim(a,(2,8)), [None,None,2,3,4,5,6,7,8,None])
        a = ma.arange(10)
        assert_equal(mstats.trim(a,limits=(2,8),inclusive=(False,False)),
                     [None,None,None,3,4,5,6,7,None,None])
        a = ma.arange(10)
        assert_equal(mstats.trim(a,limits=(0.1,0.2),relative=True),
                     [None,1,2,3,4,5,6,7,None,None])
        a = ma.arange(12)
        a[[0,-1]] = a[5] = masked
        assert_equal(mstats.trim(a, (2,8)),
                     [None, None, 2, 3, 4, None, 6, 7, 8, None, None, None])
        # relative trimming on a 2-D array, over various axes
        x = ma.arange(100).reshape(10, 10)
        expected = [1]*10 + [0]*70 + [1]*20
        trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=None)
        assert_equal(trimx._mask.ravel(), expected)
        trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=0)
        assert_equal(trimx._mask.ravel(), expected)
        trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=-1)
        assert_equal(trimx._mask.T.ravel(), expected)
        # same as above, but with an extra masked row inserted
        x = ma.arange(110).reshape(11, 10)
        x[1] = masked
        expected = [1]*20 + [0]*70 + [1]*20
        trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=None)
        assert_equal(trimx._mask.ravel(), expected)
        trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=0)
        assert_equal(trimx._mask.ravel(), expected)
        trimx = mstats.trim(x.T, (0.1,0.2), relative=True, axis=-1)
        assert_equal(trimx.T._mask.ravel(), expected)
    def test_trim_old(self):
        x = ma.arange(100)
        assert_equal(mstats.trimboth(x).count(), 60)
        assert_equal(mstats.trimtail(x,tail='r').count(), 80)
        x[50:70] = masked
        trimx = mstats.trimboth(x)
        assert_equal(trimx.count(), 48)
        assert_equal(trimx._mask, [1]*16 + [0]*34 + [1]*20 + [0]*14 + [1]*16)
        x._mask = nomask
        x.shape = (10,10)
        assert_equal(mstats.trimboth(x).count(), 60)
        assert_equal(mstats.trimtail(x).count(), 80)
    def test_trimr(self):
        x = ma.arange(10)
        result = mstats.trimr(x, limits=(0.15, 0.14), inclusive=(False, False))
        expected = ma.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
                            mask=[1, 1, 0, 0, 0, 0, 0, 0, 0, 1])
        assert_equal(result, expected)
        assert_equal(result.mask, expected.mask)
    def test_trimmedmean(self):
        data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
                         296,299,306,376,428,515,666,1310,2611])
        assert_almost_equal(mstats.trimmed_mean(data,0.1), 343, 0)
        assert_almost_equal(mstats.trimmed_mean(data,(0.1,0.1)), 343, 0)
        assert_almost_equal(mstats.trimmed_mean(data,(0.2,0.2)), 283, 0)
    def test_trimmed_stde(self):
        data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
                         296,299,306,376,428,515,666,1310,2611])
        assert_almost_equal(mstats.trimmed_stde(data,(0.2,0.2)), 56.13193, 5)
        assert_almost_equal(mstats.trimmed_stde(data,0.2), 56.13193, 5)
    def test_winsorization(self):
        data = ma.array([77, 87, 88,114,151,210,219,246,253,262,
                         296,299,306,376,428,515,666,1310,2611])
        assert_almost_equal(mstats.winsorize(data,(0.2,0.2)).var(ddof=1),
                            21551.4, 1)
        assert_almost_equal(
            mstats.winsorize(data, (0.2,0.2),(False,False)).var(ddof=1),
            11887.3, 1)
        # winsorizing must preserve an existing mask
        data[5] = masked
        winsorized = mstats.winsorize(data)
        assert_equal(winsorized.mask, data.mask)
    def test_winsorization_nan(self):
        data = ma.array([np.nan, np.nan, 0, 1, 2])
        assert_raises(ValueError, mstats.winsorize, data, (0.05, 0.05),
                      nan_policy='raise')
        # Testing propagate (default behavior)
        assert_equal(mstats.winsorize(data, (0.4, 0.4)),
                     ma.array([2, 2, 2, 2, 2]))
        assert_equal(mstats.winsorize(data, (0.8, 0.8)),
                     ma.array([np.nan, np.nan, np.nan, np.nan, np.nan]))
        # nan_policy='omit' leaves NaNs untouched
        assert_equal(mstats.winsorize(data, (0.4, 0.4), nan_policy='omit'),
                     ma.array([np.nan, np.nan, 2, 2, 2]))
        assert_equal(mstats.winsorize(data, (0.8, 0.8), nan_policy='omit'),
                     ma.array([np.nan, np.nan, 2, 2, 2]))
class TestMoments:
    """Tests for moment, variation, skew, kurtosis and mode on masked data."""
    # Comparison numbers are found using R v.1.5.1
    # note that length(testcase) = 4
    # testmathworks comes from documentation for the
    # Statistics Toolbox for Matlab and can be found at both
    # https://www.mathworks.com/help/stats/kurtosis.html
    # https://www.mathworks.com/help/stats/skewness.html
    # Note that both test cases came from here.
    testcase = [1,2,3,4]
    testmathworks = ma.fix_invalid([1.165, 0.6268, 0.0751, 0.3516, -0.6965,
                                    np.nan])
    testcase_2d = ma.array(
        np.array([[0.05245846, 0.50344235, 0.86589117, 0.36936353, 0.46961149],
                  [0.11574073, 0.31299969, 0.45925772, 0.72618805, 0.75194407],
                  [0.67696689, 0.91878127, 0.09769044, 0.04645137, 0.37615733],
                  [0.05903624, 0.29908861, 0.34088298, 0.66216337, 0.83160998],
                  [0.64619526, 0.94894632, 0.27855892, 0.0706151, 0.39962917]]),
        mask=np.array([[True, False, False, True, False],
                       [True, True, True, False, True],
                       [False, False, False, False, False],
                       [True, True, True, True, True],
                       [False, False, True, False, False]], dtype=bool))
    def _assert_equal(self, actual, expect, *, shape=None, dtype=None):
        # Helper: compare values (optionally broadcast) and dtypes.
        expect = np.asarray(expect)
        if shape is not None:
            expect = np.broadcast_to(expect, shape)
        assert_array_equal(actual, expect)
        if dtype is None:
            dtype = expect.dtype
        assert actual.dtype == dtype
    def test_moment(self):
        y = mstats.moment(self.testcase,1)
        assert_almost_equal(y,0.0,10)
        y = mstats.moment(self.testcase,2)
        assert_almost_equal(y,1.25)
        y = mstats.moment(self.testcase,3)
        assert_almost_equal(y,0.0)
        y = mstats.moment(self.testcase,4)
        assert_almost_equal(y,2.5625)
        # check array_like input for moment
        y = mstats.moment(self.testcase, [1, 2, 3, 4])
        assert_allclose(y, [0, 1.25, 0, 2.5625])
        # check moment input consists only of integers
        y = mstats.moment(self.testcase, 0.0)
        assert_allclose(y, 1.0)
        assert_raises(ValueError, mstats.moment, self.testcase, 1.2)
        y = mstats.moment(self.testcase, [1.0, 2, 3, 4.0])
        assert_allclose(y, [0, 1.25, 0, 2.5625])
        # test empty input
        y = mstats.moment([])
        self._assert_equal(y, np.nan, dtype=np.float64)
        y = mstats.moment(np.array([], dtype=np.float32))
        self._assert_equal(y, np.nan, dtype=np.float32)
        y = mstats.moment(np.zeros((1, 0)), axis=0)
        self._assert_equal(y, [], shape=(0,), dtype=np.float64)
        y = mstats.moment([[]], axis=1)
        self._assert_equal(y, np.nan, shape=(1,), dtype=np.float64)
        y = mstats.moment([[]], moment=[0, 1], axis=0)
        self._assert_equal(y, [], shape=(2, 0))
        x = np.arange(10.)
        x[9] = np.nan
        assert_equal(mstats.moment(x, 2), ma.masked)  # NaN value is ignored
    def test_variation(self):
        y = mstats.variation(self.testcase)
        assert_almost_equal(y,0.44721359549996, 10)
    def test_variation_ddof(self):
        # test variation with delta degrees of freedom
        # regression test for gh-13341
        a = np.array([1, 2, 3, 4, 5])
        y = mstats.variation(a, ddof=1)
        assert_almost_equal(y, 0.5270462766947299)
    def test_skewness(self):
        y = mstats.skew(self.testmathworks)
        assert_almost_equal(y,-0.29322304336607,10)
        y = mstats.skew(self.testmathworks,bias=0)
        assert_almost_equal(y,-0.437111105023940,10)
        y = mstats.skew(self.testcase)
        assert_almost_equal(y,0.0,10)
    def test_kurtosis(self):
        # Set flags for axis = 0 and fisher=0 (Pearson's definition of kurtosis
        y = mstats.kurtosis(self.testmathworks, 0, fisher=0, bias=1)
        assert_almost_equal(y, 2.1658856802973, 10)
        # kurtosis(x) gives a biased estimate of Fisher's skewness (Pearson-3)
        y = mstats.kurtosis(self.testmathworks, fisher=0, bias=0)
        assert_almost_equal(y, 3.663542721189047, 10)
        y = mstats.kurtosis(self.testcase, 0, 0)
        assert_almost_equal(y, 1.64)
        # test that kurtosis works on multidimensional masked arrays
        correct_2d = ma.array(np.array([-1.5, -3., -1.47247052385, 0.,
                                        -1.26979517952]),
                              mask=np.array([False, False, False, True,
                                             False], dtype=bool))
        assert_array_almost_equal(mstats.kurtosis(self.testcase_2d, 1),
                                  correct_2d)
        for i, row in enumerate(self.testcase_2d):
            assert_almost_equal(mstats.kurtosis(row), correct_2d[i])
        correct_2d_bias_corrected = ma.array(
            np.array([-1.5, -3., -1.88988209538, 0., -0.5234638463918877]),
            mask=np.array([False, False, False, True, False], dtype=bool))
        assert_array_almost_equal(mstats.kurtosis(self.testcase_2d, 1,
                                                  bias=False),
                                  correct_2d_bias_corrected)
        for i, row in enumerate(self.testcase_2d):
            assert_almost_equal(mstats.kurtosis(row, bias=False),
                                correct_2d_bias_corrected[i])
        # Check consistency between stats and mstats implementations
        assert_array_almost_equal_nulp(mstats.kurtosis(self.testcase_2d[2, :]),
                                       stats.kurtosis(self.testcase_2d[2, :]),
                                       nulp=4)
    def test_mode(self):
        a1 = [0,0,0,1,1,1,2,3,3,3,3,4,5,6,7]
        a2 = np.reshape(a1, (3,5))
        a3 = np.array([1,2,3,4,5,6])
        a4 = np.reshape(a3, (3,2))
        ma1 = ma.masked_where(ma.array(a1) > 2, a1)
        ma2 = ma.masked_where(a2 > 2, a2)
        ma3 = ma.masked_where(a3 < 2, a3)
        ma4 = ma.masked_where(ma.array(a4) < 2, a4)
        assert_equal(mstats.mode(a1, axis=None), (3,4))
        assert_equal(mstats.mode(a1, axis=0), (3,4))
        assert_equal(mstats.mode(ma1, axis=None), (0,3))
        assert_equal(mstats.mode(a2, axis=None), (3,4))
        assert_equal(mstats.mode(ma2, axis=None), (0,3))
        assert_equal(mstats.mode(a3, axis=None), (1,1))
        assert_equal(mstats.mode(ma3, axis=None), (2,1))
        assert_equal(mstats.mode(a2, axis=0), ([[0,0,0,1,1]], [[1,1,1,1,1]]))
        assert_equal(mstats.mode(ma2, axis=0), ([[0,0,0,1,1]], [[1,1,1,1,1]]))
        assert_equal(mstats.mode(a2, axis=-1), ([[0],[3],[3]], [[3],[3],[1]]))
        assert_equal(mstats.mode(ma2, axis=-1), ([[0],[1],[0]], [[3],[1],[0]]))
        assert_equal(mstats.mode(ma4, axis=0), ([[3,2]], [[1,1]]))
        assert_equal(mstats.mode(ma4, axis=-1), ([[2],[3],[5]], [[1],[1],[1]]))
        a1_res = mstats.mode(a1, axis=None)
        # test for namedtuple attributes
        attributes = ('mode', 'count')
        check_named_results(a1_res, attributes, ma=True)
    def test_mode_modifies_input(self):
        # regression test for gh-6428: mode(..., axis=None) may not modify
        # the input array
        im = np.zeros((100, 100))
        im[:50, :] += 1
        im[:, :50] += 1
        cp = im.copy()
        mstats.mode(im, None)
        assert_equal(im, cp)
class TestPercentile:
    """Tests for mstats.scoreatpercentile."""
    def setup_method(self):
        # Fixtures kept for parity with the original layout.
        self.a1 = [3, 4, 5, 10, -3, -5, 6]
        self.a2 = [3, -6, -2, 8, 7, 4, 2, 1]
        self.a3 = [3., 4, 5, 10, -3, -5, -6, 7.0]
    def test_percentile(self):
        # Endpoints and median of an evenly spaced sample.
        values = np.arange(8) * 0.5
        assert_equal(mstats.scoreatpercentile(values, 0), 0.)
        assert_equal(mstats.scoreatpercentile(values, 100), 3.5)
        assert_equal(mstats.scoreatpercentile(values, 50), 1.75)
    def test_2D(self):
        # The 50th percentile is computed per column.
        data = ma.array([[1, 1, 1],
                         [1, 1, 1],
                         [4, 4, 3],
                         [1, 1, 1],
                         [1, 1, 1]])
        assert_equal(mstats.scoreatpercentile(data, 50), [1, 1, 1])
class TestVariability:
    """Tests for sem, zmap and zscore; the NaN entry becomes masked."""
    testcase = ma.fix_invalid([1,2,3,4,np.nan])
    def test_sem(self):
        # This is not in R, so used: sqrt(var(testcase)*3/4) / sqrt(3)
        y = mstats.sem(self.testcase)
        assert_almost_equal(y, 0.6454972244)
        n = self.testcase.count()
        assert_allclose(mstats.sem(self.testcase, ddof=0) * np.sqrt(n/(n-2)),
                        mstats.sem(self.testcase, ddof=2))
    def test_zmap(self):
        # This is not in R, so tested by using:
        # (testcase[i]-mean(testcase,axis=0)) / sqrt(var(testcase)*3/4)
        y = mstats.zmap(self.testcase, self.testcase)
        desired_unmaskedvals = ([-1.3416407864999, -0.44721359549996,
                                 0.44721359549996, 1.3416407864999])
        assert_array_almost_equal(desired_unmaskedvals,
                                  y.data[y.mask == False], decimal=12)
    def test_zscore(self):
        # This is not in R, so tested by using:
        # (testcase[i]-mean(testcase,axis=0)) / sqrt(var(testcase)*3/4)
        y = mstats.zscore(self.testcase)
        desired = ma.fix_invalid([-1.3416407864999, -0.44721359549996,
                                  0.44721359549996, 1.3416407864999, np.nan])
        assert_almost_equal(desired, y, decimal=12)
class TestMisc:
    """Miscellaneous masked-statistics tests: obrientransform, ks_2samp
    and friedmanchisquare."""
    def test_obrientransform(self):
        # reference values rounded to 4 decimals
        args = [[5]*5+[6]*11+[7]*9+[8]*3+[9]*2+[10]*2,
                [6]+[7]*2+[8]*4+[9]*9+[10]*16]
        result = [5*[3.1828]+11*[0.5591]+9*[0.0344]+3*[1.6086]+2*[5.2817]+2*[11.0538],
                  [10.4352]+2*[4.8599]+4*[1.3836]+9*[0.0061]+16*[0.7277]]
        assert_almost_equal(np.round(mstats.obrientransform(*args).T, 4),
                            result, 4)
    def test_ks_2samp(self):
        # two-sided and one-sided KS on seasonal data with masked NaNs
        x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
             [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
             [3, 2, 5, 6, 18, 4, 9, 1, 1, nan, 1, 1, nan],
             [nan, 6, 11, 4, 17, nan, 6, 1, 1, 2, 5, 1, 1]]
        x = ma.fix_invalid(x).T
        (winter, spring, summer, fall) = x.T
        assert_almost_equal(np.round(mstats.ks_2samp(winter, spring), 4),
                            (0.1818, 0.9628))
        assert_almost_equal(np.round(mstats.ks_2samp(winter, spring, 'g'), 4),
                            (0.1469, 0.6886))
        assert_almost_equal(np.round(mstats.ks_2samp(winter, spring, 'l'), 4),
                            (0.1818, 0.6011))
    def test_friedmanchisq(self):
        # No missing values
        args = ([9.0,9.5,5.0,7.5,9.5,7.5,8.0,7.0,8.5,6.0],
                [7.0,6.5,7.0,7.5,5.0,8.0,6.0,6.5,7.0,7.0],
                [6.0,8.0,4.0,6.0,7.0,6.5,6.0,4.0,6.5,3.0])
        result = mstats.friedmanchisquare(*args)
        assert_almost_equal(result[0], 10.4737, 4)
        assert_almost_equal(result[1], 0.005317, 6)
        # Missing values
        x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],
             [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],
             [3, 2, 5, 6, 18, 4, 9, 1, 1,nan, 1, 1,nan],
             [nan, 6, 11, 4, 17,nan, 6, 1, 1, 2, 5, 1, 1]]
        x = ma.fix_invalid(x)
        result = mstats.friedmanchisquare(*x)
        assert_almost_equal(result[0], 2.0156, 4)
        assert_almost_equal(result[1], 0.5692, 4)
        # test for namedtuple attributes
        attributes = ('statistic', 'pvalue')
        check_named_results(result, attributes, ma=True)
def test_regress_simple():
    """Regress a line with sinusoidal noise. Test for #1273."""
    xs = np.linspace(0, 100, 100)
    ys = 0.2 * np.linspace(0, 100, 100) + 10 + np.sin(np.linspace(0, 20, 100))
    result = mstats.linregress(xs, ys)
    # Result is of the correct class with the expected named fields.
    lr = stats._stats_mstats_common.LinregressResult
    assert_(isinstance(result, lr))
    attributes = ('slope', 'intercept', 'rvalue', 'pvalue', 'stderr')
    check_named_results(result, attributes, ma=True)
    assert 'intercept_stderr' in dir(result)
    # Coefficients and standard errors are estimated correctly.
    assert_almost_equal(result.slope, 0.19644990055858422)
    assert_almost_equal(result.intercept, 10.211269918932341)
    assert_almost_equal(result.stderr, 0.002395781449783862)
    assert_almost_equal(result.intercept_stderr, 0.13866936078570702)
def test_theilslopes():
    """Theil-Sen estimator: slope, intercept and confidence limits."""
    # A plain sample with an exact answer.
    res = mstats.theilslopes([0, 1, 1])
    assert_almost_equal(res[0], 0.5)
    assert_almost_equal(res[1], 0.5)
    # Masked entries must not influence the fit.
    y_masked = np.ma.array([0, 1, 100, 1], mask=[False, False, True, False])
    res = mstats.theilslopes(y_masked)
    assert_almost_equal(res[0], 1./3)
    assert_almost_equal(res[1], 2./3)
    # Confidence intervals from the example in Sen (1968).
    xs = [1, 2, 3, 4, 10, 12, 18]
    ys = [9, 15, 19, 20, 45, 55, 78]
    slope, intercept, lower, upper = mstats.theilslopes(ys, xs, 0.07)
    assert_almost_equal(slope, 4)
    assert_almost_equal(upper, 4.38, decimal=2)
    assert_almost_equal(lower, 3.71, decimal=2)
def test_siegelslopes():
    """Siegel repeated-median slopes: exactness, robustness, OLS agreement."""
    # The estimator is exact for data on a straight line, both methods.
    line = 2 * np.arange(10) + 0.5
    assert_equal(mstats.siegelslopes(line), (2.0, 0.5))
    assert_equal(mstats.siegelslopes(line, method='separate'), (2.0, 0.5))

    xs = 2 * np.arange(10)
    ys = 5 * xs - 3.0
    assert_equal(mstats.siegelslopes(ys, xs), (5.0, -3.0))
    assert_equal(mstats.siegelslopes(ys, xs, method='separate'), (5.0, -3.0))

    # Robust to outliers: the breakdown point is 50%, so corrupting four
    # of ten points must not move the fit.
    ys[:4] = 1000
    assert_equal(mstats.siegelslopes(ys, xs), (5.0, -3.0))

    # Without outliers the estimate should be comparable to linregress.
    xs = np.arange(10)
    ys = -2.3 + 0.3 * xs + stats.norm.rvs(size=10, random_state=231)
    ols_slope, ols_intercept, _, _, _ = stats.linregress(xs, ys)
    for kwargs in ({}, {'method': 'separate'}):
        slope, intercept = mstats.siegelslopes(ys, xs, **kwargs)
        assert_allclose(slope, ols_slope, rtol=0.1)
        assert_allclose(intercept, ols_intercept, rtol=0.1)
def test_plotting_positions():
    """Regression test for gh-1256: alpha=beta=0 gives (i+1)/(n+1)."""
    positions = mstats.plotting_positions(np.arange(3), 0, 0)
    assert_array_almost_equal(positions.data, np.array([0.25, 0.5, 0.75]))
class TestNormalitytests():
def test_vs_nonmasked(self):
x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
assert_array_almost_equal(mstats.normaltest(x),
stats.normaltest(x))
assert_array_almost_equal(mstats.skewtest(x),
stats.skewtest(x))
assert_array_almost_equal(mstats.kurtosistest(x),
stats.kurtosistest(x))
funcs = [stats.normaltest, stats.skewtest, stats.kurtosistest]
mfuncs = [mstats.normaltest, mstats.skewtest, mstats.kurtosistest]
x = [1, 2, 3, 4]
for func, mfunc in zip(funcs, mfuncs):
assert_raises(ValueError, func, x)
assert_raises(ValueError, mfunc, x)
def test_axis_None(self):
# Test axis=None (equal to axis=0 for 1-D input)
x = np.array((-2,-1,0,1,2,3)*4)**2
assert_allclose(mstats.normaltest(x, axis=None), mstats.normaltest(x))
assert_allclose(mstats.skewtest(x, axis=None), mstats.skewtest(x))
assert_allclose(mstats.kurtosistest(x, axis=None),
mstats.kurtosistest(x))
def test_maskedarray_input(self):
# Add some masked values, test result doesn't change
x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
xm = np.ma.array(np.r_[np.inf, x, 10],
mask=np.r_[True, [False] * x.size, True])
assert_allclose(mstats.normaltest(xm), stats.normaltest(x))
assert_allclose(mstats.skewtest(xm), stats.skewtest(x))
assert_allclose(mstats.kurtosistest(xm), stats.kurtosistest(x))
def test_nd_input(self):
x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
x_2d = np.vstack([x] * 2).T
for func in [mstats.normaltest, mstats.skewtest, mstats.kurtosistest]:
res_1d = func(x)
res_2d = func(x_2d)
assert_allclose(res_2d[0], [res_1d[0]] * 2)
assert_allclose(res_2d[1], [res_1d[1]] * 2)
def test_normaltest_result_attributes(self):
x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
res = mstats.normaltest(x)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, ma=True)
def test_kurtosistest_result_attributes(self):
x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
res = mstats.kurtosistest(x)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, ma=True)
def regression_test_9033(self):
counts = [128, 0, 58, 7, 0, 41, 16, 0, 0, 167]
x = np.hstack([np.full(c, i) for i, c in enumerate(counts)])
assert_equal(mstats.kurtosistest(x)[1] < 0.01, True)
@pytest.mark.parametrize("test", ["skewtest", "kurtosistest"])
@pytest.mark.parametrize("alternative", ["less", "greater"])
def test_alternative(self, test, alternative):
x = stats.norm.rvs(loc=10, scale=2.5, size=30, random_state=123)
stats_test = getattr(stats, test)
mstats_test = getattr(mstats, test)
z_ex, p_ex = stats_test(x, alternative=alternative)
z, p = mstats_test(x, alternative=alternative)
assert_allclose(z, z_ex, atol=1e-12)
assert_allclose(p, p_ex, atol=1e-12)
x[1:5] = np.nan
x = np.ma.masked_array(x, mask=np.isnan(x))
z_ex, p_ex = stats_test(x.compressed(), alternative=alternative)
z, p = mstats_test(x, alternative=alternative)
assert_allclose(z, z_ex, atol=1e-12)
assert_allclose(p, p_ex, atol=1e-12)
def test_bad_alternative(self):
x = stats.norm.rvs(size=20, random_state=123)
msg = r"alternative must be 'less', 'greater' or 'two-sided'"
with pytest.raises(ValueError, match=msg):
mstats.skewtest(x, alternative='error')
with pytest.raises(ValueError, match=msg):
mstats.kurtosistest(x, alternative='error')
class TestFOneway():
    """Smoke tests for the masked one-way ANOVA."""

    def test_result_attributes(self):
        # The result must be a namedtuple exposing statistic/pvalue.
        group1 = np.array([655, 788], dtype=np.uint16)
        group2 = np.array([789, 772], dtype=np.uint16)
        outcome = mstats.f_oneway(group1, group2)
        check_named_results(outcome, ('statistic', 'pvalue'), ma=True)
class TestMannwhitneyu():
    """Compare mstats.mannwhitneyu against stats.mannwhitneyu.

    ``x`` and ``y`` are heavily tied rating-style samples (mostly 1s with
    a few 2s and 3s), which exercises the tie-correction code path.
    """
    x = np.array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 2., 1., 1., 2.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 3., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1.])
    y = np.array([1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1., 1.,
                  2., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 1., 1., 3.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1.,
                  1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2.,
                  2., 1., 1., 2., 1., 1., 2., 1., 2., 1., 1., 1., 1., 2.,
                  2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  1., 2., 1., 1., 1., 1., 1., 2., 2., 2., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
                  2., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1.,
                  1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 2., 1., 1.,
                  1., 1., 1., 1.])

    def test_result_attributes(self):
        # The result is a namedtuple exposing statistic/pvalue.
        res = mstats.mannwhitneyu(self.x, self.y)
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)

    def test_against_stats(self):
        # gh-4641: statistic must match stats exactly; p-value to
        # floating-point tolerance.
        res1 = mstats.mannwhitneyu(self.x, self.y)
        res2 = stats.mannwhitneyu(self.x, self.y)
        assert res1.statistic == res2.statistic
        assert_allclose(res1.pvalue, res2.pvalue)
class TestKruskal():
    """Smoke tests for the masked Kruskal-Wallis H-test."""

    def test_result_attributes(self):
        # The result must be a namedtuple exposing statistic/pvalue.
        odds = [1, 3, 5, 7, 9]
        evens = [2, 4, 6, 8, 10]
        outcome = mstats.kruskal(odds, evens)
        check_named_results(outcome, ('statistic', 'pvalue'), ma=True)
class TestTtest_rel():
    """Paired t-test: mstats.ttest_rel vs. stats.ttest_rel and edge cases."""

    def test_vs_nonmasked(self):
        # With unmasked input, results must match stats.ttest_rel for
        # 1-D, axis=None and 2-D calls.
        np.random.seed(1234567)
        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
        res1 = stats.ttest_rel(outcome[:, 0], outcome[:, 1])
        res2 = mstats.ttest_rel(outcome[:, 0], outcome[:, 1])
        assert_allclose(res1, res2)
        res1 = stats.ttest_rel(outcome[:, 0], outcome[:, 1], axis=None)
        res2 = mstats.ttest_rel(outcome[:, 0], outcome[:, 1], axis=None)
        assert_allclose(res1, res2)
        res1 = stats.ttest_rel(outcome[:, :2], outcome[:, 2:], axis=0)
        res2 = mstats.ttest_rel(outcome[:, :2], outcome[:, 2:], axis=0)
        assert_allclose(res1, res2)
        # Omitting axis must be equivalent to axis=0.
        res3 = mstats.ttest_rel(outcome[:, :2], outcome[:, 2:])
        assert_allclose(res2, res3)

    def test_fully_masked(self):
        # Fully-masked (or all-NaN) samples yield (nan, nan).
        np.random.seed(1234567)
        outcome = ma.masked_array(np.random.randn(3, 2),
                                  mask=[[1, 1, 1], [0, 0, 0]])
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in absolute")
            for pair in [(outcome[:, 0], outcome[:, 1]), ([np.nan, np.nan], [1.0, 2.0])]:
                t, p = mstats.ttest_rel(*pair)
                assert_array_equal(t, (np.nan, np.nan))
                assert_array_equal(p, (np.nan, np.nan))

    def test_result_attributes(self):
        # The result is a namedtuple exposing statistic/pvalue.
        np.random.seed(1234567)
        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
        res = mstats.ttest_rel(outcome[:, 0], outcome[:, 1])
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)

    def test_invalid_input_size(self):
        # Mismatched sample sizes along the test axis must raise.
        assert_raises(ValueError, mstats.ttest_rel,
                      np.arange(10), np.arange(11))
        x = np.arange(24)
        assert_raises(ValueError, mstats.ttest_rel,
                      x.reshape(2, 3, 4), x.reshape(2, 4, 3), axis=1)
        assert_raises(ValueError, mstats.ttest_rel,
                      x.reshape(2, 3, 4), x.reshape(2, 4, 3), axis=2)

    def test_empty(self):
        # Empty inputs produce NaN statistic and p-value.
        res1 = mstats.ttest_rel([], [])
        assert_(np.all(np.isnan(res1)))

    def test_zero_division(self):
        # NOTE(review): this and test_bad_alternative call mstats.ttest_ind,
        # not ttest_rel -- possibly a copy-paste oversight; confirm intent.
        t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1])
        assert_equal((np.abs(t), p), (np.inf, 0))
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in absolute")
            t, p = mstats.ttest_ind([0, 0, 0], [0, 0, 0])
            assert_array_equal(t, np.array([np.nan, np.nan]))
            assert_array_equal(p, np.array([np.nan, np.nan]))

    def test_bad_alternative(self):
        # An unknown ``alternative`` must raise a descriptive ValueError.
        msg = r"alternative must be 'less', 'greater' or 'two-sided'"
        with pytest.raises(ValueError, match=msg):
            mstats.ttest_ind([1, 2, 3], [4, 5, 6], alternative='foo')

    @pytest.mark.parametrize("alternative", ["less", "greater"])
    def test_alternative(self, alternative):
        # One-sided p-values match stats, with and without masked NaNs.
        x = stats.norm.rvs(loc=10, scale=5, size=25, random_state=42)
        y = stats.norm.rvs(loc=8, scale=2, size=25, random_state=42)

        t_ex, p_ex = stats.ttest_rel(x, y, alternative=alternative)
        t, p = mstats.ttest_rel(x, y, alternative=alternative)
        assert_allclose(t, t_ex, rtol=1e-14)
        assert_allclose(p, p_ex, rtol=1e-14)

        # Masked NaNs must be equivalent to using the compressed data.
        x[1:10] = np.nan
        y[1:10] = np.nan
        x = np.ma.masked_array(x, mask=np.isnan(x))
        y = np.ma.masked_array(y, mask=np.isnan(y))
        t, p = mstats.ttest_rel(x, y, alternative=alternative)
        t_ex, p_ex = stats.ttest_rel(x.compressed(), y.compressed(),
                                     alternative=alternative)
        assert_allclose(t, t_ex, rtol=1e-14)
        assert_allclose(p, p_ex, rtol=1e-14)
class TestTtest_ind():
    """Independent t-test: mstats.ttest_ind vs. stats.ttest_ind."""

    def test_vs_nonmasked(self):
        # With unmasked input, results must match stats.ttest_ind for
        # 1-D, axis=None, 2-D and both equal_var settings.
        np.random.seed(1234567)
        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
        res1 = stats.ttest_ind(outcome[:, 0], outcome[:, 1])
        res2 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1])
        assert_allclose(res1, res2)
        res1 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], axis=None)
        res2 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], axis=None)
        assert_allclose(res1, res2)
        res1 = stats.ttest_ind(outcome[:, :2], outcome[:, 2:], axis=0)
        res2 = mstats.ttest_ind(outcome[:, :2], outcome[:, 2:], axis=0)
        assert_allclose(res1, res2)
        # Omitting axis must be equivalent to axis=0.
        res3 = mstats.ttest_ind(outcome[:, :2], outcome[:, 2:])
        assert_allclose(res2, res3)
        # Both pooled (Student) and unpooled (Welch) variants must agree.
        res4 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=True)
        res5 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=True)
        assert_allclose(res4, res5)
        res4 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=False)
        res5 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=False)
        assert_allclose(res4, res5)

    def test_fully_masked(self):
        # Fully-masked (or all-NaN) samples yield (nan, nan).
        np.random.seed(1234567)
        outcome = ma.masked_array(np.random.randn(3, 2), mask=[[1, 1, 1], [0, 0, 0]])
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in absolute")
            for pair in [(outcome[:, 0], outcome[:, 1]), ([np.nan, np.nan], [1.0, 2.0])]:
                t, p = mstats.ttest_ind(*pair)
                assert_array_equal(t, (np.nan, np.nan))
                assert_array_equal(p, (np.nan, np.nan))

    def test_result_attributes(self):
        # The result is a namedtuple exposing statistic/pvalue.
        np.random.seed(1234567)
        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
        res = mstats.ttest_ind(outcome[:, 0], outcome[:, 1])
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)

    def test_empty(self):
        # Empty inputs produce NaN statistic and p-value.
        res1 = mstats.ttest_ind([], [])
        assert_(np.all(np.isnan(res1)))

    def test_zero_division(self):
        # Zero variance with a nonzero mean difference gives |t|=inf, p=0;
        # two identical constant samples give (nan, nan).
        t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1])
        assert_equal((np.abs(t), p), (np.inf, 0))
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in absolute")
            t, p = mstats.ttest_ind([0, 0, 0], [0, 0, 0])
            assert_array_equal(t, (np.nan, np.nan))
            assert_array_equal(p, (np.nan, np.nan))
        # Same behavior for the unequal-variance (Welch) variant.
        t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1], equal_var=False)
        assert_equal((np.abs(t), p), (np.inf, 0))
        assert_array_equal(mstats.ttest_ind([0, 0, 0], [0, 0, 0],
                                            equal_var=False), (np.nan, np.nan))

    def test_bad_alternative(self):
        # An unknown ``alternative`` must raise a descriptive ValueError.
        msg = r"alternative must be 'less', 'greater' or 'two-sided'"
        with pytest.raises(ValueError, match=msg):
            mstats.ttest_ind([1, 2, 3], [4, 5, 6], alternative='foo')

    @pytest.mark.parametrize("alternative", ["less", "greater"])
    def test_alternative(self, alternative):
        # One-sided p-values match stats, with and without masked NaNs.
        x = stats.norm.rvs(loc=10, scale=2, size=100, random_state=123)
        y = stats.norm.rvs(loc=8, scale=2, size=100, random_state=123)

        t_ex, p_ex = stats.ttest_ind(x, y, alternative=alternative)
        t, p = mstats.ttest_ind(x, y, alternative=alternative)
        assert_allclose(t, t_ex, rtol=1e-14)
        assert_allclose(p, p_ex, rtol=1e-14)

        # Masked NaNs must be equivalent to using the compressed data.
        x[1:10] = np.nan
        y[80:90] = np.nan
        x = np.ma.masked_array(x, mask=np.isnan(x))
        y = np.ma.masked_array(y, mask=np.isnan(y))
        t_ex, p_ex = stats.ttest_ind(x.compressed(), y.compressed(),
                                     alternative=alternative)
        t, p = mstats.ttest_ind(x, y, alternative=alternative)
        assert_allclose(t, t_ex, rtol=1e-14)
        assert_allclose(p, p_ex, rtol=1e-14)
class TestTtest_1samp():
    """One-sample t-test: mstats.ttest_1samp vs. stats.ttest_1samp."""

    def test_vs_nonmasked(self):
        # With unmasked input, results must match stats.ttest_1samp for
        # scalar and array population means, 1-D and 2-D input.
        np.random.seed(1234567)
        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
        res1 = stats.ttest_1samp(outcome[:, 0], 1)
        res2 = mstats.ttest_1samp(outcome[:, 0], 1)
        assert_allclose(res1, res2)
        res1 = stats.ttest_1samp(outcome[:, 0], outcome[:, 1], axis=None)
        res2 = mstats.ttest_1samp(outcome[:, 0], outcome[:, 1], axis=None)
        assert_allclose(res1, res2)
        res1 = stats.ttest_1samp(outcome[:, :2], outcome[:, 2:], axis=0)
        res2 = mstats.ttest_1samp(outcome[:, :2], outcome[:, 2:], axis=0)
        assert_allclose(res1, res2, atol=1e-15)
        # Omitting axis must be equivalent to axis=0.
        res3 = mstats.ttest_1samp(outcome[:, :2], outcome[:, 2:])
        assert_allclose(res2, res3)

    def test_fully_masked(self):
        # A fully-masked (or all-NaN) sample yields (nan, nan).
        np.random.seed(1234567)
        outcome = ma.masked_array(np.random.randn(3), mask=[1, 1, 1])
        expected = (np.nan, np.nan)
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in absolute")
            for pair in [((np.nan, np.nan), 0.0), (outcome, 0.0)]:
                t, p = mstats.ttest_1samp(*pair)
                assert_array_equal(p, expected)
                assert_array_equal(t, expected)

    def test_result_attributes(self):
        # The result is a namedtuple exposing statistic/pvalue.
        np.random.seed(1234567)
        outcome = np.random.randn(20, 4) + [0, 0, 1, 2]
        res = mstats.ttest_1samp(outcome[:, 0], 1)
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)

    def test_empty(self):
        # Empty input produces NaN statistic and p-value.
        res1 = mstats.ttest_1samp([], 1)
        assert_(np.all(np.isnan(res1)))

    def test_zero_division(self):
        # Zero variance with a nonzero offset gives |t|=inf, p=0; a
        # constant sample equal to the popmean gives NaN.
        t, p = mstats.ttest_1samp([0, 0, 0], 1)
        assert_equal((np.abs(t), p), (np.inf, 0))
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in absolute")
            t, p = mstats.ttest_1samp([0, 0, 0], 0)
            assert_(np.isnan(t))
            assert_array_equal(p, (np.nan, np.nan))

    def test_bad_alternative(self):
        # An unknown ``alternative`` must raise a descriptive ValueError.
        msg = r"alternative must be 'less', 'greater' or 'two-sided'"
        with pytest.raises(ValueError, match=msg):
            mstats.ttest_1samp([1, 2, 3], 4, alternative='foo')

    @pytest.mark.parametrize("alternative", ["less", "greater"])
    def test_alternative(self, alternative):
        # One-sided p-values match stats, with and without masked NaNs.
        x = stats.norm.rvs(loc=10, scale=2, size=100, random_state=123)

        t_ex, p_ex = stats.ttest_1samp(x, 9, alternative=alternative)
        t, p = mstats.ttest_1samp(x, 9, alternative=alternative)
        assert_allclose(t, t_ex, rtol=1e-14)
        assert_allclose(p, p_ex, rtol=1e-14)

        # Masked NaNs must be equivalent to using the compressed data.
        x[1:10] = np.nan
        x = np.ma.masked_array(x, mask=np.isnan(x))
        t_ex, p_ex = stats.ttest_1samp(x.compressed(), 9,
                                       alternative=alternative)
        t, p = mstats.ttest_1samp(x, 9, alternative=alternative)
        assert_allclose(t, t_ex, rtol=1e-14)
        assert_allclose(p, p_ex, rtol=1e-14)
class TestDescribe:
    """Row-wise ``mstats.describe`` on a masked 2-D array."""

    def test_basic_with_axis(self):
        # Two rows of six values; one entry masked in row 0, two in row 1.
        data = [[0, 1, 2, 3, 4, 9],
                [5, 5, 0, 9, 3, 3]]
        masks = [[0, 0, 0, 0, 0, 1],
                 [0, 0, 1, 1, 0, 0]]
        sample = np.ma.masked_array(data, mask=masks)
        summary = mstats.describe(sample, axis=1)

        # Masked entries are excluded from all per-row statistics.
        assert_equal(summary.nobs, [5, 4])
        lo, hi = summary.minmax
        assert_equal(lo, [0, 3])
        assert_equal(hi, [4, 5])
        assert_equal(summary.mean, [2.0, 4.0])
        assert_equal(summary.variance, [2.0, 1.0])
        assert_equal(summary.skewness, [0.0, 0.0])
        assert_allclose(summary.kurtosis, [-1.3, -2.0])
class TestCompareWithStats:
    """Compare mstats functions against their scipy.stats counterparts.

    ``generate_xy_sample`` pads each sample with five fill values (1e16)
    that are then masked, so every mstats call below should reproduce the
    plain stats result computed on the unpadded data.
    """

    def get_n(self):
        # Sample sizes exercised by most tests below.
        return [1000, 100, 10, 5]

    def generate_xy_sample(self, n):
        # Correlated 1-D samples x, y plus masked copies xm, ym whose
        # last five entries are fill values (1e16) masked via "> 9e15".
        np.random.seed(1234567)
        x = np.random.randn(n)
        y = x + np.random.randn(n)
        xm = np.full(len(x) + 5, 1e16)
        ym = np.full(len(y) + 5, 1e16)
        xm[0:len(x)] = x
        ym[0:len(y)] = y
        mask = xm > 9e15
        xm = np.ma.array(xm, mask=mask)
        ym = np.ma.array(ym, mask=mask)
        return x, y, xm, ym

    def generate_xy_sample2D(self, n, nx):
        # 2-D variant: nx columns from generate_xy_sample, padded with
        # five masked NaN rows at the bottom.
        x = np.full((n, nx), np.nan)
        y = np.full((n, nx), np.nan)
        xm = np.full((n+5, nx), np.nan)
        ym = np.full((n+5, nx), np.nan)
        for i in range(nx):
            x[:, i], y[:, i], dx, dy = self.generate_xy_sample(n)
        xm[0:n, :] = x[0:n]
        ym[0:n, :] = y[0:n]
        xm = np.ma.array(xm, mask=np.isnan(xm))
        ym = np.ma.array(ym, mask=np.isnan(ym))
        return x, y, xm, ym

    def test_linregress(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            result1 = stats.linregress(x, y)
            result2 = stats.mstats.linregress(xm, ym)
            assert_allclose(np.asarray(result1), np.asarray(result2))

    def test_pearsonr(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            r, p = stats.pearsonr(x, y)
            rm, pm = stats.mstats.pearsonr(xm, ym)
            assert_almost_equal(r, rm, decimal=14)
            assert_almost_equal(p, pm, decimal=14)

    def test_spearmanr(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            r, p = stats.spearmanr(x, y)
            rm, pm = stats.mstats.spearmanr(xm, ym)
            assert_almost_equal(r, rm, 14)
            assert_almost_equal(p, pm, 14)

    def test_spearmanr_backcompat_useties(self):
        # The old integer ``use_ties`` positional argument is no longer
        # supported and must raise (see gh-9204).
        x = np.arange(6)
        assert_raises(ValueError, mstats.spearmanr, x, x, False)

    def test_gmean(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            r = stats.gmean(abs(x))
            rm = stats.mstats.gmean(abs(xm))
            assert_allclose(r, rm, rtol=1e-13)

            r = stats.gmean(abs(y))
            rm = stats.mstats.gmean(abs(ym))
            assert_allclose(r, rm, rtol=1e-13)

    def test_hmean(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)

            r = stats.hmean(abs(x))
            rm = stats.mstats.hmean(abs(xm))
            assert_almost_equal(r, rm, 10)

            r = stats.hmean(abs(y))
            rm = stats.mstats.hmean(abs(ym))
            assert_almost_equal(r, rm, 10)

    def test_skew(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)

            r = stats.skew(x)
            rm = stats.mstats.skew(xm)
            assert_almost_equal(r, rm, 10)

            r = stats.skew(y)
            rm = stats.mstats.skew(ym)
            assert_almost_equal(r, rm, 10)

    def test_moment(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)

            r = stats.moment(x)
            rm = stats.mstats.moment(xm)
            assert_almost_equal(r, rm, 10)

            r = stats.moment(y)
            rm = stats.mstats.moment(ym)
            assert_almost_equal(r, rm, 10)

    def test_zscore(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)

            # reference solution
            zx = (x - x.mean()) / x.std()
            zy = (y - y.mean()) / y.std()

            # validate stats
            assert_allclose(stats.zscore(x), zx, rtol=1e-10)
            assert_allclose(stats.zscore(y), zy, rtol=1e-10)

            # compare stats and mstats
            assert_allclose(stats.zscore(x), stats.mstats.zscore(xm[0:len(x)]),
                            rtol=1e-10)
            assert_allclose(stats.zscore(y), stats.mstats.zscore(ym[0:len(y)]),
                            rtol=1e-10)

    def test_kurtosis(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            r = stats.kurtosis(x)
            rm = stats.mstats.kurtosis(xm)
            assert_almost_equal(r, rm, 10)

            r = stats.kurtosis(y)
            rm = stats.mstats.kurtosis(ym)
            assert_almost_equal(r, rm, 10)

    def test_sem(self):
        # example from stats.sem doc
        a = np.arange(20).reshape(5, 4)
        am = np.ma.array(a)
        r = stats.sem(a, ddof=1)
        rm = stats.mstats.sem(am, ddof=1)

        assert_allclose(r, 2.82842712, atol=1e-5)
        assert_allclose(rm, 2.82842712, atol=1e-5)

        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            assert_almost_equal(stats.mstats.sem(xm, axis=None, ddof=0),
                                stats.sem(x, axis=None, ddof=0), decimal=13)
            assert_almost_equal(stats.mstats.sem(ym, axis=None, ddof=0),
                                stats.sem(y, axis=None, ddof=0), decimal=13)
            assert_almost_equal(stats.mstats.sem(xm, axis=None, ddof=1),
                                stats.sem(x, axis=None, ddof=1), decimal=13)
            assert_almost_equal(stats.mstats.sem(ym, axis=None, ddof=1),
                                stats.sem(y, axis=None, ddof=1), decimal=13)

    def test_describe(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            r = stats.describe(x, ddof=1)
            rm = stats.mstats.describe(xm, ddof=1)
            # Compare all six fields of the DescribeResult.
            for ii in range(6):
                assert_almost_equal(np.asarray(r[ii]),
                                    np.asarray(rm[ii]),
                                    decimal=12)

    def test_describe_result_attributes(self):
        actual = mstats.describe(np.arange(5))
        attributes = ('nobs', 'minmax', 'mean', 'variance', 'skewness',
                      'kurtosis')
        check_named_results(actual, attributes, ma=True)

    def test_rankdata(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            r = stats.rankdata(x)
            rm = stats.mstats.rankdata(x)
            assert_allclose(r, rm)

    def test_tmean(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            assert_almost_equal(stats.tmean(x),stats.mstats.tmean(xm), 14)
            assert_almost_equal(stats.tmean(y),stats.mstats.tmean(ym), 14)

    def test_tmax(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            assert_almost_equal(stats.tmax(x,2.),
                                stats.mstats.tmax(xm,2.), 10)
            assert_almost_equal(stats.tmax(y,2.),
                                stats.mstats.tmax(ym,2.), 10)

            assert_almost_equal(stats.tmax(x, upperlimit=3.),
                                stats.mstats.tmax(xm, upperlimit=3.), 10)
            assert_almost_equal(stats.tmax(y, upperlimit=3.),
                                stats.mstats.tmax(ym, upperlimit=3.), 10)

    def test_tmin(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            assert_equal(stats.tmin(x), stats.mstats.tmin(xm))
            assert_equal(stats.tmin(y), stats.mstats.tmin(ym))

            assert_almost_equal(stats.tmin(x, lowerlimit=-1.),
                                stats.mstats.tmin(xm, lowerlimit=-1.), 10)
            assert_almost_equal(stats.tmin(y, lowerlimit=-1.),
                                stats.mstats.tmin(ym, lowerlimit=-1.), 10)

    def test_zmap(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            z = stats.zmap(x, y)
            zm = stats.mstats.zmap(xm, ym)
            # Only the unmasked leading part of the result is comparable.
            assert_allclose(z, zm[0:len(z)], atol=1e-10)

    def test_variation(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            assert_almost_equal(stats.variation(x), stats.mstats.variation(xm),
                                decimal=12)
            assert_almost_equal(stats.variation(y), stats.mstats.variation(ym),
                                decimal=12)

    def test_tvar(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            assert_almost_equal(stats.tvar(x), stats.mstats.tvar(xm),
                                decimal=12)
            assert_almost_equal(stats.tvar(y), stats.mstats.tvar(ym),
                                decimal=12)

    def test_trimboth(self):
        a = np.arange(20)
        b = stats.trimboth(a, 0.1)
        bm = stats.mstats.trimboth(a, 0.1)
        # mstats returns a masked array; compare against its valid data.
        assert_allclose(np.sort(b), bm.data[~bm.mask])

    def test_tsem(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            assert_almost_equal(stats.tsem(x), stats.mstats.tsem(xm),
                                decimal=14)
            assert_almost_equal(stats.tsem(y), stats.mstats.tsem(ym),
                                decimal=14)
            assert_almost_equal(stats.tsem(x, limits=(-2., 2.)),
                                stats.mstats.tsem(xm, limits=(-2., 2.)),
                                decimal=14)

    def test_skewtest(self):
        # this test is for 1D data
        for n in self.get_n():
            if n > 8:
                x, y, xm, ym = self.generate_xy_sample(n)
                r = stats.skewtest(x)
                rm = stats.mstats.skewtest(xm)
                assert_allclose(r, rm)

    def test_skewtest_result_attributes(self):
        x = np.array((-2, -1, 0, 1, 2, 3)*4)**2
        res = mstats.skewtest(x)
        attributes = ('statistic', 'pvalue')
        check_named_results(res, attributes, ma=True)

    def test_skewtest_2D_notmasked(self):
        # a normal ndarray is passed to the masked function
        x = np.random.random((20, 2)) * 20.
        r = stats.skewtest(x)
        rm = stats.mstats.skewtest(x)
        assert_allclose(np.asarray(r), np.asarray(rm))

    def test_skewtest_2D_WithMask(self):
        nx = 2
        for n in self.get_n():
            if n > 8:
                x, y, xm, ym = self.generate_xy_sample2D(n, nx)
                r = stats.skewtest(x)
                rm = stats.mstats.skewtest(xm)

                # Compare the per-column statistics (first result field).
                assert_equal(r[0][0], rm[0][0])
                assert_equal(r[0][1], rm[0][1])

    def test_normaltest(self):
        with np.errstate(over='raise'), suppress_warnings() as sup:
            sup.filter(UserWarning, "kurtosistest only valid for n>=20")
            for n in self.get_n():
                if n > 8:
                    x, y, xm, ym = self.generate_xy_sample(n)
                    r = stats.normaltest(x)
                    rm = stats.mstats.normaltest(xm)
                    assert_allclose(np.asarray(r), np.asarray(rm))

    def test_find_repeats(self):
        x = np.asarray([1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 4]).astype('float')
        tmp = np.asarray([1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5]).astype('float')
        mask = (tmp == 5.)
        xm = np.ma.array(tmp, mask=mask)
        x_orig, xm_orig = x.copy(), xm.copy()

        r = stats.find_repeats(x)
        rm = stats.mstats.find_repeats(xm)

        assert_equal(r, rm)
        # The inputs must not be modified in place.
        assert_equal(x, x_orig)
        assert_equal(xm, xm_orig)

        # This crazy behavior is expected by count_tied_groups, but is not
        # in the docstring...
        _, counts = stats.mstats.find_repeats([])
        assert_equal(counts, np.array(0, dtype=np.intp))

    def test_kendalltau(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            r = stats.kendalltau(x, y)
            rm = stats.mstats.kendalltau(xm, ym)
            assert_almost_equal(r[0], rm[0], decimal=10)
            assert_almost_equal(r[1], rm[1], decimal=7)

    def test_obrientransform(self):
        for n in self.get_n():
            x, y, xm, ym = self.generate_xy_sample(n)
            r = stats.obrientransform(x)
            rm = stats.mstats.obrientransform(xm)
            assert_almost_equal(r.T, rm[0:len(x)])

    def test_ks_1samp(self):
        """Checks that mstats.ks_1samp and stats.ks_1samp agree on masked arrays."""
        for mode in ['auto', 'exact', 'asymp']:
            with suppress_warnings() as sup:
                for alternative in ['less', 'greater', 'two-sided']:
                    for n in self.get_n():
                        x, y, xm, ym = self.generate_xy_sample(n)
                        res1 = stats.ks_1samp(x, stats.norm.cdf, alternative=alternative, mode=mode)
                        res2 = stats.mstats.ks_1samp(xm, stats.norm.cdf, alternative=alternative, mode=mode)
                        assert_equal(np.asarray(res1), np.asarray(res2))
                        res3 = stats.ks_1samp(xm, stats.norm.cdf, alternative=alternative, mode=mode)
                        assert_equal(np.asarray(res1), np.asarray(res3))

    def test_kstest_1samp(self):
        """Checks that 1-sample mstats.kstest and stats.kstest agree on masked arrays."""
        for mode in ['auto', 'exact', 'asymp']:
            with suppress_warnings() as sup:
                for alternative in ['less', 'greater', 'two-sided']:
                    for n in self.get_n():
                        x, y, xm, ym = self.generate_xy_sample(n)
                        res1 = stats.kstest(x, 'norm', alternative=alternative, mode=mode)
                        res2 = stats.mstats.kstest(xm, 'norm', alternative=alternative, mode=mode)
                        assert_equal(np.asarray(res1), np.asarray(res2))
                        res3 = stats.kstest(xm, 'norm', alternative=alternative, mode=mode)
                        assert_equal(np.asarray(res1), np.asarray(res3))

    def test_ks_2samp(self):
        """Checks that mstats.ks_2samp and stats.ks_2samp agree on masked arrays.
        gh-8431"""
        for mode in ['auto', 'exact', 'asymp']:
            with suppress_warnings() as sup:
                if mode in ['auto', 'exact']:
                    # The exact method can warn and fall back to asymp.
                    sup.filter(RuntimeWarning,
                               "ks_2samp: Exact calculation unsuccessful. Switching to mode=asymp.")
                for alternative in ['less', 'greater', 'two-sided']:
                    for n in self.get_n():
                        x, y, xm, ym = self.generate_xy_sample(n)
                        res1 = stats.ks_2samp(x, y, alternative=alternative, mode=mode)
                        res2 = stats.mstats.ks_2samp(xm, ym, alternative=alternative, mode=mode)
                        assert_equal(np.asarray(res1), np.asarray(res2))
                        res3 = stats.ks_2samp(xm, y, alternative=alternative, mode=mode)
                        assert_equal(np.asarray(res1), np.asarray(res3))

    def test_kstest_2samp(self):
        """Checks that 2-sample mstats.kstest and stats.kstest agree on masked arrays."""
        for mode in ['auto', 'exact', 'asymp']:
            with suppress_warnings() as sup:
                if mode in ['auto', 'exact']:
                    # The exact method can warn and fall back to asymp.
                    sup.filter(RuntimeWarning,
                               "ks_2samp: Exact calculation unsuccessful. Switching to mode=asymp.")
                for alternative in ['less', 'greater', 'two-sided']:
                    for n in self.get_n():
                        x, y, xm, ym = self.generate_xy_sample(n)
                        res1 = stats.kstest(x, y, alternative=alternative, mode=mode)
                        res2 = stats.mstats.kstest(xm, ym, alternative=alternative, mode=mode)
                        assert_equal(np.asarray(res1), np.asarray(res2))
                        res3 = stats.kstest(xm, y, alternative=alternative, mode=mode)
                        assert_equal(np.asarray(res1), np.asarray(res3))

    def test_nametuples_agree(self):
        # KstestResult and Ks_2sampResult must remain interchangeable.
        result = stats.kstest([1, 2], [3, 4])
        assert_(isinstance(result, stats.stats.KstestResult))
        result2 = stats.stats.Ks_2sampResult(result.statistic, result.pvalue)
        assert_(isinstance(result2, stats.stats.Ks_2sampResult))
        assert_equal(result, result2)
class TestBrunnerMunzel:
    """Tests for mstats.brunnermunzel.

    Reference statistics and p-values were computed with R's ``lawstat``
    package on the data from (Lumley, 1996); NaNs are masked out.
    """
    X = np.ma.masked_invalid([1, 2, 1, 1, 1, np.nan, 1, 1,
                              1, 1, 1, 2, 4, 1, 1, np.nan])
    Y = np.ma.masked_invalid([3, 3, 4, 3, np.nan, 1, 2, 3, 1, 1, 5, 4])
    significant = 14

    def test_brunnermunzel_one_sided(self):
        # Swapping the samples while flipping the alternative must give
        # the same p-value and a negated statistic.
        u1, p1 = mstats.brunnermunzel(self.X, self.Y, alternative='less')
        u2, p2 = mstats.brunnermunzel(self.Y, self.X, alternative='greater')
        u3, p3 = mstats.brunnermunzel(self.X, self.Y, alternative='greater')
        u4, p4 = mstats.brunnermunzel(self.Y, self.X, alternative='less')

        assert_almost_equal(p1, p2, decimal=self.significant)
        assert_almost_equal(p3, p4, decimal=self.significant)
        assert_(p1 != p3)
        assert_almost_equal(u1, 3.1374674823029505,
                            decimal=self.significant)
        assert_almost_equal(u2, -3.1374674823029505,
                            decimal=self.significant)
        assert_almost_equal(u3, 3.1374674823029505,
                            decimal=self.significant)
        assert_almost_equal(u4, -3.1374674823029505,
                            decimal=self.significant)
        assert_almost_equal(p1, 0.0028931043330757342,
                            decimal=self.significant)
        assert_almost_equal(p3, 0.99710689566692423,
                            decimal=self.significant)

    def test_brunnermunzel_two_sided(self):
        u1, p1 = mstats.brunnermunzel(self.X, self.Y, alternative='two-sided')
        u2, p2 = mstats.brunnermunzel(self.Y, self.X, alternative='two-sided')

        assert_almost_equal(p1, p2, decimal=self.significant)
        assert_almost_equal(u1, 3.1374674823029505,
                            decimal=self.significant)
        assert_almost_equal(u2, -3.1374674823029505,
                            decimal=self.significant)
        assert_almost_equal(p1, 0.0057862086661515377,
                            decimal=self.significant)

    def test_brunnermunzel_default(self):
        # The default alternative is 'two-sided'; results must match the
        # explicit two-sided call.
        u1, p1 = mstats.brunnermunzel(self.X, self.Y)
        u2, p2 = mstats.brunnermunzel(self.Y, self.X)

        assert_almost_equal(p1, p2, decimal=self.significant)
        assert_almost_equal(u1, 3.1374674823029505,
                            decimal=self.significant)
        assert_almost_equal(u2, -3.1374674823029505,
                            decimal=self.significant)
        assert_almost_equal(p1, 0.0057862086661515377,
                            decimal=self.significant)

    def test_brunnermunzel_alternative_error(self):
        # An unrecognized ``alternative`` must raise ValueError.
        alternative = "error"
        assert_(alternative not in ["two-sided", "greater", "less"])
        assert_raises(ValueError,
                      mstats.brunnermunzel,
                      self.X,
                      self.Y,
                      alternative=alternative)

    def test_brunnermunzel_distribution_norm(self):
        u1, p1 = mstats.brunnermunzel(self.X, self.Y, distribution="normal")
        u2, p2 = mstats.brunnermunzel(self.Y, self.X, distribution="normal")
        assert_almost_equal(p1, p2, decimal=self.significant)
        assert_almost_equal(u1, 3.1374674823029505,
                            decimal=self.significant)
        assert_almost_equal(u2, -3.1374674823029505,
                            decimal=self.significant)
        assert_almost_equal(p1, 0.0017041417600383024,
                            decimal=self.significant)

    def test_brunnermunzel_distribution_error(self):
        # An unrecognized ``distribution`` must raise ValueError.
        # BUG FIX: the sanity check previously tested ``alternative``
        # against ["t", "normal"], which was vacuously true; it must
        # test ``distribution``.
        distribution = "error"
        assert_(distribution not in ["t", "normal"])
        assert_raises(ValueError,
                      mstats.brunnermunzel,
                      self.X,
                      self.Y,
                      distribution=distribution)

    def test_brunnermunzel_empty_input(self):
        # Empty samples yield NaN statistic and p-value.  (Renamed from
        # the typo ``test_brunnermunzel_empty_imput``.)
        u1, p1 = mstats.brunnermunzel(self.X, [])
        u2, p2 = mstats.brunnermunzel([], self.Y)
        u3, p3 = mstats.brunnermunzel([], [])

        assert_(np.isnan(u1))
        assert_(np.isnan(p1))
        assert_(np.isnan(u2))
        assert_(np.isnan(p2))
        assert_(np.isnan(u3))
        assert_(np.isnan(p3))
| true | true |
f7fd5c2b6fdd6bd9ae86f2c80b85dd2da201fb55 | 1,913 | py | Python | astroquery/utils/progressbar.py | wschoenell/astroquery | fe8a5e31035a1e9cdcf2603fb4da9e2fc5000d31 | [
"BSD-3-Clause"
] | 1 | 2015-05-10T00:58:21.000Z | 2015-05-10T00:58:21.000Z | astroquery/utils/progressbar.py | wschoenell/astroquery | fe8a5e31035a1e9cdcf2603fb4da9e2fc5000d31 | [
"BSD-3-Clause"
] | null | null | null | astroquery/utils/progressbar.py | wschoenell/astroquery | fe8a5e31035a1e9cdcf2603fb4da9e2fc5000d31 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import gzip
import sys
from astropy.extern.six import StringIO
from astropy.extern.six.moves import urllib
from astropy.io import fits
__all__ = ['chunk_report','chunk_read']
def chunk_report(bytes_so_far, chunk_size, total_size):
    """Print a one-line, carriage-return-terminated progress report.

    ``chunk_size`` is accepted for report-hook signature compatibility
    but is not used.  When ``total_size`` is not positive (i.e. the
    total download size is unknown) only the running byte count is shown.
    """
    megabytes = bytes_so_far / 1024.**2
    if total_size > 0:
        fraction = round(float(bytes_so_far) / total_size * 100, 2)
        sys.stdout.write("Downloaded %12.2g of %12.2g Mb (%6.2f%%)\r" %
                         (megabytes, total_size / 1024.**2, fraction))
    else:
        sys.stdout.write("Downloaded %10.2g Mb\r" % (megabytes,))
def chunk_read(response, chunk_size=1024, report_hook=None):
    """Read *response* to exhaustion in chunks and return the bytes.

    Parameters
    ----------
    response : file-like
        Object with ``info()`` (mapping with a ``Content-Length`` key) and
        ``read(n)`` returning bytes, as produced by ``urllib`` openers.
    chunk_size : int
        Number of bytes requested per ``read`` call.
    report_hook : callable, optional
        Called as ``report_hook(bytes_so_far, chunk_size, total_size)`` after
        every non-empty chunk; a newline is written to stdout when done.

    Returns
    -------
    bytes
        The full response body.
    """
    content_length = response.info().get('Content-Length')
    if content_length is None:
        # Unknown size; report_hook gets total_size == 0.
        total_size = 0
    else:
        total_size = int(content_length.strip())
    bytes_so_far = 0
    # Accumulate chunks in a list and join once at the end: repeated
    # ``bytes += chunk`` is O(n**2) for large downloads.
    chunks = []
    while True:
        chunk = response.read(chunk_size)
        if not chunk:
            if report_hook:
                sys.stdout.write('\n')
            break
        chunks.append(chunk)
        bytes_so_far += len(chunk)
        if report_hook:
            report_hook(bytes_so_far, chunk_size, total_size)
    return b"".join(chunks)
def retrieve(url, outfile, opener=None, overwrite=False):
    """
    "retrieve" (i.e., download to file) a URL.

    Downloads *url* with progress reporting, parses the payload as a FITS
    file (falling back to gzip-compressed FITS), and writes it to *outfile*.
    *opener* is an optional ``urllib`` opener; *overwrite* is passed through
    as astropy's ``clobber`` flag.
    """
    # NOTE(review): chunk_read returns bytes, but six's StringIO is used
    # below -- on Python 3 this likely needs io.BytesIO instead; confirm.
    if opener is None:
        opener = urllib.build_opener()
    page = opener.open(url)
    results = chunk_read(page, report_hook=chunk_report)
    S = StringIO(results)
    try:
        fitsfile = fits.open(S,ignore_missing_end=True)
    except IOError:
        # Not plain FITS: rewind and retry assuming gzip compression.
        S.seek(0)
        G = gzip.GzipFile(fileobj=S)
        fitsfile = fits.open(G,ignore_missing_end=True)
    # NOTE(review): ``clobber`` is deprecated in modern astropy in favor of
    # ``overwrite`` -- verify against the pinned astropy version.
    fitsfile.writeto(outfile, clobber=overwrite)
| 25.506667 | 71 | 0.636696 |
import gzip
import sys
from astropy.extern.six import StringIO
from astropy.extern.six.moves import urllib
from astropy.io import fits
__all__ = ['chunk_report','chunk_read']
def chunk_report(bytes_so_far, chunk_size, total_size):
if total_size > 0:
percent = float(bytes_so_far) / total_size
percent = round(percent*100, 2)
sys.stdout.write("Downloaded %12.2g of %12.2g Mb (%6.2f%%)\r" %
(bytes_so_far / 1024.**2, total_size / 1024.**2, percent))
else:
sys.stdout.write("Downloaded %10.2g Mb\r" %
(bytes_so_far / 1024.**2))
def chunk_read(response, chunk_size=1024, report_hook=None):
content_length = response.info().get('Content-Length')
if content_length is None:
total_size = 0
else:
total_size = content_length.strip()
total_size = int(total_size)
bytes_so_far = 0
result_string = b""
while True:
chunk = response.read(chunk_size)
result_string += chunk
bytes_so_far += len(chunk)
if not chunk:
if report_hook:
sys.stdout.write('\n')
break
if report_hook:
report_hook(bytes_so_far, chunk_size, total_size)
return result_string
def retrieve(url, outfile, opener=None, overwrite=False):
if opener is None:
opener = urllib.build_opener()
page = opener.open(url)
results = chunk_read(page, report_hook=chunk_report)
S = StringIO(results)
try:
fitsfile = fits.open(S,ignore_missing_end=True)
except IOError:
S.seek(0)
G = gzip.GzipFile(fileobj=S)
fitsfile = fits.open(G,ignore_missing_end=True)
fitsfile.writeto(outfile, clobber=overwrite)
| true | true |
f7fd5c9c66f2625fcbdc54656e71dd7e346dd0eb | 7,729 | py | Python | datageneration/dbmake_contour.py | utlive/VIDMAP | 60656d532ac497c1070f1c94c06807b2d57e2af4 | [
"Unlicense"
] | 1 | 2022-02-21T02:45:25.000Z | 2022-02-21T02:45:25.000Z | datageneration/dbmake_contour.py | utlive/VIDMAP | 60656d532ac497c1070f1c94c06807b2d57e2af4 | [
"Unlicense"
] | null | null | null | datageneration/dbmake_contour.py | utlive/VIDMAP | 60656d532ac497c1070f1c94c06807b2d57e2af4 | [
"Unlicense"
] | null | null | null | import skimage.io
import skvideo.io
import os
import h5py
from sklearn.externals import joblib
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import f1_score
import scipy.misc
import scipy.signal
import numpy as np
from sporco import util
import matplotlib.pyplot as plt
import pylab as py
import glob
from PIL import Image
import cv2
import sys
# normalizations
def gauss_window(lw, sigma):
    """Return normalized 1-D Gaussian filter weights of length ``2*lw + 1``.

    The weights are symmetric about the center element and sum to 1.
    """
    half_width = int(lw)
    variance = float(sigma) ** 2
    offsets = np.arange(-half_width, half_width + 1, dtype=np.float64)
    weights = np.exp(-0.5 * offsets * offsets / variance)
    weights /= weights.sum()
    return weights.tolist()
avg_window = gauss_window(100, 30.0)
def hp_image(image, window_arr):
    """Split *image* into (highpass, lowpass) parts via separable filtering.

    The lowpass image is a 2-D separable correlation of *image* with
    *window_arr* applied along both axes; the highpass part is the residual.

    Returns
    -------
    (highpass, lowpass) : tuple of 2-D arrays, highpass + lowpass == image.
    """
    # The module only imports scipy.misc / scipy.signal; scipy.ndimage is
    # not guaranteed to be loaded, so import it explicitly here.
    import scipy.ndimage
    extend_mode = 'reflect'
    image = np.array(image).astype(np.float32)
    rows, cols = image.shape
    mu_image = np.zeros((rows, cols))
    scipy.ndimage.correlate1d(image, window_arr, 0, mu_image, mode=extend_mode)
    scipy.ndimage.correlate1d(mu_image, window_arr, 1, mu_image, mode=extend_mode)
    return image - mu_image, mu_image
def get_postrainpatches(hdf5_im, hdf5_lab, hdf5_trainset, idx=0, traintest=0):
    """Generate positive (artifact) patches; returns the next free dataset index."""
    return genericpospatcher(hdf5_im, hdf5_lab, hdf5_trainset, idx=idx, traintest=traintest)
def genericpospatcher(hdf5_im, hdf5_lab, hdf5_trainset, idx=0, traintest=0):
    """Fill the HDF5 arrays with "positive" (quantization-banding) patches.

    Synthesizes smooth random-gradient images, quantizes them (optionally
    after adding Gaussian noise) to create banding/contouring artifacts, and
    stores 100x100 crops with label 1 starting at *idx*.  *traintest* is
    written to the set flag (0 = train, 1 = test).  Returns the next free
    dataset index.  Python 2 syntax (print statements).
    """
    # Crops are cut 40 px larger than stored; a 20 px border is trimmed
    # below so filtering edge effects never reach the stored patch.
    width = 100+40
    height = 100+40
    #lst480p = np.array(glob.glob("/mnt/hd3/scenes/480p/*avi"))
    lst = np.array(glob.glob("images/*"))  # NOTE(review): unused below
    #lst480p = np.sort(lst480p)
    n_samples = 1000
    for jjj in range(n_samples):
        print jjj, n_samples
        #vid_pris = skvideo.io.vread(fname, as_grey=True).astype(np.float32)
        # Random noise image, then lowpass it to get a smooth gradient field.
        vid_pris = np.random.random(size=(1, 500, 500, 1))*255
        mi = np.min(vid_pris)
        ma = np.max(vid_pris)
        randomwidth=np.random.random()*20 + 20
        avg_window = gauss_window(np.int32(np.round(randomwidth)*3), randomwidth)
        _, blur = hp_image(vid_pris[0, :, :, 0], avg_window)
        # Rescale the lowpass image back to the original intensity range.
        blur -= np.min(blur)
        blur /= np.max(blur)
        blur *= (ma - mi)
        blur += mi
        # film grain noise
        blur = blur.astype(np.uint8)
        vid_pris[0, :, :, 0] = blur
        T, H, W, C = vid_pris.shape
        adj_h = H - height
        adj_w = W - width
        # All valid top-left crop positions, visited in random order.
        iv, jv = np.meshgrid(np.arange(adj_h), np.arange(adj_w), sparse=False, indexing='ij')
        iv = iv.reshape(-1)
        jv = jv.reshape(-1)
        jdx = np.random.permutation(adj_h*adj_w)
        iv = iv[jdx]
        jv = jv[jdx]
        rpy = 0
        limit = 0
        for (y, x) in zip(iv, jv):
            rpy += 1
            t = 0
            goodpatch = vid_pris[0, y:y+height, x:x+width, 0]
            badpatch = goodpatch.copy()
            badpatch = badpatch.astype(np.float32)
            badpatch2 = badpatch.copy()
            # With ~50% probability add Gaussian noise of random strength.
            A = np.random.normal(size=badpatch.shape)
            B = np.random.random(1)*10
            if B>5:
                B -= 5
            else:
                B *= 0
            badpatch += A*B#np.random.normal(size=vid_pris.shape)
            badpatch[badpatch<0] = 0
            badpatch[badpatch>255] = 255
            # random amount of change
            # Quantize to 2**amt-wide bins to induce visible banding.
            amt = np.random.randint(3, 6)
            badpatch /= 2**amt
            badpatch = np.floor(badpatch)
            badpatch *= 2**amt
            # badpatch2 = quantized-only copy, used for the flatness check.
            badpatch2 /= 2**amt
            badpatch2 = np.floor(badpatch2)
            badpatch2 *= 2**amt
            diff = np.mean((badpatch - goodpatch)**2)
            # Skip crops where quantization barely changed anything.
            if diff < 1.5:
                continue
            #print diff
            #skimage.io.imsave("extract/test_%d.png" % (idx,), badpatch.astype(np.uint8))
            #exit(0)
            # make sure there is some non-zero variance in the center of the patch
            if(np.std(badpatch2[55:-55, 55:-55])<1e-9):
                print "bad patch"
                continue
            #preprocess = preprocess[:, 5:-5, 5:-5]
            # Trim the safety border down to the stored 100x100 size.
            badpatch = badpatch[20:-20, 20:-20]
            hdf5_im[idx] = badpatch
            hdf5_lab[idx] = 1
            hdf5_trainset[idx] = traintest
            #skimage.io.imsave("extract/%d.png" % (idx,), patch)
            limit += 1
            idx += 1
            # At most 200 patches per synthesized image.
            if limit >= 200:
                break
    return idx
def get_negtrainpatches(image_patches, labels, trainset, idx=0, traintest=0):
    """Generate negative (artifact-free) patches; returns the next free dataset index."""
    return genericnegpatcher(image_patches, labels, trainset, idx=idx, traintest=traintest)
def genericnegpatcher(hdf5_im, hdf5_lab, hdf5_trainset, idx=0, traintest=0):
    """Fill the HDF5 arrays with "negative" (artifact-free) patches.

    Sources are synthetic smooth images plus pristine 480p/1080p video
    scenes; 100x100 crops are stored with label 0 starting at *idx*.
    *traintest* selects the train (0) or test (1) video split and is also
    written to the set flag.  Returns the next free dataset index.
    Python 2 syntax (print statements, xrange, integer division).
    """
    # Crops are cut 40 px larger than stored; a 20 px border is trimmed
    # below so filtering edge effects never reach the stored patch.
    width = 100+40
    height = 100+40
    lst = np.array(glob.glob("images/*"))
    #lst480p = np.sort(lst480p)
    # Repeat the first 10 image entries 100x so synthetic sources are
    # well represented alongside the video scenes.
    tlst = []
    for i in xrange(1000/10):
        tlst = np.hstack((tlst, lst[:10]))
    lst = tlst
    lst480p = np.array(glob.glob("/mnt/hd3/scenes/480p/*avi"))
    lst1080p = np.array(glob.glob("/mnt/hd3/scenes/1080p/*avi"))
    lst480p = np.sort(lst480p)
    lst1080p = np.sort(lst1080p)
    # Deterministic train/test split of the video scene lists.
    if traintest == 0:
        lst = np.hstack((lst, lst480p[:575], lst1080p[:215]))
    else:
        lst = np.hstack((lst, lst480p[575:], lst1080p[215:]))
    #lst = np.hstack((lst, lst480p[57:114], lst1080p[21:42]))
    n_samples = len(lst)
    for jjj, fname in enumerate(lst):
        print jjj, n_samples
        if "images" in fname:
            # Synthetic source: smooth random gradient image (same recipe as
            # the positive patcher, but never quantized).
            print "gen"
            vid_pris = np.random.random(size=(1, 500, 500, 1))*255
            mi = np.min(vid_pris)
            ma = np.max(vid_pris)
            randomwidth=np.random.random()*20 + 10
            avg_window = gauss_window(np.int32(np.round(randomwidth)*3), randomwidth)
            _, blur = hp_image(vid_pris[0, :, :, 0], avg_window)
            blur -= np.min(blur)
            blur /= np.max(blur)
            blur *= (ma - mi)
            blur += mi
            blur = blur.astype(np.uint8)
            vid_pris[0, :, :, 0] = blur
            vid_pris = vid_pris.astype(np.float32)
        else:
            # Video source: load the whole scene as grayscale frames.
            vid_pris = skvideo.io.vread(fname, as_grey=True).astype(np.float32)
        T, H, W, C = vid_pris.shape
        adj_h = H - height
        adj_w = W - width
        # All valid top-left crop positions, visited in random order.
        iv, jv = np.meshgrid(np.arange(adj_h), np.arange(adj_w), sparse=False, indexing='ij')
        iv = iv.reshape(-1)
        jv = jv.reshape(-1)
        jdx = np.random.permutation(adj_h*adj_w)
        iv = iv[jdx]
        jv = jv[jdx]
        limit = 0
        rby = 0
        tv = np.arange(T)
        for (y, x) in zip(iv, jv):
            # Pick a random frame for each crop position.
            # NOTE(review): only crops frame 0 below despite shuffling tv -- confirm intent.
            np.random.shuffle(tv)
            t = tv[0]
            goodpatch = vid_pris[0, y:y+height, x:x+width, 0].astype(np.float32)
            #preprocess = preprocess[:, 5:-5, 5:-5]
            # Trim the safety border down to the stored 100x100 size.
            goodpatch = goodpatch[20:-20, 20:-20]
            if "images" in fname:
                # With ~50% probability add Gaussian noise of random strength.
                A = np.random.normal(size=goodpatch.shape)
                B = np.random.random(1)*10
                if B>5:
                    B -= 5
                else:
                    B *= 0
                goodpatch += A*B#np.random.normal(size=vid_pris.shape)
                goodpatch[goodpatch<0] = 0
                goodpatch[goodpatch>255] = 255
            hdf5_im[idx] = goodpatch
            hdf5_lab[idx] = 0
            hdf5_trainset[idx] = traintest
            #skimage.io.imsave("extract/%d.png" % (idx,), patch)
            limit += 1
            idx += 1
            # At most 100 patches per source image/scene.
            if limit >= 100:
                break
    return idx
# get the number of patches
# Fixed seed so the sampled patch positions are reproducible.
np.random.seed(12345)
n_total_images = 730600 #12000
patch_height = 100
patch_width = 100
n_channels = 1
# sf = single frame
# fd = frame diff
# Pre-allocate the full dataset file; patchers fill it sequentially.
f = h5py.File('contourdataset_sf.hdf5', mode='w')
image_patches = f.create_dataset('image_patches', (n_total_images, n_channels, patch_height, patch_width), dtype='float')
image_patches.dims[0].label = 'batch'
image_patches.dims[1].label = 'channel'
image_patches.dims[2].label = 'height'
image_patches.dims[3].label = 'width'
labels = f.create_dataset('labels', (n_total_images,), dtype='uint8')
trainset = f.create_dataset('set', (n_total_images,), dtype='uint8')
# Positive then negative patches for the train (0) and test (1) splits;
# each call resumes writing at the index the previous call returned.
n_idx = 0
n_idx = get_postrainpatches(image_patches, labels, trainset, n_idx, 0)
n_idx = get_negtrainpatches(image_patches, labels, trainset, n_idx, 0)
n_idx = get_postrainpatches(image_patches, labels, trainset, n_idx, 1)
n_idx = get_negtrainpatches(image_patches, labels, trainset, n_idx, 1)
print n_idx, n_total_images
f.flush()
f.close()
| 28.105455 | 121 | 0.629706 | import skimage.io
import skvideo.io
import os
import h5py
from sklearn.externals import joblib
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import f1_score
import scipy.misc
import scipy.signal
import numpy as np
from sporco import util
import matplotlib.pyplot as plt
import pylab as py
import glob
from PIL import Image
import cv2
import sys
def gauss_window(lw, sigma):
sd = float(sigma)
lw = int(lw)
weights = [0.0] * (2 * lw + 1)
weights[lw] = 1.0
sum = 1.0
sd *= sd
for ii in range(1, lw + 1):
tmp = np.exp(-0.5 * float(ii * ii) / sd)
weights[lw + ii] = tmp
weights[lw - ii] = tmp
sum += 2.0 * tmp
for ii in range(2 * lw + 1):
weights[ii] /= sum
return weights
avg_window = gauss_window(100, 30.0)
def hp_image(image, window_arr):
extend_mode = 'reflect'
image = np.array(image).astype(np.float32)
w, h = image.shape
mu_image = np.zeros((w, h))
scipy.ndimage.correlate1d(image, window_arr, 0, mu_image, mode=extend_mode)
scipy.ndimage.correlate1d(mu_image, window_arr, 1, mu_image, mode=extend_mode)
return image - mu_image, mu_image
def get_postrainpatches(hdf5_im, hdf5_lab, hdf5_trainset, idx=0, traintest=0):
return genericpospatcher(hdf5_im, hdf5_lab, hdf5_trainset, idx=idx, traintest=traintest)
def genericpospatcher(hdf5_im, hdf5_lab, hdf5_trainset, idx=0, traintest=0):
width = 100+40
height = 100+40
lst = np.array(glob.glob("images/*"))
n_samples = 1000
for jjj in range(n_samples):
print jjj, n_samples
vid_pris = np.random.random(size=(1, 500, 500, 1))*255
mi = np.min(vid_pris)
ma = np.max(vid_pris)
randomwidth=np.random.random()*20 + 20
avg_window = gauss_window(np.int32(np.round(randomwidth)*3), randomwidth)
_, blur = hp_image(vid_pris[0, :, :, 0], avg_window)
blur -= np.min(blur)
blur /= np.max(blur)
blur *= (ma - mi)
blur += mi
blur = blur.astype(np.uint8)
vid_pris[0, :, :, 0] = blur
T, H, W, C = vid_pris.shape
adj_h = H - height
adj_w = W - width
iv, jv = np.meshgrid(np.arange(adj_h), np.arange(adj_w), sparse=False, indexing='ij')
iv = iv.reshape(-1)
jv = jv.reshape(-1)
jdx = np.random.permutation(adj_h*adj_w)
iv = iv[jdx]
jv = jv[jdx]
rpy = 0
limit = 0
for (y, x) in zip(iv, jv):
rpy += 1
t = 0
goodpatch = vid_pris[0, y:y+height, x:x+width, 0]
badpatch = goodpatch.copy()
badpatch = badpatch.astype(np.float32)
badpatch2 = badpatch.copy()
A = np.random.normal(size=badpatch.shape)
B = np.random.random(1)*10
if B>5:
B -= 5
else:
B *= 0
badpatch += A*B
badpatch[badpatch<0] = 0
badpatch[badpatch>255] = 255
amt = np.random.randint(3, 6)
badpatch /= 2**amt
badpatch = np.floor(badpatch)
badpatch *= 2**amt
badpatch2 /= 2**amt
badpatch2 = np.floor(badpatch2)
badpatch2 *= 2**amt
diff = np.mean((badpatch - goodpatch)**2)
if diff < 1.5:
continue
if(np.std(badpatch2[55:-55, 55:-55])<1e-9):
print "bad patch"
continue
badpatch = badpatch[20:-20, 20:-20]
hdf5_im[idx] = badpatch
hdf5_lab[idx] = 1
hdf5_trainset[idx] = traintest
limit += 1
idx += 1
if limit >= 200:
break
return idx
def get_negtrainpatches(image_patches, labels, trainset, idx=0, traintest=0):
return genericnegpatcher(image_patches, labels, trainset, idx=idx, traintest=traintest)
def genericnegpatcher(hdf5_im, hdf5_lab, hdf5_trainset, idx=0, traintest=0):
width = 100+40
height = 100+40
lst = np.array(glob.glob("images/*"))
tlst = []
for i in xrange(1000/10):
tlst = np.hstack((tlst, lst[:10]))
lst = tlst
lst480p = np.array(glob.glob("/mnt/hd3/scenes/480p/*avi"))
lst1080p = np.array(glob.glob("/mnt/hd3/scenes/1080p/*avi"))
lst480p = np.sort(lst480p)
lst1080p = np.sort(lst1080p)
if traintest == 0:
lst = np.hstack((lst, lst480p[:575], lst1080p[:215]))
else:
lst = np.hstack((lst, lst480p[575:], lst1080p[215:]))
n_samples = len(lst)
for jjj, fname in enumerate(lst):
print jjj, n_samples
if "images" in fname:
print "gen"
vid_pris = np.random.random(size=(1, 500, 500, 1))*255
mi = np.min(vid_pris)
ma = np.max(vid_pris)
randomwidth=np.random.random()*20 + 10
avg_window = gauss_window(np.int32(np.round(randomwidth)*3), randomwidth)
_, blur = hp_image(vid_pris[0, :, :, 0], avg_window)
blur -= np.min(blur)
blur /= np.max(blur)
blur *= (ma - mi)
blur += mi
blur = blur.astype(np.uint8)
vid_pris[0, :, :, 0] = blur
vid_pris = vid_pris.astype(np.float32)
else:
vid_pris = skvideo.io.vread(fname, as_grey=True).astype(np.float32)
T, H, W, C = vid_pris.shape
adj_h = H - height
adj_w = W - width
iv, jv = np.meshgrid(np.arange(adj_h), np.arange(adj_w), sparse=False, indexing='ij')
iv = iv.reshape(-1)
jv = jv.reshape(-1)
jdx = np.random.permutation(adj_h*adj_w)
iv = iv[jdx]
jv = jv[jdx]
limit = 0
rby = 0
tv = np.arange(T)
for (y, x) in zip(iv, jv):
np.random.shuffle(tv)
t = tv[0]
goodpatch = vid_pris[0, y:y+height, x:x+width, 0].astype(np.float32)
goodpatch = goodpatch[20:-20, 20:-20]
if "images" in fname:
A = np.random.normal(size=goodpatch.shape)
B = np.random.random(1)*10
if B>5:
B -= 5
else:
B *= 0
goodpatch += A*B
goodpatch[goodpatch<0] = 0
goodpatch[goodpatch>255] = 255
hdf5_im[idx] = goodpatch
hdf5_lab[idx] = 0
hdf5_trainset[idx] = traintest
limit += 1
idx += 1
if limit >= 100:
break
return idx
np.random.seed(12345)
n_total_images = 730600
patch_height = 100
patch_width = 100
n_channels = 1
f = h5py.File('contourdataset_sf.hdf5', mode='w')
image_patches = f.create_dataset('image_patches', (n_total_images, n_channels, patch_height, patch_width), dtype='float')
image_patches.dims[0].label = 'batch'
image_patches.dims[1].label = 'channel'
image_patches.dims[2].label = 'height'
image_patches.dims[3].label = 'width'
labels = f.create_dataset('labels', (n_total_images,), dtype='uint8')
trainset = f.create_dataset('set', (n_total_images,), dtype='uint8')
n_idx = 0
n_idx = get_postrainpatches(image_patches, labels, trainset, n_idx, 0)
n_idx = get_negtrainpatches(image_patches, labels, trainset, n_idx, 0)
n_idx = get_postrainpatches(image_patches, labels, trainset, n_idx, 1)
n_idx = get_negtrainpatches(image_patches, labels, trainset, n_idx, 1)
print n_idx, n_total_images
f.flush()
f.close()
| false | true |
f7fd5d0a98d5d7b6ed4a472562be817ce79fd403 | 5,609 | py | Python | routes.py | itsjatin135s/Ekchhat | 66d1d14314c75a2937350a467afa571ed4a32fe4 | [
"MIT"
] | null | null | null | routes.py | itsjatin135s/Ekchhat | 66d1d14314c75a2937350a467afa571ed4a32fe4 | [
"MIT"
] | null | null | null | routes.py | itsjatin135s/Ekchhat | 66d1d14314c75a2937350a467afa571ed4a32fe4 | [
"MIT"
] | null | null | null | from flask import render_template, redirect, url_for, flash,request
from forms import ContactUsForm,DonateForm,PartnerForm
from models import ContactUs,Donate,Partner
from __init__ import db, app
from selenium import webdriver
from bs4 import BeautifulSoup
import pandas as pd
from webdriver_manager.chrome import ChromeDriverManager
driver = webdriver.Chrome(ChromeDriverManager().install())
#success page
# routes for index,register,login,logout,error...
@app.route('/', methods=['GET', 'POST'])
def contact():
    """Landing page: render the contact form and persist valid submissions."""
    forms = ContactUsForm()
    if forms.validate_on_submit():
        submission = ContactUs(
            name=forms.name.data,
            email=forms.email.data,
            address=forms.address.data,
            phone=forms.phone.data,
            comments=forms.comments.data,
        )
        db.session.add(submission)
        db.session.commit()
    return render_template('index.html', forms=forms)
@app.route('/Donate_Food' ,methods=['GET','POST'])
def donate():
    """Scrape a hard-coded Flipkart results page and dump names/prices to
    products.csv, returning the string "Success".

    NOTE(review): despite the route name this currently has nothing to do
    with food donation; the original form-handling implementation is kept
    commented out in a string right below this function -- confirm which
    behavior is intended.
    """
    #driver = webdriver.Chrome()
    products=[] #List to store name of the product
    prices=[] #List to store price of the product
    ratings=[] #List to store rating of the product (never filled; see commented append below)
    driver.get("https://www.flipkart.com/search?q=nokia+mobiles&sid=tyy%2C4io&as=on&as-show=on&otracker=AS_QueryStore_OrganicAutoSuggest_1_1_na_na_na&otracker1=AS_QueryStore_OrganicAutoSuggest_1_1_na_na_na&as-pos=1&as-type=RECENT&suggestionId=nokia+mobiles%7CMobiles&requestId=34c5d1f7-8967-44ef-82e4-d7d691ad0f72&as-backfill=on")
    content = driver.page_source
    # NOTE(review): BeautifulSoup called without an explicit parser; this
    # emits a warning and is parser-dependent -- consider BeautifulSoup(content, "html.parser").
    soup = BeautifulSoup(content)
    # CSS class names below are Flipkart-internal and change frequently.
    for a in soup.findAll('a',href=True, attrs={'class':'_31qSD5'}):
        name=a.find('div', attrs={'class':'_3wU53n'})
        price=a.find('div', attrs={'class':'_1vC4OE _2rQ-NK'})
        rating=a.find('div', attrs={'class':'hGSR34 _2beYZw'})
        products.append(name.text)
        prices.append(price.text)
        #ratings.append(rating.text)
    df = pd.DataFrame({'Product Name':products,'Price':prices})
    df.to_csv('products.csv', index=False, encoding='utf-8')
    return "Success"
"""def donate():
forms = DonateForm()
if forms.validate_on_submit():
donatefood = Donate(name=forms.name.data,
email=forms.email.data, address=forms.address.data,phone=forms.phone.data,food=forms.food.data)
db.session.add(donatefood)
db.session.commit()
#flash('hurreey account created','success')
return render_template('donate_food.html', forms=forms)"""
@app.route('/Partner' ,methods=['GET','POST'])
def partner():
    """Render the partner sign-up form; on a valid submission, store the
    partner record and send a thank-you mail to every contact listed in
    mycontact.txt via Gmail SMTP.
    """
    forms = PartnerForm()
    if forms.validate_on_submit():
        partner = Partner(orgname=forms.orgname.data,ownername=forms.ownername.data,
        email=forms.email.data, phone=forms.phone.data,state=forms.state.data,city=forms.city.data,address=forms.address.data)
        db.session.add(partner)
        db.session.commit()
        import smtplib
        from string import Template
        from email.mime.multipart import MIMEMultipart
        from email.mime.text import MIMEText
        # NOTE(review): placeholder credentials hard-coded in source; move
        # to environment variables / app config before deployment.
        MY_ADDRESS = 'your_mail_id'
        PASSWORD = 'your_password'
        def get_contacts(filename):
            """
            Return two lists names, emails containing names and email addresses
            read from a file specified by filename.
            """
            names = []
            emails = []
            with open(filename, mode='r', encoding='utf-8') as contacts_file:
                # Each line: "<name> <email>" separated by whitespace.
                for a_contact in contacts_file:
                    names.append(a_contact.split()[0])
                    emails.append(a_contact.split()[1])
            return names, emails
        def read_template(filename):
            """
            Returns a Template object comprising the contents of the
            file specified by filename.
            """
            with open(filename, 'r', encoding='utf-8') as template_file:
                template_file_content = template_file.read()
            return Template(template_file_content)
        def main():
            # Send one templated message per contact over a single SMTP session.
            names, emails = get_contacts('mycontact.txt') # read contacts
            message_template = read_template('message.txt')
            # set up the SMTP server
            s = smtplib.SMTP(host='smtp.gmail.com', port=587)
            s.starttls()
            s.login(MY_ADDRESS, PASSWORD)
            # For each contact, send the email:
            for name, email in zip(names, emails):
                msg = MIMEMultipart()       # create a message
                # add in the actual person name to the message template
                message = message_template.substitute(PERSON_NAME=name.title())
                # Prints out the message body for our sake
                print(message)
                # setup the parameters of the message
                msg['From']=MY_ADDRESS
                msg['To']=email
                msg['Subject']="Thanks For Joining"
                # add in the message body
                msg.attach(MIMEText(message, 'plain'))
                # send the message via the server set up earlier.
                s.send_message(msg)
                del msg
            # Terminate the SMTP session and close the connection
            s.quit()
        main()
    return render_template('partner.html', forms=forms)
@app.route('/error')
def error():
    """Render the static error page."""
    return render_template('error.html')
| 34.411043 | 328 | 0.617757 | from flask import render_template, redirect, url_for, flash,request
from forms import ContactUsForm,DonateForm,PartnerForm
from models import ContactUs,Donate,Partner
from __init__ import db, app
from selenium import webdriver
from bs4 import BeautifulSoup
import pandas as pd
from webdriver_manager.chrome import ChromeDriverManager
driver = webdriver.Chrome(ChromeDriverManager().install())
@app.route('/' ,methods=['GET','POST'])
def contact():
forms = ContactUsForm()
if forms.validate_on_submit():
contactus = ContactUs(name=forms.name.data,
email=forms.email.data, address=forms.address.data,phone=forms.phone.data,comments=forms.comments.data)
db.session.add(contactus)
db.session.commit()
return render_template('index.html', forms=forms)
@app.route('/Donate_Food' ,methods=['GET','POST'])
def donate():
products=[]
prices=[]
ratings=[]
driver.get("https://www.flipkart.com/search?q=nokia+mobiles&sid=tyy%2C4io&as=on&as-show=on&otracker=AS_QueryStore_OrganicAutoSuggest_1_1_na_na_na&otracker1=AS_QueryStore_OrganicAutoSuggest_1_1_na_na_na&as-pos=1&as-type=RECENT&suggestionId=nokia+mobiles%7CMobiles&requestId=34c5d1f7-8967-44ef-82e4-d7d691ad0f72&as-backfill=on")
content = driver.page_source
soup = BeautifulSoup(content)
for a in soup.findAll('a',href=True, attrs={'class':'_31qSD5'}):
name=a.find('div', attrs={'class':'_3wU53n'})
price=a.find('div', attrs={'class':'_1vC4OE _2rQ-NK'})
rating=a.find('div', attrs={'class':'hGSR34 _2beYZw'})
products.append(name.text)
prices.append(price.text)
df = pd.DataFrame({'Product Name':products,'Price':prices})
df.to_csv('products.csv', index=False, encoding='utf-8')
return "Success"
@app.route('/Partner' ,methods=['GET','POST'])
def partner():
forms = PartnerForm()
if forms.validate_on_submit():
partner = Partner(orgname=forms.orgname.data,ownername=forms.ownername.data,
email=forms.email.data, phone=forms.phone.data,state=forms.state.data,city=forms.city.data,address=forms.address.data)
db.session.add(partner)
db.session.commit()
import smtplib
from string import Template
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
MY_ADDRESS = 'your_mail_id'
PASSWORD = 'your_password'
def get_contacts(filename):
names = []
emails = []
with open(filename, mode='r', encoding='utf-8') as contacts_file:
for a_contact in contacts_file:
names.append(a_contact.split()[0])
emails.append(a_contact.split()[1])
return names, emails
def read_template(filename):
with open(filename, 'r', encoding='utf-8') as template_file:
template_file_content = template_file.read()
return Template(template_file_content)
def main():
names, emails = get_contacts('mycontact.txt')
message_template = read_template('message.txt')
s = smtplib.SMTP(host='smtp.gmail.com', port=587)
s.starttls()
s.login(MY_ADDRESS, PASSWORD)
for name, email in zip(names, emails):
msg = MIMEMultipart()
message = message_template.substitute(PERSON_NAME=name.title())
print(message)
msg['From']=MY_ADDRESS
msg['To']=email
msg['Subject']="Thanks For Joining"
msg.attach(MIMEText(message, 'plain'))
s.send_message(msg)
del msg
s.quit()
main()
return render_template('partner.html', forms=forms)
@app.route('/error')
def error():
return render_template('error.html')
| true | true |
f7fd5d2765f642a996e1ea57f95263cadb792804 | 16,827 | py | Python | Tests/test_Crystal.py | erpeg/biopython | 296b6b451ce7161fdace2fd36d0817722491d733 | [
"BSD-3-Clause"
] | 2 | 2020-06-25T12:52:03.000Z | 2020-07-11T09:47:34.000Z | Tests/test_Crystal.py | cosign070128/biopython | 2f02e34ba76306e9c27eec9e051809bec2cece9b | [
"BSD-3-Clause"
] | 9 | 2020-05-05T00:54:23.000Z | 2020-06-09T17:10:45.000Z | Tests/test_Crystal.py | cosign070128/biopython | 2f02e34ba76306e9c27eec9e051809bec2cece9b | [
"BSD-3-Clause"
] | 3 | 2020-05-17T19:43:05.000Z | 2020-06-04T20:44:38.000Z | # Copyright 2002 by Katharine Lindner. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
# python unittest framework
"""Tests for Crystal module (OBSOLETE)."""
import unittest
import copy
import warnings
from Bio import BiopythonDeprecationWarning
with warnings.catch_warnings():
warnings.simplefilter("ignore", BiopythonDeprecationWarning)
# modules to be tested
from Bio.Crystal import Hetero, Chain, Crystal, CrystalError
class ChainTestCase(unittest.TestCase):
    def setUp(self):
        """Create the raw residue strings shared by every test method."""
        # Mixed DNA/RNA single-letter chain (20 residues, one "U").
        self.a = "C A A C T A G G T C A C U A G G T C A G"
        # DNA-only single-letter chain, same length as ``a``.
        self.b = "C T G A C C T A G T G A C C T A G T T G"
        # Protein chain in three-letter codes.
        self.c = "THR LYS LEU ASN GLY MET VAL LEU LEU CYS LYS VAL CYS GLY ASP"
        # Same residues as ``c`` but with trailing whitespace.
        self.d = "THR LYS LEU ASN GLY MET VAL LEU LEU CYS LYS VAL CYS GLY ASP "
        # Differs from ``c`` in the first residue (THR -> TYR).
        self.e = "TYR LYS LEU ASN GLY MET VAL LEU LEU CYS LYS VAL CYS GLY ASP "
        # Differs from ``c`` in the last residue (ASP -> SER).
        self.f = "THR LYS LEU ASN GLY MET VAL LEU LEU CYS LYS VAL CYS GLY SER "
        # Same as ``a`` except the last residue (G -> T).
        self.g = "C A A C T A G G T C A C U A G G T C A T"
        # Same as ``a`` except the first residue (C -> G).
        self.h = "G A A C T A G G T C A C U A G G T C A G"
def testEquals(self):
first = Chain(self.a)
second = Chain(self.a)
self.assertEqual(first, second)
first = Chain(self.b)
second = Chain(self.b)
self.assertEqual(first, second)
first = Chain(self.c)
second = Chain(self.c)
self.assertEqual(first, second)
first = Chain(self.a)
second = Chain(self.g)
self.assertNotEqual(first, second)
first = Chain(self.a)
second = Chain(self.h)
self.assertNotEqual(first, second)
first = Chain(self.c)
second = Chain(self.e)
self.assertNotEqual(first, second)
first = Chain(self.c)
second = Chain(self.f)
self.assertNotEqual(first, second)
def testLen(self):
chain = Chain(self.a)
elements = self.a.strip().split()
num_elements = len(elements)
self.assertEqual(len(chain), num_elements)
chain = Chain(self.b)
elements = self.b.strip().split()
num_elements = len(elements)
self.assertEqual(len(chain), num_elements)
chain = Chain(self.c)
elements = self.c.strip().split()
num_elements = len(elements)
self.assertEqual(len(chain), num_elements)
def testAppend(self):
chain = Chain(self.a[:])
chain.append("U")
elements = self.a.strip().split()
num_elements = len(elements)
last_element = chain.data[-1]
self.assertEqual("u", last_element.data)
self.assertEqual(len(chain), num_elements + 1)
chain = Chain(self.a[:])
chain.append(Hetero("A"))
elements = self.a.strip().split()
num_elements = len(elements)
last_element = chain.data[-1]
self.assertEqual("a", last_element.data)
self.assertEqual(len(chain), num_elements + 1)
chain = Chain(self.b[:])
chain.append("t")
elements = self.b.strip().split()
num_elements = len(elements)
last_element = chain.data[-1]
self.assertEqual("t", last_element.data)
self.assertEqual(len(chain), num_elements + 1)
chain = Chain(self.b[:])
chain.append(Hetero("C"))
elements = self.b.strip().split()
num_elements = len(elements)
last_element = chain.data[-1]
self.assertEqual("c", last_element.data)
self.assertEqual(len(chain), num_elements + 1)
chain = Chain(self.c[:])
chain.append("ser")
elements = self.c.strip().split()
num_elements = len(elements)
last_element = chain.data[-1]
self.assertEqual("ser", last_element.data)
self.assertEqual(len(chain), num_elements + 1)
def testInsert(self):
chain = Chain(self.a[:])
i = 4
chain.insert(i, "g")
elements = self.a.strip().split()
num_elements = len(elements)
target_element = chain.data[i]
self.assertEqual("g", target_element.data)
self.assertEqual(len(chain), num_elements + 1)
chain = Chain(self.a[:])
i = 0
chain.insert(i, "t")
elements = self.a.strip().split()
num_elements = len(elements)
target_element = chain.data[i]
self.assertEqual("t", target_element.data)
self.assertEqual(len(chain), num_elements + 1)
chain = Chain(self.b[:])
i = 9
chain.insert(i, Hetero("a"))
elements = self.a.strip().split()
num_elements = len(elements)
target_element = chain.data[i]
self.assertEqual("a", target_element.data)
self.assertEqual(len(chain), num_elements + 1)
chain = Chain(self.c[:])
i = 5
chain.insert(i, "gln")
elements = self.c.strip().split()
num_elements = len(elements)
target_element = chain.data[i]
self.assertEqual("gln", target_element.data)
self.assertEqual(len(chain), num_elements + 1)
def testRemove(self):
chain = Chain(self.a[:])
elements = self.a.strip().split()
num_elements = len(elements)
num_a = chain.data.count(Hetero("a"))
chain.remove("a")
num_a_remaining = chain.data.count(Hetero("a"))
self.assertEqual(num_a_remaining, num_a - 1)
self.assertEqual(len(chain), num_elements - 1)
chain = Chain(self.b[:])
elements = self.b.strip().split()
num_elements = len(elements)
num_b = chain.data.count(Hetero("t"))
chain.remove("t")
num_b_remaining = chain.data.count(Hetero("t"))
self.assertEqual(num_b_remaining, num_b - 1)
self.assertEqual(len(chain), num_elements - 1)
chain = Chain(self.c[:])
elements = self.c.strip().split()
num_elements = len(elements)
num_leu = chain.data.count(Hetero("leu"))
chain.remove("leu")
num_leu_remaining = chain.data.count(Hetero("leu"))
self.assertEqual(num_leu_remaining, num_leu - 1)
self.assertEqual(len(chain), num_elements - 1)
def testCount(self):
chain = Chain(self.a[:])
num_a = chain.data.count(Hetero("a"))
self.assertEqual(chain.count("a"), num_a)
chain = Chain(self.b[:])
num_a = chain.data.count(Hetero("t"))
self.assertEqual(chain.count("t"), num_a)
chain = Chain(self.c[:])
num_a = chain.data.count(Hetero("leu"))
self.assertEqual(chain.count("leu"), num_a)
chain = Chain(self.c[:])
num_a = chain.data.count(Hetero("cys"))
self.assertEqual(chain.count("cys"), num_a)
def testIndex(self):
chain = Chain(self.a[:])
index_g = chain.data.index(Hetero("g"))
self.assertEqual(chain.index("g"), index_g)
chain = Chain(self.b[:])
index_c = chain.data.index(Hetero("c"))
self.assertEqual(chain.index("c"), index_c)
chain = Chain(self.c[:])
index_met = chain.data.index(Hetero("met"))
self.assertEqual(chain.index("met"), index_met)
def testGetItem(self):
chain = Chain(self.a[:])
element_3 = chain.data[3]
self.assertEqual(chain[3], element_3)
chain = Chain(self.a[:])
element_0 = chain.data[0]
self.assertEqual(chain[0], element_0)
chain = Chain(self.b[:])
element_7 = chain.data[7]
self.assertEqual(chain[7], element_7)
chain = Chain(self.b[:])
last_element = chain.data[-1]
self.assertEqual(chain[-1], last_element)
chain = Chain(self.c[:])
element_8 = chain.data[8]
self.assertEqual(chain[8], element_8)
def testSetItem(self):
chain = Chain(self.a[:])
chain[2] = "t"
element_2 = chain.data[2]
self.assertEqual(chain[2], element_2)
chain = Chain(self.a[:])
chain[0] = Hetero("U")
element_0 = chain.data[0]
self.assertEqual(chain[0], element_0)
chain = Chain(self.b[:])
chain[-1] = Hetero("c")
last_element = chain.data[-1]
self.assertEqual(chain[-1], last_element)
chain = Chain(self.b[:])
chain[1] = "a"
element_1 = chain.data[1]
self.assertEqual(chain[1], element_1)
chain = Chain(self.c[:])
chain[5] = "ser"
element_5 = chain.data[5]
self.assertEqual(chain[5], element_5)
def testDelItem(self):
chain = Chain(self.a[:])
elements = self.a.strip().split()
num_elements = len(elements)
num_t = chain.data.count(Hetero("t"))
del chain[4]
num_t_remaining = chain.data.count(Hetero("t"))
self.assertEqual(num_t_remaining, num_t - 1)
self.assertEqual(len(chain), num_elements - 1)
chain = Chain(self.a[:])
elements = self.a.strip().split()
num_elements = len(elements)
num_u = chain.data.count(Hetero("u"))
del chain[12]
num_u_remaining = 0
self.assertEqual(num_u_remaining, num_u - 1)
self.assertEqual(len(chain), num_elements - 1)
chain = Chain(self.b[:])
elements = self.b.strip().split()
num_elements = len(elements)
num_c = chain.data.count(Hetero("c"))
del chain[0]
num_c_remaining = chain.data.count(Hetero("c"))
self.assertEqual(num_c_remaining, num_c - 1)
self.assertEqual(len(chain), num_elements - 1)
chain = Chain(self.b[:])
elements = self.b.strip().split()
num_elements = len(elements)
num_g = chain.data.count(Hetero("t"))
del chain[6]
num_g_remaining = chain.data.count(Hetero("t"))
self.assertEqual(num_g_remaining, num_g - 1)
self.assertEqual(len(chain), num_elements - 1)
chain = Chain(self.c[:])
elements = self.c.strip().split()
num_elements = len(elements)
num_thr = chain.data.count(Hetero("thr"))
del chain[0]
num_thr_remaining = chain.data.count(Hetero("thr"))
self.assertEqual(num_thr_remaining, num_thr - 1)
self.assertEqual(len(chain), num_elements - 1)
def testGetSlice(self):
chain = Chain(self.a[:])
first = 0
last = len(chain)
slice = chain[:]
other = chain.data[:]
self.assertEqual(slice.data, other)
chain = Chain(self.a[:])
first = 0
last = 4
slice = chain[first:last]
other = chain.data[first:last]
self.assertEqual(slice.data, other)
chain = Chain(self.b[:])
first = 2
last = len(chain)
slice = chain[first:last]
other = chain.data[first:last]
self.assertEqual(slice.data, other)
chain = Chain(self.b[:])
first = -1
slice = chain[first:]
other = chain.data[first:]
self.assertEqual(slice.data, other)
chain = Chain(self.c[:])
first = 3
last = 7
slice = chain[first:last]
other = chain.data[first:last]
self.assertEqual(slice.data, other)
chain = Chain(self.c[:])
first = 3
last = -1
slice = chain[first:last]
other = chain.data[first:last]
self.assertEqual(slice.data, other)
def testSetSlice(self):
chain = Chain(self.a[:])
slice = "G T C A G 5NC G C A T G G"
chain[:] = slice[4:7]
other = Chain(slice[4:7])
self.assertEqual(chain, other)
chain = Chain(self.c[:])
old_chain = Chain(self.c[:])
slice = "MET ILE GLU ILE LYS ASP"
chain[2:5] = slice
other = Chain(old_chain.data[:2] + Chain(slice).data + old_chain.data[5:])
self.assertEqual(chain, other)
chain = Chain(self.c[:])
old_chain = Chain(self.c[:])
slice = "CYS GLY ALA GLU CYS VAL TYR"
chain[7:] = slice
other = Chain(old_chain.data[:7] + Chain(slice).data)
self.assertEqual(chain, other)
chain = Chain(self.c[:])
old_chain = Chain(self.c[:])
slice = "SER ASN GLU TRP ASP "
chain[:9] = slice
other = Chain(Chain(slice).data + old_chain.data[9:])
self.assertEqual(chain, other)
def testDelSlice(self):
chain = Chain(self.c[:])
old_chain = Chain(self.c[:])
del chain[3:8]
other = Chain(old_chain.data[:3] + old_chain.data[8:])
self.assertEqual(chain, other)
chain = Chain(self.c[:])
old_chain = Chain(self.c[:])
del chain[:4]
other = Chain(old_chain.data[4:])
self.assertEqual(chain, other)
chain = Chain(self.c[:])
old_chain = Chain(self.c[:])
del chain[9:]
other = Chain(old_chain.data[:9])
self.assertEqual(chain, other)
def testContains(self):
chain = Chain(self.c[:])
self.assertNotIn("ser", chain)
self.assertIn("lys", chain)
self.assertIn("asp", chain)
def testAdd(self):
texta = "G U G G U C U G A U G A G G C C"
textb = "G G C C G A A A C U C G U A A G A G U C A C C A C"
targeta = texta + Chain(textb)
targetb = Chain(texta) + textb
targetc = Chain(texta) + Chain(textb)
self.assertEqual(targeta, targetc)
self.assertEqual(targetb, targetc)
self.assertEqual(targeta, targetb)
self.assertEqual(len(targeta), len(Chain(texta)) + len(Chain(textb)))
targetd = Chain(texta)
targetd += textb
targete = Chain(texta)
targete += Chain(textb)
self.assertEqual(targetd, targetc)
self.assertEqual(targete, targetb)
class CrystalTestCase(unittest.TestCase):
def setUp(self):
self.crystal = Crystal({"a": "T T G A C T C T C T T A A",
"b": Chain("G A G A G T C A"),
"c": "T T G A C T C T C T T A A",
"d": Chain("G A G A G T C A")
})
def testLen(self):
self.assertEqual(len(self.crystal), len(self.crystal.data))
def testGetItem(self):
self.assertEqual(self.crystal["a"], self.crystal.data["a"])
def testSetItem(self):
target = copy.deepcopy(self.crystal)
e = "MET ALA LEU THR ASN ALA GLN ILE LEU ALA VAL ILE ASP SER"
f = "LEU GLY GLY GLY LEU GLN GLY THR LEU HIS CYS TYR GLU ILE PRO LEU"
target["e"] = e
target["f"] = Chain(f)
self.assertEqual(Chain(e), target["e"])
self.assertEqual(Chain(f), target["f"])
def testDelItem(self):
target = copy.deepcopy(self.crystal)
del target["b"]
self.assertNotIn("b", target.data)
self.assertIn("a", target.data)
self.assertIn("c", target.data)
def testClear(self):
target = copy.deepcopy(self.crystal)
target.clear()
self.assertEqual(len(target.data), 0)
def testKeys(self):
self.assertEqual(list(self.crystal.keys()),
list(self.crystal.data.keys()))
def testValues(self):
self.assertEqual(list(self.crystal.values()),
list(self.crystal.data.values()))
def testItems(self):
self.assertEqual(list(self.crystal.items()),
list(self.crystal.data.items()))
def testHasKey(self):
self.assertIn("b", self.crystal)
self.assertIn("c", self.crystal)
self.assertNotIn("z", self.crystal)
class HeteroTestCase(unittest.TestCase):
def testInit(self):
self.assertRaises(CrystalError, Hetero, "abcd")
self.assertRaises(CrystalError, Hetero, "")
self.assertRaises(CrystalError, Hetero, "A@#")
self.assertRaises(CrystalError, Hetero, [])
self.assertRaises(CrystalError, Hetero, {})
def testLen(self):
bru = Hetero("bru")
self.assertEqual(len(bru), 3)
_14w = Hetero("14w")
self.assertEqual(len(_14w), 3)
a = Hetero("a")
self.assertEqual(len(a), 1)
ga = Hetero("ga")
self.assertEqual(len(ga), 2)
def testEquals(self):
u = Hetero("u")
u1 = Hetero("u")
self.assertEqual(u, u1)
self.assertEqual(u, Hetero("U"))
self.assertNotEqual(u, Hetero("u1"))
self.assertNotEqual(u, Hetero("x"))
gna = Hetero("gna")
self.assertEqual(gna, Hetero("gNA"))
self.assertEqual(gna, Hetero("GnA"))
self.assertNotEqual(gna, Hetero("gnb"))
self.assertNotEqual(gna, Hetero("na"))
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
| 32.737354 | 82 | 0.574493 |
import unittest
import copy
import warnings
from Bio import BiopythonDeprecationWarning
with warnings.catch_warnings():
warnings.simplefilter("ignore", BiopythonDeprecationWarning)
from Bio.Crystal import Hetero, Chain, Crystal, CrystalError
class ChainTestCase(unittest.TestCase):
def setUp(self):
self.a = "C A A C T A G G T C A C U A G G T C A G"
self.b = "C T G A C C T A G T G A C C T A G T T G"
self.c = "THR LYS LEU ASN GLY MET VAL LEU LEU CYS LYS VAL CYS GLY ASP"
self.d = "THR LYS LEU ASN GLY MET VAL LEU LEU CYS LYS VAL CYS GLY ASP "
self.e = "TYR LYS LEU ASN GLY MET VAL LEU LEU CYS LYS VAL CYS GLY ASP "
self.f = "THR LYS LEU ASN GLY MET VAL LEU LEU CYS LYS VAL CYS GLY SER "
self.g = "C A A C T A G G T C A C U A G G T C A T"
self.h = "G A A C T A G G T C A C U A G G T C A G"
def testEquals(self):
first = Chain(self.a)
second = Chain(self.a)
self.assertEqual(first, second)
first = Chain(self.b)
second = Chain(self.b)
self.assertEqual(first, second)
first = Chain(self.c)
second = Chain(self.c)
self.assertEqual(first, second)
first = Chain(self.a)
second = Chain(self.g)
self.assertNotEqual(first, second)
first = Chain(self.a)
second = Chain(self.h)
self.assertNotEqual(first, second)
first = Chain(self.c)
second = Chain(self.e)
self.assertNotEqual(first, second)
first = Chain(self.c)
second = Chain(self.f)
self.assertNotEqual(first, second)
def testLen(self):
chain = Chain(self.a)
elements = self.a.strip().split()
num_elements = len(elements)
self.assertEqual(len(chain), num_elements)
chain = Chain(self.b)
elements = self.b.strip().split()
num_elements = len(elements)
self.assertEqual(len(chain), num_elements)
chain = Chain(self.c)
elements = self.c.strip().split()
num_elements = len(elements)
self.assertEqual(len(chain), num_elements)
def testAppend(self):
chain = Chain(self.a[:])
chain.append("U")
elements = self.a.strip().split()
num_elements = len(elements)
last_element = chain.data[-1]
self.assertEqual("u", last_element.data)
self.assertEqual(len(chain), num_elements + 1)
chain = Chain(self.a[:])
chain.append(Hetero("A"))
elements = self.a.strip().split()
num_elements = len(elements)
last_element = chain.data[-1]
self.assertEqual("a", last_element.data)
self.assertEqual(len(chain), num_elements + 1)
chain = Chain(self.b[:])
chain.append("t")
elements = self.b.strip().split()
num_elements = len(elements)
last_element = chain.data[-1]
self.assertEqual("t", last_element.data)
self.assertEqual(len(chain), num_elements + 1)
chain = Chain(self.b[:])
chain.append(Hetero("C"))
elements = self.b.strip().split()
num_elements = len(elements)
last_element = chain.data[-1]
self.assertEqual("c", last_element.data)
self.assertEqual(len(chain), num_elements + 1)
chain = Chain(self.c[:])
chain.append("ser")
elements = self.c.strip().split()
num_elements = len(elements)
last_element = chain.data[-1]
self.assertEqual("ser", last_element.data)
self.assertEqual(len(chain), num_elements + 1)
def testInsert(self):
chain = Chain(self.a[:])
i = 4
chain.insert(i, "g")
elements = self.a.strip().split()
num_elements = len(elements)
target_element = chain.data[i]
self.assertEqual("g", target_element.data)
self.assertEqual(len(chain), num_elements + 1)
chain = Chain(self.a[:])
i = 0
chain.insert(i, "t")
elements = self.a.strip().split()
num_elements = len(elements)
target_element = chain.data[i]
self.assertEqual("t", target_element.data)
self.assertEqual(len(chain), num_elements + 1)
chain = Chain(self.b[:])
i = 9
chain.insert(i, Hetero("a"))
elements = self.a.strip().split()
num_elements = len(elements)
target_element = chain.data[i]
self.assertEqual("a", target_element.data)
self.assertEqual(len(chain), num_elements + 1)
chain = Chain(self.c[:])
i = 5
chain.insert(i, "gln")
elements = self.c.strip().split()
num_elements = len(elements)
target_element = chain.data[i]
self.assertEqual("gln", target_element.data)
self.assertEqual(len(chain), num_elements + 1)
def testRemove(self):
chain = Chain(self.a[:])
elements = self.a.strip().split()
num_elements = len(elements)
num_a = chain.data.count(Hetero("a"))
chain.remove("a")
num_a_remaining = chain.data.count(Hetero("a"))
self.assertEqual(num_a_remaining, num_a - 1)
self.assertEqual(len(chain), num_elements - 1)
chain = Chain(self.b[:])
elements = self.b.strip().split()
num_elements = len(elements)
num_b = chain.data.count(Hetero("t"))
chain.remove("t")
num_b_remaining = chain.data.count(Hetero("t"))
self.assertEqual(num_b_remaining, num_b - 1)
self.assertEqual(len(chain), num_elements - 1)
chain = Chain(self.c[:])
elements = self.c.strip().split()
num_elements = len(elements)
num_leu = chain.data.count(Hetero("leu"))
chain.remove("leu")
num_leu_remaining = chain.data.count(Hetero("leu"))
self.assertEqual(num_leu_remaining, num_leu - 1)
self.assertEqual(len(chain), num_elements - 1)
def testCount(self):
chain = Chain(self.a[:])
num_a = chain.data.count(Hetero("a"))
self.assertEqual(chain.count("a"), num_a)
chain = Chain(self.b[:])
num_a = chain.data.count(Hetero("t"))
self.assertEqual(chain.count("t"), num_a)
chain = Chain(self.c[:])
num_a = chain.data.count(Hetero("leu"))
self.assertEqual(chain.count("leu"), num_a)
chain = Chain(self.c[:])
num_a = chain.data.count(Hetero("cys"))
self.assertEqual(chain.count("cys"), num_a)
def testIndex(self):
chain = Chain(self.a[:])
index_g = chain.data.index(Hetero("g"))
self.assertEqual(chain.index("g"), index_g)
chain = Chain(self.b[:])
index_c = chain.data.index(Hetero("c"))
self.assertEqual(chain.index("c"), index_c)
chain = Chain(self.c[:])
index_met = chain.data.index(Hetero("met"))
self.assertEqual(chain.index("met"), index_met)
def testGetItem(self):
chain = Chain(self.a[:])
element_3 = chain.data[3]
self.assertEqual(chain[3], element_3)
chain = Chain(self.a[:])
element_0 = chain.data[0]
self.assertEqual(chain[0], element_0)
chain = Chain(self.b[:])
element_7 = chain.data[7]
self.assertEqual(chain[7], element_7)
chain = Chain(self.b[:])
last_element = chain.data[-1]
self.assertEqual(chain[-1], last_element)
chain = Chain(self.c[:])
element_8 = chain.data[8]
self.assertEqual(chain[8], element_8)
def testSetItem(self):
chain = Chain(self.a[:])
chain[2] = "t"
element_2 = chain.data[2]
self.assertEqual(chain[2], element_2)
chain = Chain(self.a[:])
chain[0] = Hetero("U")
element_0 = chain.data[0]
self.assertEqual(chain[0], element_0)
chain = Chain(self.b[:])
chain[-1] = Hetero("c")
last_element = chain.data[-1]
self.assertEqual(chain[-1], last_element)
chain = Chain(self.b[:])
chain[1] = "a"
element_1 = chain.data[1]
self.assertEqual(chain[1], element_1)
chain = Chain(self.c[:])
chain[5] = "ser"
element_5 = chain.data[5]
self.assertEqual(chain[5], element_5)
def testDelItem(self):
chain = Chain(self.a[:])
elements = self.a.strip().split()
num_elements = len(elements)
num_t = chain.data.count(Hetero("t"))
del chain[4]
num_t_remaining = chain.data.count(Hetero("t"))
self.assertEqual(num_t_remaining, num_t - 1)
self.assertEqual(len(chain), num_elements - 1)
chain = Chain(self.a[:])
elements = self.a.strip().split()
num_elements = len(elements)
num_u = chain.data.count(Hetero("u"))
del chain[12]
num_u_remaining = 0
self.assertEqual(num_u_remaining, num_u - 1)
self.assertEqual(len(chain), num_elements - 1)
chain = Chain(self.b[:])
elements = self.b.strip().split()
num_elements = len(elements)
num_c = chain.data.count(Hetero("c"))
del chain[0]
num_c_remaining = chain.data.count(Hetero("c"))
self.assertEqual(num_c_remaining, num_c - 1)
self.assertEqual(len(chain), num_elements - 1)
chain = Chain(self.b[:])
elements = self.b.strip().split()
num_elements = len(elements)
num_g = chain.data.count(Hetero("t"))
del chain[6]
num_g_remaining = chain.data.count(Hetero("t"))
self.assertEqual(num_g_remaining, num_g - 1)
self.assertEqual(len(chain), num_elements - 1)
chain = Chain(self.c[:])
elements = self.c.strip().split()
num_elements = len(elements)
num_thr = chain.data.count(Hetero("thr"))
del chain[0]
num_thr_remaining = chain.data.count(Hetero("thr"))
self.assertEqual(num_thr_remaining, num_thr - 1)
self.assertEqual(len(chain), num_elements - 1)
def testGetSlice(self):
chain = Chain(self.a[:])
first = 0
last = len(chain)
slice = chain[:]
other = chain.data[:]
self.assertEqual(slice.data, other)
chain = Chain(self.a[:])
first = 0
last = 4
slice = chain[first:last]
other = chain.data[first:last]
self.assertEqual(slice.data, other)
chain = Chain(self.b[:])
first = 2
last = len(chain)
slice = chain[first:last]
other = chain.data[first:last]
self.assertEqual(slice.data, other)
chain = Chain(self.b[:])
first = -1
slice = chain[first:]
other = chain.data[first:]
self.assertEqual(slice.data, other)
chain = Chain(self.c[:])
first = 3
last = 7
slice = chain[first:last]
other = chain.data[first:last]
self.assertEqual(slice.data, other)
chain = Chain(self.c[:])
first = 3
last = -1
slice = chain[first:last]
other = chain.data[first:last]
self.assertEqual(slice.data, other)
def testSetSlice(self):
chain = Chain(self.a[:])
slice = "G T C A G 5NC G C A T G G"
chain[:] = slice[4:7]
other = Chain(slice[4:7])
self.assertEqual(chain, other)
chain = Chain(self.c[:])
old_chain = Chain(self.c[:])
slice = "MET ILE GLU ILE LYS ASP"
chain[2:5] = slice
other = Chain(old_chain.data[:2] + Chain(slice).data + old_chain.data[5:])
self.assertEqual(chain, other)
chain = Chain(self.c[:])
old_chain = Chain(self.c[:])
slice = "CYS GLY ALA GLU CYS VAL TYR"
chain[7:] = slice
other = Chain(old_chain.data[:7] + Chain(slice).data)
self.assertEqual(chain, other)
chain = Chain(self.c[:])
old_chain = Chain(self.c[:])
slice = "SER ASN GLU TRP ASP "
chain[:9] = slice
other = Chain(Chain(slice).data + old_chain.data[9:])
self.assertEqual(chain, other)
def testDelSlice(self):
chain = Chain(self.c[:])
old_chain = Chain(self.c[:])
del chain[3:8]
other = Chain(old_chain.data[:3] + old_chain.data[8:])
self.assertEqual(chain, other)
chain = Chain(self.c[:])
old_chain = Chain(self.c[:])
del chain[:4]
other = Chain(old_chain.data[4:])
self.assertEqual(chain, other)
chain = Chain(self.c[:])
old_chain = Chain(self.c[:])
del chain[9:]
other = Chain(old_chain.data[:9])
self.assertEqual(chain, other)
def testContains(self):
chain = Chain(self.c[:])
self.assertNotIn("ser", chain)
self.assertIn("lys", chain)
self.assertIn("asp", chain)
def testAdd(self):
texta = "G U G G U C U G A U G A G G C C"
textb = "G G C C G A A A C U C G U A A G A G U C A C C A C"
targeta = texta + Chain(textb)
targetb = Chain(texta) + textb
targetc = Chain(texta) + Chain(textb)
self.assertEqual(targeta, targetc)
self.assertEqual(targetb, targetc)
self.assertEqual(targeta, targetb)
self.assertEqual(len(targeta), len(Chain(texta)) + len(Chain(textb)))
targetd = Chain(texta)
targetd += textb
targete = Chain(texta)
targete += Chain(textb)
self.assertEqual(targetd, targetc)
self.assertEqual(targete, targetb)
class CrystalTestCase(unittest.TestCase):
def setUp(self):
self.crystal = Crystal({"a": "T T G A C T C T C T T A A",
"b": Chain("G A G A G T C A"),
"c": "T T G A C T C T C T T A A",
"d": Chain("G A G A G T C A")
})
def testLen(self):
self.assertEqual(len(self.crystal), len(self.crystal.data))
def testGetItem(self):
self.assertEqual(self.crystal["a"], self.crystal.data["a"])
def testSetItem(self):
target = copy.deepcopy(self.crystal)
e = "MET ALA LEU THR ASN ALA GLN ILE LEU ALA VAL ILE ASP SER"
f = "LEU GLY GLY GLY LEU GLN GLY THR LEU HIS CYS TYR GLU ILE PRO LEU"
target["e"] = e
target["f"] = Chain(f)
self.assertEqual(Chain(e), target["e"])
self.assertEqual(Chain(f), target["f"])
def testDelItem(self):
target = copy.deepcopy(self.crystal)
del target["b"]
self.assertNotIn("b", target.data)
self.assertIn("a", target.data)
self.assertIn("c", target.data)
def testClear(self):
target = copy.deepcopy(self.crystal)
target.clear()
self.assertEqual(len(target.data), 0)
def testKeys(self):
self.assertEqual(list(self.crystal.keys()),
list(self.crystal.data.keys()))
def testValues(self):
self.assertEqual(list(self.crystal.values()),
list(self.crystal.data.values()))
def testItems(self):
self.assertEqual(list(self.crystal.items()),
list(self.crystal.data.items()))
def testHasKey(self):
self.assertIn("b", self.crystal)
self.assertIn("c", self.crystal)
self.assertNotIn("z", self.crystal)
class HeteroTestCase(unittest.TestCase):
def testInit(self):
self.assertRaises(CrystalError, Hetero, "abcd")
self.assertRaises(CrystalError, Hetero, "")
self.assertRaises(CrystalError, Hetero, "A@#")
self.assertRaises(CrystalError, Hetero, [])
self.assertRaises(CrystalError, Hetero, {})
def testLen(self):
bru = Hetero("bru")
self.assertEqual(len(bru), 3)
_14w = Hetero("14w")
self.assertEqual(len(_14w), 3)
a = Hetero("a")
self.assertEqual(len(a), 1)
ga = Hetero("ga")
self.assertEqual(len(ga), 2)
def testEquals(self):
u = Hetero("u")
u1 = Hetero("u")
self.assertEqual(u, u1)
self.assertEqual(u, Hetero("U"))
self.assertNotEqual(u, Hetero("u1"))
self.assertNotEqual(u, Hetero("x"))
gna = Hetero("gna")
self.assertEqual(gna, Hetero("gNA"))
self.assertEqual(gna, Hetero("GnA"))
self.assertNotEqual(gna, Hetero("gnb"))
self.assertNotEqual(gna, Hetero("na"))
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
| true | true |
f7fd5df126bc1afb609fc7078534d1b8c043c1b8 | 367 | py | Python | spiketools/tests/utils/test_base.py | claire98han/SpikeTools | f1cdffd50e2cbdb75961a716425c4665aa930f54 | [
"Apache-2.0"
] | 1 | 2022-03-09T19:40:37.000Z | 2022-03-09T19:40:37.000Z | spiketools/tests/utils/test_base.py | claire98han/SpikeTools | f1cdffd50e2cbdb75961a716425c4665aa930f54 | [
"Apache-2.0"
] | 35 | 2021-09-28T15:13:31.000Z | 2021-11-26T04:38:08.000Z | spiketools/tests/utils/test_base.py | claire98han/SpikeTools | f1cdffd50e2cbdb75961a716425c4665aa930f54 | [
"Apache-2.0"
] | 4 | 2021-09-28T14:56:24.000Z | 2022-03-09T21:00:31.000Z | """Tests for spiketools.utils.base"""
from spiketools.utils.base import *
###################################################################################################
###################################################################################################
def test_flatten():
lsts = [[1, 2], [3, 4]]
assert flatten(lsts) == [1, 2, 3, 4]
| 30.583333 | 99 | 0.27248 |
from spiketools.utils.base import *
| true | true |
f7fd5e66626e04c24eeba472406b4b7174c9665b | 1,848 | py | Python | acme/jax/networks/rescaling.py | ostap-viniavskyi/acme | 8fbae90217557a35e1d773aa63ab80890e799765 | [
"Apache-2.0"
] | 2,650 | 2020-06-01T16:31:25.000Z | 2022-03-31T07:32:41.000Z | acme/jax/networks/rescaling.py | ostap-viniavskyi/acme | 8fbae90217557a35e1d773aa63ab80890e799765 | [
"Apache-2.0"
] | 199 | 2020-06-02T01:09:09.000Z | 2022-03-31T17:11:20.000Z | acme/jax/networks/rescaling.py | ostap-viniavskyi/acme | 8fbae90217557a35e1d773aa63ab80890e799765 | [
"Apache-2.0"
] | 344 | 2020-06-01T16:45:21.000Z | 2022-03-30T11:15:09.000Z | # python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rescaling layers (e.g. to match action specs)."""
import dataclasses
from acme import specs
from jax import lax
import jax.numpy as jnp
@dataclasses.dataclass
class ClipToSpec:
"""Clips inputs to within a BoundedArraySpec."""
spec: specs.BoundedArray
def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray:
return jnp.clip(inputs, self.spec.minimum, self.spec.maximum)
@dataclasses.dataclass
class RescaleToSpec:
"""Rescales inputs in [-1, 1] to match a BoundedArraySpec."""
spec: specs.BoundedArray
def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray:
scale = self.spec.maximum - self.spec.minimum
offset = self.spec.minimum
inputs = 0.5 * (inputs + 1.0) # [0, 1]
output = inputs * scale + offset # [minimum, maximum]
return output
@dataclasses.dataclass
class TanhToSpec:
"""Squashes real-valued inputs to match a BoundedArraySpec."""
spec: specs.BoundedArray
def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray:
scale = self.spec.maximum - self.spec.minimum
offset = self.spec.minimum
inputs = lax.tanh(inputs) # [-1, 1]
inputs = 0.5 * (inputs + 1.0) # [0, 1]
output = inputs * scale + offset # [minimum, maximum]
return output
| 31.322034 | 74 | 0.715368 |
import dataclasses
from acme import specs
from jax import lax
import jax.numpy as jnp
@dataclasses.dataclass
class ClipToSpec:
spec: specs.BoundedArray
def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray:
return jnp.clip(inputs, self.spec.minimum, self.spec.maximum)
@dataclasses.dataclass
class RescaleToSpec:
spec: specs.BoundedArray
def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray:
scale = self.spec.maximum - self.spec.minimum
offset = self.spec.minimum
inputs = 0.5 * (inputs + 1.0)
output = inputs * scale + offset
return output
@dataclasses.dataclass
class TanhToSpec:
spec: specs.BoundedArray
def __call__(self, inputs: jnp.ndarray) -> jnp.ndarray:
scale = self.spec.maximum - self.spec.minimum
offset = self.spec.minimum
inputs = lax.tanh(inputs)
inputs = 0.5 * (inputs + 1.0)
output = inputs * scale + offset
return output
| true | true |
f7fd5f530f5d2ba4fdc361b2b928df78c48f46a2 | 45,510 | py | Python | tensorflow/python/ipu/tests/keras/keras_functional_model_test.py | chenzhengda/tensorflow | 8debb698097670458b5f21d728bc6f734a7b5a53 | [
"Apache-2.0"
] | 74 | 2020-07-06T17:11:39.000Z | 2022-01-28T06:31:28.000Z | tensorflow/python/ipu/tests/keras/keras_functional_model_test.py | chenzhengda/tensorflow | 8debb698097670458b5f21d728bc6f734a7b5a53 | [
"Apache-2.0"
] | 9 | 2020-10-13T23:25:29.000Z | 2022-02-10T06:54:48.000Z | tensorflow/python/ipu/tests/keras/keras_functional_model_test.py | chenzhengda/tensorflow | 8debb698097670458b5f21d728bc6f734a7b5a53 | [
"Apache-2.0"
] | 12 | 2020-07-08T07:27:17.000Z | 2021-12-27T08:54:27.000Z | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for IPU Keras Model"""
import numpy as np
import pva
from tensorflow.python.ipu.config import IPUConfig
from tensorflow.compiler.plugin.poplar.driver.trace_pb2 import IpuTraceEvent
from tensorflow.compiler.plugin.poplar.tests import test_utils as tu
from tensorflow.python import ipu
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
def test_dataset(length=None, batch_size=1, x_val=1.0, y_val=0.2):
constant_d = constant_op.constant(x_val, shape=[32])
constant_l = constant_op.constant(y_val, shape=[2])
ds = dataset_ops.Dataset.from_tensors((constant_d, constant_l))
ds = ds.repeat(length)
ds = ds.batch(batch_size, drop_remainder=True)
return ds
def test_inference_dataset(length=None, batch_size=1, x_val=1.0):
constant_d = constant_op.constant(x_val, shape=[32])
ds = dataset_ops.Dataset.from_tensors(constant_d)
ds = ds.repeat(length)
ds = ds.batch(batch_size, drop_remainder=True)
return ds
def test_dataset_two_input_output(length=None,
batch_size=1,
x_val=1.0,
y_val=0.2,
input_names=None,
target_names=None):
ds = dataset_ops.Dataset.from_tensors(({
input_names[0]:
constant_op.constant(x_val, shape=[32]),
input_names[1]:
constant_op.constant(x_val, shape=[16])
}, {
target_names[0]:
constant_op.constant(y_val, shape=[2]),
target_names[1]:
constant_op.constant(y_val, shape=[1])
}))
ds = ds.repeat(length)
ds = ds.batch(batch_size, drop_remainder=True)
return ds
def test_dataset_two_input_output_np(length=96,
x_val=1.0,
y_val=0.2,
input_names=None,
target_names=None):
inputs = {
input_names[0]: np.ones((length, 32), dtype=np.float32) * x_val,
input_names[1]: np.ones((length, 16), dtype=np.float32) * x_val
}
targets = {
target_names[0]: np.ones((length, 2), dtype=np.float32) * y_val,
target_names[1]: np.ones((length, 1), dtype=np.float32) * y_val
}
return (inputs, targets)
def simple_model(x, layer_sizes, w=None):
assert layer_sizes
init = 'glorot_uniform'
if w:
assert w > 0
init = keras.initializers.Constant(w)
y = keras.layers.Dense(layer_sizes[0],
activation=keras.activations.relu,
kernel_initializer=init)(x)
for n in layer_sizes[1:]:
y = keras.layers.Dense(n,
activation=keras.activations.relu,
kernel_initializer=init)(y)
return y
class BatchCallbackCounter(keras.callbacks.Callback):
def __init__(self):
super(BatchCallbackCounter, self).__init__()
self._count = 0
self._logs = []
def on_batch_end(self, batch, logs=None):
self._logs.append(logs)
self._count = self._count + 1
def count(self):
return self._count
def logs(self):
return self._logs
class IPUModelModelTest(test.TestCase):
@test_util.run_v2_only
def testModelCreation(self):
# Simple single input, single output model.
input_layer = keras.layers.Input(shape=(2))
x = simple_model(input_layer, [2, 4])
y = keras.layers.Activation(keras.activations.relu)(x)
m = keras.Model(inputs=input_layer, outputs=y)
# Verify dims.
self.assertEqual(
m._input_layers[0].get_output_at(0).get_shape().as_list(), # pylint: disable=protected-access
[None, 2])
self.assertEqual(
m._output_layers[0].get_output_at(0).get_shape().as_list(), [None, 4]) # pylint: disable=protected-access
@test_util.run_v2_only
def testModelCreationMultipleInput(self):
# Simple two input, one output model.
input_layer = keras.layers.Input(shape=(2))
input_layer_two = keras.layers.Input(shape=(2))
x = simple_model(input_layer, [2, 4])
xx = simple_model(input_layer_two, [2, 4])
x_con = keras.layers.concatenate([x, xx])
y = keras.layers.Activation(keras.activations.relu)(x_con)
m = keras.Model(inputs=[input_layer, input_layer_two], outputs=y)
# Verify dims.
self.assertEqual(len(m._input_layers), 2) # pylint: disable=protected-access
for d in m._input_layers: # pylint: disable=protected-access
self.assertEqual(d.get_output_at(0).get_shape().as_list(), [None, 2])
self.assertEqual(
m._output_layers[0].get_output_at(0).get_shape().as_list(), [None, 8]) # pylint: disable=protected-access
@test_util.run_v2_only
def testModelCreationMultipleOutput(self):
# Simple one input, two output model.
input_layer = keras.layers.Input(shape=(2))
x = simple_model(input_layer, [2, 4])
y = keras.layers.Activation(keras.activations.tanh)(x)
yy = keras.layers.Activation(keras.activations.sigmoid)(x)
m = keras.Model(inputs=input_layer, outputs=[y, yy])
self.assertEqual(
m._input_layers[0].get_output_at(0).get_shape().as_list(), # pylint: disable=protected-access
[None, 2])
self.assertEqual(len(m._output_layers), 2) # pylint: disable=protected-access
for d in m._output_layers: # pylint: disable=protected-access
self.assertEqual(d.get_output_at(0).get_shape().as_list(), [None, 4])
@test_util.run_v2_only
def testMustCallCompileFit(self):
strategy = ipu.ipu_strategy.IPUStrategyV1()
with strategy.scope():
input_layer = keras.layers.Input(shape=(32))
x = simple_model(input_layer, [8, 8])
m = keras.Model(inputs=input_layer, outputs=x)
with self.assertRaisesRegex(
RuntimeError, "You must compile your model before training/testing"):
m.fit(test_dataset())
@test_util.run_v2_only
def testMustCallCompileEvaluate(self):
strategy = ipu.ipu_strategy.IPUStrategyV1()
with strategy.scope():
input_layer = keras.layers.Input(shape=(32))
x = simple_model(input_layer, [8, 8])
m = keras.Model(inputs=input_layer, outputs=x)
with self.assertRaisesRegex(
RuntimeError, "You must compile your model before training/testing"):
m.evaluate(test_dataset())
@test_util.run_v2_only
def testNeedTupleDatasetFit(self):
strategy = ipu.ipu_strategy.IPUStrategyV1()
with strategy.scope():
input_layer = keras.layers.Input(shape=(32))
x = simple_model(input_layer, [8, 8])
m = keras.Model(inputs=input_layer, outputs=x)
m.compile('sgd', loss='mse')
with self.assertRaisesRegex(ValueError,
r"When providing an infinite dataset"):
m.fit(test_inference_dataset())
@test_util.run_v2_only
def testNeedTupleDatasetEvaluate(self):
strategy = ipu.ipu_strategy.IPUStrategyV1()
with strategy.scope():
input_layer = keras.layers.Input(shape=(32))
x = simple_model(input_layer, [8, 8])
m = keras.Model(inputs=input_layer, outputs=x)
m.compile('sgd', loss='mse')
with self.assertRaisesRegex(ValueError,
r"When providing an infinite dataset"):
m.evaluate(test_inference_dataset())
# @test_util.run_v2_only
def testNeedNonTupleDatasetPredict(self):
strategy = ipu.ipu_strategy.IPUStrategyV1()
with strategy.scope():
input_layer = keras.layers.Input(shape=(32))
x = simple_model(input_layer, [8, 8])
m = keras.Model(inputs=input_layer, outputs=x)
with self.assertRaisesRegex(ValueError,
r"When providing an infinite dataset"):
m.predict(test_dataset())
  @test_util.run_v2_only
  def testUnlimitedDatasetHasNoStepsPerEpoch(self):
    """fit() over an infinite dataset without steps_per_epoch must raise."""
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [8, 8, 2])
      m = keras.Model(inputs=input_layer, outputs=x)
      m.compile('sgd', loss='mse')

      with self.assertRaisesRegex(ValueError,
                                  r"When providing an infinite dataset"):
        m.fit(test_dataset(), epochs=2)
  @test_util.run_v2_only
  def testResultsOneEpochWithTfOptimizerNoAccumulation_CpuMatch(self):
    """One epoch of fit() with a v1 TF optimizer matches the CPU loss."""
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      # w=0.4 gives both models identical constant initial weights.
      x = simple_model(input_layer, [8, 8, 2], w=0.4)
      m = keras.Model(inputs=input_layer, outputs=x)

      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()

      opt = gradient_descent.GradientDescentOptimizer(0.001)
      m.compile(opt, loss='mse')

      # Fit the weights to the dataset
      history = m.fit(test_dataset(length=96))

      # Should be only a loss stored in the history, and it should contain
      # only the single epochs value
      self.assertEqual(list(history.history.keys()), ['loss'])
      self.assertEqual(type(history.history['loss']), list)
      self.assertEqual(len(history.history['loss']), 1)

    # Run the CPU equivalent.
    input_layer = keras.layers.Input(shape=(32))
    x = simple_model(input_layer, [8, 8, 2], w=0.4)
    m_cpu = keras.Model(inputs=input_layer, outputs=x)
    opt_cpu = gradient_descent.GradientDescentOptimizer(0.001)
    m_cpu.compile(opt_cpu, loss='mse')

    # Fit the weights to the dataset
    cpu_loss = m_cpu.fit(test_dataset(length=96)).history['loss'][0]

    # history['loss'] is one loss value per epoch (of which there is 1)
    ipu_loss = history.history['loss'][0]

    self.assertAllClose(ipu_loss, cpu_loss)
  @test_util.run_v2_only
  def testFitWithTensorData(self):
    """fit() accepts in-memory tensor (x, y) data instead of a Dataset."""
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [8, 8, 2], w=0.4)
      m = keras.Model(inputs=input_layer, outputs=x)

      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()

      opt = keras.optimizer_v2.gradient_descent.SGD(learning_rate=0.001)
      m.compile(opt, loss='mse')

      # Input data
      input_x = constant_op.constant(1.0, shape=[72, 32])
      input_y = constant_op.constant(0.2, shape=[72, 2])

      # Fit the weights to the dataset
      history = m.fit(input_x, input_y, batch_size=1)

      # Should be only a loss stored in the history, and it should contain
      # only the single epochs value
      self.assertEqual(list(history.history.keys()), ['loss'])
      self.assertEqual(type(history.history['loss']), list)
      self.assertEqual(len(history.history['loss']), 1)
      self.assertEqual(type(history.history['loss'][0]), float)
  @test_util.run_v2_only
  def testFitWithNumpyData(self):
    """fit() accepts in-memory numpy (x, y) data instead of a Dataset."""
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [8, 8, 2], w=0.4)
      m = keras.Model(inputs=input_layer, outputs=x)

      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()

      opt = keras.optimizer_v2.gradient_descent.SGD(learning_rate=0.001)
      m.compile(opt, loss='mse')

      # Input data
      input_x = np.full([72, 32], 1.0, dtype=np.single)
      input_y = np.full([72, 2], 0.2, dtype=np.single)

      # Fit the weights to the dataset
      history = m.fit(input_x, input_y, batch_size=1)

      # Should be only a loss stored in the history, and it should contain
      # only the single epochs value
      self.assertEqual(list(history.history.keys()), ['loss'])
      self.assertEqual(type(history.history['loss']), list)
      self.assertEqual(len(history.history['loss']), 1)
      self.assertEqual(type(history.history['loss'][0]), float)
  @test_util.run_v2_only
  def testEvalWithNumpyData(self):
    """evaluate() accepts in-memory numpy (x, y) data and returns a float."""
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [8, 8, 2], w=0.4)
      m = keras.Model(inputs=input_layer, outputs=x)

      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()

      opt = keras.optimizer_v2.gradient_descent.SGD(learning_rate=0.001)
      m.compile(opt, loss='mse')

      # Input data
      input_x = np.full([72, 32], 1.0, dtype=np.single)
      input_y = np.full([72, 2], 0.2, dtype=np.single)

      # Evaluate the model on the dataset (single scalar loss expected).
      result = m.evaluate(input_x, input_y, batch_size=1)

      self.assertEqual(type(result), float)
  @test_util.run_v2_only
  def testPredictWithNumpyDataBs1(self):
    """predict() with numpy input at batch_size=1 yields one row per sample."""
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [8, 8, 2], w=0.4)
      m = keras.Model(inputs=input_layer, outputs=x)

      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()

      opt = keras.optimizer_v2.gradient_descent.SGD(learning_rate=0.001)
      m.compile(opt, loss='mse', steps_per_execution=8)

      # Input data
      input_x = np.full([96, 32], 1.0, dtype=np.single)

      # Generate predictions
      result = m.predict(input_x, batch_size=1)
      self.assertEqual(type(result), np.ndarray)
      self.assertEqual(result.shape[0], 96)
      # All inputs are identical, so all rows of the output must match.
      for i, r in enumerate(result):
        self.assertAllEqual(r, result[i - 1])

      # Compare with CPU
      # NOTE(review): this rebuilds a Model over the *same* layer graph, and
      # only the output shape is compared — not the values. Verify intent.
      m = keras.Model(inputs=input_layer, outputs=x)
      cpu_result = m.predict(input_x, batch_size=1)
      self.assertEqual(cpu_result.shape, result.shape)
  @test_util.run_v2_only
  def testFitHistoryWithKerasOptimizer(self):
    """fit() with a Keras v2 optimizer records a single-epoch loss history."""
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [8, 8, 2], w=0.4)
      m = keras.Model(inputs=input_layer, outputs=x)

      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()

      opt = keras.optimizer_v2.gradient_descent.SGD(learning_rate=0.001)
      m.compile(opt, loss='mse')

      # Fit the weights to the dataset
      history = m.fit(test_dataset(length=72))

      # Should be only a loss stored in the history, and it should contain
      # only the single epochs value
      self.assertEqual(list(history.history.keys()), ['loss'])
      self.assertEqual(type(history.history['loss']), list)
      self.assertEqual(len(history.history['loss']), 1)
      self.assertEqual(type(history.history['loss'][0]), float)
  @test_util.run_v2_only
  def testFitHistoryTwoEpochs(self):
    """fit(epochs=2) records exactly one float loss value per epoch."""
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [8, 8, 2], w=0.4)
      m = keras.Model(inputs=input_layer, outputs=x)

      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()

      opt = keras.optimizer_v2.gradient_descent.SGD(learning_rate=0.001)
      m.compile(opt, loss='mse')

      # Fit the weights to the dataset
      history = m.fit(test_dataset(length=72), epochs=2)

      # Should be only a loss stored in the history, and it should contain
      # one value per epoch (two epochs here).
      self.assertEqual(list(history.history.keys()), ['loss'])
      self.assertEqual(type(history.history['loss']), list)
      self.assertEqual(len(history.history['loss']), 2)
      self.assertEqual(type(history.history['loss'][0]), float)
      self.assertEqual(type(history.history['loss'][1]), float)
  @test_util.run_v2_only
  def testFitHistoryStepsPerExecution(self):
    """With steps_per_execution=2, the batch callback fires every 2 steps."""
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [8, 8, 2], w=0.4)
      m = keras.Model(inputs=input_layer, outputs=x)

      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()

      opt = keras.optimizer_v2.gradient_descent.SGD(learning_rate=0.001)
      m.compile(opt, loss='mse', steps_per_execution=2)

      # Check that the callback is called for every two steps due to
      # `steps_per_execution`.
      cb = BatchCallbackCounter()
      m.fit(test_dataset(length=96), callbacks=[cb])
      # 96 samples / 2 steps per execution = 48 callback invocations.
      self.assertEqual(cb.count(), 48)
  @test_util.run_v2_only
  def testFitTwice(self):
    """Repeated fit() calls reuse one compiled executable and keep training."""
    cfg = IPUConfig()
    report_helper = tu.ReportHelper()
    report_helper.set_autoreport_options(cfg, output_execution_profile=True)
    cfg.auto_select_ipus = 1
    cfg.configure_ipu_system()

    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      ds = test_dataset()

      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [16, 8, 2])
      m = keras.Model(inputs=input_layer, outputs=x)

      opt = keras.optimizer_v2.gradient_descent.SGD(learning_rate=0.001)
      m.compile(opt, loss='mse')

      # Fit the weights to the dataset
      history = m.fit(ds, steps_per_epoch=1)
      l = history.history['loss'][0]

      # Record weights
      w_1 = [w.numpy() for w in m.weights]

      # Fit the weights to the dataset
      history = m.fit(ds, steps_per_epoch=1)

      # Loss should be different after second training.
      self.assertTrue(l > history.history['loss'][0])
      w_2 = [w.numpy() for w in m.weights]

      # Weights should be different too.
      for w1, w2 in zip(w_1, w_2):
        self.assertFalse(np.all(w1 == w2))

      # Should have compiled the graph once, and executed twice.
      self.assert_num_reports(report_helper, 1)
      report = pva.openReport(report_helper.find_report())
      self.assert_number_of_executions(report, 2)
      report_helper.clear_reports()

      # Fit the weights with a new dataset
      history = m.fit(test_dataset(), steps_per_epoch=1)

      # Loss should be different after second training.
      self.assertTrue(l > history.history['loss'][0])
      w_3 = [w.numpy() for w in m.weights]

      # Weights should be different too.
      for w2, w3 in zip(w_2, w_3):
        self.assertFalse(np.all(w2 == w3))

      # Don't need to compile the graph again.
      self.assert_num_reports(report_helper, 0)
  @test_util.run_v2_only
  def testFitHistoryStepsPerEpochTwoEpochs(self):
    """fit(steps_per_epoch=144, epochs=2) records one loss per epoch."""
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [8, 8, 2])
      m = keras.Model(inputs=input_layer, outputs=x)

      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()

      opt = keras.optimizer_v2.gradient_descent.SGD(learning_rate=0.001)
      m.compile(opt, loss='mse')

      # Fit the weights to the dataset
      history = m.fit(test_dataset(), steps_per_epoch=144, epochs=2)

      # Should be only a loss stored in the history, and it should contain
      # one value per epoch (two epochs here).
      self.assertEqual(list(history.history.keys()), ['loss'])
      self.assertEqual(type(history.history['loss']), list)
      self.assertEqual(len(history.history['loss']), 2)
      self.assertEqual(type(history.history['loss'][0]), float)
      self.assertEqual(type(history.history['loss'][1]), float)
  @test_util.run_v2_only
  def testFitWithLearningRateDecay(self):
    """LR decay: weights transfer to host only once per epoch (4 epochs)."""
    cfg = IPUConfig()
    cfg.ipu_model.compile_ipu_code = False
    tu.enable_ipu_events(cfg)
    cfg.auto_select_ipus = 1
    cfg.configure_ipu_system()

    report_json = tu.ReportJSON(self, eager_mode=True)

    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      # Clear old reports
      report_json.reset()

      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [8, 8, 2])
      m = keras.Model(inputs=input_layer, outputs=x)

      opt = keras.optimizer_v2.gradient_descent.SGD(learning_rate=0.001,
                                                    decay=0.1)
      m.compile(opt, loss='mse', steps_per_execution=8)

      # Fit the weights to the dataset
      m.fit(test_dataset(length=72), epochs=4)

      # Ensure that we are only downloading the weights at the end of each
      # epoch.
      report_json.parse_log()
      report_json.assert_num_host_to_device_transfer_events(4)
  @test_util.run_v2_only
  def testFitWithExponentialDecayLearningRateSchedule(self):
    """ExponentialDecay schedule: one host/device transfer per epoch."""
    cfg = IPUConfig()
    cfg.ipu_model.compile_ipu_code = False
    tu.enable_ipu_events(cfg)
    cfg.auto_select_ipus = 1
    cfg.configure_ipu_system()

    report_json = tu.ReportJSON(self, eager_mode=True)

    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      # Clear old reports
      report_json.reset()

      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [8, 8, 2])
      m = keras.Model(inputs=input_layer, outputs=x)

      lrs = keras.optimizer_v2.learning_rate_schedule.ExponentialDecay(
          0.001, 4, 0.1, staircase=True)
      opt = keras.optimizer_v2.gradient_descent.SGD(learning_rate=lrs)
      m.compile(opt, loss='mse')

      # Fit the weights to the dataset
      m.fit(test_dataset(length=72), epochs=4)

      # Ensure that we are only downloading the weights at the end of each
      # epoch.
      report_json.parse_log()
      report_json.assert_num_host_to_device_transfer_events(4)
  @test_util.run_v2_only
  def testFitWithPiecewiseConstantDecayLearningRateSchedule(self):
    """PiecewiseConstantDecay schedule: one host/device transfer per epoch."""
    cfg = IPUConfig()
    cfg.ipu_model.compile_ipu_code = False
    tu.enable_ipu_events(cfg)
    cfg.auto_select_ipus = 1
    cfg.configure_ipu_system()

    report_json = tu.ReportJSON(self, eager_mode=True)

    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      # Clear old reports
      report_json.reset()

      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [8, 8, 2])
      m = keras.Model(inputs=input_layer, outputs=x)

      lrs = keras.optimizer_v2.learning_rate_schedule.PiecewiseConstantDecay(
          boundaries=[8, 16], values=[0.001, 0.0005, 0.0001])
      opt = keras.optimizer_v2.gradient_descent.SGD(learning_rate=lrs)
      m.compile(opt, loss='mse')

      # Fit the weights to the dataset
      m.fit(test_dataset(length=72), epochs=4)

      # Ensure that we are only downloading the weights at the end of each
      # epoch.
      report_json.parse_log()
      report_json.assert_num_host_to_device_transfer_events(4)
  @test_util.run_v2_only
  def testFitWithMetrics(self):
    """fit() with metrics records per-epoch 'loss' and 'accuracy' floats."""
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [8, 8, 2])
      m = keras.Model(inputs=input_layer, outputs=x)

      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()

      opt = keras.optimizer_v2.gradient_descent.SGD(learning_rate=0.0001)
      m.compile(opt, loss='mse', metrics=['accuracy'], steps_per_execution=2)

      # Fit the weights to the dataset
      history = m.fit(test_dataset(), steps_per_epoch=2, epochs=2)

      # Both loss and accuracy should be recorded, one value per epoch.
      self.assertEqual(list(history.history.keys()), ['loss', 'accuracy'])
      self.assertEqual(type(history.history['loss']), list)
      self.assertEqual(type(history.history['accuracy']), list)
      self.assertEqual(len(history.history['loss']), 2)
      self.assertEqual(type(history.history['loss'][0]), float)
      self.assertEqual(len(history.history['accuracy']), 2)
      self.assertEqual(type(history.history['loss'][1]), float)
      self.assertEqual(type(history.history['accuracy'][0]), float)
      self.assertEqual(type(history.history['accuracy'][1]), float)
@test_util.run_v2_only
def testEval_CpuMatch(self):
strategy = ipu.ipu_strategy.IPUStrategyV1()
with strategy.scope():
input_layer = keras.layers.Input(shape=(32))
x = simple_model(input_layer, [8, 8, 2], w=0.4)
m = keras.Model(inputs=input_layer, outputs=x)
cfg = IPUConfig()
cfg.auto_select_ipus = 1
cfg.configure_ipu_system()
m.compile("sgd", loss='mse')
# Fit the weights to the dataset
result = m.evaluate(test_dataset(length=96))
input_layer = keras.layers.Input(shape=(32))
x = simple_model(input_layer, [8, 8, 2], w=0.4)
m_cpu = keras.Model(inputs=input_layer, outputs=x)
m_cpu.compile("sgd", loss='mse')
cpu_result = m.evaluate(test_dataset(length=96))
self.assertAllClose(result, cpu_result)
  @test_util.run_v2_only
  def testCallOrder(self):
    # Test which verifies that we can call evaluate/predict before fit.
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [2], w=0.4)
      m = keras.Model(inputs=input_layer, outputs=x)

      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()

      m.compile(optimizer="rmsprop", loss='mse')

      # Call in evaluate -> predict -> fit order; each must succeed.
      m.evaluate(test_dataset(length=96))
      m.predict(test_inference_dataset(length=96))
      m.fit(test_dataset(length=96))

      # No exception.
  @test_util.run_v2_only
  def testPredict_CpuMatch(self):
    """predict() on the IPU matches an identical model predicting on CPU."""
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      # w=0.4 gives both models identical constant initial weights.
      x = simple_model(input_layer, [8, 8, 2], w=0.4)
      m = keras.Model(inputs=input_layer, outputs=x)

      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()

      # Generate predictions
      ipu_out = m.predict(test_inference_dataset(length=96))

    # CPU equivalent model with the same constant initialisation.
    input_layer = keras.layers.Input(shape=(32))
    x = simple_model(input_layer, [8, 8, 2], w=0.4)
    m_cpu = keras.Model(inputs=input_layer, outputs=x)
    cpu_out = m_cpu.predict(test_inference_dataset(length=96))

    self.assertAllClose(cpu_out, ipu_out)
  @test_util.run_v2_only
  def testTrainMultipleInput(self):
    """fit() trains a two-input / two-output model from a dict dataset."""
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_a = keras.layers.Input(shape=(32))
      input_b = keras.layers.Input(shape=(16))

      block_a = simple_model(input_a, [8, 8], w=0.4)
      block_b = simple_model(input_b, [8, 8], w=0.4)

      concat_ab = keras.layers.concatenate([block_a, block_b])

      block_c = simple_model(concat_ab, [32, 2])
      block_d = simple_model(concat_ab, [32, 1])

      m = keras.Model(inputs=[input_a, input_b], outputs=[block_c, block_d])

      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()

      m.compile("sgd", loss=['mse', 'mse'], steps_per_execution=2)

      # Dataset keys must match the model's input/output layer names;
      # partition("/") strips the tensor suffix from the output names.
      ds = test_dataset_two_input_output(
          length=96,
          batch_size=4,
          input_names=[input_a.name, input_b.name],
          target_names=[
              block_c.name.partition("/")[0],
              block_d.name.partition("/")[0]
          ])

      m.fit(ds)
  @test_util.run_v2_only
  def testTrainMultipleInputMap(self):
    """fit() trains a two-input / two-output model from numpy dicts."""
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_a = keras.layers.Input(shape=(32))
      input_b = keras.layers.Input(shape=(16))

      block_a = simple_model(input_a, [8, 8], w=0.4)
      block_b = simple_model(input_b, [8, 8], w=0.4)

      concat_ab = keras.layers.concatenate([block_a, block_b])

      block_c = simple_model(concat_ab, [32, 2])
      block_d = simple_model(concat_ab, [32, 1])

      m = keras.Model(inputs=[input_a, input_b], outputs=[block_c, block_d])

      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()

      m.compile("sgd", loss=['mse', 'mse'], metrics=['accuracy'])

      # Dict keys must match input/output layer names (see partition below).
      ds = test_dataset_two_input_output_np(
          length=96,
          input_names=[input_a.name, input_b.name],
          target_names=[
              block_c.name.partition("/")[0],
              block_d.name.partition("/")[0]
          ])

      m.fit(*ds, batch_size=4)
  @test_util.run_v2_only
  def testPredictNumpyData(self):
    """predict() on numpy input matches the CPU for a single-output model."""
    xs = np.stack([np.ones(32, dtype=np.float32) * i for i in range(48)])

    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      # w=1 gives a deterministic constant initialisation.
      x = simple_model(input_layer, [32, 32, 1], w=1)
      m = keras.Model(inputs=input_layer, outputs=x)

      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()

      ipu_out = m.predict(xs, batch_size=2)

    # CPU
    input_layer = keras.layers.Input(shape=(32))
    x = simple_model(input_layer, [32, 32, 1], w=1)
    m = keras.Model(inputs=input_layer, outputs=x)
    cpu_out = m.predict(xs, batch_size=2)

    self.assertEqual(cpu_out.shape, ipu_out.shape)
    self.assertAllClose(cpu_out, ipu_out)
  @test_util.run_v2_only
  def testPredictNumpyDataTwoOutput(self):
    """predict() matches the CPU when one tensor feeds two model outputs."""
    xs = np.stack([np.ones(32, dtype=np.float32) * i for i in range(48)])

    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [32, 32, 1], w=1)
      # The same tensor is used as both outputs.
      m = keras.Model(inputs=input_layer, outputs=[x, x])

      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()

      ipu_out = m.predict(xs, batch_size=2)

    # CPU
    input_layer = keras.layers.Input(shape=(32))
    x = simple_model(input_layer, [32, 32, 1], w=1)
    m = keras.Model(inputs=input_layer, outputs=[x, x])
    cpu_out = m.predict(xs, batch_size=2)

    for t_cpu, t_ipu in zip(cpu_out, ipu_out):
      self.assertAllClose(t_cpu, t_ipu)
  @test_util.run_v2_only
  def testPredictNumpyData3D(self):
    """predict() matches the CPU when the output is reshaped to rank 4."""
    xs = np.stack([np.ones(32, dtype=np.float32) * i for i in range(48)])

    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [32, 32, 48], w=1)
      # 48 features reshaped to a (4, 4, 3) volume per sample.
      x = keras.layers.Reshape((4, 4, 3))(x)
      m = keras.Model(inputs=input_layer, outputs=x)

      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()

      ipu_out = m.predict(xs, batch_size=2)

    # CPU
    input_layer = keras.layers.Input(shape=(32))
    x = simple_model(input_layer, [32, 32, 48], w=1)
    x = keras.layers.Reshape((4, 4, 3))(x)
    m = keras.Model(inputs=input_layer, outputs=x)
    cpu_out = m.predict(xs, batch_size=2)

    self.assertEqual(cpu_out.shape, ipu_out.shape)
    self.assertAllClose(cpu_out, ipu_out)
  @test_util.run_v2_only
  def testPredictNumpyDataTwoOutput3D(self):
    """predict() matches the CPU for duplicated rank-4 (reshaped) outputs."""
    xs = np.stack([np.ones(32, dtype=np.float32) * i for i in range(48)])

    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [32, 32, 48], w=1)
      x = keras.layers.Reshape((4, 4, 3))(x)
      m = keras.Model(inputs=input_layer, outputs=[x, x])

      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()

      ipu_out = m.predict(xs, batch_size=2)

    # CPU
    xs = np.stack([np.ones(32, dtype=np.float32) * i for i in range(48)])
    input_layer = keras.layers.Input(shape=(32))
    x = simple_model(input_layer, [32, 32, 48], w=1)
    x = keras.layers.Reshape((4, 4, 3))(x)
    m = keras.Model(inputs=input_layer, outputs=[x, x])
    cpu_out = m.predict(xs, batch_size=2)

    self.assertEqual(np.shape(cpu_out), np.shape(ipu_out))
    for t_cpu, t_ipu in zip(cpu_out, ipu_out):
      self.assertAllClose(t_cpu, t_ipu)
  @test_util.run_v2_only
  def testFitVanillaKerasMatch(self):
    """Two epochs of fit() produce the same loss history on IPU and CPU."""
    # IPU Model.
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()

      input_layer = keras.layers.Input(shape=(32))
      # w=1 gives both models identical constant initial weights.
      x = simple_model(input_layer, [32, 32, 1], w=1)
      m = keras.Model(inputs=input_layer, outputs=x)
      m.compile('sgd', 'mse')
      ipu_out = m.fit(test_dataset(length=96), epochs=2)

    # CPU Model.
    input_layer = keras.layers.Input(shape=(32))
    x = simple_model(input_layer, [32, 32, 1], w=1)
    m = keras.Model(inputs=input_layer, outputs=x)
    m.compile('sgd', 'mse')
    cpu_out = m.fit(test_dataset(length=96), epochs=2)

    # Compare.
    self.assertAllClose(ipu_out.history['loss'], cpu_out.history['loss'])
  @test_util.run_v2_only
  def testTrainMultipleInputMultipleOutput(self):
    """3-input / 2-output model: fit() history matches CPU, including the
    per-output loss and accuracy entries."""

    # 3 inputs, 2 outputs.
    def data_fn():
      x1 = np.ones((32), dtype=np.float64)
      x2 = np.ones((32), dtype=np.float64)
      x3 = np.ones((32), dtype=np.float64)
      y1 = np.ones((1), dtype=np.float64)
      y2 = np.ones((1), dtype=np.float64)
      ds_x = dataset_ops.Dataset.from_tensors((x1, x2, x3))
      ds_y = dataset_ops.Dataset.from_tensors((y1, y2))
      ds_xy = dataset_ops.Dataset.zip(
          (ds_x, ds_y)).repeat(32).batch(4, drop_remainder=True)
      return ds_xy

    # Intentional skip from input to middle of model.
    def model_fn():
      input_1 = keras.Input(32)
      input_2 = keras.Input(32)
      input_3 = keras.Input(32)
      init = keras.initializers.Constant(1)
      dense_1 = keras.layers.Dense(16,
                                   kernel_initializer=init,
                                   activation=keras.activations.relu)(input_1)
      dense_2 = keras.layers.Dense(16,
                                   kernel_initializer=init,
                                   activation=keras.activations.relu)(input_2)
      # input_3 bypasses the dense layers and joins at the concat.
      cat = keras.layers.Concatenate()([dense_1, dense_2, input_3])
      dense_3 = keras.layers.Dense(1,
                                   kernel_initializer=init,
                                   activation=keras.activations.relu,
                                   name="output1")(cat)
      dense_4 = keras.layers.Dense(1,
                                   kernel_initializer=init,
                                   activation=keras.activations.relu,
                                   name="output2")(cat)
      return ((input_1, input_2, input_3), (dense_3, dense_4))

    # IPU Test.
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()

      model = keras.Model(*model_fn())
      model.compile('sgd', ['mse', 'mse'], metrics=['accuracy'])
      out = model.fit(data_fn(), steps_per_epoch=1, epochs=2)

    # CPU Test.
    cpu_model = keras.Model(*model_fn())
    cpu_model.compile('sgd', ['mse', 'mse'], metrics=['accuracy'])
    cpu_out = cpu_model.fit(data_fn(), steps_per_epoch=1, epochs=2)

    # Comparison.
    self.assertEqual(len(out.history), len(cpu_out.history))

    # Check per output loss and metrics exist.
    self.assertTrue("output1_loss" in out.history)
    self.assertTrue("output2_loss" in out.history)
    self.assertTrue("output1_accuracy" in out.history)
    self.assertTrue("output2_accuracy" in out.history)

    for key in out.history:
      self.assertAllClose(out.history[key], cpu_out.history[key])
  @test_util.run_v2_only
  def testNestedClasses(self):
    """Functional model built from a nested keras.Model subclass and a custom
    Layer (with nested output structure) trains identically on IPU and CPU."""
    init = keras.initializers.Constant(1)

    # 3 inputs, 2 outputs.
    def data_fn():
      x1 = np.ones((64, 32), dtype=np.float32)
      x2 = np.ones((64, 32), dtype=np.float32)
      x3 = np.ones((64, 32), dtype=np.float32)
      y1 = np.ones((64, 1), dtype=np.float32)
      y2 = np.ones((64, 3), dtype=np.float32)
      return (x1, x2, x3), (y1, y2)

    # pylint: disable=abstract-method
    class MyDenseModel(keras.Model):
      """Applies an independent Dense layer to each of two inputs."""
      def __init__(self, units):
        super().__init__()
        self.dense1 = keras.layers.Dense(units,
                                         kernel_initializer=init,
                                         activation=keras.activations.relu)
        self.dense2 = keras.layers.Dense(units,
                                         kernel_initializer=init,
                                         activation=keras.activations.softmax)

      # pylint: disable=arguments-differ
      def call(self, in0, in1):
        x = self.dense1(in0)
        return x, self.dense2(in1)

    class MyLayer(keras.layers.Layer):
      """Concatenates inputs and emits a nested ((dense1,), dense2) output."""
      def __init__(self):
        super().__init__()
        self.concat = keras.layers.Concatenate()
        self.dense1 = keras.layers.Dense(1,
                                         kernel_initializer=init,
                                         activation=keras.activations.relu)
        self.dense2 = keras.layers.Dense(3,
                                         kernel_initializer=init,
                                         activation=keras.activations.softmax)

      # pylint: disable=arguments-differ
      def call(self, inputs):
        cat = self.concat(inputs)
        return ((self.dense1(cat),), self.dense2(cat))

    def model_fn():
      input_1 = keras.Input(32)
      input_2 = keras.Input(32)
      input_3 = keras.Input(32)
      dense_1, dense_2 = MyDenseModel(16)(input_1, input_2)
      output = MyLayer()([dense_1, dense_2, input_3])
      # Flatten the nested layer output into two model outputs.
      return ((input_1, input_2, input_3), ((output[0][0], output[1])))

    # IPU Test.
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()

      model = keras.Model(*model_fn())
      model.compile('sgd', ['mse', 'mse'])
      out = model.fit(*data_fn(), batch_size=4)

    # CPU Test.
    cpu_model = keras.Model(*model_fn())
    cpu_model.compile('sgd', ['mse', 'mse'])
    cpu_out = cpu_model.fit(*data_fn(), batch_size=4)

    # Comparison.
    self.assertEqual(np.shape(cpu_out), np.shape(out))
    self.assertAllClose(out.history['loss'], cpu_out.history['loss'])
  @test_util.run_v2_only
  def testPredictMultipleOutput(self):
    """predict() on a 3-input / 2-output model matches the CPU result."""
    def predict_input_fn():
      x1 = np.ones((64, 32), dtype=np.float32)
      x2 = np.ones((64, 32), dtype=np.float32)
      x3 = np.ones((64, 32), dtype=np.float32)
      return (x1, x2, x3)

    # Intentional skip from input to middle of model.
    def model_fn():
      input_1 = keras.Input(32)
      input_2 = keras.Input(32)
      input_3 = keras.Input(32)
      init = keras.initializers.Constant(1)
      dense_1 = keras.layers.Dense(16,
                                   kernel_initializer=init,
                                   activation=keras.activations.relu)(input_1)
      dense_2 = keras.layers.Dense(16,
                                   kernel_initializer=init,
                                   activation=keras.activations.relu)(input_2)
      cat = keras.layers.Concatenate()([dense_1, dense_2, input_3])
      dense_3 = keras.layers.Dense(1,
                                   kernel_initializer=init,
                                   activation=keras.activations.relu)(cat)
      dense_4 = keras.layers.Dense(1,
                                   kernel_initializer=init,
                                   activation=keras.activations.relu)(cat)
      return ((input_1, input_2, input_3), ((dense_3, dense_4)))

    # IPU Test.
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()

      model = keras.Model(*model_fn())
      model.compile('sgd', ['mse', 'mse'])
      ipu_predict_out = model.predict(predict_input_fn(), batch_size=4)

    # CPU Test.
    cpu_model = keras.Model(*model_fn())
    cpu_model.compile('sgd', ['mse', 'mse'])
    cpu_predict_out = cpu_model.predict(predict_input_fn(), batch_size=4)

    # Comparison.
    self.assertAllClose(cpu_predict_out, ipu_predict_out)
  @test_util.run_v2_only
  def testPredictMultipleOutputDifferentShapes(self):
    """predict() with nested outputs of differing widths matches the CPU."""
    def predict_input_fn():
      x1 = np.ones((64, 32), dtype=np.float32)
      x2 = np.ones((64, 32), dtype=np.float32)
      x3 = np.ones((64, 32), dtype=np.float32)
      return (x1, x2, x3)

    # Intentional skip from input to middle of model.
    def model_fn():
      input_1 = keras.Input(32)
      input_2 = keras.Input(32)
      input_3 = keras.Input(32)
      init = keras.initializers.Constant(1)
      dense_1 = keras.layers.Dense(16,
                                   kernel_initializer=init,
                                   activation=keras.activations.relu)(input_1)
      dense_2 = keras.layers.Dense(16,
                                   kernel_initializer=init,
                                   activation=keras.activations.relu)(input_2)
      cat = keras.layers.Concatenate()([dense_1, dense_2, input_3])
      dense_3 = keras.layers.Dense(1,
                                   kernel_initializer=init,
                                   activation=keras.activations.relu)(cat)
      dense_4 = keras.layers.Dense(2,
                                   kernel_initializer=init,
                                   activation=keras.activations.relu)(cat)
      dense_5 = keras.layers.Dense(2,
                                   kernel_initializer=init,
                                   activation=keras.activations.relu)(cat)
      # Nested output structure: (width-1, (width-2, width-2)).
      return ((input_1, input_2, input_3), (dense_3, (dense_4, dense_5)))

    # CPU Test.
    cpu_model = keras.Model(*model_fn())
    cpu_model.compile('sgd', ['mse', 'mse', 'mse'])
    cpu_predict_out = cpu_model.predict(predict_input_fn(), batch_size=4)

    # IPU Test.
    cfg = IPUConfig()
    cfg.auto_select_ipus = 1
    cfg.configure_ipu_system()

    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      model = keras.Model(*model_fn())
      model.compile('sgd', ['mse', 'mse', 'mse'])
      ipu_predict_out = model.predict(predict_input_fn(), batch_size=4)

    self.assertAllClose(cpu_predict_out, ipu_predict_out)
  @test_util.run_v2_only
  def testAutocast_ComplexDatasetStructure(self):
    """float64 dataset elements are auto-cast for fit/predict/evaluate on a
    multi-input, multi-output model (no exceptions expected)."""
    base_layer_utils.enable_v2_dtype_behavior()

    def f():
      input_1 = keras.Input(32)
      input_2 = keras.Input(32)
      input_3 = keras.Input(32)
      init = keras.initializers.Constant(1)
      dense_1 = keras.layers.Dense(16,
                                   kernel_initializer=init,
                                   activation=keras.activations.relu)(input_1)
      dense_2 = keras.layers.Dense(16,
                                   kernel_initializer=init,
                                   activation=keras.activations.relu)(input_2)
      cat = keras.layers.Concatenate()([dense_1, dense_2, input_3])
      dense_3 = keras.layers.Dense(1,
                                   kernel_initializer=init,
                                   activation=keras.activations.relu)(cat)
      dense_4 = keras.layers.Dense(1,
                                   kernel_initializer=init,
                                   activation=keras.activations.relu)(cat)
      return ((input_1, input_2, input_3), (dense_3, dense_4))

    cfg = IPUConfig()
    cfg.auto_select_ipus = 1
    cfg.configure_ipu_system()

    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      m = keras.Model(*f())
      opt = keras.optimizer_v2.gradient_descent.SGD(learning_rate=0.001)
      m.compile(opt, loss='mse')

      # Input data — deliberately float64 to exercise the autocast path.
      x1 = np.ones((32), dtype=np.float64)
      x2 = np.ones((32), dtype=np.float64)
      x3 = np.ones((32), dtype=np.float64)
      y1 = np.ones((1), dtype=np.float64)
      y2 = np.ones((1), dtype=np.float64)
      ds_x = dataset_ops.Dataset.from_tensors((x1, x2, x3))
      ds_y = dataset_ops.Dataset.from_tensors((y1, y2))
      ds_xy = dataset_ops.Dataset.zip(
          (ds_x, ds_y)).repeat(32).batch(4, drop_remainder=True)
      ds_x_tuple = dataset_ops.Dataset.zip(
          (ds_x,)).repeat(32).batch(4, drop_remainder=True)

      m.fit(ds_xy)
      m.predict(ds_x_tuple)
      m.evaluate(ds_xy)

      # No exceptions thrown
  @test_util.run_v2_only
  def testUint8(self):
    """A uint8 dataset can be fed to predict() (cast to fp16 inside the
    model); identity-weighted Dense reproduces the inputs 0..29."""
    dataset = dataset_ops.Dataset.from_tensor_slices(np.array(range(30)))
    # Double batch(1) yields shape [1, 1] elements to match the Input layer.
    dataset = dataset.map(lambda x: math_ops.cast(x, dtype=np.uint8)).batch(
        1, drop_remainder=True).batch(1, drop_remainder=True)

    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      i = keras.layers.Input(shape=[1])
      ci = keras.layers.Lambda(lambda x: math_ops.cast(x, dtype=np.float16))(i)
      o = keras.layers.Dense(1, kernel_initializer='ones')(ci)

      m = keras.Model(i, o)

      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()

      m.compile(steps_per_execution=10)
      output = m.predict(dataset)
      self.assertAllClose(output.flatten(), range(30))
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
| 34.846861 | 114 | 0.641266 |
import numpy as np
import pva
from tensorflow.python.ipu.config import IPUConfig
from tensorflow.compiler.plugin.poplar.driver.trace_pb2 import IpuTraceEvent
from tensorflow.compiler.plugin.poplar.tests import test_utils as tu
from tensorflow.python import ipu
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
def test_dataset(length=None, batch_size=1, x_val=1.0, y_val=0.2):
  """Repeating (features, labels) dataset of constant 32-wide x / 2-wide y.

  `length=None` repeats forever; `drop_remainder=True` keeps batches full.
  """
  features = constant_op.constant(x_val, shape=[32])
  labels = constant_op.constant(y_val, shape=[2])
  return (dataset_ops.Dataset.from_tensors(
      (features, labels)).repeat(length).batch(batch_size,
                                               drop_remainder=True))
def test_inference_dataset(length=None, batch_size=1, x_val=1.0):
  """Repeating features-only dataset of constant 32-wide vectors.

  `length=None` repeats forever; `drop_remainder=True` keeps batches full.
  """
  features = constant_op.constant(x_val, shape=[32])
  return (dataset_ops.Dataset.from_tensors(features).repeat(length).batch(
      batch_size, drop_remainder=True))
def test_dataset_two_input_output(length=None,
                                  batch_size=1,
                                  x_val=1.0,
                                  y_val=0.2,
                                  input_names=None,
                                  target_names=None):
  """Build a dict-keyed dataset for a two-input / two-output model.

  Inputs are 32- and 16-wide constant vectors of `x_val` keyed by
  `input_names`; targets are 2- and 1-wide constant vectors of `y_val`
  keyed by `target_names`.
  """
  inputs = {
      input_names[0]: constant_op.constant(x_val, shape=[32]),
      input_names[1]: constant_op.constant(x_val, shape=[16]),
  }
  targets = {
      target_names[0]: constant_op.constant(y_val, shape=[2]),
      target_names[1]: constant_op.constant(y_val, shape=[1]),
  }
  ds = dataset_ops.Dataset.from_tensors((inputs, targets))
  return ds.repeat(length).batch(batch_size, drop_remainder=True)
def test_dataset_two_input_output_np(length=96,
x_val=1.0,
y_val=0.2,
input_names=None,
target_names=None):
inputs = {
input_names[0]: np.ones((length, 32), dtype=np.float32) * x_val,
input_names[1]: np.ones((length, 16), dtype=np.float32) * x_val
}
targets = {
target_names[0]: np.ones((length, 2), dtype=np.float32) * y_val,
target_names[1]: np.ones((length, 1), dtype=np.float32) * y_val
}
return (inputs, targets)
def simple_model(x, layer_sizes, w=None):
  """Stack Dense+relu layers of the given sizes on top of tensor `x`.

  When `w` is truthy it must be positive and every kernel is initialized
  to the constant `w` (giving deterministic weights for CPU-vs-IPU
  comparisons); otherwise 'glorot_uniform' is used.
  """
  assert layer_sizes
  init = 'glorot_uniform'
  if w:
    assert w > 0
    init = keras.initializers.Constant(w)
  out = x
  for size in layer_sizes:
    out = keras.layers.Dense(size,
                             activation=keras.activations.relu,
                             kernel_initializer=init)(out)
  return out
class BatchCallbackCounter(keras.callbacks.Callback):
  """Keras callback that counts batch-end events and records their logs."""
  def __init__(self):
    super().__init__()
    self._count = 0
    self._logs = []

  def on_batch_end(self, batch, logs=None):
    # Capture the per-batch logs dict and bump the invocation counter.
    self._logs.append(logs)
    self._count += 1

  def count(self):
    """Number of on_batch_end invocations observed."""
    return self._count

  def logs(self):
    """List of logs dicts captured at each batch end."""
    return self._logs
class IPUModelModelTest(test.TestCase):
  @test_util.run_v2_only
  def testModelCreation(self):
    """A functional Model reports the expected input/output layer shapes."""
    input_layer = keras.layers.Input(shape=(2))
    x = simple_model(input_layer, [2, 4])
    y = keras.layers.Activation(keras.activations.relu)(x)
    m = keras.Model(inputs=input_layer, outputs=y)
    self.assertEqual(
        m._input_layers[0].get_output_at(0).get_shape().as_list(),
        [None, 2])
    self.assertEqual(
        m._output_layers[0].get_output_at(0).get_shape().as_list(), [None, 4])
  @test_util.run_v2_only
  def testModelCreationMultipleInput(self):
    """Two-input model: both inputs are [None, 2]; concat output is [None, 8]."""
    input_layer = keras.layers.Input(shape=(2))
    input_layer_two = keras.layers.Input(shape=(2))
    x = simple_model(input_layer, [2, 4])
    xx = simple_model(input_layer_two, [2, 4])
    x_con = keras.layers.concatenate([x, xx])
    y = keras.layers.Activation(keras.activations.relu)(x_con)
    m = keras.Model(inputs=[input_layer, input_layer_two], outputs=y)
    self.assertEqual(len(m._input_layers), 2)
    for d in m._input_layers:
      self.assertEqual(d.get_output_at(0).get_shape().as_list(), [None, 2])
    self.assertEqual(
        m._output_layers[0].get_output_at(0).get_shape().as_list(), [None, 8])
  @test_util.run_v2_only
  def testModelCreationMultipleOutput(self):
    """Two-output model: both output layers report shape [None, 4]."""
    input_layer = keras.layers.Input(shape=(2))
    x = simple_model(input_layer, [2, 4])
    y = keras.layers.Activation(keras.activations.tanh)(x)
    yy = keras.layers.Activation(keras.activations.sigmoid)(x)
    m = keras.Model(inputs=input_layer, outputs=[y, yy])
    self.assertEqual(
        m._input_layers[0].get_output_at(0).get_shape().as_list(),
        [None, 2])
    self.assertEqual(len(m._output_layers), 2)
    for d in m._output_layers:
      self.assertEqual(d.get_output_at(0).get_shape().as_list(), [None, 4])
  @test_util.run_v2_only
  def testMustCallCompileFit(self):
    """fit() before compile() raises RuntimeError."""
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [8, 8])
      m = keras.Model(inputs=input_layer, outputs=x)
      with self.assertRaisesRegex(
          RuntimeError, "You must compile your model before training/testing"):
        m.fit(test_dataset())
  @test_util.run_v2_only
  def testMustCallCompileEvaluate(self):
    """evaluate() before compile() raises RuntimeError."""
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [8, 8])
      m = keras.Model(inputs=input_layer, outputs=x)
      with self.assertRaisesRegex(
          RuntimeError, "You must compile your model before training/testing"):
        m.evaluate(test_dataset())
  @test_util.run_v2_only
  def testNeedTupleDatasetFit(self):
    """fit() on an infinite features-only dataset raises ValueError."""
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [8, 8])
      m = keras.Model(inputs=input_layer, outputs=x)
      m.compile('sgd', loss='mse')
      with self.assertRaisesRegex(ValueError,
                                  r"When providing an infinite dataset"):
        m.fit(test_inference_dataset())
  @test_util.run_v2_only
  def testNeedTupleDatasetEvaluate(self):
    """evaluate() on an infinite features-only dataset raises ValueError."""
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [8, 8])
      m = keras.Model(inputs=input_layer, outputs=x)
      m.compile('sgd', loss='mse')
      with self.assertRaisesRegex(ValueError,
                                  r"When providing an infinite dataset"):
        m.evaluate(test_inference_dataset())
def testNeedNonTupleDatasetPredict(self):
strategy = ipu.ipu_strategy.IPUStrategyV1()
with strategy.scope():
input_layer = keras.layers.Input(shape=(32))
x = simple_model(input_layer, [8, 8])
m = keras.Model(inputs=input_layer, outputs=x)
with self.assertRaisesRegex(ValueError,
r"When providing an infinite dataset"):
m.predict(test_dataset())
  @test_util.run_v2_only
  def testUnlimitedDatasetHasNoStepsPerEpoch(self):
    """fit() over an infinite dataset without steps_per_epoch raises ValueError."""
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [8, 8, 2])
      m = keras.Model(inputs=input_layer, outputs=x)
      m.compile('sgd', loss='mse')
      with self.assertRaisesRegex(ValueError,
                                  r"When providing an infinite dataset"):
        m.fit(test_dataset(), epochs=2)
  @test_util.run_v2_only
  def testResultsOneEpochWithTfOptimizerNoAccumulation_CpuMatch(self):
    """One-epoch fit with a v1 TF optimizer matches the CPU loss."""
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      # w=0.4 gives deterministic constant weights for the comparison.
      x = simple_model(input_layer, [8, 8, 2], w=0.4)
      m = keras.Model(inputs=input_layer, outputs=x)
      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()
      opt = gradient_descent.GradientDescentOptimizer(0.001)
      m.compile(opt, loss='mse')
      history = m.fit(test_dataset(length=96))
      self.assertEqual(list(history.history.keys()), ['loss'])
      self.assertEqual(type(history.history['loss']), list)
      self.assertEqual(len(history.history['loss']), 1)
    # Identical model (same constant initializer) trained on the CPU.
    # NOTE(review): indentation was lost in extraction; this section is
    # rendered outside the strategy scope — confirm against the original.
    input_layer = keras.layers.Input(shape=(32))
    x = simple_model(input_layer, [8, 8, 2], w=0.4)
    m_cpu = keras.Model(inputs=input_layer, outputs=x)
    opt_cpu = gradient_descent.GradientDescentOptimizer(0.001)
    m_cpu.compile(opt_cpu, loss='mse')
    cpu_loss = m_cpu.fit(test_dataset(length=96)).history['loss'][0]
    ipu_loss = history.history['loss'][0]
    self.assertAllClose(ipu_loss, cpu_loss)
  @test_util.run_v2_only
  def testFitWithTensorData(self):
    """fit() accepts in-memory tensors with an explicit batch_size."""
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [8, 8, 2], w=0.4)
      m = keras.Model(inputs=input_layer, outputs=x)
      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()
      opt = keras.optimizer_v2.gradient_descent.SGD(learning_rate=0.001)
      m.compile(opt, loss='mse')
      input_x = constant_op.constant(1.0, shape=[72, 32])
      input_y = constant_op.constant(0.2, shape=[72, 2])
      history = m.fit(input_x, input_y, batch_size=1)
      # One epoch -> a single float loss entry in the history.
      self.assertEqual(list(history.history.keys()), ['loss'])
      self.assertEqual(type(history.history['loss']), list)
      self.assertEqual(len(history.history['loss']), 1)
      self.assertEqual(type(history.history['loss'][0]), float)
  @test_util.run_v2_only
  def testFitWithNumpyData(self):
    """fit() accepts numpy arrays with an explicit batch_size."""
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [8, 8, 2], w=0.4)
      m = keras.Model(inputs=input_layer, outputs=x)
      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()
      opt = keras.optimizer_v2.gradient_descent.SGD(learning_rate=0.001)
      m.compile(opt, loss='mse')
      input_x = np.full([72, 32], 1.0, dtype=np.single)
      input_y = np.full([72, 2], 0.2, dtype=np.single)
      history = m.fit(input_x, input_y, batch_size=1)
      # One epoch -> a single float loss entry in the history.
      self.assertEqual(list(history.history.keys()), ['loss'])
      self.assertEqual(type(history.history['loss']), list)
      self.assertEqual(len(history.history['loss']), 1)
      self.assertEqual(type(history.history['loss'][0]), float)
  @test_util.run_v2_only
  def testEvalWithNumpyData(self):
    """evaluate() with numpy input returns a scalar float loss."""
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [8, 8, 2], w=0.4)
      m = keras.Model(inputs=input_layer, outputs=x)
      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()
      opt = keras.optimizer_v2.gradient_descent.SGD(learning_rate=0.001)
      m.compile(opt, loss='mse')
      input_x = np.full([72, 32], 1.0, dtype=np.single)
      input_y = np.full([72, 2], 0.2, dtype=np.single)
      result = m.evaluate(input_x, input_y, batch_size=1)
      self.assertEqual(type(result), float)
  @test_util.run_v2_only
  def testPredictWithNumpyDataBs1(self):
    """predict() with numpy input, batch size 1 and steps_per_execution=8."""
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [8, 8, 2], w=0.4)
      m = keras.Model(inputs=input_layer, outputs=x)
      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()
      opt = keras.optimizer_v2.gradient_descent.SGD(learning_rate=0.001)
      m.compile(opt, loss='mse', steps_per_execution=8)
      input_x = np.full([96, 32], 1.0, dtype=np.single)
      result = m.predict(input_x, batch_size=1)
      self.assertEqual(type(result), np.ndarray)
      self.assertEqual(result.shape[0], 96)
      # All inputs are identical, so every prediction row must match its
      # predecessor (i - 1 wraps to the last row for i == 0, also equal).
      for i, r in enumerate(result):
        self.assertAllEqual(r, result[i - 1])
      # NOTE(review): rebuilds a Model from the same graph tensors as a
      # CPU shape reference — presumably meant to run outside the IPU
      # scope; confirm placement against the original file.
      m = keras.Model(inputs=input_layer, outputs=x)
      cpu_result = m.predict(input_x, batch_size=1)
      self.assertEqual(cpu_result.shape, result.shape)
  @test_util.run_v2_only
  def testFitHistoryWithKerasOptimizer(self):
    """One-epoch fit with a Keras v2 optimizer yields one float loss entry."""
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [8, 8, 2], w=0.4)
      m = keras.Model(inputs=input_layer, outputs=x)
      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()
      opt = keras.optimizer_v2.gradient_descent.SGD(learning_rate=0.001)
      m.compile(opt, loss='mse')
      history = m.fit(test_dataset(length=72))
      self.assertEqual(list(history.history.keys()), ['loss'])
      self.assertEqual(type(history.history['loss']), list)
      self.assertEqual(len(history.history['loss']), 1)
      self.assertEqual(type(history.history['loss'][0]), float)
  @test_util.run_v2_only
  def testFitHistoryTwoEpochs(self):
    """Two-epoch fit records one float loss entry per epoch."""
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [8, 8, 2], w=0.4)
      m = keras.Model(inputs=input_layer, outputs=x)
      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()
      opt = keras.optimizer_v2.gradient_descent.SGD(learning_rate=0.001)
      m.compile(opt, loss='mse')
      history = m.fit(test_dataset(length=72), epochs=2)
      self.assertEqual(list(history.history.keys()), ['loss'])
      self.assertEqual(type(history.history['loss']), list)
      self.assertEqual(len(history.history['loss']), 2)
      self.assertEqual(type(history.history['loss'][0]), float)
      self.assertEqual(type(history.history['loss'][1]), float)
  @test_util.run_v2_only
  def testFitHistoryStepsPerExecution(self):
    """96 dataset steps with steps_per_execution=2 -> 48 batch callbacks."""
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [8, 8, 2], w=0.4)
      m = keras.Model(inputs=input_layer, outputs=x)
      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()
      opt = keras.optimizer_v2.gradient_descent.SGD(learning_rate=0.001)
      m.compile(opt, loss='mse', steps_per_execution=2)
      cb = BatchCallbackCounter()
      m.fit(test_dataset(length=96), callbacks=[cb])
      # Each on-device execution covers 2 steps, so 96 / 2 callbacks.
      self.assertEqual(cb.count(), 48)
@test_util.run_v2_only
def testFitTwice(self):
cfg = IPUConfig()
report_helper = tu.ReportHelper()
report_helper.set_autoreport_options(cfg, output_execution_profile=True)
cfg.auto_select_ipus = 1
cfg.configure_ipu_system()
strategy = ipu.ipu_strategy.IPUStrategyV1()
with strategy.scope():
ds = test_dataset()
input_layer = keras.layers.Input(shape=(32))
x = simple_model(input_layer, [16, 8, 2])
m = keras.Model(inputs=input_layer, outputs=x)
opt = keras.optimizer_v2.gradient_descent.SGD(learning_rate=0.001)
m.compile(opt, loss='mse')
history = m.fit(ds, steps_per_epoch=1)
l = history.history['loss'][0]
numpy() for w in m.weights]
history = m.fit(ds, steps_per_epoch=1)
self.assertTrue(l > history.history['loss'][0])
w_2 = [w.numpy() for w in m.weights]
for w1, w2 in zip(w_1, w_2):
self.assertFalse(np.all(w1 == w2))
self.assert_num_reports(report_helper, 1)
report = pva.openReport(report_helper.find_report())
self.assert_number_of_executions(report, 2)
report_helper.clear_reports()
history = m.fit(test_dataset(), steps_per_epoch=1)
self.assertTrue(l > history.history['loss'][0])
w_3 = [w.numpy() for w in m.weights]
for w2, w3 in zip(w_2, w_3):
self.assertFalse(np.all(w2 == w3))
self.assert_num_reports(report_helper, 0)
  @test_util.run_v2_only
  def testFitHistoryStepsPerEpochTwoEpochs(self):
    """steps_per_epoch over an infinite dataset: one loss entry per epoch."""
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [8, 8, 2])
      m = keras.Model(inputs=input_layer, outputs=x)
      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()
      opt = keras.optimizer_v2.gradient_descent.SGD(learning_rate=0.001)
      m.compile(opt, loss='mse')
      # Fit the weights to the dataset
      history = m.fit(test_dataset(), steps_per_epoch=144, epochs=2)
      # Only 'loss' should be recorded, with one value per epoch.
      self.assertEqual(list(history.history.keys()), ['loss'])
      self.assertEqual(type(history.history['loss']), list)
      self.assertEqual(len(history.history['loss']), 2)
      self.assertEqual(type(history.history['loss'][0]), float)
      self.assertEqual(type(history.history['loss'][1]), float)
  @test_util.run_v2_only
  def testFitWithLearningRateDecay(self):
    """SGD with decay: weights transfer to device once per epoch (4 epochs)."""
    cfg = IPUConfig()
    cfg.ipu_model.compile_ipu_code = False
    tu.enable_ipu_events(cfg)
    cfg.auto_select_ipus = 1
    cfg.configure_ipu_system()
    report_json = tu.ReportJSON(self, eager_mode=True)
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      # Clear old reports
      report_json.reset()
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [8, 8, 2])
      m = keras.Model(inputs=input_layer, outputs=x)
      opt = keras.optimizer_v2.gradient_descent.SGD(learning_rate=0.001,
                                                    decay=0.1)
      m.compile(opt, loss='mse', steps_per_execution=8)
      # Fit the weights to the dataset
      m.fit(test_dataset(length=72), epochs=4)
      # Ensure that we are only downloading the weights at the end of each
      # epoch.
      report_json.parse_log()
      report_json.assert_num_host_to_device_transfer_events(4)
  @test_util.run_v2_only
  def testFitWithExponentialDecayLearningRateSchedule(self):
    """ExponentialDecay LR schedule: one host-to-device transfer per epoch."""
    cfg = IPUConfig()
    cfg.ipu_model.compile_ipu_code = False
    tu.enable_ipu_events(cfg)
    cfg.auto_select_ipus = 1
    cfg.configure_ipu_system()
    report_json = tu.ReportJSON(self, eager_mode=True)
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      # Clear old reports
      report_json.reset()
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [8, 8, 2])
      m = keras.Model(inputs=input_layer, outputs=x)
      lrs = keras.optimizer_v2.learning_rate_schedule.ExponentialDecay(
          0.001, 4, 0.1, staircase=True)
      opt = keras.optimizer_v2.gradient_descent.SGD(learning_rate=lrs)
      m.compile(opt, loss='mse')
      # Fit the weights to the dataset
      m.fit(test_dataset(length=72), epochs=4)
      # Ensure that we are only downloading the weights at the end of each
      # epoch.
      report_json.parse_log()
      report_json.assert_num_host_to_device_transfer_events(4)
  @test_util.run_v2_only
  def testFitWithPiecewiseConstantDecayLearningRateSchedule(self):
    """PiecewiseConstantDecay LR schedule: one transfer event per epoch."""
    cfg = IPUConfig()
    cfg.ipu_model.compile_ipu_code = False
    tu.enable_ipu_events(cfg)
    cfg.auto_select_ipus = 1
    cfg.configure_ipu_system()
    report_json = tu.ReportJSON(self, eager_mode=True)
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      # Clear old reports
      report_json.reset()
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [8, 8, 2])
      m = keras.Model(inputs=input_layer, outputs=x)
      lrs = keras.optimizer_v2.learning_rate_schedule.PiecewiseConstantDecay(
          boundaries=[8, 16], values=[0.001, 0.0005, 0.0001])
      opt = keras.optimizer_v2.gradient_descent.SGD(learning_rate=lrs)
      m.compile(opt, loss='mse')
      # Fit the weights to the dataset
      m.fit(test_dataset(length=72), epochs=4)
      # Ensure that we are only downloading the weights at the end of each
      # epoch.
      report_json.parse_log()
      report_json.assert_num_host_to_device_transfer_events(4)
  @test_util.run_v2_only
  def testFitWithMetrics(self):
    """fit() with metrics records per-epoch 'loss' and 'accuracy' floats."""
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [8, 8, 2])
      m = keras.Model(inputs=input_layer, outputs=x)
      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()
      opt = keras.optimizer_v2.gradient_descent.SGD(learning_rate=0.0001)
      m.compile(opt, loss='mse', metrics=['accuracy'], steps_per_execution=2)
      # Fit the weights to the dataset
      history = m.fit(test_dataset(), steps_per_epoch=2, epochs=2)
      self.assertEqual(list(history.history.keys()), ['loss', 'accuracy'])
      self.assertEqual(type(history.history['loss']), list)
      self.assertEqual(type(history.history['accuracy']), list)
      self.assertEqual(len(history.history['loss']), 2)
      self.assertEqual(type(history.history['loss'][0]), float)
      self.assertEqual(len(history.history['accuracy']), 2)
      self.assertEqual(type(history.history['loss'][1]), float)
      self.assertEqual(type(history.history['accuracy'][0]), float)
      self.assertEqual(type(history.history['accuracy'][1]), float)
@test_util.run_v2_only
def testEval_CpuMatch(self):
strategy = ipu.ipu_strategy.IPUStrategyV1()
with strategy.scope():
input_layer = keras.layers.Input(shape=(32))
x = simple_model(input_layer, [8, 8, 2], w=0.4)
m = keras.Model(inputs=input_layer, outputs=x)
cfg = IPUConfig()
cfg.auto_select_ipus = 1
cfg.configure_ipu_system()
m.compile("sgd", loss='mse')
# Fit the weights to the dataset
result = m.evaluate(test_dataset(length=96))
input_layer = keras.layers.Input(shape=(32))
x = simple_model(input_layer, [8, 8, 2], w=0.4)
m_cpu = keras.Model(inputs=input_layer, outputs=x)
m_cpu.compile("sgd", loss='mse')
cpu_result = m.evaluate(test_dataset(length=96))
self.assertAllClose(result, cpu_result)
  @test_util.run_v2_only
  def testCallOrder(self):
    # Test which verifies that we can call evaluate/predict before fit.
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [2], w=0.4)
      m = keras.Model(inputs=input_layer, outputs=x)
      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()
      m.compile(optimizer="rmsprop", loss='mse')
      # Call in reverse of the usual order: evaluate, predict, then fit.
      m.evaluate(test_dataset(length=96))
      m.predict(test_inference_dataset(length=96))
      m.fit(test_dataset(length=96))
      # No exception.
  @test_util.run_v2_only
  def testPredict_CpuMatch(self):
    """predict() (no compile needed) on the IPU matches the CPU output."""
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      # w=0.4 gives deterministic constant weights for the comparison.
      x = simple_model(input_layer, [8, 8, 2], w=0.4)
      m = keras.Model(inputs=input_layer, outputs=x)
      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()
      # Generate predictions
      ipu_out = m.predict(test_inference_dataset(length=96))
    # Identical model (same constant initializer) predicted on the CPU.
    input_layer = keras.layers.Input(shape=(32))
    x = simple_model(input_layer, [8, 8, 2], w=0.4)
    m_cpu = keras.Model(inputs=input_layer, outputs=x)
    cpu_out = m_cpu.predict(test_inference_dataset(length=96))
    self.assertAllClose(cpu_out, ipu_out)
  @test_util.run_v2_only
  def testTrainMultipleInput(self):
    """fit() on a two-input / two-output model with a dict-keyed dataset."""
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_a = keras.layers.Input(shape=(32))
      input_b = keras.layers.Input(shape=(16))
      block_a = simple_model(input_a, [8, 8], w=0.4)
      block_b = simple_model(input_b, [8, 8], w=0.4)
      concat_ab = keras.layers.concatenate([block_a, block_b])
      block_c = simple_model(concat_ab, [32, 2])
      block_d = simple_model(concat_ab, [32, 1])
      m = keras.Model(inputs=[input_a, input_b], outputs=[block_c, block_d])
      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()
      m.compile("sgd", loss=['mse', 'mse'], steps_per_execution=2)
      # Dataset dict keys must match the model's input/output layer names;
      # output names are the op-name prefix before the first '/'.
      ds = test_dataset_two_input_output(
          length=96,
          batch_size=4,
          input_names=[input_a.name, input_b.name],
          target_names=[
              block_c.name.partition("/")[0],
              block_d.name.partition("/")[0]
          ])
      m.fit(ds)
  @test_util.run_v2_only
  def testTrainMultipleInputMap(self):
    """fit() on a two-input / two-output model with numpy dict inputs."""
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_a = keras.layers.Input(shape=(32))
      input_b = keras.layers.Input(shape=(16))
      block_a = simple_model(input_a, [8, 8], w=0.4)
      block_b = simple_model(input_b, [8, 8], w=0.4)
      concat_ab = keras.layers.concatenate([block_a, block_b])
      block_c = simple_model(concat_ab, [32, 2])
      block_d = simple_model(concat_ab, [32, 1])
      m = keras.Model(inputs=[input_a, input_b], outputs=[block_c, block_d])
      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()
      m.compile("sgd", loss=['mse', 'mse'], metrics=['accuracy'])
      # Numpy dict keys must match input/output layer names; output names
      # are the op-name prefix before the first '/'.
      ds = test_dataset_two_input_output_np(
          length=96,
          input_names=[input_a.name, input_b.name],
          target_names=[
              block_c.name.partition("/")[0],
              block_d.name.partition("/")[0]
          ])
      m.fit(*ds, batch_size=4)
  @test_util.run_v2_only
  def testPredictNumpyData(self):
    """predict() on numpy rows (one distinct value per row) matches CPU."""
    xs = np.stack([np.ones(32, dtype=np.float32) * i for i in range(48)])
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [32, 32, 1], w=1)
      m = keras.Model(inputs=input_layer, outputs=x)
      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()
      ipu_out = m.predict(xs, batch_size=2)
    # CPU
    input_layer = keras.layers.Input(shape=(32))
    x = simple_model(input_layer, [32, 32, 1], w=1)
    m = keras.Model(inputs=input_layer, outputs=x)
    cpu_out = m.predict(xs, batch_size=2)
    self.assertEqual(cpu_out.shape, ipu_out.shape)
    self.assertAllClose(cpu_out, ipu_out)
  @test_util.run_v2_only
  def testPredictNumpyDataTwoOutput(self):
    """predict() with the same tensor used as both outputs matches CPU."""
    xs = np.stack([np.ones(32, dtype=np.float32) * i for i in range(48)])
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [32, 32, 1], w=1)
      m = keras.Model(inputs=input_layer, outputs=[x, x])
      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()
      ipu_out = m.predict(xs, batch_size=2)
    # CPU
    input_layer = keras.layers.Input(shape=(32))
    x = simple_model(input_layer, [32, 32, 1], w=1)
    m = keras.Model(inputs=input_layer, outputs=[x, x])
    cpu_out = m.predict(xs, batch_size=2)
    for t_cpu, t_ipu in zip(cpu_out, ipu_out):
      self.assertAllClose(t_cpu, t_ipu)
  @test_util.run_v2_only
  def testPredictNumpyData3D(self):
    """predict() with a Reshape to rank-4 output (4, 4, 3) matches CPU."""
    xs = np.stack([np.ones(32, dtype=np.float32) * i for i in range(48)])
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [32, 32, 48], w=1)
      x = keras.layers.Reshape((4, 4, 3))(x)
      m = keras.Model(inputs=input_layer, outputs=x)
      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()
      ipu_out = m.predict(xs, batch_size=2)
    # CPU
    input_layer = keras.layers.Input(shape=(32))
    x = simple_model(input_layer, [32, 32, 48], w=1)
    x = keras.layers.Reshape((4, 4, 3))(x)
    m = keras.Model(inputs=input_layer, outputs=x)
    cpu_out = m.predict(xs, batch_size=2)
    self.assertEqual(cpu_out.shape, ipu_out.shape)
    self.assertAllClose(cpu_out, ipu_out)
  @test_util.run_v2_only
  def testPredictNumpyDataTwoOutput3D(self):
    """predict() with two identical rank-4 (4, 4, 3) outputs matches CPU."""
    xs = np.stack([np.ones(32, dtype=np.float32) * i for i in range(48)])
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      input_layer = keras.layers.Input(shape=(32))
      x = simple_model(input_layer, [32, 32, 48], w=1)
      x = keras.layers.Reshape((4, 4, 3))(x)
      m = keras.Model(inputs=input_layer, outputs=[x, x])
      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()
      ipu_out = m.predict(xs, batch_size=2)
    # CPU
    xs = np.stack([np.ones(32, dtype=np.float32) * i for i in range(48)])
    input_layer = keras.layers.Input(shape=(32))
    x = simple_model(input_layer, [32, 32, 48], w=1)
    x = keras.layers.Reshape((4, 4, 3))(x)
    m = keras.Model(inputs=input_layer, outputs=[x, x])
    cpu_out = m.predict(xs, batch_size=2)
    self.assertEqual(np.shape(cpu_out), np.shape(ipu_out))
    for t_cpu, t_ipu in zip(cpu_out, ipu_out):
      self.assertAllClose(t_cpu, t_ipu)
  @test_util.run_v2_only
  def testFitVanillaKerasMatch(self):
    """Two-epoch fit() loss history on the IPU matches vanilla Keras on CPU."""
    # IPU Model.
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()
      input_layer = keras.layers.Input(shape=(32))
      # w=1 gives deterministic constant weights for the comparison.
      x = simple_model(input_layer, [32, 32, 1], w=1)
      m = keras.Model(inputs=input_layer, outputs=x)
      m.compile('sgd', 'mse')
      ipu_out = m.fit(test_dataset(length=96), epochs=2)
    # CPU Model.
    input_layer = keras.layers.Input(shape=(32))
    x = simple_model(input_layer, [32, 32, 1], w=1)
    m = keras.Model(inputs=input_layer, outputs=x)
    m.compile('sgd', 'mse')
    cpu_out = m.fit(test_dataset(length=96), epochs=2)
    # Compare.
    self.assertAllClose(ipu_out.history['loss'], cpu_out.history['loss'])
  @test_util.run_v2_only
  def testTrainMultipleInputMultipleOutput(self):
    """3-input / 2-output fit() history matches CPU, incl. per-output keys."""
    # 3 inputs, 2 outputs.
    def data_fn():
      # Constant float64 example, zipped into ((x1,x2,x3), (y1,y2)) batches.
      x1 = np.ones((32), dtype=np.float64)
      x2 = np.ones((32), dtype=np.float64)
      x3 = np.ones((32), dtype=np.float64)
      y1 = np.ones((1), dtype=np.float64)
      y2 = np.ones((1), dtype=np.float64)
      ds_x = dataset_ops.Dataset.from_tensors((x1, x2, x3))
      ds_y = dataset_ops.Dataset.from_tensors((y1, y2))
      ds_xy = dataset_ops.Dataset.zip(
          (ds_x, ds_y)).repeat(32).batch(4, drop_remainder=True)
      return ds_xy

    # Intentional skip from input to middle of model.
    def model_fn():
      input_1 = keras.Input(32)
      input_2 = keras.Input(32)
      input_3 = keras.Input(32)
      init = keras.initializers.Constant(1)
      dense_1 = keras.layers.Dense(16,
                                   kernel_initializer=init,
                                   activation=keras.activations.relu)(input_1)
      dense_2 = keras.layers.Dense(16,
                                   kernel_initializer=init,
                                   activation=keras.activations.relu)(input_2)
      cat = keras.layers.Concatenate()([dense_1, dense_2, input_3])
      dense_3 = keras.layers.Dense(1,
                                   kernel_initializer=init,
                                   activation=keras.activations.relu,
                                   name="output1")(cat)
      dense_4 = keras.layers.Dense(1,
                                   kernel_initializer=init,
                                   activation=keras.activations.relu,
                                   name="output2")(cat)
      return ((input_1, input_2, input_3), (dense_3, dense_4))

    # IPU Test.
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()
      model = keras.Model(*model_fn())
      model.compile('sgd', ['mse', 'mse'], metrics=['accuracy'])
      out = model.fit(data_fn(), steps_per_epoch=1, epochs=2)
    # CPU Test.
    cpu_model = keras.Model(*model_fn())
    cpu_model.compile('sgd', ['mse', 'mse'], metrics=['accuracy'])
    cpu_out = cpu_model.fit(data_fn(), steps_per_epoch=1, epochs=2)
    # Comparison.
    self.assertEqual(len(out.history), len(cpu_out.history))
    # Check per output loss and metrics exist.
    self.assertTrue("output1_loss" in out.history)
    self.assertTrue("output2_loss" in out.history)
    self.assertTrue("output1_accuracy" in out.history)
    self.assertTrue("output2_accuracy" in out.history)
    for key in out.history:
      self.assertAllClose(out.history[key], cpu_out.history[key])
  @test_util.run_v2_only
  def testNestedClasses(self):
    """Nested subclassed Model/Layer inside a functional Model matches CPU."""
    init = keras.initializers.Constant(1)

    # 3 inputs, 2 outputs.
    def data_fn():
      x1 = np.ones((64, 32), dtype=np.float32)
      x2 = np.ones((64, 32), dtype=np.float32)
      x3 = np.ones((64, 32), dtype=np.float32)
      y1 = np.ones((64, 1), dtype=np.float32)
      y2 = np.ones((64, 3), dtype=np.float32)
      return (x1, x2, x3), (y1, y2)

    # pylint: disable=abstract-method
    class MyDenseModel(keras.Model):
      # Subclassed Model taking two inputs and producing two outputs.
      def __init__(self, units):
        super().__init__()
        self.dense1 = keras.layers.Dense(units,
                                         kernel_initializer=init,
                                         activation=keras.activations.relu)
        self.dense2 = keras.layers.Dense(units,
                                         kernel_initializer=init,
                                         activation=keras.activations.softmax)

      # pylint: disable=arguments-differ
      def call(self, in0, in1):
        x = self.dense1(in0)
        return x, self.dense2(in1)

    class MyLayer(keras.layers.Layer):
      # Custom layer returning a nested ((tensor,), tensor) structure.
      def __init__(self):
        super().__init__()
        self.concat = keras.layers.Concatenate()
        self.dense1 = keras.layers.Dense(1,
                                         kernel_initializer=init,
                                         activation=keras.activations.relu)
        self.dense2 = keras.layers.Dense(3,
                                         kernel_initializer=init,
                                         activation=keras.activations.softmax)

      # pylint: disable=arguments-differ
      def call(self, inputs):
        cat = self.concat(inputs)
        return ((self.dense1(cat),), self.dense2(cat))

    def model_fn():
      input_1 = keras.Input(32)
      input_2 = keras.Input(32)
      input_3 = keras.Input(32)
      dense_1, dense_2 = MyDenseModel(16)(input_1, input_2)
      output = MyLayer()([dense_1, dense_2, input_3])
      return ((input_1, input_2, input_3), ((output[0][0], output[1])))

    # IPU Test.
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()
      model = keras.Model(*model_fn())
      model.compile('sgd', ['mse', 'mse'])
      out = model.fit(*data_fn(), batch_size=4)
    # CPU Test.
    cpu_model = keras.Model(*model_fn())
    cpu_model.compile('sgd', ['mse', 'mse'])
    cpu_out = cpu_model.fit(*data_fn(), batch_size=4)
    # Comparison.
    self.assertEqual(np.shape(cpu_out), np.shape(out))
    self.assertAllClose(out.history['loss'], cpu_out.history['loss'])
  @test_util.run_v2_only
  def testPredictMultipleOutput(self):
    """predict() on a 3-input / 2-output functional model matches CPU."""
    def predict_input_fn():
      x1 = np.ones((64, 32), dtype=np.float32)
      x2 = np.ones((64, 32), dtype=np.float32)
      x3 = np.ones((64, 32), dtype=np.float32)
      return (x1, x2, x3)

    # Intentional skip from input to middle of model.
    def model_fn():
      input_1 = keras.Input(32)
      input_2 = keras.Input(32)
      input_3 = keras.Input(32)
      init = keras.initializers.Constant(1)
      dense_1 = keras.layers.Dense(16,
                                   kernel_initializer=init,
                                   activation=keras.activations.relu)(input_1)
      dense_2 = keras.layers.Dense(16,
                                   kernel_initializer=init,
                                   activation=keras.activations.relu)(input_2)
      cat = keras.layers.Concatenate()([dense_1, dense_2, input_3])
      dense_3 = keras.layers.Dense(1,
                                   kernel_initializer=init,
                                   activation=keras.activations.relu)(cat)
      dense_4 = keras.layers.Dense(1,
                                   kernel_initializer=init,
                                   activation=keras.activations.relu)(cat)
      return ((input_1, input_2, input_3), ((dense_3, dense_4)))

    # IPU Test.
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      cfg = IPUConfig()
      cfg.auto_select_ipus = 1
      cfg.configure_ipu_system()
      model = keras.Model(*model_fn())
      model.compile('sgd', ['mse', 'mse'])
      ipu_predict_out = model.predict(predict_input_fn(), batch_size=4)
    # CPU Test.
    cpu_model = keras.Model(*model_fn())
    cpu_model.compile('sgd', ['mse', 'mse'])
    cpu_predict_out = cpu_model.predict(predict_input_fn(), batch_size=4)
    # Comparison.
    self.assertAllClose(cpu_predict_out, ipu_predict_out)
  @test_util.run_v2_only
  def testPredictMultipleOutputDifferentShapes(self):
    """predict() with nested outputs of different widths matches CPU."""
    def predict_input_fn():
      x1 = np.ones((64, 32), dtype=np.float32)
      x2 = np.ones((64, 32), dtype=np.float32)
      x3 = np.ones((64, 32), dtype=np.float32)
      return (x1, x2, x3)

    # Intentional skip from input to middle of model.
    def model_fn():
      input_1 = keras.Input(32)
      input_2 = keras.Input(32)
      input_3 = keras.Input(32)
      init = keras.initializers.Constant(1)
      dense_1 = keras.layers.Dense(16,
                                   kernel_initializer=init,
                                   activation=keras.activations.relu)(input_1)
      dense_2 = keras.layers.Dense(16,
                                   kernel_initializer=init,
                                   activation=keras.activations.relu)(input_2)
      cat = keras.layers.Concatenate()([dense_1, dense_2, input_3])
      dense_3 = keras.layers.Dense(1,
                                   kernel_initializer=init,
                                   activation=keras.activations.relu)(cat)
      dense_4 = keras.layers.Dense(2,
                                   kernel_initializer=init,
                                   activation=keras.activations.relu)(cat)
      dense_5 = keras.layers.Dense(2,
                                   kernel_initializer=init,
                                   activation=keras.activations.relu)(cat)
      # Outputs are nested: (dense_3, (dense_4, dense_5)).
      return ((input_1, input_2, input_3), (dense_3, (dense_4, dense_5)))

    # CPU Test.
    cpu_model = keras.Model(*model_fn())
    cpu_model.compile('sgd', ['mse', 'mse', 'mse'])
    cpu_predict_out = cpu_model.predict(predict_input_fn(), batch_size=4)
    # IPU Test.
    cfg = IPUConfig()
    cfg.auto_select_ipus = 1
    cfg.configure_ipu_system()
    strategy = ipu.ipu_strategy.IPUStrategyV1()
    with strategy.scope():
      model = keras.Model(*model_fn())
      model.compile('sgd', ['mse', 'mse', 'mse'])
      ipu_predict_out = model.predict(predict_input_fn(), batch_size=4)
    self.assertAllClose(cpu_predict_out, ipu_predict_out)
@test_util.run_v2_only
def testAutocast_ComplexDatasetStructure(self):
  """fit/predict/evaluate must auto-cast float64 dataset elements in a
  nested (tuple-of-tuples) dataset structure without raising."""
  # Enable V2 dtype behaviour so float64 inputs are cast to the layer
  # dtype automatically.
  base_layer_utils.enable_v2_dtype_behavior()

  def f():
    input_1 = keras.Input(32)
    input_2 = keras.Input(32)
    input_3 = keras.Input(32)

    init = keras.initializers.Constant(1)

    dense_1 = keras.layers.Dense(16,
                                 kernel_initializer=init,
                                 activation=keras.activations.relu)(input_1)
    dense_2 = keras.layers.Dense(16,
                                 kernel_initializer=init,
                                 activation=keras.activations.relu)(input_2)

    # input_3 bypasses the dense layers and joins at the concat.
    cat = keras.layers.Concatenate()([dense_1, dense_2, input_3])

    dense_3 = keras.layers.Dense(1,
                                 kernel_initializer=init,
                                 activation=keras.activations.relu)(cat)
    dense_4 = keras.layers.Dense(1,
                                 kernel_initializer=init,
                                 activation=keras.activations.relu)(cat)

    return ((input_1, input_2, input_3), (dense_3, dense_4))

  cfg = IPUConfig()
  cfg.auto_select_ipus = 1
  cfg.configure_ipu_system()

  strategy = ipu.ipu_strategy.IPUStrategyV1()
  with strategy.scope():
    m = keras.Model(*f())
    opt = keras.optimizer_v2.gradient_descent.SGD(learning_rate=0.001)
    m.compile(opt, loss='mse')

    # Input data: deliberately float64 to exercise the autocast path.
    x1 = np.ones((32), dtype=np.float64)
    x2 = np.ones((32), dtype=np.float64)
    x3 = np.ones((32), dtype=np.float64)
    y1 = np.ones((1), dtype=np.float64)
    y2 = np.ones((1), dtype=np.float64)

    ds_x = dataset_ops.Dataset.from_tensors((x1, x2, x3))
    ds_y = dataset_ops.Dataset.from_tensors((y1, y2))
    # Zipped (inputs, targets) structure for fit/evaluate; inputs-only
    # one-element tuple structure for predict.
    ds_xy = dataset_ops.Dataset.zip(
        (ds_x, ds_y)).repeat(32).batch(4, drop_remainder=True)
    ds_x_tuple = dataset_ops.Dataset.zip(
        (ds_x,)).repeat(32).batch(4, drop_remainder=True)

    m.fit(ds_xy)
    m.predict(ds_x_tuple)
    m.evaluate(ds_xy)
    # No exceptions thrown
@test_util.run_v2_only
def testUint8(self):
  """A uint8 dataset must flow through an IPU model that casts it to
  float16; with all-ones weights the output reproduces the input."""
  dataset = dataset_ops.Dataset.from_tensor_slices(np.array(range(30)))
  # Double batch(1): yields shape [1, 1] elements (batch of one sample).
  dataset = dataset.map(lambda x: math_ops.cast(x, dtype=np.uint8)).batch(
      1, drop_remainder=True).batch(1, drop_remainder=True)

  strategy = ipu.ipu_strategy.IPUStrategyV1()
  with strategy.scope():
    i = keras.layers.Input(shape=[1])
    # Cast on-device before the Dense layer, since uint8 is not a
    # supported compute dtype for Dense.
    ci = keras.layers.Lambda(lambda x: math_ops.cast(x, dtype=np.float16))(i)
    o = keras.layers.Dense(1, kernel_initializer='ones')(ci)
    m = keras.Model(i, o)

    cfg = IPUConfig()
    cfg.auto_select_ipus = 1
    cfg.configure_ipu_system()

    m.compile(steps_per_execution=10)
    output = m.predict(dataset)

  # ones-kernel Dense of a single value is the identity, so the model
  # should echo 0..29.
  self.assertAllClose(output.flatten(), range(30))
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  test.main()
| true | true |
f7fd60b8107b557db3c0795c315dc838075afca8 | 3,036 | py | Python | eppy/function_helpers.py | lymereJ/eppy | beef781a61cc50b4567f11e3fa767c466a654e17 | [
"MIT"
] | 1 | 2019-01-06T14:16:24.000Z | 2019-01-06T14:16:24.000Z | eppy/function_helpers.py | samuelduchesne/eppy | beef781a61cc50b4567f11e3fa767c466a654e17 | [
"MIT"
] | null | null | null | eppy/function_helpers.py | samuelduchesne/eppy | beef781a61cc50b4567f11e3fa767c466a654e17 | [
"MIT"
] | null | null | null | # Copyright (c) 2012 Santosh Philip
# =======================================================================
# Distributed under the MIT License.
# (See accompanying file LICENSE or copy at
# http://opensource.org/licenses/MIT)
# =======================================================================
"""helper functions for the functions called by bunchdt"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from six.moves import zip_longest
import itertools
from eppy.constructions import thermal_properties
from eppy.geometry import surface as g_surface
import eppy.fanpower
def grouper(num, iterable, fillvalue=None):
    """Collect data into fixed-length chunks or blocks.

    grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx
    """
    # The same iterator repeated `num` times makes zip_longest pull
    # consecutive items into each tuple.
    chunks = [iter(iterable)] * num
    return zip_longest(*chunks, fillvalue=fillvalue)
def getcoords(ddtt):
    """Return the surface's vertices as a list of 3-tuples."""
    # Coordinates start right after the 'Number_of_Vertices' field.
    first_coord = ddtt.objls.index('Number_of_Vertices') + 1
    return list(grouper(3, ddtt.obj[first_coord:]))
def area(ddtt):
    """area of the surface"""
    return g_surface.area(getcoords(ddtt))

def height(ddtt):
    """height of the surface"""
    return g_surface.height(getcoords(ddtt))

def width(ddtt):
    """width of the surface"""
    return g_surface.width(getcoords(ddtt))

def azimuth(ddtt):
    """azimuth of the surface"""
    return g_surface.azimuth(getcoords(ddtt))

def tilt(ddtt):
    """tilt of the surface"""
    return g_surface.tilt(getcoords(ddtt))
def buildingname(ddtt):
    """Return the Name of the IDF's first BUILDING object."""
    first_building = ddtt.theidf.idfobjects['BUILDING'][0]
    return first_building.Name
def zonesurfaces(ddtt):
    """Return a list of surfaces that belong to the zone."""
    return ddtt.getreferingobjs(
        fields=[u'Zone_Name', ],
        iddgroups=[u'Thermal Zones and Surfaces', ])

def subsurfaces(ddtt):
    """Return a list of subsurfaces that refer to this building surface."""
    return ddtt.getreferingobjs(
        fields=[u'Building_Surface_Name', ],
        iddgroups=[u'Thermal Zones and Surfaces', ])
def rvalue(ddtt):
    """R-value of the construction or material (delegates to thermal_properties)."""
    return thermal_properties.rvalue(ddtt)

def ufactor(ddtt):
    """U-factor of the construction or material (delegates to thermal_properties)."""
    return thermal_properties.ufactor(ddtt)

def ufactor_ip(ddtt):
    """U-factor in IP units (delegates to thermal_properties)."""
    return thermal_properties.ufactor_ip(ddtt)

def rvalue_ip(ddtt):
    """R-value in IP units (delegates to thermal_properties)."""
    return thermal_properties.rvalue_ip(ddtt)

def heatcapacity(ddtt):
    """Heat capacity of the construction or material (delegates to thermal_properties)."""
    return thermal_properties.heatcapacity(ddtt)
def fanpower_bhp(ddtt):
    """return fanpower in bhp (delegates to eppy.fanpower)"""
    return eppy.fanpower.fanpower_bhp(ddtt)

def fanpower_watts(ddtt):
    """return fanpower in watts (delegates to eppy.fanpower)"""
    return eppy.fanpower.fanpower_watts(ddtt)
def fan_maxcfm(ddtt):
"""return the Maximum_Flow_Rate in cfm"""
return eppy.fanpower.fan_maxcfm(ddtt) | 29.764706 | 73 | 0.674572 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from six.moves import zip_longest
import itertools
from eppy.constructions import thermal_properties
from eppy.geometry import surface as g_surface
import eppy.fanpower
def grouper(num, iterable, fillvalue=None):
args = [iter(iterable)] * num
return zip_longest(fillvalue=fillvalue, *args)
def getcoords(ddtt):
n_vertices_index = ddtt.objls.index('Number_of_Vertices')
first_x = n_vertices_index + 1
pts = ddtt.obj[first_x:]
return list(grouper(3, pts))
def area(ddtt):
coords = getcoords(ddtt)
return g_surface.area(coords)
def height(ddtt):
coords = getcoords(ddtt)
return g_surface.height(coords)
def width(ddtt):
coords = getcoords(ddtt)
return g_surface.width(coords)
def azimuth(ddtt):
coords = getcoords(ddtt)
return g_surface.azimuth(coords)
def tilt(ddtt):
coords = getcoords(ddtt)
return g_surface.tilt(coords)
def buildingname(ddtt):
idf = ddtt.theidf
building = idf.idfobjects['building'.upper()][0]
return building.Name
def zonesurfaces(ddtt):
kwargs = {'fields':[u'Zone_Name', ],
'iddgroups':[u'Thermal Zones and Surfaces', ]}
return ddtt.getreferingobjs(**kwargs)
def subsurfaces(ddtt):
kwargs = {'fields':[u'Building_Surface_Name', ],
'iddgroups':[u'Thermal Zones and Surfaces', ]}
return ddtt.getreferingobjs(**kwargs)
def rvalue(ddtt):
return thermal_properties.rvalue(ddtt)
def ufactor(ddtt):
return thermal_properties.ufactor(ddtt)
def ufactor_ip(ddtt):
return thermal_properties.ufactor_ip(ddtt)
def rvalue_ip(ddtt):
return thermal_properties.rvalue_ip(ddtt)
def heatcapacity(ddtt):
return thermal_properties.heatcapacity(ddtt)
def fanpower_bhp(ddtt):
return eppy.fanpower.fanpower_bhp(ddtt)
def fanpower_watts(ddtt):
return eppy.fanpower.fanpower_watts(ddtt)
def fan_maxcfm(ddtt):
return eppy.fanpower.fan_maxcfm(ddtt) | true | true |
f7fd60c721ef6a9fb663b35e862869b1e6506048 | 13,799 | py | Python | pox/host_tracker/host_tracker.py | brenocg29/TP1RedesInteligentes | 3b73b3567089f9eb2e475ec8402113bf8803bb59 | [
"Apache-2.0"
] | 11 | 2019-03-02T20:39:34.000Z | 2021-09-02T19:47:38.000Z | pox/host_tracker/host_tracker.py | brenocg29/TP1RedesInteligentes | 3b73b3567089f9eb2e475ec8402113bf8803bb59 | [
"Apache-2.0"
] | 29 | 2019-01-17T15:44:48.000Z | 2021-06-02T00:19:40.000Z | OFCONTROLLERS/pox/pox/host_tracker/host_tracker.py | ViniGarcia/NIEP | 5cdf779795b9248e1bbc12195479083475f3edab | [
"MIT"
] | 11 | 2019-01-28T05:00:55.000Z | 2021-11-12T03:08:32.000Z | # Copyright 2011 Dorgival Guedes
# Copyright 2013 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tracks host location and configuration
Keep track of hosts in the network, where they are and how they are
configured (at least MAC/IP addresses).
For the time being, it keeps tables with the information; later, it should
transfer that information to Topology and handle just the actual
discovery/update of host information.
Timer configuration can be changed when needed (e.g., for debugging) using
the launch facility (check timeoutSec dict and PingCtrl.pingLim).
You can set various timeouts from the commandline. Names and defaults:
arpAware=60*2 Quiet ARP-responding entries are pinged after this
arpSilent=60*20 This is for quiet entries not known to answer ARP
arpReply=4 Time to wait for an ARP reply before retrial
timerInterval=5 Seconds between timer routine activations
entryMove=60 Minimum expected time to move a physical entry
Good values for testing:
--arpAware=15 --arpSilent=45 --arpReply=1 --entryMove=4
You can also specify how many ARP pings we try before deciding it failed:
--pingLim=2
"""
from pox.core import core
from pox.lib.addresses import EthAddr
from pox.lib.packet.ethernet import ethernet
from pox.lib.packet.ipv4 import ipv4
from pox.lib.packet.arp import arp
from pox.lib.recoco import Timer
from pox.lib.revent import Event, EventHalt
import pox.openflow.libopenflow_01 as of
import pox.openflow.discovery as discovery
from pox.lib.revent.revent import *
import time
import pox
log = core.getLogger()
# Times (in seconds) to use for different timeouts:
timeoutSec = dict(
  arpAware=60*2,   # Quiet ARP-responding entries are pinged after this
  arpSilent=60*20, # This is for quiet entries not known to answer ARP
  arpReply=4,      # Time to wait for an ARP reply before retrial
  timerInterval=5, # Seconds between timer routine activations
  entryMove=60     # Minimum expected time to move a physical entry
  )

# Address to send ARP pings from.
# The particular one here is just an arbitrary locally administered address.
DEFAULT_ARP_PING_SRC_MAC = '02:00:00:00:be:ef'
class HostEvent (Event):
  """
  Event when hosts join, leave, or move within the network
  """
  def __init__ (self, entry, new_dpid = None, new_port = None, join = False,
                leave = False, move = False):
    super(HostEvent,self).__init__()
    self.entry = entry  # The MacEntry this event is about
    self.join = join
    self.leave = leave
    self.move = move

    # Exactly one of join/leave/move must be set.
    assert sum(1 for x in [join,leave,move] if x) == 1

    # You can alter these and they'll change where we think it goes...
    self._new_dpid = new_dpid
    self._new_port = new_port

    #TODO: Allow us to cancel add/removes

  @property
  def new_dpid (self):
    """
    New DPID for move events
    """
    assert self.move
    return self._new_dpid

  @property
  def new_port (self):
    """
    New port for move events
    """
    assert self.move
    return self._new_port
class Alive (object):
  """
  Holds liveliness information for MAC and IP entries

  Tracks when the entry was last seen and reports expiry once the
  configured interval has elapsed with no refresh.
  """
  def __init__ (self, livelinessInterval=timeoutSec['arpAware']):
    self.lastTimeSeen = time.time()
    self.interval = livelinessInterval

  def expired (self):
    """True once nothing has been heard for longer than the interval."""
    deadline = self.lastTimeSeen + self.interval
    return time.time() > deadline

  def refresh (self):
    """Record that the entry was just seen."""
    self.lastTimeSeen = time.time()
class PingCtrl (Alive):
  """
  Holds information for handling ARP pings for hosts

  Counts outstanding pings; once more than pingLim have gone unanswered
  the host is considered unreachable.
  """
  # Number of ARP ping attempts before deciding it failed
  pingLim = 3

  def __init__ (self):
    super(PingCtrl,self).__init__(timeoutSec['arpReply'])
    self.pending = 0

  def sent (self):
    """Note that one more ping has gone out."""
    self.refresh()
    self.pending = self.pending + 1

  def failed (self):
    """True once the retry budget is exhausted."""
    return self.pending > PingCtrl.pingLim

  def received (self):
    # Clear any pending timeouts related to ARP pings
    self.pending = 0
class IpEntry (Alive):
  """
  This entry keeps track of IP addresses seen from each MAC entry and will
  be kept in the macEntry object's ipAddrs dictionary. At least for now,
  there is no need to refer to the original macEntry as the code is organized.
  """
  def __init__ (self, hasARP):
    # ARP-responsive hosts get the short timeout; silent ones the long one.
    chosen_interval = (timeoutSec['arpAware'] if hasARP
                       else timeoutSec['arpSilent'])
    super(IpEntry,self).__init__(chosen_interval)
    self.hasARP = hasARP
    self.pings = PingCtrl()

  def setHasARP (self):
    """Mark the host as ARP-responsive and shorten its timeout."""
    if self.hasARP:
      return
    self.hasARP = True
    self.interval = timeoutSec['arpAware']
class MacEntry (Alive):
  """
  Not strictly an ARP entry.
  When it gets moved to Topology, may include other host info, like
  services, and it may replace dpid by a general switch object reference
  We use the port to determine which port to forward traffic out of.
  """
  def __init__ (self, dpid, port, macaddr):
    super(MacEntry,self).__init__()
    self.dpid = dpid        # Switch the host was seen on
    self.port = port        # Port on that switch
    self.macaddr = macaddr
    self.ipAddrs = {}       # IPAddr -> IpEntry

  def __str__(self):
    return ' '.join([str(self.dpid), str(self.port), str(self.macaddr)])

  def __eq__ (self, other):
    """Compare against another MacEntry or a (dpid, port, macaddr) tuple."""
    if other is None:
      return False
    elif isinstance(other, tuple):
      return (self.dpid,self.port,self.macaddr)==other
    # Fixed: the original checked self.dpid twice; each field is now
    # compared exactly once.
    if self.dpid != other.dpid: return False
    if self.port != other.port: return False
    if self.macaddr != other.macaddr: return False
    # What about ipAddrs??
    return True

  def __ne__ (self, other):
    return not self.__eq__(other)
class host_tracker (EventMixin):
  """
  Host tracking component

  Learns each host's (dpid, port) location and IP addresses by watching
  PacketIns at edge ports, ARP-pings entries that have gone quiet, and
  raises HostEvent on join/leave/move.
  """
  _eventMixin_events = set([HostEvent])

  def __init__ (self, ping_src_mac = None, install_flow = True,
                eat_packets = True):
    """
    ping_src_mac : source MAC to use for ARP pings (defaults to an
                   arbitrary locally administered address)
    install_flow : install a flow on each switch steering ARP ping
                   replies to the controller
    eat_packets  : halt PacketIn events for replies to our own pings
    """
    if ping_src_mac is None:
      ping_src_mac = DEFAULT_ARP_PING_SRC_MAC

    self.ping_src_mac = EthAddr(ping_src_mac)
    self.install_flow = install_flow
    self.eat_packets = eat_packets

    # The following tables should go to Topology later
    self.entryByMAC = {}
    self._t = Timer(timeoutSec['timerInterval'],
                    self._check_timeouts, recurring=True)

    # Listen to openflow with high priority if we want to eat our ARP replies
    listen_args = {}
    if eat_packets:
      listen_args={'openflow':{'priority':0}}
    core.listen_to_dependencies(self, listen_args=listen_args)

  def _all_dependencies_met (self):
    log.info("host_tracker ready")

  # The following two functions should go to Topology also
  def getMacEntry (self, macaddr):
    """
    Return the MacEntry for macaddr, or None if the host is unknown.
    """
    return self.entryByMAC.get(macaddr)

  def sendPing (self, macEntry, ipAddr):
    """
    Builds an ETH/IP any-to-any ARP packet (an "ARP ping")

    On success records the attempt in the IpEntry's ping counter; on
    failure the (stale) IP entry is removed from the MacEntry.
    """
    r = arp()
    r.opcode = arp.REQUEST
    r.hwdst = macEntry.macaddr
    r.hwsrc = self.ping_src_mac
    r.protodst = ipAddr
    # src is IP_ANY
    e = ethernet(type=ethernet.ARP_TYPE, src=r.hwsrc, dst=r.hwdst)
    e.payload = r
    log.debug("%i %i sending ARP REQ to %s %s",
              macEntry.dpid, macEntry.port, str(r.hwdst), str(r.protodst))
    msg = of.ofp_packet_out(data = e.pack(),
                            action = of.ofp_action_output(port=macEntry.port))
    if core.openflow.sendToDPID(macEntry.dpid, msg.pack()):
      ipEntry = macEntry.ipAddrs[ipAddr]
      ipEntry.pings.sent()
    else:
      # macEntry is stale, remove it.
      log.debug("%i %i ERROR sending ARP REQ to %s %s",
                macEntry.dpid, macEntry.port, str(r.hwdst), str(r.protodst))
      del macEntry.ipAddrs[ipAddr]
    return

  def getSrcIPandARP (self, packet):
    """
    Gets source IPv4 address for packets that have one (IPv4 and ARP)

    Returns (ip_address, has_arp).  If no IP, returns (None, False).
    """
    if isinstance(packet, ipv4):
      log.debug("IP %s => %s",str(packet.srcip),str(packet.dstip))
      return ( packet.srcip, False )
    elif isinstance(packet, arp):
      log.debug("ARP %s %s => %s",
                {arp.REQUEST:"request",arp.REPLY:"reply"}.get(packet.opcode,
                    'op:%i' % (packet.opcode,)),
                str(packet.protosrc), str(packet.protodst))
      if (packet.hwtype == arp.HW_TYPE_ETHERNET and
          packet.prototype == arp.PROTO_TYPE_IP and
          packet.protosrc != 0):
        return ( packet.protosrc, True )
    return ( None, False )

  def updateIPInfo (self, pckt_srcip, macEntry, hasARP):
    """
    Update given MacEntry

    If there is IP info in the incoming packet, update the macEntry
    accordingly. In the past we assumed a 1:1 mapping between MAC and IP
    addresses, but removed that restriction later to accomodate cases
    like virtual interfaces (1:n) and distributed packet rewriting (n:1)
    """
    if pckt_srcip in macEntry.ipAddrs:
      # that entry already has that IP
      ipEntry = macEntry.ipAddrs[pckt_srcip]
      ipEntry.refresh()
      log.debug("%s already has IP %s, refreshing",
                str(macEntry), str(pckt_srcip) )
    else:
      # new mapping
      ipEntry = IpEntry(hasARP)
      macEntry.ipAddrs[pckt_srcip] = ipEntry
      log.info("Learned %s got IP %s", str(macEntry), str(pckt_srcip) )
    if hasARP:
      ipEntry.pings.received()

  def _handle_openflow_ConnectionUp (self, event):
    # Steer replies to our ARP pings to the controller.
    if not self.install_flow: return

    log.debug("Installing flow for ARP ping responses")

    m = of.ofp_flow_mod()
    m.priority += 1 # Higher than normal
    m.match.dl_type = ethernet.ARP_TYPE
    m.match.dl_dst = self.ping_src_mac

    m.actions.append(of.ofp_action_output(port=of.OFPP_CONTROLLER))
    event.connection.send(m)

  def _handle_openflow_PacketIn (self, event):
    """
    Populate MAC and IP tables based on incoming packets.

    Handles only packets from ports identified as not switch-only.
    If a MAC was not seen before, insert it in the MAC table;
    otherwise, update table and entry.

    If packet has a source IP, update that info for the macEntry (may require
    removing the info from another entry previously with that IP address).

    It does not forward any packets, just extract info from them.
    """
    dpid = event.connection.dpid
    inport = event.port
    packet = event.parsed
    if not packet.parsed:
      log.warning("%i %i ignoring unparsed packet", dpid, inport)
      return

    if packet.type == ethernet.LLDP_TYPE: # Ignore LLDP packets
      return

    # This should use Topology later
    if not core.openflow_discovery.is_edge_port(dpid, inport):
      # No host should be right behind a switch-only port
      log.debug("%i %i ignoring packetIn at switch-only port", dpid, inport)
      return

    log.debug("PacketIn: %i %i ETH %s => %s",
              dpid, inport, str(packet.src), str(packet.dst))

    # Learn or update dpid/port/MAC info
    macEntry = self.getMacEntry(packet.src)
    if macEntry is None:
      # there is no known host by that MAC
      # should we raise a NewHostFound event (at the end)?
      macEntry = MacEntry(dpid,inport,packet.src)
      self.entryByMAC[packet.src] = macEntry
      log.info("Learned %s", str(macEntry))
      self.raiseEventNoErrors(HostEvent, macEntry, join=True)
    elif macEntry != (dpid, inport, packet.src):
      # there is already an entry of host with that MAC, but host has moved
      # should we raise a HostMoved event (at the end)?
      log.info("Learned %s moved to %i %i", str(macEntry), dpid, inport)
      # if there has not been long since heard from it...
      if time.time() - macEntry.lastTimeSeen < timeoutSec['entryMove']:
        log.warning("Possible duplicate: %s at time %i, now (%i %i), time %i",
                    str(macEntry), macEntry.lastTimeSeen,
                    dpid, inport, time.time())
      # should we create a whole new entry, or keep the previous host info?
      # for now, we keep it: IP info, answers pings, etc.
      e = HostEvent(macEntry, move=True, new_dpid = dpid, new_port = inport)
      self.raiseEventNoErrors(e)
      macEntry.dpid = e._new_dpid
      # Bug fix: this used to assign "macEntry.inport", an attribute that
      # nothing reads -- the forwarding port attribute is "port", so moved
      # hosts kept their stale port forever.
      macEntry.port = e._new_port

    macEntry.refresh()

    (pckt_srcip, hasARP) = self.getSrcIPandARP(packet.next)
    if pckt_srcip is not None:
      self.updateIPInfo(pckt_srcip,macEntry,hasARP)

    if self.eat_packets and packet.dst == self.ping_src_mac:
      return EventHalt

  def _check_timeouts (self):
    """
    Checks for timed out entries

    Expired IP entries get ARP-pinged until their retry budget runs out;
    expired MAC entries are removed and a leave HostEvent is raised.
    """
    # Iterate over snapshots: we delete from both dicts while walking them
    # (required for correctness on Python 3).
    for macEntry in list(self.entryByMAC.values()):
      entryPinged = False
      for ip_addr, ipEntry in list(macEntry.ipAddrs.items()):
        if ipEntry.expired():
          if ipEntry.pings.failed():
            del macEntry.ipAddrs[ip_addr]
            log.info("Entry %s: IP address %s expired",
                     str(macEntry), str(ip_addr) )
          else:
            # sendPing() records the attempt itself; the extra
            # pings.sent() that used to follow double-counted each ping.
            self.sendPing(macEntry,ip_addr)
            entryPinged = True
      if macEntry.expired() and not entryPinged:
        log.info("Entry %s expired", str(macEntry))
        # sanity check: there should be no IP addresses left
        if len(macEntry.ipAddrs) > 0:
          for ip in list(macEntry.ipAddrs.keys()):
            # Bug fix: this loop used to log and delete the stale outer
            # variable "ip_addr" instead of "ip".
            log.warning("Entry %s expired but still had IP address %s",
                        str(macEntry), str(ip) )
            del macEntry.ipAddrs[ip]
        self.raiseEventNoErrors(HostEvent, macEntry, leave=True)
        del self.entryByMAC[macEntry.macaddr]
| 33.091127 | 78 | 0.675266 |
from pox.core import core
from pox.lib.addresses import EthAddr
from pox.lib.packet.ethernet import ethernet
from pox.lib.packet.ipv4 import ipv4
from pox.lib.packet.arp import arp
from pox.lib.recoco import Timer
from pox.lib.revent import Event, EventHalt
import pox.openflow.libopenflow_01 as of
import pox.openflow.discovery as discovery
from pox.lib.revent.revent import *
import time
import pox
log = core.getLogger()
timeoutSec = dict(
arpAware=60*2,
arpSilent=60*20,
arpReply=4,
timerInterval=5,
entryMove=60
)
DEFAULT_ARP_PING_SRC_MAC = '02:00:00:00:be:ef'
class HostEvent (Event):
def __init__ (self, entry, new_dpid = None, new_port = None, join = False,
leave = False, move = False):
super(HostEvent,self).__init__()
self.entry = entry
self.join = join
self.leave = leave
self.move = move
assert sum(1 for x in [join,leave,move] if x) == 1
self._new_dpid = new_dpid
self._new_port = new_port
#TODO: Allow us to cancel add/removes
@property
def new_dpid (self):
assert self.move
return self._new_dpid
@property
def new_port (self):
assert self.move
return self._new_port
class Alive (object):
def __init__ (self, livelinessInterval=timeoutSec['arpAware']):
self.lastTimeSeen = time.time()
self.interval=livelinessInterval
def expired (self):
return time.time() > self.lastTimeSeen + self.interval
def refresh (self):
self.lastTimeSeen = time.time()
class PingCtrl (Alive):
# Number of ARP ping attemps before deciding it failed
pingLim=3
def __init__ (self):
super(PingCtrl,self).__init__(timeoutSec['arpReply'])
self.pending = 0
def sent (self):
self.refresh()
self.pending += 1
def failed (self):
return self.pending > PingCtrl.pingLim
def received (self):
# Clear any pending timeouts related to ARP pings
self.pending = 0
class IpEntry (Alive):
def __init__ (self, hasARP):
if hasARP:
super(IpEntry,self).__init__(timeoutSec['arpAware'])
else:
super(IpEntry,self).__init__(timeoutSec['arpSilent'])
self.hasARP = hasARP
self.pings = PingCtrl()
def setHasARP (self):
if not self.hasARP:
self.hasARP = True
self.interval = timeoutSec['arpAware']
class MacEntry (Alive):
def __init__ (self, dpid, port, macaddr):
super(MacEntry,self).__init__()
self.dpid = dpid
self.port = port
self.macaddr = macaddr
self.ipAddrs = {}
def __str__(self):
return ' '.join([str(self.dpid), str(self.port), str(self.macaddr)])
def __eq__ (self, other):
if other is None:
return False
elif type(other) == tuple:
return (self.dpid,self.port,self.macaddr)==other
if self.dpid != other.dpid: return False
if self.port != other.port: return False
if self.macaddr != other.macaddr: return False
if self.dpid != other.dpid: return False
# What about ipAddrs??
return True
def __ne__ (self, other):
return not self.__eq__(other)
class host_tracker (EventMixin):
_eventMixin_events = set([HostEvent])
def __init__ (self, ping_src_mac = None, install_flow = True,
eat_packets = True):
if ping_src_mac is None:
ping_src_mac = DEFAULT_ARP_PING_SRC_MAC
self.ping_src_mac = EthAddr(ping_src_mac)
self.install_flow = install_flow
self.eat_packets = eat_packets
# The following tables should go to Topology later
self.entryByMAC = {}
self._t = Timer(timeoutSec['timerInterval'],
self._check_timeouts, recurring=True)
# Listen to openflow with high priority if we want to eat our ARP replies
listen_args = {}
if eat_packets:
listen_args={'openflow':{'priority':0}}
core.listen_to_dependencies(self, listen_args=listen_args)
def _all_dependencies_met (self):
log.info("host_tracker ready")
# The following two functions should go to Topology also
def getMacEntry (self, macaddr):
try:
result = self.entryByMAC[macaddr]
except KeyError as e:
result = None
return result
def sendPing (self, macEntry, ipAddr):
r = arp()
r.opcode = arp.REQUEST
r.hwdst = macEntry.macaddr
r.hwsrc = self.ping_src_mac
r.protodst = ipAddr
# src is IP_ANY
e = ethernet(type=ethernet.ARP_TYPE, src=r.hwsrc, dst=r.hwdst)
e.payload = r
log.debug("%i %i sending ARP REQ to %s %s",
macEntry.dpid, macEntry.port, str(r.hwdst), str(r.protodst))
msg = of.ofp_packet_out(data = e.pack(),
action = of.ofp_action_output(port=macEntry.port))
if core.openflow.sendToDPID(macEntry.dpid, msg.pack()):
ipEntry = macEntry.ipAddrs[ipAddr]
ipEntry.pings.sent()
else:
# macEntry is stale, remove it.
log.debug("%i %i ERROR sending ARP REQ to %s %s",
macEntry.dpid, macEntry.port, str(r.hwdst), str(r.protodst))
del macEntry.ipAddrs[ipAddr]
return
def getSrcIPandARP (self, packet):
if isinstance(packet, ipv4):
log.debug("IP %s => %s",str(packet.srcip),str(packet.dstip))
return ( packet.srcip, False )
elif isinstance(packet, arp):
log.debug("ARP %s %s => %s",
{arp.REQUEST:"request",arp.REPLY:"reply"}.get(packet.opcode,
'op:%i' % (packet.opcode,)),
str(packet.protosrc), str(packet.protodst))
if (packet.hwtype == arp.HW_TYPE_ETHERNET and
packet.prototype == arp.PROTO_TYPE_IP and
packet.protosrc != 0):
return ( packet.protosrc, True )
return ( None, False )
def updateIPInfo (self, pckt_srcip, macEntry, hasARP):
if pckt_srcip in macEntry.ipAddrs:
# that entry already has that IP
ipEntry = macEntry.ipAddrs[pckt_srcip]
ipEntry.refresh()
log.debug("%s already has IP %s, refreshing",
str(macEntry), str(pckt_srcip) )
else:
# new mapping
ipEntry = IpEntry(hasARP)
macEntry.ipAddrs[pckt_srcip] = ipEntry
log.info("Learned %s got IP %s", str(macEntry), str(pckt_srcip) )
if hasARP:
ipEntry.pings.received()
def _handle_openflow_ConnectionUp (self, event):
if not self.install_flow: return
log.debug("Installing flow for ARP ping responses")
m = of.ofp_flow_mod()
m.priority += 1 # Higher than normal
m.match.dl_type = ethernet.ARP_TYPE
m.match.dl_dst = self.ping_src_mac
m.actions.append(of.ofp_action_output(port=of.OFPP_CONTROLLER))
event.connection.send(m)
def _handle_openflow_PacketIn (self, event):
dpid = event.connection.dpid
inport = event.port
packet = event.parsed
if not packet.parsed:
log.warning("%i %i ignoring unparsed packet", dpid, inport)
return
if packet.type == ethernet.LLDP_TYPE: # Ignore LLDP packets
return
# This should use Topology later
if not core.openflow_discovery.is_edge_port(dpid, inport):
# No host should be right behind a switch-only port
log.debug("%i %i ignoring packetIn at switch-only port", dpid, inport)
return
log.debug("PacketIn: %i %i ETH %s => %s",
dpid, inport, str(packet.src), str(packet.dst))
# Learn or update dpid/port/MAC info
macEntry = self.getMacEntry(packet.src)
if macEntry is None:
# there is no known host by that MAC
# should we raise a NewHostFound event (at the end)?
macEntry = MacEntry(dpid,inport,packet.src)
self.entryByMAC[packet.src] = macEntry
log.info("Learned %s", str(macEntry))
self.raiseEventNoErrors(HostEvent, macEntry, join=True)
elif macEntry != (dpid, inport, packet.src):
# there is already an entry of host with that MAC, but host has moved
# should we raise a HostMoved event (at the end)?
log.info("Learned %s moved to %i %i", str(macEntry), dpid, inport)
# if there has not been long since heard from it...
if time.time() - macEntry.lastTimeSeen < timeoutSec['entryMove']:
log.warning("Possible duplicate: %s at time %i, now (%i %i), time %i",
str(macEntry), macEntry.lastTimeSeen,
dpid, inport, time.time())
# should we create a whole new entry, or keep the previous host info?
# for now, we keep it: IP info, answers pings, etc.
e = HostEvent(macEntry, move=True, new_dpid = dpid, new_port = inport)
self.raiseEventNoErrors(e)
macEntry.dpid = e._new_dpid
macEntry.inport = e._new_port
macEntry.refresh()
(pckt_srcip, hasARP) = self.getSrcIPandARP(packet.next)
if pckt_srcip is not None:
self.updateIPInfo(pckt_srcip,macEntry,hasARP)
if self.eat_packets and packet.dst == self.ping_src_mac:
return EventHalt
def _check_timeouts (self):
for macEntry in self.entryByMAC.values():
entryPinged = False
for ip_addr, ipEntry in macEntry.ipAddrs.items():
if ipEntry.expired():
if ipEntry.pings.failed():
del macEntry.ipAddrs[ip_addr]
log.info("Entry %s: IP address %s expired",
str(macEntry), str(ip_addr) )
else:
self.sendPing(macEntry,ip_addr)
ipEntry.pings.sent()
entryPinged = True
if macEntry.expired() and not entryPinged:
log.info("Entry %s expired", str(macEntry))
# sanity check: there should be no IP addresses left
if len(macEntry.ipAddrs) > 0:
for ip in macEntry.ipAddrs.keys():
log.warning("Entry %s expired but still had IP address %s",
str(macEntry), str(ip_addr) )
del macEntry.ipAddrs[ip_addr]
self.raiseEventNoErrors(HostEvent, macEntry, leave=True)
del self.entryByMAC[macEntry.macaddr]
| true | true |
f7fd62e52913383735ffd0f60d4a9c0ffae22c53 | 5,027 | py | Python | armageddon/ensemble.py | oahul14/MetTrack | dce04ad9bb61a0a1c4becafd25c932bb242d73c0 | [
"MIT"
] | null | null | null | armageddon/ensemble.py | oahul14/MetTrack | dce04ad9bb61a0a1c4becafd25c932bb242d73c0 | [
"MIT"
] | null | null | null | armageddon/ensemble.py | oahul14/MetTrack | dce04ad9bb61a0a1c4becafd25c932bb242d73c0 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import scipy.special as ssp
import scipy.optimize as sop
class Dist():
prob_val = 0.1
def __init__(self, prob_vals):
self.prob_vals = prob_vals
def velocity_dist(self,v):
return ssp.erf(v/(11*np.sqrt(2))) - (v/11)*(np.sqrt(2/np.pi)) * np.exp(-1*(v**2)/(2*(11**2))) - self.prob_val
def density_dist(self, rho):
return 0.5*( 1 + ssp.erf((rho-3000)/(1000*np.sqrt(2))) ) - self.prob_val
def inverse_radius_distribution(self, rmin, rmax):
return self.prob_vals*(rmax-rmin) + rmin
def inverse_strength_distribution(self,ymin=1e3,ymax=10e6):
return ymin * (10**(self.prob_vals * np.log10(ymax/ymin)))
def inverse_angle_distribution(self,amin=0,amax=np.pi/2):
return np.arccos(np.sqrt(self.prob_vals))
def inverse_velocity_distribution(self,v_guess=(50-11)/2):
v_array = []
for prob in self.prob_vals:
self.prob_val = prob
v_val = sop.newton_krylov(self.velocity_dist,v_guess)
v_array.append(v_val)
v_np = np.array(v_array)
return v_np
def inverse_density_distribution(self, rho_guess=(3000)):
rho_array = []
for prob in self.prob_vals:
self.prob_val = prob
rho_val = sop.diagbroyden(self.density_dist,rho_guess)
rho_array.append(rho_val)
rho_np = np.array(rho_array)
return rho_np
def solve_ensemble(
        planet,
        fiducial_impact,
        variables,
        radians=False,
        rmin=8, rmax=12,
        ):
    """
    Run asteroid simulation for a distribution of initial conditions and
    find the burst distribution

    Parameters
    ----------
    planet : object
        The Planet class instance on which to perform the ensemble calculation
    fiducial_impact : dict
        Dictionary of the fiducial values of radius, angle, strength, velocity
        and density.  (Not modified; the function works on a copy.)
    variables : list
        List of strings of all impact parameters to be varied in the ensemble
        calculation
    radians : bool, optional
        If True the fiducial angle is given in radians; it is converted to
        degrees internally.
    rmin : float, optional
        Minimum radius, in m, to use in the ensemble calculation,
        if radius is one of the parameters to be varied.
    rmax : float, optional
        Maximum radius, in m, to use in the ensemble calculation,
        if radius is one of the parameters to be varied.

    Returns
    -------
    ensemble : DataFrame
        DataFrame with columns of any parameters that are varied and the
        airburst altitude
    """
    # Bug fix: work on a copy so the caller's dict is not clobbered with
    # length-N sample arrays.
    impact = dict(fiducial_impact)

    # Everything downstream works in degrees.
    if radians:
        impact['angle'] = impact['angle'] * 180 / np.pi

    # Number of Monte-Carlo samples.
    N = 500
    prob_distribution = np.random.uniform(0.0, 1.0, N)
    distribution = Dist(prob_distribution)

    ensemble_df = pd.DataFrame()
    for var in variables:
        if var == 'radius':
            radius_dist = distribution.inverse_radius_distribution(rmin, rmax)
            impact['radius'] = radius_dist
            ensemble_df['radius'] = radius_dist
        if var == 'angle':
            angle_dist = distribution.inverse_angle_distribution()
            angle_dist = angle_dist * 180 / np.pi  # radians -> degrees
            impact['angle'] = angle_dist
            ensemble_df['angle'] = angle_dist
        if var == 'strength':
            strength_dist = distribution.inverse_strength_distribution()
            impact['strength'] = strength_dist
            ensemble_df['strength'] = strength_dist
        if var == 'velocity':
            velocity_dist = distribution.inverse_velocity_distribution()
            # Combine the 11 km/s escape speed with the sampled km/s speed
            # in quadrature, converting to m/s.
            impact_dist = np.sqrt((11e3) ** 2 + (velocity_dist * 1000) ** 2)
            impact['velocity'] = impact_dist
            ensemble_df['velocity'] = impact_dist
        if var == 'density':
            density_dist = distribution.inverse_density_distribution()
            impact['density'] = density_dist
            ensemble_df['density'] = density_dist

    # Broadcast every parameter that is NOT varied to a length-N array so
    # the per-sample loop below can treat all parameters uniformly.
    const_vals = np.setdiff1d([*impact], variables)
    for val in const_vals:
        impact[val] = np.array([impact[val]] * N)

    burst_altitude = []
    for rad, ang, vel, dens, stren in np.stack(
            [impact['radius'], impact['angle'], impact['velocity'],
             impact['density'], impact['strength']], axis=-1):
        output = planet.get_only_outcome(rad, vel, dens, stren, ang, dt=0.1)
        # Outcomes without a burst (e.g. cratering) are recorded as 0.
        if 'burst_altitude' in output:
            burst_altitude.append(output['burst_altitude'])
        else:
            burst_altitude.append(0.0)

    ensemble_df['burst_altitude'] = np.array(burst_altitude)
    return ensemble_df
| 34.431507 | 117 | 0.623433 | import numpy as np
import pandas as pd
import scipy.special as ssp
import scipy.optimize as sop
class Dist():
prob_val = 0.1
def __init__(self, prob_vals):
self.prob_vals = prob_vals
def velocity_dist(self,v):
return ssp.erf(v/(11*np.sqrt(2))) - (v/11)*(np.sqrt(2/np.pi)) * np.exp(-1*(v**2)/(2*(11**2))) - self.prob_val
def density_dist(self, rho):
return 0.5*( 1 + ssp.erf((rho-3000)/(1000*np.sqrt(2))) ) - self.prob_val
def inverse_radius_distribution(self, rmin, rmax):
return self.prob_vals*(rmax-rmin) + rmin
def inverse_strength_distribution(self,ymin=1e3,ymax=10e6):
return ymin * (10**(self.prob_vals * np.log10(ymax/ymin)))
def inverse_angle_distribution(self,amin=0,amax=np.pi/2):
return np.arccos(np.sqrt(self.prob_vals))
def inverse_velocity_distribution(self,v_guess=(50-11)/2):
v_array = []
for prob in self.prob_vals:
self.prob_val = prob
v_val = sop.newton_krylov(self.velocity_dist,v_guess)
v_array.append(v_val)
v_np = np.array(v_array)
return v_np
def inverse_density_distribution(self, rho_guess=(3000)):
rho_array = []
for prob in self.prob_vals:
self.prob_val = prob
rho_val = sop.diagbroyden(self.density_dist,rho_guess)
rho_array.append(rho_val)
rho_np = np.array(rho_array)
return rho_np
def solve_ensemble(
planet,
fiducial_impact,
variables,
radians=False,
rmin=8, rmax=12,
):
if radians:
fiducial_impact['angle'] = fiducial_impact['angle'] * 180/np.pi
N = 500
prob_distribution = np.random.uniform(0.0,1.0,N)
distribution = Dist(prob_distribution)
ensemble_df = pd.DataFrame()
for var in variables:
if var == 'radius':
radius_dist = distribution.inverse_radius_distribution(rmin,rmax)
fiducial_impact['radius'] = radius_dist
ensemble_df['radius'] = radius_dist
if var == 'angle':
angle_dist = distribution.inverse_angle_distribution()
angle_dist = angle_dist*180/np.pi
fiducial_impact['angle'] = angle_dist
ensemble_df['angle'] = angle_dist
if var == 'strength':
strength_dist = distribution.inverse_strength_distribution()
fiducial_impact['strength'] = strength_dist
ensemble_df['strength'] = strength_dist
if var == 'velocity':
velocity_dist = distribution.inverse_velocity_distribution()
impact_dist = np.sqrt( (11e3)**2 + (velocity_dist*1000)**2 )
fiducial_impact['velocity'] = impact_dist
ensemble_df['velocity'] = impact_dist
if var == 'density':
density_dist = distribution.inverse_density_distribution()
fiducial_impact['density'] = density_dist
ensemble_df['density'] = density_dist
const_vals = np.setdiff1d([*fiducial_impact], variables)
for val in const_vals:
fiducial_impact[val] = [fiducial_impact[val]] * N
fiducial_impact[val] = np.array(fiducial_impact[val])
burst_altitude = []
for rad,ang,vel,dens,stren in np.stack([fiducial_impact['radius'], fiducial_impact['angle'],
fiducial_impact['velocity'],fiducial_impact['density'],
fiducial_impact['strength']], axis = -1):
output = planet.get_only_outcome(rad,vel,dens,stren,ang, dt=0.1)
if 'burst_altitude' in output:
burst_altitude.append(output['burst_altitude'])
else:
burst_altitude.append(0.0)
ensemble_df['burst_altitude'] = np.array(burst_altitude)
return ensemble_df
| true | true |
f7fd6307284eeaddb5b346fffa0a61598bc293dd | 7,282 | py | Python | tests/cli/test_session_pool_manager.py | test-gh-org-workflow/probable-garbanzo | c6b8a0dbc573a2a0073b5ab7c8619c4d0baf7088 | [
"Apache-2.0"
] | 4 | 2017-01-31T14:05:19.000Z | 2019-04-10T16:35:44.000Z | tests/cli/test_session_pool_manager.py | test-gh-org-workflow/probable-garbanzo | c6b8a0dbc573a2a0073b5ab7c8619c4d0baf7088 | [
"Apache-2.0"
] | 89 | 2016-05-25T14:17:38.000Z | 2022-03-17T13:09:59.000Z | tests/cli/test_session_pool_manager.py | test-gh-org-workflow/probable-garbanzo | c6b8a0dbc573a2a0073b5ab7c8619c4d0baf7088 | [
"Apache-2.0"
] | 6 | 2016-07-21T12:24:10.000Z | 2022-02-21T06:33:18.000Z | from unittest import TestCase
from cloudshell.cli.service.session_pool_manager import (
SessionPoolException,
SessionPoolManager,
)
try:
from unittest.mock import MagicMock, Mock
except ImportError:
from mock import MagicMock, Mock
class TestSessionPoolManager(TestCase):
def setUp(self):
self._session_manager = Mock()
self._pool = Mock()
self._condition = MagicMock()
self._session_pool_manager = SessionPoolManager(
session_manager=self._session_manager, pool=self._pool
)
self._session_pool_manager._session_condition = self._condition
self._logger = Mock()
self._new_sessions = Mock()
self._command_mode = Mock()
self._prompt = Mock()
def test_get_session_with_condition_enter(self):
self._pool.empty.return_value = True
self._pool.maxsize = 4
self._session_manager.existing_sessions_count.return_value = 0
self._session_pool_manager._new_session = Mock()
self._session_pool_manager.get_session(
self._new_sessions, self._prompt, self._logger
)
self._condition.__enter__.assert_called_once()
def test_get_session_get_from_pool(self):
self._pool.empty.return_value = False
self._session_pool_manager._get_from_pool = Mock()
self._pool.maxsize = 4
self._session_manager.existing_sessions_count.return_value = 0
self._session_pool_manager._new_session = Mock()
self._session_pool_manager.get_session(
self._new_sessions, self._prompt, self._logger
)
self._session_pool_manager._get_from_pool.assert_called_once_with(
self._new_sessions, self._prompt, self._logger
)
def test_get_session_create_new(self):
self._pool.empty.return_value = True
self._pool.maxsize = 2
self._session_manager.existing_sessions_count.return_value = 1
self._session_pool_manager._new_session = Mock()
self._session_pool_manager.get_session(
self._new_sessions, self._prompt, self._logger
)
self._session_pool_manager._new_session.assert_called_once_with(
self._new_sessions, self._prompt, self._logger
)
def test_get_session_condition_wait_raises(self):
self._pool.empty.return_value = True
self._pool.maxsize = 1
self._session_manager.existing_sessions_count.return_value = 1
self._session_pool_manager._new_session = Mock()
pool_timeout = 1
self._session_pool_manager._pool_timeout = pool_timeout
exception = SessionPoolException
with self.assertRaises(exception):
self._session_pool_manager.get_session(
self._new_sessions, self._prompt, self._logger
)
self._condition.wait.assert_called_once_with(pool_timeout)
def test_get_session_with_condition_exit(self):
prompt = Mock()
self._pool.maxsize = 4
self._session_manager.existing_sessions_count.return_value = 0
self._session_pool_manager._new_session = Mock()
self._session_pool_manager.get_session(self._new_sessions, prompt, self._logger)
self._condition.__exit__.assert_called_once()
def test_remove_session_with_condition_enter(self):
session = Mock()
self._session_pool_manager.remove_session(session, self._logger)
self._condition.__enter__.assert_called_once()
def test_remove_session_call(self):
session = Mock()
self._session_pool_manager.remove_session(session, self._logger)
self._session_manager.remove_session.assert_called_once_with(
session, self._logger
)
def test_remove_session_condition_notify(self):
session = Mock()
self._session_pool_manager.remove_session(session, self._logger)
self._condition.notify.assert_called_once()
def test_remove_session_with_condition_exit(self):
session = Mock()
self._session_pool_manager.remove_session(session, self._logger)
self._condition.__exit__.assert_called_once()
def test_return_session_with_condition_enter(self):
session = Mock()
self._session_pool_manager.return_session(session, self._logger)
self._condition.__enter__.assert_called_once()
def test_return_session_call(self):
session = Mock()
self._session_pool_manager.return_session(session, self._logger)
self._pool.put.assert_called_once_with(session)
def test_return_session_condition_notify(self):
session = Mock()
self._session_pool_manager.return_session(session, self._logger)
self._condition.notify.assert_called_once()
def test_return_session_with_condition_exit(self):
session = Mock()
self._session_pool_manager.return_session(session, self._logger)
self._condition.__exit__.assert_called_once()
def test__new_session_called(self):
prompt = Mock()
self._session_pool_manager._new_session(
self._new_sessions, prompt, self._logger
)
self._session_manager.new_session.assert_called_once_with(
self._new_sessions, prompt, self._logger
)
def test__new_session_has_attr_new_session_true(self):
prompt = Mock()
session = self._session_pool_manager._new_session(
self._new_sessions, prompt, self._logger
)
self.assertTrue(hasattr(session, "new_session") and session.new_session)
def test__get_from_pool_called_get(self):
prompt = Mock()
self._session_pool_manager._get_from_pool(
self._new_sessions, prompt, self._logger
)
self._pool.get.assert_called_once_with(False)
def test__get_from_pool_is_compatible_called(self):
prompt = Mock()
session = Mock()
self._pool.get.return_value = session
self._session_pool_manager._get_from_pool(
self._new_sessions, prompt, self._logger
)
self._session_manager.is_compatible.assert_called_once_with(
session, self._new_sessions, self._logger
)
def test__get_from_pool_remove_called(self):
prompt = Mock()
self._session_manager.is_compatible.return_value = False
self._session_pool_manager.remove_session = Mock()
session = Mock()
self._pool.get.return_value = session
self._session_pool_manager._get_from_pool(
self._new_sessions, prompt, self._logger
)
self._session_pool_manager.remove_session.assert_called_once_with(
session, self._logger
)
def test__get_from_pool_new_session_called(self):
prompt = Mock()
self._session_manager.is_compatible.return_value = False
self._session_pool_manager.remove_session = Mock()
self._session_pool_manager._new_session = Mock()
session = Mock()
self._pool.get.return_value = session
self._session_pool_manager._get_from_pool(
self._new_sessions, prompt, self._logger
)
self._session_pool_manager._new_session.assert_called_once_with(
self._new_sessions, prompt, self._logger
)
| 38.734043 | 88 | 0.698297 | from unittest import TestCase
from cloudshell.cli.service.session_pool_manager import (
SessionPoolException,
SessionPoolManager,
)
try:
from unittest.mock import MagicMock, Mock
except ImportError:
from mock import MagicMock, Mock
class TestSessionPoolManager(TestCase):
def setUp(self):
self._session_manager = Mock()
self._pool = Mock()
self._condition = MagicMock()
self._session_pool_manager = SessionPoolManager(
session_manager=self._session_manager, pool=self._pool
)
self._session_pool_manager._session_condition = self._condition
self._logger = Mock()
self._new_sessions = Mock()
self._command_mode = Mock()
self._prompt = Mock()
def test_get_session_with_condition_enter(self):
self._pool.empty.return_value = True
self._pool.maxsize = 4
self._session_manager.existing_sessions_count.return_value = 0
self._session_pool_manager._new_session = Mock()
self._session_pool_manager.get_session(
self._new_sessions, self._prompt, self._logger
)
self._condition.__enter__.assert_called_once()
def test_get_session_get_from_pool(self):
self._pool.empty.return_value = False
self._session_pool_manager._get_from_pool = Mock()
self._pool.maxsize = 4
self._session_manager.existing_sessions_count.return_value = 0
self._session_pool_manager._new_session = Mock()
self._session_pool_manager.get_session(
self._new_sessions, self._prompt, self._logger
)
self._session_pool_manager._get_from_pool.assert_called_once_with(
self._new_sessions, self._prompt, self._logger
)
def test_get_session_create_new(self):
self._pool.empty.return_value = True
self._pool.maxsize = 2
self._session_manager.existing_sessions_count.return_value = 1
self._session_pool_manager._new_session = Mock()
self._session_pool_manager.get_session(
self._new_sessions, self._prompt, self._logger
)
self._session_pool_manager._new_session.assert_called_once_with(
self._new_sessions, self._prompt, self._logger
)
def test_get_session_condition_wait_raises(self):
self._pool.empty.return_value = True
self._pool.maxsize = 1
self._session_manager.existing_sessions_count.return_value = 1
self._session_pool_manager._new_session = Mock()
pool_timeout = 1
self._session_pool_manager._pool_timeout = pool_timeout
exception = SessionPoolException
with self.assertRaises(exception):
self._session_pool_manager.get_session(
self._new_sessions, self._prompt, self._logger
)
self._condition.wait.assert_called_once_with(pool_timeout)
def test_get_session_with_condition_exit(self):
prompt = Mock()
self._pool.maxsize = 4
self._session_manager.existing_sessions_count.return_value = 0
self._session_pool_manager._new_session = Mock()
self._session_pool_manager.get_session(self._new_sessions, prompt, self._logger)
self._condition.__exit__.assert_called_once()
def test_remove_session_with_condition_enter(self):
session = Mock()
self._session_pool_manager.remove_session(session, self._logger)
self._condition.__enter__.assert_called_once()
def test_remove_session_call(self):
session = Mock()
self._session_pool_manager.remove_session(session, self._logger)
self._session_manager.remove_session.assert_called_once_with(
session, self._logger
)
def test_remove_session_condition_notify(self):
session = Mock()
self._session_pool_manager.remove_session(session, self._logger)
self._condition.notify.assert_called_once()
def test_remove_session_with_condition_exit(self):
session = Mock()
self._session_pool_manager.remove_session(session, self._logger)
self._condition.__exit__.assert_called_once()
def test_return_session_with_condition_enter(self):
session = Mock()
self._session_pool_manager.return_session(session, self._logger)
self._condition.__enter__.assert_called_once()
def test_return_session_call(self):
session = Mock()
self._session_pool_manager.return_session(session, self._logger)
self._pool.put.assert_called_once_with(session)
def test_return_session_condition_notify(self):
session = Mock()
self._session_pool_manager.return_session(session, self._logger)
self._condition.notify.assert_called_once()
def test_return_session_with_condition_exit(self):
session = Mock()
self._session_pool_manager.return_session(session, self._logger)
self._condition.__exit__.assert_called_once()
def test__new_session_called(self):
prompt = Mock()
self._session_pool_manager._new_session(
self._new_sessions, prompt, self._logger
)
self._session_manager.new_session.assert_called_once_with(
self._new_sessions, prompt, self._logger
)
def test__new_session_has_attr_new_session_true(self):
prompt = Mock()
session = self._session_pool_manager._new_session(
self._new_sessions, prompt, self._logger
)
self.assertTrue(hasattr(session, "new_session") and session.new_session)
def test__get_from_pool_called_get(self):
prompt = Mock()
self._session_pool_manager._get_from_pool(
self._new_sessions, prompt, self._logger
)
self._pool.get.assert_called_once_with(False)
def test__get_from_pool_is_compatible_called(self):
prompt = Mock()
session = Mock()
self._pool.get.return_value = session
self._session_pool_manager._get_from_pool(
self._new_sessions, prompt, self._logger
)
self._session_manager.is_compatible.assert_called_once_with(
session, self._new_sessions, self._logger
)
def test__get_from_pool_remove_called(self):
prompt = Mock()
self._session_manager.is_compatible.return_value = False
self._session_pool_manager.remove_session = Mock()
session = Mock()
self._pool.get.return_value = session
self._session_pool_manager._get_from_pool(
self._new_sessions, prompt, self._logger
)
self._session_pool_manager.remove_session.assert_called_once_with(
session, self._logger
)
def test__get_from_pool_new_session_called(self):
prompt = Mock()
self._session_manager.is_compatible.return_value = False
self._session_pool_manager.remove_session = Mock()
self._session_pool_manager._new_session = Mock()
session = Mock()
self._pool.get.return_value = session
self._session_pool_manager._get_from_pool(
self._new_sessions, prompt, self._logger
)
self._session_pool_manager._new_session.assert_called_once_with(
self._new_sessions, prompt, self._logger
)
| true | true |
f7fd63910b11379d4d9bb02948cc429212eff5b7 | 7,926 | py | Python | tests/managers/loadbal_tests.py | acamacho82/softlayer-python | 8a755be00dcb86abc20fcc4b4f69e3155ba187e8 | [
"MIT"
] | 2 | 2016-07-06T15:31:48.000Z | 2016-07-06T15:40:25.000Z | tests/managers/loadbal_tests.py | acamacho82/softlayer-python | 8a755be00dcb86abc20fcc4b4f69e3155ba187e8 | [
"MIT"
] | 73 | 2016-07-05T15:17:51.000Z | 2016-08-18T18:16:29.000Z | tests/managers/loadbal_tests.py | kyubifire/softlayer-python | bee36eec73474a8b6a1813fbbcc0512f81bf1779 | [
"MIT"
] | 1 | 2019-07-22T05:20:39.000Z | 2019-07-22T05:20:39.000Z | """
SoftLayer.tests.managers.loadbal_tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:license: MIT, see LICENSE for more details.
"""
import SoftLayer
from SoftLayer import testing
VIRT_IP_SERVICE = ('SoftLayer_Network_Application_Delivery_Controller_'
'LoadBalancer_VirtualIpAddress')
class LoadBalancerTests(testing.TestCase):
def set_up(self):
self.lb_mgr = SoftLayer.LoadBalancerManager(self.client)
def test_get_lb_pkgs(self):
result = self.lb_mgr.get_lb_pkgs()
self.assertEqual(len(result), 13)
_filter = {
'items': {
'description': {
'operation': '*= Load Balancer'
}
}
}
self.assert_called_with('SoftLayer_Product_Package', 'getItems',
identifier=0,
filter=_filter)
def test_get_hc_types(self):
result = self.lb_mgr.get_hc_types()
self.assertEqual(len(result), 6)
service = ('SoftLayer_Network_Application_Delivery_Controller_'
'LoadBalancer_Health_Check_Type')
self.assert_called_with(service, 'getAllObjects')
def test_get_routing_methods(self):
result = self.lb_mgr.get_routing_methods()
self.assertEqual(len(result), 12)
service = ('SoftLayer_Network_Application_Delivery_Controller_'
'LoadBalancer_Routing_Method')
self.assert_called_with(service, 'getAllObjects')
def test_get_location(self):
id1 = self.lb_mgr._get_location('sjc01')
self.assertEqual(id1, 168642)
id2 = self.lb_mgr._get_location('dal05')
self.assertEqual(id2, 'FIRST_AVAILABLE')
def test_get_routing_types(self):
result = self.lb_mgr.get_routing_types()
self.assertEqual(len(result), 6)
service = ('SoftLayer_Network_Application_Delivery_Controller_'
'LoadBalancer_Routing_Type')
self.assert_called_with(service, 'getAllObjects')
def test_cancel_lb(self):
result = self.lb_mgr.cancel_lb(6327)
self.assertEqual(result, True)
self.assert_called_with('SoftLayer_Billing_Item', 'cancelService',
identifier=21370814)
def test_add_local_lb(self):
self.lb_mgr.add_local_lb(6327, 'sjc01')
args = ({
'complexType': 'SoftLayer_Container_Product_Order_Network_'
'LoadBalancer',
'quantity': 1,
'packageId': 0,
"location": 168642,
'prices': [{'id': 6327}]
},)
self.assert_called_with('SoftLayer_Product_Order', 'placeOrder',
args=args)
def test_get_local_lbs(self):
result = self.lb_mgr.get_local_lbs()
self.assertEqual(len(result), 0)
mask = 'mask[loadBalancerHardware[datacenter],ipAddress]'
self.assert_called_with('SoftLayer_Account', 'getAdcLoadBalancers',
mask=mask)
def test_get_local_lb(self):
result = self.lb_mgr.get_local_lb(22348)
self.assertEqual(result['id'], 22348)
mask = ('mask['
'loadBalancerHardware[datacenter], '
'ipAddress, virtualServers[serviceGroups'
'[routingMethod,routingType,services'
'[healthChecks[type], groupReferences,'
' ipAddress]]]]')
self.assert_called_with(VIRT_IP_SERVICE, 'getObject',
identifier=22348,
mask=mask)
def test_delete_service(self):
result = self.lb_mgr.delete_service(1234)
self.assertEqual(result, True)
service = ('SoftLayer_Network_Application_Delivery_Controller_'
'LoadBalancer_Service')
self.assert_called_with(service, 'deleteObject', identifier=1234)
def test_delete_service_group(self):
result = self.lb_mgr.delete_service_group(1234)
self.assertEqual(result, True)
service = ('SoftLayer_Network_Application_Delivery_Controller_'
'LoadBalancer_VirtualServer')
self.assert_called_with(service, 'deleteObject', identifier=1234)
def test_toggle_service_status(self):
result = self.lb_mgr.toggle_service_status(1234)
self.assertEqual(result, True)
service = ('SoftLayer_Network_Application_Delivery_Controller_'
'LoadBalancer_Service')
self.assert_called_with(service, 'toggleStatus', identifier=1234)
def test_edit_service(self):
self.lb_mgr.edit_service(12345, 1234, '9.9.9.9', 80, True, 21, 1)
_filter = {
'virtualServers': {
'serviceGroups': {
'services': {
'id': {
'operation': 1234
}
}
}
}
}
mask = 'mask[serviceGroups[services[groupReferences,healthChecks]]]'
self.assert_called_with(VIRT_IP_SERVICE, 'getVirtualServers',
identifier=12345,
filter=_filter,
mask=mask)
self.assert_called_with(VIRT_IP_SERVICE, 'editObject')
def test_add_service(self):
self.lb_mgr.add_service(12345, 50718, 123, 80, True, 21, 1)
mask = 'mask[virtualServers[serviceGroups[services[groupReferences]]]]'
self.assert_called_with(VIRT_IP_SERVICE, 'getObject',
mask=mask,
identifier=12345)
self.assert_called_with(VIRT_IP_SERVICE, 'editObject',
identifier=12345)
arg = self.calls(VIRT_IP_SERVICE, 'editObject')[0].args[0]
self.assertEqual(
len(arg['virtualServers'][0]['serviceGroups'][0]['services']),
2)
def test_edit_service_group(self):
self.lb_mgr.edit_service_group(12345,
group_id=50718,
allocation=100,
port=80,
routing_type=2,
routing_method=10)
mask = 'mask[virtualServers[serviceGroups[services[groupReferences]]]]'
self.assert_called_with(VIRT_IP_SERVICE, 'getObject',
identifier=12345,
mask=mask)
self.assert_called_with(VIRT_IP_SERVICE, 'getObject', identifier=12345)
def test_add_service_group(self):
self.lb_mgr.add_service_group(12345, 100, 80, 2, 10)
mask = 'mask[virtualServers[serviceGroups[services[groupReferences]]]]'
self.assert_called_with(VIRT_IP_SERVICE, 'getObject',
mask=mask,
identifier=12345)
self.assert_called_with(VIRT_IP_SERVICE, 'editObject',
identifier=12345)
arg = self.calls(VIRT_IP_SERVICE, 'editObject')[0].args[0]
self.assertEqual(len(arg['virtualServers']), 2)
def test_reset_service_group(self):
result = self.lb_mgr.reset_service_group(12345, group_id=50718)
self.assertEqual(result, True)
_filter = {'virtualServers': {'id': {'operation': 50718}}}
self.assert_called_with(VIRT_IP_SERVICE, 'getVirtualServers',
identifier=12345,
filter=_filter,
mask='mask[serviceGroups]')
service = ('SoftLayer_Network_Application_Delivery_Controller_'
'LoadBalancer_Service_Group')
self.assert_called_with(service, 'kickAllConnections',
identifier=51758)
| 37.563981 | 79 | 0.574565 | import SoftLayer
from SoftLayer import testing
VIRT_IP_SERVICE = ('SoftLayer_Network_Application_Delivery_Controller_'
'LoadBalancer_VirtualIpAddress')
class LoadBalancerTests(testing.TestCase):
def set_up(self):
self.lb_mgr = SoftLayer.LoadBalancerManager(self.client)
def test_get_lb_pkgs(self):
result = self.lb_mgr.get_lb_pkgs()
self.assertEqual(len(result), 13)
_filter = {
'items': {
'description': {
'operation': '*= Load Balancer'
}
}
}
self.assert_called_with('SoftLayer_Product_Package', 'getItems',
identifier=0,
filter=_filter)
def test_get_hc_types(self):
result = self.lb_mgr.get_hc_types()
self.assertEqual(len(result), 6)
service = ('SoftLayer_Network_Application_Delivery_Controller_'
'LoadBalancer_Health_Check_Type')
self.assert_called_with(service, 'getAllObjects')
def test_get_routing_methods(self):
result = self.lb_mgr.get_routing_methods()
self.assertEqual(len(result), 12)
service = ('SoftLayer_Network_Application_Delivery_Controller_'
'LoadBalancer_Routing_Method')
self.assert_called_with(service, 'getAllObjects')
def test_get_location(self):
id1 = self.lb_mgr._get_location('sjc01')
self.assertEqual(id1, 168642)
id2 = self.lb_mgr._get_location('dal05')
self.assertEqual(id2, 'FIRST_AVAILABLE')
def test_get_routing_types(self):
result = self.lb_mgr.get_routing_types()
self.assertEqual(len(result), 6)
service = ('SoftLayer_Network_Application_Delivery_Controller_'
'LoadBalancer_Routing_Type')
self.assert_called_with(service, 'getAllObjects')
def test_cancel_lb(self):
result = self.lb_mgr.cancel_lb(6327)
self.assertEqual(result, True)
self.assert_called_with('SoftLayer_Billing_Item', 'cancelService',
identifier=21370814)
def test_add_local_lb(self):
self.lb_mgr.add_local_lb(6327, 'sjc01')
args = ({
'complexType': 'SoftLayer_Container_Product_Order_Network_'
'LoadBalancer',
'quantity': 1,
'packageId': 0,
"location": 168642,
'prices': [{'id': 6327}]
},)
self.assert_called_with('SoftLayer_Product_Order', 'placeOrder',
args=args)
def test_get_local_lbs(self):
result = self.lb_mgr.get_local_lbs()
self.assertEqual(len(result), 0)
mask = 'mask[loadBalancerHardware[datacenter],ipAddress]'
self.assert_called_with('SoftLayer_Account', 'getAdcLoadBalancers',
mask=mask)
def test_get_local_lb(self):
result = self.lb_mgr.get_local_lb(22348)
self.assertEqual(result['id'], 22348)
mask = ('mask['
'loadBalancerHardware[datacenter], '
'ipAddress, virtualServers[serviceGroups'
'[routingMethod,routingType,services'
'[healthChecks[type], groupReferences,'
' ipAddress]]]]')
self.assert_called_with(VIRT_IP_SERVICE, 'getObject',
identifier=22348,
mask=mask)
def test_delete_service(self):
result = self.lb_mgr.delete_service(1234)
self.assertEqual(result, True)
service = ('SoftLayer_Network_Application_Delivery_Controller_'
'LoadBalancer_Service')
self.assert_called_with(service, 'deleteObject', identifier=1234)
def test_delete_service_group(self):
result = self.lb_mgr.delete_service_group(1234)
self.assertEqual(result, True)
service = ('SoftLayer_Network_Application_Delivery_Controller_'
'LoadBalancer_VirtualServer')
self.assert_called_with(service, 'deleteObject', identifier=1234)
def test_toggle_service_status(self):
result = self.lb_mgr.toggle_service_status(1234)
self.assertEqual(result, True)
service = ('SoftLayer_Network_Application_Delivery_Controller_'
'LoadBalancer_Service')
self.assert_called_with(service, 'toggleStatus', identifier=1234)
def test_edit_service(self):
self.lb_mgr.edit_service(12345, 1234, '9.9.9.9', 80, True, 21, 1)
_filter = {
'virtualServers': {
'serviceGroups': {
'services': {
'id': {
'operation': 1234
}
}
}
}
}
mask = 'mask[serviceGroups[services[groupReferences,healthChecks]]]'
self.assert_called_with(VIRT_IP_SERVICE, 'getVirtualServers',
identifier=12345,
filter=_filter,
mask=mask)
self.assert_called_with(VIRT_IP_SERVICE, 'editObject')
def test_add_service(self):
self.lb_mgr.add_service(12345, 50718, 123, 80, True, 21, 1)
mask = 'mask[virtualServers[serviceGroups[services[groupReferences]]]]'
self.assert_called_with(VIRT_IP_SERVICE, 'getObject',
mask=mask,
identifier=12345)
self.assert_called_with(VIRT_IP_SERVICE, 'editObject',
identifier=12345)
arg = self.calls(VIRT_IP_SERVICE, 'editObject')[0].args[0]
self.assertEqual(
len(arg['virtualServers'][0]['serviceGroups'][0]['services']),
2)
def test_edit_service_group(self):
self.lb_mgr.edit_service_group(12345,
group_id=50718,
allocation=100,
port=80,
routing_type=2,
routing_method=10)
mask = 'mask[virtualServers[serviceGroups[services[groupReferences]]]]'
self.assert_called_with(VIRT_IP_SERVICE, 'getObject',
identifier=12345,
mask=mask)
self.assert_called_with(VIRT_IP_SERVICE, 'getObject', identifier=12345)
def test_add_service_group(self):
self.lb_mgr.add_service_group(12345, 100, 80, 2, 10)
mask = 'mask[virtualServers[serviceGroups[services[groupReferences]]]]'
self.assert_called_with(VIRT_IP_SERVICE, 'getObject',
mask=mask,
identifier=12345)
self.assert_called_with(VIRT_IP_SERVICE, 'editObject',
identifier=12345)
arg = self.calls(VIRT_IP_SERVICE, 'editObject')[0].args[0]
self.assertEqual(len(arg['virtualServers']), 2)
def test_reset_service_group(self):
result = self.lb_mgr.reset_service_group(12345, group_id=50718)
self.assertEqual(result, True)
_filter = {'virtualServers': {'id': {'operation': 50718}}}
self.assert_called_with(VIRT_IP_SERVICE, 'getVirtualServers',
identifier=12345,
filter=_filter,
mask='mask[serviceGroups]')
service = ('SoftLayer_Network_Application_Delivery_Controller_'
'LoadBalancer_Service_Group')
self.assert_called_with(service, 'kickAllConnections',
identifier=51758)
| true | true |
f7fd63c86136ef2e97281bd55a42379e20c23025 | 12,560 | py | Python | nn_dataflow/LoopBlockingSolver.py | leesh6796/planaria.code | 67c9df95a5843281c4ad6d44673526e96eec3664 | [
"Apache-2.0"
] | 16 | 2021-10-29T19:26:27.000Z | 2022-03-31T03:21:16.000Z | nn_dataflow/LoopBlockingSolver.py | leesh6796/planaria.code | 67c9df95a5843281c4ad6d44673526e96eec3664 | [
"Apache-2.0"
] | 1 | 2021-11-02T19:50:05.000Z | 2021-11-08T21:12:59.000Z | nn_dataflow/LoopBlockingSolver.py | leesh6796/planaria.code | 67c9df95a5843281c4ad6d44673526e96eec3664 | [
"Apache-2.0"
] | 4 | 2021-11-18T06:46:32.000Z | 2022-03-30T22:55:17.000Z | """ $lic$
Copyright (C) 2016-2017 by The Board of Trustees of Stanford University
This program is free software: you can redistribute it and/or modify it under
the terms of the Modified BSD-3 License as published by the Open Source
Initiative.
If you use this program in your research, we request that you reference the
TETRIS paper ("TETRIS: Scalable and Efficient Neural Network Acceleration with
3D Memory", in ASPLOS'17. April, 2017), and that you send us a citation of your
work.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the BSD-3 License for more details.
You should have received a copy of the Modified BSD-3 License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
"""
import math
import itertools
from . import DataCategoryEnum as de
from . import Util
'''
Analytical solvers for loop blocking.
'''
def _solve_lpbl_iofmap_gbuf_reside(nested_loop_desc, resource, reside_dce):
'''
Given data category (ifm or ofm, according to `reside_dce` which is a
    DataCategoryEnum) is the only one in gbuf; the other data category and
fil both bypass gbuf. Solve the analytical optimal loop blocking. Return
ti, to, tb, and orders, same format as
LoopBlocking.gen_loopblocking_gbuf_regf().
Denote xfm to be the one bypassing, yfm to be the other one residing.
Nested loop is:
tb[0], ty[0], tx[0], (tb[1] = 1), (tx[1] = 1), ty[1], tb[2], tx[2]/ty[2].
Note that ty[0] outside tx[0] means we stream the bypassing xfm multiple
times (equal to ty[0]), each for one chunk of yfm in gbuf. Otherwise, yfm
will be accessed multiple times (equal to tx[0]). Because ty[0] is gbuf
chunk count and tx[0] is regf chunk count (tx[1] = 1), ty[0] is likely
smaller and thus better.
Also note that tx[1] outside ty[1]. This indicates that xfm which is
streamed in bypassing gbuf is loaded into regf once, and then each part of
yfm chunk in gbuf is loaded into regf. Reversing this order is wrong, as
that means xfm streaming into regf multiple times for each gbuf-bypass
streaming, which requires store in gbuf.
Opt I.
min accesses to DRAM =
(Nx * sx * B) * fx * ty + (Ny * sy * B) * fy + (Nx * Ny * sf) * tb
s.t.
(Ny * sy / ty) * (B / tb) <= Sgbuf
1 <= ty <= Ny
1 <= tb <= B
Nx, Ny, B are numbers of xfms, yfms, and batch size.
sx, sy, sf are size of one xfm, yfm, fil.
ty, tb are top-level tiling for yfm and batch.
Opt II.
min refetch yfm from gbuf =
(Nx / nx)
s.t.
nx * sx * nb + ny * sy * nb + nx * ny * sf <= Sregf
nx, ny are numbers of xfms, yfms in regf. nb is number of batches in regf.
Solving Opt I and Opt II will give the results for ifmap or ofmap
bypassing.
'''
dce_y = reside_dce
if dce_y == de.OFM:
dce_x = de.IFM
nfmaps_x = nested_loop_desc.loopcnt_ifm
nfmaps_y = nested_loop_desc.loopcnt_ofm
facc_x = 1
facc_y = 2
elif dce_y == de.IFM:
dce_x = de.OFM
nfmaps_x = nested_loop_desc.loopcnt_ofm
nfmaps_y = nested_loop_desc.loopcnt_ifm
facc_x = 2
facc_y = 1
else:
raise RuntimeError('LoopBlockingSolver: only allow ifmap or ofmap '
'to bypass.')
nbats = nested_loop_desc.loopcnt_bat
usize_gbuf_x = nested_loop_desc.usize_gbuf_of(dce_x)
usize_gbuf_y = nested_loop_desc.usize_gbuf_of(dce_y)
usize_gbuf_fil = nested_loop_desc.usize_gbuf_of(de.FIL) + 1e-6
max_size_gbuf = resource.size_gbuf
usize_regf_x = nested_loop_desc.usize_regf_of(dce_x)
usize_regf_y = nested_loop_desc.usize_regf_of(dce_y)
usize_regf_fil = nested_loop_desc.usize_regf_of(de.FIL) + 1e-6
max_size_regf = resource.size_regf
# Opt I problem.
def goal(ty, tb): # pylint: disable=invalid-name
''' Goal function. min goal(). '''
return ((nfmaps_x * usize_gbuf_x * nbats) * facc_x * ty
+ (nfmaps_y * usize_gbuf_y * nbats) * facc_y
+ (nfmaps_x * nfmaps_y * usize_gbuf_fil) * tb)
def constraints(ty, tb): # pylint: disable=invalid-name
''' All constraints. s.t. constraints(). '''
c1 = ((nfmaps_y * usize_gbuf_y / float(ty)) * (nbats / float(tb))
< max_size_gbuf)
return c1
# Candidates of optimal ty, tb.
ty_tb_cands = []
# Analytical solution for min goal() s.t. constraints().
ty_top = nfmaps_y * math.sqrt(float(usize_gbuf_fil * usize_gbuf_y)
/ (usize_gbuf_x * max_size_gbuf * facc_x))
tb_top = nbats * math.sqrt(float(usize_gbuf_x * usize_gbuf_y * facc_x)
/ (usize_gbuf_fil * max_size_gbuf))
# Enforce to be a factor of total loop count.
ty_top_adj = Util.closest_factor(nfmaps_y, ty_top)
tb_top_adj = Util.closest_factor(nbats, tb_top)
# Add to candidates.
ty_tb_cands += itertools.product(ty_top_adj, tb_top_adj)
# Boundary points.
# When tb = B. Solve constraints().
tb_bnd = nbats
ty_bnd = nfmaps_y * usize_gbuf_y / float(max_size_gbuf)
tb_bnd_adj = [tb_bnd]
ty_bnd_adj = Util.closest_factor(nfmaps_y, ty_bnd)
# Add to candidates.
ty_tb_cands += itertools.product(ty_bnd_adj, tb_bnd_adj)
# When tb = 1. Solve constraints().
tb_bnd = 1
ty_bnd = nfmaps_y * nbats * usize_gbuf_y / float(max_size_gbuf)
tb_bnd_adj = [tb_bnd]
ty_bnd_adj = Util.closest_factor(nfmaps_y, ty_bnd)
# Add to candidates.
ty_tb_cands += itertools.product(ty_bnd_adj, tb_bnd_adj)
# When ty = Ny. Solve constraints().
ty_bnd = nfmaps_y
tb_bnd = nbats * usize_gbuf_y / float(max_size_gbuf)
ty_bnd_adj = [ty_bnd]
tb_bnd_adj = Util.closest_factor(nbats, tb_bnd)
# Add to candidates.
ty_tb_cands += itertools.product(ty_bnd_adj, tb_bnd_adj)
# When ty = 1. Solve constraints().
ty_bnd = 1
tb_bnd = nfmaps_y * nbats * usize_gbuf_y / float(max_size_gbuf)
ty_bnd_adj = [ty_bnd]
tb_bnd_adj = Util.closest_factor(nbats, tb_bnd)
# Add to candidates.
ty_tb_cands += itertools.product(ty_bnd_adj, tb_bnd_adj)
# Select best ty, tb from candidates.
best_ty_tb = min([(goal(*ty_tb_), ) + ty_tb_ for ty_tb_ in ty_tb_cands
if constraints(*ty_tb_)])
tb0 = best_ty_tb[2]
# Because fil bypasses gbuf, tb[1] = 1.
tb1 = 1
tb2 = nbats / tb0 / tb1
tb = (tb0, tb1, tb2)
ty0 = best_ty_tb[1]
# Opt II problem is trivial to solve, let ny = 1 and max nx
ty2 = 1
ty1 = nfmaps_y / ty0 / ty2
ty = (ty0, ty1, ty2)
tx_bottom = float(max_size_regf - usize_regf_y * ty[2] * tb[2]) \
/ (usize_regf_x * tb[2] + usize_regf_fil)
tx2 = Util.closest_factor(nfmaps_x, tx_bottom)[0]
# Because xfm bypasses gbuf, tx[1] = 1.
tx1 = 1
tx0 = nfmaps_x / tx1 / tx2
tx = (tx0, tx1, tx2)
# Compose return values.
# For orders see docstring: at gbuf, b, y, x; at regf, b, x, y.
if dce_x == de.IFM:
ti = tx
to = ty
orders = (None, (0, 1, 2), None, (1, 0, 2))
elif dce_x == de.OFM:
ti = ty
to = tx
orders = (None, (1, 0, 2), None, (0, 1, 2))
return ti, to, tb, orders
def _solve_lpbl_filter_gbuf_reside(nested_loop_desc, resource):
    '''
    The fil is the only one in gbuf; both ifm and ofm bypass gbuf. Solve the
    analytical optimal loop blocking. Return ti, to, tb, and orders, same
    format as LoopBlocking.gen_loopblocking_gbuf_regf().
    Denote xfm loop to be inside yfm loop at the outermost level.
    Nested loop is:
    (tb[0] = 1), ty[0], tx[0], tb[1], (ty[1] = 1), (tx[1] = 1), tb[2],
    tx[2]/ty[2].
    tb[0] = 1 is because both fmaps bypass gbuf, and we loop across all batches
    in the middle level tb[1].
    Opt I.
    min accesses to DRAM =
    (Nx * sx * B) * fx * ty + (Ny * sy * B) * fy + (Nx * Ny * sf)
    s.t.
    nx * sx * nb + ny * sy * nb + nx * ny * sf <= Sregf
    nx * ny * sf <= Sgbuf
    Each time we bring in nx xfms and ny yfms with batch size nb into regf.
    Solving Opt I will give the results for ifmap and ofmap both bypassing.
    '''
    # Best goal value seen so far; compared across the two x/y role
    # assignments below so the cheaper one is returned.
    goal_res = float('inf')
    for dce_y in [de.IFM, de.OFM]:
        # Assign roles: y is the fmap looped at the outermost level; x is the
        # other one.  facc_* are the DRAM access factors (ofm is read+write).
        if dce_y == de.OFM:
            dce_x = de.IFM
            nfmaps_x = nested_loop_desc.loopcnt_ifm
            nfmaps_y = nested_loop_desc.loopcnt_ofm
            facc_x = 1
            facc_y = 2
        elif dce_y == de.IFM:
            dce_x = de.OFM
            nfmaps_x = nested_loop_desc.loopcnt_ofm
            nfmaps_y = nested_loop_desc.loopcnt_ifm
            facc_x = 2
            facc_y = 1
        else:
            raise RuntimeError('LoopBlockingSolver: only allow ifmap or ofmap '
                               'to bypass.')
        nbats = nested_loop_desc.loopcnt_bat
        usize_gbuf_x = nested_loop_desc.usize_gbuf_of(dce_x)
        usize_gbuf_y = nested_loop_desc.usize_gbuf_of(dce_y)
        # Tiny epsilon keeps the divisions below well-defined for a
        # zero-sized fil.
        usize_gbuf_fil = nested_loop_desc.usize_gbuf_of(de.FIL) + 1e-6
        max_size_gbuf = resource.size_gbuf
        usize_regf_x = nested_loop_desc.usize_regf_of(dce_x)
        usize_regf_y = nested_loop_desc.usize_regf_of(dce_y)
        usize_regf_fil = nested_loop_desc.usize_regf_of(de.FIL) + 1e-6
        max_size_regf = resource.size_regf
        # To minimize Opt I, minimize ty, i.e., maximize ny.
        # Thus in constraints, set nx = 1 and nb = 1 to solve ny.
        ny_top_cand = min(float(max_size_regf - usize_regf_x)
                          / (usize_regf_y + usize_regf_fil),
                          float(max_size_gbuf) / usize_gbuf_fil)
        # Pick max-no-larger factor to stay within the constraint.
        ny_top = Util.closest_factor(nfmaps_y, ny_top_cand)[0]
        assert ny_top <= ny_top_cand
        # NOTE: relies on integer division of exact factors (Python-2-style
        # `/`); ny_top divides nfmaps_y so the quotient is integral.
        ty_top = nfmaps_y / ny_top
        # Re-solve to maximize nb by exploiting the margin.
        nb_top_cand = float(max_size_regf - usize_regf_fil * ny_top) \
                      / (usize_regf_x + usize_regf_y * ny_top)
        # Pick max-no-larger factor to stay within the constraint.
        nb_top = Util.closest_factor(nbats, nb_top_cand)[0]
        assert nb_top <= nb_top_cand
        # Goal value (DRAM accesses) for this role assignment.
        goal_val = ((nfmaps_x * usize_gbuf_x * nbats) * facc_x * ty_top
                    + (nfmaps_y * usize_gbuf_y * nbats) * facc_y
                    + (nfmaps_x * nfmaps_y * usize_gbuf_fil))
        # Sanity-check both constraints with the chosen factors.
        c1 = (1 * usize_regf_x * 1 + ny_top * usize_regf_y * 1
              + 1 * ny_top * usize_regf_fil <= max_size_regf)
        c2 = (1 * ny_top * usize_gbuf_fil <= max_size_gbuf)
        assert c1 and c2
        if goal_val < goal_res:
            # Bug fix: record the new best goal value.  Previously goal_res
            # was never updated, so the comparison was always against inf and
            # the last role assignment unconditionally overwrote the result.
            goal_res = goal_val
            # tb[0] = 1 due to docstring, tb[2] = nb_top.
            tb = (1, nbats / nb_top, nb_top)
            # ty[1] = 1 due to docstring, ty[0] = ty_top, ty[2] = ny_top.
            ty = (ty_top, 1, nfmaps_y / ty_top)
            # tx[1] = 1 due to docstring, tx[2] = nx = 1.
            tx = (nfmaps_x, 1, 1)
            # Compose return values.
            # For orders see docstring: at gbuf, b, y, x; at regf, b, y, x.
            if dce_x == de.IFM:
                ti = tx
                to = ty
                orders = (None, (0, 1, 2), None, (0, 1, 2))
            elif dce_x == de.OFM:
                ti = ty
                to = tx
                orders = (None, (1, 0, 2), None, (1, 0, 2))
    return ti, to, tb, orders
def gen_loopblocking_gbuf_regf(nested_loop_desc, resource, options):
    '''
    Generator for loop blocking schemes that are solved from iofmap gbuf bypass
    analytical models.
    '''
    # A data category qualifies as the sole gbuf resident only when every
    # other category is allowed to bypass gbuf.
    candidates = [dce for dce in range(de.NUM)
                  if all(denum == dce or options.sw_gbuf_bypass[denum]
                         for denum in range(de.NUM))]
    for dce in candidates:
        if dce == de.FIL:
            # Only fil stays in gbuf; both fmaps bypass.
            solution = _solve_lpbl_filter_gbuf_reside(nested_loop_desc,
                                                      resource)
        else:
            assert dce == de.IFM or dce == de.OFM
            # One fmap stays in gbuf; the other fmap and fil bypass.
            solution = _solve_lpbl_iofmap_gbuf_reside(nested_loop_desc,
                                                      resource, dce)
        # solution is the (ti, to, tb, orders) tuple.
        yield solution
| 36.941176 | 79 | 0.619586 |
import math
import itertools
from . import DataCategoryEnum as de
from . import Util
def _solve_lpbl_iofmap_gbuf_reside(nested_loop_desc, resource, reside_dce):
dce_y = reside_dce
if dce_y == de.OFM:
dce_x = de.IFM
nfmaps_x = nested_loop_desc.loopcnt_ifm
nfmaps_y = nested_loop_desc.loopcnt_ofm
facc_x = 1
facc_y = 2
elif dce_y == de.IFM:
dce_x = de.OFM
nfmaps_x = nested_loop_desc.loopcnt_ofm
nfmaps_y = nested_loop_desc.loopcnt_ifm
facc_x = 2
facc_y = 1
else:
raise RuntimeError('LoopBlockingSolver: only allow ifmap or ofmap '
'to bypass.')
nbats = nested_loop_desc.loopcnt_bat
usize_gbuf_x = nested_loop_desc.usize_gbuf_of(dce_x)
usize_gbuf_y = nested_loop_desc.usize_gbuf_of(dce_y)
usize_gbuf_fil = nested_loop_desc.usize_gbuf_of(de.FIL) + 1e-6
max_size_gbuf = resource.size_gbuf
usize_regf_x = nested_loop_desc.usize_regf_of(dce_x)
usize_regf_y = nested_loop_desc.usize_regf_of(dce_y)
usize_regf_fil = nested_loop_desc.usize_regf_of(de.FIL) + 1e-6
max_size_regf = resource.size_regf
def goal(ty, tb):
return ((nfmaps_x * usize_gbuf_x * nbats) * facc_x * ty
+ (nfmaps_y * usize_gbuf_y * nbats) * facc_y
+ (nfmaps_x * nfmaps_y * usize_gbuf_fil) * tb)
def constraints(ty, tb):
c1 = ((nfmaps_y * usize_gbuf_y / float(ty)) * (nbats / float(tb))
< max_size_gbuf)
return c1
ty_tb_cands = []
ty_top = nfmaps_y * math.sqrt(float(usize_gbuf_fil * usize_gbuf_y)
/ (usize_gbuf_x * max_size_gbuf * facc_x))
tb_top = nbats * math.sqrt(float(usize_gbuf_x * usize_gbuf_y * facc_x)
/ (usize_gbuf_fil * max_size_gbuf))
ty_top_adj = Util.closest_factor(nfmaps_y, ty_top)
tb_top_adj = Util.closest_factor(nbats, tb_top)
ty_tb_cands += itertools.product(ty_top_adj, tb_top_adj)
tb_bnd = nbats
ty_bnd = nfmaps_y * usize_gbuf_y / float(max_size_gbuf)
tb_bnd_adj = [tb_bnd]
ty_bnd_adj = Util.closest_factor(nfmaps_y, ty_bnd)
ty_tb_cands += itertools.product(ty_bnd_adj, tb_bnd_adj)
tb_bnd = 1
ty_bnd = nfmaps_y * nbats * usize_gbuf_y / float(max_size_gbuf)
tb_bnd_adj = [tb_bnd]
ty_bnd_adj = Util.closest_factor(nfmaps_y, ty_bnd)
ty_tb_cands += itertools.product(ty_bnd_adj, tb_bnd_adj)
ty_bnd = nfmaps_y
tb_bnd = nbats * usize_gbuf_y / float(max_size_gbuf)
ty_bnd_adj = [ty_bnd]
tb_bnd_adj = Util.closest_factor(nbats, tb_bnd)
ty_tb_cands += itertools.product(ty_bnd_adj, tb_bnd_adj)
ty_bnd = 1
tb_bnd = nfmaps_y * nbats * usize_gbuf_y / float(max_size_gbuf)
ty_bnd_adj = [ty_bnd]
tb_bnd_adj = Util.closest_factor(nbats, tb_bnd)
ty_tb_cands += itertools.product(ty_bnd_adj, tb_bnd_adj)
best_ty_tb = min([(goal(*ty_tb_), ) + ty_tb_ for ty_tb_ in ty_tb_cands
if constraints(*ty_tb_)])
tb0 = best_ty_tb[2]
tb1 = 1
tb2 = nbats / tb0 / tb1
tb = (tb0, tb1, tb2)
ty0 = best_ty_tb[1]
ty2 = 1
ty1 = nfmaps_y / ty0 / ty2
ty = (ty0, ty1, ty2)
tx_bottom = float(max_size_regf - usize_regf_y * ty[2] * tb[2]) \
/ (usize_regf_x * tb[2] + usize_regf_fil)
tx2 = Util.closest_factor(nfmaps_x, tx_bottom)[0]
tx1 = 1
tx0 = nfmaps_x / tx1 / tx2
tx = (tx0, tx1, tx2)
if dce_x == de.IFM:
ti = tx
to = ty
orders = (None, (0, 1, 2), None, (1, 0, 2))
elif dce_x == de.OFM:
ti = ty
to = tx
orders = (None, (1, 0, 2), None, (0, 1, 2))
return ti, to, tb, orders
def _solve_lpbl_filter_gbuf_reside(nested_loop_desc, resource):
goal_res = float('inf')
for dce_y in [de.IFM, de.OFM]:
if dce_y == de.OFM:
dce_x = de.IFM
nfmaps_x = nested_loop_desc.loopcnt_ifm
nfmaps_y = nested_loop_desc.loopcnt_ofm
facc_x = 1
facc_y = 2
elif dce_y == de.IFM:
dce_x = de.OFM
nfmaps_x = nested_loop_desc.loopcnt_ofm
nfmaps_y = nested_loop_desc.loopcnt_ifm
facc_x = 2
facc_y = 1
else:
raise RuntimeError('LoopBlockingSolver: only allow ifmap or ofmap '
'to bypass.')
nbats = nested_loop_desc.loopcnt_bat
usize_gbuf_x = nested_loop_desc.usize_gbuf_of(dce_x)
usize_gbuf_y = nested_loop_desc.usize_gbuf_of(dce_y)
usize_gbuf_fil = nested_loop_desc.usize_gbuf_of(de.FIL) + 1e-6
max_size_gbuf = resource.size_gbuf
usize_regf_x = nested_loop_desc.usize_regf_of(dce_x)
usize_regf_y = nested_loop_desc.usize_regf_of(dce_y)
usize_regf_fil = nested_loop_desc.usize_regf_of(de.FIL) + 1e-6
max_size_regf = resource.size_regf
ny_top_cand = min(float(max_size_regf - usize_regf_x) \
/ (usize_regf_y + usize_regf_fil),
float(max_size_gbuf) / usize_gbuf_fil)
ny_top = Util.closest_factor(nfmaps_y, ny_top_cand)[0]
assert ny_top <= ny_top_cand
ty_top = nfmaps_y / ny_top
nb_top_cand = float(max_size_regf - usize_regf_fil * ny_top)\
/ (usize_regf_x + usize_regf_y * ny_top)
nb_top = Util.closest_factor(nbats, nb_top_cand)[0]
assert nb_top <= nb_top_cand
goal_val = ((nfmaps_x * usize_gbuf_x * nbats) * facc_x * ty_top
+ (nfmaps_y * usize_gbuf_y * nbats) * facc_y
+ (nfmaps_x * nfmaps_y * usize_gbuf_fil))
c1 = (1 * usize_regf_x * 1 + ny_top * usize_regf_y * 1
+ 1 * ny_top * usize_regf_fil <= max_size_regf)
c2 = (1 * ny_top * usize_gbuf_fil <= max_size_gbuf)
assert c1 and c2
if goal_val < goal_res:
tb = (1, nbats / nb_top, nb_top)
ty = (ty_top, 1, nfmaps_y / ty_top)
tx = (nfmaps_x, 1, 1)
if dce_x == de.IFM:
ti = tx
to = ty
orders = (None, (0, 1, 2), None, (0, 1, 2))
elif dce_x == de.OFM:
ti = ty
to = tx
orders = (None, (1, 0, 2), None, (1, 0, 2))
return ti, to, tb, orders
def gen_loopblocking_gbuf_regf(nested_loop_desc, resource, options):
reside_dce_list = []
for reside_dce in range(de.NUM):
if all([denum == reside_dce or options.sw_gbuf_bypass[denum]
for denum in range(de.NUM)]):
reside_dce_list.append(reside_dce)
for reside_dce in reside_dce_list:
if reside_dce == de.FIL:
ti, to, tb, orders = _solve_lpbl_filter_gbuf_reside(
nested_loop_desc, resource)
else:
assert reside_dce == de.IFM or reside_dce == de.OFM
ti, to, tb, orders = _solve_lpbl_iofmap_gbuf_reside(
nested_loop_desc, resource, reside_dce)
yield ti, to, tb, orders
| true | true |
f7fd63e5e478a6e63583c40e52a5b4d1352f0523 | 2,987 | py | Python | sayhello/test_sayhello.py | TyouSF/FlaskWebAC | da7bab8b35bcde34f844077c69099557d0f3cd32 | [
"MIT"
] | null | null | null | sayhello/test_sayhello.py | TyouSF/FlaskWebAC | da7bab8b35bcde34f844077c69099557d0f3cd32 | [
"MIT"
] | null | null | null | sayhello/test_sayhello.py | TyouSF/FlaskWebAC | da7bab8b35bcde34f844077c69099557d0f3cd32 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Author
"""
import unittest
from flask import abort
from sayhello import app, db
from sayhello.models import Message
from sayhello.commands import initdb, forge
class SayHelloTestCase(unittest.TestCase):
    """Integration tests for the sayhello app: views, forms, and CLI commands."""

    def setUp(self):
        # Use an in-memory database and disable CSRF so forms can be posted
        # directly from the test client.
        app.config.update(
            TESTING=True,
            WTF_CSRF_ENABLED=False,
            SQLALCHEMY_DATABASE_URI='sqlite:///:memory:'
        )
        db.create_all()
        self.client = app.test_client()
        self.runner = app.test_cli_runner()

    def tearDown(self):
        # Drop all tables so each test starts from a clean database.
        db.session.remove()
        db.drop_all()

    def test_app_exist(self):
        """The application object is importable and created."""
        # Idiomatic replacement for assertFalse(app is None).
        self.assertIsNotNone(app)

    def test_app_is_testing(self):
        """setUp() puts the app into testing mode."""
        self.assertTrue(app.config['TESTING'])

    def test_404_page(self):
        """Unknown URLs render the custom 404 page."""
        response = self.client.get('/nothing')
        data = response.get_data(as_text=True)
        self.assertIn('404 Error', data)
        self.assertIn('Go Back', data)
        self.assertEqual(response.status_code, 404)

    def test_500_page(self):
        """A server error renders the custom 500 page."""
        # Register a throwaway route that aborts with 500.  Guard against
        # duplicate registration so the test can run more than once within
        # the same process (Flask rejects re-registering an endpoint).
        if 'internal_server_error_for_test' not in app.view_functions:
            @app.route('/500')
            def internal_server_error_for_test():
                abort(500)
        response = self.client.get('/500')
        data = response.get_data(as_text=True)
        self.assertEqual(response.status_code, 500)
        self.assertIn('500 Error', data)
        self.assertIn('Go Back', data)

    def test_index_page(self):
        """The index page renders the greeting heading."""
        response = self.client.get('/')
        data = response.get_data(as_text=True)
        self.assertIn('Say Hello', data)

    def test_create_message(self):
        """Posting a valid form flashes success and shows the message."""
        response = self.client.post('/', data=dict(
            name='Peter',
            body='Hello, world.'
        ), follow_redirects=True)
        data = response.get_data(as_text=True)
        self.assertIn('信息已成功发送', data)
        self.assertIn('Hello, world.', data)

    def test_form_validation(self):
        """A whitespace-only name fails the required-field validator."""
        response = self.client.post('/', data=dict(
            name=' ',
            body='Hello, world.'
        ), follow_redirects=True)
        data = response.get_data(as_text=True)
        self.assertIn('This field is required.', data)

    def test_forge_command(self):
        """`forge` creates the default 20 fake messages."""
        result = self.runner.invoke(forge)
        self.assertIn('测试数据已生成完毕,总计 20 条', result.output)
        self.assertEqual(Message.query.count(), 20)

    def test_forge_command_with_count(self):
        """`forge --count N` creates exactly N fake messages."""
        result = self.runner.invoke(forge, ['--count', '50'])
        self.assertIn('测试数据已生成完毕,总计 50 条', result.output)
        self.assertEqual(Message.query.count(), 50)

    def test_initdb_command(self):
        """`initdb` initializes the database."""
        result = self.runner.invoke(initdb)
        self.assertIn('数据已创建/初始化完毕', result.output)

    def test_initdb_command_with_drop(self):
        """`initdb --drop` prompts for confirmation, then drops the database."""
        result = self.runner.invoke(initdb, ['--drop'], input='y\n')
        self.assertIn(
            '本操作将执行删除数据库,请确认是否继续', result.output)
        self.assertIn('数据库已删除完毕', result.output)
# Allow running this test module directly: `python test_sayhello.py`.
if __name__ == '__main__':
    unittest.main()
| 30.171717 | 68 | 0.623703 |
import unittest
from flask import abort
from sayhello import app, db
from sayhello.models import Message
from sayhello.commands import initdb, forge
class SayHelloTestCase(unittest.TestCase):
def setUp(self):
app.config.update(
TESTING=True,
WTF_CSRF_ENABLED=False,
SQLALCHEMY_DATABASE_URI='sqlite:///:memory:'
)
db.create_all()
self.client = app.test_client()
self.runner = app.test_cli_runner()
def tearDown(self):
db.session.remove()
db.drop_all()
def test_app_exist(self):
self.assertFalse(app is None)
def test_app_is_testing(self):
self.assertTrue(app.config['TESTING'])
def test_404_page(self):
response = self.client.get('/nothing')
data = response.get_data(as_text=True)
self.assertIn('404 Error', data)
self.assertIn('Go Back', data)
self.assertEqual(response.status_code, 404)
def test_500_page(self):
@app.route('/500')
def internal_server_error_for_test():
abort(500)
response = self.client.get('/500')
data = response.get_data(as_text=True)
self.assertEqual(response.status_code, 500)
self.assertIn('500 Error', data)
self.assertIn('Go Back', data)
def test_index_page(self):
response = self.client.get('/')
data = response.get_data(as_text=True)
self.assertIn('Say Hello', data)
def test_create_message(self):
response = self.client.post('/', data=dict(
name='Peter',
body='Hello, world.'
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertIn('信息已成功发送', data)
self.assertIn('Hello, world.', data)
def test_form_validation(self):
response = self.client.post('/', data=dict(
name=' ',
body='Hello, world.'
), follow_redirects=True)
data = response.get_data(as_text=True)
self.assertIn('This field is required.', data)
def test_forge_command(self):
result = self.runner.invoke(forge)
self.assertIn('测试数据已生成完毕,总计 20 条', result.output)
self.assertEqual(Message.query.count(), 20)
def test_forge_command_with_count(self):
result = self.runner.invoke(forge, ['--count', '50'])
self.assertIn('测试数据已生成完毕,总计 50 条', result.output)
self.assertEqual(Message.query.count(), 50)
def test_initdb_command(self):
result = self.runner.invoke(initdb)
self.assertIn('数据已创建/初始化完毕', result.output)
def test_initdb_command_with_drop(self):
result = self.runner.invoke(initdb, ['--drop'], input='y\n')
self.assertIn(
'本操作将执行删除数据库,请确认是否继续', result.output)
self.assertIn('数据库已删除完毕', result.output)
if __name__ == '__main__':
unittest.main()
| true | true |
f7fd642bc98184b28bd45150f5576e5a64fb76b1 | 9,992 | py | Python | fsspec/tests/test_utils.py | dish59742/filesystem_spec | 87e5ca57fd8be7b636451d4237fe47f6e764fa79 | [
"BSD-3-Clause"
] | null | null | null | fsspec/tests/test_utils.py | dish59742/filesystem_spec | 87e5ca57fd8be7b636451d4237fe47f6e764fa79 | [
"BSD-3-Clause"
] | null | null | null | fsspec/tests/test_utils.py | dish59742/filesystem_spec | 87e5ca57fd8be7b636451d4237fe47f6e764fa79 | [
"BSD-3-Clause"
] | null | null | null | import io
import sys
import pytest
from fsspec.utils import (
can_be_local,
common_prefix,
infer_storage_options,
other_paths,
read_block,
seek_delimiter,
setup_logger,
)
WIN = sys.platform.startswith("win")
def test_read_block():
    delimiter = b"\n"
    data = delimiter.join([b"123", b"456", b"789"])
    f = io.BytesIO(data)

    # Plain byte-range read, no delimiter alignment.
    assert read_block(f, 1, 2) == b"23"

    # (offset, length, expected) cases with newline delimiter: the read is
    # extended/shrunk to whole delimited records.
    cases = [
        (0, 1, b"123\n"),
        (0, 2, b"123\n"),
        (0, 3, b"123\n"),
        (0, 5, b"123\n456\n"),
        (0, 8, b"123\n456\n789"),
        (0, 100, b"123\n456\n789"),
        (1, 1, b""),
        (1, 5, b"456\n"),
        (1, 8, b"456\n789"),
    ]
    for offset, length, expected in cases:
        assert read_block(f, offset, length, delimiter=b"\n") == expected

    # Any partition of the byte range reassembles into the full payload.
    for partition in [[(0, 3), (3, 3), (6, 3), (9, 2)], [(0, 4), (4, 4), (8, 4)]]:
        pieces = [read_block(f, o, l, b"\n") for o, l in partition]
        assert b"".join(filter(None, pieces)) == data
def test_read_block_split_before():
    """Test start/middle/end cases of split_before."""  # noqa: I
    # split_before=True keeps the delimiter with the *following* block
    # instead of the preceding one (see the paired asserts below).
    d = (
        "#header" + "".join(">foo{i}\nFOOBAR{i}\n".format(i=i) for i in range(100000))
    ).encode()
    # Read single record at beginning.
    # All reads include beginning of file and read through termination of
    # delimited record.
    assert read_block(io.BytesIO(d), 0, 10, delimiter=b"\n") == b"#header>foo0\n"
    assert (
        read_block(io.BytesIO(d), 0, 10, delimiter=b"\n", split_before=True)
        == b"#header>foo0"
    )
    assert (
        read_block(io.BytesIO(d), 0, 10, delimiter=b">") == b"#header>foo0\nFOOBAR0\n>"
    )
    assert (
        read_block(io.BytesIO(d), 0, 10, delimiter=b">", split_before=True)
        == b"#header>foo0\nFOOBAR0\n"
    )
    # Read multiple records at beginning.
    # All reads include beginning of file and read through termination of
    # delimited record.
    assert (
        read_block(io.BytesIO(d), 0, 27, delimiter=b"\n")
        == b"#header>foo0\nFOOBAR0\n>foo1\nFOOBAR1\n"
    )
    assert (
        read_block(io.BytesIO(d), 0, 27, delimiter=b"\n", split_before=True)
        == b"#header>foo0\nFOOBAR0\n>foo1\nFOOBAR1"
    )
    assert (
        read_block(io.BytesIO(d), 0, 27, delimiter=b">")
        == b"#header>foo0\nFOOBAR0\n>foo1\nFOOBAR1\n>"
    )
    assert (
        read_block(io.BytesIO(d), 0, 27, delimiter=b">", split_before=True)
        == b"#header>foo0\nFOOBAR0\n>foo1\nFOOBAR1\n"
    )
    # Read with offset spanning into next record, splits on either side of delimiter.
    # Read not spanning the full record returns nothing.
    assert read_block(io.BytesIO(d), 10, 3, delimiter=b"\n") == b"FOOBAR0\n"
    assert (
        read_block(io.BytesIO(d), 10, 3, delimiter=b"\n", split_before=True)
        == b"\nFOOBAR0"
    )
    assert read_block(io.BytesIO(d), 10, 3, delimiter=b">") == b""
    assert read_block(io.BytesIO(d), 10, 3, delimiter=b">", split_before=True) == b""
    # Read with offset spanning multiple records, splits on either side of delimiter.
    assert (
        read_block(io.BytesIO(d), 10, 20, delimiter=b"\n")
        == b"FOOBAR0\n>foo1\nFOOBAR1\n"
    )
    assert (
        read_block(io.BytesIO(d), 10, 20, delimiter=b"\n", split_before=True)
        == b"\nFOOBAR0\n>foo1\nFOOBAR1"
    )
    assert read_block(io.BytesIO(d), 10, 20, delimiter=b">") == b"foo1\nFOOBAR1\n>"
    assert (
        read_block(io.BytesIO(d), 10, 20, delimiter=b">", split_before=True)
        == b">foo1\nFOOBAR1\n"
    )
    # Read record at end: all reads run through to the end of the data.
    tlen = len(d)
    assert (
        read_block(io.BytesIO(d), tlen - 30, 35, delimiter=b"\n")
        == b">foo99999\nFOOBAR99999\n"
    )
    assert (
        read_block(io.BytesIO(d), tlen - 30, 35, delimiter=b"\n", split_before=True)
        == b"\n>foo99999\nFOOBAR99999\n"
    )
    assert (
        read_block(io.BytesIO(d), tlen - 30, 35, delimiter=b">")
        == b"foo99999\nFOOBAR99999\n"
    )
    assert (
        read_block(io.BytesIO(d), tlen - 30, 35, delimiter=b">", split_before=True)
        == b">foo99999\nFOOBAR99999\n"
    )
def test_seek_delimiter_endline():
    stream = io.BytesIO(b"123\n456\n789")

    # An offset already at zero is never moved.
    seek_delimiter(stream, b"\n", 5)
    assert stream.tell() == 0

    # From inside the first record, the cursor lands just past the first
    # newline, whatever the blocksize.
    for blocksize in (1, 5, 100):
        stream.seek(1)
        seek_delimiter(stream, b"\n", blocksize=blocksize)
        assert stream.tell() == 4

    # Multi-byte delimiters are found even when blocksize is smaller than
    # the delimiter itself.
    stream = io.BytesIO(b"123abc456abc789")
    for blocksize in (1, 2, 3, 4, 5, 6, 10):
        stream.seek(1)
        seek_delimiter(stream, b"abc", blocksize=blocksize)
        assert stream.tell() == 6

    # With no delimiter ahead, seek runs to the end of the stream.
    stream = io.BytesIO(b"123\n456")
    stream.seek(5)
    seek_delimiter(stream, b"\n", 5)
    assert stream.tell() == 7
def test_infer_options():
    # Bare absolute local paths map to the "file" protocol unchanged, with
    # no extra keys in the result.
    so = infer_storage_options("/mnt/datasets/test.csv")
    assert so.pop("protocol") == "file"
    assert so.pop("path") == "/mnt/datasets/test.csv"
    assert not so
    # Relative paths are preserved verbatim.
    assert infer_storage_options("./test.csv")["path"] == "./test.csv"
    assert infer_storage_options("../test.csv")["path"] == "../test.csv"
    # Windows drive-letter paths are local files, not URLs (the "C:" is not
    # mistaken for a protocol).
    so = infer_storage_options("C:\\test.csv")
    assert so.pop("protocol") == "file"
    assert so.pop("path") == "C:\\test.csv"
    assert not so
    assert infer_storage_options("d:\\test.csv")["path"] == "d:\\test.csv"
    assert infer_storage_options("\\test.csv")["path"] == "\\test.csv"
    assert infer_storage_options(".\\test.csv")["path"] == ".\\test.csv"
    assert infer_storage_options("test.csv")["path"] == "test.csv"
    # A fully-qualified URL is split into all of its components, and
    # inherit_storage_options entries are merged into the result.
    so = infer_storage_options(
        "hdfs://username:pwd@Node:123/mnt/datasets/test.csv?q=1#fragm",
        inherit_storage_options={"extra": "value"},
    )
    assert so.pop("protocol") == "hdfs"
    assert so.pop("username") == "username"
    assert so.pop("password") == "pwd"
    assert so.pop("host") == "Node"
    assert so.pop("port") == 123
    assert so.pop("path") == "/mnt/datasets/test.csv#fragm"
    assert so.pop("url_query") == "q=1"
    assert so.pop("url_fragment") == "fragm"
    assert so.pop("extra") == "value"
    assert not so
    # Host name case is preserved (URL netlocs are not lowercased here).
    so = infer_storage_options("hdfs://User-name@Node-name.com/mnt/datasets/test.csv")
    assert so.pop("username") == "User-name"
    assert so.pop("host") == "Node-name.com"
    # HTTP URLs keep the full URL as the path.
    u = "http://127.0.0.1:8080/test.csv"
    assert infer_storage_options(u) == {"protocol": "http", "path": u}
    # For s3 and gcs the netloc is actually the bucket name, so we want to
    # include it in the path. Test that:
    # - Parsing doesn't lowercase the bucket
    # - The bucket is included in path
    for protocol in ["s3", "gcs", "gs"]:
        options = infer_storage_options("%s://Bucket-name.com/test.csv" % protocol)
        assert options["path"] == "Bucket-name.com/test.csv"
    # Inherited options may not collide with keys parsed from the URL.
    with pytest.raises(KeyError):
        infer_storage_options("file:///bucket/file.csv", {"path": "collide"})
    with pytest.raises(KeyError):
        infer_storage_options("hdfs:///bucket/file.csv", {"protocol": "collide"})
def test_infer_simple():
    # A double-slash path is still a local file path, not a netloc.
    result = infer_storage_options("//mnt/datasets/test.csv")
    assert result["protocol"] == "file"
    assert result["path"] == "//mnt/datasets/test.csv"
    assert result.get("host", None) is None
@pytest.mark.parametrize(
    "urlpath, expected_path",
    (
        (r"c:\foo\bar", r"c:\foo\bar"),
        (r"C:\\foo\bar", r"C:\\foo\bar"),
        (r"c:/foo/bar", r"c:/foo/bar"),
        (r"file:///c|\foo\bar", r"c:\foo\bar"),
        (r"file:///C|/foo/bar", r"C:/foo/bar"),
        (r"file:///C:/foo/bar", r"C:/foo/bar"),
    ),
)
def test_infer_storage_options_c(urlpath, expected_path):
    # Windows drive-letter forms (including the legacy ``|`` spelling in
    # file:// URLs) always resolve to the local-file protocol.
    options = infer_storage_options(urlpath)
    assert options["protocol"] == "file"
    assert options["path"] == expected_path
@pytest.mark.parametrize(
    "paths, out",
    (
        (["/more/dir/", "/more/dir/two", "/more/one", "/more/three"], "/more"),
        (["/", "", "/"], ""),
        (["/", "/"], "/"),
        (["/more/", "/"], ""),
        (["/more/", "/more"], "/more"),
        (["more/dir/", "more/dir/two", "more/one", "more/three"], "more"),
    ),
)
def test_common_prefix(paths, out):
    # The longest shared directory prefix of all paths.
    prefix = common_prefix(paths)
    assert prefix == out
@pytest.mark.parametrize(
    "paths, other, is_dir, expected",
    (
        (["/path1"], "/path2", False, ["/path2"]),
        (["/path1"], "/path2", True, ["/path2/path1"]),
        (["/path1"], "/path2", None, ["/path2"]),
        (["/path1"], "/path2/", True, ["/path2/path1"]),
        (["/path1"], ["/path2"], True, ["/path2"]),
        (["/path1", "/path2"], "/path2", True, ["/path2/path1", "/path2/path2"]),
        (
            ["/more/path1", "/more/path2"],
            "/path2",
            True,
            ["/path2/path1", "/path2/path2"],
        ),
        (
            ["/more/path1", "/more/path2"],
            "/path2",
            False,
            ["/path2/path1", "/path2/path2"],
        ),
        (
            ["/more/path1", "/more/path2"],
            "/path2/",
            None,
            ["/path2/path1", "/path2/path2"],
        ),
        (
            ["/more/path1", "/diff/path2"],
            "/path2/",
            None,
            ["/path2/more/path1", "/path2/diff/path2"],
        ),
    ),
)
def test_other_paths(paths, other, is_dir, expected):
    # Source paths are remapped under the destination `other`.
    remapped = other_paths(paths, other, is_dir)
    assert remapped == expected
def test_log():
    import logging

    # setup_logger returns a logger configured at DEBUG level.
    test_logger = setup_logger("fsspec.test")
    assert test_logger.level == logging.DEBUG
@pytest.mark.parametrize(
    "par",
    [
        ("afile", True),
        ("file://afile", True),
        ("noproto://afile", False),
        ("noproto::stuff", False),
        ("simplecache::stuff", True),
        ("simplecache://stuff", True),
    ],
)
def test_can_local(par):
    # ``par`` packs (urlpath, expected localizability).
    urlpath, expected = par
    assert can_be_local(urlpath) == expected
| 31.923323 | 87 | 0.567854 | import io
import sys
import pytest
from fsspec.utils import (
can_be_local,
common_prefix,
infer_storage_options,
other_paths,
read_block,
seek_delimiter,
setup_logger,
)
WIN = sys.platform.startswith("win")
def test_read_block():
delimiter = b"\n"
data = delimiter.join([b"123", b"456", b"789"])
f = io.BytesIO(data)
assert read_block(f, 1, 2) == b"23"
assert read_block(f, 0, 1, delimiter=b"\n") == b"123\n"
assert read_block(f, 0, 2, delimiter=b"\n") == b"123\n"
assert read_block(f, 0, 3, delimiter=b"\n") == b"123\n"
assert read_block(f, 0, 5, delimiter=b"\n") == b"123\n456\n"
assert read_block(f, 0, 8, delimiter=b"\n") == b"123\n456\n789"
assert read_block(f, 0, 100, delimiter=b"\n") == b"123\n456\n789"
assert read_block(f, 1, 1, delimiter=b"\n") == b""
assert read_block(f, 1, 5, delimiter=b"\n") == b"456\n"
assert read_block(f, 1, 8, delimiter=b"\n") == b"456\n789"
for ols in [[(0, 3), (3, 3), (6, 3), (9, 2)], [(0, 4), (4, 4), (8, 4)]]:
out = [read_block(f, o, l, b"\n") for o, l in ols]
assert b"".join(filter(None, out)) == data
def test_read_block_split_before():
d = (
"#header" + "".join(">foo{i}\nFOOBAR{i}\n".format(i=i) for i in range(100000))
).encode()
assert read_block(io.BytesIO(d), 0, 10, delimiter=b"\n") == b"#header>foo0\n"
assert (
read_block(io.BytesIO(d), 0, 10, delimiter=b"\n", split_before=True)
== b"#header>foo0"
)
assert (
read_block(io.BytesIO(d), 0, 10, delimiter=b">") == b"#header>foo0\nFOOBAR0\n>"
)
assert (
read_block(io.BytesIO(d), 0, 10, delimiter=b">", split_before=True)
== b"#header>foo0\nFOOBAR0\n"
)
assert (
read_block(io.BytesIO(d), 0, 27, delimiter=b"\n")
== b"#header>foo0\nFOOBAR0\n>foo1\nFOOBAR1\n"
)
assert (
read_block(io.BytesIO(d), 0, 27, delimiter=b"\n", split_before=True)
== b"#header>foo0\nFOOBAR0\n>foo1\nFOOBAR1"
)
assert (
read_block(io.BytesIO(d), 0, 27, delimiter=b">")
== b"#header>foo0\nFOOBAR0\n>foo1\nFOOBAR1\n>"
)
assert (
read_block(io.BytesIO(d), 0, 27, delimiter=b">", split_before=True)
== b"#header>foo0\nFOOBAR0\n>foo1\nFOOBAR1\n"
)
assert read_block(io.BytesIO(d), 10, 3, delimiter=b"\n") == b"FOOBAR0\n"
assert (
read_block(io.BytesIO(d), 10, 3, delimiter=b"\n", split_before=True)
== b"\nFOOBAR0"
)
assert read_block(io.BytesIO(d), 10, 3, delimiter=b">") == b""
assert read_block(io.BytesIO(d), 10, 3, delimiter=b">", split_before=True) == b""
assert (
read_block(io.BytesIO(d), 10, 20, delimiter=b"\n")
== b"FOOBAR0\n>foo1\nFOOBAR1\n"
)
assert (
read_block(io.BytesIO(d), 10, 20, delimiter=b"\n", split_before=True)
== b"\nFOOBAR0\n>foo1\nFOOBAR1"
)
assert read_block(io.BytesIO(d), 10, 20, delimiter=b">") == b"foo1\nFOOBAR1\n>"
assert (
read_block(io.BytesIO(d), 10, 20, delimiter=b">", split_before=True)
== b">foo1\nFOOBAR1\n"
)
tlen = len(d)
assert (
read_block(io.BytesIO(d), tlen - 30, 35, delimiter=b"\n")
== b">foo99999\nFOOBAR99999\n"
)
assert (
read_block(io.BytesIO(d), tlen - 30, 35, delimiter=b"\n", split_before=True)
== b"\n>foo99999\nFOOBAR99999\n"
)
assert (
read_block(io.BytesIO(d), tlen - 30, 35, delimiter=b">")
== b"foo99999\nFOOBAR99999\n"
)
assert (
read_block(io.BytesIO(d), tlen - 30, 35, delimiter=b">", split_before=True)
== b">foo99999\nFOOBAR99999\n"
)
def test_seek_delimiter_endline():
f = io.BytesIO(b"123\n456\n789")
seek_delimiter(f, b"\n", 5)
assert f.tell() == 0
for bs in [1, 5, 100]:
f.seek(1)
seek_delimiter(f, b"\n", blocksize=bs)
assert f.tell() == 4
f = io.BytesIO(b"123abc456abc789")
for bs in [1, 2, 3, 4, 5, 6, 10]:
f.seek(1)
seek_delimiter(f, b"abc", blocksize=bs)
assert f.tell() == 6
f = io.BytesIO(b"123\n456")
f.seek(5)
seek_delimiter(f, b"\n", 5)
assert f.tell() == 7
def test_infer_options():
so = infer_storage_options("/mnt/datasets/test.csv")
assert so.pop("protocol") == "file"
assert so.pop("path") == "/mnt/datasets/test.csv"
assert not so
assert infer_storage_options("./test.csv")["path"] == "./test.csv"
assert infer_storage_options("../test.csv")["path"] == "../test.csv"
so = infer_storage_options("C:\\test.csv")
assert so.pop("protocol") == "file"
assert so.pop("path") == "C:\\test.csv"
assert not so
assert infer_storage_options("d:\\test.csv")["path"] == "d:\\test.csv"
assert infer_storage_options("\\test.csv")["path"] == "\\test.csv"
assert infer_storage_options(".\\test.csv")["path"] == ".\\test.csv"
assert infer_storage_options("test.csv")["path"] == "test.csv"
so = infer_storage_options(
"hdfs://username:pwd@Node:123/mnt/datasets/test.csv?q=1#fragm",
inherit_storage_options={"extra": "value"},
)
assert so.pop("protocol") == "hdfs"
assert so.pop("username") == "username"
assert so.pop("password") == "pwd"
assert so.pop("host") == "Node"
assert so.pop("port") == 123
assert so.pop("path") == "/mnt/datasets/test.csv#fragm"
assert so.pop("url_query") == "q=1"
assert so.pop("url_fragment") == "fragm"
assert so.pop("extra") == "value"
assert not so
so = infer_storage_options("hdfs://User-name@Node-name.com/mnt/datasets/test.csv")
assert so.pop("username") == "User-name"
assert so.pop("host") == "Node-name.com"
u = "http://127.0.0.1:8080/test.csv"
assert infer_storage_options(u) == {"protocol": "http", "path": u}
# - The bucket is included in path
for protocol in ["s3", "gcs", "gs"]:
options = infer_storage_options("%s://Bucket-name.com/test.csv" % protocol)
assert options["path"] == "Bucket-name.com/test.csv"
with pytest.raises(KeyError):
infer_storage_options("file:///bucket/file.csv", {"path": "collide"})
with pytest.raises(KeyError):
infer_storage_options("hdfs:///bucket/file.csv", {"protocol": "collide"})
def test_infer_simple():
    """A double-slash local path stays a plain "file" path with no host."""
    url = "//mnt/datasets/test.csv"
    opts = infer_storage_options(url)
    assert opts["protocol"] == "file"
    assert opts["path"] == url
    assert opts.get("host", None) is None
# Drive-letter URLs (both the legacy "c|" spelling and "C:") must be
# normalised to local Windows paths under the "file" protocol.
@pytest.mark.parametrize(
    "urlpath, expected_path",
    (
        (r"c:\foo\bar", r"c:\foo\bar"),
        (r"C:\\foo\bar", r"C:\\foo\bar"),
        (r"c:/foo/bar", r"c:/foo/bar"),
        (r"file:///c|\foo\bar", r"c:\foo\bar"),
        (r"file:///C|/foo/bar", r"C:/foo/bar"),
        (r"file:///C:/foo/bar", r"C:/foo/bar"),
    ),
)
def test_infer_storage_options_c(urlpath, expected_path):
    so = infer_storage_options(urlpath)
    assert so["protocol"] == "file"
    assert so["path"] == expected_path
@pytest.mark.parametrize(
    "paths, out",
    (
        # Nested entries and trailing slashes collapse to the shared parent.
        (["/more/dir/", "/more/dir/two", "/more/one", "/more/three"], "/more"),
        # No common directory prefix yields the empty string.
        (["/", "", "/"], ""),
        (["/", "/"], "/"),
        (["/more/", "/"], ""),
        (["/more/", "/more"], "/more"),
        # Relative paths behave the same way as absolute ones.
        (["more/dir/", "more/dir/two", "more/one", "more/three"], "more"),
    ),
)
def test_common_prefix(paths, out):
    """common_prefix() returns the longest shared directory prefix."""
    assert common_prefix(paths) == out
@pytest.mark.parametrize(
    "paths, other, is_dir, expected",
    (
        # Single source onto a target path: the target is used as-is...
        (["/path1"], "/path2", False, ["/path2"]),
        # ...unless the target is a directory, then the basename is appended.
        (["/path1"], "/path2", True, ["/path2/path1"]),
        (["/path1"], "/path2", None, ["/path2"]),
        (["/path1"], "/path2/", True, ["/path2/path1"]),
        # An explicit list of targets is returned unchanged.
        (["/path1"], ["/path2"], True, ["/path2"]),
        (["/path1", "/path2"], "/path2", True, ["/path2/path1", "/path2/path2"]),
        # Multiple sources: the common prefix is stripped before joining.
        (
            ["/more/path1", "/more/path2"],
            "/path2",
            True,
            ["/path2/path1", "/path2/path2"],
        ),
        (
            ["/more/path1", "/more/path2"],
            "/path2",
            False,
            ["/path2/path1", "/path2/path2"],
        ),
        (
            ["/more/path1", "/more/path2"],
            "/path2/",
            None,
            ["/path2/path1", "/path2/path2"],
        ),
        # Sources without a common parent keep their full relative layout.
        (
            ["/more/path1", "/diff/path2"],
            "/path2/",
            None,
            ["/path2/more/path1", "/path2/diff/path2"],
        ),
    ),
)
def test_other_paths(paths, other, is_dir, expected):
    """other_paths() maps source paths onto a destination path/template."""
    assert other_paths(paths, other, is_dir) == expected
def test_log():
    """setup_logger() leaves the named logger configured at DEBUG level."""
    import logging

    configured = setup_logger("fsspec.test")
    assert configured.level == logging.DEBUG
# can_be_local() is true only for protocols that map to (or cache onto) the
# local filesystem; unknown protocols are rejected.
@pytest.mark.parametrize(
    "par",
    [
        ("afile", True),
        ("file://afile", True),
        ("noproto://afile", False),
        ("noproto::stuff", False),
        ("simplecache::stuff", True),
        ("simplecache://stuff", True),
    ],
)
def test_can_local(par):
    # Each parameter is a (url, expected-result) pair.
    url, outcome = par
    assert can_be_local(url) == outcome
| true | true |
f7fd649c3b9658c2a095f6addd34f415f5284097 | 2,743 | py | Python | configs/hpt-pretrain/bdd/finetune/all-labels/90-epoch-0_01-lr-finetune.py | Berkeley-Data/OpenSelfSup | 221191b88d891de57725b149caf237ffef72e529 | [
"Apache-2.0"
] | null | null | null | configs/hpt-pretrain/bdd/finetune/all-labels/90-epoch-0_01-lr-finetune.py | Berkeley-Data/OpenSelfSup | 221191b88d891de57725b149caf237ffef72e529 | [
"Apache-2.0"
] | 6 | 2021-03-11T05:35:54.000Z | 2021-04-03T22:25:11.000Z | configs/hpt-pretrain/bdd/finetune/all-labels/90-epoch-0_01-lr-finetune.py | Berkeley-Data/OpenSelfSup | 221191b88d891de57725b149caf237ffef72e529 | [
"Apache-2.0"
# Fine-tuning config: 90 epochs at LR 0.01 on BDD weather classification,
# inheriting defaults from the shared finetune evaluation base config.
_base_ = "finetune-eval-base.py"

# dataset settings
data_source_cfg = dict(
    type="ImageNet",
    memcached=False,
    mclient_path='/no/matter',
    # this will be ignored if type != ImageListMultihead
)

# BDD weather-classification splits: image-list files plus image roots.
data_train_list = "data/bdd/meta/train_weather_labeled.txt"
data_train_root = 'data/bdd'
data_val_list = "data/bdd/meta/val_weather_labeled.txt"
data_val_root = 'data/bdd'
data_test_list = "data/bdd/meta/test_weather_labeled.txt"
data_test_root = 'data/bdd'

dataset_type = "ClassificationDataset"
# Per-channel normalisation statistics used by both pipelines.
img_norm_cfg = dict(mean=[0.2789, 0.2929, 0.2902], std=[0.2474, 0.2653, 0.2761])
train_pipeline = [
    dict(type='RandomResizedCrop', size=224),
    dict(type='RandomHorizontalFlip'),
    dict(type='ToTensor'),
    dict(type='Normalize', **img_norm_cfg),
]
test_pipeline = [
    dict(type='Resize', size=256),
    dict(type='CenterCrop', size=224),
    dict(type='ToTensor'),
    dict(type='Normalize', **img_norm_cfg),
]
data = dict(
    batch_size=64,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        data_source=dict(
            list_file=data_train_list, root=data_train_root,
            **data_source_cfg),
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_source=dict(
            list_file=data_val_list, root=data_val_root, **data_source_cfg),
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_source=dict(
            list_file=data_test_list, root=data_test_root, **data_source_cfg),
        pipeline=test_pipeline))

# Evaluate top-1/top-5 accuracy on the val and test splits every epoch.
custom_hooks = [
    dict(
        name="val",
        type='ValidateHook',
        dataset=data['val'],
        by_epoch=True,
        initial=False,
        interval=1,
        imgs_per_gpu=32,
        workers_per_gpu=4,
        eval_param=dict(topk=(1,5))),
    dict(
        name="test",
        type='ValidateHook',
        dataset=data['test'],
        by_epoch=True,
        initial=False,
        interval=1,
        imgs_per_gpu=32,
        workers_per_gpu=4,
        eval_param=dict(topk=(1,5))),
]

by_iter =False

# learning policy
lr_config = dict(
    by_epoch=True,
    policy='step',
    step=[30,60],
    gamma=0.1  # multiply LR by this number at each step
)

# momentum and weight decay from VTAB and IDRL
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.,
                 paramwise_options={'\Ahead.': dict(lr_mult=100)})

# runtime settings
# total iters or total epochs
total_epochs=90
checkpoint_config = dict(interval=90)
log_config = dict(
    interval=1,
    by_epoch=True,
    hooks=[
        dict(type='TextLoggerHook', by_epoch=True),
        dict(type='TensorboardLoggerHook', by_epoch=True)
    ])
optimizer_config = dict(update_interval=4)
| 25.165138 | 80 | 0.645643 | _base_ = "finetune-eval-base.py"
data_source_cfg = dict(
type="ImageNet",
memcached=False,
mclient_path='/no/matter',
)
data_train_list = "data/bdd/meta/train_weather_labeled.txt"
data_train_root = 'data/bdd'
data_val_list = "data/bdd/meta/val_weather_labeled.txt"
data_val_root = 'data/bdd'
data_test_list = "data/bdd/meta/test_weather_labeled.txt"
data_test_root = 'data/bdd'
dataset_type = "ClassificationDataset"
img_norm_cfg = dict(mean=[0.2789, 0.2929, 0.2902], std=[0.2474, 0.2653, 0.2761])
train_pipeline = [
dict(type='RandomResizedCrop', size=224),
dict(type='RandomHorizontalFlip'),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg),
]
test_pipeline = [
dict(type='Resize', size=256),
dict(type='CenterCrop', size=224),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg),
]
data = dict(
batch_size=64,
workers_per_gpu=4,
train=dict(
type=dataset_type,
data_source=dict(
list_file=data_train_list, root=data_train_root,
**data_source_cfg),
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_source=dict(
list_file=data_val_list, root=data_val_root, **data_source_cfg),
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_source=dict(
list_file=data_test_list, root=data_test_root, **data_source_cfg),
pipeline=test_pipeline))
custom_hooks = [
dict(
name="val",
type='ValidateHook',
dataset=data['val'],
by_epoch=True,
initial=False,
interval=1,
imgs_per_gpu=32,
workers_per_gpu=4,
eval_param=dict(topk=(1,5))),
dict(
name="test",
type='ValidateHook',
dataset=data['test'],
by_epoch=True,
initial=False,
interval=1,
imgs_per_gpu=32,
workers_per_gpu=4,
eval_param=dict(topk=(1,5))),
]
by_iter =False
lr_config = dict(
by_epoch=True,
policy='step',
step=[30,60],
gamma=0.1
)
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.,
paramwise_options={'\Ahead.': dict(lr_mult=100)})
total_epochs=90
checkpoint_config = dict(interval=90)
log_config = dict(
interval=1,
by_epoch=True,
hooks=[
dict(type='TextLoggerHook', by_epoch=True),
dict(type='TensorboardLoggerHook', by_epoch=True)
])
optimizer_config = dict(update_interval=4)
| true | true |
f7fd64cff1bf17107c5f0241c8e0162f787485c1 | 634 | py | Python | setup.py | FrostMN/FlaskAuth | 9cfc274dfe927a254b005809e65c9a620214236e | [
"MIT"
] | null | null | null | setup.py | FrostMN/FlaskAuth | 9cfc274dfe927a254b005809e65c9a620214236e | [
"MIT"
] | null | null | null | setup.py | FrostMN/FlaskAuth | 9cfc274dfe927a254b005809e65c9a620214236e | [
"MIT"
] | null | null | null | import setuptools
# Use the project README verbatim as the PyPI long description.
with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="FlaskAuth",
    version="0.0.1",
    author="Aaron Frost",
    author_email="author@example.com",
    description="A small example package",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/FrostMN/FlaskAuth",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
)
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="FlaskAuth",
version="0.0.1",
author="Aaron Frost",
author_email="author@example.com",
description="A small example package",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/FrostMN/FlaskAuth",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
) | true | true |
f7fd6510d8aa7a6879e882b0d076f52ae4870293 | 42,456 | py | Python | Lib/test/test_time.py | shreya1312/cpython | cae1a1951b90f6f99a92ed0209169276c106c56d | [
"0BSD"
] | 2 | 2021-09-18T04:38:55.000Z | 2022-01-27T09:23:30.000Z | Lib/test/test_time.py | shreya1312/cpython | cae1a1951b90f6f99a92ed0209169276c106c56d | [
"0BSD"
] | 11 | 2020-12-01T05:39:22.000Z | 2022-03-01T07:01:05.000Z | Lib/test/test_time.py | shreya1312/cpython | cae1a1951b90f6f99a92ed0209169276c106c56d | [
"0BSD"
] | 1 | 2017-09-11T21:15:52.000Z | 2017-09-11T21:15:52.000Z | from test import support
from test.support import warnings_helper
import decimal
import enum
import locale
import math
import platform
import sys
import sysconfig
import time
import threading
import unittest
try:
import _testcapi
except ImportError:
_testcapi = None
from test.support import skip_if_buggy_ucrt_strfptime
# Max year is only limited by the size of C int.
SIZEOF_INT = sysconfig.get_config_var('SIZEOF_INT') or 4
TIME_MAXYEAR = (1 << 8 * SIZEOF_INT - 1) - 1
TIME_MINYEAR = -TIME_MAXYEAR - 1 + 1900

# Conversion factors between the time units used throughout these tests.
SEC_TO_US = 10 ** 6
US_TO_NS = 10 ** 3
MS_TO_NS = 10 ** 6
SEC_TO_NS = 10 ** 9
NS_TO_SEC = 10 ** 9
class _PyTime(enum.IntEnum):
    """Rounding modes of the C _PyTime API, mirrored here for the tests."""
    # Round towards minus infinity (-inf)
    ROUND_FLOOR = 0
    # Round towards infinity (+inf)
    ROUND_CEILING = 1
    # Round to nearest with ties going to nearest even integer
    ROUND_HALF_EVEN = 2
    # Round away from zero
    ROUND_UP = 3
# _PyTime_t is int64_t
_PyTime_MIN = -2 ** 63
_PyTime_MAX = 2 ** 63 - 1

# Rounding modes supported by PyTime.  Each C rounding mode is paired with
# the decimal module's equivalent, used as a reference implementation.
ROUNDING_MODES = (
    # (PyTime rounding method, decimal rounding method)
    (_PyTime.ROUND_FLOOR, decimal.ROUND_FLOOR),
    (_PyTime.ROUND_CEILING, decimal.ROUND_CEILING),
    (_PyTime.ROUND_HALF_EVEN, decimal.ROUND_HALF_EVEN),
    (_PyTime.ROUND_UP, decimal.ROUND_UP),
)
class TimeTestCase(unittest.TestCase):

    def setUp(self):
        # Reference timestamp shared by the conversion/formatting tests.
        self.t = time.time()

    def test_data_attributes(self):
        # The timezone data attributes must at least exist.
        time.altzone
        time.daylight
        time.timezone
        time.tzname

    def test_time(self):
        time.time()
        info = time.get_clock_info('time')
        # The system clock is not monotonic and may be adjusted
        # (presumably by NTP or a manual clock change -- see the
        # clock_settime tests below).
        self.assertFalse(info.monotonic)
        self.assertTrue(info.adjustable)
def test_time_ns_type(self):
def check_ns(sec, ns):
self.assertIsInstance(ns, int)
sec_ns = int(sec * 1e9)
# tolerate a difference of 50 ms
self.assertLess((sec_ns - ns), 50 ** 6, (sec, ns))
check_ns(time.time(),
time.time_ns())
check_ns(time.monotonic(),
time.monotonic_ns())
check_ns(time.perf_counter(),
time.perf_counter_ns())
check_ns(time.process_time(),
time.process_time_ns())
if hasattr(time, 'thread_time'):
check_ns(time.thread_time(),
time.thread_time_ns())
if hasattr(time, 'clock_gettime'):
check_ns(time.clock_gettime(time.CLOCK_REALTIME),
time.clock_gettime_ns(time.CLOCK_REALTIME))
@unittest.skipUnless(hasattr(time, 'clock_gettime'),
'need time.clock_gettime()')
def test_clock_realtime(self):
t = time.clock_gettime(time.CLOCK_REALTIME)
self.assertIsInstance(t, float)
@unittest.skipUnless(hasattr(time, 'clock_gettime'),
'need time.clock_gettime()')
@unittest.skipUnless(hasattr(time, 'CLOCK_MONOTONIC'),
'need time.CLOCK_MONOTONIC')
def test_clock_monotonic(self):
a = time.clock_gettime(time.CLOCK_MONOTONIC)
b = time.clock_gettime(time.CLOCK_MONOTONIC)
self.assertLessEqual(a, b)
@unittest.skipUnless(hasattr(time, 'pthread_getcpuclockid'),
'need time.pthread_getcpuclockid()')
@unittest.skipUnless(hasattr(time, 'clock_gettime'),
'need time.clock_gettime()')
def test_pthread_getcpuclockid(self):
clk_id = time.pthread_getcpuclockid(threading.get_ident())
self.assertTrue(type(clk_id) is int)
# when in 32-bit mode AIX only returns the predefined constant
if not platform.system() == "AIX":
self.assertNotEqual(clk_id, time.CLOCK_THREAD_CPUTIME_ID)
elif (sys.maxsize.bit_length() > 32):
self.assertNotEqual(clk_id, time.CLOCK_THREAD_CPUTIME_ID)
else:
self.assertEqual(clk_id, time.CLOCK_THREAD_CPUTIME_ID)
t1 = time.clock_gettime(clk_id)
t2 = time.clock_gettime(clk_id)
self.assertLessEqual(t1, t2)
@unittest.skipUnless(hasattr(time, 'clock_getres'),
'need time.clock_getres()')
def test_clock_getres(self):
res = time.clock_getres(time.CLOCK_REALTIME)
self.assertGreater(res, 0.0)
self.assertLessEqual(res, 1.0)
@unittest.skipUnless(hasattr(time, 'clock_settime'),
'need time.clock_settime()')
def test_clock_settime(self):
t = time.clock_gettime(time.CLOCK_REALTIME)
try:
time.clock_settime(time.CLOCK_REALTIME, t)
except PermissionError:
pass
if hasattr(time, 'CLOCK_MONOTONIC'):
self.assertRaises(OSError,
time.clock_settime, time.CLOCK_MONOTONIC, 0)
def test_conversions(self):
self.assertEqual(time.ctime(self.t),
time.asctime(time.localtime(self.t)))
self.assertEqual(int(time.mktime(time.localtime(self.t))),
int(self.t))
def test_sleep(self):
self.assertRaises(ValueError, time.sleep, -2)
self.assertRaises(ValueError, time.sleep, -1)
time.sleep(1.2)
def test_strftime(self):
tt = time.gmtime(self.t)
for directive in ('a', 'A', 'b', 'B', 'c', 'd', 'H', 'I',
'j', 'm', 'M', 'p', 'S',
'U', 'w', 'W', 'x', 'X', 'y', 'Y', 'Z', '%'):
format = ' %' + directive
try:
time.strftime(format, tt)
except ValueError:
self.fail('conversion specifier: %r failed.' % format)
self.assertRaises(TypeError, time.strftime, b'%S', tt)
# embedded null character
self.assertRaises(ValueError, time.strftime, '%S\0', tt)
def _bounds_checking(self, func):
# Make sure that strftime() checks the bounds of the various parts
# of the time tuple (0 is valid for *all* values).
# The year field is tested by other test cases above
# Check month [1, 12] + zero support
func((1900, 0, 1, 0, 0, 0, 0, 1, -1))
func((1900, 12, 1, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, -1, 1, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 13, 1, 0, 0, 0, 0, 1, -1))
# Check day of month [1, 31] + zero support
func((1900, 1, 0, 0, 0, 0, 0, 1, -1))
func((1900, 1, 31, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, -1, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 32, 0, 0, 0, 0, 1, -1))
# Check hour [0, 23]
func((1900, 1, 1, 23, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, -1, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 24, 0, 0, 0, 1, -1))
# Check minute [0, 59]
func((1900, 1, 1, 0, 59, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, -1, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 60, 0, 0, 1, -1))
# Check second [0, 61]
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, -1, 0, 1, -1))
# C99 only requires allowing for one leap second, but Python's docs say
# allow two leap seconds (0..61)
func((1900, 1, 1, 0, 0, 60, 0, 1, -1))
func((1900, 1, 1, 0, 0, 61, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, 62, 0, 1, -1))
# No check for upper-bound day of week;
# value forced into range by a ``% 7`` calculation.
# Start check at -2 since gettmarg() increments value before taking
# modulo.
self.assertEqual(func((1900, 1, 1, 0, 0, 0, -1, 1, -1)),
func((1900, 1, 1, 0, 0, 0, +6, 1, -1)))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, 0, -2, 1, -1))
# Check day of the year [1, 366] + zero support
func((1900, 1, 1, 0, 0, 0, 0, 0, -1))
func((1900, 1, 1, 0, 0, 0, 0, 366, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, 0, 0, -1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, 0, 0, 367, -1))
def test_strftime_bounding_check(self):
self._bounds_checking(lambda tup: time.strftime('', tup))
def test_strftime_format_check(self):
# Test that strftime does not crash on invalid format strings
# that may trigger a buffer overread. When not triggered,
# strftime may succeed or raise ValueError depending on
# the platform.
for x in [ '', 'A', '%A', '%AA' ]:
for y in range(0x0, 0x10):
for z in [ '%', 'A%', 'AA%', '%A%', 'A%A%', '%#' ]:
try:
time.strftime(x * y + z)
except ValueError:
pass
def test_default_values_for_zero(self):
# Make sure that using all zeros uses the proper default
# values. No test for daylight savings since strftime() does
# not change output based on its value and no test for year
# because systems vary in their support for year 0.
expected = "2000 01 01 00 00 00 1 001"
with warnings_helper.check_warnings():
result = time.strftime("%Y %m %d %H %M %S %w %j", (2000,)+(0,)*8)
self.assertEqual(expected, result)
@skip_if_buggy_ucrt_strfptime
def test_strptime(self):
# Should be able to go round-trip from strftime to strptime without
# raising an exception.
tt = time.gmtime(self.t)
for directive in ('a', 'A', 'b', 'B', 'c', 'd', 'H', 'I',
'j', 'm', 'M', 'p', 'S',
'U', 'w', 'W', 'x', 'X', 'y', 'Y', 'Z', '%'):
format = '%' + directive
strf_output = time.strftime(format, tt)
try:
time.strptime(strf_output, format)
except ValueError:
self.fail("conversion specifier %r failed with '%s' input." %
(format, strf_output))
def test_strptime_bytes(self):
# Make sure only strings are accepted as arguments to strptime.
self.assertRaises(TypeError, time.strptime, b'2009', "%Y")
self.assertRaises(TypeError, time.strptime, '2009', b'%Y')
def test_strptime_exception_context(self):
# check that this doesn't chain exceptions needlessly (see #17572)
with self.assertRaises(ValueError) as e:
time.strptime('', '%D')
self.assertIs(e.exception.__suppress_context__, True)
# additional check for IndexError branch (issue #19545)
with self.assertRaises(ValueError) as e:
time.strptime('19', '%Y %')
self.assertIs(e.exception.__suppress_context__, True)
def test_asctime(self):
time.asctime(time.gmtime(self.t))
# Max year is only limited by the size of C int.
for bigyear in TIME_MAXYEAR, TIME_MINYEAR:
asc = time.asctime((bigyear, 6, 1) + (0,) * 6)
self.assertEqual(asc[-len(str(bigyear)):], str(bigyear))
self.assertRaises(OverflowError, time.asctime,
(TIME_MAXYEAR + 1,) + (0,) * 8)
self.assertRaises(OverflowError, time.asctime,
(TIME_MINYEAR - 1,) + (0,) * 8)
self.assertRaises(TypeError, time.asctime, 0)
self.assertRaises(TypeError, time.asctime, ())
self.assertRaises(TypeError, time.asctime, (0,) * 10)
def test_asctime_bounding_check(self):
self._bounds_checking(time.asctime)
    def test_ctime(self):
        # Round-trip: mktime() of a local time tuple, re-rendered by ctime().
        t = time.mktime((1973, 9, 16, 1, 3, 52, 0, 0, -1))
        self.assertEqual(time.ctime(t), 'Sun Sep 16 01:03:52 1973')
        # NOTE(review): ctime() pads the day to two columns ('Jan  1');
        # this literal appears to have lost a space in transit -- verify
        # against the upstream file.
        t = time.mktime((2000, 1, 1, 0, 0, 0, 0, 0, -1))
        self.assertEqual(time.ctime(t), 'Sat Jan 1 00:00:00 2000')
        # The year must appear verbatim at offset 20 of the fixed-width
        # ctime() output, for small, large and negative years alike.
        for year in [-100, 100, 1000, 2000, 2050, 10000]:
            try:
                testval = time.mktime((year, 1, 10) + (0,)*6)
            except (ValueError, OverflowError):
                # If mktime fails, ctime will fail too.  This may happen
                # on some platforms.
                pass
            else:
                self.assertEqual(time.ctime(testval)[20:], str(year))
@unittest.skipUnless(hasattr(time, "tzset"),
"time module has no attribute tzset")
def test_tzset(self):
from os import environ
# Epoch time of midnight Dec 25th 2002. Never DST in northern
# hemisphere.
xmas2002 = 1040774400.0
# These formats are correct for 2002, and possibly future years
# This format is the 'standard' as documented at:
# http://www.opengroup.org/onlinepubs/007904975/basedefs/xbd_chap08.html
# They are also documented in the tzset(3) man page on most Unix
# systems.
eastern = 'EST+05EDT,M4.1.0,M10.5.0'
victoria = 'AEST-10AEDT-11,M10.5.0,M3.5.0'
utc='UTC+0'
org_TZ = environ.get('TZ',None)
try:
# Make sure we can switch to UTC time and results are correct
# Note that unknown timezones default to UTC.
# Note that altzone is undefined in UTC, as there is no DST
environ['TZ'] = eastern
time.tzset()
environ['TZ'] = utc
time.tzset()
self.assertEqual(
time.gmtime(xmas2002), time.localtime(xmas2002)
)
self.assertEqual(time.daylight, 0)
self.assertEqual(time.timezone, 0)
self.assertEqual(time.localtime(xmas2002).tm_isdst, 0)
# Make sure we can switch to US/Eastern
environ['TZ'] = eastern
time.tzset()
self.assertNotEqual(time.gmtime(xmas2002), time.localtime(xmas2002))
self.assertEqual(time.tzname, ('EST', 'EDT'))
self.assertEqual(len(time.tzname), 2)
self.assertEqual(time.daylight, 1)
self.assertEqual(time.timezone, 18000)
self.assertEqual(time.altzone, 14400)
self.assertEqual(time.localtime(xmas2002).tm_isdst, 0)
self.assertEqual(len(time.tzname), 2)
# Now go to the southern hemisphere.
environ['TZ'] = victoria
time.tzset()
self.assertNotEqual(time.gmtime(xmas2002), time.localtime(xmas2002))
# Issue #11886: Australian Eastern Standard Time (UTC+10) is called
# "EST" (as Eastern Standard Time, UTC-5) instead of "AEST"
# (non-DST timezone), and "EDT" instead of "AEDT" (DST timezone),
# on some operating systems (e.g. FreeBSD), which is wrong. See for
# example this bug:
# http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=93810
self.assertIn(time.tzname[0], ('AEST' 'EST'), time.tzname[0])
self.assertTrue(time.tzname[1] in ('AEDT', 'EDT'), str(time.tzname[1]))
self.assertEqual(len(time.tzname), 2)
self.assertEqual(time.daylight, 1)
self.assertEqual(time.timezone, -36000)
self.assertEqual(time.altzone, -39600)
self.assertEqual(time.localtime(xmas2002).tm_isdst, 1)
finally:
# Repair TZ environment variable in case any other tests
# rely on it.
if org_TZ is not None:
environ['TZ'] = org_TZ
elif 'TZ' in environ:
del environ['TZ']
time.tzset()
def test_insane_timestamps(self):
# It's possible that some platform maps time_t to double,
# and that this test will fail there. This test should
# exempt such platforms (provided they return reasonable
# results!).
for func in time.ctime, time.gmtime, time.localtime:
for unreasonable in -1e200, 1e200:
self.assertRaises(OverflowError, func, unreasonable)
def test_ctime_without_arg(self):
# Not sure how to check the values, since the clock could tick
# at any time. Make sure these are at least accepted and
# don't raise errors.
time.ctime()
time.ctime(None)
def test_gmtime_without_arg(self):
gt0 = time.gmtime()
gt1 = time.gmtime(None)
t0 = time.mktime(gt0)
t1 = time.mktime(gt1)
self.assertAlmostEqual(t1, t0, delta=0.2)
def test_localtime_without_arg(self):
lt0 = time.localtime()
lt1 = time.localtime(None)
t0 = time.mktime(lt0)
t1 = time.mktime(lt1)
self.assertAlmostEqual(t1, t0, delta=0.2)
def test_mktime(self):
# Issue #1726687
for t in (-2, -1, 0, 1):
try:
tt = time.localtime(t)
except (OverflowError, OSError):
pass
else:
self.assertEqual(time.mktime(tt), t)
# Issue #13309: passing extreme values to mktime() or localtime()
# borks the glibc's internal timezone data.
@unittest.skipUnless(platform.libc_ver()[0] != 'glibc',
"disabled because of a bug in glibc. Issue #13309")
def test_mktime_error(self):
# It may not be possible to reliably make mktime return error
# on all platfom. This will make sure that no other exception
# than OverflowError is raised for an extreme value.
tt = time.gmtime(self.t)
tzname = time.strftime('%Z', tt)
self.assertNotEqual(tzname, 'LMT')
try:
time.mktime((-1, 1, 1, 0, 0, 0, -1, -1, -1))
except OverflowError:
pass
self.assertEqual(time.strftime('%Z', tt), tzname)
    def test_monotonic(self):
        """monotonic() never goes backward and counts time spent sleeping."""
        # monotonic() should not go backward
        times = [time.monotonic() for n in range(100)]
        t1 = times[0]
        for t2 in times[1:]:
            self.assertGreaterEqual(t2, t1, "times=%s" % times)
            t1 = t2

        # monotonic() includes time elapsed during a sleep
        t1 = time.monotonic()
        time.sleep(0.5)
        t2 = time.monotonic()
        dt = t2 - t1
        self.assertGreater(t2, t1)
        # bpo-20101: tolerate a difference of 50 ms because of bad timer
        # resolution on Windows
        self.assertTrue(0.450 <= dt)

        # monotonic() is a monotonic but non adjustable clock
        info = time.get_clock_info('monotonic')
        self.assertTrue(info.monotonic)
        self.assertFalse(info.adjustable)
def test_perf_counter(self):
time.perf_counter()
def test_process_time(self):
# process_time() should not include time spend during a sleep
start = time.process_time()
time.sleep(0.100)
stop = time.process_time()
# use 20 ms because process_time() has usually a resolution of 15 ms
# on Windows
self.assertLess(stop - start, 0.020)
info = time.get_clock_info('process_time')
self.assertTrue(info.monotonic)
self.assertFalse(info.adjustable)
def test_thread_time(self):
if not hasattr(time, 'thread_time'):
if sys.platform.startswith(('linux', 'win')):
self.fail("time.thread_time() should be available on %r"
% (sys.platform,))
else:
self.skipTest("need time.thread_time")
# thread_time() should not include time spend during a sleep
start = time.thread_time()
time.sleep(0.100)
stop = time.thread_time()
# use 20 ms because thread_time() has usually a resolution of 15 ms
# on Windows
self.assertLess(stop - start, 0.020)
info = time.get_clock_info('thread_time')
self.assertTrue(info.monotonic)
self.assertFalse(info.adjustable)
@unittest.skipUnless(hasattr(time, 'clock_settime'),
'need time.clock_settime')
def test_monotonic_settime(self):
t1 = time.monotonic()
realtime = time.clock_gettime(time.CLOCK_REALTIME)
# jump backward with an offset of 1 hour
try:
time.clock_settime(time.CLOCK_REALTIME, realtime - 3600)
except PermissionError as err:
self.skipTest(err)
t2 = time.monotonic()
time.clock_settime(time.CLOCK_REALTIME, realtime)
# monotonic must not be affected by system clock updates
self.assertGreaterEqual(t2, t1)
def test_localtime_failure(self):
# Issue #13847: check for localtime() failure
invalid_time_t = None
for time_t in (-1, 2**30, 2**33, 2**60):
try:
time.localtime(time_t)
except OverflowError:
self.skipTest("need 64-bit time_t")
except OSError:
invalid_time_t = time_t
break
if invalid_time_t is None:
self.skipTest("unable to find an invalid time_t value")
self.assertRaises(OSError, time.localtime, invalid_time_t)
self.assertRaises(OSError, time.ctime, invalid_time_t)
# Issue #26669: check for localtime() failure
self.assertRaises(ValueError, time.localtime, float("nan"))
self.assertRaises(ValueError, time.ctime, float("nan"))
def test_get_clock_info(self):
clocks = ['monotonic', 'perf_counter', 'process_time', 'time']
for name in clocks:
info = time.get_clock_info(name)
#self.assertIsInstance(info, dict)
self.assertIsInstance(info.implementation, str)
self.assertNotEqual(info.implementation, '')
self.assertIsInstance(info.monotonic, bool)
self.assertIsInstance(info.resolution, float)
# 0.0 < resolution <= 1.0
self.assertGreater(info.resolution, 0.0)
self.assertLessEqual(info.resolution, 1.0)
self.assertIsInstance(info.adjustable, bool)
self.assertRaises(ValueError, time.get_clock_info, 'xxx')
class TestLocale(unittest.TestCase):
    """Regression tests for locale-dependent strftime() behaviour."""

    def setUp(self):
        # Remember the process-wide locale so tearDown can restore it.
        self.oldloc = locale.setlocale(locale.LC_ALL)

    def tearDown(self):
        locale.setlocale(locale.LC_ALL, self.oldloc)

    def test_bug_3061(self):
        try:
            tmp = locale.setlocale(locale.LC_ALL, "fr_FR")
        except locale.Error:
            # The locale is not installed on this system; nothing to test.
            self.skipTest('could not set locale.LC_ALL to fr_FR')
        # This should not cause an exception
        time.strftime("%B", (2009,2,1,0,0,0,0,0,0))
class _TestAsctimeYear:
_format = '%d'
def yearstr(self, y):
return time.asctime((y,) + (0,) * 8).split()[-1]
def test_large_year(self):
# Check that it doesn't crash for year > 9999
self.assertEqual(self.yearstr(12345), '12345')
self.assertEqual(self.yearstr(123456789), '123456789')
class _TestStrftimeYear:
    """Mixin driving the shared year tests through time.strftime('%Y')."""

    # Issue 13305: For years < 1000, the value is not always
    # padded to 4 digits across platforms.  The C standard
    # assumes year >= 1900, so it does not specify the number
    # of digits.
    if time.strftime('%Y', (1,) + (0,) * 8) == '0001':
        _format = '%04d'
    else:
        _format = '%d'

    def yearstr(self, y):
        return time.strftime('%Y', (y,) + (0,) * 8)

    def test_4dyear(self):
        # Check that we can return the zero padded value.
        if self._format == '%04d':
            self.test_year('%04d')
        else:
            def year4d(y):
                # '%4Y' explicitly requests the padded form.
                return time.strftime('%4Y', (y,) + (0,) * 8)
            self.test_year('%04d', func=year4d)

    # Class-creation-time helper: probe whether strftime() accepts the
    # given year and build a skip decorator accordingly; deleted below.
    def skip_if_not_supported(y):
        msg = "strftime() is limited to [1; 9999] with Visual Studio"
        # Check that it doesn't crash for year > 9999
        try:
            time.strftime('%Y', (y,) + (0,) * 8)
        except ValueError:
            cond = False
        else:
            cond = True
        return unittest.skipUnless(cond, msg)

    @skip_if_not_supported(10000)
    def test_large_year(self):
        return super().test_large_year()

    @skip_if_not_supported(0)
    def test_negative(self):
        return super().test_negative()

    del skip_if_not_supported
class _Test4dYear:
    """Year-formatting checks shared by the asctime and strftime mixins."""

    _format = '%d'

    def test_year(self, fmt=None, func=None):
        # Compare func(y) (default: self.yearstr) against "fmt % y".
        fmt = fmt or self._format
        func = func or self.yearstr
        self.assertEqual(func(1), fmt % 1)
        self.assertEqual(func(68), fmt % 68)
        self.assertEqual(func(69), fmt % 69)
        self.assertEqual(func(99), fmt % 99)
        self.assertEqual(func(999), fmt % 999)
        self.assertEqual(func(9999), fmt % 9999)

    def test_large_year(self):
        # Years beyond 9999 must be rendered in full; a leading '+' sign,
        # if the platform emits one, is tolerated.
        self.assertEqual(self.yearstr(12345).lstrip('+'), '12345')
        self.assertEqual(self.yearstr(123456789).lstrip('+'), '123456789')
        self.assertEqual(self.yearstr(TIME_MAXYEAR).lstrip('+'), str(TIME_MAXYEAR))
        self.assertRaises(OverflowError, self.yearstr, TIME_MAXYEAR + 1)

    def test_negative(self):
        self.assertEqual(self.yearstr(-1), self._format % -1)
        self.assertEqual(self.yearstr(-1234), '-1234')
        self.assertEqual(self.yearstr(-123456), '-123456')
        self.assertEqual(self.yearstr(-123456789), str(-123456789))
        self.assertEqual(self.yearstr(-1234567890), str(-1234567890))
        self.assertEqual(self.yearstr(TIME_MINYEAR), str(TIME_MINYEAR))
        # Modules/timemodule.c checks for underflow
        self.assertRaises(OverflowError, self.yearstr, TIME_MINYEAR - 1)
        with self.assertRaises(OverflowError):
            self.yearstr(-TIME_MAXYEAR - 1)
# Concrete test classes combining the shared year checks with each formatter.
class TestAsctime4dyear(_TestAsctimeYear, _Test4dYear, unittest.TestCase):
    pass

class TestStrftime4dyear(_TestStrftimeYear, _Test4dYear, unittest.TestCase):
    pass
class TestPytime(unittest.TestCase):
    """Tests for the tm_gmtoff/tm_zone fields of struct_time."""

    @skip_if_buggy_ucrt_strfptime
    @unittest.skipUnless(time._STRUCT_TM_ITEMS == 11, "needs tm_zone support")
    def test_localtime_timezone(self):

        # Get the localtime and examine it for the offset and zone.
        lt = time.localtime()
        self.assertTrue(hasattr(lt, "tm_gmtoff"))
        self.assertTrue(hasattr(lt, "tm_zone"))

        # See if the offset and zone are similar to the module
        # attributes.
        if lt.tm_gmtoff is None:
            self.assertTrue(not hasattr(time, "timezone"))
        else:
            self.assertEqual(lt.tm_gmtoff, -[time.timezone, time.altzone][lt.tm_isdst])
        if lt.tm_zone is None:
            self.assertTrue(not hasattr(time, "tzname"))
        else:
            self.assertEqual(lt.tm_zone, time.tzname[lt.tm_isdst])

        # Try and make UNIX times from the localtime and a 9-tuple
        # created from the localtime. Test to see that the times are
        # the same.
        t = time.mktime(lt); t9 = time.mktime(lt[:9])
        self.assertEqual(t, t9)

        # Make localtimes from the UNIX times and compare them to
        # the original localtime, thus making a round trip.
        new_lt = time.localtime(t); new_lt9 = time.localtime(t9)
        self.assertEqual(new_lt, lt)
        self.assertEqual(new_lt.tm_gmtoff, lt.tm_gmtoff)
        self.assertEqual(new_lt.tm_zone, lt.tm_zone)
        self.assertEqual(new_lt9, lt)
        # Fixed a copy/paste slip: this line previously re-checked
        # new_lt.tm_gmtoff instead of new_lt9.tm_gmtoff.
        self.assertEqual(new_lt9.tm_gmtoff, lt.tm_gmtoff)
        self.assertEqual(new_lt9.tm_zone, lt.tm_zone)

    @unittest.skipUnless(time._STRUCT_TM_ITEMS == 11, "needs tm_zone support")
    def test_strptime_timezone(self):
        # strptime() must populate tm_zone from %Z and tm_gmtoff from %z.
        t = time.strptime("UTC", "%Z")
        self.assertEqual(t.tm_zone, 'UTC')
        t = time.strptime("+0500", "%z")
        self.assertEqual(t.tm_gmtoff, 5 * 3600)

    @unittest.skipUnless(time._STRUCT_TM_ITEMS == 11, "needs tm_zone support")
    def test_short_times(self):
        import pickle

        # Load a short time structure using pickle.
        st = b"ctime\nstruct_time\np0\n((I2007\nI8\nI11\nI1\nI24\nI49\nI5\nI223\nI1\ntp1\n(dp2\ntp3\nRp4\n."
        lt = pickle.loads(st)
        # Pickles predating tm_gmtoff/tm_zone must load with those fields
        # defaulting to None.
        self.assertIs(lt.tm_gmtoff, None)
        self.assertIs(lt.tm_zone, None)
@unittest.skipIf(_testcapi is None, 'need the _testcapi module')
class CPyTimeTestCase:
    """
    Base class to test the C _PyTime_t API.
    """
    # Smallest positive number of seconds guaranteed to overflow the
    # converter under test; concrete subclasses must override this
    # (None makes a misconfigured subclass fail loudly).
    OVERFLOW_SECONDS = None
    def setUp(self):
        # Derive the platform time_t value range from its byte size
        # (time_t is a signed integer of SIZEOF_TIME_T bytes).
        from _testcapi import SIZEOF_TIME_T
        bits = SIZEOF_TIME_T * 8 - 1
        self.time_t_min = -2 ** bits
        self.time_t_max = 2 ** bits - 1
    def time_t_filter(self, seconds):
        # True if *seconds* fits into the platform time_t type.
        return (self.time_t_min <= seconds <= self.time_t_max)
    def _rounding_values(self, use_float):
        "Build timestamps used to test rounding."
        units = [1, US_TO_NS, MS_TO_NS, SEC_TO_NS]
        if use_float:
            # picoseconds are only tested to pytime_converter accepting floats
            units.append(1e-3)
        values = (
            # small values
            1, 2, 5, 7, 123, 456, 1234,
            # 10^k - 1
            9,
            99,
            999,
            9999,
            99999,
            999999,
            # test half even rounding near 0.5, 1.5, 2.5, 3.5, 4.5
            499, 500, 501,
            1499, 1500, 1501,
            2500,
            3500,
            4500,
        )
        ns_timestamps = [0]
        # Each base value is scaled by each unit, in both signs.
        for unit in units:
            for value in values:
                ns = value * unit
                ns_timestamps.extend((-ns, ns))
        # Powers of two (in seconds) and their immediate neighbours,
        # to probe off-by-one behaviour around carry boundaries.
        for pow2 in (0, 5, 10, 15, 22, 23, 24, 30, 33):
            ns = (2 ** pow2) * SEC_TO_NS
            ns_timestamps.extend((
                -ns-1, -ns, -ns+1,
                ns-1, ns, ns+1
            ))
        for seconds in (_testcapi.INT_MIN, _testcapi.INT_MAX):
            ns_timestamps.append(seconds * SEC_TO_NS)
        if use_float:
            # numbers with an exact representation in IEEE 754 (base 2)
            for pow2 in (3, 7, 10, 15):
                ns = 2.0 ** (-pow2)
                ns_timestamps.extend((-ns, ns))
        # seconds close to _PyTime_t type limit
        ns = (2 ** 63 // SEC_TO_NS) * SEC_TO_NS
        ns_timestamps.extend((-ns, ns))
        return ns_timestamps
    def _check_rounding(self, pytime_converter, expected_func,
                        use_float, unit_to_sec, value_filter=None):
        # Check that *pytime_converter* agrees with *expected_func* for
        # every rounding mode, then that it raises OverflowError on
        # out-of-range values.
        def convert_values(ns_timestamps):
            # Convert nanosecond test points into the converter's input
            # unit, dropping values rejected by *value_filter*.
            if use_float:
                unit_to_ns = SEC_TO_NS / float(unit_to_sec)
                values = [ns / unit_to_ns for ns in ns_timestamps]
            else:
                unit_to_ns = SEC_TO_NS // unit_to_sec
                values = [ns // unit_to_ns for ns in ns_timestamps]
            if value_filter:
                values = filter(value_filter, values)
            # remove duplicates and sort
            return sorted(set(values))
        # test rounding
        ns_timestamps = self._rounding_values(use_float)
        valid_values = convert_values(ns_timestamps)
        for time_rnd, decimal_rnd in ROUNDING_MODES :
            # The decimal context mirrors the C rounding mode so that
            # expected_func (decimal based) rounds the same way.
            with decimal.localcontext() as context:
                context.rounding = decimal_rnd
                for value in valid_values:
                    debug_info = {'value': value, 'rounding': decimal_rnd}
                    try:
                        result = pytime_converter(value, time_rnd)
                        expected = expected_func(value)
                    except Exception:
                        self.fail("Error on timestamp conversion: %s" % debug_info)
                    self.assertEqual(result,
                                     expected,
                                     debug_info)
        # test overflow
        ns = self.OVERFLOW_SECONDS * SEC_TO_NS
        ns_timestamps = (-ns, ns)
        overflow_values = convert_values(ns_timestamps)
        for time_rnd, _ in ROUNDING_MODES :
            for value in overflow_values:
                debug_info = {'value': value, 'rounding': time_rnd}
                with self.assertRaises(OverflowError, msg=debug_info):
                    pytime_converter(value, time_rnd)
    def check_int_rounding(self, pytime_converter, expected_func,
                           unit_to_sec=1, value_filter=None):
        # Convenience wrapper: exercise the converter with integer inputs.
        self._check_rounding(pytime_converter, expected_func,
                             False, unit_to_sec, value_filter)
    def check_float_rounding(self, pytime_converter, expected_func,
                             unit_to_sec=1, value_filter=None):
        # Convenience wrapper: exercise the converter with float inputs.
        self._check_rounding(pytime_converter, expected_func,
                             True, unit_to_sec, value_filter)
    def decimal_round(self, x):
        # Round *x* to an int using the rounding mode of the current
        # decimal context (set by _check_rounding).
        d = decimal.Decimal(x)
        d = d.quantize(1)
        return int(d)
class TestCPyTime(CPyTimeTestCase, unittest.TestCase):
    """
    Test the C _PyTime_t API.

    Each test drives one _testcapi hook wrapping a C conversion
    function through CPyTimeTestCase.check_*_rounding().
    """
    # _PyTime_t is a 64-bit signed integer
    OVERFLOW_SECONDS = math.ceil((2**63 + 1) / SEC_TO_NS)
    def test_FromSeconds(self):
        from _testcapi import PyTime_FromSeconds
        # PyTime_FromSeconds() expects a C int, reject values out of range
        def c_int_filter(secs):
            return (_testcapi.INT_MIN <= secs <= _testcapi.INT_MAX)
        # PyTime_FromSeconds() takes no rounding mode, hence the lambda
        # dropping the rnd argument supplied by check_int_rounding().
        self.check_int_rounding(lambda secs, rnd: PyTime_FromSeconds(secs),
                                lambda secs: secs * SEC_TO_NS,
                                value_filter=c_int_filter)
        # test nan
        for time_rnd, _ in ROUNDING_MODES:
            with self.assertRaises(TypeError):
                PyTime_FromSeconds(float('nan'))
    def test_FromSecondsObject(self):
        from _testcapi import PyTime_FromSecondsObject
        # int seconds -> exact nanoseconds; float seconds -> rounded.
        self.check_int_rounding(
            PyTime_FromSecondsObject,
            lambda secs: secs * SEC_TO_NS)
        self.check_float_rounding(
            PyTime_FromSecondsObject,
            lambda ns: self.decimal_round(ns * SEC_TO_NS))
        # test nan
        for time_rnd, _ in ROUNDING_MODES:
            with self.assertRaises(ValueError):
                PyTime_FromSecondsObject(float('nan'), time_rnd)
    def test_AsSecondsDouble(self):
        from _testcapi import PyTime_AsSecondsDouble
        def float_converter(ns):
            # Whole seconds are converted exactly; otherwise use float
            # division like the C implementation.
            if abs(ns) % SEC_TO_NS == 0:
                return float(ns // SEC_TO_NS)
            else:
                return float(ns) / SEC_TO_NS
        self.check_int_rounding(lambda ns, rnd: PyTime_AsSecondsDouble(ns),
                                float_converter,
                                NS_TO_SEC)
        # test nan
        for time_rnd, _ in ROUNDING_MODES:
            with self.assertRaises(TypeError):
                PyTime_AsSecondsDouble(float('nan'))
    def create_decimal_converter(self, denominator):
        # Return a callable dividing its input by *denominator* and
        # rounding with the current decimal context.
        denom = decimal.Decimal(denominator)
        def converter(value):
            d = decimal.Decimal(value) / denom
            return self.decimal_round(d)
        return converter
    def test_AsTimeval(self):
        from _testcapi import PyTime_AsTimeval
        us_converter = self.create_decimal_converter(US_TO_NS)
        def timeval_converter(ns):
            # Expected result is a (tv_sec, tv_usec) pair.
            us = us_converter(ns)
            return divmod(us, SEC_TO_US)
        if sys.platform == 'win32':
            from _testcapi import LONG_MIN, LONG_MAX
            # On Windows, timeval.tv_sec type is a C long
            def seconds_filter(secs):
                return LONG_MIN <= secs <= LONG_MAX
        else:
            seconds_filter = self.time_t_filter
        self.check_int_rounding(PyTime_AsTimeval,
                                timeval_converter,
                                NS_TO_SEC,
                                value_filter=seconds_filter)
    @unittest.skipUnless(hasattr(_testcapi, 'PyTime_AsTimespec'),
                         'need _testcapi.PyTime_AsTimespec')
    def test_AsTimespec(self):
        from _testcapi import PyTime_AsTimespec
        def timespec_converter(ns):
            # Expected result is a (tv_sec, tv_nsec) pair.
            return divmod(ns, SEC_TO_NS)
        self.check_int_rounding(lambda ns, rnd: PyTime_AsTimespec(ns),
                                timespec_converter,
                                NS_TO_SEC,
                                value_filter=self.time_t_filter)
    @unittest.skipUnless(hasattr(_testcapi, 'PyTime_AsTimeval_clamp'),
                         'need _testcapi.PyTime_AsTimeval_clamp')
    def test_AsTimeval_clamp(self):
        from _testcapi import PyTime_AsTimeval_clamp
        if sys.platform == 'win32':
            from _testcapi import LONG_MIN, LONG_MAX
            tv_sec_max = LONG_MAX
            tv_sec_min = LONG_MIN
        else:
            tv_sec_max = self.time_t_max
            tv_sec_min = self.time_t_min
        # At the _PyTime_t extremes the result must saturate to the
        # tv_sec range instead of raising OverflowError.
        for t in (_PyTime_MIN, _PyTime_MAX):
            ts = PyTime_AsTimeval_clamp(t, _PyTime.ROUND_CEILING)
            with decimal.localcontext() as context:
                context.rounding = decimal.ROUND_CEILING
                us = self.decimal_round(decimal.Decimal(t) / US_TO_NS)
            tv_sec, tv_usec = divmod(us, SEC_TO_US)
            if tv_sec_max < tv_sec:
                tv_sec = tv_sec_max
                tv_usec = 0
            elif tv_sec < tv_sec_min:
                tv_sec = tv_sec_min
                tv_usec = 0
            self.assertEqual(ts, (tv_sec, tv_usec))
    @unittest.skipUnless(hasattr(_testcapi, 'PyTime_AsTimespec_clamp'),
                         'need _testcapi.PyTime_AsTimespec_clamp')
    def test_AsTimespec_clamp(self):
        from _testcapi import PyTime_AsTimespec_clamp
        # Same saturation behaviour as test_AsTimeval_clamp, for timespec.
        for t in (_PyTime_MIN, _PyTime_MAX):
            ts = PyTime_AsTimespec_clamp(t)
            tv_sec, tv_nsec = divmod(t, NS_TO_SEC)
            if self.time_t_max < tv_sec:
                tv_sec = self.time_t_max
                tv_nsec = 0
            elif tv_sec < self.time_t_min:
                tv_sec = self.time_t_min
                tv_nsec = 0
            self.assertEqual(ts, (tv_sec, tv_nsec))
    def test_AsMilliseconds(self):
        from _testcapi import PyTime_AsMilliseconds
        self.check_int_rounding(PyTime_AsMilliseconds,
                                self.create_decimal_converter(MS_TO_NS),
                                NS_TO_SEC)
    def test_AsMicroseconds(self):
        from _testcapi import PyTime_AsMicroseconds
        self.check_int_rounding(PyTime_AsMicroseconds,
                                self.create_decimal_converter(US_TO_NS),
                                NS_TO_SEC)
class TestOldPyTime(CPyTimeTestCase, unittest.TestCase):
    """
    Test the old C _PyTime_t API: _PyTime_ObjectToXXX() functions.
    """
    # time_t is a 32-bit or 64-bit signed integer
    OVERFLOW_SECONDS = 2 ** 64
    def test_object_to_time_t(self):
        from _testcapi import pytime_object_to_time_t
        # Integers pass through unchanged; floats are rounded with the
        # current rounding mode.
        self.check_int_rounding(pytime_object_to_time_t,
                                lambda secs: secs,
                                value_filter=self.time_t_filter)
        self.check_float_rounding(pytime_object_to_time_t,
                                  self.decimal_round,
                                  value_filter=self.time_t_filter)
    def create_converter(self, sec_to_unit):
        # Build the expected (seconds, fraction) pair for a float input,
        # where the fraction is expressed in 1/sec_to_unit units.
        def converter(secs):
            floatpart, intpart = math.modf(secs)
            intpart = int(intpart)
            floatpart *= sec_to_unit
            floatpart = self.decimal_round(floatpart)
            # Normalize: the fractional part must be in [0, sec_to_unit),
            # carrying over into the integer part when rounding pushed it
            # out of range.
            if floatpart < 0:
                floatpart += sec_to_unit
                intpart -= 1
            elif floatpart >= sec_to_unit:
                floatpart -= sec_to_unit
                intpart += 1
            return (intpart, floatpart)
        return converter
    def test_object_to_timeval(self):
        from _testcapi import pytime_object_to_timeval
        self.check_int_rounding(pytime_object_to_timeval,
                                lambda secs: (secs, 0),
                                value_filter=self.time_t_filter)
        self.check_float_rounding(pytime_object_to_timeval,
                                  self.create_converter(SEC_TO_US),
                                  value_filter=self.time_t_filter)
        # test nan
        for time_rnd, _ in ROUNDING_MODES:
            with self.assertRaises(ValueError):
                pytime_object_to_timeval(float('nan'), time_rnd)
    def test_object_to_timespec(self):
        from _testcapi import pytime_object_to_timespec
        self.check_int_rounding(pytime_object_to_timespec,
                                lambda secs: (secs, 0),
                                value_filter=self.time_t_filter)
        self.check_float_rounding(pytime_object_to_timespec,
                                  self.create_converter(SEC_TO_NS),
                                  value_filter=self.time_t_filter)
        # test nan
        for time_rnd, _ in ROUNDING_MODES:
            with self.assertRaises(ValueError):
                pytime_object_to_timespec(float('nan'), time_rnd)
@unittest.skipUnless(sys.platform == "darwin", "test weak linking on macOS")
class TestTimeWeaklinking(unittest.TestCase):
    """Verify weak-linking behaviour of the clock functions on macOS.

    Only the new behaviour introduced by weak linking is checked here;
    regular behaviour is covered by the normal test cases.  See the
    section on Weak Linking in Mac/README.txt for more information.
    """

    def test_clock_functions(self):
        import sysconfig
        import platform

        # Nothing to check if the interpreter was built without
        # clock_gettime() support at all.
        if not sysconfig.get_config_vars().get("HAVE_CLOCK_GETTIME"):
            raise unittest.SkipTest("HAVE_CLOCK_GETTIME is not available")

        mac_ver = tuple(int(part) for part in platform.mac_ver()[0].split("."))
        clock_names = [
            "CLOCK_MONOTONIC", "clock_gettime", "clock_gettime_ns", "clock_settime",
            "clock_settime_ns", "clock_getres"]

        # The clock APIs are weakly linked against macOS 10.12+: they
        # must all be present on new systems and all absent on older ones.
        available = mac_ver >= (10, 12)
        for name in clock_names:
            if available:
                self.assertTrue(hasattr(time, name), f"time.{name} is not available")
            else:
                self.assertFalse(hasattr(time, name), f"time.{name} is available")
# Allow running this test file directly (python test_time.py); under the
# regression-test framework unittest discovery picks the classes up instead.
if __name__ == "__main__":
    unittest.main()
| 37.772242 | 108 | 0.582085 | from test import support
from test.support import warnings_helper
import decimal
import enum
import locale
import math
import platform
import sys
import sysconfig
import time
import threading
import unittest
try:
import _testcapi
except ImportError:
_testcapi = None
from test.support import skip_if_buggy_ucrt_strfptime
SIZEOF_INT = sysconfig.get_config_var('SIZEOF_INT') or 4
TIME_MAXYEAR = (1 << 8 * SIZEOF_INT - 1) - 1
TIME_MINYEAR = -TIME_MAXYEAR - 1 + 1900
SEC_TO_US = 10 ** 6
US_TO_NS = 10 ** 3
MS_TO_NS = 10 ** 6
SEC_TO_NS = 10 ** 9
NS_TO_SEC = 10 ** 9
class _PyTime(enum.IntEnum):
ROUND_FLOOR = 0
ROUND_CEILING = 1
ROUND_HALF_EVEN = 2
ROUND_UP = 3
_PyTime_MIN = -2 ** 63
_PyTime_MAX = 2 ** 63 - 1
ROUNDING_MODES = (
(_PyTime.ROUND_FLOOR, decimal.ROUND_FLOOR),
(_PyTime.ROUND_CEILING, decimal.ROUND_CEILING),
(_PyTime.ROUND_HALF_EVEN, decimal.ROUND_HALF_EVEN),
(_PyTime.ROUND_UP, decimal.ROUND_UP),
)
class TimeTestCase(unittest.TestCase):
def setUp(self):
self.t = time.time()
def test_data_attributes(self):
time.altzone
time.daylight
time.timezone
time.tzname
def test_time(self):
time.time()
info = time.get_clock_info('time')
self.assertFalse(info.monotonic)
self.assertTrue(info.adjustable)
def test_time_ns_type(self):
def check_ns(sec, ns):
self.assertIsInstance(ns, int)
sec_ns = int(sec * 1e9)
self.assertLess((sec_ns - ns), 50 ** 6, (sec, ns))
check_ns(time.time(),
time.time_ns())
check_ns(time.monotonic(),
time.monotonic_ns())
check_ns(time.perf_counter(),
time.perf_counter_ns())
check_ns(time.process_time(),
time.process_time_ns())
if hasattr(time, 'thread_time'):
check_ns(time.thread_time(),
time.thread_time_ns())
if hasattr(time, 'clock_gettime'):
check_ns(time.clock_gettime(time.CLOCK_REALTIME),
time.clock_gettime_ns(time.CLOCK_REALTIME))
@unittest.skipUnless(hasattr(time, 'clock_gettime'),
'need time.clock_gettime()')
def test_clock_realtime(self):
t = time.clock_gettime(time.CLOCK_REALTIME)
self.assertIsInstance(t, float)
@unittest.skipUnless(hasattr(time, 'clock_gettime'),
'need time.clock_gettime()')
@unittest.skipUnless(hasattr(time, 'CLOCK_MONOTONIC'),
'need time.CLOCK_MONOTONIC')
def test_clock_monotonic(self):
a = time.clock_gettime(time.CLOCK_MONOTONIC)
b = time.clock_gettime(time.CLOCK_MONOTONIC)
self.assertLessEqual(a, b)
@unittest.skipUnless(hasattr(time, 'pthread_getcpuclockid'),
'need time.pthread_getcpuclockid()')
@unittest.skipUnless(hasattr(time, 'clock_gettime'),
'need time.clock_gettime()')
def test_pthread_getcpuclockid(self):
clk_id = time.pthread_getcpuclockid(threading.get_ident())
self.assertTrue(type(clk_id) is int)
if not platform.system() == "AIX":
self.assertNotEqual(clk_id, time.CLOCK_THREAD_CPUTIME_ID)
elif (sys.maxsize.bit_length() > 32):
self.assertNotEqual(clk_id, time.CLOCK_THREAD_CPUTIME_ID)
else:
self.assertEqual(clk_id, time.CLOCK_THREAD_CPUTIME_ID)
t1 = time.clock_gettime(clk_id)
t2 = time.clock_gettime(clk_id)
self.assertLessEqual(t1, t2)
@unittest.skipUnless(hasattr(time, 'clock_getres'),
'need time.clock_getres()')
def test_clock_getres(self):
res = time.clock_getres(time.CLOCK_REALTIME)
self.assertGreater(res, 0.0)
self.assertLessEqual(res, 1.0)
@unittest.skipUnless(hasattr(time, 'clock_settime'),
'need time.clock_settime()')
def test_clock_settime(self):
t = time.clock_gettime(time.CLOCK_REALTIME)
try:
time.clock_settime(time.CLOCK_REALTIME, t)
except PermissionError:
pass
if hasattr(time, 'CLOCK_MONOTONIC'):
self.assertRaises(OSError,
time.clock_settime, time.CLOCK_MONOTONIC, 0)
def test_conversions(self):
self.assertEqual(time.ctime(self.t),
time.asctime(time.localtime(self.t)))
self.assertEqual(int(time.mktime(time.localtime(self.t))),
int(self.t))
def test_sleep(self):
self.assertRaises(ValueError, time.sleep, -2)
self.assertRaises(ValueError, time.sleep, -1)
time.sleep(1.2)
def test_strftime(self):
tt = time.gmtime(self.t)
for directive in ('a', 'A', 'b', 'B', 'c', 'd', 'H', 'I',
'j', 'm', 'M', 'p', 'S',
'U', 'w', 'W', 'x', 'X', 'y', 'Y', 'Z', '%'):
format = ' %' + directive
try:
time.strftime(format, tt)
except ValueError:
self.fail('conversion specifier: %r failed.' % format)
self.assertRaises(TypeError, time.strftime, b'%S', tt)
self.assertRaises(ValueError, time.strftime, '%S\0', tt)
def _bounds_checking(self, func):
func((1900, 0, 1, 0, 0, 0, 0, 1, -1))
func((1900, 12, 1, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, -1, 1, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 13, 1, 0, 0, 0, 0, 1, -1))
func((1900, 1, 0, 0, 0, 0, 0, 1, -1))
func((1900, 1, 31, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, -1, 0, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 32, 0, 0, 0, 0, 1, -1))
func((1900, 1, 1, 23, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, -1, 0, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 24, 0, 0, 0, 1, -1))
func((1900, 1, 1, 0, 59, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, -1, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 60, 0, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, -1, 0, 1, -1))
# allow two leap seconds (0..61)
func((1900, 1, 1, 0, 0, 60, 0, 1, -1))
func((1900, 1, 1, 0, 0, 61, 0, 1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, 62, 0, 1, -1))
# No check for upper-bound day of week;
# value forced into range by a ``% 7`` calculation.
# Start check at -2 since gettmarg() increments value before taking
# modulo.
self.assertEqual(func((1900, 1, 1, 0, 0, 0, -1, 1, -1)),
func((1900, 1, 1, 0, 0, 0, +6, 1, -1)))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, 0, -2, 1, -1))
# Check day of the year [1, 366] + zero support
func((1900, 1, 1, 0, 0, 0, 0, 0, -1))
func((1900, 1, 1, 0, 0, 0, 0, 366, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, 0, 0, -1, -1))
self.assertRaises(ValueError, func,
(1900, 1, 1, 0, 0, 0, 0, 367, -1))
def test_strftime_bounding_check(self):
self._bounds_checking(lambda tup: time.strftime('', tup))
def test_strftime_format_check(self):
# Test that strftime does not crash on invalid format strings
# that may trigger a buffer overread. When not triggered,
# strftime may succeed or raise ValueError depending on
# the platform.
for x in [ '', 'A', '%A', '%AA' ]:
for y in range(0x0, 0x10):
for z in [ '%', 'A%', 'AA%', '%A%', 'A%A%', '%
try:
time.strftime(x * y + z)
except ValueError:
pass
def test_default_values_for_zero(self):
# Make sure that using all zeros uses the proper default
# values. No test for daylight savings since strftime() does
# not change output based on its value and no test for year
# because systems vary in their support for year 0.
expected = "2000 01 01 00 00 00 1 001"
with warnings_helper.check_warnings():
result = time.strftime("%Y %m %d %H %M %S %w %j", (2000,)+(0,)*8)
self.assertEqual(expected, result)
@skip_if_buggy_ucrt_strfptime
def test_strptime(self):
# Should be able to go round-trip from strftime to strptime without
# raising an exception.
tt = time.gmtime(self.t)
for directive in ('a', 'A', 'b', 'B', 'c', 'd', 'H', 'I',
'j', 'm', 'M', 'p', 'S',
'U', 'w', 'W', 'x', 'X', 'y', 'Y', 'Z', '%'):
format = '%' + directive
strf_output = time.strftime(format, tt)
try:
time.strptime(strf_output, format)
except ValueError:
self.fail("conversion specifier %r failed with '%s' input." %
(format, strf_output))
def test_strptime_bytes(self):
# Make sure only strings are accepted as arguments to strptime.
self.assertRaises(TypeError, time.strptime, b'2009', "%Y")
self.assertRaises(TypeError, time.strptime, '2009', b'%Y')
def test_strptime_exception_context(self):
# check that this doesn't chain exceptions needlessly (see
with self.assertRaises(ValueError) as e:
time.strptime('', '%D')
self.assertIs(e.exception.__suppress_context__, True)
with self.assertRaises(ValueError) as e:
time.strptime('19', '%Y %')
self.assertIs(e.exception.__suppress_context__, True)
def test_asctime(self):
time.asctime(time.gmtime(self.t))
for bigyear in TIME_MAXYEAR, TIME_MINYEAR:
asc = time.asctime((bigyear, 6, 1) + (0,) * 6)
self.assertEqual(asc[-len(str(bigyear)):], str(bigyear))
self.assertRaises(OverflowError, time.asctime,
(TIME_MAXYEAR + 1,) + (0,) * 8)
self.assertRaises(OverflowError, time.asctime,
(TIME_MINYEAR - 1,) + (0,) * 8)
self.assertRaises(TypeError, time.asctime, 0)
self.assertRaises(TypeError, time.asctime, ())
self.assertRaises(TypeError, time.asctime, (0,) * 10)
def test_asctime_bounding_check(self):
self._bounds_checking(time.asctime)
def test_ctime(self):
t = time.mktime((1973, 9, 16, 1, 3, 52, 0, 0, -1))
self.assertEqual(time.ctime(t), 'Sun Sep 16 01:03:52 1973')
t = time.mktime((2000, 1, 1, 0, 0, 0, 0, 0, -1))
self.assertEqual(time.ctime(t), 'Sat Jan 1 00:00:00 2000')
for year in [-100, 100, 1000, 2000, 2050, 10000]:
try:
testval = time.mktime((year, 1, 10) + (0,)*6)
except (ValueError, OverflowError):
pass
else:
self.assertEqual(time.ctime(testval)[20:], str(year))
@unittest.skipUnless(hasattr(time, "tzset"),
"time module has no attribute tzset")
def test_tzset(self):
from os import environ
xmas2002 = 1040774400.0
eastern = 'EST+05EDT,M4.1.0,M10.5.0'
victoria = 'AEST-10AEDT-11,M10.5.0,M3.5.0'
utc='UTC+0'
org_TZ = environ.get('TZ',None)
try:
environ['TZ'] = eastern
time.tzset()
environ['TZ'] = utc
time.tzset()
self.assertEqual(
time.gmtime(xmas2002), time.localtime(xmas2002)
)
self.assertEqual(time.daylight, 0)
self.assertEqual(time.timezone, 0)
self.assertEqual(time.localtime(xmas2002).tm_isdst, 0)
environ['TZ'] = eastern
time.tzset()
self.assertNotEqual(time.gmtime(xmas2002), time.localtime(xmas2002))
self.assertEqual(time.tzname, ('EST', 'EDT'))
self.assertEqual(len(time.tzname), 2)
self.assertEqual(time.daylight, 1)
self.assertEqual(time.timezone, 18000)
self.assertEqual(time.altzone, 14400)
self.assertEqual(time.localtime(xmas2002).tm_isdst, 0)
self.assertEqual(len(time.tzname), 2)
environ['TZ'] = victoria
time.tzset()
self.assertNotEqual(time.gmtime(xmas2002), time.localtime(xmas2002))
self.assertIn(time.tzname[0], ('AEST' 'EST'), time.tzname[0])
self.assertTrue(time.tzname[1] in ('AEDT', 'EDT'), str(time.tzname[1]))
self.assertEqual(len(time.tzname), 2)
self.assertEqual(time.daylight, 1)
self.assertEqual(time.timezone, -36000)
self.assertEqual(time.altzone, -39600)
self.assertEqual(time.localtime(xmas2002).tm_isdst, 1)
finally:
if org_TZ is not None:
environ['TZ'] = org_TZ
elif 'TZ' in environ:
del environ['TZ']
time.tzset()
def test_insane_timestamps(self):
# and that this test will fail there. This test should
# exempt such platforms (provided they return reasonable
# results!).
for func in time.ctime, time.gmtime, time.localtime:
for unreasonable in -1e200, 1e200:
self.assertRaises(OverflowError, func, unreasonable)
def test_ctime_without_arg(self):
# Not sure how to check the values, since the clock could tick
# at any time. Make sure these are at least accepted and
# don't raise errors.
time.ctime()
time.ctime(None)
def test_gmtime_without_arg(self):
gt0 = time.gmtime()
gt1 = time.gmtime(None)
t0 = time.mktime(gt0)
t1 = time.mktime(gt1)
self.assertAlmostEqual(t1, t0, delta=0.2)
def test_localtime_without_arg(self):
lt0 = time.localtime()
lt1 = time.localtime(None)
t0 = time.mktime(lt0)
t1 = time.mktime(lt1)
self.assertAlmostEqual(t1, t0, delta=0.2)
def test_mktime(self):
for t in (-2, -1, 0, 1):
try:
tt = time.localtime(t)
except (OverflowError, OSError):
pass
else:
self.assertEqual(time.mktime(tt), t)
'glibc',
"disabled because of a bug in glibc. Issue #13309")
def test_mktime_error(self):
# It may not be possible to reliably make mktime return error
# on all platfom. This will make sure that no other exception
# than OverflowError is raised for an extreme value.
tt = time.gmtime(self.t)
tzname = time.strftime('%Z', tt)
self.assertNotEqual(tzname, 'LMT')
try:
time.mktime((-1, 1, 1, 0, 0, 0, -1, -1, -1))
except OverflowError:
pass
self.assertEqual(time.strftime('%Z', tt), tzname)
def test_monotonic(self):
# monotonic() should not go backward
times = [time.monotonic() for n in range(100)]
t1 = times[0]
for t2 in times[1:]:
self.assertGreaterEqual(t2, t1, "times=%s" % times)
t1 = t2
# monotonic() includes time elapsed during a sleep
t1 = time.monotonic()
time.sleep(0.5)
t2 = time.monotonic()
dt = t2 - t1
self.assertGreater(t2, t1)
# bpo-20101: tolerate a difference of 50 ms because of bad timer
# resolution on Windows
self.assertTrue(0.450 <= dt)
# monotonic() is a monotonic but non adjustable clock
info = time.get_clock_info('monotonic')
self.assertTrue(info.monotonic)
self.assertFalse(info.adjustable)
def test_perf_counter(self):
time.perf_counter()
def test_process_time(self):
# process_time() should not include time spend during a sleep
start = time.process_time()
time.sleep(0.100)
stop = time.process_time()
# use 20 ms because process_time() has usually a resolution of 15 ms
# on Windows
self.assertLess(stop - start, 0.020)
info = time.get_clock_info('process_time')
self.assertTrue(info.monotonic)
self.assertFalse(info.adjustable)
def test_thread_time(self):
if not hasattr(time, 'thread_time'):
if sys.platform.startswith(('linux', 'win')):
self.fail("time.thread_time() should be available on %r"
% (sys.platform,))
else:
self.skipTest("need time.thread_time")
# thread_time() should not include time spend during a sleep
start = time.thread_time()
time.sleep(0.100)
stop = time.thread_time()
# use 20 ms because thread_time() has usually a resolution of 15 ms
# on Windows
self.assertLess(stop - start, 0.020)
info = time.get_clock_info('thread_time')
self.assertTrue(info.monotonic)
self.assertFalse(info.adjustable)
@unittest.skipUnless(hasattr(time, 'clock_settime'),
'need time.clock_settime')
def test_monotonic_settime(self):
t1 = time.monotonic()
realtime = time.clock_gettime(time.CLOCK_REALTIME)
# jump backward with an offset of 1 hour
try:
time.clock_settime(time.CLOCK_REALTIME, realtime - 3600)
except PermissionError as err:
self.skipTest(err)
t2 = time.monotonic()
time.clock_settime(time.CLOCK_REALTIME, realtime)
# monotonic must not be affected by system clock updates
self.assertGreaterEqual(t2, t1)
def test_localtime_failure(self):
# Issue #13847: check for localtime() failure
invalid_time_t = None
for time_t in (-1, 2**30, 2**33, 2**60):
try:
time.localtime(time_t)
except OverflowError:
self.skipTest("need 64-bit time_t")
except OSError:
invalid_time_t = time_t
break
if invalid_time_t is None:
self.skipTest("unable to find an invalid time_t value")
self.assertRaises(OSError, time.localtime, invalid_time_t)
self.assertRaises(OSError, time.ctime, invalid_time_t)
# Issue #26669: check for localtime() failure
self.assertRaises(ValueError, time.localtime, float("nan"))
self.assertRaises(ValueError, time.ctime, float("nan"))
def test_get_clock_info(self):
clocks = ['monotonic', 'perf_counter', 'process_time', 'time']
for name in clocks:
info = time.get_clock_info(name)
#self.assertIsInstance(info, dict)
self.assertIsInstance(info.implementation, str)
self.assertNotEqual(info.implementation, '')
self.assertIsInstance(info.monotonic, bool)
self.assertIsInstance(info.resolution, float)
# 0.0 < resolution <= 1.0
self.assertGreater(info.resolution, 0.0)
self.assertLessEqual(info.resolution, 1.0)
self.assertIsInstance(info.adjustable, bool)
self.assertRaises(ValueError, time.get_clock_info, 'xxx')
class TestLocale(unittest.TestCase):
def setUp(self):
self.oldloc = locale.setlocale(locale.LC_ALL)
def tearDown(self):
locale.setlocale(locale.LC_ALL, self.oldloc)
def test_bug_3061(self):
try:
tmp = locale.setlocale(locale.LC_ALL, "fr_FR")
except locale.Error:
self.skipTest('could not set locale.LC_ALL to fr_FR')
# This should not cause an exception
time.strftime("%B", (2009,2,1,0,0,0,0,0,0))
class _TestAsctimeYear:
_format = '%d'
def yearstr(self, y):
return time.asctime((y,) + (0,) * 8).split()[-1]
def test_large_year(self):
# Check that it doesn't crash for year > 9999
self.assertEqual(self.yearstr(12345), '12345')
self.assertEqual(self.yearstr(123456789), '123456789')
class _TestStrftimeYear:
if time.strftime('%Y', (1,) + (0,) * 8) == '0001':
_format = '%04d'
else:
_format = '%d'
def yearstr(self, y):
return time.strftime('%Y', (y,) + (0,) * 8)
def test_4dyear(self):
if self._format == '%04d':
self.test_year('%04d')
else:
def year4d(y):
return time.strftime('%4Y', (y,) + (0,) * 8)
self.test_year('%04d', func=year4d)
def skip_if_not_supported(y):
msg = "strftime() is limited to [1; 9999] with Visual Studio"
try:
time.strftime('%Y', (y,) + (0,) * 8)
except ValueError:
cond = False
else:
cond = True
return unittest.skipUnless(cond, msg)
@skip_if_not_supported(10000)
def test_large_year(self):
return super().test_large_year()
@skip_if_not_supported(0)
def test_negative(self):
return super().test_negative()
del skip_if_not_supported
class _Test4dYear:
_format = '%d'
def test_year(self, fmt=None, func=None):
fmt = fmt or self._format
func = func or self.yearstr
self.assertEqual(func(1), fmt % 1)
self.assertEqual(func(68), fmt % 68)
self.assertEqual(func(69), fmt % 69)
self.assertEqual(func(99), fmt % 99)
self.assertEqual(func(999), fmt % 999)
self.assertEqual(func(9999), fmt % 9999)
def test_large_year(self):
self.assertEqual(self.yearstr(12345).lstrip('+'), '12345')
self.assertEqual(self.yearstr(123456789).lstrip('+'), '123456789')
self.assertEqual(self.yearstr(TIME_MAXYEAR).lstrip('+'), str(TIME_MAXYEAR))
self.assertRaises(OverflowError, self.yearstr, TIME_MAXYEAR + 1)
def test_negative(self):
self.assertEqual(self.yearstr(-1), self._format % -1)
self.assertEqual(self.yearstr(-1234), '-1234')
self.assertEqual(self.yearstr(-123456), '-123456')
self.assertEqual(self.yearstr(-123456789), str(-123456789))
self.assertEqual(self.yearstr(-1234567890), str(-1234567890))
self.assertEqual(self.yearstr(TIME_MINYEAR), str(TIME_MINYEAR))
# Modules/timemodule.c checks for underflow
self.assertRaises(OverflowError, self.yearstr, TIME_MINYEAR - 1)
with self.assertRaises(OverflowError):
self.yearstr(-TIME_MAXYEAR - 1)
class TestAsctime4dyear(_TestAsctimeYear, _Test4dYear, unittest.TestCase):
pass
class TestStrftime4dyear(_TestStrftimeYear, _Test4dYear, unittest.TestCase):
pass
class TestPytime(unittest.TestCase):
@skip_if_buggy_ucrt_strfptime
@unittest.skipUnless(time._STRUCT_TM_ITEMS == 11, "needs tm_zone support")
def test_localtime_timezone(self):
# Get the localtime and examine it for the offset and zone.
lt = time.localtime()
self.assertTrue(hasattr(lt, "tm_gmtoff"))
self.assertTrue(hasattr(lt, "tm_zone"))
# See if the offset and zone are similar to the module
# attributes.
if lt.tm_gmtoff is None:
self.assertTrue(not hasattr(time, "timezone"))
else:
self.assertEqual(lt.tm_gmtoff, -[time.timezone, time.altzone][lt.tm_isdst])
if lt.tm_zone is None:
self.assertTrue(not hasattr(time, "tzname"))
else:
self.assertEqual(lt.tm_zone, time.tzname[lt.tm_isdst])
# Try and make UNIX times from the localtime and a 9-tuple
# created from the localtime. Test to see that the times are
# the same.
t = time.mktime(lt); t9 = time.mktime(lt[:9])
self.assertEqual(t, t9)
# Make localtimes from the UNIX times and compare them to
# the original localtime, thus making a round trip.
new_lt = time.localtime(t); new_lt9 = time.localtime(t9)
self.assertEqual(new_lt, lt)
self.assertEqual(new_lt.tm_gmtoff, lt.tm_gmtoff)
self.assertEqual(new_lt.tm_zone, lt.tm_zone)
self.assertEqual(new_lt9, lt)
self.assertEqual(new_lt.tm_gmtoff, lt.tm_gmtoff)
self.assertEqual(new_lt9.tm_zone, lt.tm_zone)
@unittest.skipUnless(time._STRUCT_TM_ITEMS == 11, "needs tm_zone support")
def test_strptime_timezone(self):
t = time.strptime("UTC", "%Z")
self.assertEqual(t.tm_zone, 'UTC')
t = time.strptime("+0500", "%z")
self.assertEqual(t.tm_gmtoff, 5 * 3600)
@unittest.skipUnless(time._STRUCT_TM_ITEMS == 11, "needs tm_zone support")
def test_short_times(self):
import pickle
# Load a short time structure using pickle.
st = b"ctime\nstruct_time\np0\n((I2007\nI8\nI11\nI1\nI24\nI49\nI5\nI223\nI1\ntp1\n(dp2\ntp3\nRp4\n."
lt = pickle.loads(st)
self.assertIs(lt.tm_gmtoff, None)
self.assertIs(lt.tm_zone, None)
@unittest.skipIf(_testcapi is None, 'need the _testcapi module')
class CPyTimeTestCase:
    """Base class exercising the C-level pytime conversion helpers.

    Concrete subclasses plug in a converter exposed by _testcapi and set
    OVERFLOW_SECONDS; the expected results are recomputed here with the
    `decimal` module under the matching rounding mode.
    """
    # Number of seconds guaranteed to overflow the converter under test;
    # must be overridden by each concrete subclass.
    OVERFLOW_SECONDS = None
    def setUp(self):
        """Compute the valid range of a signed C time_t on this platform."""
        from _testcapi import SIZEOF_TIME_T
        bits = SIZEOF_TIME_T * 8 - 1
        self.time_t_min = -2 ** bits
        self.time_t_max = 2 ** bits - 1
    def time_t_filter(self, seconds):
        # True when `seconds` fits into a C time_t.
        return (self.time_t_min <= seconds <= self.time_t_max)
    def _rounding_values(self, use_float):
        """Build a list of nanosecond timestamps interesting for rounding."""
        units = [1, US_TO_NS, MS_TO_NS, SEC_TO_NS]
        if use_float:
            # picoseconds are only tested to pytime_converter accepting floats
            units.append(1e-3)
        values = (
            # small values
            1, 2, 5, 7, 123, 456, 1234,
            # 10^k - 1
            9,
            99,
            999,
            9999,
            99999,
            999999,
            # test half even rounding near 0.5, 1.5, 2.5, 3.5, 4.5
            499, 500, 501,
            1499, 1500, 1501,
            2500,
            3500,
            4500,
        )
        ns_timestamps = [0]
        # Combine every magnitude with every unit, in both signs.
        for unit in units:
            for value in values:
                ns = value * unit
                ns_timestamps.extend((-ns, ns))
        # Powers of two straddle internal representation boundaries.
        for pow2 in (0, 5, 10, 15, 22, 23, 24, 30, 33):
            ns = (2 ** pow2) * SEC_TO_NS
            ns_timestamps.extend((
                -ns-1, -ns, -ns+1,
                ns-1, ns, ns+1
            ))
        for seconds in (_testcapi.INT_MIN, _testcapi.INT_MAX):
            ns_timestamps.append(seconds * SEC_TO_NS)
        if use_float:
            # numbers with an exact representation in IEEE 754 (base 2)
            for pow2 in (3, 7, 10, 15):
                ns = 2.0 ** (-pow2)
                ns_timestamps.extend((-ns, ns))
            # seconds close to _PyTime_t type limit
            ns = (2 ** 63 // SEC_TO_NS) * SEC_TO_NS
            ns_timestamps.extend((-ns, ns))
        return ns_timestamps
    def _check_rounding(self, pytime_converter, expected_func,
                        use_float, unit_to_sec, value_filter=None):
        """Check `pytime_converter` against `expected_func` for every
        rounding mode, then check that out-of-range values overflow."""
        def convert_values(ns_timestamps):
            # Translate nanosecond fixtures into the converter's input unit.
            if use_float:
                unit_to_ns = SEC_TO_NS / float(unit_to_sec)
                values = [ns / unit_to_ns for ns in ns_timestamps]
            else:
                unit_to_ns = SEC_TO_NS // unit_to_sec
                values = [ns // unit_to_ns for ns in ns_timestamps]
            if value_filter:
                values = filter(value_filter, values)
            # remove duplicates and sort
            return sorted(set(values))
        # test rounding
        ns_timestamps = self._rounding_values(use_float)
        valid_values = convert_values(ns_timestamps)
        for time_rnd, decimal_rnd in ROUNDING_MODES :
            # expected_func must run under the decimal rounding mode that
            # matches the C rounding mode being tested.
            with decimal.localcontext() as context:
                context.rounding = decimal_rnd
                for value in valid_values:
                    debug_info = {'value': value, 'rounding': decimal_rnd}
                    try:
                        result = pytime_converter(value, time_rnd)
                        expected = expected_func(value)
                    except Exception:
                        self.fail("Error on timestamp conversion: %s" % debug_info)
                    self.assertEqual(result,
                                     expected,
                                     debug_info)
        # test overflow
        ns = self.OVERFLOW_SECONDS * SEC_TO_NS
        ns_timestamps = (-ns, ns)
        overflow_values = convert_values(ns_timestamps)
        for time_rnd, _ in ROUNDING_MODES :
            for value in overflow_values:
                debug_info = {'value': value, 'rounding': time_rnd}
                with self.assertRaises(OverflowError, msg=debug_info):
                    pytime_converter(value, time_rnd)
    def check_int_rounding(self, pytime_converter, expected_func,
                           unit_to_sec=1, value_filter=None):
        # Convenience wrapper: converter takes integer inputs.
        self._check_rounding(pytime_converter, expected_func,
                             False, unit_to_sec, value_filter)
    def check_float_rounding(self, pytime_converter, expected_func,
                             unit_to_sec=1, value_filter=None):
        # Convenience wrapper: converter takes float inputs.
        self._check_rounding(pytime_converter, expected_func,
                             True, unit_to_sec, value_filter)
    def decimal_round(self, x):
        # Round `x` to an integer using the *current* decimal context
        # (set by _check_rounding for each rounding mode).
        d = decimal.Decimal(x)
        d = d.quantize(1)
        return int(d)
class TestCPyTime(CPyTimeTestCase, unittest.TestCase):
    """Tests the "new" C pytime API based on the _PyTime_t type."""
    # _PyTime_t is a 64-bit signed integer
    OVERFLOW_SECONDS = math.ceil((2**63 + 1) / SEC_TO_NS)
    def test_FromSeconds(self):
        from _testcapi import PyTime_FromSeconds
        # PyTime_FromSeconds() expects a C int, reject values out of range
        def c_int_filter(secs):
            return (_testcapi.INT_MIN <= secs <= _testcapi.INT_MAX)
        self.check_int_rounding(lambda secs, rnd: PyTime_FromSeconds(secs),
                                lambda secs: secs * SEC_TO_NS,
                                value_filter=c_int_filter)
        # test nan
        for time_rnd, _ in ROUNDING_MODES:
            with self.assertRaises(TypeError):
                PyTime_FromSeconds(float('nan'))
    def test_FromSecondsObject(self):
        from _testcapi import PyTime_FromSecondsObject
        self.check_int_rounding(
            PyTime_FromSecondsObject,
            lambda secs: secs * SEC_TO_NS)
        self.check_float_rounding(
            PyTime_FromSecondsObject,
            lambda ns: self.decimal_round(ns * SEC_TO_NS))
        # test nan
        for time_rnd, _ in ROUNDING_MODES:
            with self.assertRaises(ValueError):
                PyTime_FromSecondsObject(float('nan'), time_rnd)
    def test_AsSecondsDouble(self):
        from _testcapi import PyTime_AsSecondsDouble
        def float_converter(ns):
            # Exact multiples of one second must convert without any
            # floating-point division error.
            if abs(ns) % SEC_TO_NS == 0:
                return float(ns // SEC_TO_NS)
            else:
                return float(ns) / SEC_TO_NS
        self.check_int_rounding(lambda ns, rnd: PyTime_AsSecondsDouble(ns),
                                float_converter,
                                NS_TO_SEC)
        # test nan
        for time_rnd, _ in ROUNDING_MODES:
            with self.assertRaises(TypeError):
                PyTime_AsSecondsDouble(float('nan'))
    def create_decimal_converter(self, denominator):
        """Return a converter dividing by `denominator` with decimal
        rounding under the current decimal context."""
        denom = decimal.Decimal(denominator)
        def converter(value):
            d = decimal.Decimal(value) / denom
            return self.decimal_round(d)
        return converter
    def test_AsTimeval(self):
        from _testcapi import PyTime_AsTimeval
        us_converter = self.create_decimal_converter(US_TO_NS)
        def timeval_converter(ns):
            # A timeval is a (tv_sec, tv_usec) pair.
            us = us_converter(ns)
            return divmod(us, SEC_TO_US)
        if sys.platform == 'win32':
            from _testcapi import LONG_MIN, LONG_MAX
            # On Windows, timeval.tv_sec type is a C long
            def seconds_filter(secs):
                return LONG_MIN <= secs <= LONG_MAX
        else:
            seconds_filter = self.time_t_filter
        self.check_int_rounding(PyTime_AsTimeval,
                                timeval_converter,
                                NS_TO_SEC,
                                value_filter=seconds_filter)
    @unittest.skipUnless(hasattr(_testcapi, 'PyTime_AsTimespec'),
                         'need _testcapi.PyTime_AsTimespec')
    def test_AsTimespec(self):
        from _testcapi import PyTime_AsTimespec
        def timespec_converter(ns):
            # A timespec is a (tv_sec, tv_nsec) pair.
            return divmod(ns, SEC_TO_NS)
        self.check_int_rounding(lambda ns, rnd: PyTime_AsTimespec(ns),
                                timespec_converter,
                                NS_TO_SEC,
                                value_filter=self.time_t_filter)
    @unittest.skipUnless(hasattr(_testcapi, 'PyTime_AsTimeval_clamp'),
                         'need _testcapi.PyTime_AsTimeval_clamp')
    def test_AsTimeval_clamp(self):
        from _testcapi import PyTime_AsTimeval_clamp
        if sys.platform == 'win32':
            from _testcapi import LONG_MIN, LONG_MAX
            tv_sec_max = LONG_MAX
            tv_sec_min = LONG_MIN
        else:
            tv_sec_max = self.time_t_max
            tv_sec_min = self.time_t_min
        # The clamping variants saturate instead of raising OverflowError.
        for t in (_PyTime_MIN, _PyTime_MAX):
            ts = PyTime_AsTimeval_clamp(t, _PyTime.ROUND_CEILING)
            with decimal.localcontext() as context:
                context.rounding = decimal.ROUND_CEILING
                us = self.decimal_round(decimal.Decimal(t) / US_TO_NS)
            tv_sec, tv_usec = divmod(us, SEC_TO_US)
            if tv_sec_max < tv_sec:
                tv_sec = tv_sec_max
                tv_usec = 0
            elif tv_sec < tv_sec_min:
                tv_sec = tv_sec_min
                tv_usec = 0
            self.assertEqual(ts, (tv_sec, tv_usec))
    @unittest.skipUnless(hasattr(_testcapi, 'PyTime_AsTimespec_clamp'),
                         'need _testcapi.PyTime_AsTimespec_clamp')
    def test_AsTimespec_clamp(self):
        from _testcapi import PyTime_AsTimespec_clamp
        for t in (_PyTime_MIN, _PyTime_MAX):
            ts = PyTime_AsTimespec_clamp(t)
            tv_sec, tv_nsec = divmod(t, NS_TO_SEC)
            if self.time_t_max < tv_sec:
                tv_sec = self.time_t_max
                tv_nsec = 0
            elif tv_sec < self.time_t_min:
                tv_sec = self.time_t_min
                tv_nsec = 0
            self.assertEqual(ts, (tv_sec, tv_nsec))
    def test_AsMilliseconds(self):
        from _testcapi import PyTime_AsMilliseconds
        self.check_int_rounding(PyTime_AsMilliseconds,
                                self.create_decimal_converter(MS_TO_NS),
                                NS_TO_SEC)
    def test_AsMicroseconds(self):
        from _testcapi import PyTime_AsMicroseconds
        self.check_int_rounding(PyTime_AsMicroseconds,
                                self.create_decimal_converter(US_TO_NS),
                                NS_TO_SEC)
class TestOldPyTime(CPyTimeTestCase, unittest.TestCase):
    """Tests the "old" C pytime API based on the C time_t type."""
    # time_t is a 32-bit or 64-bit signed integer
    OVERFLOW_SECONDS = 2 ** 64
    def test_object_to_time_t(self):
        from _testcapi import pytime_object_to_time_t
        self.check_int_rounding(pytime_object_to_time_t,
                                lambda secs: secs,
                                value_filter=self.time_t_filter)
        self.check_float_rounding(pytime_object_to_time_t,
                                  self.decimal_round,
                                  value_filter=self.time_t_filter)
    def create_converter(self, sec_to_unit):
        """Return a converter mapping seconds to an (intpart, fracpart)
        pair, where fracpart is expressed in 1/sec_to_unit units and is
        normalized into [0, sec_to_unit)."""
        def converter(secs):
            floatpart, intpart = math.modf(secs)
            intpart = int(intpart)
            floatpart *= sec_to_unit
            floatpart = self.decimal_round(floatpart)
            # Re-normalize after rounding so the fractional part stays
            # within [0, sec_to_unit), borrowing from/carrying into intpart.
            if floatpart < 0:
                floatpart += sec_to_unit
                intpart -= 1
            elif floatpart >= sec_to_unit:
                floatpart -= sec_to_unit
                intpart += 1
            return (intpart, floatpart)
        return converter
    def test_object_to_timeval(self):
        from _testcapi import pytime_object_to_timeval
        self.check_int_rounding(pytime_object_to_timeval,
                                lambda secs: (secs, 0),
                                value_filter=self.time_t_filter)
        self.check_float_rounding(pytime_object_to_timeval,
                                  self.create_converter(SEC_TO_US),
                                  value_filter=self.time_t_filter)
        # test nan
        for time_rnd, _ in ROUNDING_MODES:
            with self.assertRaises(ValueError):
                pytime_object_to_timeval(float('nan'), time_rnd)
    def test_object_to_timespec(self):
        from _testcapi import pytime_object_to_timespec
        self.check_int_rounding(pytime_object_to_timespec,
                                lambda secs: (secs, 0),
                                value_filter=self.time_t_filter)
        self.check_float_rounding(pytime_object_to_timespec,
                                  self.create_converter(SEC_TO_NS),
                                  value_filter=self.time_t_filter)
        # test nan
        for time_rnd, _ in ROUNDING_MODES:
            with self.assertRaises(ValueError):
                pytime_object_to_timespec(float('nan'), time_rnd)
@unittest.skipUnless(sys.platform == "darwin", "test weak linking on macOS")
class TestTimeWeaklinking(unittest.TestCase):
    """Verify weak-linking behaviour of the clock APIs on macOS.

    Only behaviour introduced by weak linking is checked here; the regular
    semantics of these functions are covered by the other test cases. See
    the section on Weak Linking in Mac/README.txt for more information.
    """

    def test_clock_functions(self):
        import platform
        import sysconfig

        # Skip outright when the interpreter was built without clock_gettime.
        if not sysconfig.get_config_vars().get("HAVE_CLOCK_GETTIME"):
            raise unittest.SkipTest("HAVE_CLOCK_GETTIME is not available")
        mac_release = tuple(
            int(part) for part in platform.mac_ver()[0].split("."))
        weak_symbols = [
            "CLOCK_MONOTONIC", "clock_gettime", "clock_gettime_ns",
            "clock_settime", "clock_settime_ns", "clock_getres"]
        # The clock APIs were introduced in macOS 10.12; on older releases
        # the weakly-linked symbols must be absent from the time module.
        if mac_release >= (10, 12):
            for symbol in weak_symbols:
                self.assertTrue(hasattr(time, symbol),
                                f"time.{symbol} is not available")
        else:
            for symbol in weak_symbols:
                self.assertFalse(hasattr(time, symbol),
                                 f"time.{symbol} is available")
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| true | true |
f7fd652797704d9dee7b345306d913dfe42070c4 | 986 | py | Python | scheduler/account/migrations/0001_initial.py | NaskoVasilev/Scheduler | 02633e38e8bb803c04371ab3e1ee27e3d8997a53 | [
"MIT"
] | 1 | 2021-03-04T19:08:27.000Z | 2021-03-04T19:08:27.000Z | scheduler/account/migrations/0001_initial.py | NaskoVasilev/Scheduler | 02633e38e8bb803c04371ab3e1ee27e3d8997a53 | [
"MIT"
] | 23 | 2021-03-11T16:45:41.000Z | 2021-06-28T21:38:44.000Z | scheduler/account/migrations/0001_initial.py | NaskoVasilev/Scheduler | 02633e38e8bb803c04371ab3e1ee27e3d8997a53 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.7 on 2021-05-28 16:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the account app (auto-generated by Django).

    Creates the Client table and a Hairdresser table that inherits from
    Client via multi-table inheritance (one-to-one parent link).
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Client',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(max_length=64)),
                # NOTE(review): fixed-width password plus separate salt —
                # presumably hashed by application code; verify against callers.
                ('password', models.CharField(max_length=64)),
                ('salt', models.CharField(max_length=20)),
            ],
        ),
        migrations.CreateModel(
            name='Hairdresser',
            fields=[
                ('client_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='account.client')),
            ],
            bases=('account.client',),
        ),
    ]
| 30.8125 | 191 | 0.586207 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the account app (auto-generated by Django).

    Creates the Client table and a Hairdresser table that inherits from
    Client via multi-table inheritance (one-to-one parent link).
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Client',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(max_length=64)),
                # NOTE(review): fixed-width password plus separate salt —
                # presumably hashed by application code; verify against callers.
                ('password', models.CharField(max_length=64)),
                ('salt', models.CharField(max_length=20)),
            ],
        ),
        migrations.CreateModel(
            name='Hairdresser',
            fields=[
                ('client_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='account.client')),
            ],
            bases=('account.client',),
        ),
    ]
| true | true |
f7fd652a1f5692001a77d19216eef56c7b7b0292 | 4,529 | py | Python | missingfact/nn/util.py | usc-isi-i2/missing-fact | 834a0b4531170b4a108f765e19d02bd7446e0563 | [
"Apache-2.0"
] | 17 | 2019-09-23T12:47:37.000Z | 2022-03-26T12:50:08.000Z | missingfact/nn/util.py | usc-isi-i2/missing-fact | 834a0b4531170b4a108f765e19d02bd7446e0563 | [
"Apache-2.0"
] | null | null | null | missingfact/nn/util.py | usc-isi-i2/missing-fact | 834a0b4531170b4a108f765e19d02bd7446e0563 | [
"Apache-2.0"
] | 4 | 2020-01-10T12:15:02.000Z | 2020-07-19T05:39:25.000Z | import torch
from allennlp.nn.util import replace_masked_values, masked_max
def seq2vec_seq_aggregate(seq_tensor, mask, aggregate, bidirectional, dim=1):
"""
Takes the aggregation of sequence tensor
:param seq_tensor: Batched sequence requires [batch, seq, hs]
:param mask: binary mask with shape batch, seq_len, 1
:param aggregate: max, avg, sum
:param dim: The dimension to take the max. for batch, seq, hs it is 1
:return:
"""
seq_tensor_masked = seq_tensor * mask.unsqueeze(-1)
aggr_func = None
if aggregate == "last":
if seq_tensor.dim() > 3:
seq = get_final_encoder_states_after_squashing(seq_tensor, mask, bidirectional)
else:
seq = get_final_encoder_states(seq_tensor, mask, bidirectional)
elif aggregate == "max":
seq = masked_max(seq_tensor, mask.unsqueeze(-1).expand_as(seq_tensor), dim=dim)
elif aggregate == "min":
seq = -masked_max(-seq_tensor, mask.unsqueeze(-1).expand_as(seq_tensor), dim=dim)
elif aggregate == "sum":
aggr_func = torch.sum
seq = aggr_func(seq_tensor_masked, dim=dim)
elif aggregate == "avg":
aggr_func = torch.sum
seq = aggr_func(seq_tensor_masked, dim=dim)
seq_lens = torch.sum(mask, dim=dim) # this returns batch_size, .. 1 ..
masked_seq_lens = replace_masked_values(seq_lens, (seq_lens != 0).float(), 1.0)
masked_seq_lens = masked_seq_lens.unsqueeze(dim=dim).expand_as(seq)
# print(seq.shape)
# print(masked_seq_lens.shape)
seq = seq / masked_seq_lens
return seq
def get_final_encoder_states_after_squashing(embedded_text, text_mask, bidirectional):
    """Apply get_final_encoder_states to a tensor with extra leading dims.

    All leading dimensions are collapsed into one batch dimension, the final
    encoder state is extracted for every (squashed) sequence, and the
    original leading dimensions are then restored with the hidden size as
    the last axis.
    """
    seq_len = embedded_text.size()[-2]
    hidden = embedded_text.size()[-1]
    flat_text = embedded_text.contiguous().view(-1, seq_len, hidden)
    flat_mask = text_mask.contiguous().view(flat_text.size()[0], flat_text.size()[1])
    flat_final = get_final_encoder_states(flat_text, flat_mask, bidirectional)
    leading_dims = list(embedded_text.size()[:-2])
    return flat_final.contiguous().view(*(leading_dims + [-1]))
def get_final_encoder_states(encoder_outputs: torch.Tensor,
                             mask: torch.Tensor,
                             bidirectional: bool = False) -> torch.Tensor:
    """
    Modified over the original Allennlp function
    Given the output from a ``Seq2SeqEncoder``, with shape ``(batch_size, sequence_length,
    encoding_dim)``, this method returns the final hidden state for each element of the batch,
    giving a tensor of shape ``(batch_size, encoding_dim)``. This is not as simple as
    ``encoder_outputs[:, -1]``, because the sequences could have different lengths. We use the
    mask (which has shape ``(batch_size, sequence_length)``) to find the final state for each batch
    instance.
    Additionally, if ``bidirectional`` is ``True``, we will split the final dimension of the
    ``encoder_outputs`` into two and assume that the first half is for the forward direction of the
    encoder and the second half is for the backward direction. We will concatenate the last state
    for each encoder dimension, giving ``encoder_outputs[:, -1, :encoding_dim/2]`` concated with
    ``encoder_outputs[:, 0, encoding_dim/2:]``.
    """
    # These are the indices of the last words in the sequences (i.e. length sans padding - 1). We
    # are assuming sequences are right padded.
    # Shape: (batch_size,)
    last_word_indices = mask.sum(1).long() - 1
    # handle -1 cases
    # (fully-masked rows would otherwise index position -1; clamp them to 0)
    ll_ = (last_word_indices != -1).long()
    last_word_indices = last_word_indices * ll_
    batch_size, _, encoder_output_dim = encoder_outputs.size()
    expanded_indices = last_word_indices.view(-1, 1, 1).expand(batch_size, 1, encoder_output_dim)
    # Shape: (batch_size, 1, encoder_output_dim)
    final_encoder_output = encoder_outputs.gather(1, expanded_indices)
    final_encoder_output = final_encoder_output.squeeze(1)  # (batch_size, encoder_output_dim)
    if bidirectional:
        # Forward half ends at the last token, backward half ends at token 0.
        final_forward_output = final_encoder_output[:, :(encoder_output_dim // 2)]
        final_backward_output = encoder_outputs[:, 0, (encoder_output_dim // 2):]
        final_encoder_output = torch.cat([final_forward_output, final_backward_output], dim=-1)
return final_encoder_output | 48.698925 | 99 | 0.693972 | import torch
from allennlp.nn.util import replace_masked_values, masked_max
def seq2vec_seq_aggregate(seq_tensor, mask, aggregate, bidirectional, dim=1):
seq_tensor_masked = seq_tensor * mask.unsqueeze(-1)
aggr_func = None
if aggregate == "last":
if seq_tensor.dim() > 3:
seq = get_final_encoder_states_after_squashing(seq_tensor, mask, bidirectional)
else:
seq = get_final_encoder_states(seq_tensor, mask, bidirectional)
elif aggregate == "max":
seq = masked_max(seq_tensor, mask.unsqueeze(-1).expand_as(seq_tensor), dim=dim)
elif aggregate == "min":
seq = -masked_max(-seq_tensor, mask.unsqueeze(-1).expand_as(seq_tensor), dim=dim)
elif aggregate == "sum":
aggr_func = torch.sum
seq = aggr_func(seq_tensor_masked, dim=dim)
elif aggregate == "avg":
aggr_func = torch.sum
seq = aggr_func(seq_tensor_masked, dim=dim)
seq_lens = torch.sum(mask, dim=dim)
masked_seq_lens = replace_masked_values(seq_lens, (seq_lens != 0).float(), 1.0)
masked_seq_lens = masked_seq_lens.unsqueeze(dim=dim).expand_as(seq)
seq = seq / masked_seq_lens
return seq
def get_final_encoder_states_after_squashing(embedded_text, text_mask, bidirectional):
    """Apply get_final_encoder_states to a tensor with extra leading dims:
    collapse all leading dimensions into one batch dimension, take the final
    state per sequence, then restore the leading dimensions."""
    squashed_shape = [-1, embedded_text.size()[-2], embedded_text.size()[-1]]
    squashed_text = embedded_text.contiguous().view(*squashed_shape)
    squash_mask_shape = [squashed_text.size()[0], squashed_text.size()[1]]
    squashed_mask = text_mask.contiguous().view(*squash_mask_shape)
    squashed_final_seq = get_final_encoder_states(squashed_text, squashed_mask, bidirectional)
    # Restore the original leading dims with hidden size as the last axis.
    output_size = [x for x in embedded_text.size()[:-2]] + [-1]
    return squashed_final_seq.contiguous().view(*output_size)
def get_final_encoder_states(encoder_outputs: torch.Tensor,
                             mask: torch.Tensor,
                             bidirectional: bool = False) -> torch.Tensor:
    """Return the final hidden state of each sequence in the batch.

    `encoder_outputs` has shape (batch, seq_len, dim); the binary `mask`
    (batch, seq_len) marks real tokens, so the last real position is used
    rather than simply index -1. When `bidirectional` is True the last
    dimension is split in half: the forward half is taken from the final
    token and the backward half from token 0, then concatenated.
    """
    # Index of the last real token per row (length - 1).
    last_word_indices = mask.sum(1).long() - 1
    # Fully-masked rows would yield index -1; clamp them to 0.
    ll_ = (last_word_indices != -1).long()
    last_word_indices = last_word_indices * ll_
    batch_size, _, encoder_output_dim = encoder_outputs.size()
    expanded_indices = last_word_indices.view(-1, 1, 1).expand(batch_size, 1, encoder_output_dim)
    # Gather along the sequence axis: (batch, 1, dim) -> (batch, dim).
    final_encoder_output = encoder_outputs.gather(1, expanded_indices)
    final_encoder_output = final_encoder_output.squeeze(1)
    if bidirectional:
        # Forward half ends at the last token, backward half ends at token 0.
        final_forward_output = final_encoder_output[:, :(encoder_output_dim // 2)]
        final_backward_output = encoder_outputs[:, 0, (encoder_output_dim // 2):]
        final_encoder_output = torch.cat([final_forward_output, final_backward_output], dim=-1)
return final_encoder_output | true | true |
f7fd66d33fc0c329db7daaf87373385156d84217 | 17,850 | py | Python | tensorflow/contrib/training/python/training/evaluation.py | tianyapiaozi/tensorflow | fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a | [
"Apache-2.0"
] | 71 | 2017-05-25T16:02:15.000Z | 2021-06-09T16:08:08.000Z | tensorflow/contrib/training/python/training/evaluation.py | shrikunjsarda/tensorflow | 7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae | [
"Apache-2.0"
] | 133 | 2017-04-26T16:49:49.000Z | 2019-10-15T11:39:26.000Z | tensorflow/contrib/training/python/training/evaluation.py | shrikunjsarda/tensorflow | 7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae | [
"Apache-2.0"
] | 31 | 2018-09-11T02:17:17.000Z | 2021-12-15T10:33:35.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains functions for evaluation and summarization of metrics.
The evaluation.py module contains helper functions for evaluating TensorFlow
modules using a variety of metrics and summarizing the results.
****************************************
* Evaluating a Checkpointed Model Once *
****************************************
Once we've trained a model, we'll want to evaluate it. The simplest way to do
this is to evaluate the performance of a saved model a single time. In order
to do this, we can specify a number of metrics we'll want to evaluate as well
as specify the summaries we want to save to disk. Furthermore, we can print
out the metrics values to stdout:
# Specify where the checkpoint is stored:
checkpoint_path = ...
# Create model and obtain the predictions:
images, labels = LoadData(...)
predictions = MyModel(images)
# Choose the metrics to compute:
names_to_values, names_to_updates = tf.contrib.metrics.aggregate_metric_map({
"accuracy": tf.metrics.accuracy(labels, predictions),
"mse": tf.metrics.mean_squared_error(labels, predictions),
})
# Define the summaries to write:
for metric_name, metric_value in metrics_to_values.iteritems():
tf.summary.scalar(metric_name, metric_value)
checkpoint_dir = '/tmp/my_model_dir/'
log_dir = '/tmp/my_model_eval/'
# We'll evaluate 1000 batches:
num_evals = 1000
names_to_values = evaluate_once(
checkpoint_path=checkpoint_path,
eval_ops=names_to_updates.values(),
final_ops=names_to_values,
hooks=[
tf.contrib.training.StopAfterNEvalsHook(num_evals),
tf.contrib.training.SummaryAtEndHook(logdir),
],
config=None)
for name in names_to_values:
print('Metric %s has value %f.' % (name, names_to_values[name]))
************************************************
* Evaluating a Checkpointed Model with Metrics *
************************************************
Often, one wants to evaluate a model checkpoint saved on disk. This can be
performed once or repeatedly on a set schedule.
To evaluate a particular model, users define zero or more metrics and zero or
more summaries and call the evaluate_repeatedly method:
# Create model and obtain the predictions:
images, labels = LoadData(...)
predictions = MyModel(images)
# Choose the metrics to compute:
names_to_values, names_to_updates = tf.contrib.metrics.aggregate_metric_map({
"accuracy": tf.metrics.accuracy(labels, predictions),
"mse": tf.metrics.mean_squared_error(labels, predictions),
})
# Define the summaries to write:
for metric_name, metric_value in metrics_to_values.iteritems():
tf.summary.scalar(metric_name, metric_value)
checkpoint_dir = '/tmp/my_model_dir/'
log_dir = '/tmp/my_model_eval/'
# We'll evaluate 1000 batches:
num_evals = 1000
# Evaluate every 10 minutes:
tf.contrib.training.evaluate_repeatedly(
checkpoint_dir,
eval_ops=names_to_updates.values(),
hooks=[
tf.contrib.training.StopAfterNEvalsHook(num_evals),
tf.contrib.training.SummaryAtEndHook(logdir),
],
eval_interval_secs=600)
*******************************************************
* Evaluating a Checkpointed Model with Summaries Only *
*******************************************************
At times, an evaluation can be performed without metrics at all but rather
with only summaries. The user need only leave out the 'eval_ops' argument:
# Create model and obtain the predictions:
images, labels = LoadData(...)
predictions = MyModel(images)
# Define the summaries to write:
tf.summary.scalar(...)
tf.summary.histogram(...)
checkpoint_dir = '/tmp/my_model_dir/'
log_dir = '/tmp/my_model_eval/'
# Evaluate once every 10 minutes.
tf.contrib.training.evaluate_repeatedly(
checkpoint_dir,
hooks=[
tf.contrib.training.SummaryAtEndHook(logdir),
],
eval_interval_secs=600)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import evaluation
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
# Public API of this contrib module.
__all__ = [
    'StopAfterNEvalsHook',
    'SummaryAtEndHook',
    'checkpoints_iterator',
    'evaluate_once',
    'evaluate_repeatedly',
    'get_or_create_eval_step',
    'wait_for_new_checkpoint',
]
# Re-export private helpers from tensorflow.python.training.evaluation under
# public contrib names; pylint is silenced for the protected access.
# pylint: disable=protected-access
# pylint: disable=invalid-name
StopAfterNEvalsHook = evaluation._StopAfterNEvalsHook
evaluate_once = evaluation._evaluate_once
get_or_create_eval_step = evaluation._get_or_create_eval_step
# pylint: enable=invalid-name
# pylint: enable=protected-access
def wait_for_new_checkpoint(checkpoint_dir,
                            last_checkpoint=None,
                            seconds_to_sleep=1,
                            timeout=None):
  """Blocks until a checkpoint newer than `last_checkpoint` appears.

  Args:
    checkpoint_dir: The directory in which checkpoints are saved.
    last_checkpoint: The last checkpoint path used, or `None` when expecting
      a checkpoint for the first time.
    seconds_to_sleep: Seconds to sleep between polls of the directory.
    timeout: Maximum number of seconds to wait; `None` waits indefinitely.

  Returns:
    A new checkpoint path, or None if the timeout was reached.
  """
  logging.info('Waiting for new checkpoint at %s', checkpoint_dir)
  deadline = None if timeout is None else time.time() + timeout
  while True:
    candidate = tf_saver.latest_checkpoint(checkpoint_dir)
    if candidate is not None and candidate != last_checkpoint:
      logging.info('Found new checkpoint at %s', candidate)
      return candidate
    # Give up if sleeping once more would overshoot the deadline.
    if deadline is not None and time.time() + seconds_to_sleep > deadline:
      return None
    time.sleep(seconds_to_sleep)
def checkpoints_iterator(checkpoint_dir,
                         min_interval_secs=0,
                         timeout=None,
                         timeout_fn=None):
  """Continuously yield new checkpoint files as they appear.

  New checkpoints are only looked for when control flow returns to the
  iterator, so checkpoints can be missed if the caller's loop body takes
  longer than `min_interval_secs` or than the checkpoint-writing interval.

  `timeout` bounds how long (in seconds) each wait for a new checkpoint may
  last. When a wait times out:

  * with no `timeout_fn`, the iterator stops yielding;
  * with a `timeout_fn` that returns a true value, the iterator stops;
  * with a `timeout_fn` that returns a false value, the iterator resumes
    waiting, and the timeout logic applies again.

  This lets callers decide what happens when checkpoints stop arriving, e.g.
  supply a `timeout_fn` returning `True` once they know training has ended.

  Args:
    checkpoint_dir: The directory in which checkpoints are saved.
    min_interval_secs: Minimum number of seconds between yields.
    timeout: Maximum seconds to wait between checkpoints; `None` waits
      indefinitely.
    timeout_fn: Optional callable (no arguments) consulted after a timeout;
      a true return value ends the iteration.

  Yields:
    String paths to latest checkpoint files as they arrive.
  """
  latest_path = None
  while True:
    next_path = wait_for_new_checkpoint(
        checkpoint_dir, latest_path, timeout=timeout)
    if next_path is None:
      if not timeout_fn:
        # timed out and no callback to consult: stop iterating.
        logging.info('Timed-out waiting for a checkpoint.')
        return
      if timeout_fn():
        # The callback indicated that we are truly done.
        return
      # The callback indicated that more checkpoints may come; keep waiting.
      continue
    found_at = time.time()
    latest_path = next_path
    yield latest_path
    # Throttle so consecutive yields are at least min_interval_secs apart.
    remaining = found_at + min_interval_secs - time.time()
    if remaining > 0:
      time.sleep(remaining)
class SummaryAtEndHook(session_run_hook.SessionRunHook):
  """A run hook that writes evaluation summaries when the session ends."""

  def __init__(self,
               log_dir=None,
               summary_writer=None,
               summary_op=None,
               feed_dict=None):
    """Constructs the Summary Hook.

    Args:
      log_dir: Directory for the summary events; used only when
        `summary_writer` is not given.
      summary_writer: A `tf.summary.FileWriter` used to write the events.
      summary_op: The summary op to run. When `None`, all summaries in the
        tf.GraphKeys.SUMMARIES collection are merged and used.
      feed_dict: Optional feed dictionary used when evaluating the summaries.

    Raises:
      ValueError: If both `log_dir` and `summary_writer` are `None`.
    """
    self._summary_op = summary_op
    # When no op was supplied, merge_all() is deferred to begin() so that
    # summaries created after construction are still picked up.
    self._replace_summary_op = summary_op is None
    self._feed_dict = feed_dict
    self._summary_writer = summary_writer
    self._log_dir = log_dir
    if self._log_dir is None and self._summary_writer is None:
      raise ValueError('One of log_dir or summary_writer should be used.')

  def begin(self):
    if self._replace_summary_op:
      self._summary_op = summary.merge_all()
    self._global_step = training_util.get_or_create_global_step()

  def after_create_session(self, session, coord):
    # Lazily create the writer from log_dir once a session exists.
    if self._summary_writer is None and self._log_dir:
      self._summary_writer = summary.FileWriterCache.get(self._log_dir)

  def end(self, session):
    current_step = training_util.global_step(session, self._global_step)
    serialized_summary = session.run(self._summary_op, self._feed_dict)
    if self._summary_writer:
      self._summary_writer.add_summary(serialized_summary, current_step)
      self._summary_writer.flush()
def _scaffold_with_init(scaffold, saver, checkpoint_path):
  """Returns a scaffold whose init_fn restores the given checkpoint.

  Args:
    scaffold: The scaffold to copy.
    saver: The saver to use when restoring the checkpoint.
    checkpoint_path: An absolute path to a checkpoint.

  Returns:
    A scaffold with an init_fn that loads the given checkpoint. If the
    provided scaffold already has an init_fn, it is returned unchanged.
  """
  if scaffold.init_fn:
    return scaffold

  def _restore(_, session):
    # Runs once at session creation; loads the weights being evaluated.
    saver.restore(session, checkpoint_path)

  return monitored_session.Scaffold(
      init_op=scaffold.init_op,
      init_feed_dict=scaffold.init_feed_dict,
      init_fn=_restore,
      ready_op=scaffold.ready_op,
      local_init_op=scaffold.local_init_op,
      summary_op=scaffold.summary_op,
      saver=scaffold.saver)
def evaluate_repeatedly(checkpoint_dir,
                        master='',
                        scaffold=None,
                        eval_ops=None,
                        feed_dict=None,
                        final_ops=None,
                        final_ops_feed_dict=None,
                        eval_interval_secs=60,
                        hooks=None,
                        config=None,
                        max_number_of_evaluations=None,
                        timeout=None,
                        timeout_fn=None):
  """Repeatedly searches for a checkpoint in `checkpoint_dir` and evaluates it.

  During a single evaluation, the `eval_ops` is run until the session is
  interrupted or requested to finish. This is typically requested via a
  `tf.contrib.training.StopAfterNEvalsHook` which results in `eval_ops` running
  the requested number of times.

  Optionally, a user can pass in `final_ops`, a single `Tensor`, a list of
  `Tensors` or a dictionary from names to `Tensors`. The `final_ops` is
  evaluated a single time after `eval_ops` has finished running and the fetched
  values of `final_ops` are returned. If `final_ops` is left as `None`, then
  `None` is returned.

  One may also consider using a `tf.contrib.training.SummaryAtEndHook` to
  record summaries after the `eval_ops` have run. If `eval_ops` is `None`, the
  summaries run immediately after the model checkpoint has been restored.

  Note that `evaluate_once` creates a local variable used to track the number
  of evaluations run via `tf.contrib.training.get_or_create_eval_step`.
  Consequently, if a custom local init op is provided via a `scaffold`, the
  caller should ensure that the local init op also initializes the eval step.

  Args:
    checkpoint_dir: The directory where checkpoints are stored.
    master: The address of the TensorFlow master.
    scaffold: An tf.train.Scaffold instance for initializing variables and
      restoring variables. Note that `scaffold.init_fn` is used by the function
      to restore the checkpoint. If you supply a custom init_fn, then it must
      also take care of restoring the model from its checkpoint.
    eval_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names
      to `Tensors`, which is run until the session is requested to stop,
      commonly done by a `tf.contrib.training.StopAfterNEvalsHook`.
    feed_dict: The feed dictionary to use when executing the `eval_ops`.
    final_ops: A single `Tensor`, a list of `Tensors` or a dictionary of names
      to `Tensors`.
    final_ops_feed_dict: A feed dictionary to use when evaluating `final_ops`.
    eval_interval_secs: The minimum number of seconds between evaluations.
    hooks: List of `tf.train.SessionRunHook` callbacks which are run inside the
      evaluation loop.
    config: An instance of `tf.ConfigProto` that will be used to
      configure the `Session`. If left as `None`, the default will be used.
    max_number_of_evaluations: The maximum times to run the evaluation. If left
      as `None`, then evaluation runs indefinitely.
    timeout: The maximum amount of time to wait between checkpoints. If left as
      `None`, then the process will wait indefinitely.
    timeout_fn: Optional function to call after a timeout. If the function
      returns True, then it means that no new checkpoints will be generated and
      the iterator will exit. The function is called with no arguments.

  Returns:
    The fetched values of `final_ops` or `None` if `final_ops` is `None`.
  """
  eval_step = get_or_create_eval_step()

  # Prepare the run hooks. Copy the caller's list so that appending the
  # FinalOpsHook below does not mutate an argument the caller may reuse.
  hooks = list(hooks) if hooks else []
  if eval_ops is not None:
    # Increment the eval step after each run of `eval_ops` so that hooks such
    # as StopAfterNEvalsHook can observe progress.
    update_eval_step = state_ops.assign_add(eval_step, 1)

    for h in hooks:
      if isinstance(h, StopAfterNEvalsHook):
        h._set_evals_completed_tensor(update_eval_step)  # pylint: disable=protected-access

    if isinstance(eval_ops, dict):
      # Copy before adding the bookkeeping op so the caller's dict is not
      # mutated.
      eval_ops = dict(eval_ops)
      eval_ops['update_eval_step'] = update_eval_step
    elif isinstance(eval_ops, (tuple, list)):
      eval_ops = list(eval_ops) + [update_eval_step]
    else:
      eval_ops = [eval_ops, update_eval_step]

  # Captures the fetched values of `final_ops` after each evaluation.
  final_ops_hook = basic_session_run_hooks.FinalOpsHook(final_ops,
                                                        final_ops_feed_dict)
  hooks.append(final_ops_hook)

  num_evaluations = 0
  for checkpoint_path in checkpoints_iterator(
      checkpoint_dir,
      min_interval_secs=eval_interval_secs,
      timeout=timeout,
      timeout_fn=timeout_fn):

    session_creator = monitored_session.ChiefSessionCreator(
        scaffold=scaffold,
        checkpoint_filename_with_path=checkpoint_path,
        master=master,
        config=config)

    with monitored_session.MonitoredSession(
        session_creator=session_creator, hooks=hooks) as session:
      logging.info('Starting evaluation at ' + time.strftime(
          '%Y-%m-%d-%H:%M:%S', time.gmtime()))
      if eval_ops is not None:
        while not session.should_stop():
          session.run(eval_ops, feed_dict)

      logging.info('Finished evaluation at ' + time.strftime(
          '%Y-%m-%d-%H:%M:%S', time.gmtime()))
    num_evaluations += 1

    if (max_number_of_evaluations is not None and
        num_evaluations >= max_number_of_evaluations):
      return final_ops_hook.final_ops_values

  return final_ops_hook.final_ops_values
| 38.804348 | 91 | 0.703417 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import evaluation
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
__all__ = [
'StopAfterNEvalsHook',
'SummaryAtEndHook',
'checkpoints_iterator',
'evaluate_once',
'evaluate_repeatedly',
'get_or_create_eval_step',
'wait_for_new_checkpoint',
]
StopAfterNEvalsHook = evaluation._StopAfterNEvalsHook
evaluate_once = evaluation._evaluate_once
get_or_create_eval_step = evaluation._get_or_create_eval_step
def wait_for_new_checkpoint(checkpoint_dir,
last_checkpoint=None,
seconds_to_sleep=1,
timeout=None):
logging.info('Waiting for new checkpoint at %s', checkpoint_dir)
stop_time = time.time() + timeout if timeout is not None else None
while True:
checkpoint_path = tf_saver.latest_checkpoint(checkpoint_dir)
if checkpoint_path is None or checkpoint_path == last_checkpoint:
if stop_time is not None and time.time() + seconds_to_sleep > stop_time:
return None
time.sleep(seconds_to_sleep)
else:
logging.info('Found new checkpoint at %s', checkpoint_path)
return checkpoint_path
def checkpoints_iterator(checkpoint_dir,
min_interval_secs=0,
timeout=None,
timeout_fn=None):
checkpoint_path = None
while True:
new_checkpoint_path = wait_for_new_checkpoint(
checkpoint_dir, checkpoint_path, timeout=timeout)
if new_checkpoint_path is None:
if not timeout_fn:
logging.info('Timed-out waiting for a checkpoint.')
return
if timeout_fn():
return
else:
continue
start = time.time()
checkpoint_path = new_checkpoint_path
yield checkpoint_path
time_to_next_eval = start + min_interval_secs - time.time()
if time_to_next_eval > 0:
time.sleep(time_to_next_eval)
class SummaryAtEndHook(session_run_hook.SessionRunHook):
def __init__(self,
log_dir=None,
summary_writer=None,
summary_op=None,
feed_dict=None):
self._summary_op = summary_op
self._replace_summary_op = summary_op is None
self._feed_dict = feed_dict
self._summary_writer = summary_writer
self._log_dir = log_dir
if self._log_dir is None and self._summary_writer is None:
raise ValueError('One of log_dir or summary_writer should be used.')
def begin(self):
if self._replace_summary_op:
self._summary_op = summary.merge_all()
self._global_step = training_util.get_or_create_global_step()
def after_create_session(self, session, coord):
if self._summary_writer is None and self._log_dir:
self._summary_writer = summary.FileWriterCache.get(self._log_dir)
def end(self, session):
global_step = training_util.global_step(session, self._global_step)
summary_str = session.run(self._summary_op, self._feed_dict)
if self._summary_writer:
self._summary_writer.add_summary(summary_str, global_step)
self._summary_writer.flush()
def _scaffold_with_init(scaffold, saver, checkpoint_path):
def restore_checkpoint(_, session):
saver.restore(session, checkpoint_path)
if not scaffold.init_fn:
scaffold = monitored_session.Scaffold(
init_op=scaffold.init_op,
init_feed_dict=scaffold.init_feed_dict,
init_fn=restore_checkpoint,
ready_op=scaffold.ready_op,
local_init_op=scaffold.local_init_op,
summary_op=scaffold.summary_op,
saver=scaffold.saver)
return scaffold
def evaluate_repeatedly(checkpoint_dir,
master='',
scaffold=None,
eval_ops=None,
feed_dict=None,
final_ops=None,
final_ops_feed_dict=None,
eval_interval_secs=60,
hooks=None,
config=None,
max_number_of_evaluations=None,
timeout=None,
timeout_fn=None):
eval_step = get_or_create_eval_step()
hooks = hooks or []
if eval_ops is not None:
update_eval_step = state_ops.assign_add(eval_step, 1)
for h in hooks:
if isinstance(h, StopAfterNEvalsHook):
h._set_evals_completed_tensor(update_eval_step)
if isinstance(eval_ops, dict):
eval_ops['update_eval_step'] = update_eval_step
elif isinstance(eval_ops, (tuple, list)):
eval_ops = list(eval_ops) + [update_eval_step]
else:
eval_ops = [eval_ops, update_eval_step]
final_ops_hook = basic_session_run_hooks.FinalOpsHook(final_ops,
final_ops_feed_dict)
hooks.append(final_ops_hook)
num_evaluations = 0
for checkpoint_path in checkpoints_iterator(
checkpoint_dir,
min_interval_secs=eval_interval_secs,
timeout=timeout,
timeout_fn=timeout_fn):
session_creator = monitored_session.ChiefSessionCreator(
scaffold=scaffold,
checkpoint_filename_with_path=checkpoint_path,
master=master,
config=config)
with monitored_session.MonitoredSession(
session_creator=session_creator, hooks=hooks) as session:
logging.info('Starting evaluation at ' + time.strftime(
'%Y-%m-%d-%H:%M:%S', time.gmtime()))
if eval_ops is not None:
while not session.should_stop():
session.run(eval_ops, feed_dict)
logging.info('Finished evaluation at ' + time.strftime(
'%Y-%m-%d-%H:%M:%S', time.gmtime()))
num_evaluations += 1
if (max_number_of_evaluations is not None and
num_evaluations >= max_number_of_evaluations):
return final_ops_hook.final_ops_values
return final_ops_hook.final_ops_values
| true | true |
f7fd67a235541ec57a2246174a93721eec952813 | 3,672 | py | Python | tests/test_process.py | murilocamargos/pytvname | fa797980d9cbb88d1107019c60f70ec62e68a885 | [
"MIT"
] | null | null | null | tests/test_process.py | murilocamargos/pytvname | fa797980d9cbb88d1107019c60f70ec62e68a885 | [
"MIT"
] | null | null | null | tests/test_process.py | murilocamargos/pytvname | fa797980d9cbb88d1107019c60f70ec62e68a885 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os
test_root = os.path.dirname(os.path.abspath(__file__))
os.chdir(test_root)
sys.path.insert(0, os.path.dirname(test_root))
sys.path.insert(0, test_root)
import unittest
from pytvname import process
class NormalizeTest(unittest.TestCase):
    """Unit tests for ``process.normalize`` (show-name cleanup)."""

    def test_scandal_2009(self):
        # Year tags are stripped and the title is capitalized.
        normalized = process.normalize('scandal (2009)')
        self.assertEqual(normalized, 'Scandal')

    def test_the_mentalist(self):
        # Bracketed site prefixes are removed; dots become spaces.
        normalized = process.normalize('[www.down.org]the.mentalist')
        self.assertEqual(normalized, 'The Mentalist')

    def test_arrow_us_720p(self):
        # Country and quality suffixes are dropped.
        normalized = process.normalize('arrow.us.720p')
        self.assertEqual(normalized, 'Arrow')
class InfoTest(unittest.TestCase):
    """Unit tests for ``process.info`` (field extraction from release names)."""

    def test_banshee_s03e07_killers(self):
        expected = {
            'showName': 'Banshee',
            'seasonNum': '03',
            'episodeNum': '07',
            'teamName': 'KILLERS',
            'quality': 'HDTV',
        }
        actual = process.info('Banshee.S03e07.HDTV.x264-KILLERS[ettv]')
        self.assertEqual(actual, expected)

    def test_vikings_s01e01_webdl(self):
        expected = {
            'showName': 'Vikings',
            'seasonNum': '01',
            'episodeNum': '01',
            'teamName': 'WEB DL',
            'quality': '',
        }
        actual = process.info('vikings 01x01.web.dl')
        self.assertEqual(actual, expected)

    def test_the_big_bang_theory_8x16_hdtv_x264_lol(self):
        expected = {
            'showName': 'The Big Bang Theory',
            'seasonNum': '08',
            'episodeNum': '16',
            'teamName': 'LOL',
            'quality': 'HDTV',
        }
        actual = process.info('The.Big.Bang.Theory.8x16.HDTV.x264-LOL')
        self.assertEqual(actual, expected)

    def test_the_big_bang_theory_816_hdtv_x264_lol(self):
        # No SxxEyy / MxN marker is present, so nothing can be extracted.
        self.assertIsNone(process.info('The.Big.Bang.Theory.816.HDTV.x264-LOL'))
class ApplyFuncsTest(unittest.TestCase):
    """Unit tests for ``process.applyfuncs`` string transformations."""

    def test_house_of_cards_upper(self):
        result = process.applyfuncs('house of cards', ['upper'])
        self.assertEqual(result, 'HOUSE OF CARDS')

    def test_the_big_bang_theory_title(self):
        result = process.applyfuncs('The BIG bang ThEoRy', ['title'])
        self.assertEqual(result, 'The Big Bang Theory')

    def test_the_mentalist_lower_title(self):
        # Functions are applied in order: lower then title round-trips.
        result = process.applyfuncs('The Mentalist', ['lower', 'title'])
        self.assertEqual(result, 'The Mentalist')

    def test_09_zfone(self):
        # 'zfone' strips zero padding down to one digit.
        self.assertEqual(process.applyfuncs('09', ['zfone']), '9')

    def test_5_zftwo(self):
        # 'zftwo' pads to two digits.
        self.assertEqual(process.applyfuncs('5', ['zftwo']), '05')
class PrcTest(unittest.TestCase):
    """End-to-end tests for ``process.prc`` release-name reformatting."""

    def test_banshee_s03e07_killers(self):
        # Default output template.
        result = process.prc('Banshee.S03E07.HDTV.x264-KILLERS[ettv]')
        self.assertEqual(result, 'Banshee S03E07 KILLERS')

    def test_vikings_s01e01_webdl(self):
        template = '{showName} S{seasonNum}E{episodeNum} {quality} {teamName}'
        result = process.prc(
            'vikings.s01e01.rites.of.passage.720p.web.dl.sujaidr', template)
        self.assertEqual(result, 'Vikings S01E01 720p WEB DL')

    def test_banshee_s03e08_killers(self):
        # Per-field helper functions may be applied inside placeholders.
        template = '{showName.lower}.{seasonNum.zfone}{episodeNum}.{teamName.lower}'
        result = process.prc('Banshee.S03E08.HDTV.x264-KILLERS[ettv]', template)
        self.assertEqual(result, 'banshee.308.killers')

    def test_banshee_308_killers(self):
        # A bare episode number cannot be parsed, so prc returns None.
        self.assertIsNone(process.prc('Banshee.308.HDTV.x264-KILLERS[ettv]'))


if __name__ == '__main__':
    unittest.main()
import sys, os
test_root = os.path.dirname(os.path.abspath(__file__))
os.chdir(test_root)
sys.path.insert(0, os.path.dirname(test_root))
sys.path.insert(0, test_root)
import unittest
from pytvname import process
class NormalizeTest(unittest.TestCase):
def test_scandal_2009(self):
self.assertEqual(process.normalize('scandal (2009)'), 'Scandal')
def test_the_mentalist(self):
self.assertEqual(process.normalize('[www.down.org]the.mentalist'), 'The Mentalist')
def test_arrow_us_720p(self):
self.assertEqual(process.normalize('arrow.us.720p'), 'Arrow')
class InfoTest(unittest.TestCase):
def test_banshee_s03e07_killers(self):
result = {
'showName': 'Banshee',
'seasonNum': '03',
'episodeNum': '07',
'teamName': 'KILLERS',
'quality': 'HDTV'
}
self.assertEqual(process.info('Banshee.S03e07.HDTV.x264-KILLERS[ettv]'), result)
def test_vikings_s01e01_webdl(self):
result = {
'showName': 'Vikings',
'seasonNum': '01',
'episodeNum': '01',
'teamName': 'WEB DL',
'quality': ''
}
self.assertEqual(process.info('vikings 01x01.web.dl'), result)
def test_the_big_bang_theory_8x16_hdtv_x264_lol(self):
result = {
'showName': 'The Big Bang Theory',
'seasonNum': '08',
'episodeNum': '16',
'teamName': 'LOL',
'quality': 'HDTV'
}
self.assertEqual(process.info('The.Big.Bang.Theory.8x16.HDTV.x264-LOL'), result)
def test_the_big_bang_theory_816_hdtv_x264_lol(self):
self.assertEqual(process.info('The.Big.Bang.Theory.816.HDTV.x264-LOL'), None)
class ApplyFuncsTest(unittest.TestCase):
def test_house_of_cards_upper(self):
self.assertEqual(process.applyfuncs('house of cards', ['upper']), 'HOUSE OF CARDS')
def test_the_big_bang_theory_title(self):
self.assertEqual(process.applyfuncs('The BIG bang ThEoRy', ['title']), 'The Big Bang Theory')
def test_the_mentalist_lower_title(self):
self.assertEqual(process.applyfuncs('The Mentalist', ['lower', 'title']), 'The Mentalist')
def test_09_zfone(self):
self.assertEqual(process.applyfuncs('09', ['zfone']), '9')
def test_5_zftwo(self):
self.assertEqual(process.applyfuncs('5', ['zftwo']), '05')
class PrcTest(unittest.TestCase):
def test_banshee_s03e07_killers(self):
original = 'Banshee.S03E07.HDTV.x264-KILLERS[ettv]'
processed = process.prc(original)
self.assertEqual(processed, 'Banshee S03E07 KILLERS')
def test_vikings_s01e01_webdl(self):
original = 'vikings.s01e01.rites.of.passage.720p.web.dl.sujaidr'
processed = process.prc(original, '{showName} S{seasonNum}E{episodeNum} {quality} {teamName}')
self.assertEqual(processed, 'Vikings S01E01 720p WEB DL')
def test_banshee_s03e08_killers(self):
original = 'Banshee.S03E08.HDTV.x264-KILLERS[ettv]'
processed = process.prc(original, '{showName.lower}.{seasonNum.zfone}{episodeNum}.{teamName.lower}')
self.assertEqual(processed, 'banshee.308.killers')
def test_banshee_308_killers(self):
original = 'Banshee.308.HDTV.x264-KILLERS[ettv]'
processed = process.prc(original)
self.assertEqual(processed, None)
if __name__ == '__main__':
unittest.main() | true | true |
f7fd67bb5b4bfcbdd8f4154ce43a64cf21eae0ef | 2,777 | py | Python | tests/test_workers/worker_persistance/test_enrich_cntrb_id.py | k1nty/augur | 2160e1dffbc2ac082f83ffa910057717b15cbde4 | [
"MIT"
] | 26 | 2017-02-27T19:07:40.000Z | 2018-03-21T19:28:54.000Z | tests/test_workers/worker_persistance/test_enrich_cntrb_id.py | RylanChamberlin/augur | 47a8599e694677952a792dbe8783343e12f67d3a | [
"MIT"
] | 83 | 2017-01-20T14:56:01.000Z | 2018-04-11T21:40:43.000Z | tests/test_workers/worker_persistance/test_enrich_cntrb_id.py | RylanChamberlin/augur | 47a8599e694677952a792dbe8783343e12f67d3a | [
"MIT"
] | 51 | 2017-01-16T16:20:02.000Z | 2018-03-27T08:28:31.000Z | #SPDX-License-Identifier: MIT
from tests.test_workers.worker_persistance.util_persistance import *
#WIP
def test_enrich_cntrb_id_standard_input(database_connection, sample_source_data_standard_github_comments, sample_source_data_enriched, sample_source_data_unenriched):
    """enrich_cntrb_id should backfill gh_node_id and gh_avatar_url.

    Seeds the contributors table from the un-enriched fixture records (which
    deliberately omit gh_node_id and gh_avatar_url), runs ``enrich_cntrb_id``
    over standard GitHub comment data, then asserts that every contributor
    row carries non-null enriched fields.
    """
    worker = DummyFullWorker(database_connection)

    # Seed rows without the enriched fields; enrich_cntrb_id is expected to
    # fill gh_node_id (via the action map) and gh_avatar_url.
    cntrb = [
        {
            "cntrb_login": record['login'],
            "gh_user_id": record['id'],
            "gh_login": record['login'],
            "gh_url": record['url'],
            "gh_html_url": record['html_url'],
            "gh_gravatar_id": record['gravatar_id'],
            "gh_followers_url": record['followers_url'],
            "gh_following_url": record['following_url'],
            "gh_gists_url": record['gists_url'],
            "gh_starred_url": record['starred_url'],
            "gh_subscriptions_url": record['subscriptions_url'],
            "gh_organizations_url": record['organizations_url'],
            "gh_repos_url": record['repos_url'],
            "gh_events_url": record['events_url'],
            "gh_received_events_url": record['received_events_url'],
            "gh_type": record['type'],
            "gh_site_admin": record['site_admin'],
            "tool_source": "Test",
            "tool_version": "test_enrich_cntrb_id",
            "data_source": "test_enrich_cntrb_id"
        } for record in sample_source_data_unenriched
    ]
    database_connection.execute(worker.contributors_table.values(cntrb))

    worker.enrich_cntrb_id(
        sample_source_data_standard_github_comments, 'user.login', action_map_additions={
            'insert': {
                'source': ['user.node_id'],
                'augur': ['gh_node_id']
            }
        }, prefix='user.'
    )

    # Verify the enrichment on every stored contributor row.
    avatar_url_sql = s.sql.text("""
    SELECT gh_avatar_url, gh_node_id
    FROM contributors
    """)
    enriched = pd.read_sql(avatar_url_sql, database_connection, params={})

    # Iterate the column values explicitly: iterating a DataFrame directly
    # yields its column *labels*, which made the original check vacuous.
    # NOTE(review): assumes the comment fixture's logins match the seeded
    # contributors so every row gets enriched -- confirm against fixtures.
    for column in ('gh_avatar_url', 'gh_node_id'):
        for value in enriched[column]:
            assert value is not None
| 41.447761 | 166 | 0.64134 |
from tests.test_workers.worker_persistance.util_persistance import *
def test_enrich_cntrb_id_standard_input(database_connection, sample_source_data_standard_github_comments, sample_source_data_enriched, sample_source_data_unenriched):
dummy = DummyFullWorker(database_connection)
cntrb = [
{
"cntrb_login": test_data_not_enriched['login'],
"gh_user_id": test_data_not_enriched['id'],
"gh_login": test_data_not_enriched['login'],
"gh_url": test_data_not_enriched['url'],
"gh_html_url": test_data_not_enriched['html_url'],
"gh_gravatar_id": test_data_not_enriched['gravatar_id'],
"gh_followers_url": test_data_not_enriched['followers_url'],
"gh_following_url": test_data_not_enriched['following_url'],
"gh_gists_url": test_data_not_enriched['gists_url'],
"gh_starred_url": test_data_not_enriched['starred_url'],
"gh_subscriptions_url": test_data_not_enriched['subscriptions_url'],
"gh_organizations_url": test_data_not_enriched['organizations_url'],
"gh_repos_url": test_data_not_enriched['repos_url'],
"gh_events_url": test_data_not_enriched['events_url'],
"gh_received_events_url": test_data_not_enriched['received_events_url'],
"gh_type": test_data_not_enriched['type'],
"gh_site_admin": test_data_not_enriched['site_admin'],
"tool_source": "Test",
"tool_version": "test_enrich_cntrb_id",
"data_source":"test_enrich_cntrb_id"
} for test_data_not_enriched in sample_source_data_unenriched
]
database_connection.execute(dummy.contributors_table.values(cntrb))
gh_merge_fields = ['avatar_url']
augur_merge_fields = ['gh_avatar_url']
dummy.enrich_cntrb_id(
sample_source_data_standard_github_comments, 'user.login', action_map_additions={
'insert': {
'source': ['user.node_id'],
'augur': ['gh_node_id']
}
}, prefix='user.'
)
avatar_url_sql = s.sql.text("""
SELECT gh_avatar_url, gh_node_id
FROM contributors
""")
avatar_url_list = pd.read_sql(avatar_url_sql, database_connection, params={})
for url in avatar_url_list:
assert url != None
return
| true | true |
f7fd67d388f484d7e684f63f878ce69af3a9e8d2 | 2,769 | py | Python | Anti-Phish/antiphish.py | Kixiron/Anti-Phish | 513cbc16729bead0ba4c2e17ec705cdbe949b928 | [
"Apache-2.0"
] | null | null | null | Anti-Phish/antiphish.py | Kixiron/Anti-Phish | 513cbc16729bead0ba4c2e17ec705cdbe949b928 | [
"Apache-2.0"
] | 1 | 2020-07-31T15:48:21.000Z | 2020-07-31T15:48:21.000Z | Anti-Phish/antiphish.py | Kixiron/Anti-Phish | 513cbc16729bead0ba4c2e17ec705cdbe949b928 | [
"Apache-2.0"
] | null | null | null | import argparse
import json
import os
import random
import string
import sys
import requests
# Copyright 2019 Kixiron
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Command-line interface: the target URL is positional; -u/-p name the POST
# form fields the target page expects credentials in.
parser = argparse.ArgumentParser(description="Anti-Phish, an anti-phishing website script")
parser.add_argument('url', help="URL of the target site", type=str)
parser.add_argument('-u', '--username', help="The username field of the target site", type=str)
parser.add_argument('-p', '--password', help="The password field of the target site", type=str)
parser.add_argument('-dl', '--domainlist', help="The email domain list to choose", type=bool)
args = parser.parse_args()
# NOTE(review): 'url' is a required positional argument, so argparse already
# rejects a missing value before this check can run.
if not args.url:
    print("Target URL required!")
    parser.print_help()
    sys.exit(1)
if not args.username:
    print("Username field required!")
    parser.print_help()
    sys.exit(1)
if not args.password:
    print("Password field required!")
    parser.print_help()
    sys.exit(1)
# Choose the e-mail domain pool; --domainlist selects the larger list.
if not args.domainlist:
    domain = json.loads(open('data/domains.json').read())
    domains = False
else:
    domain = json.loads(open('data/alldomains.json').read())
    domains = True
url = args.url
formusername = args.username
formpassword = args.password
# Character pool for generated passwords (letters, digits, punctuation).
chars = string.ascii_letters + string.digits + '!@#$%^&*()_-+=\|?>.<,'
random.seed()
# Pool of first names used to build plausible-looking e-mail addresses.
name = json.loads(open('data/names.json').read())
entry_num = 0
# Submit randomly generated credentials until interrupted with Ctrl-C.
while(1):
    try:
        entry_num += 1
        # name + 4 digits + domain, e.g. "jane1234@example.com".
        nameAdd = ''.join(random.choice(name).lower())
        digitAdd = ''.join(random.choice(string.digits) for i in range(0, 4))
        domainAdd = ''.join(random.choice(domain))
        username = nameAdd + digitAdd + domainAdd
        password = ''.join(random.choice(chars) for i in range(0, 20))
        requests.post(url, allow_redirects=False, data={
            formusername : username,
            formpassword : password
        })
        print("Sending username {} and password {} | Entry #{}".format(username, password, entry_num))
    except KeyboardInterrupt:
        # Ctrl-C: report totals and the equivalent command line, then exit.
        print("You sent {} total requests to {}".format(entry_num, url))
        print("Command used: py antiphish.py {} -u {} -p {} -dl {}".format(url, formusername, formpassword, domains))
        sys.exit()
| 32.576471 | 118 | 0.661611 | import argparse
import json
import os
import random
import string
import sys
import requests
parser = argparse.ArgumentParser(description="Anti-Phish, an anti-phishing website script")
parser.add_argument('url', help="URL of the target site", type=str)
parser.add_argument('-u', '--username', help="The username field of the target site", type=str)
parser.add_argument('-p', '--password', help="The password field of the target site", type=str)
parser.add_argument('-dl', '--domainlist', help="The email domain list to choose", type=bool)
args = parser.parse_args()
if not args.url:
print("Target URL required!")
parser.print_help()
sys.exit(1)
if not args.username:
print("Username field required!")
parser.print_help()
sys.exit(1)
if not args.password:
print("Password field required!")
parser.print_help()
sys.exit(1)
if not args.domainlist:
domain = json.loads(open('data/domains.json').read())
domains = False
else:
domain = json.loads(open('data/alldomains.json').read())
domains = True
url = args.url
formusername = args.username
formpassword = args.password
chars = string.ascii_letters + string.digits + '!@#$%^&*()_-+=\|?>.<,'
random.seed()
name = json.loads(open('data/names.json').read())
entry_num = 0
while(1):
try:
entry_num += 1
nameAdd = ''.join(random.choice(name).lower())
digitAdd = ''.join(random.choice(string.digits) for i in range(0, 4))
domainAdd = ''.join(random.choice(domain))
username = nameAdd + digitAdd + domainAdd
password = ''.join(random.choice(chars) for i in range(0, 20))
requests.post(url, allow_redirects=False, data={
formusername : username,
formpassword : password
})
print("Sending username {} and password {} | Entry #{}".format(username, password, entry_num))
except KeyboardInterrupt:
print("You sent {} total requests to {}".format(entry_num, url))
print("Command used: py antiphish.py {} -u {} -p {} -dl {}".format(url, formusername, formpassword, domains))
sys.exit()
| true | true |
f7fd684d845d46ea25ad02474db4753890c2e3cc | 171 | py | Python | src/prefect/environments/storage/azure.py | vnsn/prefect | 972345597975155dba9e3232bcc430d0a6258a37 | [
"Apache-2.0"
] | 1 | 2021-05-12T12:47:12.000Z | 2021-05-12T12:47:12.000Z | src/prefect/environments/storage/azure.py | vnsn/prefect | 972345597975155dba9e3232bcc430d0a6258a37 | [
"Apache-2.0"
] | 7 | 2021-06-26T08:05:20.000Z | 2022-03-26T08:05:32.000Z | src/prefect/environments/storage/azure.py | vnsn/prefect | 972345597975155dba9e3232bcc430d0a6258a37 | [
"Apache-2.0"
] | 1 | 2021-10-16T08:33:56.000Z | 2021-10-16T08:33:56.000Z | from prefect.storage import Azure as _Azure
from prefect.environments.storage.base import _DeprecatedStorageMixin
class Azure(_Azure, _DeprecatedStorageMixin):
    """Deprecated alias for ``prefect.storage.Azure``.

    Kept so imports from the old ``prefect.environments.storage`` path keep
    working; all behavior comes from the parent classes.
    """
    pass
| 24.428571 | 69 | 0.836257 | from prefect.storage import Azure as _Azure
from prefect.environments.storage.base import _DeprecatedStorageMixin
class Azure(_Azure, _DeprecatedStorageMixin):
pass
| true | true |
f7fd69d629599e31055b1c49074d2055aaf475d6 | 2,037 | py | Python | models/map_model.py | JannerM/spatial-reasoning | e163003a33177e41ca02d5feefee3fdfca5ba154 | [
"MIT"
] | 54 | 2017-07-14T01:08:57.000Z | 2021-07-09T12:46:57.000Z | models/map_model.py | jannerm/spatial-reasoning | e163003a33177e41ca02d5feefee3fdfca5ba154 | [
"MIT"
] | null | null | null | models/map_model.py | jannerm/spatial-reasoning | e163003a33177e41ca02d5feefee3fdfca5ba154 | [
"MIT"
] | 16 | 2017-07-16T03:18:19.000Z | 2021-05-28T13:04:12.000Z | import sys, math
import numpy as np
from tqdm import tqdm, trange
import torch, torch.nn as nn, torch.nn.functional as F
import torch.optim as optim
'''
State observations are two-channel images
with 0: puddle, 1: grass, 2: agent
'''
class MapModel(nn.Module):
def __init__(self, vocab_size, embed_dim, out_dim):
super(MapModel, self).__init__()
self.embed_dim = embed_dim
self.embed = nn.Embedding(vocab_size, embed_dim)
self.conv1 = nn.Conv2d(embed_dim, 3, kernel_size=3)
self.conv2 = nn.Conv2d(3, 6, kernel_size=3)
self.conv3 = nn.Conv2d(6,12, kernel_size=3)
# self.conv4 = nn.Conv2d(12,12, kernel_size=5)
self.fc1 = nn.Linear(192, out_dim)
def forward(self, x):
reshape = []
for dim in x.size(): reshape.append(dim)
reshape.append(self.embed_dim)
## reshape to vector
x = x.view(-1)
## get embeddings
x = self.embed(x)
## reshape to batch x channels x M x N x embed_dim
x = x.view(*reshape)
## sum over channels in input
x = x.sum(1, keepdim=True)
## reshape to batch x embed_dim x M x N
## (treats embedding dims as channels)
x = x.transpose(1,-1).squeeze()
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = x.view(-1, 192)
x = self.fc1(x)
return x
if __name__ == '__main__':
    # Smoke test: push a small batch of two-channel 10x10 grids through the
    # model and print the input and output shapes. Uses print() calls (valid
    # in both Python 2 and 3) instead of Python-2-only print statements,
    # which made the file a SyntaxError under Python 3.
    from torch.autograd import Variable

    vocab_size = 10
    emb_dim = 3
    rank = 7
    phi = MapModel(vocab_size, emb_dim, rank)

    inp = torch.LongTensor(5, 2, 10, 10).zero_()
    inp[0][0][0][0] = 1
    inp[1][0][0][2] = 1
    print(inp.size())

    out = phi.forward(Variable(inp))
    print(out.data.size())
| 24.841463 | 59 | 0.581247 | import sys, math
import numpy as np
from tqdm import tqdm, trange
import torch, torch.nn as nn, torch.nn.functional as F
import torch.optim as optim
'''
State observations are two-channel images
with 0: puddle, 1: grass, 2: agent
'''
class MapModel(nn.Module):
def __init__(self, vocab_size, embed_dim, out_dim):
super(MapModel, self).__init__()
self.embed_dim = embed_dim
self.embed = nn.Embedding(vocab_size, embed_dim)
self.conv1 = nn.Conv2d(embed_dim, 3, kernel_size=3)
self.conv2 = nn.Conv2d(3, 6, kernel_size=3)
self.conv3 = nn.Conv2d(6,12, kernel_size=3)
self.fc1 = nn.Linear(192, out_dim)
def forward(self, x):
reshape = []
for dim in x.size(): reshape.append(dim)
reshape.append(self.embed_dim)
(-1)
f.embed(x)
onv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = x.view(-1, 192)
x = self.fc1(x)
return x
if __name__ == '__main__':
from torch.autograd import Variable
vocab_size = 10
emb_dim = 3
rank = 7
phi = MapModel(vocab_size, emb_dim, rank)
inp = torch.LongTensor(5,2,10,10).zero_()
inp[0][0][0][0]=1
inp[1][0][0][2]=1
print inp.size()
inp = Variable(inp)
out = phi.forward(inp)
out = out.data
print out.size()
| false | true |
f7fd6b16610ed332a40ded054de3263b7b004974 | 2,217 | py | Python | core/views.py | Cauan2305/Blog | 2817b1b29c2cb8a859ac1d154a574b20cef187fe | [
"MIT"
] | null | null | null | core/views.py | Cauan2305/Blog | 2817b1b29c2cb8a859ac1d154a574b20cef187fe | [
"MIT"
] | null | null | null | core/views.py | Cauan2305/Blog | 2817b1b29c2cb8a859ac1d154a574b20cef187fe | [
"MIT"
] | null | null | null | from core.models import Comentarios, Publicação
from django.shortcuts import redirect, render,HttpResponse,get_object_or_404,HttpResponseRedirect
from django.views.generic.base import TemplateView
from django.views.generic.edit import FormView
from .forms import ComentariosForm
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import AnonymousUser
# from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy,reverse
from django.contrib import messages
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
#View Index com exibição dos titulos
def index(request):
    """Render the home page: every post, paginated two per page."""
    all_posts = Publicação.objects.all()
    page_number = request.GET.get('page')
    page_obj = Paginator(all_posts, 2).get_page(page_number)
    # 'posts' is the full queryset, 'post' the current page of it.
    return render(request, 'index.html', {
        'posts': all_posts,
        'post': page_obj,
    })
#
# Post Post Escolhido no index com id do post e um form dos comentarios daquele
# Post detail view: shows one post and handles its comment form.
def post(request,id):
    """Display the post with the given id; on POST, create a comment for it.

    Fix: the original assigned the form *class* (``ComentariosForm``) instead of
    an unbound instance for GET requests, which does not render correctly in
    templates. Also reuses the already-fetched post instead of querying twice.
    """
    post = Publicação.objects.get(id=id)
    form = ComentariosForm()
    if request.method == 'POST':
        form = ComentariosForm(request.POST)
        if form.is_valid():
            obj = form.save(commit=False)
            obj.post = post  # attach the new comment to this post
            obj.save()
            # Redirect back to this view so a refresh does not repost the form.
            return redirect('post', id=post.id)
    context = {
        'post': post,
        'form': form,
    }
    return render(request, 'post.html', context)
# Sistem of Like
# Like toggle for a post.
@login_required
def LikeView(request, id):
    """Toggle the current user's like on post ``id``, then redirect to the post.

    Fixes two bugs in the original:
    - ``post_obj.like.remove()`` was called with no argument, which is a no-op
      for a Django many-to-many manager, so likes could never be removed.
    - The redirect referenced ``post_id``, a variable only bound inside the
      POST branch, raising NameError on any non-POST request.
    """
    user = request.user
    if request.method == 'POST':
        post_obj = Publicação.objects.get(id=id)
        if user in post_obj.like.all():
            post_obj.like.remove(user)
        else:
            post_obj.like.add(user)
    # Use the id parameter directly so GET requests also redirect cleanly.
    return redirect('post', id=id)
def about(request):
    """Render the static about page."""
    return render(request, 'about.html')
def contato(request):
return render(request,'contact.html') | 24.633333 | 97 | 0.670726 | from core.models import Comentarios, Publicação
from django.shortcuts import redirect, render,HttpResponse,get_object_or_404,HttpResponseRedirect
from django.views.generic.base import TemplateView
from django.views.generic.edit import FormView
from .forms import ComentariosForm
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import AnonymousUser
from django.urls import reverse_lazy,reverse
from django.contrib import messages
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
def index(request):
posts=Publicação.objects.all()
paginator=Paginator(posts,2)
page_number = request.GET.get('page')
page_obj = paginator.get_page(page_number)
context={
'posts':posts,
'post':page_obj,
}
return render(request,'index.html',context)
def post(request,id):
form=ComentariosForm
post = Publicação.objects.get(id=id)
if request.method=='POST':
form=ComentariosForm(request.POST)
if form.is_valid():
obj=form.save(commit=False)
obj.post=post
obj.save()
return redirect('post',id=post.id)
context={
'post':Publicação.objects.get(id=id),
'form':form,
}
return render(request,'post.html',context)
@login_required
def LikeView(request,id):
user=request.user
if request.method=='POST':
post_id=Publicação.objects.get(id=id)
post_obj=Publicação.objects.get(id=id)
if user in post_obj.like.all():
post_obj.like.remove()
else:
post_obj.like.add(user)
context= {
'cont_likes':Publicação.cont_like
}
return redirect('post',id=post_id.id)
def about(request):
return render(request,'about.html')
def contato(request):
return render(request,'contact.html') | true | true |
f7fd6be5f1e259da23392a17f44d60424f181f0a | 1,008 | py | Python | solutions/p034.py | xianlinfeng/project_euler_python3 | 77eca44eb2b1d13bc70d6dc0258b737449d43a23 | [
"MIT"
] | null | null | null | solutions/p034.py | xianlinfeng/project_euler_python3 | 77eca44eb2b1d13bc70d6dc0258b737449d43a23 | [
"MIT"
] | null | null | null | solutions/p034.py | xianlinfeng/project_euler_python3 | 77eca44eb2b1d13bc70d6dc0258b737449d43a23 | [
"MIT"
] | null | null | null | #
# Solution to Project Euler problem 34
# Copyright (c) Project Nayuki. All rights reserved.
#
# https://www.nayuki.io/page/project-euler-solutions
# https://github.com/nayuki/Project-Euler-solutions
#
import math
def compute():
    """Sum every number equal to the sum of the factorials of its digits.

    1 and 2 are excluded per the problem statement; 10**7 is a safe upper
    bound because an n-digit number (n >= 8) always exceeds n * 9!.
    """
    total = 0
    for candidate in range(3, 10000000):
        if factorial_digit_sum(candidate) == candidate:
            total += candidate
    return str(total)
def factorial_digit_sum(n):
    """Return the sum of the factorials of the decimal digits of ``n``.

    Processes four digits at a time using the precomputed module-level
    lookup tables, which keeps the hot loop in ``compute`` fast.
    """
    total = 0
    while n >= 10000:
        # Middle/low 4-digit chunks must count their leading zeros as 0! each.
        n, low = divmod(n, 10000)
        total += FACTORIAL_DIGITS_SUM_WITH_LEADING_ZEROS[low]
    # The most significant chunk has no implicit leading zeros.
    return total + FACTORIAL_DIGITS_SUM_WITHOUT_LEADING_ZEROS[n]
# Lookup tables mapping every value in [0, 9999] to the sum of its digits'
# factorials. WITHOUT_LEADING_ZEROS treats the index as a plain number
# (e.g. 7 -> 7!), while WITH_LEADING_ZEROS zero-pads to four digits
# (e.g. 7 -> "0007" -> 3*0! + 7!), as needed for non-leading 4-digit chunks.
FACTORIAL_DIGITS_SUM_WITHOUT_LEADING_ZEROS = [sum(math.factorial(int(c)) for c in str(i)) for i in range(10000)]
FACTORIAL_DIGITS_SUM_WITH_LEADING_ZEROS = [sum(math.factorial(int(c)) for c in str(i).zfill(4)) for i in range(10000)]


if __name__ == "__main__":
    print(compute())
| 30.545455 | 118 | 0.722222 |
import math
def compute():
ans = sum(i for i in range(3, 10000000) if i == factorial_digit_sum(i))
return str(ans)
def factorial_digit_sum(n):
result = 0
while n >= 10000:
result += FACTORIAL_DIGITS_SUM_WITH_LEADING_ZEROS[n % 10000]
n //= 10000
return result + FACTORIAL_DIGITS_SUM_WITHOUT_LEADING_ZEROS[n]
FACTORIAL_DIGITS_SUM_WITHOUT_LEADING_ZEROS = [sum(math.factorial(int(c)) for c in str(i)) for i in range(10000)]
FACTORIAL_DIGITS_SUM_WITH_LEADING_ZEROS = [sum(math.factorial(int(c)) for c in str(i).zfill(4)) for i in range(10000)]
if __name__ == "__main__":
print(compute())
| true | true |
f7fd6c9517647c70d407dac0ca98e7795d15eacf | 2,768 | py | Python | sdk/python/tests/cli/test_online_retrieval.py | mrzzy/feast | 960f1ed8ba9cb76f1f82a6df54d8317cc7447a03 | [
"Apache-2.0"
] | null | null | null | sdk/python/tests/cli/test_online_retrieval.py | mrzzy/feast | 960f1ed8ba9cb76f1f82a6df54d8317cc7447a03 | [
"Apache-2.0"
] | null | null | null | sdk/python/tests/cli/test_online_retrieval.py | mrzzy/feast | 960f1ed8ba9cb76f1f82a6df54d8317cc7447a03 | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
import pytest
from feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto
from feast.protos.feast.types.Value_pb2 import Value as ValueProto
from tests.cli.utils import CliRunner, get_example_repo
class TestOnlineRetrieval:
    def test_basic(self) -> None:
        """Write one entity row into two feature views, then read it back online.

        Also checks that an unknown entity yields None values and that an
        unknown feature view reference raises ValueError.
        """
        runner = CliRunner()
        with runner.local_repo(get_example_repo("example_feature_repo_1.py")) as store:
            registry = store._get_registry()
            provider = store._get_provider()

            driver_key = EntityKeyProto(
                entity_names=["driver"], entity_values=[ValueProto(int64_val=1)]
            )

            # Seed both feature views with a (lat, lon) row for driver 1.
            for view_name, lat, lon in (
                ("driver_locations", 0.1, "1.0"),
                ("driver_locations_2", 2.0, "2.0"),
            ):
                view = registry.get_feature_view(
                    project=store.config.project, name=view_name
                )
                now = datetime.utcnow()
                provider.online_write_batch(
                    project=store.config.project,
                    table=view,
                    data=[
                        (
                            driver_key,
                            {
                                "lat": ValueProto(double_val=lat),
                                "lon": ValueProto(string_val=lon),
                            },
                            now,
                            now,
                        )
                    ],
                )

            # Known entity resolves to the written values; unknown entity -> None.
            result = store.get_online_features(
                feature_refs=["driver_locations:lon", "driver_locations_2:lon"],
                entity_rows=[{"driver": 1}, {"driver": 123}],
            ).to_dict()

            assert "driver_locations:lon" in result
            assert result["driver_locations:lon"] == ["1.0", None]
            assert result["driver_locations_2:lon"] == ["2.0", None]

            # A reference to a nonexistent feature view must be rejected.
            with pytest.raises(ValueError):
                store.get_online_features(
                    feature_refs=["driver_locations_bad:lon"],
                    entity_rows=[{"driver": 1}],
                )
| 35.948052 | 87 | 0.493497 | from datetime import datetime
import pytest
from feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto
from feast.protos.feast.types.Value_pb2 import Value as ValueProto
from tests.cli.utils import CliRunner, get_example_repo
class TestOnlineRetrieval:
def test_basic(self) -> None:
runner = CliRunner()
with runner.local_repo(get_example_repo("example_feature_repo_1.py")) as store:
registry = store._get_registry()
table = registry.get_feature_view(
project=store.config.project, name="driver_locations"
)
table_2 = registry.get_feature_view(
project=store.config.project, name="driver_locations_2"
)
provider = store._get_provider()
entity_key = EntityKeyProto(
entity_names=["driver"], entity_values=[ValueProto(int64_val=1)]
)
provider.online_write_batch(
project=store.config.project,
table=table,
data=[
(
entity_key,
{
"lat": ValueProto(double_val=0.1),
"lon": ValueProto(string_val="1.0"),
},
datetime.utcnow(),
datetime.utcnow(),
)
],
)
provider.online_write_batch(
project=store.config.project,
table=table_2,
data=[
(
entity_key,
{
"lat": ValueProto(double_val=2.0),
"lon": ValueProto(string_val="2.0"),
},
datetime.utcnow(),
datetime.utcnow(),
)
],
)
result = store.get_online_features(
feature_refs=["driver_locations:lon", "driver_locations_2:lon"],
entity_rows=[{"driver": 1}, {"driver": 123}],
)
assert "driver_locations:lon" in result.to_dict()
assert result.to_dict()["driver_locations:lon"] == ["1.0", None]
assert result.to_dict()["driver_locations_2:lon"] == ["2.0", None]
with pytest.raises(ValueError):
store.get_online_features(
feature_refs=["driver_locations_bad:lon"],
entity_rows=[{"driver": 1}],
)
| true | true |
f7fd6d633a11d387307ac76fb81bd8d8ecdae2e7 | 439 | py | Python | WRA_and_wikipedia.py | crayzeerr/VirtualAssistant | d45f305f3ee418c8e3de04901285b08b141e97d6 | [
"MIT"
] | 1 | 2021-08-08T02:13:13.000Z | 2021-08-08T02:13:13.000Z | WRA_and_wikipedia.py | crayzeerr/VirtualAssistant | d45f305f3ee418c8e3de04901285b08b141e97d6 | [
"MIT"
] | null | null | null | WRA_and_wikipedia.py | crayzeerr/VirtualAssistant | d45f305f3ee418c8e3de04901285b08b141e97d6 | [
"MIT"
] | null | null | null | import wikipedia
import wolframalpha
while True:
input = raw_input("Q: ")
try:
#wolframalpha
app_id = "*******************"
client = wolframalpha.Client(app_id)
result = client.query(input)
answer = next(result.results).text
print answer
except:
#wikipedia
print wikipedia.summary(input, sentences=5)
| 25.823529 | 56 | 0.498861 | import wikipedia
import wolframalpha
while True:
input = raw_input("Q: ")
try:
app_id = "*******************"
client = wolframalpha.Client(app_id)
result = client.query(input)
answer = next(result.results).text
print answer
except:
print wikipedia.summary(input, sentences=5)
| false | true |
f7fd6d8669fc5c556757d81114622cf3df76991e | 1,804 | py | Python | tests/test_postgres_discover_runner.py | wonkybream/django-rdtwt | 5bf5ad5f0927e177b2adc49a41681b99b6af397d | [
"MIT"
] | 2 | 2021-11-25T09:02:50.000Z | 2022-01-09T14:52:04.000Z | tests/test_postgres_discover_runner.py | wonkybream/django-rdtwt | 5bf5ad5f0927e177b2adc49a41681b99b6af397d | [
"MIT"
] | 1 | 2021-12-09T15:54:24.000Z | 2021-12-09T15:54:24.000Z | tests/test_postgres_discover_runner.py | wonkybream/django-rdtwt | 5bf5ad5f0927e177b2adc49a41681b99b6af397d | [
"MIT"
] | 2 | 2022-01-09T15:03:24.000Z | 2022-01-09T15:03:38.000Z | from unittest import TestCase
from unittest.mock import Mock, patch
from rdtwt.runner import PostgresDiscoverRunner
class SettingsStub:
    """Minimal stand-in for the Django settings module used by the runner tests."""

    # Class-level (shared) attributes mirror module-level settings on purpose:
    # the runner mutates DATABASES in place and the tests read it back.
    DATABASES = {"default": {"HOST": "127.0.0.1", "PORT": "5432"}}
    RDTWT_POSTGRESQL_IMAGE = "postgres:latest"

    def get_host_ip(self):
        """Return the default database's configured host."""
        default_db = self.DATABASES["default"]
        return default_db["HOST"]

    def get_bind_port(self):
        """Return the default database's configured port."""
        default_db = self.DATABASES["default"]
        return default_db["PORT"]
class PostgresDiscoverRunnerTests(TestCase):
    """Unit tests for PostgresDiscoverRunner's container lifecycle hooks."""

    @staticmethod
    def _make_runner():
        """Return (runner, mock container) with the mock wired into the runner."""
        container = Mock()
        runner = PostgresDiscoverRunner()
        runner._postgres_container = container
        return runner, container

    @patch("rdtwt.runner.settings", SettingsStub())
    def test_setup_container_overwrites_default_database_host(self):
        runner, container = self._make_runner()
        container.get_container_host_ip.return_value = "container-ip"

        runner._setup_container()

        container.start.assert_called()
        self.assertEqual(SettingsStub().get_host_ip(), "container-ip")

    @patch("rdtwt.runner.settings", SettingsStub())
    def test_setup_container_overwrites_default_database_port(self):
        runner, container = self._make_runner()
        container.get_exposed_port.return_value = "bind-port"

        runner._setup_container()

        container.start.assert_called()
        self.assertEqual(SettingsStub().get_bind_port(), "bind-port")

    @patch("rdtwt.runner.settings", SettingsStub())
    def test_teardown_container_stops_postgres_container(self):
        runner, container = self._make_runner()

        runner._teardown_container()

        container.stop.assert_called()
| 34.037736 | 74 | 0.727827 | from unittest import TestCase
from unittest.mock import Mock, patch
from rdtwt.runner import PostgresDiscoverRunner
class SettingsStub:
DATABASES = {"default": {"HOST": "127.0.0.1", "PORT": "5432"}}
RDTWT_POSTGRESQL_IMAGE = "postgres:latest"
def get_host_ip(self):
return self.DATABASES["default"]["HOST"]
def get_bind_port(self):
return self.DATABASES["default"]["PORT"]
class PostgresDiscoverRunnerTests(TestCase):
@patch("rdtwt.runner.settings", SettingsStub())
def test_setup_container_overwrites_default_database_host(self):
container_mock = Mock()
postgres_runner = PostgresDiscoverRunner()
postgres_runner._postgres_container = container_mock
container_mock.get_container_host_ip.return_value = "container-ip"
postgres_runner._setup_container()
container_mock.start.assert_called()
self.assertEqual(SettingsStub().get_host_ip(), "container-ip")
@patch("rdtwt.runner.settings", SettingsStub())
def test_setup_container_overwrites_default_database_port(self):
container_mock = Mock()
postgres_runner = PostgresDiscoverRunner()
postgres_runner._postgres_container = container_mock
container_mock.get_exposed_port.return_value = "bind-port"
postgres_runner._setup_container()
container_mock.start.assert_called()
self.assertEqual(SettingsStub().get_bind_port(), "bind-port")
@patch("rdtwt.runner.settings", SettingsStub())
def test_teardown_container_stops_postgres_container(self):
container_mock = Mock()
postgres_runner = PostgresDiscoverRunner()
postgres_runner._postgres_container = container_mock
postgres_runner._teardown_container()
container_mock.stop.assert_called()
| true | true |
f7fd6ddfe2e47cbbb1b6edbf42cb88736922ebf0 | 27,427 | py | Python | src/transformers/modeling_tf_ctrl.py | JonathanSum/transformers | 27b68f95e4585713b575603545cf520ab9621621 | [
"Apache-2.0"
] | 100 | 2020-01-30T08:14:25.000Z | 2022-03-30T08:59:33.000Z | src/transformers/modeling_tf_ctrl.py | JonathanSum/transformers | 27b68f95e4585713b575603545cf520ab9621621 | [
"Apache-2.0"
] | 4 | 2021-04-30T21:42:40.000Z | 2022-02-10T05:15:45.000Z | src/transformers/modeling_tf_ctrl.py | JonathanSum/transformers | 27b68f95e4585713b575603545cf520ab9621621 | [
"Apache-2.0"
] | 15 | 2020-04-13T22:56:27.000Z | 2022-03-10T02:44:26.000Z | # coding=utf-8
# Copyright 2018 Salesforce and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 CTRL model."""
import logging
import numpy as np
import tensorflow as tf
from .configuration_ctrl import CTRLConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_tf_utils import TFPreTrainedModel, TFSharedEmbeddings, keras_serializable, shape_list
from .tokenization_utils import BatchEncoding
logger = logging.getLogger(__name__)
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_MAP = {"ctrl": "https://s3.amazonaws.com/models.huggingface.co/bert/ctrl-tf_model.h5"}
def angle_defn(pos, i, d_model_size):
    """Return sinusoid angle(s) for position(s) ``pos`` at embedding index(es) ``i``.

    Index pairs (2j, 2j+1) share the same rate 1 / 10000**(2j / d_model_size),
    matching the sinusoidal encoding of "Attention Is All You Need".
    """
    exponent = (2 * (i // 2)) / np.float32(d_model_size)
    rates = 1 / np.power(10000, exponent)
    return pos * rates
def positional_encoding(position, d_model_size):
    """Return a float32 (position, d_model_size) table of sinusoidal encodings."""
    positions = np.arange(position)[:, np.newaxis]
    dims = np.arange(d_model_size)[np.newaxis, :]
    angle_rads = angle_defn(positions, dims, d_model_size)

    # Even embedding indices feed sin, odd feed cos; the halves are then
    # concatenated (first half sin, second half cos).
    table = np.concatenate([np.sin(angle_rads[:, 0::2]), np.cos(angle_rads[:, 1::2])], axis=-1)
    return tf.cast(table, dtype=tf.float32)
def scaled_dot_product_attention(q, k, v, mask, attention_mask=None, head_mask=None):
    """Scaled dot-product attention with optional causal, padding and head masks.

    Returns ``(context, attention_weights)``.
    """
    logits = tf.matmul(q, k, transpose_b=True)
    key_depth = tf.cast(shape_list(k)[-1], tf.float32)
    logits = logits / tf.math.sqrt(key_depth)

    # Causal mask: 1.0 at disallowed positions, scaled to a large negative logit.
    if mask is not None:
        logits += mask * -1e4
    # Padding mask is already additive (0.0 keep / -10000.0 masked).
    if attention_mask is not None:
        logits = logits + attention_mask

    weights = tf.nn.softmax(logits, axis=-1)
    # Optionally zero out entire heads.
    if head_mask is not None:
        weights = weights * head_mask

    return tf.matmul(weights, v), weights
class TFMultiHeadAttention(tf.keras.layers.Layer):
    """Multi-head attention for CTRL.

    Projects inputs to per-head queries/keys/values, runs scaled dot-product
    attention, concatenates the heads and re-projects to ``d_model_size``.
    Supports a cached ``layer_past`` of previous keys/values for fast decoding.
    """

    def __init__(self, d_model_size, num_heads, output_attentions=False, **kwargs):
        super().__init__(**kwargs)
        self.output_attentions = output_attentions
        self.num_heads = num_heads
        self.d_model_size = d_model_size
        # Per-head width; assumes d_model_size is divisible by num_heads.
        self.depth = int(d_model_size / self.num_heads)
        # Layer names ("Wq", "Wk", "Wv", "dense") must stay fixed: pretrained
        # checkpoints are loaded by variable name.
        self.Wq = tf.keras.layers.Dense(d_model_size, name="Wq")
        self.Wk = tf.keras.layers.Dense(d_model_size, name="Wk")
        self.Wv = tf.keras.layers.Dense(d_model_size, name="Wv")
        self.dense = tf.keras.layers.Dense(d_model_size, name="dense")

    def split_into_heads(self, x, batch_size):
        # (batch, seq, d_model) -> (batch, num_heads, seq, depth)
        x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
        return tf.transpose(x, perm=[0, 2, 1, 3])

    def call(self, inputs, training=False):
        """Run attention.

        ``inputs`` is the list [v, k, q, causal mask, layer_past, additive
        attention_mask, head_mask, use_cache]. Returns (output, present[, attn]).
        """
        v, k, q, mask, layer_past, attention_mask, head_mask, use_cache = inputs
        batch_size = shape_list(q)[0]
        q = self.Wq(q)
        k = self.Wk(k)
        v = self.Wv(v)
        q = self.split_into_heads(q, batch_size)
        k = self.split_into_heads(k, batch_size)
        v = self.split_into_heads(v, batch_size)
        # Prepend cached keys/values along the sequence axis for incremental decoding.
        if layer_past is not None:
            past_key, past_value = tf.unstack(layer_past, axis=0)
            k = tf.concat((past_key, k), axis=-2)
            v = tf.concat((past_value, v), axis=-2)

        # to cope with keras serialization
        # we need to cast `use_cache` to correct bool
        # if it is a tensor
        if tf.is_tensor(use_cache):
            if hasattr(use_cache, "numpy"):
                use_cache = bool(use_cache.numpy())
            else:
                use_cache = True

        # `present` carries this step's (k, v) so the caller can pass it back
        # as `layer_past` on the next decoding step.
        if use_cache is True:
            present = tf.stack((k, v), axis=0)
        else:
            present = (None,)

        output = scaled_dot_product_attention(q, k, v, mask, attention_mask, head_mask)
        # (batch, heads, seq, depth) -> (batch, seq, heads, depth) -> (batch, seq, d_model)
        scaled_attention = tf.transpose(output[0], perm=[0, 2, 1, 3])
        attn = output[1]
        original_size_attention = tf.reshape(scaled_attention, (batch_size, -1, self.d_model_size))
        output = self.dense(original_size_attention)

        outputs = (output, present)
        if self.output_attentions:
            outputs = outputs + (attn,)
        return outputs
def point_wise_feed_forward_network(d_model_size, dff, name=""):
    """Build the position-wise MLP: Dense(dff, relu) followed by Dense(d_model_size).

    The inner layer names "0" and "2" and the Sequential name "ffn" match the
    pretrained checkpoint layout and must not change.
    NOTE(review): the ``name`` parameter is accepted but unused, as in the
    original implementation.
    """
    inner = tf.keras.layers.Dense(dff, activation="relu", name="0")
    outer = tf.keras.layers.Dense(d_model_size, name="2")
    return tf.keras.Sequential([inner, outer], name="ffn")
class TFEncoderLayer(tf.keras.layers.Layer):
    """One CTRL transformer block: pre-norm self-attention + pre-norm FFN,
    each with dropout and a residual connection.
    """

    def __init__(
        self, d_model_size, num_heads, dff, rate=0.1, layer_norm_epsilon=1e-6, output_attentions=False, **kwargs
    ):
        super().__init__(**kwargs)
        # Sub-layer names are fixed for pretrained-weight loading.
        self.multi_head_attention = TFMultiHeadAttention(
            d_model_size, num_heads, output_attentions, name="multi_head_attention"
        )
        self.ffn = point_wise_feed_forward_network(d_model_size, dff, name="ffn")
        self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name="layernorm1")
        self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name="layernorm2")
        self.dropout1 = tf.keras.layers.Dropout(rate)
        self.dropout2 = tf.keras.layers.Dropout(rate)

    def call(self, inputs, training=False):
        """``inputs`` is [x, causal mask, layer_past, attention_mask, head_mask,
        use_cache]; returns (hidden_states, present[, attn])."""
        x, mask, layer_past, attention_mask, head_mask, use_cache = inputs
        # Pre-norm: normalize before attention, then add the residual.
        normed = self.layernorm1(x)
        attn_outputs = self.multi_head_attention(
            [normed, normed, normed, mask, layer_past, attention_mask, head_mask, use_cache], training=training
        )
        attn_output = attn_outputs[0]
        attn_output = self.dropout1(attn_output, training=training)
        out1 = x + attn_output
        # Second sub-layer: pre-norm feed-forward with its own residual.
        out2 = self.layernorm2(out1)
        ffn_output = self.ffn(out2)
        ffn_output = self.dropout2(ffn_output, training=training)
        out2 = out1 + ffn_output
        # Forward the attention layer's extra outputs (present, optional attn).
        outputs = (out2,) + attn_outputs[1:]
        return outputs
@keras_serializable
class TFCTRLMainLayer(tf.keras.layers.Layer):
    """Core CTRL transformer: token + sinusoidal position embeddings feeding a
    stack of pre-norm encoder layers, with optional key/value caching."""

    config_class = CTRLConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.output_hidden_states = config.output_hidden_states
        self.output_attentions = config.output_attentions
        self.d_model_size = config.n_embd
        self.num_layers = config.n_layer
        # Fixed sinusoidal position table (not a trainable weight).
        self.pos_encoding = positional_encoding(config.n_positions, self.d_model_size)
        # Shared input embedding (also used as output embedding by the LM head).
        self.w = TFSharedEmbeddings(
            config.vocab_size, config.n_embd, initializer_range=config.initializer_range, name="w"
        )
        self.dropout = tf.keras.layers.Dropout(config.embd_pdrop)
        self.h = [
            TFEncoderLayer(
                config.n_embd,
                config.n_head,
                config.dff,
                config.resid_pdrop,
                config.layer_norm_epsilon,
                config.output_attentions,
                name="h_._{}".format(i),
            )
            for i in range(config.n_layer)
        ]
        self.layernorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="layernorm")

    def get_input_embeddings(self):
        return self.w

    def _resize_token_embeddings(self, new_num_tokens):
        # Not supported for this model.
        raise NotImplementedError

    def _prune_heads(self, heads_to_prune):
        """ Prunes heads of the model.
            heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        raise NotImplementedError

    def call(
        self,
        inputs,
        past=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        use_cache=True,
        training=False,
    ):
        """Run the transformer stack.

        ``inputs`` may be the input_ids tensor, a positional list/tuple, or a
        dict/BatchEncoding carrying any of the keyword arguments below; explicit
        keyword arguments act as defaults.
        """
        # Normalize the three accepted input formats into local variables.
        if isinstance(inputs, (tuple, list)):
            input_ids = inputs[0]
            past = inputs[1] if len(inputs) > 1 else past
            attention_mask = inputs[2] if len(inputs) > 2 else attention_mask
            token_type_ids = inputs[3] if len(inputs) > 3 else token_type_ids
            position_ids = inputs[4] if len(inputs) > 4 else position_ids
            head_mask = inputs[5] if len(inputs) > 5 else head_mask
            inputs_embeds = inputs[6] if len(inputs) > 6 else inputs_embeds
            use_cache = inputs[7] if len(inputs) > 7 else use_cache
            assert len(inputs) <= 8, "Too many inputs."
        elif isinstance(inputs, (dict, BatchEncoding)):
            input_ids = inputs.get("input_ids")
            past = inputs.get("past", past)
            attention_mask = inputs.get("attention_mask", attention_mask)
            token_type_ids = inputs.get("token_type_ids", token_type_ids)
            position_ids = inputs.get("position_ids", position_ids)
            head_mask = inputs.get("head_mask", head_mask)
            inputs_embeds = inputs.get("inputs_embeds", inputs_embeds)
            use_cache = inputs.get("use_cache", use_cache)
            assert len(inputs) <= 8, "Too many inputs."
        else:
            input_ids = inputs

        # If using past key value states, only the last tokens
        # should be given as an input
        if past is not None:
            if input_ids is not None:
                input_ids = input_ids[:, -1:]
            if inputs_embeds is not None:
                inputs_embeds = inputs_embeds[:, -1:]
            if token_type_ids is not None:
                token_type_ids = token_type_ids[:, -1:]

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = shape_list(input_ids)
            input_ids = tf.reshape(input_ids, [-1, input_shape[-1]])
        elif inputs_embeds is not None:
            input_shape = shape_list(inputs_embeds)[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        # Position ids continue from the cached length when a past is supplied.
        if past is None:
            past_length = 0
            past = [None] * len(self.h)
        else:
            past_length = shape_list(past[0][0])[-2]
        if position_ids is None:
            position_ids = tf.range(past_length, input_shape[-1] + past_length, dtype=tf.int32)[tf.newaxis, :]
            position_ids = tf.tile(position_ids, [input_shape[0], 1])

        # Attention mask.
        if attention_mask is not None:
            # We create a 3D attention mask from a 2D tensor mask.
            # Sizes are [batch_size, 1, 1, to_seq_length]
            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
            # this attention mask is more simple than the triangular masking of causal attention
            # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
            attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :]
            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
            # masked positions, this operation will create a tensor which is 0.0 for
            # positions we want to attend and -10000.0 for masked positions.
            # Since we are adding it to the raw scores before the softmax, this is
            # effectively the same as removing these entirely.
            attention_mask = tf.cast(attention_mask, tf.float32)
            attention_mask = (1.0 - attention_mask) * -10000.0
        else:
            attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # head_mask has shape n_layer x batch x n_heads x N x N
        if head_mask is not None:
            raise NotImplementedError
        else:
            head_mask = [None] * self.num_layers

        # Token-type embeddings are scaled like the token embeddings (by sqrt(d)).
        if token_type_ids is not None:
            token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]])
            token_type_embeds = self.w(token_type_ids, mode="embedding")
            token_type_embeds *= tf.math.sqrt(tf.cast(self.d_model_size, tf.float32))
        else:
            token_type_embeds = 0
        position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])

        if inputs_embeds is None:
            inputs_embeds = self.w(input_ids, mode="embedding")
        seq_len = input_shape[-1]
        # Upper-triangular causal mask: 1.0 marks future positions to block.
        mask = 1 - tf.linalg.band_part(tf.ones((seq_len, seq_len)), -1, 0)
        inputs_embeds *= tf.math.sqrt(tf.cast(self.d_model_size, tf.float32))
        pos_embeds = tf.gather(self.pos_encoding, position_ids)
        hidden_states = inputs_embeds + pos_embeds + token_type_embeds
        hidden_states = self.dropout(hidden_states, training=training)

        output_shape = input_shape + [shape_list(hidden_states)[-1]]
        presents = ()
        all_hidden_states = ()
        all_attentions = []
        for i, (h, layer_past) in enumerate(zip(self.h, past)):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)
            outputs = h([hidden_states, mask, layer_past, attention_mask, head_mask[i], use_cache], training=training)
            hidden_states, present = outputs[:2]
            if use_cache is True:
                presents = presents + (present,)
            if self.output_attentions:
                all_attentions.append(outputs[2])

        # Final layer norm, then restore the original (batch, seq, hidden) shape.
        hidden_states = self.layernorm(hidden_states)
        hidden_states = tf.reshape(hidden_states, output_shape)
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        # Assemble outputs: hidden_states (+ presents) (+ all hidden states) (+ attentions).
        outputs = (hidden_states,)
        if use_cache is True:
            outputs = outputs + (presents,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            # let the number of heads free (-1) so we can extract attention even after head pruning
            attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]
            all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)
            outputs = outputs + (all_attentions,)
        return outputs
class TFCTRLPreTrainedModel(TFPreTrainedModel):
    """ An abstract class to handle weights initialization and
    a simple interface for downloading and loading pretrained models.
    """

    # Configuration class and checkpoint URL map consumed by `from_pretrained`.
    config_class = CTRLConfig
    pretrained_model_archive_map = TF_CTRL_PRETRAINED_MODEL_ARCHIVE_MAP
    # Attribute name under which subclasses store the shared TFCTRLMainLayer.
    base_model_prefix = "transformer"
CTRL_START_DOCSTRING = r"""
.. note::
TF 2.0 models accepts two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
This second option is useful when using :obj:`tf.keras.Model.fit()` method which currently requires having
all the tensors in the first argument of the model call function: :obj:`model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors
in the first positional argument :
- a single Tensor with input_ids only and nothing else: :obj:`model(inputs_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
:obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
:obj:`model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`
Parameters:
config (:class:`~transformers.CTRLConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
CTRL_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
If `past` is used, optionally only the last `input_ids` have to be input (see `past`).
Indices can be obtained using :class:`transformers.CTRLTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
past (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
(see `past` output below). Can be used to speed up sequential decoding.
If `past` is used, the user can optionally input only the last `input_ids`
(those that don't have their past given to this model) of shape :obj:`(batch_size, 1)`
instead of all `input_ids` of shape :obj:`(batch_size, sequence_length)`.
attention_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
If `past` is used, optionally only the last `token_type_ids` have to be input (see `past`).
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
input_embeds (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
If `past` is used, optionally only the last `input_embeds` have to be input (see `past`).
use_cache (:obj:`bool`):
If `use_cache` is True, `past` key value states are returned and
can be used to speed up decoding (see `past`). Defaults to `True`.
training (:obj:`boolean`, `optional`, defaults to :obj:`False`):
Whether to activate dropout modules (if set to :obj:`True`) during training or to de-activate them
(if set to :obj:`False`) for evaluation.
"""
@add_start_docstrings(
    "The bare CTRL Model transformer outputting raw hidden-states without any specific head on top.",
    CTRL_START_DOCSTRING,
)
class TFCTRLModel(TFCTRLPreTrainedModel):
    # Thin wrapper: all computation happens in the shared TFCTRLMainLayer,
    # stored under `base_model_prefix` ("transformer") for weight loading.
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.transformer = TFCTRLMainLayer(config, name="transformer")

    @add_start_docstrings_to_callable(CTRL_INPUTS_DOCSTRING)
    def call(self, inputs, **kwargs):
        r"""
    Return:
        :obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.CTRLConfig`) and inputs:
        last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the last layer of the model.
        past (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers` with each tensor of shape :obj:`(2, batch_size, num_heads, sequence_length, embed_size_per_head)`):
            Contains pre-computed hidden-states (key and values in the attention blocks).
            Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
            should not be passed as input ids as they have already been computed.
        hidden_states (:obj:`tuple(tf.Tensor)` `optional`, returned when ``config.output_hidden_states=True``):
            Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)
            of shape :obj:`(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
            Tuple of :obj:`tf.Tensor` (one for each layer) of shape
            :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.

    Examples::

        import tensorflow as tf
        from transformers import CTRLTokenizer, TFCTRLModel

        tokenizer = CTRLTokenizer.from_pretrained('ctrl')
        model = TFCTRLModel.from_pretrained('ctrl')
        input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :]  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple

        """
        # Delegate directly; the main layer handles all input formats and options.
        outputs = self.transformer(inputs, **kwargs)
        return outputs
class TFCTRLLMHead(tf.keras.layers.Layer):
def __init__(self, config, input_embeddings, **kwargs):
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.input_embeddings = input_embeddings
def build(self, input_shape):
self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias")
super().build(input_shape)
def call(self, hidden_states):
hidden_states = self.input_embeddings(hidden_states, mode="linear")
hidden_states = hidden_states + self.bias
return hidden_states
@add_start_docstrings(
"""The CTRL Model transformer with a language modeling head on top
(linear layer with weights tied to the input embeddings). """,
CTRL_START_DOCSTRING,
)
class TFCTRLLMHeadModel(TFCTRLPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.transformer = TFCTRLMainLayer(config, name="transformer")
self.lm_head = TFCTRLLMHead(config, self.transformer.w, name="lm_head")
def get_output_embeddings(self):
return self.lm_head.input_embeddings
def prepare_inputs_for_generation(self, inputs, past, **kwargs):
# only last token for inputs_ids if past is defined in kwargs
if past:
inputs = tf.expand_dims(inputs[:, -1], -1)
return {"inputs": inputs, "past": past, "use_cache": kwargs["use_cache"]}
@add_start_docstrings_to_callable(CTRL_INPUTS_DOCSTRING)
def call(self, inputs, **kwargs):
r"""
Return:
:obj:`tuple(tf.Tensor)` comprising various elements depending on the configuration (:class:`~transformers.CTRLConfig`) and inputs:
prediction_scores (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers` with each tensor of shape :obj:`(2, batch_size, num_heads, sequence_length, embed_size_per_head)`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
import tensorflow as tf
from transformers import CTRLTokenizer, TFCTRLLMHeadModel
tokenizer = CTRLTokenizer.from_pretrained('ctrl')
model = TFCTRLLMHeadModel.from_pretrained('ctrl')
input_ids = tf.constant([tokenizer.encode("Links Hello, my dog is cute", add_special_tokens=True)])
outputs = model(input_ids)
loss, logits = outputs[:2]
"""
transformer_outputs = self.transformer(inputs, **kwargs)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
outputs = (lm_logits,) + transformer_outputs[1:]
return outputs # lm_logits, presents, (all hidden_states), (attentions)
| 45.864548 | 169 | 0.66059 |
import logging
import numpy as np
import tensorflow as tf
from .configuration_ctrl import CTRLConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_tf_utils import TFPreTrainedModel, TFSharedEmbeddings, keras_serializable, shape_list
from .tokenization_utils import BatchEncoding
logger = logging.getLogger(__name__)
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_MAP = {"ctrl": "https://s3.amazonaws.com/models.huggingface.co/bert/ctrl-tf_model.h5"}
def angle_defn(pos, i, d_model_size):
angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model_size))
return pos * angle_rates
def positional_encoding(position, d_model_size):
angle_rads = angle_defn(np.arange(position)[:, np.newaxis], np.arange(d_model_size)[np.newaxis, :], d_model_size)
sines = np.sin(angle_rads[:, 0::2])
cosines = np.cos(angle_rads[:, 1::2])
pos_encoding = tf.cast(np.concatenate([sines, cosines], axis=-1), dtype=tf.float32)
return pos_encoding
def scaled_dot_product_attention(q, k, v, mask, attention_mask=None, head_mask=None):
matmul_qk = tf.matmul(q, k, transpose_b=True)
dk = tf.cast(shape_list(k)[-1], tf.float32)
scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
if mask is not None:
scaled_attention_logits += mask * -1e4
if attention_mask is not None:
scaled_attention_logits = scaled_attention_logits + attention_mask
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1)
if head_mask is not None:
attention_weights = attention_weights * head_mask
output = tf.matmul(attention_weights, v)
return output, attention_weights
class TFMultiHeadAttention(tf.keras.layers.Layer):
def __init__(self, d_model_size, num_heads, output_attentions=False, **kwargs):
super().__init__(**kwargs)
self.output_attentions = output_attentions
self.num_heads = num_heads
self.d_model_size = d_model_size
self.depth = int(d_model_size / self.num_heads)
self.Wq = tf.keras.layers.Dense(d_model_size, name="Wq")
self.Wk = tf.keras.layers.Dense(d_model_size, name="Wk")
self.Wv = tf.keras.layers.Dense(d_model_size, name="Wv")
self.dense = tf.keras.layers.Dense(d_model_size, name="dense")
def split_into_heads(self, x, batch_size):
x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
return tf.transpose(x, perm=[0, 2, 1, 3])
def call(self, inputs, training=False):
v, k, q, mask, layer_past, attention_mask, head_mask, use_cache = inputs
batch_size = shape_list(q)[0]
q = self.Wq(q)
k = self.Wk(k)
v = self.Wv(v)
q = self.split_into_heads(q, batch_size)
k = self.split_into_heads(k, batch_size)
v = self.split_into_heads(v, batch_size)
if layer_past is not None:
past_key, past_value = tf.unstack(layer_past, axis=0)
k = tf.concat((past_key, k), axis=-2)
v = tf.concat((past_value, v), axis=-2)
if tf.is_tensor(use_cache):
if hasattr(use_cache, "numpy"):
use_cache = bool(use_cache.numpy())
else:
use_cache = True
if use_cache is True:
present = tf.stack((k, v), axis=0)
else:
present = (None,)
output = scaled_dot_product_attention(q, k, v, mask, attention_mask, head_mask)
scaled_attention = tf.transpose(output[0], perm=[0, 2, 1, 3])
attn = output[1]
original_size_attention = tf.reshape(scaled_attention, (batch_size, -1, self.d_model_size))
output = self.dense(original_size_attention)
outputs = (output, present)
if self.output_attentions:
outputs = outputs + (attn,)
return outputs
def point_wise_feed_forward_network(d_model_size, dff, name=""):
return tf.keras.Sequential(
[tf.keras.layers.Dense(dff, activation="relu", name="0"), tf.keras.layers.Dense(d_model_size, name="2")],
name="ffn",
)
class TFEncoderLayer(tf.keras.layers.Layer):
def __init__(
self, d_model_size, num_heads, dff, rate=0.1, layer_norm_epsilon=1e-6, output_attentions=False, **kwargs
):
super().__init__(**kwargs)
self.multi_head_attention = TFMultiHeadAttention(
d_model_size, num_heads, output_attentions, name="multi_head_attention"
)
self.ffn = point_wise_feed_forward_network(d_model_size, dff, name="ffn")
self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name="layernorm1")
self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=layer_norm_epsilon, name="layernorm2")
self.dropout1 = tf.keras.layers.Dropout(rate)
self.dropout2 = tf.keras.layers.Dropout(rate)
def call(self, inputs, training=False):
x, mask, layer_past, attention_mask, head_mask, use_cache = inputs
normed = self.layernorm1(x)
attn_outputs = self.multi_head_attention(
[normed, normed, normed, mask, layer_past, attention_mask, head_mask, use_cache], training=training
)
attn_output = attn_outputs[0]
attn_output = self.dropout1(attn_output, training=training)
out1 = x + attn_output
out2 = self.layernorm2(out1)
ffn_output = self.ffn(out2)
ffn_output = self.dropout2(ffn_output, training=training)
out2 = out1 + ffn_output
outputs = (out2,) + attn_outputs[1:]
return outputs
@keras_serializable
class TFCTRLMainLayer(tf.keras.layers.Layer):
config_class = CTRLConfig
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.output_hidden_states = config.output_hidden_states
self.output_attentions = config.output_attentions
self.d_model_size = config.n_embd
self.num_layers = config.n_layer
self.pos_encoding = positional_encoding(config.n_positions, self.d_model_size)
self.w = TFSharedEmbeddings(
config.vocab_size, config.n_embd, initializer_range=config.initializer_range, name="w"
)
self.dropout = tf.keras.layers.Dropout(config.embd_pdrop)
self.h = [
TFEncoderLayer(
config.n_embd,
config.n_head,
config.dff,
config.resid_pdrop,
config.layer_norm_epsilon,
config.output_attentions,
name="h_._{}".format(i),
)
for i in range(config.n_layer)
]
self.layernorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="layernorm")
def get_input_embeddings(self):
return self.w
def _resize_token_embeddings(self, new_num_tokens):
raise NotImplementedError
def _prune_heads(self, heads_to_prune):
raise NotImplementedError
def call(
self,
inputs,
past=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
use_cache=True,
training=False,
):
if isinstance(inputs, (tuple, list)):
input_ids = inputs[0]
past = inputs[1] if len(inputs) > 1 else past
attention_mask = inputs[2] if len(inputs) > 2 else attention_mask
token_type_ids = inputs[3] if len(inputs) > 3 else token_type_ids
position_ids = inputs[4] if len(inputs) > 4 else position_ids
head_mask = inputs[5] if len(inputs) > 5 else head_mask
inputs_embeds = inputs[6] if len(inputs) > 6 else inputs_embeds
use_cache = inputs[7] if len(inputs) > 7 else use_cache
assert len(inputs) <= 8, "Too many inputs."
elif isinstance(inputs, (dict, BatchEncoding)):
input_ids = inputs.get("input_ids")
past = inputs.get("past", past)
attention_mask = inputs.get("attention_mask", attention_mask)
token_type_ids = inputs.get("token_type_ids", token_type_ids)
position_ids = inputs.get("position_ids", position_ids)
head_mask = inputs.get("head_mask", head_mask)
inputs_embeds = inputs.get("inputs_embeds", inputs_embeds)
use_cache = inputs.get("use_cache", use_cache)
assert len(inputs) <= 8, "Too many inputs."
else:
input_ids = inputs
if past is not None:
if input_ids is not None:
input_ids = input_ids[:, -1:]
if inputs_embeds is not None:
inputs_embeds = inputs_embeds[:, -1:]
if token_type_ids is not None:
token_type_ids = token_type_ids[:, -1:]
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = shape_list(input_ids)
input_ids = tf.reshape(input_ids, [-1, input_shape[-1]])
elif inputs_embeds is not None:
input_shape = shape_list(inputs_embeds)[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if past is None:
past_length = 0
past = [None] * len(self.h)
else:
past_length = shape_list(past[0][0])[-2]
if position_ids is None:
position_ids = tf.range(past_length, input_shape[-1] + past_length, dtype=tf.int32)[tf.newaxis, :]
position_ids = tf.tile(position_ids, [input_shape[0], 1])
if attention_mask is not None:
attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :]
attention_mask = tf.cast(attention_mask, tf.float32)
attention_mask = (1.0 - attention_mask) * -10000.0
else:
attention_mask = None
if head_mask is not None:
raise NotImplementedError
else:
head_mask = [None] * self.num_layers
if token_type_ids is not None:
token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]])
token_type_embeds = self.w(token_type_ids, mode="embedding")
token_type_embeds *= tf.math.sqrt(tf.cast(self.d_model_size, tf.float32))
else:
token_type_embeds = 0
position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])
if inputs_embeds is None:
inputs_embeds = self.w(input_ids, mode="embedding")
seq_len = input_shape[-1]
mask = 1 - tf.linalg.band_part(tf.ones((seq_len, seq_len)), -1, 0)
inputs_embeds *= tf.math.sqrt(tf.cast(self.d_model_size, tf.float32))
pos_embeds = tf.gather(self.pos_encoding, position_ids)
hidden_states = inputs_embeds + pos_embeds + token_type_embeds
hidden_states = self.dropout(hidden_states, training=training)
output_shape = input_shape + [shape_list(hidden_states)[-1]]
presents = ()
all_hidden_states = ()
all_attentions = []
for i, (h, layer_past) in enumerate(zip(self.h, past)):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)
outputs = h([hidden_states, mask, layer_past, attention_mask, head_mask[i], use_cache], training=training)
hidden_states, present = outputs[:2]
if use_cache is True:
presents = presents + (present,)
if self.output_attentions:
all_attentions.append(outputs[2])
hidden_states = self.layernorm(hidden_states)
hidden_states = tf.reshape(hidden_states, output_shape)
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if use_cache is True:
outputs = outputs + (presents,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]
all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)
outputs = outputs + (all_attentions,)
return outputs
class TFCTRLPreTrainedModel(TFPreTrainedModel):
config_class = CTRLConfig
pretrained_model_archive_map = TF_CTRL_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "transformer"
CTRL_START_DOCSTRING = r"""
.. note::
TF 2.0 models accepts two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
This second option is useful when using :obj:`tf.keras.Model.fit()` method which currently requires having
all the tensors in the first argument of the model call function: :obj:`model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors
in the first positional argument :
- a single Tensor with input_ids only and nothing else: :obj:`model(inputs_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
:obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
:obj:`model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`
Parameters:
config (:class:`~transformers.CTRLConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
CTRL_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
If `past` is used, optionally only the last `input_ids` have to be input (see `past`).
Indices can be obtained using :class:`transformers.CTRLTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
past (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
(see `past` output below). Can be used to speed up sequential decoding.
If `past` is used, the user can optionally input only the last `input_ids`
(those that don't have their past given to this model) of shape :obj:`(batch_size, 1)`
instead of all `input_ids` of shape :obj:`(batch_size, sequence_length)`.
attention_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
If `past` is used, optionally only the last `token_type_ids` have to be input (see `past`).
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
input_embeds (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
If `past` is used, optionally only the last `input_embeds` have to be input (see `past`).
use_cache (:obj:`bool`):
If `use_cache` is True, `past` key value states are returned and
can be used to speed up decoding (see `past`). Defaults to `True`.
training (:obj:`boolean`, `optional`, defaults to :obj:`False`):
Whether to activate dropout modules (if set to :obj:`True`) during training or to de-activate them
(if set to :obj:`False`) for evaluation.
"""
@add_start_docstrings(
"The bare CTRL Model transformer outputting raw hidden-states without any specific head on top.",
CTRL_START_DOCSTRING,
)
class TFCTRLModel(TFCTRLPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.transformer = TFCTRLMainLayer(config, name="transformer")
@add_start_docstrings_to_callable(CTRL_INPUTS_DOCSTRING)
def call(self, inputs, **kwargs):
outputs = self.transformer(inputs, **kwargs)
return outputs
class TFCTRLLMHead(tf.keras.layers.Layer):
def __init__(self, config, input_embeddings, **kwargs):
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.input_embeddings = input_embeddings
def build(self, input_shape):
self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias")
super().build(input_shape)
def call(self, hidden_states):
hidden_states = self.input_embeddings(hidden_states, mode="linear")
hidden_states = hidden_states + self.bias
return hidden_states
@add_start_docstrings(
"""The CTRL Model transformer with a language modeling head on top
(linear layer with weights tied to the input embeddings). """,
CTRL_START_DOCSTRING,
)
class TFCTRLLMHeadModel(TFCTRLPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.transformer = TFCTRLMainLayer(config, name="transformer")
self.lm_head = TFCTRLLMHead(config, self.transformer.w, name="lm_head")
def get_output_embeddings(self):
return self.lm_head.input_embeddings
def prepare_inputs_for_generation(self, inputs, past, **kwargs):
if past:
inputs = tf.expand_dims(inputs[:, -1], -1)
return {"inputs": inputs, "past": past, "use_cache": kwargs["use_cache"]}
@add_start_docstrings_to_callable(CTRL_INPUTS_DOCSTRING)
def call(self, inputs, **kwargs):
transformer_outputs = self.transformer(inputs, **kwargs)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
outputs = (lm_logits,) + transformer_outputs[1:]
return outputs
| true | true |
f7fd6e586d7d67538fa6307c72be9af9ff13204f | 925 | py | Python | Polymorphism_and_Magic_Methods/wild_farm_04E/project/animals/animal.py | MNikov/Python-OOP-October-2020 | a53e4555758ec810605e31e7b2c71b65c49b2332 | [
"MIT"
] | null | null | null | Polymorphism_and_Magic_Methods/wild_farm_04E/project/animals/animal.py | MNikov/Python-OOP-October-2020 | a53e4555758ec810605e31e7b2c71b65c49b2332 | [
"MIT"
] | null | null | null | Polymorphism_and_Magic_Methods/wild_farm_04E/project/animals/animal.py | MNikov/Python-OOP-October-2020 | a53e4555758ec810605e31e7b2c71b65c49b2332 | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
class Animal(ABC):
def __init__(self, name, weight):
self.name = name
self.weight = weight
self.food_eaten = 0
@abstractmethod
def make_sound(self):
pass
@abstractmethod
def feed(self, food):
pass
class Bird(Animal, ABC):
@abstractmethod
def __init__(self, name, weight, wing_size):
super().__init__(name, weight)
self.wing_size = wing_size
def __repr__(self):
return f"{self.__class__.__name__} [{self.name}, {self.wing_size}, {self.weight}, {self.food_eaten}]"
class Mammal(Animal, ABC):
@abstractmethod
def __init__(self, name, weight, living_region):
super().__init__(name, weight)
self.living_region = living_region
def __repr__(self):
return f"{self.__class__.__name__} [{self.name}, {self.weight}, {self.living_region}, {self.food_eaten}]"
| 25 | 113 | 0.644324 | from abc import ABC, abstractmethod
class Animal(ABC):
def __init__(self, name, weight):
self.name = name
self.weight = weight
self.food_eaten = 0
@abstractmethod
def make_sound(self):
pass
@abstractmethod
def feed(self, food):
pass
class Bird(Animal, ABC):
@abstractmethod
def __init__(self, name, weight, wing_size):
super().__init__(name, weight)
self.wing_size = wing_size
def __repr__(self):
return f"{self.__class__.__name__} [{self.name}, {self.wing_size}, {self.weight}, {self.food_eaten}]"
class Mammal(Animal, ABC):
@abstractmethod
def __init__(self, name, weight, living_region):
super().__init__(name, weight)
self.living_region = living_region
def __repr__(self):
return f"{self.__class__.__name__} [{self.name}, {self.weight}, {self.living_region}, {self.food_eaten}]"
| true | true |
f7fd6e99a75252c110b5bdcb99f74f5181f3e17b | 2,978 | py | Python | openGaussBase/testcase/GUC/CLIENTCONNECTION/Opengauss_Function_Guc_ClientConnection_Case0197.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/GUC/CLIENTCONNECTION/Opengauss_Function_Guc_ClientConnection_Case0197.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/GUC/CLIENTCONNECTION/Opengauss_Function_Guc_ClientConnection_Case0197.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : GUC
Case Name : 使用alter user方法设置参数partition_lock_upgrade_timeout为3000,
观察预期结果
Description :
1.查询partition_lock_upgrade_timeout默认值
2.创建用户
3.修改参数值为3000
4.删除用户
Expect :
1.显示默认值1800
2.用户创建成功
3.设置成功
4.删除成功
History :
"""
import unittest
import time
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
from yat.test import Node
from yat.test import macro
LOG = Logger()
commonsh = CommonSH('dbuser')
class ClientConnection(unittest.TestCase):
def setUp(self):
LOG.info(
'----Opengauss_Function_Guc_ClientConnection_Case0197start-----')
self.constant = Constant()
self.user_node = Node('dbuser')
self.DB_ENV_PATH = macro.DB_ENV_PATH
def test_partition_lock_upgrade_timeout(self):
# 查询默认值
sql_cmd = commonsh.execut_db_sql('show partition_lock_upgrade_timeout;')
LOG.info(sql_cmd)
self.res = sql_cmd.splitlines()[-2].strip()
# 创建用户
sql_cmd = commonsh.execut_db_sql(f'''drop user if exists test_spur0197
cascade;
create user test_spur0197 password '{macro.COMMON_PASSWD}';
''')
LOG.info(sql_cmd)
self.assertIn(self.constant.CREATE_ROLE_SUCCESS_MSG, sql_cmd)
# 修改用户级别参数
sql_cmd = commonsh.execut_db_sql('''alter user test_spur0197 set
partition_lock_upgrade_timeout to 3000;
''')
LOG.info(sql_cmd)
self.assertIn(self.constant.ALTER_ROLE_SUCCESS_MSG, sql_cmd)
time.sleep(3)
# 查询
sql_cmd = 'show partition_lock_upgrade_timeout';
excute_cmd1 = f'''source {self.DB_ENV_PATH};\
gsql -d {self.user_node.db_name} \
-p{self.user_node.db_port} \
-U test_spur0197 \
-W '{macro.COMMON_PASSWD}' \
-c "{sql_cmd}"\
'''
LOG.info(sql_cmd)
msg1 = self.user_node.sh(excute_cmd1).result()
LOG.info(msg1)
self.assertIn('3000', msg1)
def tearDown(self):
LOG.info('----------------恢复默认值-----------------------')
sql_cmd = commonsh.execut_db_sql('''drop user if exists test_spur0197
cascade;
''')
LOG.info(sql_cmd)
LOG.info(
'--Opengauss_Function_Guc_ClientConnection_Case0197执行完成---')
| 31.680851 | 84 | 0.635662 | import unittest
import time
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
from yat.test import Node
from yat.test import macro
LOG = Logger()
commonsh = CommonSH('dbuser')
class ClientConnection(unittest.TestCase):
def setUp(self):
LOG.info(
'----Opengauss_Function_Guc_ClientConnection_Case0197start-----')
self.constant = Constant()
self.user_node = Node('dbuser')
self.DB_ENV_PATH = macro.DB_ENV_PATH
def test_partition_lock_upgrade_timeout(self):
sql_cmd = commonsh.execut_db_sql('show partition_lock_upgrade_timeout;')
LOG.info(sql_cmd)
self.res = sql_cmd.splitlines()[-2].strip()
sql_cmd = commonsh.execut_db_sql(f'''drop user if exists test_spur0197
cascade;
create user test_spur0197 password '{macro.COMMON_PASSWD}';
''')
LOG.info(sql_cmd)
self.assertIn(self.constant.CREATE_ROLE_SUCCESS_MSG, sql_cmd)
sql_cmd = commonsh.execut_db_sql('''alter user test_spur0197 set
partition_lock_upgrade_timeout to 3000;
''')
LOG.info(sql_cmd)
self.assertIn(self.constant.ALTER_ROLE_SUCCESS_MSG, sql_cmd)
time.sleep(3)
sql_cmd = 'show partition_lock_upgrade_timeout';
excute_cmd1 = f'''source {self.DB_ENV_PATH};\
gsql -d {self.user_node.db_name} \
-p{self.user_node.db_port} \
-U test_spur0197 \
-W '{macro.COMMON_PASSWD}' \
-c "{sql_cmd}"\
'''
LOG.info(sql_cmd)
msg1 = self.user_node.sh(excute_cmd1).result()
LOG.info(msg1)
self.assertIn('3000', msg1)
def tearDown(self):
LOG.info('----------------恢复默认值-----------------------')
sql_cmd = commonsh.execut_db_sql('''drop user if exists test_spur0197
cascade;
''')
LOG.info(sql_cmd)
LOG.info(
'--Opengauss_Function_Guc_ClientConnection_Case0197执行完成---')
| true | true |
f7fd6f7a13e1ad1154a280d7322c5cee5763a17f | 5,777 | py | Python | pygame_menu/examples/scroll_menu.py | notrurs/pygame-menu | 159853d856d5b25e813389b8ebf541c79771c8ed | [
"MIT"
] | null | null | null | pygame_menu/examples/scroll_menu.py | notrurs/pygame-menu | 159853d856d5b25e813389b8ebf541c79771c8ed | [
"MIT"
] | null | null | null | pygame_menu/examples/scroll_menu.py | notrurs/pygame-menu | 159853d856d5b25e813389b8ebf541c79771c8ed | [
"MIT"
] | null | null | null | # coding=utf-8
"""
pygame-menu
https://github.com/ppizarror/pygame-menu
EXAMPLE - SCROLL MENU
Shows scrolling in menu.
License:
-------------------------------------------------------------------------------
The MIT License (MIT)
Copyright 2017-2020 Pablo Pizarro R. @ppizarror
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-------------------------------------------------------------------------------
"""
import os
import pygame
import pygame_menu
from functools import partial
FPS = 30.0  # Frames per second the example loop runs at
H_SIZE = 600  # Height of window size
W_SIZE = 800  # Width of window size
def on_button_click(value=None, text=None):
    """
    Print a greeting when a menu widget fires an event.

    :param value: Value supplied by the widget event
    :param text: Optional widget text; when given it is included in the message
    :return: None
    """
    if text:
        message = 'Hello from {} with {}'.format(text, value)
    else:
        message = 'Hello from {}'.format(value)
    print(message)
def paint_background(surface):
    """
    Fill the whole given surface with the example's background color.

    :param surface: Pygame surface
    :type surface: :py:class:`pygame.Surface`
    :return: None
    """
    background_color = (128, 230, 198)
    surface.fill(background_color)
def make_long_menu():
    """
    Create a long scrolling menu.

    Builds three menus: a scrolling main menu, a 4x3 column/row submenu, and
    a text submenu, and wires the two submenus into the main menu as buttons.

    :return: Menu
    :rtype: pygame_menu.Menu
    """
    # Main menu, pauses execution of the application
    _menu = pygame_menu.Menu(
        height=400,
        onclose=pygame_menu.events.EXIT,
        theme=pygame_menu.themes.THEME_BLUE,
        title='Main Menu',
        width=600,  # px
    )
    # Submenu demonstrating a grid layout (4 columns x 3 rows).
    _menu_sub = pygame_menu.Menu(
        columns=4,
        height=400,
        onclose=pygame_menu.events.EXIT,
        rows=3,
        theme=pygame_menu.themes.THEME_GREEN,
        title='Menu with columns',
        width=600,
    )
    # Submenu demonstrating a scrollable block of wrapped text.
    _menu_text = pygame_menu.Menu(
        height=400,
        onclose=pygame_menu.events.EXIT,
        theme=pygame_menu.themes.THEME_DARK,
        title='Text with scroll',
        width=600,
    )
    # Passing a Menu as the button action opens it as a submenu.
    _menu.add_button('Rows and Columns', _menu_sub)
    _menu.add_button('Text scrolled', _menu_text)
    _menu.add_vertical_margin(20)  # Adds margin
    label1 = 'Button n°{}'
    label2 = 'Text n°{}: '
    # Alternate buttons (even i) and text inputs (odd i) to force scrolling.
    for i in range(1, 20):
        if i % 2 == 0:
            _menu.add_button(label1.format(i),
                             on_button_click,
                             'Button n°{}'.format(i))
        else:
            _menu.add_text_input(label2.format(i),
                                 onchange=on_button_click,
                                 text='Text n°{}'.format(i))
    _menu.add_button('Exit', pygame_menu.events.EXIT)
    label = 'Button n°{}'
    for i in range(1, 11):
        # Test large button
        if i == 5:
            txt = 'This is a very long button!'
        else:
            txt = label.format(100 * i)
        _menu_sub.add_button(txt, on_button_click, 100 * i)
    _menu_sub.add_button('Back', pygame_menu.events.BACK)
    _menu_sub.center_content()
    # Long label is wrapped at max_char characters per line by pygame-menu.
    _menu_text.add_label('Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod '
                         'tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, '
                         'quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. '
                         'Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu '
                         'fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in '
                         'culpa qui officia deserunt mollit anim id est laborum.',
                         max_char=33,
                         align=pygame_menu.locals.ALIGN_LEFT,
                         margin=(0, -1))
    return _menu
def main(test=False):
    """
    Main function.

    Initializes pygame, builds the menu and runs the event loop.

    :param test: Indicate function is being tested
    :type test: bool
    :return: None
    """
    os.environ['SDL_VIDEO_CENTERED'] = '1'
    pygame.init()
    clock = pygame.time.Clock()
    # Create window
    screen = pygame.display.set_mode((W_SIZE, H_SIZE))
    pygame.display.set_caption('Example - Scrolling Menu')
    # Create menu
    menu = make_long_menu()
    # -------------------------------------------------------------------------
    # Main loop
    # -------------------------------------------------------------------------
    while True:
        # Tick
        clock.tick(FPS)
        # Paint background
        paint_background(screen)
        # Execute main from principal menu if is enabled
        # NOTE(review): mainloop normally blocks while the menu is open and
        # draws via bgfun itself; disable_loop=test makes it return after one
        # iteration so the test path below can exit — confirm against the
        # pygame-menu version in use.
        menu.mainloop(surface=screen,
                      bgfun=partial(paint_background, screen),
                      disable_loop=test,
                      fps_limit=FPS)
        # Update surface
        pygame.display.flip()
        # At first loop returns
        if test:
            break
if __name__ == '__main__':
    main()
| 30.566138 | 110 | 0.592349 |
import os
import pygame
import pygame_menu
from functools import partial
FPS = 30.0
H_SIZE = 600
W_SIZE = 800
def on_button_click(value=None, text=None):
    """Print a greeting for a button/text-input event."""
    if not text:
        print('Hello from {}'.format(value))
    else:
        print('Hello from {} with {}'.format(text, value))
def paint_background(surface):
    """Fill the given pygame surface with the background color."""
    surface.fill((128, 230, 198))
def make_long_menu():
    """Build and return the scrolling main menu with its two submenus."""
    # Main scrolling menu; closing it exits the application.
    _menu = pygame_menu.Menu(
        height=400,
        onclose=pygame_menu.events.EXIT,
        theme=pygame_menu.themes.THEME_BLUE,
        title='Main Menu',
        width=600,
    )
    # Submenu demonstrating a 4x3 grid layout.
    _menu_sub = pygame_menu.Menu(
        columns=4,
        height=400,
        onclose=pygame_menu.events.EXIT,
        rows=3,
        theme=pygame_menu.themes.THEME_GREEN,
        title='Menu with columns',
        width=600,
    )
    # Submenu demonstrating scrollable wrapped text.
    _menu_text = pygame_menu.Menu(
        height=400,
        onclose=pygame_menu.events.EXIT,
        theme=pygame_menu.themes.THEME_DARK,
        title='Text with scroll',
        width=600,
    )
    # A Menu passed as the action opens as a submenu.
    _menu.add_button('Rows and Columns', _menu_sub)
    _menu.add_button('Text scrolled', _menu_text)
    _menu.add_vertical_margin(20)
    label1 = 'Button n°{}'
    label2 = 'Text n°{}: '
    # Alternate buttons (even i) and text inputs (odd i) to force scrolling.
    for i in range(1, 20):
        if i % 2 == 0:
            _menu.add_button(label1.format(i),
                             on_button_click,
                             'Button n°{}'.format(i))
        else:
            _menu.add_text_input(label2.format(i),
                                 onchange=on_button_click,
                                 text='Text n°{}'.format(i))
    _menu.add_button('Exit', pygame_menu.events.EXIT)
    label = 'Button n°{}'
    for i in range(1, 11):
        # i == 5 exercises an oversized button label.
        if i == 5:
            txt = 'This is a very long button!'
        else:
            txt = label.format(100 * i)
        _menu_sub.add_button(txt, on_button_click, 100 * i)
    _menu_sub.add_button('Back', pygame_menu.events.BACK)
    _menu_sub.center_content()
    # Long label is wrapped at max_char characters per line.
    _menu_text.add_label('Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod '
                         'tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, '
                         'quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. '
                         'Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu '
                         'fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in '
                         'culpa qui officia deserunt mollit anim id est laborum.',
                         max_char=33,
                         align=pygame_menu.locals.ALIGN_LEFT,
                         margin=(0, -1))
    return _menu
def main(test=False):
    """Initialize pygame, build the menu and run the event loop.

    :param test: when True, run a single loop iteration and return.
    """
    os.environ['SDL_VIDEO_CENTERED'] = '1'
    pygame.init()
    clock = pygame.time.Clock()
    screen = pygame.display.set_mode((W_SIZE, H_SIZE))
    pygame.display.set_caption('Example - Scrolling Menu')
    menu = make_long_menu()
    while True:
        clock.tick(FPS)
        paint_background(screen)
        # mainloop blocks while the menu is open; disable_loop=test makes it
        # return after one pass so the test branch below can exit.
        menu.mainloop(surface=screen,
                      bgfun=partial(paint_background, screen),
                      disable_loop=test,
                      fps_limit=FPS)
        pygame.display.flip()
        if test:
            break
if __name__ == '__main__':
    main()
| true | true |
f7fd6f8cef2159b49f5d2d09aafbe7065291ef13 | 438 | py | Python | refugeedata/app/wsgi.py | ryanmrubin/refugeedata | d71bedb0895e8011f3b67245c17df3422553820c | [
"MIT"
] | null | null | null | refugeedata/app/wsgi.py | ryanmrubin/refugeedata | d71bedb0895e8011f3b67245c17df3422553820c | [
"MIT"
] | 1 | 2018-11-13T15:13:37.000Z | 2018-11-13T15:13:37.000Z | refugeedata/app/wsgi.py | ryanmrubin/refugeedata | d71bedb0895e8011f3b67245c17df3422553820c | [
"MIT"
] | null | null | null | """
WSGI config for refugeedata project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
# Default the settings module so WSGI servers can import this file directly.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "refugeedata.app.settings")
# Cling (dj_static) wraps the Django WSGI app — presumably so static files
# are served without a separate web server; confirm against dj_static docs.
application = Cling(get_wsgi_application())
| 24.333333 | 78 | 0.792237 |
import os
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
# Default the settings module so WSGI servers can import this file directly.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "refugeedata.app.settings")
# Cling (dj_static) wraps the Django WSGI app — presumably so static files
# are served without a separate web server; confirm against dj_static docs.
application = Cling(get_wsgi_application())
| true | true |
f7fd705134f054dccecedc4cd0e57a61f3aa3cc6 | 1,253 | py | Python | bluebrain/repo-bluebrain/packages/py-neuror/package.py | BlueBrain/Spack | dc328512c70e182f3c24bb0ce64fa3586482bdf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | bluebrain/repo-bluebrain/packages/py-neuror/package.py | BlueBrain/Spack | dc328512c70e182f3c24bb0ce64fa3586482bdf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | bluebrain/repo-bluebrain/packages/py-neuror/package.py | BlueBrain/Spack | dc328512c70e182f3c24bb0ce64fa3586482bdf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyNeuror(PythonPackage):
    """A collection of tools to repair morphologies."""
    # Upstream locations Spack fetches from: release tarballs via pypi,
    # the development branch via git.
    homepage = "https://github.com/BlueBrain/NeuroR"
    git = "https://github.com/BlueBrain/NeuroR.git"
    pypi = "neuror/NeuroR-1.2.3.tar.gz"
    version('develop', branch='master')
    version('1.4.2', sha256='f5e18ebddf59a60ce650c24eb49042057cf97990d63aee3ceb58b7acff823255')
    # Build/runtime Python dependencies. NOTE(review): the version ranges
    # presumably mirror NeuroR's own setup requirements — confirm against
    # the 1.4.2 release metadata when bumping versions.
    depends_on('py-setuptools', type=('build', 'run'))
    depends_on('py-click@6.7:', type=('build', 'run'))
    depends_on('py-matplotlib@2.2.3:', type=('build', 'run'))
    depends_on('py-morph-tool@2.9.0:2.999', type=('build', 'run'))
    depends_on('py-morphio@3.0:3.999', type=('build', 'run'))
    depends_on('py-neurom@3.0:3.999', type=('build', 'run'))
    depends_on('py-numpy@1.19.2:', type=('build', 'run'))
    depends_on('py-nptyping@1.3.0:', type=('build', 'run'))
    depends_on('py-pandas@0.24.2:', type=('build', 'run'))
    depends_on('py-pyquaternion@0.9.2:', type=('build', 'run'))
    depends_on('py-scipy@1.2.0:', type=('build', 'run'))
| 39.15625 | 95 | 0.656824 |
from spack import *
class PyNeuror(PythonPackage):
    """Spack package for NeuroR, a collection of morphology-repair tools."""
    # Upstream locations Spack fetches from: release tarballs via pypi,
    # the development branch via git.
    homepage = "https://github.com/BlueBrain/NeuroR"
    git = "https://github.com/BlueBrain/NeuroR.git"
    pypi = "neuror/NeuroR-1.2.3.tar.gz"
    version('develop', branch='master')
    version('1.4.2', sha256='f5e18ebddf59a60ce650c24eb49042057cf97990d63aee3ceb58b7acff823255')
    # Build/runtime Python dependencies (presumably mirroring NeuroR's own
    # setup requirements — confirm when bumping versions).
    depends_on('py-setuptools', type=('build', 'run'))
    depends_on('py-click@6.7:', type=('build', 'run'))
    depends_on('py-matplotlib@2.2.3:', type=('build', 'run'))
    depends_on('py-morph-tool@2.9.0:2.999', type=('build', 'run'))
    depends_on('py-morphio@3.0:3.999', type=('build', 'run'))
    depends_on('py-neurom@3.0:3.999', type=('build', 'run'))
    depends_on('py-numpy@1.19.2:', type=('build', 'run'))
    depends_on('py-nptyping@1.3.0:', type=('build', 'run'))
    depends_on('py-pandas@0.24.2:', type=('build', 'run'))
    depends_on('py-pyquaternion@0.9.2:', type=('build', 'run'))
    depends_on('py-scipy@1.2.0:', type=('build', 'run'))
| true | true |
f7fd71d8613fa65357f5396f65536d8faa7196d2 | 6,888 | py | Python | service/omega365.py | sesam-community/omega365 | c0baaafa5fe3436356c1f7edcb264f27b183747d | [
"Apache-2.0"
] | null | null | null | service/omega365.py | sesam-community/omega365 | c0baaafa5fe3436356c1f7edcb264f27b183747d | [
"Apache-2.0"
] | null | null | null | service/omega365.py | sesam-community/omega365 | c0baaafa5fe3436356c1f7edcb264f27b183747d | [
"Apache-2.0"
] | null | null | null | import requests
from flask import Flask, Response, request
import os
import logger
import cherrypy
import json
from datetime import datetime
app = Flask(__name__)
logger = logger.Logger('Omega365 client service')
url = os.environ.get("base_url")
username = os.environ.get("username")
pw = os.environ.get("password")
remove_namespaces = os.environ.get("remove_namespaces", True)
headers = json.loads('{"Content-Type": "application/json"}')
resources_config = json.loads(os.environ.get("resources", '[]'))
resources = {}
class BasicUrlSystem:
    """Factory that hands out ``requests`` sessions pre-configured with the
    service-wide headers and TLS verification enabled."""
    def __init__(self, config):
        # Keep the whole config; only the "headers" entry is consumed later.
        self._config = config
    def make_session(self):
        """Return a fresh, configured :class:`requests.Session`."""
        http_session = requests.Session()
        http_session.verify = True
        http_session.headers = self._config["headers"]
        return http_session
session_factory = BasicUrlSystem({"headers": headers})
def authenticate(s):
    """Log the configured user in on the given requests session.

    POSTs the module-level credentials to the Omega365 mobile login endpoint.
    The response object is discarded — presumably the server sets session
    cookies that ``s`` retains for subsequent calls (confirm against the
    Omega365 API). Failures are logged as warnings and swallowed, so callers
    proceed (unauthenticated) on error.

    :param s: requests.Session to authenticate
    """
    auth_url = url + "/login?mobile_login=true"
    auth_content = {
        "username_user": username,
        "password": pw,
        "remember": "false"
    }
    try:
        auth_resp = s.request("POST", auth_url, json=auth_content)
    except Exception as e:
        logger.warning("Exception occurred when authenticating the user: '%s'", e)
def stream_json(clean, since_property_name, id_property_name):
    """Lazily emit the rows of ``clean`` as one JSON array.

    Each row is mutated in place before serialization: ``_updated`` is copied
    from ``since_property_name`` and ``_id`` from ``id_property_name``
    (stringified) when those names are given.

    :param clean: iterable of dict rows
    :param since_property_name: source key for ``_updated`` (or None)
    :param id_property_name: source key for ``_id`` (or None)
    """
    yield '['
    for index, entity in enumerate(clean):
        # Separate elements with commas; no trailing comma after the last one.
        if index > 0:
            yield ','
        if since_property_name is not None:
            entity["_updated"] = entity[since_property_name]
        if id_property_name is not None:
            entity["_id"] = str(entity[id_property_name])
        yield json.dumps(entity)
    yield ']'
def remove_ns(keys):
    """Recursively strip namespace prefixes from dict keys, in place.

    A key such as ``"ns:field"`` is renamed to ``"field"``; lists are walked
    element-wise and nested dict values are processed recursively. Non-dict,
    non-list inputs are left untouched.

    :param keys: dict, list or scalar to clean (mutated in place)
    """
    if isinstance(keys, list):
        for key in keys:
            remove_ns(key)
    if isinstance(keys, dict):
        # Iterate over a snapshot of the keys: renaming pops and inserts
        # entries, and mutating a dict while iterating its live key view
        # raises RuntimeError in Python 3.
        for key in list(keys.keys()):
            if ":" in key:
                new_key = key.split(":")[1]
                keys[new_key] = keys.pop(key)
        for val in keys.values():
            remove_ns(val)
def populate_resources():
    """Build the in-memory ``resources`` registry from ``resources_config``.

    For every configured resource, stores its field list plus the (optional)
    property names used for incremental sync (``since_property_name``) and
    entity identity (``id_property_name``), defaulting both to None when not
    configured. Keyed by the resource name.
    """
    for resource in resources_config:
        since_property_name = None
        id_property_name = None
        if "since_property_name" in resource:
            since_property_name = resource["since_property_name"]
        if "id_property_name" in resource:
            id_property_name = resource["id_property_name"]
        resources[resource["resource_name"]] = \
            {
                "fields": resource["fields"],
                "since_property_name": since_property_name,
                "id_property_name": id_property_name
            }
@app.route("/<path:path>", methods=["GET"])
def get(path):
    """Fetch all records of the Omega365 resource named by ``path``.

    Proxies a "retrieve" operation to the Omega365 ``/api/data`` endpoint and
    streams the matching rows back as a JSON array, augmented with ``_id`` /
    ``_updated`` properties by ``stream_json``.

    :param path: resource name; must exist in the ``resources`` registry.
    """
    request_url = "{0}{1}".format(url, "/api/data")
    logger.info("Request url: %s", request_url)
    if path not in resources:
        # NOTE(review): an uncaught Exception surfaces as a generic HTTP 500.
        raise Exception("Resource with name '{0}' not found!".format(path))
    where_clause = None
    # Incremental sync: when the caller supplies a "since" marker and the
    # resource declares a since property, restrict the query server-side.
    if request.args.get('since') is not None and resources[path]["since_property_name"] is not None:
        logger.info("Since marker found: {0}".format(request.args.get('since')))
        # Drop any fractional-seconds part before parsing the timestamp.
        since = request.args.get('since').split(".")[0]
        where_clause = "{0} >= '{1}'".format(resources[path]["since_property_name"], datetime.strptime(since, "%Y-%m-%dT%H:%M:%S"))
    # Omega365 "retrieve" payload; maxRecords=-1 requests all matching rows.
    get_template = {
        "maxRecords": -1,
        "operation": "retrieve",
        "resourceName": path,
        "fields": resources[path]["fields"],
        "whereClause": where_clause
    }
    logger.info("Request data: %s", get_template)
    with session_factory.make_session() as s:
        authenticate(s)
        response = s.request("POST", request_url, json=get_template, headers=headers)
        if response.status_code != 200:
            raise Exception(response.reason + ": " + response.text)
        result = json.loads(response.text)
    return Response(
        stream_json(result['success'], resources[path]["since_property_name"], resources[path]["id_property_name"]),
        mimetype='application/json'
    )
@app.route("/<path:path>", methods=["POST"])
def post(path):
    """Write a batch of entities to the Omega365 resource named by ``path``.

    For each posted entity, chooses the Omega365 operation by inspecting the
    entity: ``_deleted is True`` -> "destroy"; otherwise, presence of the
    resource's id property -> "update", absence -> "create". Results are
    streamed back as a JSON array; the first failing entity aborts the stream.

    :param path: resource name; must exist in the ``resources`` registry.
    """
    request_url = "{0}{1}".format(url, "/api/data")
    logger.info("Request url: %s", request_url)
    if path not in resources:
        # NOTE(review): an uncaught Exception surfaces as a generic HTTP 500.
        raise Exception("Resource with name '{0}' not found!".format(path))
    request_data = json.loads(request.data)
    logger.info("Request data: %s", request_data)
    # Operation templates merged over each entity below (templates win on
    # key conflicts, since dict.update overwrites).
    create_template = {
        "maxRecords": -1,
        "operation": "create",
        "resourceName": path,
        "uniqueName": path,
        "excludeFieldNames": False,
        "fields": resources[path]["fields"]
    }
    delete_template = {
        "operation": "destroy",
        "resourceName": path,
        "uniqueName": path
    }
    update_template = {
        "operation": "update",
        "resourceName": path,
        "uniqueName": path,
        "excludeFieldNames": False,
        "fields": resources[path]["fields"]
    }
    def generate(entities):
        # Generator so Flask streams results while entities are processed;
        # one authenticated session is reused for the whole batch.
        yield "["
        with session_factory.make_session() as s:
            authenticate(s)
            for index, entity in enumerate(entities):
                if index > 0:
                    yield ","
                # Shallow copy so the template merge does not mutate input.
                post_entity = entity.copy()
                if "_deleted" in entity and entity["_deleted"] is True:
                    logger.info("Deleting entity: {0}!".format(entity["_id"]))
                    post_entity.update(delete_template)
                else:
                    if resources[path]["id_property_name"] in entity:
                        logger.info("Updating entity: {0}!".format(entity["_id"]))
                        post_entity.update(update_template)
                    else:
                        logger.info("Creating entity: {0}!".format(entity["_id"]))
                        post_entity.update(create_template)
                response = s.request("POST", request_url, json=post_entity, headers=headers)
                if response.status_code != 200:
                    # NOTE(review): raising mid-stream truncates the JSON
                    # array already sent to the client.
                    logger.warning("An error occurred: {0}. {1}".format(response.reason, response.text))
                    raise Exception(response.reason + ": " + response.text)
                result = json.loads(response.text)
                yield json.dumps(result['success'])
        yield "]"
    response_data_generator = generate(request_data)
    response_data = response_data_generator
    return Response(response=response_data, mimetype="application/json")
if __name__ == '__main__':
    # Mount the Flask WSGI app at the web-server root.
    cherrypy.tree.graft(app, '/')
    populate_resources()
    # Set the configuration of the web server to production mode
    cherrypy.config.update({
        'environment': 'production',
        'engine.autoreload_on': False,
        'log.screen': True,
        'server.socket_port': 5002,
        'server.socket_host': '0.0.0.0'
    })
    # Start the CherryPy WSGI web server
    cherrypy.engine.start()
    # Keep the main thread alive until the engine stops.
    cherrypy.engine.block()
| 30.75 | 131 | 0.598868 | import requests
from flask import Flask, Response, request
import os
import logger
import cherrypy
import json
from datetime import datetime
app = Flask(__name__)
logger = logger.Logger('Omega365 client service')
url = os.environ.get("base_url")
username = os.environ.get("username")
pw = os.environ.get("password")
remove_namespaces = os.environ.get("remove_namespaces", True)
headers = json.loads('{"Content-Type": "application/json"}')
resources_config = json.loads(os.environ.get("resources", '[]'))
resources = {}
class BasicUrlSystem:
    """Factory for requests sessions pre-configured with service headers."""
    def __init__(self, config):
        # Only the "headers" entry of the config is consumed later.
        self._config = config
    def make_session(self):
        """Return a fresh Session with configured headers and TLS verify on."""
        session = requests.Session()
        session.headers = self._config["headers"]
        session.verify = True
        return session
# Shared session factory carrying the module-level headers.
session_factory = BasicUrlSystem({"headers": headers})
def authenticate(s):
    """Log the configured user in on session ``s``.

    The response is discarded — presumably the server sets session cookies
    that ``s`` retains (confirm against the Omega365 API). Failures are
    logged and swallowed, so callers proceed unauthenticated on error.
    """
    auth_url = url + "/login?mobile_login=true"
    auth_content = {
        "username_user": username,
        "password": pw,
        "remember": "false"
    }
    try:
        auth_resp = s.request("POST", auth_url, json=auth_content)
    except Exception as e:
        logger.warning("Exception occurred when authenticating the user: '%s'", e)
def stream_json(clean, since_property_name, id_property_name):
    """Lazily emit rows as one JSON array, adding _updated/_id in place."""
    first = True
    yield '['
    for i, row in enumerate(clean):
        # Comma-separate elements; the first one is not preceded by a comma.
        if not first:
            yield ','
        else:
            first = False
        if since_property_name is not None:
            row["_updated"] = row[since_property_name]
        if id_property_name is not None:
            row["_id"] = str(row[id_property_name])
        yield json.dumps(row)
    yield ']'
def remove_ns(keys):
    """Recursively strip namespace prefixes from dict keys, in place.

    A key such as ``"ns:field"`` is renamed to ``"field"``; lists are walked
    element-wise and nested dict values are processed recursively.

    :param keys: dict, list or scalar to clean (mutated in place)
    """
    if isinstance(keys, list):
        for key in keys:
            remove_ns(key)
    if isinstance(keys, dict):
        # Iterate over a snapshot of the keys: renaming pops and inserts
        # entries, and mutating a dict while iterating its live key view
        # raises RuntimeError in Python 3.
        for key in list(keys.keys()):
            if ":" in key:
                new_key = key.split(":")[1]
                keys[new_key] = keys.pop(key)
        for val in keys.values():
            remove_ns(val)
def populate_resources():
    """Build the in-memory ``resources`` registry from ``resources_config``."""
    for resource in resources_config:
        since_property_name = None
        id_property_name = None
        if "since_property_name" in resource:
            since_property_name = resource["since_property_name"]
        if "id_property_name" in resource:
            id_property_name = resource["id_property_name"]
        resources[resource["resource_name"]] = \
            {
                "fields": resource["fields"],
                "since_property_name": since_property_name,
                "id_property_name": id_property_name
            }
@app.route("/<path:path>", methods=["GET"])
def get(path):
    """Proxy an Omega365 "retrieve" for resource ``path``; stream JSON back."""
    request_url = "{0}{1}".format(url, "/api/data")
    logger.info("Request url: %s", request_url)
    if path not in resources:
        raise Exception("Resource with name '{0}' not found!".format(path))
    where_clause = None
    # Incremental sync: restrict server-side when a "since" marker is given.
    if request.args.get('since') is not None and resources[path]["since_property_name"] is not None:
        logger.info("Since marker found: {0}".format(request.args.get('since')))
        # Drop any fractional-seconds part before parsing the timestamp.
        since = request.args.get('since').split(".")[0]
        where_clause = "{0} >= '{1}'".format(resources[path]["since_property_name"], datetime.strptime(since, "%Y-%m-%dT%H:%M:%S"))
    # "retrieve" payload; maxRecords=-1 requests all matching rows.
    get_template = {
        "maxRecords": -1,
        "operation": "retrieve",
        "resourceName": path,
        "fields": resources[path]["fields"],
        "whereClause": where_clause
    }
    logger.info("Request data: %s", get_template)
    with session_factory.make_session() as s:
        authenticate(s)
        response = s.request("POST", request_url, json=get_template, headers=headers)
        if response.status_code != 200:
            raise Exception(response.reason + ": " + response.text)
        result = json.loads(response.text)
    return Response(
        stream_json(result['success'], resources[path]["since_property_name"], resources[path]["id_property_name"]),
        mimetype='application/json'
    )
@app.route("/<path:path>", methods=["POST"])
def post(path):
    """Write a batch of entities to resource ``path``.

    Per entity: ``_deleted is True`` -> "destroy"; id property present ->
    "update"; otherwise "create". Results are streamed as a JSON array.
    """
    request_url = "{0}{1}".format(url, "/api/data")
    logger.info("Request url: %s", request_url)
    if path not in resources:
        raise Exception("Resource with name '{0}' not found!".format(path))
    request_data = json.loads(request.data)
    logger.info("Request data: %s", request_data)
    # Operation templates merged over each entity (templates win on conflicts).
    create_template = {
        "maxRecords": -1,
        "operation": "create",
        "resourceName": path,
        "uniqueName": path,
        "excludeFieldNames": False,
        "fields": resources[path]["fields"]
    }
    delete_template = {
        "operation": "destroy",
        "resourceName": path,
        "uniqueName": path
    }
    update_template = {
        "operation": "update",
        "resourceName": path,
        "uniqueName": path,
        "excludeFieldNames": False,
        "fields": resources[path]["fields"]
    }
    def generate(entities):
        # Generator so Flask streams results; one session serves the batch.
        yield "["
        with session_factory.make_session() as s:
            authenticate(s)
            for index, entity in enumerate(entities):
                if index > 0:
                    yield ","
                # Shallow copy so the template merge does not mutate input.
                post_entity = entity.copy()
                if "_deleted" in entity and entity["_deleted"] is True:
                    logger.info("Deleting entity: {0}!".format(entity["_id"]))
                    post_entity.update(delete_template)
                else:
                    if resources[path]["id_property_name"] in entity:
                        logger.info("Updating entity: {0}!".format(entity["_id"]))
                        post_entity.update(update_template)
                    else:
                        logger.info("Creating entity: {0}!".format(entity["_id"]))
                        post_entity.update(create_template)
                response = s.request("POST", request_url, json=post_entity, headers=headers)
                if response.status_code != 200:
                    logger.warning("An error occurred: {0}. {1}".format(response.reason, response.text))
                    raise Exception(response.reason + ": " + response.text)
                result = json.loads(response.text)
                yield json.dumps(result['success'])
        yield "]"
    response_data_generator = generate(request_data)
    response_data = response_data_generator
    return Response(response=response_data, mimetype="application/json")
if __name__ == '__main__':
    # Mount the Flask app on CherryPy and serve it in production mode.
    cherrypy.tree.graft(app, '/')
    populate_resources()
    cherrypy.config.update({
        'environment': 'production',
        'engine.autoreload_on': False,
        'log.screen': True,
        'server.socket_port': 5002,
        'server.socket_host': '0.0.0.0'
    })
    cherrypy.engine.start()
    # Keep the main thread alive until the engine stops.
    cherrypy.engine.block()
| true | true |
f7fd7221f5d5e2245608435c4131abf611610f3e | 648 | py | Python | src/toil/test/docs/scripts/tutorial_multiplejobs.py | thiagogenez/toil | b25e7d0616fef3aa9085a7d7d7ae6bdc257f2d92 | [
"Apache-2.0"
] | 6 | 2018-05-27T05:09:11.000Z | 2020-07-01T17:02:40.000Z | src/toil/test/docs/scripts/tutorial_multiplejobs.py | thiagogenez/toil | b25e7d0616fef3aa9085a7d7d7ae6bdc257f2d92 | [
"Apache-2.0"
] | 20 | 2021-10-07T08:31:41.000Z | 2022-03-01T17:38:13.000Z | src/toil/test/docs/scripts/tutorial_multiplejobs.py | thiagogenez/toil | b25e7d0616fef3aa9085a7d7d7ae6bdc257f2d92 | [
"Apache-2.0"
] | 1 | 2020-04-06T15:04:44.000Z | 2020-04-06T15:04:44.000Z | from toil.common import Toil
from toil.job import Job
def helloWorld(job, message, memory="2G", cores=2, disk="3G"):
    """Toil job function: write a greeting containing *message* to the job log.

    The memory/cores/disk keyword arguments declare the job's resource
    requirements to Toil; they are not used in the body.
    """
    greeting = "Hello world, I have a message: {}".format(message)
    job.log(greeting)
if __name__=="__main__":
    options = Job.Runner.getDefaultOptions("./toilWorkflowRun")
    options.logLevel = "INFO"
    options.clean = "always"
    # Build the job graph: j2 and j3 are children of j1 (run after j1),
    # j4 is a follow-on (runs after j1's children have completed).
    j1 = Job.wrapJobFn(helloWorld, "first")
    j2 = Job.wrapJobFn(helloWorld, "second or third")
    j3 = Job.wrapJobFn(helloWorld, "second or third")
    j4 = Job.wrapJobFn(helloWorld, "last")
    j1.addChild(j2)
    j1.addChild(j3)
    j1.addFollowOn(j4)
    # Launch the workflow with j1 as the root job.
    with Toil(options) as toil:
        toil.start(j1)
| 28.173913 | 64 | 0.669753 | from toil.common import Toil
from toil.job import Job
def helloWorld(job, message, memory="2G", cores=2, disk="3G"):
    """Toil job function: write a greeting containing *message* to the log.

    memory/cores/disk declare the job's resource requirements to Toil.
    """
    job.log("Hello world, I have a message: {}".format(message))
if __name__=="__main__":
    options = Job.Runner.getDefaultOptions("./toilWorkflowRun")
    options.logLevel = "INFO"
    options.clean = "always"
    # j2 and j3 are children of j1; j4 is a follow-on that runs after the
    # children have completed.
    j1 = Job.wrapJobFn(helloWorld, "first")
    j2 = Job.wrapJobFn(helloWorld, "second or third")
    j3 = Job.wrapJobFn(helloWorld, "second or third")
    j4 = Job.wrapJobFn(helloWorld, "last")
    j1.addChild(j2)
    j1.addChild(j3)
    j1.addFollowOn(j4)
    # Launch the workflow with j1 as the root job.
    with Toil(options) as toil:
        toil.start(j1)
| true | true |
f7fd737df6aafffe228a493760504b52168af3dc | 1,061 | py | Python | google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/services/services/domain_category_service/transports/__init__.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 7 | 2021-02-21T10:39:41.000Z | 2021-12-07T07:31:28.000Z | google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/services/services/domain_category_service/transports/__init__.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 6 | 2021-02-02T23:46:11.000Z | 2021-11-15T01:46:02.000Z | google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/services/services/domain_category_service/transports/__init__.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 4 | 2021-01-28T23:25:45.000Z | 2021-08-30T01:55:16.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import DomainCategoryServiceTransport
from .grpc import DomainCategoryServiceGrpcTransport
# Compile a registry of transports.
# Maps a transport name (currently only "grpc") to its transport class so
# client code can look transports up by name.
_transport_registry = OrderedDict() # type: Dict[str, Type[DomainCategoryServiceTransport]]
_transport_registry['grpc'] = DomainCategoryServiceGrpcTransport
# Public names re-exported by this package.
__all__ = (
    'DomainCategoryServiceTransport',
    'DomainCategoryServiceGrpcTransport',
)
| 33.15625 | 92 | 0.779453 |
from collections import OrderedDict
from typing import Dict, Type
from .base import DomainCategoryServiceTransport
from .grpc import DomainCategoryServiceGrpcTransport
# Registry of available transports, keyed by name ("grpc" only).
_transport_registry = OrderedDict()
_transport_registry['grpc'] = DomainCategoryServiceGrpcTransport
# Public names re-exported by this package.
__all__ = (
    'DomainCategoryServiceTransport',
    'DomainCategoryServiceGrpcTransport',
)
| true | true |
f7fd739d690be03b7c3068d808b7aac7975af636 | 4,075 | py | Python | api_smartcondor/run_api.py | romanrdgz/smartcondor | c5419f8f3a402b441aec8a6fd38ddbc787f63e9e | [
"MIT"
] | 32 | 2016-08-01T12:20:34.000Z | 2022-03-16T16:33:35.000Z | api_smartcondor/run_api.py | ajmal017/smartcondor | c5419f8f3a402b441aec8a6fd38ddbc787f63e9e | [
"MIT"
] | null | null | null | api_smartcondor/run_api.py | ajmal017/smartcondor | c5419f8f3a402b441aec8a6fd38ddbc787f63e9e | [
"MIT"
] | 15 | 2017-02-25T16:35:44.000Z | 2021-08-31T14:34:11.000Z | # -*- coding: utf-8 -*-
from flask import Flask, jsonify
from flask_pymongo import PyMongo
from flask_restful import Api, Resource
from datetime import datetime
app = Flask(__name__)
app.config['MONGO_DBNAME'] = 'smartcondor'
mongo = PyMongo(app, config_prefix='MONGO')
APP_URL = 'http://127.0.0.1:5000'
class Underlying(Resource):
    """REST resource exposing underlying close prices / IV from MongoDB."""
    def get(self, ticker=None, startdate=None, enddate=None):
        '''
        Gets a list of underlying close prices and IV for given ticker and
        dates between requested limits
        '''
        # Collected query results; stays empty when no ticker is supplied.
        data = []
        error = None
        if ticker:
            # Check if date range is given
            if startdate and enddate:
                try:
                    # Check dates format
                    start_datetime = datetime.strptime(startdate, '%d%m%Y')
                    end_datetime = datetime.strptime(enddate, '%d%m%Y')
                    # Query the database
                    info = mongo.db.underlyings.find({
                        'ticker': ticker,
                        'timestamp': {
                            '$gte': start_datetime,
                            '$lte': end_datetime
                        }
                    })
                    # NOTE(review): `info` is a pymongo Cursor, which is always
                    # truthy, so this guard never filters empty result sets;
                    # appending the cursor itself (not its documents) likely
                    # does not jsonify cleanly — confirm intended behavior.
                    if info:
                        data.append(info)
                except ValueError:
                    error = 'Wrong date format: use \'ddmmyyyy\''
            else:
                # No dates given, return latest day info
                # NOTE(review): pymongo's Cursor.sort expects a key and
                # direction, e.g. sort('$natural', -1); passing a dict here
                # likely raises TypeError — verify against the pymongo
                # version in use.
                info = mongo.db.underlyings.find({
                    'ticker': ticker}).limit(1).sort({'$natural': -1})
                if info:
                    data.append(info)
        return jsonify({'status': ('nok' if error else 'ok'),
                        'response': (error if error else data)})
class OptionData(Resource):
    """REST resource exposing option-chain data from MongoDB."""
    def get(self, ticker=None, right=None, strike=None, expiry=None,
            samples=1):
        """Return the latest ``samples`` option documents matching the
        given ticker / right / strike / expiry filters."""
        data = []
        error = None
        # MongoDB filter built up from the validated route parameters.
        query = {}
        if ticker:
            query['ticker'] = ticker
        # Check if expiry date is given
        if expiry:
            try:
                # Check date format
                query['expiry'] = datetime.strptime(expiry, '%d%m%Y')
            except ValueError:
                error = 'Wrong date format: use \'ddmmyyyy\''
        # Check if right is given
        # NOTE(review): `right` defaults to None, so right.upper() raises
        # AttributeError when the parameter is omitted — guard for None.
        if (right.upper() == 'P') or (right.upper() == 'C'):
            query['right'] = right.upper()
        else:
            error = ('Wrong right format: use \'C\' for calls '
                     'or \'P\' for puts')
        # Check if strike is given and it is a positive number
        if strike:
            try:
                value = float(strike)
                if value < 0:
                    error = 'Wrong strike format: must be positive'
                else:
                    # NOTE(review): stores the raw string, not the parsed
                    # float — this will not match numeric strikes in Mongo.
                    query['strike'] = strike
            except ValueError:
                error = 'Wrong strike format: must be a number'
        # Query the database
        # NOTE(review): runs even when `error` is already set, and
        # sort(dict) is likely invalid for pymongo (expects key, direction).
        info = mongo.db.options.find(query).limit(samples).sort(
            {'$natural': -1})
        if info:
            data.append(info)
        return jsonify({'status': ('nok' if error else 'ok'),
                        'response': (error if error else data)})
# Route wiring: each resource is reachable with and without the optional
# trailing parameters.
# NOTE(review): flask_restful derives a default endpoint name from the
# resource class, so registering the same Resource class in two separate
# add_resource calls may raise a duplicate-endpoint error — consider passing
# all URL rules to a single add_resource call (it accepts multiple URLs).
api = Api(app)
api.add_resource(Underlying, '/underlying/<string:ticker>/')
api.add_resource(Underlying, '/underlying/<string:ticker>/'
                             '<string:startdate>/'
                             '<string:enddate>')
api.add_resource(OptionData, '/optiondata/<string:ticker>/'
                             '<string:right>/'
                             '<string:strike>/'
                             '<string:expiry>')
api.add_resource(OptionData, '/optiondata/<string:ticker>/'
                             '<string:right>/'
                             '<string:strike>/'
                             '<string:expiry>/'
                             '<int:samples>')
if __name__ == '__main__':
    app.run(debug=True)
| 36.061947 | 75 | 0.470675 |
from flask import Flask, jsonify
from flask_pymongo import PyMongo
from flask_restful import Api, Resource
from datetime import datetime
app = Flask(__name__)
app.config['MONGO_DBNAME'] = 'smartcondor'
mongo = PyMongo(app, config_prefix='MONGO')
APP_URL = 'http://127.0.0.1:5000'
class Underlying(Resource):
    """REST resource exposing underlying close prices / IV from MongoDB."""
    def get(self, ticker=None, startdate=None, enddate=None):
        """Return underlying documents for ``ticker``, optionally bounded by
        a ddmmyyyy start/end date range; latest document otherwise."""
        data = []
        error = None
        if ticker:
            if startdate and enddate:
                try:
                    start_datetime = datetime.strptime(startdate, '%d%m%Y')
                    end_datetime = datetime.strptime(enddate, '%d%m%Y')
                    info = mongo.db.underlyings.find({
                        'ticker': ticker,
                        'timestamp': {
                            '$gte': start_datetime,
                            '$lte': end_datetime
                        }
                    })
                    # NOTE(review): a pymongo Cursor is always truthy, and
                    # appending the cursor (not its documents) likely does
                    # not jsonify — confirm intended behavior.
                    if info:
                        data.append(info)
                except ValueError:
                    error = 'Wrong date format: use \'ddmmyyyy\''
            else:
                # NOTE(review): pymongo's Cursor.sort expects (key, direction),
                # e.g. sort('$natural', -1); a dict argument likely raises.
                info = mongo.db.underlyings.find({
                    'ticker': ticker}).limit(1).sort({'$natural': -1})
                if info:
                    data.append(info)
        return jsonify({'status': ('nok' if error else 'ok'),
                        'response': (error if error else data)})
class OptionData(Resource):
    """REST resource exposing option-chain data from MongoDB."""
    def get(self, ticker=None, right=None, strike=None, expiry=None,
            samples=1):
        """Return the latest ``samples`` option documents matching the
        given ticker / right / strike / expiry filters."""
        data = []
        error = None
        query = {}
        if ticker:
            query['ticker'] = ticker
        if expiry:
            try:
                query['expiry'] = datetime.strptime(expiry, '%d%m%Y')
            except ValueError:
                error = 'Wrong date format: use \'ddmmyyyy\''
        # NOTE(review): `right` defaults to None, so right.upper() raises
        # AttributeError when the parameter is omitted.
        if (right.upper() == 'P') or (right.upper() == 'C'):
            query['right'] = right.upper()
        else:
            error = ('Wrong right format: use \'C\' for calls '
                     'or \'P\' for puts')
        if strike:
            try:
                value = float(strike)
                if value < 0:
                    error = 'Wrong strike format: must be positive'
                else:
                    # NOTE(review): stores the raw string, not the float.
                    query['strike'] = strike
            except ValueError:
                error = 'Wrong strike format: must be a number'
        # NOTE(review): runs even when `error` is set; sort(dict) is likely
        # invalid for pymongo (expects key, direction).
        info = mongo.db.options.find(query).limit(samples).sort(
            {'$natural': -1})
        if info:
            data.append(info)
        return jsonify({'status': ('nok' if error else 'ok'),
                        'response': (error if error else data)})
# Route wiring: each resource is reachable with and without the optional
# trailing parameters (NOTE(review): duplicate registrations of the same
# Resource class may conflict on the default endpoint name).
api = Api(app)
api.add_resource(Underlying, '/underlying/<string:ticker>/')
api.add_resource(Underlying, '/underlying/<string:ticker>/'
                             '<string:startdate>/'
                             '<string:enddate>')
api.add_resource(OptionData, '/optiondata/<string:ticker>/'
                             '<string:right>/'
                             '<string:strike>/'
                             '<string:expiry>')
api.add_resource(OptionData, '/optiondata/<string:ticker>/'
                             '<string:right>/'
                             '<string:strike>/'
                             '<string:expiry>/'
                             '<int:samples>')
if __name__ == '__main__':
    app.run(debug=True)
| true | true |
f7fd75f55adda02c811649d715d877445fdfa5a3 | 3,434 | py | Python | ros/genpy/src/genpy/msg/_TestString.py | numberen/apollo-platform | 8f359c8d00dd4a98f56ec2276c5663cb6c100e47 | [
"Apache-2.0"
] | 742 | 2017-07-05T02:49:36.000Z | 2022-03-30T12:55:43.000Z | ros/genpy/src/genpy/msg/_TestString.py | numberen/apollo-platform | 8f359c8d00dd4a98f56ec2276c5663cb6c100e47 | [
"Apache-2.0"
] | 73 | 2017-07-06T12:50:51.000Z | 2022-03-07T08:07:07.000Z | ros/genpy/src/genpy/msg/_TestString.py | numberen/apollo-platform | 8f359c8d00dd4a98f56ec2276c5663cb6c100e47 | [
"Apache-2.0"
] | 425 | 2017-07-04T22:03:29.000Z | 2022-03-29T06:59:06.000Z | """autogenerated by genpy from genpy/TestString.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class TestString(genpy.Message):
  # Generated ROS message class (see the "autogenerated, do not edit" header):
  # wire format is a little-endian uint32 length prefix followed by UTF-8 bytes.
  _md5sum = "992ce8a1687cec8c8bd883ec73ca41d1"
  _type = "genpy/TestString"
  _has_header = False #flag to mark the presence of a Header object
  _full_text = """string data
"""
  __slots__ = ['data']
  _slot_types = ['string']

  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes.  You cannot mix in-order arguments and keyword arguments.

    The available fields are:
       data

    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(TestString, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.data is None:
        self.data = ''
    else:
      self.data = ''

  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types

  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      _x = self.data
      length = len(_x)
      # Python 2 compatibility: encode unicode to UTF-8 bytes before packing
      # (the `unicode` name only exists on Python 2; python3 short-circuits).
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      # '<I%ss': little-endian uint32 byte length, then the raw bytes.
      buff.write(struct.pack('<I%ss'%length, length, _x))
    except struct.error as se: self._check_types(se)
    except TypeError as te: self._check_types(te)

  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    # NOTE: the `str` parameter shadows the builtin; kept as generated.
    try:
      end = 0
      # Read the uint32 length prefix, then slice out that many bytes.
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.data = str[start:end].decode('utf-8')
      else:
        self.data = str[start:end]
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill

  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    # Identical to serialize() for this message: it has no array fields,
    # so the numpy module argument is unused.
    try:
      _x = self.data
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
    except struct.error as se: self._check_types(se)
    except TypeError as te: self._check_types(te)
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.data = str[start:end].decode('utf-8')
else:
self.data = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
| 28.616667 | 91 | 0.638614 | import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class TestString(genpy.Message):
_md5sum = "992ce8a1687cec8c8bd883ec73ca41d1"
_type = "genpy/TestString"
_has_header = False
_full_text = """string data
"""
__slots__ = ['data']
_slot_types = ['string']
def __init__(self, *args, **kwds):
if args or kwds:
super(TestString, self).__init__(*args, **kwds)
if self.data is None:
self.data = ''
else:
self.data = ''
def _get_types(self):
return self._slot_types
def serialize(self, buff):
try:
_x = self.data
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(se)
except TypeError as te: self._check_types(te)
def deserialize(self, str):
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.data = str[start:end].decode('utf-8')
else:
self.data = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e)
def serialize_numpy(self, buff, numpy):
try:
_x = self.data
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(se)
except TypeError as te: self._check_types(te)
def deserialize_numpy(self, str, numpy):
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.data = str[start:end].decode('utf-8')
else:
self.data = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e)
_struct_I = genpy.struct_I
| true | true |
f7fd7664a6b04cec977f05e363ee860669c9ba0f | 5,622 | py | Python | detection/prepare_train_data.py | sunset768541/ctw-baseline | f303f9ae0477ef2aa1fe56426a28e0ed9a0a89f8 | [
"MIT"
] | 333 | 2018-03-09T12:50:49.000Z | 2022-02-10T04:02:50.000Z | detection/prepare_train_data.py | sunset768541/ctw-baseline | f303f9ae0477ef2aa1fe56426a28e0ed9a0a89f8 | [
"MIT"
] | 43 | 2018-03-19T08:11:28.000Z | 2021-03-03T08:19:35.000Z | detection/prepare_train_data.py | sunset768541/ctw-baseline | f303f9ae0477ef2aa1fe56426a28e0ed9a0a89f8 | [
"MIT"
] | 105 | 2018-03-15T10:17:50.000Z | 2021-11-08T02:46:26.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import cv2
import darknet_tools
import json
import numpy as np
import os
import settings
from jinja2 import Template
from pythonapi import anno_tools, common_tools
from six.moves import queue
def write_darknet_data():
    """Write the darknet ``.data`` configuration file.

    Creates the backup/results output directories if missing, then writes
    one ``key = value`` line per setting, sorted by key.
    """
    for directory in (settings.DARKNET_BACKUP_DIR, settings.DARKNET_RESULTS_DIR):
        if not os.path.exists(directory):
            os.makedirs(directory)
    entries = {
        'classes': settings.NUM_CHAR_CATES + 1,
        'train': settings.DARKNET_TRAIN_LIST,
        'valid': settings.DARKNET_VALID_LIST,
        'names': settings.DARKNET_NAMES,
        'backup': settings.DARKNET_BACKUP_DIR,
        'results': settings.DARKNET_RESULTS_DIR,
    }
    lines = ['{} = {}\n'.format(key, value)
             for key, value in sorted(entries.items())]
    with open(settings.DARKNET_DATA, 'w') as f:
        f.writelines(lines)
def write_darknet_cfg():
    """Render the yolo network config from its Jinja2 template file."""
    with open('yolo-chinese.template.cfg') as template_file:
        template = Template(template_file.read())
    context = {
        'testing': False,
        'image_size': settings.TRAIN_IMAGE_SIZE,
        'classes': settings.NUM_CHAR_CATES + 1,
        'filters': 25 + 5 * (settings.NUM_CHAR_CATES + 1),
    }
    with open(settings.DARKNET_CFG, 'w') as cfg_file:
        cfg_file.write(template.render(context))
        cfg_file.write('\n')
def write_darknet_names():
    """Write the class-name list: one numeric id per line, 0..NUM_CHAR_CATES."""
    with open(settings.DARKNET_NAMES, 'w') as f:
        f.writelines('{}\n'.format(cate_id)
                     for cate_id in range(settings.NUM_CHAR_CATES + 1))
def crop_train_images():
    """Crop trainval images into overlapping tiles and write darknet labels.

    Pass 1 (multithreaded) writes the cropped jpg/txt files; pass 2
    (single-threaded, no image I/O) rebuilds the list of crops that
    contain at least one label and writes DARKNET_TRAIN_LIST.
    """
    imshape = (2048, 2048, 3)  # every source image is asserted to be this shape
    cropshape = (settings.TRAIN_IMAGE_SIZE // 4, settings.TRAIN_IMAGE_SIZE // 4)
    cropoverlap = (16, 16)

    with open(settings.CATES) as f:
        cates = json.load(f)
    # map character text -> category id
    text2cate = {c['text']: c['cate_id'] for c in cates}

    if not os.path.isdir(settings.TRAINVAL_CROPPED_DIR):
        os.makedirs(settings.TRAINVAL_CROPPED_DIR)
    # one json annotation per line, train split followed by val split
    with open(settings.TRAIN) as f:
        lines = f.read().splitlines()
    with open(settings.VAL) as f:
        lines += f.read().splitlines()

    def in_image_ratio(bbox):  # bbox is in darknet bbox representation
        # fraction of the (normalized) box area that lies inside the crop
        xmid, ymid, w, h = bbox

        def cutto01(x):
            return max(0, min(1, x))
        Acut = (cutto01(xmid + w / 2) - cutto01(xmid - w / 2)) * (cutto01(ymid + h / 2) - cutto01(ymid - h / 2))
        return Acut / (w * h)

    def crop_once(line, write_images):
        # Process one annotation line; returns the file names of crops that
        # received at least one label. Only touches disk if write_images.
        anno = json.loads(line.strip())
        image_id = anno['image_id']
        all = []
        for char in anno_tools.each_char(anno):
            if not char['is_chinese']:
                continue
            cate_id = text2cate[char['text']]
            # rare characters are collapsed into one catch-all category
            if cate_id >= settings.NUM_CHAR_CATES:
                cate_id = settings.NUM_CHAR_CATES
            all.append((char['adjusted_bbox'], cate_id))
        if write_images:
            image = cv2.imread(os.path.join(settings.TRAINVAL_IMAGE_DIR, anno['file_name']))
            assert image.shape == imshape
            # grey out ignored regions so they never look like characters
            for o in anno['ignore']:
                poly = (np.array(o['polygon'])).astype(np.int32)
                cv2.fillConvexPoly(image, poly, (128, 128, 128))
        cropped_list = list()
        for o in darknet_tools.get_crop_bboxes(imshape, cropshape, cropoverlap):
            xlo = o['xlo']
            xhi = xlo + cropshape[1]
            ylo = o['ylo']
            yhi = ylo + cropshape[0]
            labels = []
            for bbox, cate_id in all:
                x, y, w, h = bbox
                if x > xhi or x + w < xlo or y > yhi or y + h < ylo:
                    continue  # box does not intersect this crop at all
                # convert to darknet representation, normalized to the crop
                bbox = ((x + w / 2 - xlo) / cropshape[1], (y + h / 2 - ylo) / cropshape[0], w / cropshape[1], h / cropshape[0])
                # keep only boxes with more than half their area inside the crop
                if 0.5 < in_image_ratio(bbox):
                    labels.append((bbox, cate_id))
            if 0 < len(labels):
                basename = '{}_{}'.format(image_id, o['name'])
                cropped_file_name = os.path.join(settings.TRAINVAL_CROPPED_DIR, '{}.jpg'.format(basename))
                cropped_list.append(cropped_file_name)
                if write_images:
                    cropped = image[ylo:yhi, xlo:xhi]
                    cv2.imwrite(cropped_file_name, cropped)
                    # darknet label format: "<cate_id> <xmid> <ymid> <w> <h>"
                    with open(os.path.join(settings.TRAINVAL_CROPPED_DIR, '{}.txt'.format(basename)), 'w') as f:
                        for bbox, cate_id in labels:
                            f.write('%d %f %f %f %f\n' % ((cate_id, ) + bbox))
        return cropped_list

    # shared progress counter; the Queue doubles as a lock around it
    q_i = queue.Queue()
    q_i.put(0)

    def foo(*args):
        i = q_i.get()
        if i % 100 == 0:
            print('crop trainval', i, '/', len(lines))
        q_i.put(i + 1)
        crop_once(*args)
    common_tools.multithreaded(foo, [(line, True) for line in lines], num_thread=4)

    # second pass: recompute the crop list without rewriting any images
    trainset = []
    for i, line in enumerate(lines):
        if i % 1000 == 0:
            print('list trainval', i, '/', len(lines))
        trainset += crop_once(line, False)
    with open(settings.DARKNET_TRAIN_LIST, 'w') as f:
        for file_name in trainset:
            f.write(file_name)
            f.write('\n')
def main():
    """Prepare all darknet training inputs: data file, cfg, names, and crops."""
    write_darknet_data()
    write_darknet_cfg()
    write_darknet_names()
    # sanity-check the pretrained weights by exact file size before the
    # (expensive) cropping step
    assert os.path.isfile(settings.DARKNET_PRETRAIN) and 79327120 == os.path.getsize(settings.DARKNET_PRETRAIN), \
        'please download {} to {}'.format('https://pjreddie.com/media/files/darknet19_448.conv.23', settings.DARKNET_PRETRAIN)
    crop_train_images()
main()
| 35.808917 | 130 | 0.583422 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import cv2
import darknet_tools
import json
import numpy as np
import os
import settings
from jinja2 import Template
from pythonapi import anno_tools, common_tools
from six.moves import queue
def write_darknet_data():
if not os.path.exists(settings.DARKNET_BACKUP_DIR):
os.makedirs(settings.DARKNET_BACKUP_DIR)
if not os.path.exists(settings.DARKNET_RESULTS_DIR):
os.makedirs(settings.DARKNET_RESULTS_DIR)
data = {
'classes': settings.NUM_CHAR_CATES + 1,
'train': settings.DARKNET_TRAIN_LIST,
'valid': settings.DARKNET_VALID_LIST,
'names': settings.DARKNET_NAMES,
'backup': settings.DARKNET_BACKUP_DIR,
'results': settings.DARKNET_RESULTS_DIR,
}
with open(settings.DARKNET_DATA, 'w') as f:
for k, v in sorted(data.items()):
f.write('{} = {}\n'.format(k, v))
def write_darknet_cfg():
with open('yolo-chinese.template.cfg') as f:
template = Template(f.read())
with open(settings.DARKNET_CFG, 'w') as f:
f.write(template.render({
'testing': False,
'image_size': settings.TRAIN_IMAGE_SIZE,
'classes': settings.NUM_CHAR_CATES + 1,
'filters': 25 + 5 * (settings.NUM_CHAR_CATES + 1),
}))
f.write('\n')
def write_darknet_names():
with open(settings.DARKNET_NAMES, 'w') as f:
for i in range(settings.NUM_CHAR_CATES + 1):
f.write('{}\n'.format(i))
def crop_train_images():
imshape = (2048, 2048, 3)
cropshape = (settings.TRAIN_IMAGE_SIZE // 4, settings.TRAIN_IMAGE_SIZE // 4)
cropoverlap = (16, 16)
with open(settings.CATES) as f:
cates = json.load(f)
text2cate = {c['text']: c['cate_id'] for c in cates}
if not os.path.isdir(settings.TRAINVAL_CROPPED_DIR):
os.makedirs(settings.TRAINVAL_CROPPED_DIR)
with open(settings.TRAIN) as f:
lines = f.read().splitlines()
with open(settings.VAL) as f:
lines += f.read().splitlines()
def in_image_ratio(bbox):
xmid, ymid, w, h = bbox
def cutto01(x):
return max(0, min(1, x))
Acut = (cutto01(xmid + w / 2) - cutto01(xmid - w / 2)) * (cutto01(ymid + h / 2) - cutto01(ymid - h / 2))
return Acut / (w * h)
def crop_once(line, write_images):
anno = json.loads(line.strip())
image_id = anno['image_id']
all = []
for char in anno_tools.each_char(anno):
if not char['is_chinese']:
continue
cate_id = text2cate[char['text']]
if cate_id >= settings.NUM_CHAR_CATES:
cate_id = settings.NUM_CHAR_CATES
all.append((char['adjusted_bbox'], cate_id))
if write_images:
image = cv2.imread(os.path.join(settings.TRAINVAL_IMAGE_DIR, anno['file_name']))
assert image.shape == imshape
for o in anno['ignore']:
poly = (np.array(o['polygon'])).astype(np.int32)
cv2.fillConvexPoly(image, poly, (128, 128, 128))
cropped_list = list()
for o in darknet_tools.get_crop_bboxes(imshape, cropshape, cropoverlap):
xlo = o['xlo']
xhi = xlo + cropshape[1]
ylo = o['ylo']
yhi = ylo + cropshape[0]
labels = []
for bbox, cate_id in all:
x, y, w, h = bbox
if x > xhi or x + w < xlo or y > yhi or y + h < ylo:
continue
bbox = ((x + w / 2 - xlo) / cropshape[1], (y + h / 2 - ylo) / cropshape[0], w / cropshape[1], h / cropshape[0])
if 0.5 < in_image_ratio(bbox):
labels.append((bbox, cate_id))
if 0 < len(labels):
basename = '{}_{}'.format(image_id, o['name'])
cropped_file_name = os.path.join(settings.TRAINVAL_CROPPED_DIR, '{}.jpg'.format(basename))
cropped_list.append(cropped_file_name)
if write_images:
cropped = image[ylo:yhi, xlo:xhi]
cv2.imwrite(cropped_file_name, cropped)
with open(os.path.join(settings.TRAINVAL_CROPPED_DIR, '{}.txt'.format(basename)), 'w') as f:
for bbox, cate_id in labels:
f.write('%d %f %f %f %f\n' % ((cate_id, ) + bbox))
return cropped_list
q_i = queue.Queue()
q_i.put(0)
def foo(*args):
i = q_i.get()
if i % 100 == 0:
print('crop trainval', i, '/', len(lines))
q_i.put(i + 1)
crop_once(*args)
common_tools.multithreaded(foo, [(line, True) for line in lines], num_thread=4)
trainset = []
for i, line in enumerate(lines):
if i % 1000 == 0:
print('list trainval', i, '/', len(lines))
trainset += crop_once(line, False)
with open(settings.DARKNET_TRAIN_LIST, 'w') as f:
for file_name in trainset:
f.write(file_name)
f.write('\n')
def main():
write_darknet_data()
write_darknet_cfg()
write_darknet_names()
assert os.path.isfile(settings.DARKNET_PRETRAIN) and 79327120 == os.path.getsize(settings.DARKNET_PRETRAIN), \
'please download {} to {}'.format('https://pjreddie.com/media/files/darknet19_448.conv.23', settings.DARKNET_PRETRAIN)
crop_train_images()
if __name__ == '__main__':
main()
| true | true |
f7fd76a81f5709a748b3dc6abfcd848bbf798ff9 | 5,334 | py | Python | inter_view/utils.py | fmi-basel/inter-view | e7ebf616ac15eddf1e0d222930750fb4b113d9fa | [
"MIT"
] | null | null | null | inter_view/utils.py | fmi-basel/inter-view | e7ebf616ac15eddf1e0d222930750fb4b113d9fa | [
"MIT"
] | null | null | null | inter_view/utils.py | fmi-basel/inter-view | e7ebf616ac15eddf1e0d222930750fb4b113d9fa | [
"MIT"
] | null | null | null | import numpy as np
import holoviews as hv
hv.extension('bokeh', logo=False)
import param
import panel as pn
import matplotlib.pyplot as plt
from holoviews.operation.datashader import rasterize
from bokeh.models import WheelZoomTool
from holoviews.core import Store
valid_rgb_options = [
k for group in ['style', 'plot', 'norm', 'output']
for k in Store.options(backend='bokeh')['RGB'][group].allowed_keywords
]
valid_rgb_options.remove(
'alpha') # remove option set by sliders on individual channels
# TODO move to color module
import colorcet as cc
# repeat colormap to handle unint16 values
# needed to handle non continuous labels because colormap is stretched (and not cycled)
label_cmap = cc.b_glasbey_hv * 256
# bokeh hook workaround --> remove if holoviews finally handle this
def zoom_bounds_hook(bounds):
    '''Returns a bokeh plot hook restricting zooming out to the given
    (left, bottom, right, top) bounds.'''
    left, bottom, right, top = bounds

    def _hook(plot, element):
        fig = plot.state
        fig.x_range.bounds = (left, right)
        fig.y_range.bounds = (bottom, top)
        # without this, wheel-zoom keeps the focus point and can escape bounds
        fig.select(WheelZoomTool).maintain_focus = False

    return _hook
def get_img_dims_coords(img, spacing=1):
    """Return axis names and physical coordinates for a 2D/3D image.

    Axis names are 'x', 'y' (and 'z' for 3D); coordinate arrays follow the
    reversed numpy shape order, each scaled by the per-axis pixel/voxel
    spacing (scalar spacing is broadcast to all axes).
    """
    dim_names = ['x', 'y', 'z'][:img.ndim]
    spacings = np.broadcast_to(np.array(spacing), img.ndim)
    coords = []
    for size, step in zip(img.shape[::-1], spacings[::-1]):
        coords.append(np.arange(size) * step)
    return dim_names, coords
def image_to_hvds(img, label, spacing=1):
    '''Converts a 2D/3D image to a holoview dataset to facilitate
    plotting with the correct axis bounds/scaling.

    :param img: 2D/3D numpy array
    :param label: label for the resulting hv.Dataset
    :param spacing: scalar or per-axis pixel/voxel size
    '''
    img_dims, img_coords = get_img_dims_coords(img, spacing)
    return hv.Dataset((*(img_coords), img),
                      kdims=img_dims,
                      vdims=['intensity'],
                      label=label)
class HvDataset(param.Parameterized):
    '''Converts a numpy image to holoviews Dataset dynamic map.

    Setting ``img`` bumps an internal counter that the dynamic map depends
    on; depending on the counter instead of the array itself avoids the
    hash-computation overhead of array-valued dependencies.
    '''

    # NOTE(review): 'iamge' typo in the doc string below — fix upstream
    img = param.Array(np.zeros((2, 2), dtype=np.uint8),
                      doc='numpy iamge array',
                      precedence=-1)
    label = param.String('channel',
                         doc='label for the generated hv.Dataset',
                         precedence=-1)
    spacing = param.Parameter((1, ), doc='pixel/voxel size', precedence=-1)
    # bumped on every img update; used as a cheap change signal for dmap
    _update_counter = param.Integer(0, precedence=-1)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._broadcast_spacing()

    @param.depends()
    def _broadcast_spacing(self):
        # normalize spacing to a tuple with one entry per image dimension
        self.spacing = tuple(
            np.broadcast_to(np.array(self.spacing), self.img.ndim).tolist())

    @param.depends('img', watch=True)
    def _update_img(self):
        self._broadcast_spacing()
        self._update_counter += 1

    # NOTE dynamic map with dependency directly on array is less responsive (hash computation overhead?)
    @param.depends('_update_counter', 'label')
    def _build_dataset(self):
        return image_to_hvds(self.img, self.label, self.spacing)

    @param.depends('spacing')
    def dmap(self):
        # cache_size=1: keep only the latest dataset build
        return hv.DynamicMap(self._build_dataset, cache_size=1)
def make_composite(imgs, cmaps, mode='max'):
    '''Embeds colormaps and blends greyscale input images into an rgb image.

    :param imgs: sequence of greyscale images (one per channel)
    :param cmaps: matplotlib colormap names, one per image
    :param mode: blending mode, 'max' or 'mean'
    :returns: blended uint8 rgb image
    :raises NotImplementedError: if ``mode`` is not a known blending mode
    '''
    _modes = {'max': np.max, 'mean': np.mean}

    blending_fun = _modes.get(mode, None)
    if blending_fun is None:
        # fixed typo in error message ("note implemented" -> "not implemented")
        raise NotImplementedError(
            'blending mode not implemented: {}'.format(mode))

    # apply each colormap and drop the alpha channel, scaling to 8 bit
    imgs = [(plt.get_cmap(name)(img)[..., :-1] * 255).astype(np.uint8)
            for img, name in zip(imgs, cmaps)]

    # blend channels along a new leading axis
    blended_img = blending_fun(np.asarray(imgs), axis=0)
    return np.rint(blended_img).astype(np.uint8)
def blend_overlay(elems):
    '''Transforms a hv.Overlay of hv.Image into a hv.RGB.

    Each channel's colormap and alpha are baked in, channels are blended
    with a max projection, and plot options valid for RGB elements are
    carried over from the first channel.
    '''
    if not isinstance(elems, hv.Overlay):
        # probably a single channel, do nothing
        return elems

    imgs = [e.dimension_values(2, flat=False) for e in elems]
    if imgs[0].dtype != np.uint8:
        raise ValueError(
            '8 bit images are expected to stack overlays, got {}'.format(
                imgs[0].dtype))

    # embed colormap, opacity and blend
    # Note somehow hv.RGB inverts the y axis but not hv.Image???
    cmaps = [e.opts.get().options['cmap'] for e in elems]
    alphas = [e.opts.get().options['alpha'] for e in elems]
    # pre-scale each channel by its opacity before blending
    imgs = [(a * img).astype(int) if a < 1.0 else img
            for a, img in zip(alphas, imgs)]
    rgb = make_composite(imgs, cmaps, mode='max')[::-1]  # flip y for hv.RGB

    xr = elems.range(0)
    yr = elems.range(1)
    bounds = (xr[1], yr[0], xr[0], yr[1])
    height, width = rgb.shape[:-1]  # NOTE(review): unused — candidate for removal

    # keep only options that hv.RGB accepts (taken from the first channel)
    options = list(elems)[0].opts.get().options
    options = {
        key: val
        for key, val in options.items() if key in valid_rgb_options
    }

    return hv.RGB(rgb, bounds=bounds, group='composite').opts(**options)
def split_element(element, axis, values=None):
    '''Applies element.select to all values along axis and returns the result as a tuple.

    Dimension values can also be specified explicitly to select a subset or
    control the order. Each returned element is re-indexed without ``axis``
    and relabeled with its value.
    '''
    # key dimensions remaining after the split axis is removed
    new_dims_name = [d.name for d in element.kdims if d.name != axis]
    if values is None:
        values = element.dimension_values(axis, expanded=False)
    return tuple(
        element.select(**{
            axis: val
        }).reindex(new_dims_name).relabel(val) for val in values)
| 32.327273 | 104 | 0.646419 | import numpy as np
import holoviews as hv
hv.extension('bokeh', logo=False)
import param
import panel as pn
import matplotlib.pyplot as plt
from holoviews.operation.datashader import rasterize
from bokeh.models import WheelZoomTool
from holoviews.core import Store
valid_rgb_options = [
k for group in ['style', 'plot', 'norm', 'output']
for k in Store.options(backend='bokeh')['RGB'][group].allowed_keywords
]
valid_rgb_options.remove(
'alpha')
import colorcet as cc
label_cmap = cc.b_glasbey_hv * 256
def zoom_bounds_hook(bounds):
def _hook(plot, element):
plot.state.x_range.bounds = (bounds[0], bounds[2])
plot.state.y_range.bounds = (bounds[1], bounds[3])
plot.state.select(WheelZoomTool).maintain_focus = False
return _hook
def get_img_dims_coords(img, spacing=1):
img_dims = ['x', 'y', 'z'][:img.ndim]
spacing = np.broadcast_to(np.array(spacing), img.ndim)
img_coords = [
np.arange(d) * s for d, s in zip(img.shape[::-1], spacing[::-1])
]
return img_dims, img_coords
def image_to_hvds(img, label, spacing=1):
img_dims, img_coords = get_img_dims_coords(img, spacing)
return hv.Dataset((*(img_coords), img),
kdims=img_dims,
vdims=['intensity'],
label=label)
class HvDataset(param.Parameterized):
img = param.Array(np.zeros((2, 2), dtype=np.uint8),
doc='numpy iamge array',
precedence=-1)
label = param.String('channel',
doc='label for the generated hv.Dataset',
precedence=-1)
spacing = param.Parameter((1, ), doc='pixel/voxel size', precedence=-1)
_update_counter = param.Integer(0, precedence=-1)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._broadcast_spacing()
@param.depends()
def _broadcast_spacing(self):
self.spacing = tuple(
np.broadcast_to(np.array(self.spacing), self.img.ndim).tolist())
@param.depends('img', watch=True)
def _update_img(self):
self._broadcast_spacing()
self._update_counter += 1
@param.depends('_update_counter', 'label')
def _build_dataset(self):
return image_to_hvds(self.img, self.label, self.spacing)
@param.depends('spacing')
def dmap(self):
return hv.DynamicMap(self._build_dataset, cache_size=1)
def make_composite(imgs, cmaps, mode='max'):
_modes = {'max': np.max, 'mean': np.mean}
blending_fun = _modes.get(mode, None)
if blending_fun is None:
raise NotImplementedError(
'blending mode note implemented: {}'.format(mode))
imgs = [(plt.get_cmap(name)(img)[..., :-1] * 255).astype(np.uint8)
for img, name in zip(imgs, cmaps)]
blended_img = blending_fun(np.asarray(imgs), axis=0)
return np.rint(blended_img).astype(np.uint8)
def blend_overlay(elems):
if not isinstance(elems, hv.Overlay):
return elems
imgs = [e.dimension_values(2, flat=False) for e in elems]
if imgs[0].dtype != np.uint8:
raise ValueError(
'8 bit images are expected to stack overlays, got {}'.format(
imgs[0].dtype))
cmaps = [e.opts.get().options['cmap'] for e in elems]
alphas = [e.opts.get().options['alpha'] for e in elems]
imgs = [(a * img).astype(int) if a < 1.0 else img
for a, img in zip(alphas, imgs)]
rgb = make_composite(imgs, cmaps, mode='max')[::-1]
xr = elems.range(0)
yr = elems.range(1)
bounds = (xr[1], yr[0], xr[0], yr[1])
height, width = rgb.shape[:-1]
options = list(elems)[0].opts.get().options
options = {
key: val
for key, val in options.items() if key in valid_rgb_options
}
return hv.RGB(rgb, bounds=bounds, group='composite').opts(**options)
def split_element(element, axis, values=None):
new_dims_name = [d.name for d in element.kdims if d.name != axis]
if values is None:
values = element.dimension_values(axis, expanded=False)
return tuple(
element.select(**{
axis: val
}).reindex(new_dims_name).relabel(val) for val in values)
| true | true |
f7fd76d82ae101a6cb068e44db53b457d191e583 | 44,328 | py | Python | lib/spack/spack/build_environment.py | klevzoff/spack | 396936d24173254ecf4148bc460702185e4c99e5 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1 | 2020-12-28T14:38:41.000Z | 2020-12-28T14:38:41.000Z | lib/spack/spack/build_environment.py | klevzoff/spack | 396936d24173254ecf4148bc460702185e4c99e5 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 17 | 2019-03-21T15:54:00.000Z | 2022-03-29T19:34:28.000Z | lib/spack/spack/build_environment.py | klevzoff/spack | 396936d24173254ecf4148bc460702185e4c99e5 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2021-04-07T18:27:09.000Z | 2022-03-31T22:52:38.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""
This module contains all routines related to setting up the package
build environment. All of this is set up by package.py just before
install() is called.
There are two parts to the build environment:
1. Python build environment (i.e. install() method)
This is how things are set up when install() is called. Spack
takes advantage of each package being in its own module by adding a
bunch of command-like functions (like configure(), make(), etc.) in
the package's module scope. Ths allows package writers to call
them all directly in Package.install() without writing 'self.'
everywhere. No, this isn't Pythonic. Yes, it makes the code more
readable and more like the shell script from which someone is
likely porting.
2. Build execution environment
This is the set of environment variables, like PATH, CC, CXX,
etc. that control the build. There are also a number of
environment variables used to pass information (like RPATHs and
other information about dependencies) to Spack's compiler wrappers.
All of these env vars are also set up here.
Skimming this module is a nice way to get acquainted with the types of
calls you can make from within the install() function.
"""
import inspect
import re
import multiprocessing
import os
import shutil
import sys
import traceback
import types
from six import StringIO
import llnl.util.tty as tty
from llnl.util.tty.color import cescape, colorize
from llnl.util.filesystem import mkdirp, install, install_tree
from llnl.util.lang import dedupe
from llnl.util.tty.log import MultiProcessFd
import spack.build_systems.cmake
import spack.build_systems.meson
import spack.config
import spack.main
import spack.paths
import spack.package
import spack.repo
import spack.schema.environment
import spack.store
import spack.install_test
import spack.subprocess_context
import spack.architecture as arch
import spack.util.path
from spack.util.string import plural
from spack.util.environment import (
env_flag, filter_system_paths, get_path, is_system_path,
EnvironmentModifications, validate, preserve_environment)
from spack.util.environment import system_dirs
from spack.error import NoLibrariesError, NoHeadersError
from spack.util.executable import Executable
from spack.util.module_cmd import load_module, path_from_modules, module
from spack.util.log_parse import parse_log_events, make_log_context
#
# This can be set by the user to globally disable parallel builds.
#
SPACK_NO_PARALLEL_MAKE = 'SPACK_NO_PARALLEL_MAKE'
#
# These environment variables are set by
# set_build_environment_variables and used to pass parameters to
# Spack's compiler wrappers.
#
SPACK_ENV_PATH = 'SPACK_ENV_PATH'
SPACK_INCLUDE_DIRS = 'SPACK_INCLUDE_DIRS'
SPACK_LINK_DIRS = 'SPACK_LINK_DIRS'
SPACK_RPATH_DIRS = 'SPACK_RPATH_DIRS'
SPACK_RPATH_DEPS = 'SPACK_RPATH_DEPS'
SPACK_LINK_DEPS = 'SPACK_LINK_DEPS'
SPACK_PREFIX = 'SPACK_PREFIX'
SPACK_INSTALL = 'SPACK_INSTALL'
SPACK_DEBUG = 'SPACK_DEBUG'
SPACK_SHORT_SPEC = 'SPACK_SHORT_SPEC'
SPACK_DEBUG_LOG_ID = 'SPACK_DEBUG_LOG_ID'
SPACK_DEBUG_LOG_DIR = 'SPACK_DEBUG_LOG_DIR'
SPACK_CCACHE_BINARY = 'SPACK_CCACHE_BINARY'
SPACK_SYSTEM_DIRS = 'SPACK_SYSTEM_DIRS'
# Platform-specific library suffix.
dso_suffix = 'dylib' if sys.platform == 'darwin' else 'so'
class MakeExecutable(Executable):
    """Special callable executable object for make so the user can specify
    parallelism options on a per-invocation basis.  Specifying
    'parallel' to the call will override whatever the package's
    global setting is, so you can either default to true or false and
    override particular calls. Specifying 'jobs_env' to a particular
    call will name an environment variable which will be set to the
    parallelism level (without affecting the normal invocation with
    -j).

    Note that if the SPACK_NO_PARALLEL_MAKE env var is set it overrides
    everything.
    """

    def __init__(self, name, jobs):
        # jobs: the default -j parallelism level for this make
        super(MakeExecutable, self).__init__(name)
        self.jobs = jobs

    def __call__(self, *args, **kwargs):
        """parallel, and jobs_env from kwargs are swallowed and used here;
        remaining arguments are passed through to the superclass.
        """
        # the global kill-switch always wins over per-call 'parallel'
        disable = env_flag(SPACK_NO_PARALLEL_MAKE)
        parallel = (not disable) and kwargs.pop('parallel', self.jobs > 1)

        if parallel:
            args = ('-j{0}'.format(self.jobs),) + args
            jobs_env = kwargs.pop('jobs_env', None)
            if jobs_env:
                # Caller wants us to set an environment variable to
                # control the parallelism.
                kwargs['extra_env'] = {jobs_env: str(self.jobs)}

        return super(MakeExecutable, self).__call__(*args, **kwargs)
def clean_environment():
    """Sanitize the process environment for a build.

    Unsets variables that could make builds pick up unintended external
    dependencies, and applies the modifications immediately (unlike the
    other setup functions) so later module loads are not clobbered.
    """
    # Stuff in here sanitizes the build environment to eliminate
    # anything the user has set that may interfere. We apply it immediately
    # unlike the other functions so it doesn't overwrite what the modules load.
    env = EnvironmentModifications()

    # Remove these vars from the environment during build because they
    # can affect how some packages find libraries.  We want to make
    # sure that builds never pull in unintended external dependencies.
    env.unset('LD_LIBRARY_PATH')
    env.unset('LD_RUN_PATH')
    env.unset('DYLD_LIBRARY_PATH')
    env.unset('DYLD_FALLBACK_LIBRARY_PATH')

    # These vars affect how the compiler finds libraries and include dirs.
    env.unset('LIBRARY_PATH')
    env.unset('CPATH')
    env.unset('C_INCLUDE_PATH')
    env.unset('CPLUS_INCLUDE_PATH')
    env.unset('OBJC_INCLUDE_PATH')

    # On Cray "cluster" systems, unset CRAY_LD_LIBRARY_PATH to avoid
    # interference with Spack dependencies.
    # CNL requires these variables to be set (or at least some of them,
    # depending on the CNL version).
    hostarch = arch.Arch(arch.platform(), 'default_os', 'default_target')
    on_cray = str(hostarch.platform) == 'cray'
    using_cnl = re.match(r'cnl\d+', str(hostarch.os))
    if on_cray and not using_cnl:
        env.unset('CRAY_LD_LIBRARY_PATH')
        # any PKGCONF-related variable could leak host pkg-config search paths
        for varname in os.environ.keys():
            if 'PKGCONF' in varname:
                env.unset(varname)

    # Unset the following variables because they can affect installation of
    # Autotools and CMake packages.
    build_system_vars = [
        'CC', 'CFLAGS', 'CPP', 'CPPFLAGS',  # C variables
        'CXX', 'CCC', 'CXXFLAGS', 'CXXCPP',  # C++ variables
        'F77', 'FFLAGS', 'FLIBS',  # Fortran77 variables
        'FC', 'FCFLAGS', 'FCLIBS',  # Fortran variables
        'LDFLAGS', 'LIBS'  # linker variables
    ]
    for v in build_system_vars:
        env.unset(v)

    # Unset mpi environment vars. These flags should only be set by
    # mpi providers for packages with mpi dependencies
    mpi_vars = [
        'MPICC', 'MPICXX', 'MPIFC', 'MPIF77', 'MPIF90'
    ]
    for v in mpi_vars:
        env.unset(v)

    build_lang = spack.config.get('config:build_language')
    if build_lang:
        # Override language-related variables. This can be used to force
        # English compiler messages etc., which allows parse_log_events to
        # show useful matches.
        env.set('LC_ALL', build_lang)

    # Remove any macports installs from the PATH. The macports ld can
    # cause conflicts with the built-in linker on el capitan. Solves
    # assembler issues, e.g.:
    #    suffix or operands invalid for `movq'"
    path = get_path('PATH')
    for p in path:
        if '/macports/' in p:
            env.remove_path('PATH', p)

    env.apply_modifications()
def set_compiler_environment_variables(pkg, env):
    """Set variables that point the build at Spack's compiler wrappers.

    Populates ``env`` with the SPACK_* variables consumed by the compiler
    wrapper scripts: real compiler paths, rpath/dtag flags, target
    optimization flags, and any compiler flags the package injects.

    Args:
        pkg: package being built; ``pkg.spec`` must be concrete
        env (EnvironmentModifications): collector for the modifications

    Returns:
        EnvironmentModifications: the same ``env`` passed in
    """
    assert pkg.spec.concrete
    compiler = pkg.compiler
    spec = pkg.spec
    # Make sure the executables for this compiler exist
    compiler.verify_executables()
    # Set compiler variables used by CMake and autotools
    assert all(key in compiler.link_paths for key in (
        'cc', 'cxx', 'f77', 'fc'))
    # Populate an object with the list of environment modifications
    # and return it
    # TODO : add additional kwargs for better diagnostics, like requestor,
    # ttyout, ttyerr, etc.
    link_dir = spack.paths.build_env_path
    # Set SPACK compiler variables so that our wrapper knows what to call.
    # CC/CXX/F77/FC point at the wrappers; SPACK_* carry the real compilers.
    if compiler.cc:
        env.set('SPACK_CC', compiler.cc)
        env.set('CC', os.path.join(link_dir, compiler.link_paths['cc']))
    if compiler.cxx:
        env.set('SPACK_CXX', compiler.cxx)
        env.set('CXX', os.path.join(link_dir, compiler.link_paths['cxx']))
    if compiler.f77:
        env.set('SPACK_F77', compiler.f77)
        env.set('F77', os.path.join(link_dir, compiler.link_paths['f77']))
    if compiler.fc:
        env.set('SPACK_FC', compiler.fc)
        env.set('FC', os.path.join(link_dir, compiler.link_paths['fc']))
    # Set SPACK compiler rpath flags so that our wrapper knows what to use
    env.set('SPACK_CC_RPATH_ARG', compiler.cc_rpath_arg)
    env.set('SPACK_CXX_RPATH_ARG', compiler.cxx_rpath_arg)
    env.set('SPACK_F77_RPATH_ARG', compiler.f77_rpath_arg)
    env.set('SPACK_FC_RPATH_ARG', compiler.fc_rpath_arg)
    env.set('SPACK_LINKER_ARG', compiler.linker_arg)
    # Check whether we want to force RPATH or RUNPATH
    if spack.config.get('config:shared_linking') == 'rpath':
        env.set('SPACK_DTAGS_TO_STRIP', compiler.enable_new_dtags)
        env.set('SPACK_DTAGS_TO_ADD', compiler.disable_new_dtags)
    else:
        env.set('SPACK_DTAGS_TO_STRIP', compiler.disable_new_dtags)
        env.set('SPACK_DTAGS_TO_ADD', compiler.enable_new_dtags)
    # Set the target parameters that the compiler will add
    isa_arg = spec.architecture.target.optimization_flags(compiler)
    env.set('SPACK_TARGET_ARGS', isa_arg)
    # Trap spack-tracked compiler flags as appropriate.
    # env_flags are easy to accidentally override.
    inject_flags = {}
    env_flags = {}
    build_system_flags = {}
    for flag in spack.spec.FlagMap.valid_compiler_flags():
        # Always convert flag_handler to function type.
        # This avoids discrepencies in calling conventions between functions
        # and methods, or between bound and unbound methods in python 2.
        # We cannot effectively convert everything to a bound method, which
        # would be the simpler solution.
        if isinstance(pkg.flag_handler, types.FunctionType):
            handler = pkg.flag_handler
        else:
            if sys.version_info >= (3, 0):
                handler = pkg.flag_handler.__func__
            else:
                handler = pkg.flag_handler.im_func
        # The handler splits each flag set into the three destinations.
        injf, envf, bsf = handler(pkg, flag, spec.compiler_flags[flag])
        inject_flags[flag] = injf or []
        env_flags[flag] = envf or []
        build_system_flags[flag] = bsf or []
    # Place compiler flags as specified by flag_handler
    for flag in spack.spec.FlagMap.valid_compiler_flags():
        # Concreteness guarantees key safety here
        if inject_flags[flag]:
            # variables SPACK_<FLAG> inject flags through wrapper
            var_name = 'SPACK_{0}'.format(flag.upper())
            env.set(var_name, ' '.join(f for f in inject_flags[flag]))
        if env_flags[flag]:
            # implicit variables (e.g. CFLAGS) read directly by build systems
            env.set(flag.upper(), ' '.join(f for f in env_flags[flag]))
    # Let the package translate the remaining flags into build-system args.
    pkg.flags_to_build_system_args(build_system_flags)
    env.set('SPACK_COMPILER_SPEC', str(spec.compiler))
    env.set('SPACK_SYSTEM_DIRS', ':'.join(system_dirs))
    # Give the compiler class a final chance to customize the environment.
    compiler.setup_custom_environment(pkg, env)
    return env
def set_build_environment_variables(pkg, env, dirty):
    """Ensure a clean install environment when we build packages.

    This involves unsetting pesky environment variables that may
    affect the build. It also involves setting environment variables
    used by Spack's compiler wrappers.

    Args:
        pkg: The package we are building
        env: The build environment
        dirty (bool): Skip unsetting the user's environment settings

    Returns:
        The same ``env`` object, extended with link/include/rpath
        directories, PATH entries, and debug/ccache settings.
    """
    # Gather information about various types of dependencies
    build_deps = set(pkg.spec.dependencies(deptype=('build', 'test')))
    link_deps = set(pkg.spec.traverse(root=False, deptype=('link')))
    build_link_deps = build_deps | link_deps
    rpath_deps = get_rpath_deps(pkg)
    link_dirs = []
    include_dirs = []
    rpath_dirs = []
    # The top-level package is always RPATHed. It hasn't been installed yet
    # so the RPATHs are added unconditionally (e.g. even though lib64/ may
    # not be created for the install).
    for libdir in ['lib', 'lib64']:
        lib_path = os.path.join(pkg.prefix, libdir)
        rpath_dirs.append(lib_path)
    # Set up link, include, RPATH directories that are passed to the
    # compiler wrapper
    for dep in link_deps:
        if is_system_path(dep.prefix):
            continue
        query = pkg.spec[dep.name]
        dep_link_dirs = list()
        try:
            dep_link_dirs.extend(query.libs.directories)
        except NoLibrariesError:
            # Fall back to conventional lib dirs when the dep declares none.
            tty.debug("No libraries found for {0}".format(dep.name))
        for default_lib_dir in ['lib', 'lib64']:
            default_lib_prefix = os.path.join(dep.prefix, default_lib_dir)
            if os.path.isdir(default_lib_prefix):
                dep_link_dirs.append(default_lib_prefix)
        link_dirs.extend(dep_link_dirs)
        if dep in rpath_deps:
            rpath_dirs.extend(dep_link_dirs)
        try:
            include_dirs.extend(query.headers.directories)
        except NoHeadersError:
            tty.debug("No headers found for {0}".format(dep.name))
    # Drop duplicates and system paths before handing to the wrappers.
    link_dirs = list(dedupe(filter_system_paths(link_dirs)))
    include_dirs = list(dedupe(filter_system_paths(include_dirs)))
    rpath_dirs = list(dedupe(filter_system_paths(rpath_dirs)))
    env.set(SPACK_LINK_DIRS, ':'.join(link_dirs))
    env.set(SPACK_INCLUDE_DIRS, ':'.join(include_dirs))
    env.set(SPACK_RPATH_DIRS, ':'.join(rpath_dirs))
    build_prefixes = [dep.prefix for dep in build_deps]
    build_link_prefixes = [dep.prefix for dep in build_link_deps]
    # add run-time dependencies of direct build-time dependencies:
    for build_dep in build_deps:
        for run_dep in build_dep.traverse(deptype='run'):
            build_prefixes.append(run_dep.prefix)
    # Filter out system paths: ['/', '/usr', '/usr/local']
    # These paths can be introduced into the build when an external package
    # is added as a dependency. The problem with these paths is that they often
    # contain hundreds of other packages installed in the same directory.
    # If these paths come first, they can overshadow Spack installations.
    build_prefixes = filter_system_paths(build_prefixes)
    build_link_prefixes = filter_system_paths(build_link_prefixes)
    # Add dependencies to CMAKE_PREFIX_PATH
    env.set_path('CMAKE_PREFIX_PATH', build_link_prefixes)
    # Set environment variables if specified for
    # the given compiler
    compiler = pkg.compiler
    env.extend(spack.schema.environment.parse(compiler.environment))
    if compiler.extra_rpaths:
        extra_rpaths = ':'.join(compiler.extra_rpaths)
        env.set('SPACK_COMPILER_EXTRA_RPATHS', extra_rpaths)
    # Add bin directories from dependencies to the PATH for the build.
    for prefix in build_prefixes:
        for dirname in ['bin', 'bin64']:
            bin_dir = os.path.join(prefix, dirname)
            if os.path.isdir(bin_dir):
                env.prepend_path('PATH', bin_dir)
    # Add spack build environment path with compiler wrappers first in
    # the path. We add the compiler wrapper path, which includes default
    # wrappers (cc, c++, f77, f90), AND a subdirectory containing
    # compiler-specific symlinks. The latter ensures that builds that
    # are sensitive to the *name* of the compiler see the right name when
    # we're building with the wrappers.
    #
    # Conflicts on case-insensitive systems (like "CC" and "cc") are
    # handled by putting one in the <build_env_path>/case-insensitive
    # directory. Add that to the path too.
    env_paths = []
    compiler_specific = os.path.join(
        spack.paths.build_env_path, os.path.dirname(pkg.compiler.link_paths['cc']))
    for item in [spack.paths.build_env_path, compiler_specific]:
        env_paths.append(item)
        ci = os.path.join(item, 'case-insensitive')
        if os.path.isdir(ci):
            env_paths.append(ci)
    for item in env_paths:
        env.prepend_path('PATH', item)
    env.set_path(SPACK_ENV_PATH, env_paths)
    # Working directory for the spack command itself, for debug logs.
    if spack.config.get('config:debug'):
        env.set(SPACK_DEBUG, 'TRUE')
        env.set(SPACK_SHORT_SPEC, pkg.spec.short_spec)
        env.set(SPACK_DEBUG_LOG_ID, pkg.spec.format('{name}-{hash:7}'))
        env.set(SPACK_DEBUG_LOG_DIR, spack.main.spack_working_dir)
    # Find ccache binary and hand it to build environment
    if spack.config.get('config:ccache'):
        ccache = Executable('ccache')
        if not ccache:
            raise RuntimeError("No ccache binary found in PATH")
        env.set(SPACK_CCACHE_BINARY, ccache)
    # Add any pkgconfig directories to PKG_CONFIG_PATH
    for prefix in build_link_prefixes:
        for directory in ('lib', 'lib64', 'share'):
            pcdir = os.path.join(prefix, directory, 'pkgconfig')
            if os.path.isdir(pcdir):
                env.prepend_path('PKG_CONFIG_PATH', pcdir)
    return env
def _set_variables_for_single_module(pkg, module):
    """Helper function to set module variables for single module.

    Injects build tools (make, cmake, ...), shell-like helpers
    (cd, mkdirp, install, ...), and prefix/suffix information into
    ``module``'s namespace so package code can use them directly.
    Idempotent: a marker attribute prevents repeat initialization.
    """
    # Put a marker on this module so that it won't execute the body of this
    # function again, since it is not needed
    marker = '_set_run_already_called'
    if getattr(module, marker, False):
        return
    # Respect config build_jobs but never exceed the host's CPU count;
    # serial packages always get 1.
    jobs = spack.config.get('config:build_jobs', 16) if pkg.parallel else 1
    jobs = min(jobs, multiprocessing.cpu_count())
    m = module
    m.make_jobs = jobs
    # TODO: make these build deps that can be installed if not found.
    m.make = MakeExecutable('make', jobs)
    m.gmake = MakeExecutable('gmake', jobs)
    m.scons = MakeExecutable('scons', jobs)
    m.ninja = MakeExecutable('ninja', jobs)
    # easy shortcut to os.environ
    m.env = os.environ
    # Find the configure script in the archive path
    # Don't use which for this; we want to find it in the current dir.
    m.configure = Executable('./configure')
    m.meson = Executable('meson')
    m.cmake = Executable('cmake')
    m.ctest = MakeExecutable('ctest', jobs)
    # Standard CMake arguments
    m.std_cmake_args = spack.build_systems.cmake.CMakePackage._std_args(pkg)
    m.std_meson_args = spack.build_systems.meson.MesonPackage._std_args(pkg)
    # Put spack compiler paths in module scope.
    link_dir = spack.paths.build_env_path
    m.spack_cc = os.path.join(link_dir, pkg.compiler.link_paths['cc'])
    m.spack_cxx = os.path.join(link_dir, pkg.compiler.link_paths['cxx'])
    m.spack_f77 = os.path.join(link_dir, pkg.compiler.link_paths['f77'])
    m.spack_fc = os.path.join(link_dir, pkg.compiler.link_paths['fc'])
    # Emulate some shell commands for convenience
    m.pwd = os.getcwd
    m.cd = os.chdir
    m.mkdir = os.mkdir
    m.makedirs = os.makedirs
    m.remove = os.remove
    m.removedirs = os.removedirs
    m.symlink = os.symlink
    m.mkdirp = mkdirp
    m.install = install
    m.install_tree = install_tree
    m.rmtree = shutil.rmtree
    m.move = shutil.move
    # Useful directories within the prefix are encapsulated in
    # a Prefix object.
    m.prefix = pkg.prefix
    # Platform-specific library suffix.
    m.dso_suffix = dso_suffix
    def static_to_shared_library(static_lib, shared_lib=None, **kwargs):
        # Closure over pkg: defaults the compiler to this package's cc wrapper.
        compiler_path = kwargs.get('compiler', m.spack_cc)
        compiler = Executable(compiler_path)
        return _static_to_shared_library(pkg.spec.architecture, compiler,
                                         static_lib, shared_lib, **kwargs)
    m.static_to_shared_library = static_to_shared_library
    # Put a marker on this module so that it won't execute the body of this
    # function again, since it is not needed
    setattr(m, marker, True)
def set_module_variables_for_package(pkg):
    """Populate the module scope of install() with some useful functions.
    This makes things easier for package writers.
    """
    # A package may inherit from a package defined in a different repo
    # (e.g. spack.pkg.mystuff.libelf.Libelf deriving from
    # spack.pkg.original.libelf.Libelf).  Initialize every module in the
    # class hierarchy so code in parent classes keeps working too;
    # parent_class_modules includes pkg's own module.
    for hierarchy_module in parent_class_modules(pkg.__class__):
        _set_variables_for_single_module(pkg, hierarchy_module)
def _static_to_shared_library(arch, compiler, static_lib, shared_lib=None,
                              **kwargs):
    """Convert a static library to a shared library.

    The static library has to be built with PIC for the conversion
    to work.

    Parameters:
        arch: target architecture (matched against 'linux'/'cray'/'darwin')
        compiler (Executable): compiler used to perform the link
        static_lib (str): Path to the static library.
        shared_lib (str): Path to the shared library. Default is to derive
            from the static library's path.

    Keyword arguments:
        compiler_output: Where to print compiler output to.
        arguments (str list): Additional arguments for the compiler.
        version (str): Library version. Default is unspecified.
        compat_version (str): Library compatibility version. Default is
            version.
    """
    compiler_output = kwargs.get('compiler_output', None)
    extra_args = kwargs.get('arguments', [])
    version = kwargs.get('version', None)
    compat_version = kwargs.get('compat_version', version)
    # Derive the shared library name from the static one if not given.
    if not shared_lib:
        shared_lib = '{0}.{1}'.format(
            os.path.splitext(static_lib)[0], dso_suffix)
    # TODO: Compiler arguments should not be hardcoded but provided by
    # the different compiler classes.
    if 'linux' in arch or 'cray' in arch:
        soname = os.path.basename(shared_lib)
        if compat_version:
            soname = '{0}.{1}'.format(soname, compat_version)
        link_args = [
            '-shared',
            '-Wl,-soname,{0}'.format(soname),
            '-Wl,--whole-archive',
            static_lib,
            '-Wl,--no-whole-archive',
        ]
    elif 'darwin' in arch:
        install_name = shared_lib
        if compat_version:
            install_name = '{0}.{1}'.format(install_name, compat_version)
        link_args = [
            '-dynamiclib',
            '-install_name', '{0}'.format(install_name),
            '-Wl,-force_load,{0}'.format(static_lib),
        ]
        if compat_version:
            link_args += ['-compatibility_version',
                          '{0}'.format(compat_version)]
        if version:
            link_args += ['-current_version', '{0}'.format(version)]
    else:
        link_args = []
    link_args.extend(extra_args)
    # Append the version (or compat version) suffix to the output name.
    unversioned = shared_lib
    if version:
        shared_lib = '{0}.{1}'.format(shared_lib, version)
    elif compat_version:
        shared_lib = '{0}.{1}'.format(shared_lib, compat_version)
    link_args += ['-o', shared_lib]
    # Create symlinks for version and compat_version pointing at the
    # fully-versioned library name.
    link_target = os.path.basename(shared_lib)
    if version or compat_version:
        os.symlink(link_target, unversioned)
    if compat_version and compat_version != version:
        os.symlink(link_target, '{0}.{1}'.format(unversioned,
                                                 compat_version))
    return compiler(*link_args, output=compiler_output)
def get_rpath_deps(pkg):
    """Return immediate or transitive RPATHs depending on the package.

    With ``pkg.transitive_rpaths`` set, this is the full transitive
    closure of link dependencies; otherwise only the direct link deps.
    """
    if not pkg.transitive_rpaths:
        return pkg.spec.dependencies(deptype='link')
    return list(pkg.spec.traverse(root=False, deptype=('link')))
def get_rpaths(pkg):
    """Get a list of all the rpaths for a package.

    Includes the package's own lib/lib64 dirs, existing lib/lib64 dirs
    of its rpath dependencies, and (if present) paths derived from the
    compiler's second module.  System paths and duplicates are removed.
    """
    deps = get_rpath_deps(pkg)
    # Keep ordering: the package's own dirs first, then all dep lib dirs,
    # then all dep lib64 dirs (dedupe keeps the first occurrence).
    rpaths = [pkg.prefix.lib, pkg.prefix.lib64]
    rpaths += [d.prefix.lib for d in deps if os.path.isdir(d.prefix.lib)]
    rpaths += [d.prefix.lib64 for d in deps if os.path.isdir(d.prefix.lib64)]
    # Second module is our compiler mod name. We use that to get rpaths from
    # module show output.
    if pkg.compiler.modules and len(pkg.compiler.modules) > 1:
        rpaths.append(path_from_modules([pkg.compiler.modules[1]]))
    return list(dedupe(filter_system_paths(rpaths)))
def get_std_cmake_args(pkg):
    """List of standard arguments used if a package is a CMakePackage.

    Args:
        pkg (PackageBase): package under consideration

    Returns:
        list of str: standard arguments that would be used if this
        package were a CMakePackage instance.
    """
    return spack.build_systems.cmake.CMakePackage._std_args(pkg)
def get_std_meson_args(pkg):
    """List of standard arguments used if a package is a MesonPackage.

    Args:
        pkg (PackageBase): package under consideration

    Returns:
        list of str: standard arguments that would be used if this
        package were a MesonPackage instance.
    """
    return spack.build_systems.meson.MesonPackage._std_args(pkg)
def parent_class_modules(cls):
    """Get the modules of superclasses descending from PackageBase.

    Returns the list of modules for ``cls`` and each of its bases that
    is a proper subclass of ``spack.package.PackageBase``; includes
    ``cls.__module__`` itself.
    """
    # Only proper subclasses of PackageBase qualify (not PackageBase itself).
    is_proper_subclass = (
        issubclass(cls, spack.package.PackageBase) and
        not issubclass(spack.package.PackageBase, cls))
    if not is_proper_subclass:
        return []
    result = []
    module = sys.modules.get(cls.__module__)
    if module:
        result = [module]
    for base in cls.__bases__:
        result.extend(parent_class_modules(base))
    return result
def load_external_modules(pkg):
    """Traverse a package's spec DAG and load any external modules.

    Traverse a package's dependencies and load any external modules
    associated with them.

    Args:
        pkg (PackageBase): package to load deps for
    """
    for dep in list(pkg.spec.traverse()):
        # external_modules may be None; treat it as an empty list.
        for module_name in (dep.external_modules or []):
            load_module(module_name)
def setup_package(pkg, dirty, context='build'):
    """Execute all environment setup routines.

    Args:
        pkg: package about to be built or tested
        dirty (bool): skip cleaning the user's environment first
        context (str): 'build' or 'test'; selects which setup hooks run
    """
    env = EnvironmentModifications()
    if not dirty:
        clean_environment()
    # setup compilers and build tools for build contexts
    need_compiler = context == 'build' or (context == 'test' and
                                           pkg.test_requires_compiler)
    if need_compiler:
        set_compiler_environment_variables(pkg, env)
        set_build_environment_variables(pkg, env, dirty)
    # architecture specific setup
    pkg.architecture.platform.setup_platform_environment(pkg, env)
    if context == 'build':
        # recursive post-order dependency information
        env.extend(
            modifications_from_dependencies(pkg.spec, context=context)
        )
        if (not dirty) and (not env.is_unset('CPATH')):
            tty.debug("A dependency has updated CPATH, this may lead pkg-"
                      "config to assume that the package is part of the system"
                      " includes and omit it when invoked with '--cflags'.")
        # setup package itself
        set_module_variables_for_package(pkg)
        pkg.setup_build_environment(env)
    elif context == 'test':
        import spack.user_environment as uenv  # avoid circular import
        env.extend(uenv.environment_modifications_for_spec(pkg.spec))
        env.extend(
            modifications_from_dependencies(pkg.spec, context=context)
        )
        set_module_variables_for_package(pkg)
        # Tests may invoke executables from the current directory.
        env.prepend_path('PATH', '.')
    # Loading modules, in particular if they are meant to be used outside
    # of Spack, can change environment variables that are relevant to the
    # build of packages. To avoid a polluted environment, preserve the
    # value of a few, selected, environment variables
    # With the current ordering of environment modifications, this is strictly
    # unnecessary. Modules affecting these variables will be overwritten anyway
    with preserve_environment('CC', 'CXX', 'FC', 'F77'):
        # All module loads that otherwise would belong in previous
        # functions have to occur after the env object has its
        # modifications applied. Otherwise the environment modifications
        # could undo module changes, such as unsetting LD_LIBRARY_PATH
        # after a module changes it.
        if need_compiler:
            for mod in pkg.compiler.modules:
                # Fixes issue https://github.com/spack/spack/issues/3153
                if os.environ.get("CRAY_CPU_TARGET") == "mic-knl":
                    load_module("cce")
                load_module(mod)
            # kludge to handle cray libsci being automatically loaded by PrgEnv
            # modules on cray platform. Module unload does no damage when
            # unnecessary
            module('unload', 'cray-libsci')
        if pkg.architecture.target.module_name:
            load_module(pkg.architecture.target.module_name)
        load_external_modules(pkg)
        implicit_rpaths = pkg.compiler.implicit_rpaths()
        if implicit_rpaths:
            env.set('SPACK_COMPILER_IMPLICIT_RPATHS',
                    ':'.join(implicit_rpaths))
        # Make sure nothing's strange about the Spack environment.
        validate(env, tty.warn)
        env.apply_modifications()
def modifications_from_dependencies(spec, context):
    """Returns the environment modifications that are required by
    the dependencies of a spec and also applies modifications
    to this spec's package at module scope, if need be.

    Args:
        spec (Spec): spec for which we want the modifications
        context (str): either 'build' for build-time modifications or 'run'
            for run-time modifications
    """
    env = EnvironmentModifications()
    pkg = spec.package
    # Each context selects which dependency types to walk and which
    # setup hook the dependency packages expose.  Unknown contexts
    # raise KeyError.
    context_config = {
        'build': (('build', 'link', 'test'),
                  'setup_dependent_build_environment'),
        'run': (('link', 'run'), 'setup_dependent_run_environment'),
        'test': (('link', 'run', 'test'), 'setup_dependent_run_environment')
    }
    deptype, method = context_config[context]
    # Tests also need modifications coming from the spec itself.
    include_root = context == 'test'
    for dep_spec in spec.traverse(order='post', root=include_root,
                                  deptype=deptype):
        dep_pkg = dep_spec.package
        set_module_variables_for_package(dep_pkg)
        # Allow dependencies to modify the module
        dep_pkg.setup_dependent_package(pkg.module, spec)
        getattr(dep_pkg, method)(env, spec)
    return env
def _setup_pkg_and_run(serialized_pkg, function, kwargs, child_pipe,
                       input_multiprocess_fd):
    """Child-process entry point: set up the build env and run ``function``.

    Restores the package from its serialized form, sets up its build/test
    environment, runs ``function(pkg, kwargs)``, and sends the result (or
    a picklable error) back to the parent over ``child_pipe``.
    """
    context = kwargs.get('context', 'build')
    try:
        # We are in the child process. Python sets sys.stdin to
        # open(os.devnull) to prevent our process and its parent from
        # simultaneously reading from the original stdin. But, we assume
        # that the parent process is not going to read from it till we
        # are done with the child, so we undo Python's precaution.
        if input_multiprocess_fd is not None:
            sys.stdin = os.fdopen(input_multiprocess_fd.fd)
        pkg = serialized_pkg.restore()
        if not kwargs.get('fake', False):
            kwargs['unmodified_env'] = os.environ.copy()
            setup_package(pkg, dirty=kwargs.get('dirty', False),
                          context=context)
        return_value = function(pkg, kwargs)
        child_pipe.send(return_value)
    except StopPhase as e:
        # Do not create a full ChildError from this, it's not an error
        # it's a control statement.
        child_pipe.send(e)
    except BaseException:
        # catch ANYTHING that goes wrong in the child process
        exc_type, exc, tb = sys.exc_info()
        # Need to unwind the traceback in the child because traceback
        # objects can't be sent to the parent.
        tb_string = traceback.format_exc()
        # build up some context from the offending package so we can
        # show that, too.
        package_context = get_package_context(tb)
        logfile = None
        if context == 'build':
            try:
                if hasattr(pkg, 'log_path'):
                    logfile = pkg.log_path
            except NameError:
                # 'pkg' is not defined yet (restore() itself failed)
                pass
        elif context == 'test':
            logfile = os.path.join(
                pkg.test_suite.stage,
                spack.install_test.TestSuite.test_log_name(pkg.spec))
        # make a pickleable exception to send to parent.
        msg = "%s: %s" % (exc_type.__name__, str(exc))
        ce = ChildError(msg,
                        exc_type.__module__,
                        exc_type.__name__,
                        tb_string, logfile, context, package_context)
        child_pipe.send(ce)
    finally:
        # Always close our end of the pipe and the forwarded stdin fd.
        child_pipe.close()
        if input_multiprocess_fd is not None:
            input_multiprocess_fd.close()
def start_build_process(pkg, function, kwargs):
    """Create a child process to do part of a spack build.

    Args:
        pkg (PackageBase): package whose environment we should set up the
            child process for.
        function (callable): argless function to run in the child
            process.

    Usage::

        def child_fun():
            # do stuff
        build_env.start_build_process(pkg, child_fun)

    The child process is run with the build environment set up by
    spack.build_environment. This allows package authors to have full
    control over the environment, etc. without affecting other builds
    that might be executed in the same spack call.

    If something goes wrong, the child process catches the error and
    passes it to the parent wrapped in a ChildError. The parent is
    expected to handle (or re-raise) the ChildError.

    This uses `multiprocessing.Process` to create the child process. The
    mechanism used to create the process differs on different operating
    systems and for different versions of Python. In some cases "fork"
    is used (i.e. the "fork" system call) and some cases it starts an
    entirely new Python interpreter process (in the docs this is referred
    to as the "spawn" start method). Breaking it down by OS:

    - Linux always uses fork.
    - Mac OS uses fork before Python 3.8 and "spawn" for 3.8 and after.
    - Windows always uses the "spawn" start method.

    For more information on `multiprocessing` child process creation
    mechanisms, see https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
    """
    parent_pipe, child_pipe = multiprocessing.Pipe()
    input_multiprocess_fd = None
    # Serialize the package so it survives a "spawn"-style child start.
    serialized_pkg = spack.subprocess_context.PackageInstallContext(pkg)
    try:
        # Forward sys.stdin when appropriate, to allow toggling verbosity
        if sys.stdin.isatty() and hasattr(sys.stdin, 'fileno'):
            input_fd = os.dup(sys.stdin.fileno())
            input_multiprocess_fd = MultiProcessFd(input_fd)
        p = multiprocessing.Process(
            target=_setup_pkg_and_run,
            args=(serialized_pkg, function, kwargs, child_pipe,
                  input_multiprocess_fd))
        p.start()
    except InstallError as e:
        # let the caller know which package went wrong.
        e.pkg = pkg
        raise
    finally:
        # Close the input stream in the parent process
        if input_multiprocess_fd is not None:
            input_multiprocess_fd.close()
    # Block until the child sends its result (or error) over the pipe.
    child_result = parent_pipe.recv()
    p.join()
    # If returns a StopPhase, raise it
    if isinstance(child_result, StopPhase):
        # do not print
        raise child_result
    # let the caller know which package went wrong.
    if isinstance(child_result, InstallError):
        child_result.pkg = pkg
    if isinstance(child_result, ChildError):
        # If the child process raised an error, print its output here rather
        # than waiting until the call to SpackError.die() in main(). This
        # allows exception handling output to be logged from within Spack.
        # see spack.main.SpackCommand.
        child_result.print_context()
        raise child_result
    return child_result
def get_package_context(traceback, context=3):
    """Return some context for an error message when the build fails.

    Args:
        traceback (traceback): A traceback from some exception raised during
            install
        context (int): Lines of context to show before and after the line
            where the error happened

    This function inspects the stack to find where we failed in the
    package file, and it adds detailed context to the long_message
    from there.
    """
    def make_stack(tb, stack=None):
        """Tracebacks come out of the system in caller -> callee order. Return
        an array in callee -> caller order so we can traverse it."""
        if stack is None:
            stack = []
        if tb is not None:
            make_stack(tb.tb_next, stack)
            stack.append(tb)
        return stack
    stack = make_stack(traceback)
    for tb in stack:
        frame = tb.tb_frame
        if 'self' in frame.f_locals:
            # Find the first proper subclass of PackageBase.
            obj = frame.f_locals['self']
            if isinstance(obj, spack.package.PackageBase):
                break
    # NOTE(review): if no frame holds a PackageBase instance, the loop falls
    # through and `frame` is whatever frame was inspected last (and is unbound
    # for an empty traceback) -- confirm this fallthrough is intended.
    # We found obj, the Package implementation we care about.
    # Point out the location in the install method where we failed.
    lines = [
        '{0}:{1:d}, in {2}:'.format(
            inspect.getfile(frame.f_code),
            # NOTE(review): CPython's f_lineno is 1-indexed, so subtracting 1
            # here appears to report the line *before* the failure -- verify.
            frame.f_lineno - 1,
            frame.f_code.co_name
        )
    ]
    # Build a message showing context in the install method.
    sourcelines, start = inspect.getsourcelines(frame)
    # Calculate lineno of the error relative to the start of the function.
    # NOTE(review): the same 1-indexed f_lineno caveat as above applies here.
    fun_lineno = frame.f_lineno - start - 1
    start_ctx = max(0, fun_lineno - context)
    sourcelines = sourcelines[start_ctx:fun_lineno + context + 1]
    for i, line in enumerate(sourcelines):
        is_error = start_ctx + i == fun_lineno
        mark = '>> ' if is_error else ' '
        # Add start to get lineno relative to start of file, not function.
        marked = ' {0}{1:-6d}{2}'.format(
            mark, start + start_ctx + i, line.rstrip())
        if is_error:
            marked = colorize('@R{%s}' % cescape(marked))
        lines.append(marked)
    return lines
class InstallError(spack.error.SpackError):
    """Raised by packages when a package fails to install.

    Any subclass of InstallError will be annotated by Spack with a
    ``pkg`` attribute on failure, which the caller can use to get the
    package for which the exception was raised.
    """
class ChildError(InstallError):
    """Special exception class for wrapping exceptions from child processes
      in Spack's build environment.

    The main features of a ChildError are:

    1. They're serializable, so when a child build fails, we can send one
       of these to the parent and let the parent report what happened.

    2. They have a ``traceback`` field containing a traceback generated
       on the child immediately after failure.  Spack will print this on
       failure in lieu of trying to run sys.excepthook on the parent
       process, so users will see the correct stack trace from a child.

    3. They also contain context, which shows context in the Package
       implementation where the error happened.  This helps people debug
       Python code in their packages.  To get it, Spack searches the
       stack trace for the deepest frame where ``self`` is in scope and
       is an instance of PackageBase.  This will generally find a useful
       spot in the ``package.py`` file.

    The long_message of a ChildError displays one of two things:

    1. If the original error was a ProcessError, indicating a command
       died during the build, we'll show context from the build log.

    2. If the original error was any other type of error, we'll show
       context from the Python code.

    SpackError handles displaying the special traceback if we're in debug
    mode with spack -d.
    """
    # List of errors considered "build errors", for which we'll show log
    # context instead of Python context.
    build_errors = [('spack.util.executable', 'ProcessError')]
    def __init__(self, msg, module, classname, traceback_string, log_name,
                 log_type, context):
        super(ChildError, self).__init__(msg)
        self.module = module          # module of the original exception type
        self.name = classname         # class name of the original exception
        self.traceback = traceback_string
        self.log_name = log_name      # path to the build/test log, if any
        self.log_type = log_type      # e.g. 'build' or 'test'
        self.context = context        # package source context lines
    @property
    def long_message(self):
        """Detailed error text: a log excerpt or Python source context."""
        out = StringIO()
        out.write(self._long_message if self._long_message else '')
        have_log = self.log_name and os.path.exists(self.log_name)
        if (self.module, self.name) in ChildError.build_errors:
            # The error happened in some external executed process. Show
            # the log with errors or warnings highlighted.
            if have_log:
                write_log_summary(out, self.log_type, self.log_name)
        else:
            # The error happened in the Python code, so try to show
            # some context from the Package itself.
            if self.context:
                out.write('\n')
                out.write('\n'.join(self.context))
                out.write('\n')
        if out.getvalue():
            out.write('\n')
        if have_log:
            out.write('See {0} log for details:\n'.format(self.log_type))
            out.write(' {0}\n'.format(self.log_name))
        return out.getvalue()
    def __str__(self):
        # Short form: just the original message.
        return self.message
    def __reduce__(self):
        """__reduce__ is used to serialize (pickle) ChildErrors.

        Return a function to reconstruct a ChildError, along with the
        salient properties we'll need.
        """
        return _make_child_error, (
            self.message,
            self.module,
            self.name,
            self.traceback,
            self.log_name,
            self.log_type,
            self.context)
def _make_child_error(msg, module, name, traceback, log, log_type, context):
    """Used by ``__reduce__`` in ChildError to reconstruct pickled errors."""
    error_args = (msg, module, name, traceback, log, log_type, context)
    return ChildError(*error_args)
class StopPhase(spack.error.SpackError):
    """Pickle-able exception to control stopped builds."""
    def __reduce__(self):
        # Serialize as (reconstructor, args) so the exception can cross the
        # multiprocessing pipe back to the parent process.
        return _make_stop_phase, (self.message, self.long_message)
def _make_stop_phase(msg, long_msg):
    """Used by ``__reduce__`` in StopPhase to reconstruct pickled errors."""
    return StopPhase(msg, long_msg)
def write_log_summary(out, log_type, log, last=None):
    """Write a summary of errors (or, failing that, warnings) from a log.

    Errors take precedence: if any errors are found in the log, only the
    errors are shown; warnings are shown only when there are no errors.

    Args:
        out: file-like object the summary is written to
        log_type (str): kind of log (e.g. 'build' or 'test'), used in the
            summary heading
        log (str): path to the log file to parse
        last (int): if set, show only the last ``last`` events
    """
    errors, warnings = parse_log_events(log)

    def _write_section(events, noun):
        """Write one highlighted section (errors or warnings) to out."""
        # Trim to the most recent `last` events when requested.
        if last and len(events) > last:
            events = events[-last:]
        out.write(
            "\n%s found in %s log:\n" %
            (plural(len(events), noun), log_type))
        out.write(make_log_context(events))

    if errors:
        # If errors are found, only display errors
        _write_section(errors, 'error')
    elif warnings:
        # If no errors are found but warnings are, display warnings
        _write_section(warnings, 'warning')
| 36.84788 | 101 | 0.667434 |
import inspect
import re
import multiprocessing
import os
import shutil
import sys
import traceback
import types
from six import StringIO
import llnl.util.tty as tty
from llnl.util.tty.color import cescape, colorize
from llnl.util.filesystem import mkdirp, install, install_tree
from llnl.util.lang import dedupe
from llnl.util.tty.log import MultiProcessFd
import spack.build_systems.cmake
import spack.build_systems.meson
import spack.config
import spack.main
import spack.paths
import spack.package
import spack.repo
import spack.schema.environment
import spack.store
import spack.install_test
import spack.subprocess_context
import spack.architecture as arch
import spack.util.path
from spack.util.string import plural
from spack.util.environment import (
env_flag, filter_system_paths, get_path, is_system_path,
EnvironmentModifications, validate, preserve_environment)
from spack.util.environment import system_dirs
from spack.error import NoLibrariesError, NoHeadersError
from spack.util.executable import Executable
from spack.util.module_cmd import load_module, path_from_modules, module
from spack.util.log_parse import parse_log_events, make_log_context
# Environment flag that, when set, disables parallel (-j) make invocations.
SPACK_NO_PARALLEL_MAKE = 'SPACK_NO_PARALLEL_MAKE'
# Names of the environment variables Spack uses to pass build information
# from the parent process to the compiler wrappers.
SPACK_ENV_PATH = 'SPACK_ENV_PATH'
SPACK_INCLUDE_DIRS = 'SPACK_INCLUDE_DIRS'
SPACK_LINK_DIRS = 'SPACK_LINK_DIRS'
SPACK_RPATH_DIRS = 'SPACK_RPATH_DIRS'
SPACK_RPATH_DEPS = 'SPACK_RPATH_DEPS'
SPACK_LINK_DEPS = 'SPACK_LINK_DEPS'
SPACK_PREFIX = 'SPACK_PREFIX'
SPACK_INSTALL = 'SPACK_INSTALL'
SPACK_DEBUG = 'SPACK_DEBUG'
SPACK_SHORT_SPEC = 'SPACK_SHORT_SPEC'
SPACK_DEBUG_LOG_ID = 'SPACK_DEBUG_LOG_ID'
SPACK_DEBUG_LOG_DIR = 'SPACK_DEBUG_LOG_DIR'
SPACK_CCACHE_BINARY = 'SPACK_CCACHE_BINARY'
SPACK_SYSTEM_DIRS = 'SPACK_SYSTEM_DIRS'
# Platform-specific library suffix.
dso_suffix = 'dylib' if sys.platform == 'darwin' else 'so'
class MakeExecutable(Executable):
    """A ``make``-like executable with per-invocation parallelism control.

    ``__call__`` prepends ``-j<jobs>`` to the arguments unless parallelism
    is turned off, either globally via the SPACK_NO_PARALLEL_MAKE
    environment flag or per call with ``parallel=False``.  Passing
    ``jobs_env='NAME'`` additionally exports the job count through that
    environment variable for the duration of the command.
    """
    def __init__(self, name, jobs):
        super(MakeExecutable, self).__init__(name)
        self.jobs = jobs
    def __call__(self, *args, **kwargs):
        """Run the executable, injecting a ``-j`` flag when parallel."""
        disable = env_flag(SPACK_NO_PARALLEL_MAKE)
        # NOTE: when globally disabled, the 'parallel' kwarg is deliberately
        # NOT popped (short-circuit), matching the original evaluation order.
        parallel = (not disable) and kwargs.pop('parallel', self.jobs > 1)
        job_flag = ('-j{0}'.format(self.jobs),) if parallel else ()
        env_var_name = kwargs.pop('jobs_env', None)
        if env_var_name:
            # Caller wants the job count exported via this variable.
            kwargs['extra_env'] = {env_var_name: str(self.jobs)}
        return super(MakeExecutable, self).__call__(
            *(job_flag + args), **kwargs)
def clean_environment():
    """Strip user-set, build-interfering variables from ``os.environ``.

    Unsets linker/include search paths, compiler and MPI wrapper variables,
    and (on non-CNL Cray) Cray-specific paths; removes MacPorts entries from
    PATH.  Modifications are applied immediately.
    """
    # Stuff in here sanitizes the build environment to eliminate
    # anything the user has set that may interfere. We apply it immediately
    # unlike the other functions so it doesn't overwrite what the modules load.
    env = EnvironmentModifications()
    env.unset('LD_LIBRARY_PATH')
    env.unset('LD_RUN_PATH')
    env.unset('DYLD_LIBRARY_PATH')
    env.unset('DYLD_FALLBACK_LIBRARY_PATH')
    env.unset('LIBRARY_PATH')
    env.unset('CPATH')
    env.unset('C_INCLUDE_PATH')
    env.unset('CPLUS_INCLUDE_PATH')
    env.unset('OBJC_INCLUDE_PATH')
    # Detect whether we are on a Cray front-end (not running CNL).
    hostarch = arch.Arch(arch.platform(), 'default_os', 'default_target')
    on_cray = str(hostarch.platform) == 'cray'
    using_cnl = re.match(r'cnl\d+', str(hostarch.os))
    if on_cray and not using_cnl:
        env.unset('CRAY_LD_LIBRARY_PATH')
        # Drop every pkg-config related variable the Cray environment sets.
        for varname in os.environ.keys():
            if 'PKGCONF' in varname:
                env.unset(varname)
    # Unset variables commonly consulted by autotools/CMake-style builds.
    build_system_vars = [
        'CC', 'CFLAGS', 'CPP', 'CPPFLAGS',
        'CXX', 'CCC', 'CXXFLAGS', 'CXXCPP',
        'F77', 'FFLAGS', 'FLIBS',
        'FC', 'FCFLAGS', 'FCLIBS',
        'LDFLAGS', 'LIBS'
    ]
    for v in build_system_vars:
        env.unset(v)
    # Unset user-set MPI compiler wrappers.
    mpi_vars = [
        'MPICC', 'MPICXX', 'MPIFC', 'MPIF77', 'MPIF90'
    ]
    for v in mpi_vars:
        env.unset(v)
    # Force a configured locale for build tool output, if any.
    build_lang = spack.config.get('config:build_language')
    if build_lang:
        env.set('LC_ALL', build_lang)
    # Remove any MacPorts installs from the PATH.
    path = get_path('PATH')
    for p in path:
        if '/macports/' in p:
            env.remove_path('PATH', p)
    env.apply_modifications()
def set_compiler_environment_variables(pkg, env):
    """Record compiler-related variables on ``env`` for the wrappers.

    Sets SPACK_CC/CXX/F77/FC (real compilers), CC/CXX/F77/FC (wrapper
    paths), rpath/dtag/target flags, and the per-flag SPACK_<FLAG>
    variables produced by the package's ``flag_handler``.

    Args:
        pkg: the package being built; its spec must be concrete
        env (EnvironmentModifications): object to append modifications to

    Returns:
        The same ``env`` object, for chaining.
    """
    assert pkg.spec.concrete
    compiler = pkg.compiler
    spec = pkg.spec
    # Make sure the executables for this compiler exist
    compiler.verify_executables()
    # Set compiler variables used by CMake and autotools
    assert all(key in compiler.link_paths for key in (
        'cc', 'cxx', 'f77', 'fc'))
    # Populate an object with the list of environment modifications
    # and return it
    # TODO : add additional kwargs for better diagnostics, like requestor,
    # ttyout, ttyerr, etc.
    link_dir = spack.paths.build_env_path
    # Set SPACK compiler variables so that our wrapper knows what to call
    if compiler.cc:
        env.set('SPACK_CC', compiler.cc)
        env.set('CC', os.path.join(link_dir, compiler.link_paths['cc']))
    if compiler.cxx:
        env.set('SPACK_CXX', compiler.cxx)
        env.set('CXX', os.path.join(link_dir, compiler.link_paths['cxx']))
    if compiler.f77:
        env.set('SPACK_F77', compiler.f77)
        env.set('F77', os.path.join(link_dir, compiler.link_paths['f77']))
    if compiler.fc:
        env.set('SPACK_FC', compiler.fc)
        env.set('FC', os.path.join(link_dir, compiler.link_paths['fc']))
    # Set SPACK compiler rpath flags so that our wrapper knows what to use
    env.set('SPACK_CC_RPATH_ARG', compiler.cc_rpath_arg)
    env.set('SPACK_CXX_RPATH_ARG', compiler.cxx_rpath_arg)
    env.set('SPACK_F77_RPATH_ARG', compiler.f77_rpath_arg)
    env.set('SPACK_FC_RPATH_ARG', compiler.fc_rpath_arg)
    env.set('SPACK_LINKER_ARG', compiler.linker_arg)
    # Check whether we want to force RPATH or RUNPATH
    if spack.config.get('config:shared_linking') == 'rpath':
        env.set('SPACK_DTAGS_TO_STRIP', compiler.enable_new_dtags)
        env.set('SPACK_DTAGS_TO_ADD', compiler.disable_new_dtags)
    else:
        env.set('SPACK_DTAGS_TO_STRIP', compiler.disable_new_dtags)
        env.set('SPACK_DTAGS_TO_ADD', compiler.enable_new_dtags)
    # Set the target parameters that the compiler will add
    isa_arg = spec.architecture.target.optimization_flags(compiler)
    env.set('SPACK_TARGET_ARGS', isa_arg)
    # Trap spack-tracked compiler flags as appropriate.
    # env_flags are easy to accidentally override.
    inject_flags = {}
    env_flags = {}
    build_system_flags = {}
    for flag in spack.spec.FlagMap.valid_compiler_flags():
        # Always convert flag_handler to function type.
        # This avoids discrepencies in calling conventions between functions
        # and methods, or between bound and unbound methods in python 2.
        # We cannot effectively convert everything to a bound method, which
        # would be the simpler solution.
        if isinstance(pkg.flag_handler, types.FunctionType):
            handler = pkg.flag_handler
        else:
            if sys.version_info >= (3, 0):
                handler = pkg.flag_handler.__func__
            else:
                handler = pkg.flag_handler.im_func
        # Each handler splits the spec's flags into (inject, env, build_system).
        injf, envf, bsf = handler(pkg, flag, spec.compiler_flags[flag])
        inject_flags[flag] = injf or []
        env_flags[flag] = envf or []
        build_system_flags[flag] = bsf or []
    # Place compiler flags as specified by flag_handler
    for flag in spack.spec.FlagMap.valid_compiler_flags():
        # Concreteness guarantees key safety here
        if inject_flags[flag]:
            # variables SPACK_<FLAG> inject flags through wrapper
            var_name = 'SPACK_{0}'.format(flag.upper())
            env.set(var_name, ' '.join(f for f in inject_flags[flag]))
        if env_flags[flag]:
            # implicit variables
            env.set(flag.upper(), ' '.join(f for f in env_flags[flag]))
    pkg.flags_to_build_system_args(build_system_flags)
    env.set('SPACK_COMPILER_SPEC', str(spec.compiler))
    env.set('SPACK_SYSTEM_DIRS', ':'.join(system_dirs))
    # Let the compiler apply any compiler-specific tweaks (e.g. license vars).
    compiler.setup_custom_environment(pkg, env)
    return env
def set_build_environment_variables(pkg, env, dirty):
    """Populate ``env`` with dependency paths and wrapper configuration.

    Computes link/include/rpath directories from the package's
    dependencies, extends PATH with dependency bin dirs and the compiler
    wrapper directories, sets CMAKE_PREFIX_PATH and PKG_CONFIG_PATH, and
    records debug/ccache settings.

    Args:
        pkg: the package being built (spec must be concrete)
        env (EnvironmentModifications): object to append modifications to
        dirty (bool): unused here; kept for interface parity with callers

    Returns:
        The same ``env`` object, for chaining.
    """
    # Gather information about various types of dependencies
    build_deps = set(pkg.spec.dependencies(deptype=('build', 'test')))
    link_deps = set(pkg.spec.traverse(root=False, deptype=('link')))
    build_link_deps = build_deps | link_deps
    rpath_deps = get_rpath_deps(pkg)
    link_dirs = []
    include_dirs = []
    rpath_dirs = []
    # The top-level package is always RPATHed. It hasn't been installed yet
    # so the RPATHs are added unconditionally (e.g. even though lib64/ may
    # not be created for the install).
    for libdir in ['lib', 'lib64']:
        lib_path = os.path.join(pkg.prefix, libdir)
        rpath_dirs.append(lib_path)
    # Set up link, include, RPATH directories that are passed to the
    # compiler wrapper
    for dep in link_deps:
        if is_system_path(dep.prefix):
            continue
        query = pkg.spec[dep.name]
        dep_link_dirs = list()
        try:
            dep_link_dirs.extend(query.libs.directories)
        except NoLibrariesError:
            tty.debug("No libraries found for {0}".format(dep.name))
        # Fall back to the conventional lib/lib64 dirs if they exist.
        for default_lib_dir in ['lib', 'lib64']:
            default_lib_prefix = os.path.join(dep.prefix, default_lib_dir)
            if os.path.isdir(default_lib_prefix):
                dep_link_dirs.append(default_lib_prefix)
        link_dirs.extend(dep_link_dirs)
        if dep in rpath_deps:
            rpath_dirs.extend(dep_link_dirs)
        try:
            include_dirs.extend(query.headers.directories)
        except NoHeadersError:
            tty.debug("No headers found for {0}".format(dep.name))
    # De-duplicate and drop system paths before exporting to the wrappers.
    link_dirs = list(dedupe(filter_system_paths(link_dirs)))
    include_dirs = list(dedupe(filter_system_paths(include_dirs)))
    rpath_dirs = list(dedupe(filter_system_paths(rpath_dirs)))
    env.set(SPACK_LINK_DIRS, ':'.join(link_dirs))
    env.set(SPACK_INCLUDE_DIRS, ':'.join(include_dirs))
    env.set(SPACK_RPATH_DIRS, ':'.join(rpath_dirs))
    build_prefixes = [dep.prefix for dep in build_deps]
    build_link_prefixes = [dep.prefix for dep in build_link_deps]
    # add run-time dependencies of direct build-time dependencies:
    for build_dep in build_deps:
        for run_dep in build_dep.traverse(deptype='run'):
            build_prefixes.append(run_dep.prefix)
    # Filter out system paths: ['/', '/usr', '/usr/local']
    # These paths can be introduced into the build when an external package
    # is added as a dependency. The problem with these paths is that they often
    # contain hundreds of other packages installed in the same directory.
    # If these paths come first, they can overshadow Spack installations.
    build_prefixes = filter_system_paths(build_prefixes)
    build_link_prefixes = filter_system_paths(build_link_prefixes)
    # Add dependencies to CMAKE_PREFIX_PATH
    env.set_path('CMAKE_PREFIX_PATH', build_link_prefixes)
    # Set environment variables if specified for
    # the given compiler
    compiler = pkg.compiler
    env.extend(spack.schema.environment.parse(compiler.environment))
    if compiler.extra_rpaths:
        extra_rpaths = ':'.join(compiler.extra_rpaths)
        env.set('SPACK_COMPILER_EXTRA_RPATHS', extra_rpaths)
    # Add bin directories from dependencies to the PATH for the build.
    for prefix in build_prefixes:
        for dirname in ['bin', 'bin64']:
            bin_dir = os.path.join(prefix, dirname)
            if os.path.isdir(bin_dir):
                env.prepend_path('PATH', bin_dir)
    # Add spack build environment path with compiler wrappers first in
    # the path. We add the compiler wrapper path, which includes default
    # wrappers (cc, c++, f77, f90), AND a subdirectory containing
    # compiler-specific symlinks. The latter ensures that builds that
    # are sensitive to the *name* of the compiler see the right name when
    # we're building with the wrappers.
    #
    # Conflicts on case-insensitive systems (like "CC" and "cc") are
    # handled by putting one in the <build_env_path>/case-insensitive
    # directory. Add that to the path too.
    env_paths = []
    compiler_specific = os.path.join(
        spack.paths.build_env_path, os.path.dirname(pkg.compiler.link_paths['cc']))
    for item in [spack.paths.build_env_path, compiler_specific]:
        env_paths.append(item)
        ci = os.path.join(item, 'case-insensitive')
        if os.path.isdir(ci):
            env_paths.append(ci)
    for item in env_paths:
        env.prepend_path('PATH', item)
    env.set_path(SPACK_ENV_PATH, env_paths)
    # Working directory for the spack command itself, for debug logs.
    if spack.config.get('config:debug'):
        env.set(SPACK_DEBUG, 'TRUE')
        env.set(SPACK_SHORT_SPEC, pkg.spec.short_spec)
        env.set(SPACK_DEBUG_LOG_ID, pkg.spec.format('{name}-{hash:7}'))
        env.set(SPACK_DEBUG_LOG_DIR, spack.main.spack_working_dir)
    # Find ccache binary and hand it to build environment
    if spack.config.get('config:ccache'):
        ccache = Executable('ccache')
        if not ccache:
            raise RuntimeError("No ccache binary found in PATH")
        env.set(SPACK_CCACHE_BINARY, ccache)
    # Add any pkgconfig directories to PKG_CONFIG_PATH
    for prefix in build_link_prefixes:
        for directory in ('lib', 'lib64', 'share'):
            pcdir = os.path.join(prefix, directory, 'pkgconfig')
            if os.path.isdir(pcdir):
                env.prepend_path('PKG_CONFIG_PATH', pcdir)
    return env
def _set_variables_for_single_module(pkg, module):
    """Install convenience build globals (make, cmake, prefix, ...) on
    ``module``, once; a marker attribute makes repeat calls a no-op."""
    # Put a marker on this module so that it won't execute the body of this
    # function again, since it is not needed
    marker = '_set_run_already_called'
    if getattr(module, marker, False):
        return
    # Job count: configured build_jobs (default 16) if the package allows
    # parallel builds, else 1; capped at the host CPU count.
    jobs = spack.config.get('config:build_jobs', 16) if pkg.parallel else 1
    jobs = min(jobs, multiprocessing.cpu_count())
    m = module
    m.make_jobs = jobs
    # TODO: make these build deps that can be installed if not found.
    m.make = MakeExecutable('make', jobs)
    m.gmake = MakeExecutable('gmake', jobs)
    m.scons = MakeExecutable('scons', jobs)
    m.ninja = MakeExecutable('ninja', jobs)
    # easy shortcut to os.environ
    m.env = os.environ
    # Find the configure script in the archive path
    # Don't use which for this; we want to find it in the current dir.
    m.configure = Executable('./configure')
    m.meson = Executable('meson')
    m.cmake = Executable('cmake')
    m.ctest = MakeExecutable('ctest', jobs)
    # Standard CMake arguments
    m.std_cmake_args = spack.build_systems.cmake.CMakePackage._std_args(pkg)
    m.std_meson_args = spack.build_systems.meson.MesonPackage._std_args(pkg)
    # Put spack compiler paths in module scope.
    link_dir = spack.paths.build_env_path
    m.spack_cc = os.path.join(link_dir, pkg.compiler.link_paths['cc'])
    m.spack_cxx = os.path.join(link_dir, pkg.compiler.link_paths['cxx'])
    m.spack_f77 = os.path.join(link_dir, pkg.compiler.link_paths['f77'])
    m.spack_fc = os.path.join(link_dir, pkg.compiler.link_paths['fc'])
    # Emulate some shell commands for convenience
    m.pwd = os.getcwd
    m.cd = os.chdir
    m.mkdir = os.mkdir
    m.makedirs = os.makedirs
    m.remove = os.remove
    m.removedirs = os.removedirs
    m.symlink = os.symlink
    m.mkdirp = mkdirp
    m.install = install
    m.install_tree = install_tree
    m.rmtree = shutil.rmtree
    m.move = shutil.move
    # Useful directories within the prefix are encapsulated in
    # a Prefix object.
    m.prefix = pkg.prefix
    # Platform-specific library suffix.
    m.dso_suffix = dso_suffix
    def static_to_shared_library(static_lib, shared_lib=None, **kwargs):
        # Closure over pkg/m: converts a static archive to a shared library
        # using the wrapped compiler (default: the C wrapper path above).
        compiler_path = kwargs.get('compiler', m.spack_cc)
        compiler = Executable(compiler_path)
        return _static_to_shared_library(pkg.spec.architecture, compiler,
                                         static_lib, shared_lib, **kwargs)
    m.static_to_shared_library = static_to_shared_library
    # Put a marker on this module so that it won't execute the body of this
    # function again, since it is not needed
    setattr(m, marker, True)
def set_module_variables_for_package(pkg):
    """Populate build globals on the package's module and its ancestors'.

    If a user makes their own package repo, e.g.
    spack.pkg.mystuff.libelf.Libelf, and it inherits from an existing class
    like spack.pkg.original.libelf.Libelf, the parent class's module must
    be populated too so inherited methods keep working
    (parent_class_modules includes pkg.module).
    """
    for module in parent_class_modules(pkg.__class__):
        _set_variables_for_single_module(pkg, module)
def _static_to_shared_library(arch, compiler, static_lib, shared_lib=None,
                              **kwargs):
    """Convert a static archive into a shared library via the compiler.

    Args:
        arch: target architecture; tested for 'linux'/'cray'/'darwin'
        compiler (Executable): compiler used to perform the link
        static_lib (str): path of the input static library
        shared_lib (str): output path; defaults to the input name with the
            platform dso suffix
        **kwargs: ``compiler_output``, extra ``arguments``, ``version`` and
            ``compat_version`` (the latter defaults to ``version``)

    Returns:
        The compiler invocation's result.
    """
    compiler_output = kwargs.get('compiler_output', None)
    arguments = kwargs.get('arguments', [])
    version = kwargs.get('version', None)
    compat_version = kwargs.get('compat_version', version)
    if not shared_lib:
        shared_lib = '{0}.{1}'.format(os.path.splitext(static_lib)[0],
                                      dso_suffix)
    compiler_args = []
    # TODO: Compiler arguments should not be hardcoded but provided by
    # the different compiler classes.
    if 'linux' in arch or 'cray' in arch:
        soname = os.path.basename(shared_lib)
        if compat_version:
            soname += '.{0}'.format(compat_version)
        compiler_args = [
            '-shared',
            '-Wl,-soname,{0}'.format(soname),
            '-Wl,--whole-archive',
            static_lib,
            '-Wl,--no-whole-archive'
        ]
    elif 'darwin' in arch:
        install_name = shared_lib
        if compat_version:
            install_name += '.{0}'.format(compat_version)
        compiler_args = [
            '-dynamiclib',
            '-install_name', '{0}'.format(install_name),
            '-Wl,-force_load,{0}'.format(static_lib)
        ]
        if compat_version:
            compiler_args.extend(['-compatibility_version', '{0}'.format(
                compat_version)])
        if version:
            compiler_args.extend(['-current_version', '{0}'.format(version)])
    if len(arguments) > 0:
        compiler_args.extend(arguments)
    # Append version (or compat_version) to the actual output file name.
    shared_lib_base = shared_lib
    if version:
        shared_lib += '.{0}'.format(version)
    elif compat_version:
        shared_lib += '.{0}'.format(compat_version)
    compiler_args.extend(['-o', shared_lib])
    # Create symlinks for version and compat_version
    # NOTE(review): the symlinks are created *before* the compiler runs and
    # os.symlink raises if the link already exists — confirm callers only
    # invoke this once per output in a clean directory.
    shared_lib_link = os.path.basename(shared_lib)
    if version or compat_version:
        os.symlink(shared_lib_link, shared_lib_base)
    if compat_version and compat_version != version:
        os.symlink(shared_lib_link, '{0}.{1}'.format(shared_lib_base,
                                                     compat_version))
    return compiler(*compiler_args, output=compiler_output)
def get_rpath_deps(pkg):
    """Return the dependency specs to be RPATHed: direct link dependencies
    only, or the whole transitive link closure when the package opts in via
    ``transitive_rpaths``."""
    if not pkg.transitive_rpaths:
        return pkg.spec.dependencies(deptype='link')
    return list(pkg.spec.traverse(root=False, deptype=('link')))
def get_rpaths(pkg):
    """Get a list of all the rpaths for a package (deduplicated, with
    system paths filtered out)."""
    deps = get_rpath_deps(pkg)
    # Package's own lib dirs first, then all dependency lib dirs, then all
    # dependency lib64 dirs — preserving this ordering matters for search.
    candidates = [pkg.prefix.lib, pkg.prefix.lib64]
    candidates += [d.prefix.lib for d in deps
                   if os.path.isdir(d.prefix.lib)]
    candidates += [d.prefix.lib64 for d in deps
                   if os.path.isdir(d.prefix.lib64)]
    # Second module is our compiler mod name. We use that to get rpaths from
    # module show output.
    compiler_modules = pkg.compiler.modules
    if compiler_modules and len(compiler_modules) > 1:
        candidates.append(path_from_modules([compiler_modules[1]]))
    return list(dedupe(filter_system_paths(candidates)))
def get_std_cmake_args(pkg):
    """Return the standard CMake arguments Spack would pass for ``pkg``."""
    return spack.build_systems.cmake.CMakePackage._std_args(pkg)
def get_std_meson_args(pkg):
    """Return the standard Meson arguments Spack would pass for ``pkg``."""
    return spack.build_systems.meson.MesonPackage._std_args(pkg)
def parent_class_modules(cls):
    """Return the modules of ``cls`` and of its base classes, restricted to
    proper subclasses of ``spack.package.PackageBase`` (PackageBase itself
    and unrelated classes contribute nothing)."""
    is_proper_package_subclass = (
        issubclass(cls, spack.package.PackageBase) and
        not issubclass(spack.package.PackageBase, cls))
    if not is_proper_package_subclass:
        return []
    own_module = sys.modules.get(cls.__module__)
    modules = [own_module] if own_module else []
    for base in cls.__bases__:
        modules.extend(parent_class_modules(base))
    return modules
def load_external_modules(pkg):
    """Traverse the package's spec DAG and load every external module
    recorded on its nodes (no-op for nodes without external modules)."""
    for node in list(pkg.spec.traverse()):
        for module_name in (node.external_modules or []):
            load_module(module_name)
def setup_package(pkg, dirty, context='build'):
    """Execute all environment setup routines for a build or test context.

    Args:
        pkg: the package whose environment is being prepared
        dirty (bool): skip the initial environment cleaning when True
        context (str): 'build' or 'test'; selects which setup hooks run

    Order matters here: env modifications are collected first, modules are
    loaded, and only then are the modifications applied to ``os.environ``.
    """
    env = EnvironmentModifications()
    if not dirty:
        clean_environment()
    # setup compilers and build tools for build contexts
    need_compiler = context == 'build' or (context == 'test' and
                                           pkg.test_requires_compiler)
    if need_compiler:
        set_compiler_environment_variables(pkg, env)
        set_build_environment_variables(pkg, env, dirty)
    # architecture specific setup
    pkg.architecture.platform.setup_platform_environment(pkg, env)
    if context == 'build':
        # recursive post-order dependency information
        env.extend(
            modifications_from_dependencies(pkg.spec, context=context)
        )
        if (not dirty) and (not env.is_unset('CPATH')):
            tty.debug("A dependency has updated CPATH, this may lead pkg-"
                      "config to assume that the package is part of the system"
                      " includes and omit it when invoked with '--cflags'.")
        # setup package itself
        set_module_variables_for_package(pkg)
        pkg.setup_build_environment(env)
    elif context == 'test':
        import spack.user_environment as uenv  # avoid circular import
        env.extend(uenv.environment_modifications_for_spec(pkg.spec))
        env.extend(
            modifications_from_dependencies(pkg.spec, context=context)
        )
        set_module_variables_for_package(pkg)
        # Make locally built test executables runnable from the CWD.
        env.prepend_path('PATH', '.')
    # Loading modules, in particular if they are meant to be used outside
    # of Spack, can change environment variables that are relevant to the
    # build of packages. To avoid a polluted environment, preserve the
    # value of a few, selected, environment variables
    # With the current ordering of environment modifications, this is strictly
    # unnecessary. Modules affecting these variables will be overwritten anyway
    with preserve_environment('CC', 'CXX', 'FC', 'F77'):
        # All module loads that otherwise would belong in previous
        # functions have to occur after the env object has its
        # modifications applied. Otherwise the environment modifications
        # could undo module changes, such as unsetting LD_LIBRARY_PATH
        # after a module changes it.
        if need_compiler:
            for mod in pkg.compiler.modules:
                # Fixes issue https://github.com/spack/spack/issues/3153
                if os.environ.get("CRAY_CPU_TARGET") == "mic-knl":
                    load_module("cce")
                load_module(mod)
        # kludge to handle cray libsci being automatically loaded by PrgEnv
        # modules on cray platform. Module unload does no damage when
        # unnecessary
        module('unload', 'cray-libsci')
        if pkg.architecture.target.module_name:
            load_module(pkg.architecture.target.module_name)
        load_external_modules(pkg)
    implicit_rpaths = pkg.compiler.implicit_rpaths()
    if implicit_rpaths:
        env.set('SPACK_COMPILER_IMPLICIT_RPATHS',
                ':'.join(implicit_rpaths))
    # Make sure nothing's strange about the Spack environment.
    validate(env, tty.warn)
    env.apply_modifications()
def modifications_from_dependencies(spec, context):
    """Collect environment modifications contributed by dependencies.

    Args:
        spec: the (root) spec whose dependencies are visited in post-order
        context (str): one of 'build', 'run', 'test'; picks the dependency
            types to traverse and the setup hook invoked on each dependency

    Returns:
        EnvironmentModifications accumulated from every visited dependency.
    """
    env = EnvironmentModifications()
    pkg = spec.package
    # Maps the context to deptype and method to be called
    deptype_and_method = {
        'build': (('build', 'link', 'test'),
                  'setup_dependent_build_environment'),
        'run': (('link', 'run'), 'setup_dependent_run_environment'),
        'test': (('link', 'run', 'test'), 'setup_dependent_run_environment')
    }
    deptype, method = deptype_and_method[context]
    # In the 'test' context the root spec itself is also visited.
    root = context == 'test'
    for dspec in spec.traverse(order='post', root=root, deptype=deptype):
        dpkg = dspec.package
        set_module_variables_for_package(dpkg)
        # Allow dependencies to modify the module
        dpkg.setup_dependent_package(pkg.module, spec)
        getattr(dpkg, method)(env, spec)
    return env
def _setup_pkg_and_run(serialized_pkg, function, kwargs, child_pipe,
                       input_multiprocess_fd):
    """Child-process entry point: restore the package, run ``function``,
    and send the result (or a pickle-able error) back over ``child_pipe``.

    Args:
        serialized_pkg: PackageInstallContext to restore the package from
        function: callable invoked as ``function(pkg, kwargs)``
        kwargs (dict): options; 'context', 'fake' and 'dirty' are consulted
        child_pipe: multiprocessing pipe end used to return the result
        input_multiprocess_fd: optional forwarded stdin fd (may be None)
    """
    context = kwargs.get('context', 'build')
    try:
        # We are in the child process. Python sets sys.stdin to
        # open(os.devnull) to prevent our process and its parent from
        # simultaneously reading from the original stdin. But, we assume
        # that the parent process is not going to read from it till we
        # are done with the child, so we undo Python's precaution.
        if input_multiprocess_fd is not None:
            sys.stdin = os.fdopen(input_multiprocess_fd.fd)
        pkg = serialized_pkg.restore()
        if not kwargs.get('fake', False):
            # Real builds get a fully prepared environment; keep a copy of
            # the environment as it was before setup.
            kwargs['unmodified_env'] = os.environ.copy()
            setup_package(pkg, dirty=kwargs.get('dirty', False),
                          context=context)
        return_value = function(pkg, kwargs)
        child_pipe.send(return_value)
    except StopPhase as e:
        # Do not create a full ChildError from this, it's not an error
        # it's a control statement.
        child_pipe.send(e)
    except BaseException:
        # catch ANYTHING that goes wrong in the child process
        exc_type, exc, tb = sys.exc_info()
        # Need to unwind the traceback in the child because traceback
        # objects can't be sent to the parent.
        tb_string = traceback.format_exc()
        # build up some context from the offending package so we can
        # show that, too.
        package_context = get_package_context(tb)
        logfile = None
        if context == 'build':
            try:
                if hasattr(pkg, 'log_path'):
                    logfile = pkg.log_path
            except NameError:
                # 'pkg' is not defined yet
                pass
        elif context == 'test':
            logfile = os.path.join(
                pkg.test_suite.stage,
                spack.install_test.TestSuite.test_log_name(pkg.spec))
        # make a pickleable exception to send to parent.
        msg = "%s: %s" % (exc_type.__name__, str(exc))
        ce = ChildError(msg,
                        exc_type.__module__,
                        exc_type.__name__,
                        tb_string, logfile, context, package_context)
        child_pipe.send(ce)
    finally:
        child_pipe.close()
        if input_multiprocess_fd is not None:
            input_multiprocess_fd.close()
def start_build_process(pkg, function, kwargs):
    """Run ``function(pkg, kwargs)`` in a child process and return its result.

    Args:
        pkg: package the function operates on
        function: callable executed in the child via ``_setup_pkg_and_run``
        kwargs (dict): options forwarded to the child

    Raises:
        StopPhase: re-raised without printing if the child stopped a phase
        ChildError/InstallError: re-raised with ``pkg`` attached
    """
    parent_pipe, child_pipe = multiprocessing.Pipe()
    input_multiprocess_fd = None
    serialized_pkg = spack.subprocess_context.PackageInstallContext(pkg)
    try:
        # Forward sys.stdin when appropriate, to allow toggling verbosity
        if sys.stdin.isatty() and hasattr(sys.stdin, 'fileno'):
            input_fd = os.dup(sys.stdin.fileno())
            input_multiprocess_fd = MultiProcessFd(input_fd)
        p = multiprocessing.Process(
            target=_setup_pkg_and_run,
            args=(serialized_pkg, function, kwargs, child_pipe,
                  input_multiprocess_fd))
        p.start()
    except InstallError as e:
        e.pkg = pkg
        raise
    finally:
        # Close the input stream in the parent process
        if input_multiprocess_fd is not None:
            input_multiprocess_fd.close()
    child_result = parent_pipe.recv()
    p.join()
    # If returns a StopPhase, raise it
    if isinstance(child_result, StopPhase):
        # do not print
        raise child_result
    # let the caller know which package went wrong.
    if isinstance(child_result, InstallError):
        child_result.pkg = pkg
    if isinstance(child_result, ChildError):
        # If the child process raised an error, print its output here rather
        # than waiting until the call to SpackError.die() in main(). This
        # allows exception handling output to be logged from within Spack.
        # see spack.main.SpackCommand.
        child_result.print_context()
        raise child_result
    return child_result
def get_package_context(traceback, context=3):
    """Build a list of source lines around the package frame that failed.

    Args:
        traceback: traceback object from the failed build
        context (int): number of lines to show on each side of the error

    Returns:
        list of str: a location header followed by marked source lines.
    """
    def make_stack(tb, stack=None):
        # Unwind the traceback so the innermost frame comes first.
        if stack is None:
            stack = []
        if tb is not None:
            make_stack(tb.tb_next, stack)
            stack.append(tb)
        return stack
    stack = make_stack(traceback)
    # NOTE(review): if no frame holds a PackageBase `self`, the loop falls
    # through and the *last* inspected frame is used (and an empty traceback
    # would leave `frame` unbound) — confirm callers always pass a traceback
    # originating inside a package method.
    for tb in stack:
        frame = tb.tb_frame
        if 'self' in frame.f_locals:
            # Find the first proper subclass of PackageBase.
            obj = frame.f_locals['self']
            if isinstance(obj, spack.package.PackageBase):
                break
    # We found obj, the Package implementation we care about.
    # Point out the location in the install method where we failed.
    lines = [
        '{0}:{1:d}, in {2}:'.format(
            inspect.getfile(frame.f_code),
            frame.f_lineno - 1,  # subtract 1 because f_lineno is 0-indexed
            frame.f_code.co_name
        )
    ]
    # Build a message showing context in the install method.
    sourcelines, start = inspect.getsourcelines(frame)
    # Calculate lineno of the error relative to the start of the function.
    # Subtract 1 because f_lineno is 0-indexed.
    # NOTE(review): per CPython docs f_lineno is actually 1-based; the -1
    # adjustments here look like a carried-through off-by-one — confirm
    # against rendered output before changing.
    fun_lineno = frame.f_lineno - start - 1
    start_ctx = max(0, fun_lineno - context)
    sourcelines = sourcelines[start_ctx:fun_lineno + context + 1]
    for i, line in enumerate(sourcelines):
        is_error = start_ctx + i == fun_lineno
        mark = '>> ' if is_error else ' '
        # Add start to get lineno relative to start of file, not function.
        marked = ' {0}{1:-6d}{2}'.format(
            mark, start + start_ctx + i, line.rstrip())
        if is_error:
            # Highlight the failing line in red.
            marked = colorize('@R{%s}' % cescape(marked))
        lines.append(marked)
    return lines
class InstallError(spack.error.SpackError):
    """Raised by packages when a package fails to install.

    Base class for build-time errors such as ChildError below.
    """
class ChildError(InstallError):
    """Pickle-able error raised in a build child process and re-raised in
    the parent; carries the original exception's module/class name, the
    formatted traceback, the log file location and package source context.
    """
    # List of errors considered "build errors", for which we'll show log
    # context instead of Python context.
    build_errors = [('spack.util.executable', 'ProcessError')]
    def __init__(self, msg, module, classname, traceback_string, log_name,
                 log_type, context):
        super(ChildError, self).__init__(msg)
        self.module = module
        self.name = classname
        self.traceback = traceback_string
        self.log_name = log_name
        self.log_type = log_type
        self.context = context
    @property
    def long_message(self):
        # Assemble the detailed message: build errors show the log summary,
        # Python errors show package source context; both end with a pointer
        # to the full log when one exists.
        out = StringIO()
        out.write(self._long_message if self._long_message else '')
        have_log = self.log_name and os.path.exists(self.log_name)
        if (self.module, self.name) in ChildError.build_errors:
            # The error happened in some external executed process. Show
            # the log with errors or warnings highlighted.
            if have_log:
                write_log_summary(out, self.log_type, self.log_name)
        else:
            # The error happened in the Python code, so try to show
            # some context from the Package itself.
            if self.context:
                out.write('\n')
                out.write('\n'.join(self.context))
                out.write('\n')
        if out.getvalue():
            out.write('\n')
        if have_log:
            out.write('See {0} log for details:\n'.format(self.log_type))
            out.write(' {0}\n'.format(self.log_name))
        return out.getvalue()
    def __str__(self):
        return self.message
    def __reduce__(self):
        # Rebuild through a module-level factory so instances can cross the
        # multiprocessing pipe via pickle.
        return _make_child_error, (
            self.message,
            self.module,
            self.name,
            self.traceback,
            self.log_name,
            self.log_type,
            self.context)
def _make_child_error(msg, module, name, traceback, log, log_type, context):
    # Factory used by ChildError.__reduce__ to rebuild instances on unpickle.
    return ChildError(msg, module, name, traceback, log, log_type, context)
class StopPhase(spack.error.SpackError):
    """Pickle-able exception to control stopped builds."""
    def __reduce__(self):
        # Rebuild through a module-level factory so instances survive pickling.
        return _make_stop_phase, (self.message, self.long_message)
def _make_stop_phase(msg, long_msg):
    # Factory used by StopPhase.__reduce__ to rebuild instances on unpickle.
    return StopPhase(msg, long_msg)
def write_log_summary(out, log_type, log, last=None):
    """Write a summary of errors (or, if none, warnings) from a log file.

    Args:
        out: file-like stream the summary is written to
        log_type (str): label for the log (e.g. 'build'), used in headings
        log (str): path of the log file handed to ``parse_log_events``
        last (int): if set, show only the last ``last`` events
    """
    errors, warnings = parse_log_events(log)
    nerr = len(errors)
    nwar = len(warnings)
    if nerr > 0:
        if last and nerr > last:
            errors = errors[-last:]
            nerr = last
        # If errors are found, only display errors
        out.write(
            "\n%s found in %s log:\n" %
            (plural(nerr, 'error'), log_type))
        out.write(make_log_context(errors))
    elif nwar > 0:
        if last and nwar > last:
            warnings = warnings[-last:]
            nwar = last
        # If no errors are found but warnings are, display warnings
        out.write(
            "\n%s found in %s log:\n" %
            (plural(nwar, 'warning'), log_type))
        out.write(make_log_context(warnings))
| true | true |
f7fd7766ddcd15151dd470e0d354238bb2e09129 | 121,133 | py | Python | atp_mens/data_2019_04.py | Tjorriemorrie/ufc | 46918c91e1ccf464d9d03dc8524dab91eca239d2 | [
"Apache-2.0"
] | 1 | 2019-11-10T14:14:42.000Z | 2019-11-10T14:14:42.000Z | atp_mens/data_2019_04.py | Tjorriemorrie/ufc | 46918c91e1ccf464d9d03dc8524dab91eca239d2 | [
"Apache-2.0"
] | 2 | 2020-09-25T23:55:31.000Z | 2022-02-10T00:20:20.000Z | atp_mens/data_2019_04.py | Tjorriemorrie/ufc | 46918c91e1ccf464d9d03dc8524dab91eca239d2 | [
"Apache-2.0"
] | null | null | null | from men import *
from location import *
DATA_2019_04 = [
{
'location': HOUSTON,
'date': '2019-04-14',
'matches': [
# 2019-04-06
{
'round': 512,
'players': [
SANTIAGO_GIRALDO,
JAMES_WARD
],
'score': [(6, 4), (6, 4)],
'odds': {
SANTIAGO_GIRALDO: 1.27,
JAMES_WARD: 3.50
}
},
{
'round': 512,
'players': [
PEDJA_KRSTIN,
MARCOS_GIRON
],
'score': [(6, 4), (6, 1)],
'odds': {
PEDJA_KRSTIN: 1.65,
MARCOS_GIRON: 2.15
}
},
{
'round': 512,
'players': [
ROBERTO_QUIROZ,
JC_ARAGONE
],
'score': [(6, 2), (6, 0)],
'odds': {
ROBERTO_QUIROZ: 1.65,
JC_ARAGONE: 2.13
}
},
{
'round': 512,
'players': [
MITCHELL_KRUEGER,
DOMINIK_KOEPFER
],
'score': [(3, 6), (6, 3), (6, 4)],
'odds': {
MITCHELL_KRUEGER: 1.67,
DOMINIK_KOEPFER: 1.96
}
},
{
'round': 512,
'players': [
DANIEL_ELAHI_GALAN,
SEBASTIAN_OFNER
],
'score': [(6, 0), (6, 2)],
'odds': {
DANIEL_ELAHI_GALAN: 2.14,
SEBASTIAN_OFNER: 1.65
}
},
{
'round': 512,
'players': [
CHRISTOPHER_EUBANKS,
JAY_CLARKE
],
'score': [(6, 3), (6, 4)],
'odds': {
CHRISTOPHER_EUBANKS: 2.40,
JAY_CLARKE: 1.56
}
},
{
'round': 512,
'players': [
DARIAN_KING,
PETER_POLANSKY
],
'score': [(6, 4), (6, 1)],
'odds': {
DARIAN_KING: 1.63,
PETER_POLANSKY: 2.20
}
},
{
'round': 512,
'players': [
HENRI_LAAKSONEN,
TOMMY_PAUL
],
'score': [(6, 4), (6, 7), (6, 4)],
'odds': {
HENRI_LAAKSONEN: 1.93,
TOMMY_PAUL: 1.69
}
},
# 2019-04-08
{
'round': 256,
'players': [
PEDJA_KRSTIN,
DARIAN_KING
],
'score': [(7, 6), (7, 5)],
'odds': {
PEDJA_KRSTIN: 1.48,
DARIAN_KING: 2.51
}
},
{
'round': 256,
'players': [
DANIEL_ELAHI_GALAN,
ROBERTO_QUIROZ
],
'score': [(4, 6), (7, 5), (6, 1)],
# no odds
},
{
'round': 256,
'players': [
SANTIAGO_GIRALDO,
CHRISTOPHER_EUBANKS
],
'score': [(6, 4), (6, 4)],
# no odds
},
{
'round': 256,
'players': [
HENRI_LAAKSONEN,
MITCHELL_KRUEGER
],
'score': [(6, 3), (5, 7), (6, 3)],
# no odds
},
{
'round': 32,
'players': [
BERNARD_TOMIC,
DENIS_KUDLA
],
'score': [(7, 6), (7, 5)],
'odds': {
BERNARD_TOMIC: 1.77,
DENIS_KUDLA: 2.05
}
},
{
'round': 32,
'players': [
CASPER_RUUD,
HUGO_DELLIEN
],
'score': [(7, 6), (6, 4)],
'odds': {
CASPER_RUUD: 1.49,
HUGO_DELLIEN: 2.68
}
},
{
'round': 32,
'players': [
RYAN_HARRISON,
IVO_KARLOVIC
],
'score': [(6, 3), (6, 4)],
'odds': {
RYAN_HARRISON: 2.26,
IVO_KARLOVIC: 1.65
}
},
{
'round': 32,
'players': [
CHRISTIAN_GARIN,
PABLO_CUEVAS
],
'score': [(4, 6), (6, 4), (6, 2)],
'odds': {
CHRISTIAN_GARIN: 2.38,
PABLO_CUEVAS: 1.59
}
},
{
'round': 32,
'players': [
MARCEL_GRANOLLERS,
TAYLOR_FRITZ
],
'score': [(6, 2), (4, 6), (6, 2)],
'odds': {
MARCEL_GRANOLLERS: 2.20,
TAYLOR_FRITZ: 1.59
}
},
# 2019-04-09
{
'round': 32,
'players': [
JANKO_TIPSAREVIC,
TENNYS_SANDGREN
],
'score': [(6, 1), (7, 6)],
'odds': {
JANKO_TIPSAREVIC: 2.71,
TENNYS_SANDGREN: 1.45
}
},
{
'round': 32,
'players': [
SANTIAGO_GIRALDO,
BRADLEY_KLAHN
],
'score': [(6, 4), (6, 4)],
'odds': {
SANTIAGO_GIRALDO: 1.43,
BRADLEY_KLAHN: 2.78
}
},
{
'round': 32,
'players': [
GUILLERMO_GARCIA_LOPEZ,
NOAH_RUBIN
],
'score': [(6, 7), (6, 3), (6, 3)],
'odds': {
GUILLERMO_GARCIA_LOPEZ: 1.57,
NOAH_RUBIN: 2.48
}
},
{
'round': 32,
'players': [
DANIEL_ELAHI_GALAN,
PAOLO_LORENZI
],
'score': [(7, 6), (6, 4)],
'odds': {
DANIEL_ELAHI_GALAN: 2.00,
PAOLO_LORENZI: 1.77
}
},
{
'round': 32,
'players': [
SAM_QUERREY,
BJORN_FRATANGELO
],
'score': [(6, 3), (6, 4)],
'odds': {
SAM_QUERREY: 1.61,
BJORN_FRATANGELO: 2.30
}
},
{
'round': 32,
'players': [
JORDAN_THOMPSON,
PEDJA_KRSTIN
],
'score': [(7, 5), (6, 2)],
'odds': {
JORDAN_THOMPSON: 1.58,
PEDJA_KRSTIN: 2.48
}
},
{
'round': 32,
'players': [
HENRI_LAAKSONEN,
MACKENZIE_MCDONALD
],
'score': [(6, 3), (6, 4)],
'odds': {
HENRI_LAAKSONEN: 1.71,
MACKENZIE_MCDONALD: 2.13
}
},
# 2019-04-10
{
'round': 16,
'players': [
HENRI_LAAKSONEN,
RYAN_HARRISON
],
'score': [(6, 4), (7, 5)],
'odds': {
HENRI_LAAKSONEN: 1.93,
RYAN_HARRISON: 1.83
}
},
{
'round': 16,
'players': [
MARCEL_GRANOLLERS,
BERNARD_TOMIC
],
'score': [(6, 1), (6, 2)],
'odds': {
MARCEL_GRANOLLERS: 1.59,
BERNARD_TOMIC: 2.40
}
},
{
'round': 16,
'players': [
CASPER_RUUD,
REILLY_OPELKA
],
'score': [(4, 6), (6, 4), (6, 4)],
'odds': {
CASPER_RUUD: 1.89,
REILLY_OPELKA: 1.91
}
},
{
'round': 16,
'players': [
CHRISTIAN_GARIN,
JEREMY_CHARDY
],
'score': [(3, 6), (7, 6), (7, 6)],
'odds': {
CHRISTIAN_GARIN: 1.67,
JEREMY_CHARDY: 2.03
}
},
# 2019-04-11
{
'round': 16,
'players': [
SAM_QUERREY,
GUILLERMO_GARCIA_LOPEZ
],
'score': [(6, 4), (6, 3)],
'odds': {
SAM_QUERREY: 1.44,
GUILLERMO_GARCIA_LOPEZ: 2.79
}
},
{
'round': 16,
'players': [
JORDAN_THOMPSON,
SANTIAGO_GIRALDO
],
'score': [(4, 6), (7, 6), (7, 5)],
'odds': {
JORDAN_THOMPSON: 1.59,
SANTIAGO_GIRALDO: 2.30
}
},
{
'round': 16,
'players': [
JANKO_TIPSAREVIC,
CAMERON_NORRIE
],
'score': [(6, 3), (6, 4)],
'odds': {
JANKO_TIPSAREVIC: 2.27,
CAMERON_NORRIE: 1.63
}
},
{
'round': 16,
'players': [
DANIEL_ELAHI_GALAN,
STEVE_JOHNSON
],
'score': [(6, 3), (6, 3)],
'odds': {
DANIEL_ELAHI_GALAN: 2.85,
STEVE_JOHNSON: 1.43
}
},
# 2019-04-12
{
'round': 8,
'players': [
CASPER_RUUD,
MARCEL_GRANOLLERS
],
'score': [(6, 1), (6, 0)],
'odds': {
CASPER_RUUD: 1.65,
MARCEL_GRANOLLERS: 2.30
}
},
{
'round': 8,
'players': [
CHRISTIAN_GARIN,
HENRI_LAAKSONEN
],
'score': [(6, 3), (6, 2)],
'odds': {
CHRISTIAN_GARIN: 1.49,
HENRI_LAAKSONEN: 2.72
}
},
{
'round': 8,
'players': [
SAM_QUERREY,
JANKO_TIPSAREVIC
],
'score': [(7, 6), (7, 6)],
'odds': {
SAM_QUERREY: 1.40,
JANKO_TIPSAREVIC: 2.96
}
},
# 2019-04-13
{
'round': 8,
'players': [
DANIEL_ELAHI_GALAN,
JORDAN_THOMPSON
],
'score': [(6, 1), (4, 6), (6, 4)],
# no odds
},
{
'round': 4,
'players': [
CASPER_RUUD,
DANIEL_ELAHI_GALAN
],
'score': [(7, 5), (6, 2)],
'odds': {
CASPER_RUUD: 1.29,
DANIEL_ELAHI_GALAN: 3.60
}
},
{
'round': 4,
'players': [
CHRISTIAN_GARIN,
SAM_QUERREY
],
'score': [(7, 6), (6, 2)],
'odds': {
CHRISTIAN_GARIN: 2.00,
SAM_QUERREY: 1.81
}
},
# 2019-04-13
{
'round': 2,
'players': [
CHRISTIAN_GARIN,
CASPER_RUUD
],
'score': [(7, 6), (4, 6), (6, 3)],
'odds': {
CHRISTIAN_GARIN: 1.67,
CASPER_RUUD: 2.25
}
}
]
},
{
'location': MARRAKECH,
'date': '2019-04-14',
'matches': [
# 2019-04-07
{
'round': 512,
'players': [
EVGENY_KARLOVSKIY,
TIM_PUETZ
],
'score': [(7, 6), (4, 6), (6, 2)],
# no odds
},
{
'round': 512,
'players': [
CARLOS_BERLOCQ,
ADAM_MOUNDIR
],
'score': [(6, 1), (6, 2)],
'odds': {
CARLOS_BERLOCQ: 1.07,
ADAM_MOUNDIR: 7.18
}
},
{
'round': 512,
'players': [
FACUNDO_BAGNIS,
VIKTOR_TROICKI
],
'score': [(7, 6), (6, 4)],
'odds': {
FACUNDO_BAGNIS: 1.69,
VIKTOR_TROICKI: 2.01
}
},
{
'round': 512,
'players': [
ADRIAN_MENENDEZ_MACEIRAS,
LAMINE_OUAHAB
],
'score': [(7, 6), (6, 7), (6, 4)],
'odds': {
ADRIAN_MENENDEZ_MACEIRAS: 2.28,
LAMINE_OUAHAB: 1.57
}
},
{
'round': 512,
'players': [
ELLIOT_BENCHETRIT,
CORENTIN_MOUTET
],
'score': [(6, 3), (7, 6)],
'odds': {
ELLIOT_BENCHETRIT: 3.24,
CORENTIN_MOUTET: 1.33
}
},
{
'round': 512,
'players': [
ALEJANDRO_DAVIDOVICH_FOKINA,
GREGOIRE_BARRERE
],
'score': [(7, 5), (3, 6), (6, 4)],
'odds': {
ALEJANDRO_DAVIDOVICH_FOKINA: 1.59,
GREGOIRE_BARRERE: 2.30
}
},
{
'round': 512,
'players': [
ELIAS_YMER,
KEVIN_KRAWIETZ
],
'score': [(7, 6), (6, 4)],
'odds': {
ELIAS_YMER: 1.31,
KEVIN_KRAWIETZ: 3.40
}
},
{
'round': 512,
'players': [
LORENZO_SONEGO,
ALEXEY_VATUTIN
],
'score': [(7, 6), (6, 4)],
'odds': {
LORENZO_SONEGO: 1.36,
ALEXEY_VATUTIN: 3.00
}
},
# 2019-04-08
{
'round': 256,
'players': [
ALEJANDRO_DAVIDOVICH_FOKINA,
EVGENY_KARLOVSKIY
],
'score': [(6, 2), (6, 2)],
'odds': {
ALEJANDRO_DAVIDOVICH_FOKINA: 1.15,
EVGENY_KARLOVSKIY: 5.16
}
},
{
'round': 256,
'players': [
ADRIAN_MENENDEZ_MACEIRAS,
ELLIOT_BENCHETRIT
],
'score': [(7, 5), (7, 5)],
'odds': {
ADRIAN_MENENDEZ_MACEIRAS: 2.55,
ELLIOT_BENCHETRIT: 1.48
}
},
{
'round': 256,
'players': [
FACUNDO_BAGNIS,
ELIAS_YMER
],
'score': [(1, 6), (6, 3), (7, 5)],
'odds': {
FACUNDO_BAGNIS: 2.13,
ELIAS_YMER: 1.63
}
},
{
'round': 256,
'players': [
LORENZO_SONEGO,
CARLOS_BERLOCQ
],
'score': [(6, 4), (7, 5)],
'odds': {
LORENZO_SONEGO: 1.28,
CARLOS_BERLOCQ: 3.34
}
},
{
'round': 32,
'players': [
JO_WILFRIED_TSONGA,
CEDRIC_MARCEL_STEBE
],
'score': [(6, 1), (7, 6)],
'odds': {
JO_WILFRIED_TSONGA: 1.11,
CEDRIC_MARCEL_STEBE: 6.50
}
},
{
'round': 32,
'players': [
TARO_DANIEL,
MISCHA_ZVEREV
],
'score': [(6, 3), (6, 0)],
'odds': {
TARO_DANIEL: 1.45,
MISCHA_ZVEREV: 2.80
}
},
{
'round': 32,
'players': [
GUIDO_ANDREOZZI,
ALBERT_RAMOS_VINOLAS
],
'score': [(6, 3), (7, 6)],
'odds': {
GUIDO_ANDREOZZI: 2.40,
ALBERT_RAMOS_VINOLAS: 1.59
}
},
{
'round': 32,
'players': [
GILLES_SIMON,
JOZEF_KOVALIK
],
'score': [(6, 4), (6, 1)],
'odds': {
GILLES_SIMON: 1.38,
JOZEF_KOVALIK: 3.00
}
},
{
'round': 32,
'players': [
KYLE_EDMUND,
UGO_HUMBERT
],
'score': [(6, 3), (6, 2)],
'odds': {
KYLE_EDMUND: 1.20,
UGO_HUMBERT: 4.70
}
},
# 2019-04-09
{
'round': 32,
'players': [
BENOIT_PAIRE,
ALJAZ_BEDENE
],
'score': [(3, 6), (6, 4), (7, 5)],
'odds': {
BENOIT_PAIRE: 1.95,
ALJAZ_BEDENE: 1.74
}
},
{
'round': 32,
'players': [
JAUME_MUNAR,
FACUNDO_BAGNIS
],
'score': [(6, 1), (7, 6)],
'odds': {
JAUME_MUNAR: 1.30,
FACUNDO_BAGNIS: 3.30
}
},
{
'round': 32,
'players': [
JUAN_IGNACIO_LONDERO,
CARLOS_BERLOCQ
],
'score': [(6, 2), (6, 4)],
'odds': {
JUAN_IGNACIO_LONDERO: 1.50,
CARLOS_BERLOCQ: 2.51
}
},
{
'round': 32,
'players': [
ROBIN_HAASE,
MALEK_JAZIRI
],
'score': [(6, 3), (6, 4)],
'odds': {
ROBIN_HAASE: 1.44,
MALEK_JAZIRI: 2.75
}
},
{
'round': 32,
'players': [
PABLO_ANDUJAR,
FEDERICO_DELBONIS
],
'score': [(7, 6), (6, 3)],
'odds': {
PABLO_ANDUJAR: 2.25,
FEDERICO_DELBONIS: 1.63
}
},
{
'round': 32,
'players': [
PIERRE_HUGUES_HERBERT,
THOMAS_FABBIANO
],
'score': [(6, 7), (6, 4), (6, 1)],
'odds': {
PIERRE_HUGUES_HERBERT: 1.61,
THOMAS_FABBIANO: 2.35
}
},
{
'round': 32,
'players': [
PHILIPP_KOHLSCHREIBER,
ALEJANDRO_DAVIDOVICH_FOKINA
],
'score': [(7, 6), (7, 5)],
'odds': {
PHILIPP_KOHLSCHREIBER: 1.56,
ALEJANDRO_DAVIDOVICH_FOKINA: 2.45
}
},
{
'round': 32,
'players': [
ADRIAN_MENENDEZ_MACEIRAS,
FERNANDO_VERDASCO
],
'score': [(5, 7), (6, 2), (6, 2)],
'odds': {
ADRIAN_MENENDEZ_MACEIRAS: 4.60,
FERNANDO_VERDASCO: 1.20
}
},
{
'round': 32,
'players': [
LORENZO_SONEGO,
LASLO_DJERE
],
'score': [(6, 3), (6, 3)],
'odds': {
LORENZO_SONEGO: 2.13,
LASLO_DJERE: 1.63
}
},
{
'round': 32,
'players': [
JIRI_VESELY,
FABIO_FOGNINI
],
'score': [(7, 6), (6, 4)],
'odds': {
JIRI_VESELY: 2.32,
FABIO_FOGNINI: 1.54
}
},
{
'round': 32,
'players': [
ALEXANDER_ZVEREV,
DENIS_ISTOMIN
],
'score': [(6, 4), (6, 4)],
'odds': {
ALEXANDER_ZVEREV: 1.10,
DENIS_ISTOMIN: 7.70
}
},
# 2019-04-10
{
'round': 16,
'players': [
LORENZO_SONEGO,
ROBIN_HAASE
],
'score': [(7, 6), (6, 3)],
'odds': {
LORENZO_SONEGO: 1.66,
ROBIN_HAASE: 2.20
}
},
{
'round': 16,
'players': [
TARO_DANIEL,
ADRIAN_MENENDEZ_MACEIRAS
],
'score': [(6, 2), (1, 6), (6, 1)],
'odds': {
TARO_DANIEL: 1.30,
ADRIAN_MENENDEZ_MACEIRAS: 3.40
}
},
{
'round': 16,
'players': [
GILLES_SIMON,
GUIDO_ANDREOZZI
],
'score': [(6, 2), (6, 2)],
'odds': {
GILLES_SIMON: 1.59,
GUIDO_ANDREOZZI: 2.25
}
},
{
'round': 16,
'players': [
JO_WILFRIED_TSONGA,
KYLE_EDMUND
],
'score': [(7, 6), (6, 3)],
'odds': {
JO_WILFRIED_TSONGA: 2.65,
KYLE_EDMUND: 1.49
}
},
# 2019-04-11
{
'round': 16,
'players': [
JIRI_VESELY,
JUAN_IGNACIO_LONDERO
],
'score': [(6, 3), (6, 4)],
'odds': {
JIRI_VESELY: 1.77,
JUAN_IGNACIO_LONDERO: 2.05
}
},
{
'round': 16,
'players': [
BENOIT_PAIRE,
PIERRE_HUGUES_HERBERT
],
'score': [(6, 4), (6, 2)],
'odds': {
BENOIT_PAIRE: 1.67,
PIERRE_HUGUES_HERBERT: 2.15
}
},
{
'round': 16,
'players': [
PABLO_ANDUJAR,
PHILIPP_KOHLSCHREIBER
],
'score': [(7, 6), (6, 4)],
'odds': {
PABLO_ANDUJAR: 1.95,
PHILIPP_KOHLSCHREIBER: 1.74
}
},
{
'round': 16,
'players': [
JAUME_MUNAR,
ALEXANDER_ZVEREV
],
'score': [(7, 6), (2, 6), (6, 3)],
'odds': {
JAUME_MUNAR: 4.11,
ALEXANDER_ZVEREV: 1.24
}
},
# 2019-04-12
{
'round': 8,
'players': [
JO_WILFRIED_TSONGA,
LORENZO_SONEGO
],
'score': [(6, 3), (6, 2)],
'odds': {
JO_WILFRIED_TSONGA: 1.42,
LORENZO_SONEGO: 2.75
}
},
{
'round': 8,
'players': [
BENOIT_PAIRE,
JAUME_MUNAR
],
'score': [(6, 1), (6, 3)],
'odds': {
BENOIT_PAIRE: 2.35,
JAUME_MUNAR: 1.59
}
},
{
'round': 8,
'players': [
PABLO_ANDUJAR,
JIRI_VESELY
],
'score': [],
'retired': True,
'odds': {
PABLO_ANDUJAR: 1.77,
JIRI_VESELY: 2.05
}
},
{
'round': 8,
'players': [
GILLES_SIMON,
TARO_DANIEL
],
'score': [(6, 4), (7, 5)],
'odds': {
GILLES_SIMON: 1.36,
TARO_DANIEL: 3.00
}
},
# 2019-04-13
{
'round': 4,
'players': [
BENOIT_PAIRE,
JO_WILFRIED_TSONGA
],
'score': [(2, 6), (6, 4), (6, 3)],
'odds': {
BENOIT_PAIRE: 3.00,
JO_WILFRIED_TSONGA: 1.38
}
},
{
'round': 4,
'players': [
PABLO_ANDUJAR,
GILLES_SIMON
],
'score': [(6, 1), (6, 1)],
'odds': {
PABLO_ANDUJAR: 1.91,
GILLES_SIMON: 1.80
}
},
# 2019-04-14
{
'round': 2,
'players': [
BENOIT_PAIRE,
PABLO_ANDUJAR
],
'score': [(6, 2), (6, 3)],
'odds': {
BENOIT_PAIRE: 2.10,
PABLO_ANDUJAR: 1.69
}
}
]
},
{
'location': MONTE_CARLO,
'date': '2019-04-21',
'matches': [
# 2019-04-13
{
'round': 512,
'players': [
ELIAS_YMER,
MIOMIR_KECMANOVIC
],
'score': [(6, 1), (6, 3)],
'odds': {
ELIAS_YMER: 2.15,
MIOMIR_KECMANOVIC: 1.65
}
},
{
'round': 512,
'players': [
THOMAS_FABBIANO,
FELICIANO_LOPEZ
],
'score': [(3, 6), (6, 4), (6, 2)],
'odds': {
THOMAS_FABBIANO: 1.83,
FELICIANO_LOPEZ: 1.82
}
},
{
'round': 512,
'players': [
MARCO_TRUNGELLITI,
PETER_GOJOWCZYK
],
'score': [(6, 4), (6, 2)],
'odds': {
MARCO_TRUNGELLITI: 1.83,
PETER_GOJOWCZYK: 1.71
}
},
{
'round': 512,
'players': [
ALBERT_RAMOS_VINOLAS,
MAXIMILIAN_MARTERER
],
'score': [(6, 2), (6, 2)],
'odds': {
ALBERT_RAMOS_VINOLAS: 1.41,
MAXIMILIAN_MARTERER: 2.70
}
},
{
'round': 512,
'players': [
ANDREY_RUBLEV,
BERNARD_TOMIC
],
'score': [(4, 6), (7, 6), (7, 6)],
'odds': {
ANDREY_RUBLEV: 1.19,
BERNARD_TOMIC: 4.44
}
},
{
'round': 512,
'players': [
GUIDO_ANDREOZZI,
ERNESTS_GULBIS
],
'score': [(6, 4), (6, 1)],
'odds': {
GUIDO_ANDREOZZI: 1.50,
ERNESTS_GULBIS: 2.40
}
},
{
'round': 512,
'players': [
TARO_DANIEL,
YANNICK_MADEN
],
'score': [(6, 4), (6, 4)],
'odds': {
TARO_DANIEL: 1.63,
YANNICK_MADEN: 2.10
}
},
{
'round': 512,
'players': [
FEDERICO_DELBONIS,
ILYA_IVASHKA
],
'score': [(6, 2), (3, 4)],
'retired': True,
'odds': {
FEDERICO_DELBONIS: 1.24,
ILYA_IVASHKA: 3.79
}
},
{
'round': 512,
'players': [
JULIAN_OCLEPPO,
MISCHA_ZVEREV
],
'score': [(7, 6), (7, 6)],
'odds': {
JULIAN_OCLEPPO: 2.78,
MISCHA_ZVEREV: 1.36
}
},
{
'round': 512,
'players': [
ALJAZ_BEDENE,
HUGO_NYS
],
'score': [(6, 2), (6, 4)],
'odds': {
ALJAZ_BEDENE: 1.07,
HUGO_NYS: 8.00
}
},
{
'round': 512,
'players': [
JUAN_IGNACIO_LONDERO,
ROMAIN_ARNEODO
],
'score': [(6, 0), (6, 4)],
'odds': {
JUAN_IGNACIO_LONDERO: 1.06,
ROMAIN_ARNEODO: 7.00
}
},
{
'round': 512,
'players': [
LORENZO_SONEGO,
YOSHIHITO_NISHIOKA
],
'score': [(6, 2), (4, 6), (6, 0)],
'odds': {
LORENZO_SONEGO: 1.33,
YOSHIHITO_NISHIOKA: 3.00
}
},
{
'round': 512,
'players': [
UGO_HUMBERT,
FLORENT_DIEP
],
'score': [(3, 6), (7, 5), (6, 3)],
'odds': {
UGO_HUMBERT: 1.05,
FLORENT_DIEP: 11.00
}
},
{
'round': 512,
'players': [
ALEXEI_POPYRIN,
LEONARDO_MAYER
],
'score': [(7, 6), (2, 6), (7, 6)],
'odds': {
ALEXEI_POPYRIN: 3.20,
LEONARDO_MAYER: 1.33
}
},
{
'round': 256,
'players': [
LORENZO_SONEGO,
MARCO_TRUNGELLITI
],
'score': [],
'retired': True,
# no odds
},
# 2019-04-14
{
'round': 256,
'players': [
ALEXEI_POPYRIN,
ELIAS_YMER
],
'score': [(6, 3), (7, 6)],
'odds': {
ALEXEI_POPYRIN: 2.35,
ELIAS_YMER: 1.57
}
},
{
'round': 256,
'players': [
GUIDO_ANDREOZZI,
JULIAN_OCLEPPO
],
'score': [(6, 3), (6, 1)],
'odds': {
GUIDO_ANDREOZZI: 1.07,
JULIAN_OCLEPPO: 7.43
}
},
{
'round': 256,
'players': [
FEDERICO_DELBONIS,
ALBERT_RAMOS_VINOLAS
],
'score': [(7, 5), (6, 0)],
'odds': {
FEDERICO_DELBONIS: 1.80,
ALBERT_RAMOS_VINOLAS: 1.81
}
},
{
'round': 256,
'players': [
ALJAZ_BEDENE,
TARO_DANIEL
],
'score': [(7, 6), (6, 3)],
'odds': {
ALJAZ_BEDENE: 1.54,
TARO_DANIEL: 2.39
}
},
{
'round': 256,
'players': [
JUAN_IGNACIO_LONDERO,
THOMAS_FABBIANO
],
'score': [(6, 4), (6, 1)],
'odds': {
JUAN_IGNACIO_LONDERO: 1.47,
THOMAS_FABBIANO: 2.46
}
},
{
'round': 256,
'players': [
ANDREY_RUBLEV,
UGO_HUMBERT
],
'score': [(6, 4), (6, 4)],
'odds': {
ANDREY_RUBLEV: 1.36,
UGO_HUMBERT: 3.00
}
},
{
'round': 64,
'players': [
STAN_WAWRINKA,
LUCAS_POUILLE
],
'score': [(7, 5), (6, 3)],
'odds': {
STAN_WAWRINKA: 1.39,
LUCAS_POUILLE: 3.03
}
},
{
'round': 64,
'players': [
GUIDO_PELLA,
LASLO_DJERE
],
'score': [(6, 7), (6, 2), (6, 4)],
'odds': {
GUIDO_PELLA: 1.69,
LASLO_DJERE: 2.15
}
},
{
'round': 64,
'players': [
GRIGOR_DIMITROV,
MATTEO_BERRETTINI
],
'score': [(7, 5), (6, 4)],
'odds': {
GRIGOR_DIMITROV: 1.71,
MATTEO_BERRETTINI: 2.10
}
},
{
'round': 64,
'players': [
BORNA_CORIC,
HUBERT_HURKACZ
],
'score': [(6, 4), (5, 7), (7, 5)],
'odds': {
BORNA_CORIC: 1.53,
HUBERT_HURKACZ: 2.63
}
},
# 2019-04-15
{
'round': 64,
'players': [
LORENZO_SONEGO,
ANDREAS_SEPPI
],
'score': [(7, 6), (6, 4)],
'odds': {
LORENZO_SONEGO: 1.48,
ANDREAS_SEPPI: 2.70
}
},
{
'round': 64,
'players': [
JAUME_MUNAR,
LUCAS_CATARINA
],
'score': [(6, 0), (6, 3)],
'odds': {
JAUME_MUNAR: 1.04,
LUCAS_CATARINA: 12.24
}
},
{
'round': 64,
'players': [
DUSAN_LAJOVIC,
MALEK_JAZIRI
],
'score': [(6, 4), (6, 4)],
'odds': {
DUSAN_LAJOVIC: 1.37,
MALEK_JAZIRI: 3.18
}
},
{
'round': 64,
'players': [
MIKHAIL_KUKUSHKIN,
JEREMY_CHARDY
],
'score': [(6, 3), (6, 4)],
'odds': {
MIKHAIL_KUKUSHKIN: 2.60,
JEREMY_CHARDY: 1.51
}
},
{
'round': 64,
'players': [
PHILIPP_KOHLSCHREIBER,
TARO_DANIEL
],
'score': [(6, 1), (6, 3)],
'odds': {
PHILIPP_KOHLSCHREIBER: 1.36,
TARO_DANIEL: 3.22
}
},
{
'round': 64,
'players': [
MARTIN_KLIZAN,
FEDERICO_DELBONIS
],
'score': [(7, 6), (7, 5)],
'odds': {
MARTIN_KLIZAN: 2.99,
FEDERICO_DELBONIS: 1.39
}
},
{
'round': 64,
'players': [
ROBERTO_BAUTISTA_AGUT,
JOHN_MILLMAN
],
'score': [(3, 6), (6, 1), (6, 1)],
'odds': {
ROBERTO_BAUTISTA_AGUT: 1.18,
JOHN_MILLMAN: 5.16
}
},
{
'round': 64,
'players': [
RADU_ALBOT,
ALJAZ_BEDENE
],
'score': [(6, 4), (6, 2)],
'odds': {
RADU_ALBOT: 2.35,
ALJAZ_BEDENE: 1.57
}
},
{
'round': 64,
'players': [
DIEGO_SCHWARTZMAN,
KYLE_EDMUND
],
'score': [(4, 6), (6, 3), (6, 1)],
'odds': {
DIEGO_SCHWARTZMAN: 2.15,
KYLE_EDMUND: 1.69
}
},
{
'round': 64,
'players': [
DAVID_GOFFIN,
GUIDO_ANDREOZZI
],
'score': [(6, 1), (6, 4)],
'odds': {
DAVID_GOFFIN: 1.30,
GUIDO_ANDREOZZI: 3.57
}
},
{
'round': 64,
'players': [
JAN_LENNARD_STRUFF,
DENIS_SHAPOVALOV
],
'score': [(5, 7), (6, 3), (6, 1)],
'odds': {
JAN_LENNARD_STRUFF: 2.20,
DENIS_SHAPOVALOV: 1.67
}
},
{
'round': 64,
'players': [
FABIO_FOGNINI,
ANDREY_RUBLEV
],
'score': [(4, 6), (7, 5), (6, 4)],
'odds': {
FABIO_FOGNINI: 2.02,
ANDREY_RUBLEV: 1.74
}
},
{
'round': 64,
'players': [
MARTON_FUCSOVICS,
NIKOLOZ_BASILASHVILI
],
'score': [(7, 5), (3, 6), (6, 1)],
'odds': {
MARTON_FUCSOVICS: 1.75,
NIKOLOZ_BASILASHVILI: 2.05
}
},
{
'round': 64,
'players': [
MARCO_CECCHINATO,
DAMIR_DZUMHUR
],
'score': [(4, 0)],
'retired': True,
'odds': {
MARCO_CECCHINATO: 1.28,
DAMIR_DZUMHUR: 3.60
}
},
{
'round': 64,
'players': [
DANIIL_MEDVEDEV,
JOAO_SOUSA
],
'score': [(6, 1), (6, 1)],
'odds': {
DANIIL_MEDVEDEV: 1.54,
JOAO_SOUSA: 2.49
}
},
# 2019-04-16
{
'round': 64,
'players': [
GILLES_SIMON,
ALEXEI_POPYRIN
],
'score': [(7, 5), (6, 1)],
'odds': {
GILLES_SIMON: 1.41,
ALEXEI_POPYRIN: 2.84
}
},
{
'round': 64,
'players': [
CAMERON_NORRIE,
ADRIAN_MANNARINO
],
'score': [(6, 4), (6, 3)],
'odds': {
CAMERON_NORRIE: 1.62,
ADRIAN_MANNARINO: 2.36
}
},
{
'round': 64,
'players': [
PIERRE_HUGUES_HERBERT,
FERNANDO_VERDASCO
],
'score': [(6, 4), (6, 4)],
'odds': {
PIERRE_HUGUES_HERBERT: 2.49,
FERNANDO_VERDASCO: 1.54
}
},
{
'round': 64,
'players': [
TAYLOR_FRITZ,
JO_WILFRIED_TSONGA
],
'score': [(6, 4), (2, 0)],
'retired': True,
'odds': {
TAYLOR_FRITZ: 5.75,
JO_WILFRIED_TSONGA: 1.14
}
},
{
'round': 64,
'players': [
FELIX_AUGER_ALIASSIME,
JUAN_IGNACIO_LONDERO
],
'score': [(7, 5), (7, 6)],
'odds': {
FELIX_AUGER_ALIASSIME: 1.48,
JUAN_IGNACIO_LONDERO: 2.75
}
},
{
'round': 32,
'players': [
MARCO_CECCHINATO,
STAN_WAWRINKA
],
'score': [(0, 6), (7, 5), (6, 3)],
'odds': {
MARCO_CECCHINATO: 2.44,
STAN_WAWRINKA: 1.57
}
},
{
'round': 32,
'players': [
BORNA_CORIC,
JAUME_MUNAR
],
'score': [(6, 7), (7, 6), (6, 4)],
'odds': {
BORNA_CORIC: 1.65,
JAUME_MUNAR: 2.25
}
},
{
'round': 32,
'players': [
LORENZO_SONEGO,
KAREN_KHACHANOV
],
'score': [(7, 6), (6, 4)],
'odds': {
LORENZO_SONEGO: 2.35,
KAREN_KHACHANOV: 1.59
}
},
{
'round': 32,
'players': [
GUIDO_PELLA,
MARIN_CILIC
],
'score': [(6, 3), (5, 7), (6, 1)],
'odds': {
GUIDO_PELLA: 2.23,
MARIN_CILIC: 1.67
}
},
{
'round': 32,
'players': [
NOVAK_DJOKOVIC,
PHILIPP_KOHLSCHREIBER
],
'score': [(6, 3), (4, 6), (6, 4)],
'odds': {
NOVAK_DJOKOVIC: 1.16,
PHILIPP_KOHLSCHREIBER: 5.00
}
},
# 2019-04-17
{
'round': 32,
'players': [
CAMERON_NORRIE,
MARTON_FUCSOVICS
],
'score': [(7, 6), (6, 3)],
'odds': {
CAMERON_NORRIE: 2.95,
MARTON_FUCSOVICS: 1.41
}
},
{
'round': 32,
'players': [
TAYLOR_FRITZ,
DIEGO_SCHWARTZMAN
],
'score': [(6, 4), (6, 2)],
'odds': {
TAYLOR_FRITZ: 5.00,
DIEGO_SCHWARTZMAN: 1.16
}
},
{
'round': 32,
'players': [
GRIGOR_DIMITROV,
JAN_LENNARD_STRUFF
],
'score': [(7, 6), (6, 4)],
'odds': {
GRIGOR_DIMITROV: 1.67,
JAN_LENNARD_STRUFF: 2.20
}
},
{
'round': 32,
'players': [
DUSAN_LAJOVIC,
DAVID_GOFFIN
],
'score': [(6, 3), (6, 4)],
'odds': {
DUSAN_LAJOVIC: 3.61,
DAVID_GOFFIN: 1.27
}
},
{
'round': 32,
'players': [
FABIO_FOGNINI,
GILLES_SIMON
],
'score': [],
'retired': True,
'odds': {
FABIO_FOGNINI: 1.91,
GILLES_SIMON: 1.87
}
},
{
'round': 32,
'players': [
DANIIL_MEDVEDEV,
RADU_ALBOT
],
'score': [(6, 1), (6, 2)],
'odds': {
DANIIL_MEDVEDEV: 1.43,
RADU_ALBOT: 2.75
}
},
{
'round': 32,
'players': [
STEFANOS_TSITSIPAS,
MIKHAIL_KUKUSHKIN
],
'score': [(6, 3), (7, 5)],
'odds': {
STEFANOS_TSITSIPAS: 1.23,
MIKHAIL_KUKUSHKIN: 3.95
}
},
{
'round': 32,
'players': [
PIERRE_HUGUES_HERBERT,
KEI_NISHIKORI
],
'score': [(7, 5), (6, 4)],
'odds': {
PIERRE_HUGUES_HERBERT: 4.04,
KEI_NISHIKORI: 1.20
}
},
{
'round': 32,
'players': [
DOMINIC_THIEM,
MARTIN_KLIZAN
],
'score': [(6, 1), (6, 4)],
'odds': {
DOMINIC_THIEM: 1.21,
MARTIN_KLIZAN: 4.34
}
},
{
'round': 32,
'players': [
ALEXANDER_ZVEREV,
FELIX_AUGER_ALIASSIME
],
'score': [(6, 1), (6, 4)],
'odds': {
ALEXANDER_ZVEREV: 1.47,
FELIX_AUGER_ALIASSIME: 2.75
}
},
{
'round': 32,
'players': [
RAFAEL_NADAL,
ROBERTO_BAUTISTA_AGUT
],
'score': [(6, 1), (6, 1)],
'odds': {
RAFAEL_NADAL: 1.07,
ROBERTO_BAUTISTA_AGUT: 7.50
}
},
# 2019-04-18
{
'round': 16,
'players': [
LORENZO_SONEGO,
CAMERON_NORRIE
],
'score': [(6, 2), (7, 5)],
'odds': {
LORENZO_SONEGO: 1.57,
CAMERON_NORRIE: 2.40
}
},
{
'round': 16,
'players': [
GUIDO_PELLA,
MARCO_CECCHINATO
],
'score': [(6, 4), (4, 6), (6, 4)],
'odds': {
GUIDO_PELLA: 2.49,
MARCO_CECCHINATO: 1.58
}
},
{
'round': 16,
'players': [
BORNA_CORIC,
PIERRE_HUGUES_HERBERT
],
'score': [(6, 4), (6, 2)],
'odds': {
BORNA_CORIC: 1.42,
PIERRE_HUGUES_HERBERT: 2.70
}
},
{
'round': 16,
'players': [
DANIIL_MEDVEDEV,
STEFANOS_TSITSIPAS
],
'score': [(6, 2), (1, 6), (6, 4)],
'odds': {
DANIIL_MEDVEDEV: 2.00,
STEFANOS_TSITSIPAS: 1.79
}
},
{
'round': 16,
'players': [
DUSAN_LAJOVIC,
DOMINIC_THIEM
],
'score': [(6, 3), (6, 3)],
'odds': {
DUSAN_LAJOVIC: 4.90,
DOMINIC_THIEM: 1.17
}
},
{
'round': 16,
'players': [
FABIO_FOGNINI,
ALEXANDER_ZVEREV
],
'score': [(7, 6), (6, 1)],
'odds': {
FABIO_FOGNINI: 4.25,
ALEXANDER_ZVEREV: 1.26
}
},
{
'round': 16,
'players': [
RAFAEL_NADAL,
GRIGOR_DIMITROV
],
'score': [(6, 4), (6, 1)],
'odds': {
RAFAEL_NADAL: 1.02,
GRIGOR_DIMITROV: 13.19
}
},
{
'round': 16,
'players': [
NOVAK_DJOKOVIC,
TAYLOR_FRITZ
],
'score': [(6, 3), (6, 0)],
'odds': {
NOVAK_DJOKOVIC: 1.06,
TAYLOR_FRITZ: 7.50
}
},
# 2019-04-19
{
'round': 8,
'players': [
DUSAN_LAJOVIC,
LORENZO_SONEGO
],
'score': [(6, 4), (7, 5)],
'odds': {
DUSAN_LAJOVIC: 1.69,
LORENZO_SONEGO: 2.10
}
},
{
'round': 8,
'players': [
FABIO_FOGNINI,
BORNA_CORIC
],
'score': [(1, 6), (6, 3), (6, 2)],
'odds': {
FABIO_FOGNINI: 2.20,
BORNA_CORIC: 1.65
}
},
{
'round': 8,
'players': [
RAFAEL_NADAL,
GUIDO_PELLA
],
'score': [(7, 6), (6, 3)],
'odds': {
RAFAEL_NADAL: 1.02,
GUIDO_PELLA: 16.00
}
},
{
'round': 8,
'players': [
DANIIL_MEDVEDEV,
NOVAK_DJOKOVIC
],
'score': [(6, 3), (4, 6), (6, 2)],
'odds': {
DANIIL_MEDVEDEV: 4.40,
NOVAK_DJOKOVIC: 1.20
}
},
# 2019-04-20
{
'round': 4,
'players': [
DUSAN_LAJOVIC,
DANIIL_MEDVEDEV
],
'score': [(7, 5), (6, 1)],
'odds': {
DUSAN_LAJOVIC: 2.75,
DANIIL_MEDVEDEV: 1.42
}
},
{
'round': 4,
'players': [
FABIO_FOGNINI,
RAFAEL_NADAL
],
'score': [(6, 4), (6, 2)],
'odds': {
FABIO_FOGNINI: 7.50,
RAFAEL_NADAL: 1.06
}
},
# 2019-04-21
{
'round': 2,
'players': [
FABIO_FOGNINI,
DUSAN_LAJOVIC
],
'score': [(6, 3), (6, 4)],
'odds': {
FABIO_FOGNINI: 1.59,
DUSAN_LAJOVIC: 2.25
}
}
]
},
{
'location': BARCELONA,
'date': '2019-04-28',
'matches': [
# 2019-04-20
{
'round': 512,
'players': [
GUILLERMO_GARCIA_LOPEZ,
CARLOS_BERLOCQ
],
'score': [(6, 4), (6, 3)],
# no odds
},
{
'round': 512,
'players': [
PEDRO_SOUSA,
CARLOS_ALCARAZ_GARFIA
],
'score': [(6, 7), (6, 3), (6, 1)],
'odds': {
PEDRO_SOUSA: 1.27,
CARLOS_ALCARAZ_GARFIA: 3.02
}
},
{
'round': 512,
'players': [
ALEJANDRO_DAVIDOVICH_FOKINA,
DENIS_ISTOMIN
],
'score': [(6, 4), (6, 4)],
'odds': {
ALEJANDRO_DAVIDOVICH_FOKINA: 1.34,
DENIS_ISTOMIN: 2.70
}
},
{
'round': 512,
'players': [
ROBERTO_CARBALLES_BAENA,
PEDRO_MARTINEZ
],
'score': [(7, 5), (6, 1)],
'odds': {
ROBERTO_CARBALLES_BAENA: 1.36,
PEDRO_MARTINEZ: 2.85
}
},
{
'round': 512,
'players': [
ANTOINE_HOANG,
ANDREY_RUBLEV
],
'score': [(5, 7), (7, 6), (7, 6)],
'odds': {
ANTOINE_HOANG: 4.65,
ANDREY_RUBLEV: 1.18
}
},
{
'round': 512,
'players': [
MARCEL_GRANOLLERS,
DANIEL_EVANS
],
'score': [(6, 3), (4, 6), (7, 5)],
'odds': {
MARCEL_GRANOLLERS: 1.59,
DANIEL_EVANS: 2.25
}
},
{
'round': 512,
'players': [
GUIDO_ANDREOZZI,
TOMMY_ROBREDO
],
'score': [(3, 6), (6, 3), (7, 6)],
'odds': {
GUIDO_ANDREOZZI: 1.32,
TOMMY_ROBREDO: 3.30
}
},
{
'round': 512,
'players': [
NICOLAS_JARRY,
CHUN_HSIN_TSENG
],
'score': [(6, 4), (3, 6), (7, 6)],
'odds': {
NICOLAS_JARRY: 1.07,
CHUN_HSIN_TSENG: 6.25
}
},
{
'round': 512,
'players': [
ALBERT_RAMOS_VINOLAS,
ALEXEI_POPYRIN
],
'score': [(6, 2), (7, 6)],
'odds': {
ALBERT_RAMOS_VINOLAS: 1.34,
ALEXEI_POPYRIN: 3.10
}
},
{
'round': 512,
'players': [
HUGO_DELLIEN,
GREGOIRE_BARRERE
],
'score': [(6, 7), (7, 6), (7, 6)],
'odds': {
HUGO_DELLIEN: 1.67,
GREGOIRE_BARRERE: 2.10
}
},
{
'round': 512,
'players': [
FEDERICO_DELBONIS,
THIAGO_MONTEIRO
],
'score': [(7, 6), (6, 3)],
'odds': {
FEDERICO_DELBONIS: 1.24,
THIAGO_MONTEIRO: 3.81
}
},
{
'round': 512,
'players': [
DIEGO_SCHWARTZMAN,
JOZEF_KOVALIK
],
'score': [(6, 4), (6, 4)],
'odds': {
DIEGO_SCHWARTZMAN: 1.18,
JOZEF_KOVALIK: 4.52
}
},
# 2019-04-21
{
'round': 256,
'players': [
PEDRO_SOUSA,
GUIDO_ANDREOZZI
],
'score': [(6, 4), (6, 2)],
# no odds
},
{
'round': 256,
'players': [
MARCEL_GRANOLLERS,
NICOLAS_JARRY
],
'score': [(6, 7), (7, 5), (6, 4)],
'odds': {
MARCEL_GRANOLLERS: 1.77,
NICOLAS_JARRY: 1.91
}
},
{
'round': 256,
'players': [
ALBERT_RAMOS_VINOLAS,
ALEJANDRO_DAVIDOVICH_FOKINA
],
'score': [(7, 5), (7, 5)],
'odds': {
ALBERT_RAMOS_VINOLAS: 1.63,
ALEJANDRO_DAVIDOVICH_FOKINA: 2.20
}
},
{
'round': 256,
'players': [
HUGO_DELLIEN,
ANTOINE_HOANG
],
'score': [(6, 4), (6, 4)],
'odds': {
HUGO_DELLIEN: 1.64,
ANTOINE_HOANG: 2.05
}
},
{
'round': 256,
'players': [
FEDERICO_DELBONIS,
GUILLERMO_GARCIA_LOPEZ
],
'score': [(6, 3), (6, 0)],
'odds': {
FEDERICO_DELBONIS: 1.38,
GUILLERMO_GARCIA_LOPEZ: 2.77
}
},
{
'round': 256,
'players': [
DIEGO_SCHWARTZMAN,
ROBERTO_CARBALLES_BAENA
],
'score': [(6, 4), (6, 4)],
'odds': {
DIEGO_SCHWARTZMAN: 1.51,
ROBERTO_CARBALLES_BAENA: 2.46
}
},
# 2019-04-22
{
'round': 64,
'players': [
FERNANDO_VERDASCO,
FELICIANO_LOPEZ
],
'score': [(6, 4), (6, 3)],
'odds': {
FERNANDO_VERDASCO: 1.48,
FELICIANO_LOPEZ: 2.67
}
},
{
'round': 64,
'players': [
JAN_LENNARD_STRUFF,
HUGO_DELLIEN
],
'score': [(6, 3), (6, 1)],
'odds': {
JAN_LENNARD_STRUFF: 1.50,
HUGO_DELLIEN: 2.51
}
},
{
'round': 64,
'players': [
DIEGO_SCHWARTZMAN,
YOSHIHITO_NISHIOKA
],
'score': [(4, 6), (6, 4), (6, 2)],
'odds': {
DIEGO_SCHWARTZMAN: 1.25,
YOSHIHITO_NISHIOKA: 3.80
}
},
{
'round': 64,
'players': [
BENOIT_PAIRE,
JUAN_IGNACIO_LONDERO
],
'score': [(7, 5), (6, 2)],
'odds': {
BENOIT_PAIRE: 1.54,
JUAN_IGNACIO_LONDERO: 2.40
}
},
{
'round': 64,
'players': [
JAUME_MUNAR,
PEDRO_SOUSA
],
'score': [(2, 6), (6, 4), (6, 0)],
'odds': {
JAUME_MUNAR: 1.30,
PEDRO_SOUSA: 3.60
}
},
{
'round': 64,
'players': [
MACKENZIE_MCDONALD,
TARO_DANIEL
],
'score': [(6, 2), (6, 2)],
'odds': {
MACKENZIE_MCDONALD: 3.50,
TARO_DANIEL: 1.31
}
},
{
'round': 64,
'players': [
LEONARDO_MAYER,
MARIUS_COPIL
],
'score': [(6, 3), (6, 7), (7, 5)],
'odds': {
LEONARDO_MAYER: 1.27,
MARIUS_COPIL: 3.52
}
},
{
'round': 64,
'players': [
NICOLAS_JARRY,
MARCEL_GRANOLLERS
],
'score': [(7, 5), (4, 6), (6, 4)],
'odds': {
NICOLAS_JARRY: 1.83,
MARCEL_GRANOLLERS: 1.89
}
},
{
'round': 64,
'players': [
MARTON_FUCSOVICS,
DENIS_KUDLA
],
'score': [(6, 4), (6, 1)],
'odds': {
MARTON_FUCSOVICS: 1.18,
DENIS_KUDLA: 4.80
}
},
{
'round': 64,
'players': [
TAYLOR_FRITZ,
REILLY_OPELKA
],
'score': [(6, 3), (6, 4)],
'odds': {
TAYLOR_FRITZ: 1.72,
REILLY_OPELKA: 1.97
}
},
# 2019-04-23
{
'round': 64,
'players': [
ALBERT_RAMOS_VINOLAS,
CAMERON_NORRIE
],
'score': [(6, 2), (6, 2)],
'odds': {
ALBERT_RAMOS_VINOLAS: 1.56,
CAMERON_NORRIE: 2.35
}
},
{
'round': 64,
'players': [
GUIDO_PELLA,
JOAO_SOUSA
],
'score': [(3, 6), (7, 6), (6, 2)],
'odds': {
GUIDO_PELLA: 1.36,
JOAO_SOUSA: 3.15
}
},
{
'round': 64,
'players': [
NICOLA_KUHN,
FEDERICO_DELBONIS
],
'score': [(7, 6), (4, 6), (6, 2)],
'odds': {
NICOLA_KUHN: 4.90,
FEDERICO_DELBONIS: 1.16
}
},
{
'round': 64,
'players': [
MALEK_JAZIRI,
GUIDO_ANDREOZZI
],
'score': [(6, 7), (4, 6), (6, 2)],
'odds': {
MALEK_JAZIRI: 2.70,
GUIDO_ANDREOZZI: 1.44
}
},
{
'round': 64,
'players': [
CHRISTIAN_GARIN,
MARTIN_KLIZAN
],
'score': [(7, 5), (6, 4)],
'odds': {
CHRISTIAN_GARIN: 1.71,
MARTIN_KLIZAN: 2.10
}
},
{
'round': 64,
'players': [
DAVID_FERRER,
MISCHA_ZVEREV
],
'score': [(6, 3), (6, 1)],
'odds': {
DAVID_FERRER: 1.14,
MISCHA_ZVEREV: 6.00
}
},
{
'round': 32,
'players': [
JAUME_MUNAR,
FRANCES_TIAFOE
],
'score': [(6, 4), (6, 3)],
'odds': {
JAUME_MUNAR: 1.43,
FRANCES_TIAFOE: 2.70
}
},
{
'round': 32,
'players': [
JAN_LENNARD_STRUFF,
DAVID_GOFFIN
],
'score': [(7, 6), (6, 3)],
'odds': {
JAN_LENNARD_STRUFF: 2.40,
DAVID_GOFFIN: 1.56
}
},
{
'round': 32,
'players': [
STEFANOS_TSITSIPAS,
MARTON_FUCSOVICS
],
'score': [(6, 3), (6, 4)],
'odds': {
STEFANOS_TSITSIPAS: 1.31,
MARTON_FUCSOVICS: 3.44
}
},
{
'round': 32,
'players': [
KEI_NISHIKORI,
TAYLOR_FRITZ
],
'score': [(7, 5), (6, 2)],
'odds': {
KEI_NISHIKORI: 1.27,
TAYLOR_FRITZ: 3.65
}
},
{
'round': 32,
'players': [
DOMINIC_THIEM,
DIEGO_SCHWARTZMAN
],
'score': [(6, 3), (6, 3)],
'odds': {
DOMINIC_THIEM: 1.31,
DIEGO_SCHWARTZMAN: 3.45
}
},
{
'round': 32,
'players': [
NICOLAS_JARRY,
ALEXANDER_ZVEREV
],
'score': [(3, 6), (7, 5), (7, 6)],
'odds': {
NICOLAS_JARRY: 4.65,
ALEXANDER_ZVEREV: 1.20
}
},
# 2019-04-24
{
'round': 32,
'players': [
ROBERTO_CARBALLES_BAENA,
NICOLA_KUHN
],
'score': [(6, 7), (6, 4), (6, 2)],
'odds': {
ROBERTO_CARBALLES_BAENA: 1.37,
NICOLA_KUHN: 3.20
}
},
{
'round': 32,
'players': [
FELIX_AUGER_ALIASSIME,
MALEK_JAZIRI
],
'score': [(6, 3), (7, 6)],
'odds': {
FELIX_AUGER_ALIASSIME: 1.19,
MALEK_JAZIRI: 4.20
}
},
{
'round': 32,
'players': [
DAVID_FERRER,
LUCAS_POUILLE
],
'score': [(6, 3), (6, 1)],
'odds': {
DAVID_FERRER: 1.62,
LUCAS_POUILLE: 2.30
}
},
{
'round': 32,
'players': [
GRIGOR_DIMITROV,
FERNANDO_VERDASCO
],
'score': [(6, 2), (6, 7), (6, 3)],
'odds': {
GRIGOR_DIMITROV: 1.65,
FERNANDO_VERDASCO: 2.20
}
},
{
'round': 32,
'players': [
BENOIT_PAIRE,
PABLO_CARRENO_BUSTA
],
'score': [(6, 4), (6, 7), (6, 1)],
'odds': {
BENOIT_PAIRE: 1.42,
PABLO_CARRENO_BUSTA: 2.85
}
},
{
'round': 32,
'players': [
MACKENZIE_MCDONALD,
GILLES_SIMON
],
'score': [(6, 3), (6, 2)],
'odds': {
MACKENZIE_MCDONALD: 2.78,
GILLES_SIMON: 1.43
}
},
{
'round': 32,
'players': [
CHRISTIAN_GARIN,
DENIS_SHAPOVALOV
],
'score': [(7, 5), (6, 2)],
'odds': {
CHRISTIAN_GARIN: 1.65,
DENIS_SHAPOVALOV: 2.10
}
},
{
'round': 32,
'players': [
DANIIL_MEDVEDEV,
ALBERT_RAMOS_VINOLAS
],
'score': [(6, 3), (2, 6), (6, 1)],
'odds': {
DANIIL_MEDVEDEV: 1.44,
ALBERT_RAMOS_VINOLAS: 2.60
}
},
{
'round': 32,
'players': [
GUIDO_PELLA,
KAREN_KHACHANOV
],
'score': [(6, 2), (7, 6)],
'odds': {
GUIDO_PELLA: 1.87,
KAREN_KHACHANOV: 1.87
}
},
{
'round': 32,
'players': [
RAFAEL_NADAL,
LEONARDO_MAYER
],
'score': [(6, 7), (6, 4), (6, 2)],
'odds': {
RAFAEL_NADAL: 1.02,
LEONARDO_MAYER: 13.84
}
},
# 2019-04-25
{
'round': 16,
'players': [
GUIDO_PELLA,
BENOIT_PAIRE
],
'score': [(7, 5), (6, 3)],
'odds': {
GUIDO_PELLA: 1.61,
BENOIT_PAIRE: 2.20
}
},
{
'round': 16,
'players': [
ROBERTO_CARBALLES_BAENA,
CHRISTIAN_GARIN
],
'score': [(6, 4), (7, 6)],
'odds': {
ROBERTO_CARBALLES_BAENA: 2.76,
CHRISTIAN_GARIN: 1.42
}
},
{
'round': 16,
'players': [
NICOLAS_JARRY,
GRIGOR_DIMITROV
],
'score': [(2, 6), (6, 4), (7, 6)],
'odds': {
NICOLAS_JARRY: 2.79,
GRIGOR_DIMITROV: 1.43
}
},
{
'round': 16,
'players': [
DANIIL_MEDVEDEV,
MACKENZIE_MCDONALD
],
'score': [(6, 3), (6, 2)],
'odds': {
DANIIL_MEDVEDEV: 1.17,
MACKENZIE_MCDONALD: 4.60
}
},
{
'round': 16,
'players': [
JAN_LENNARD_STRUFF,
STEFANOS_TSITSIPAS
],
'score': [(6, 4), (3, 6), (6, 2)],
'odds': {
JAN_LENNARD_STRUFF: 3.70,
STEFANOS_TSITSIPAS: 1.28
}
},
{
'round': 16,
'players': [
KEI_NISHIKORI,
FELIX_AUGER_ALIASSIME
],
'score': [(6, 1), (6, 3)],
'odds': {
KEI_NISHIKORI: 1.63,
FELIX_AUGER_ALIASSIME: 2.25
}
},
{
'round': 16,
'players': [
DOMINIC_THIEM,
JAUME_MUNAR
],
'score': [(7, 5), (6, 1)],
'odds': {
DOMINIC_THIEM: 1.27,
JAUME_MUNAR: 3.68
}
},
{
'round': 16,
'players': [
RAFAEL_NADAL,
DAVID_FERRER
],
'score': [(6, 3), (6, 3)],
'odds': {
RAFAEL_NADAL: 1.14,
DAVID_FERRER: 5.00
}
},
# 2019-04-26
{
'round': 8,
'players': [
DANIIL_MEDVEDEV,
NICOLAS_JARRY
],
'score': [(6, 3), (6, 4)],
'odds': {
DANIIL_MEDVEDEV: 1.29,
NICOLAS_JARRY: 3.83
}
},
{
'round': 8,
'players': [
KEI_NISHIKORI,
ROBERTO_CARBALLES_BAENA
],
'score': [(6, 4), (7, 5)],
'odds': {
KEI_NISHIKORI: 1.30,
ROBERTO_CARBALLES_BAENA: 3.84
}
},
{
'round': 8,
'players': [
DOMINIC_THIEM,
GUIDO_PELLA
],
'score': [(7, 5), (6, 2)],
'odds': {
DOMINIC_THIEM: 1.36,
GUIDO_PELLA: 3.15
}
},
{
'round': 8,
'players': [
RAFAEL_NADAL,
JAN_LENNARD_STRUFF
],
'score': [(7, 5), (7, 5)],
'odds': {
RAFAEL_NADAL: 1.06,
JAN_LENNARD_STRUFF: 8.50
}
},
# 2019-04-27
{
'round': 4,
'players': [
DANIIL_MEDVEDEV,
KEI_NISHIKORI
],
'score': [(6, 4), (3, 6), (7, 5)],
'odds': {
DANIIL_MEDVEDEV: 1.95,
KEI_NISHIKORI: 1.80
}
},
{
'round': 4,
'players': [
DOMINIC_THIEM,
RAFAEL_NADAL
],
'score': [(6, 4), (6, 4)],
'odds': {
DOMINIC_THIEM: 3.20,
RAFAEL_NADAL: 1.33
}
},
# 2019-04-28
{
'round': 2,
'players': [
DOMINIC_THIEM,
DANIIL_MEDVEDEV
],
'score': [(6, 4), (6, 0)]
}
]
},
{
'location': BUDAPEST,
'date': '2019-04-22',
'matches': [
# 2019-04-20
{
'round': 512,
'players': [
EGOR_GERASIMOV,
FILLIPPO_BALDI
],
'score': [(6, 3), (6, 2)],
'odds': {
EGOR_GERASIMOV: 2.45,
FILLIPPO_BALDI: 1.43
}
},
{
'round': 512,
'players': [
JANNIK_SINNER,
LUKAS_ROSOL
],
'score': [(6, 2), (3, 0)],
'retired': True,
'odds': {
JANNIK_SINNER: 2.31,
LUKAS_ROSOL: 1.56
}
},
{
'round': 512,
'players': [
MATTHIAS_BACHINGER,
FILIP_HORANSKY
],
'score': [(6, 1), (6, 4)],
'odds': {
MATTHIAS_BACHINGER: 2.44,
FILIP_HORANSKY: 1.53
}
},
{
'round': 512,
'players': [
SERGIY_STAKHOVSKY,
DANIEL_BRANDS
],
'score': [(7, 5), (6, 1)],
'odds': {
SERGIY_STAKHOVSKY: 1.53,
DANIEL_BRANDS: 2.48
}
},
{
'round': 512,
'players': [
YANNICK_MADEN,
ZSOMBOR_PIROS
],
'score': [(6, 4), (1, 6), (6, 3)],
'odds': {
YANNICK_MADEN: 1.32,
ZSOMBOR_PIROS: 3.30
}
},
{
'round': 512,
'players': [
FILIP_KRAJINOVIC,
ROBERTO_MARCORA
],
'score': [(7, 5), (6, 2)],
'odds': {
FILIP_KRAJINOVIC: 1.13,
ROBERTO_MARCORA: 5.43
}
},
{
'round': 512,
'players': [
LLOYD_HARRIS,
DANIEL_GIMENO_TRAVER
],
'score': [(7, 6), (6, 3)],
'odds': {
LLOYD_HARRIS: 1.69,
DANIEL_GIMENO_TRAVER: 2.05
}
},
{
'round': 512,
'players': [
MIOMIR_KECMANOVIC,
ALESSANDRO_GIANNESSI
],
'score': [(6, 3), (6, 4)],
'odds': {
MIOMIR_KECMANOVIC: 1.57,
ALESSANDRO_GIANNESSI: 2.34
}
},
# 2019-04-21
{
'round': 256,
'players': [
YANNICK_MADEN,
JANNIK_SINNER
],
'score': [(6, 3), (6, 4)],
'odds': {
YANNICK_MADEN: 1.46,
JANNIK_SINNER: 2.45
}
},
{
'round': 256,
'players': [
FILIP_KRAJINOVIC,
EGOR_GERASIMOV
],
'score': [(7, 6), (6, 1)],
'odds': {
FILIP_KRAJINOVIC: 1.15,
EGOR_GERASIMOV: 5.10
}
},
{
'round': 256,
'players': [
LLOYD_HARRIS,
MATTHIAS_BACHINGER
],
'score': [(7, 5), (6, 4)],
'odds': {
LLOYD_HARRIS: 1.77,
MATTHIAS_BACHINGER: 1.91
}
},
{
'round': 256,
'players': [
MIOMIR_KECMANOVIC,
SERGIY_STAKHOVSKY
],
'score': [(6, 4), (6, 4)],
# no odds
},
# 2019-04-22
{
'round': 32,
'players': [
FILIP_KRAJINOVIC,
ANDREAS_SEPPI
],
'score': [(6, 2), (6, 7), (7, 5)],
'odds': {
FILIP_KRAJINOVIC: 1.44,
ANDREAS_SEPPI: 2.52
}
},
{
'round': 32,
'players': [
ALJAZ_BEDENE,
BERNARD_TOMIC
],
'score': [(7, 6), (6, 4)],
'odds': {
ALJAZ_BEDENE: 1.41,
BERNARD_TOMIC: 2.63
}
},
{
'round': 32,
'players': [
RADU_ALBOT,
SERGIY_STAKHOVSKY
],
'score': [(7, 5), (6, 4)],
'odds': {
RADU_ALBOT: 1.28,
SERGIY_STAKHOVSKY: 3.55
}
},
{
'round': 32,
'players': [
MATTEO_BERRETTINI,
MIKHAIL_KUKUSHKIN
],
'score': [(6, 4), (6, 4)],
'odds': {
MATTEO_BERRETTINI: 1.57,
MIKHAIL_KUKUSHKIN: 2.30
}
},
# 2019-04-23
{
'round': 32,
'players': [
PIERRE_HUGUES_HERBERT,
EGOR_GERASIMOV
],
'score': [(6, 3), 96, 2],
# no odds
},
{
'round': 32,
'players': [
ROBIN_HAASE,
THOMAS_FABBIANO
],
'score': [(6, 7), (6, 3), (6, 2)],
'odds': {
ROBIN_HAASE: 1.45,
THOMAS_FABBIANO: 2.60
}
},
{
'round': 32,
'players': [
PETER_GOJOWCZYK,
LLOYD_HARRIS
],
'score': [(7, 5), (6, 4)],
'odds': {
PETER_GOJOWCZYK: 2.27,
LLOYD_HARRIS: 1.63
}
},
{
'round': 32,
'players': [
ATTILA_BALAZS,
HUBERT_HURKACZ
],
'score': [(6, 3), (6, 4)],
'odds': {
ATTILA_BALAZS: 3.35,
HUBERT_HURKACZ: 1.32
}
},
{
'round': 32,
'players': [
JOHN_MILLMAN,
MIOMIR_KECMANOVIC
],
'score': [(6, 1), (6, 2)],
'odds': {
JOHN_MILLMAN: 2.20,
MIOMIR_KECMANOVIC: 1.69
}
},
{
'round': 32,
'players': [
LASLO_DJERE,
ERNESTS_GULBIS
],
'score': [(6, 4), (6, 7), (7, 6)],
'odds': {
LASLO_DJERE: 1.35,
ERNESTS_GULBIS: 2.90
}
},
# 2019-04-24
{
'round': 32,
'players': [
JANNIK_SINNER,
MATE_VALKUSZ
],
'score': [(6, 2), (0, 6), (6, 4)],
'odds': {
JANNIK_SINNER: 1.83,
MATE_VALKUSZ: 1.83
}
},
{
'round': 32,
'players': [
PABLO_CUEVAS,
YANNICK_MADEN
],
'score': [(6, 3), (3, 6), (6, 4)],
'odds': {
PABLO_CUEVAS: 1.38,
YANNICK_MADEN: 3.05
}
},
{
'round': 16,
'players': [
PIERRE_HUGUES_HERBERT,
MATTHIAS_BACHINGER
],
'score': [(7, 5), (6, 2)],
'odds': {
PIERRE_HUGUES_HERBERT: 1.26,
MATTHIAS_BACHINGER: 3.65
}
},
{
'round': 16,
'players': [
ATTILA_BALAZS,
JOHN_MILLMAN
],
'score': [(6, 4), (2, 6), (6, 2)],
'odds': {
ATTILA_BALAZS: 2.71,
JOHN_MILLMAN: 1.43
}
},
# 2019-04-25
{
'round': 16,
'players': [
MATTEO_BERRETTINI,
ALJAZ_BEDENE
],
'score': [(7, 6), (6, 2)],
'odds': {
MATTEO_BERRETTINI: 1.65,
ALJAZ_BEDENE: 2.20
}
},
{
'round': 16,
'players': [
FILIP_KRAJINOVIC,
RADU_ALBOT
],
'score': [(7, 5), (6, 4)],
'odds': {
FILIP_KRAJINOVIC: 1.37,
RADU_ALBOT: 3.00
}
},
{
'round': 16,
'players': [
LASLO_DJERE,
JANNIK_SINNER
],
'score': [(6, 3), (6, 1)],
'odds': {
LASLO_DJERE: 1.20,
JANNIK_SINNER: 4.55
}
},
{
'round': 16,
'players': [
NIKOLOZ_BASILASHVILI,
PETER_GOJOWCZYK
],
'score': [(6, 3), (0, 6), (6, 3)],
'odds': {
NIKOLOZ_BASILASHVILI: 1.42,
PETER_GOJOWCZYK: 2.90
}
},
{
'round': 16,
'players': [
BORNA_CORIC,
ROBIN_HAASE
],
'score': [(6, 3), (4, 6), (6, 4)],
'odds': {
BORNA_CORIC: 1.33,
ROBIN_HAASE: 3.25
}
},
{
'round': 16,
'players': [
PABLO_CUEVAS,
MARIN_CILIC
],
'score': [(5, 7), (7, 6), (7, 6)],
'odds': {
PABLO_CUEVAS: 2.25,
MARIN_CILIC: 1.61
}
},
# 2019-04-26
{
'round': 8,
'players': [
PIERRE_HUGUES_HERBERT,
ATTILA_BALAZS
],
'score': [(6, 3), (6, 4)],
'odds': {
PIERRE_HUGUES_HERBERT: 1.55,
ATTILA_BALAZS: 2.45
}
},
{
'round': 8,
'players': [
MATTEO_BERRETTINI,
PABLO_CUEVAS
],
'score': [(6, 3), (1, 6), (6, 3)],
'odds': {
MATTEO_BERRETTINI: 1.59,
PABLO_CUEVAS: 2.30
}
},
{
'round': 8,
'players': [
LASLO_DJERE,
NIKOLOZ_BASILASHVILI
],
'score': [(3, 6), (6, 2), (6, 3)],
'odds': {
LASLO_DJERE: 1.61,
NIKOLOZ_BASILASHVILI: 2.20
}
},
{
'round': 8,
'players': [
FILIP_KRAJINOVIC,
BORNA_CORIC
],
'score': [(6, 4), (7, 5)],
'odds': {
FILIP_KRAJINOVIC: 2.05,
BORNA_CORIC: 1.74
}
},
# 2019-04-27
{
'round': 4,
'players': [
FILIP_KRAJINOVIC,
PIERRE_HUGUES_HERBERT
],
'score': [(6, 2), (6, 2)],
'odds': {
FILIP_KRAJINOVIC: 1.41,
PIERRE_HUGUES_HERBERT: 2.95
}
},
{
'round': 4,
'players': [
MATTEO_BERRETTINI,
LASLO_DJERE
],
'score': [(6, 4), (6, 2)],
'odds': {
MATTEO_BERRETTINI: 1.67,
LASLO_DJERE: 2.15
}
},
# 2019-04-28
{
'round': 2,
'players': [
MATTEO_BERRETTINI,
FILIP_KRAJINOVIC
],
'score': [(4, 6), (6, 3), (6, 1)],
'odds': {
MATTEO_BERRETTINI: 2.10,
FILIP_KRAJINOVIC: 1.69
}
}
]
},
{
'location': MUNICH,
'date': '2019-04-29',
'matches': [
# 2019-04-27
{
'round': 512,
'players': [
DENIS_ISTOMIN,
CEDRIC_MARCEL_STEBE
],
'score': [(6, 3), (7, 6)],
'odds': {
DENIS_ISTOMIN: 1.63,
CEDRIC_MARCEL_STEBE: 2.20
}
},
{
'round': 512,
'players': [
YANNICK_MADEN,
THOMAS_FABBIANO
],
'score': [(4, 6), (6, 2), (6, 2)],
'odds': {
YANNICK_MADEN: 1.42,
THOMAS_FABBIANO: 2.55
}
},
{
'round': 512,
'players': [
ANDREY_RUBLEV,
MATTHIAS_BACHINGER
],
'score': [(7, 6), (6, 2)],
'odds': {
ANDREY_RUBLEV: 1.25,
MATTHIAS_BACHINGER: 3.80
}
},
{
'round': 512,
'players': [
HENRI_LAAKSONEN,
MIOMIR_KECMANOVIC
],
'score': [(4, 6), (6, 1), (6, 4)],
'odds': {
HENRI_LAAKSONEN: 2.25,
MIOMIR_KECMANOVIC: 1.61
}
},
{
'round': 512,
'players': [
LUKAS_ROSOL,
PETER_GOJOWCZYK
],
'score': [(6, 4), (2, 6), (6, 4)],
'odds': {
LUKAS_ROSOL: 2.61,
PETER_GOJOWCZYK: 1.47
}
},
{
'round': 512,
'players': [
THIAGO_MONTEIRO,
ALBERT_RAMOS_VINOLAS
],
'score': [(6, 3), (2, 6), (6, 2)],
'odds': {
THIAGO_MONTEIRO: 3.27,
ALBERT_RAMOS_VINOLAS: 1.32
}
},
{
'round': 512,
'players': [
PRAJNESH_GUNNESWARAN,
ALEXANDER_ERLER
],
'score': [(3, 6), (7, 6), (7, 5)],
'odds': {
PRAJNESH_GUNNESWARAN: 1.05,
ALEXANDER_ERLER: 10.00
}
},
{
'round': 512,
'players': [
LORENZO_SONEGO,
YANNICK_HANFMANN
],
'score': [(7, 6), (6, 7), (6, 3)],
'odds': {
LORENZO_SONEGO: 1.23,
YANNICK_HANFMANN: 3.85
}
},
# 2019-04-28
{
'round': 256,
'players': [
YANNICK_MADEN,
LUKAS_ROSOL
],
'score': [(6, 2), (6, 2)],
'odds': {
YANNICK_MADEN: 1.32,
LUKAS_ROSOL: 3.12
}
},
{
'round': 256,
'players': [
THIAGO_MONTEIRO,
ANDREY_RUBLEV
],
'score': [(6, 3), (6, 7), (6, 4)],
'odds': {
THIAGO_MONTEIRO: 2.70,
ANDREY_RUBLEV: 1.43
}
},
{
'round': 256,
'players': [
DENIS_ISTOMIN,
PRAJNESH_GUNNESWARAN
],
'score': [(4, 6), (6, 2), (6, 2)],
'odds': {
DENIS_ISTOMIN: 1.67,
PRAJNESH_GUNNESWARAN: 1.93
}
},
{
'round': 256,
'players': [
LORENZO_SONEGO,
HENRI_LAAKSONEN
],
'score': [(6, 0), (5, 7), (7, 6)],
'odds': {
LORENZO_SONEGO: 1.38,
HENRI_LAAKSONEN: 2.73
}
},
# 2019-04-29
{
'round': 32,
'players': [
TARO_DANIEL,
UGO_HUMBERT,
],
'score': [(6, 4), (6, 4)],
'odds': {
TARO_DANIEL: 1.65,
UGO_HUMBERT: 2.15
}
},
{
'round': 32,
'players': [
MARTON_FUCSOVICS,
LORENZO_SONEGO
],
'score': [(7, 5), (4, 6), (7, 6)],
'odds': {
MARTON_FUCSOVICS: 1.91,
LORENZO_SONEGO: 1.80
}
},
# 2019-04-30
{
'round': 32,
'players': [
THIAGO_MONTEIRO,
JAN_LENNARD_STRUFF
],
'score': [(6, 1), (6, 1)],
'odds': {
THIAGO_MONTEIRO: 3.50,
JAN_LENNARD_STRUFF: 1.29
}
},
{
'round': 32,
'players': [
RUDOLF_MOLLEKER,
MARIUS_COPIL
],
'score': [(7, 6), (4, 6), (6, 4)],
'odds': {
RUDOLF_MOLLEKER: 1.77,
MARIUS_COPIL: 1.97
}
},
{
'round': 32,
'players': [
JUAN_IGNACIO_LONDERO,
MAXIMILIAN_MARTERER
],
'score': [(6, 2), (4, 6), (6, 2)],
'odds': {
JUAN_IGNACIO_LONDERO: 1.50,
MAXIMILIAN_MARTERER: 2.53
}
},
{
'round': 32,
'players': [
PHILIPP_KOHLSCHREIBER,
ANDREAS_SEPPI
],
'score': [(6, 2), (7, 5)],
'odds': {
PHILIPP_KOHLSCHREIBER: 1.32,
ANDREAS_SEPPI: 3.30
}
},
{
'round': 32,
'players': [
MARTIN_KLIZAN,
ERNESTS_GULBIS
],
'score': [(6, 3), (7, 5)],
'odds': {
MARTIN_KLIZAN: 1.50,
ERNESTS_GULBIS: 2.54
}
},
{
'round': 32,
'players': [
CHRISTIAN_GARIN,
YANNICK_MADEN
],
'score': [(6, 4), (6, 2)],
'odds': {
CHRISTIAN_GARIN: 1.50,
YANNICK_MADEN: 2.55
}
},
{
'round': 32,
'players': [
MATTEO_BERRETTINI,
DENIS_ISTOMIN
],
'score': [(7, 6), (6, 3)],
'odds': {
MATTEO_BERRETTINI: 1.33,
DENIS_ISTOMIN: 3.10
}
},
{
'round': 32,
'players': [
GUIDO_PELLA,
MISCHA_ZVEREV
],
'score': [(6, 2), (6, 1)],
'odds': {
GUIDO_PELLA: 1.11,
MISCHA_ZVEREV: 7.04
}
},
{
'round': 32,
'players': [
DIEGO_SCHWARTZMAN,
BENOIT_PAIRE
],
'score': [(6, 4), (1, 6), (6, 1)],
'odds': {
DIEGO_SCHWARTZMAN: 1.60,
BENOIT_PAIRE: 2.30
}
},
{
'round': 32,
'players': [
DENIS_KUDLA,
KYLE_EDMUND
],
'score': [(6, 4), (6, 3)],
'odds': {
DENIS_KUDLA: 6.00,
KYLE_EDMUND: 1.11
}
},
# 2019-05-01
{
'round': 16,
'players': [
MARTON_FUCSOVICS,
THIAGO_MONTEIRO
],
'score': [(6, 7), (6, 4), (6, 3)],
'odds': {
MARTON_FUCSOVICS: 1.57,
THIAGO_MONTEIRO: 2.30
}
},
{
'round': 16,
'players': [
CHRISTIAN_GARIN,
DIEGO_SCHWARTZMAN
],
'score': [(6, 1), (7, 5)],
'odds': {
CHRISTIAN_GARIN: 2.02,
DIEGO_SCHWARTZMAN: 1.74
}
},
{
'round': 16,
'players': [
MARCO_CECCHINATO,
MARTIN_KLIZAN
],
'score': [(6, 1), (6, 3)],
'odds': {
MARCO_CECCHINATO: 1.65,
MARTIN_KLIZAN: 2.31
}
},
{
'round': 16,
'players': [
ALEXANDER_ZVEREV,
JUAN_IGNACIO_LONDERO
],
'score': [(7, 5), (6, 1)],
'odds': {
ALEXANDER_ZVEREV: 1.18,
JUAN_IGNACIO_LONDERO: 5.15
}
},
# 2019-05-02
{
'round': 16,
'players': [
MATTEO_BERRETTINI,
DENIS_KUDLA
],
'score': [(7, 5), (6, 3)],
'odds': {
MATTEO_BERRETTINI: 1.26,
DENIS_KUDLA: 3.85
}
},
{
'round': 16,
'players': [
GUIDO_PELLA,
TARO_DANIEL
],
'score': [(6, 1), (6, 7), (6, 3)],
'odds': {
GUIDO_PELLA: 1.17,
TARO_DANIEL: 5.00
}
},
{
'round': 16,
'players': [
ROBERTO_BAUTISTA_AGUT,
RUDOLF_MOLLEKER
],
'score': [(6, 4), (6, 2)],
'odds': {
ROBERTO_BAUTISTA_AGUT: 1.22,
RUDOLF_MOLLEKER: 4.10
}
},
{
'round': 16,
'players': [
PHILIPP_KOHLSCHREIBER,
KAREN_KHACHANOV
],
'score': [(7, 6), (6, 4)],
'odds': {
PHILIPP_KOHLSCHREIBER: 1.50,
KAREN_KHACHANOV: 2.35
}
},
# 2019-05-03
{
'round': 8,
'players': [
MATTEO_BERRETTINI,
PHILIPP_KOHLSCHREIBER
],
'score': [(4, 6), (7, 5), (6, 4)],
'odds': {
MATTEO_BERRETTINI: 2.20,
PHILIPP_KOHLSCHREIBER: 1.65
}
},
{
'round': 8,
'players': [
ROBERTO_BAUTISTA_AGUT,
GUIDO_PELLA
],
'score': [(4, 6), (6, 4), (6, 0)],
'odds': {
ROBERTO_BAUTISTA_AGUT: 1.79,
GUIDO_PELLA: 1.95
}
},
{
'round': 8,
'players': [
MARCO_CECCHINATO,
MARTON_FUCSOVICS
],
'score': [(1, 6), (7, 5), (7, 5)],
'odds': {
MARCO_CECCHINATO: 1.57,
MARTON_FUCSOVICS: 2.41
}
},
{
'round': 8,
'players': [
CHRISTIAN_GARIN,
ALEXANDER_ZVEREV
],
'score': [(6, 4), (5, 7), (7, 5)],
'odds': {
CHRISTIAN_GARIN: 3.28,
ALEXANDER_ZVEREV: 1.37
}
},
# 2019-05-04
{
'round': 4,
'players': [
CHRISTIAN_GARIN,
MARCO_CECCHINATO
],
'score': [(6, 2), (6, 4)],
'odds': {
CHRISTIAN_GARIN: 1.92,
MARCO_CECCHINATO: 1.83
}
},
# 2019-05-05
{
'round': 4,
'players': [
MATTEO_BERRETTINI,
ROBERTO_BAUTISTA_AGUT
],
'score': [(6, 4), (6, 2)],
'odds': {
MATTEO_BERRETTINI: 2.13,
ROBERTO_BAUTISTA_AGUT: 1.69
}
},
{
'round': 2,
'players': [
CHRISTIAN_GARIN,
MATTEO_BERRETTINI
],
'score': [(6, 1), (3, 6), (7, 6)],
'odds': {
CHRISTIAN_GARIN: 1.86,
MATTEO_BERRETTINI: 1.95
}
},
]
},
{
'location': ESTORIL,
'date': '2019-04-29',
'matches': [
# 2019-04-27
{
'round': 512,
'players': [
SALVATORE_CARUSO,
PEDRO_MARTINEZ
],
'score': [(6, 3), (5, 7), (7, 6)],
'odds': {
SALVATORE_CARUSO: 1.83,
PEDRO_MARTINEZ: 1.83
}
},
{
'round': 512,
'players': [
SIMONE_BOLELLI,
EGOR_GERASIMOV
],
'score': [(6, 2), (7, 6)],
'odds': {
SIMONE_BOLELLI: 1.41,
EGOR_GERASIMOV: 2.66
}
},
{
'round': 512,
'players': [
ALEJANDRO_DAVIDOVICH_FOKINA,
BJORN_FRATANGELO
],
'score': [(6, 2), (6, 4)],
'odds': {
ALEJANDRO_DAVIDOVICH_FOKINA: 1.38,
BJORN_FRATANGELO: 2.70
}
},
{
'round': 512,
'players': [
FILLIPPO_BALDI,
JOZEF_KOVALIK
],
'score': [(4, 6), (6, 3), (6, 2)],
'odds': {
FILLIPPO_BALDI: 2.40,
JOZEF_KOVALIK: 1.56
}
},
{
'round': 512,
'players': [
ALEXEI_POPYRIN,
GASTAO_ELIAS
],
'score': [(7, 5), (7, 6)],
'odds': {
ALEXEI_POPYRIN: 1.58,
GASTAO_ELIAS: 2.25
}
},
{
'round': 512,
'players': [
JOAO_DOMINGUES,
ELIAS_YMER
],
'score': [(6, 3), (7, 6)],
'odds': {
JOAO_DOMINGUES: 1.94,
ELIAS_YMER: 1.74
}
},
{
'round': 512,
'players': [
DANIEL_EVANS,
LORENZO_GIUSTINO
],
'score': [(6, 3), (7, 5)],
'odds': {
DANIEL_EVANS: 1.87,
LORENZO_GIUSTINO: 1.80
}
},
{
'round': 512,
'players': [
PABLO_CUEVAS,
DANIEL_BRANDS
],
'score': [(6, 1), (7, 6)],
'odds': {
PABLO_CUEVAS: 4.65,
DANIEL_BRANDS: 4.65
}
},
# 2019-04-28
{
'round': 256,
'players': [
JOAO_DOMINGUES,
FILLIPPO_BALDI
],
'score': [(6, 2), (6, 4)],
'odds': {
JOAO_DOMINGUES: 1.40,
FILLIPPO_BALDI: 2.75
}
},
{
'round': 256,
'players': [
ALEXEI_POPYRIN,
SIMONE_BOLELLI
],
'score': [(2, 6), (6, 3), (6, 4)],
'odds': {
ALEXEI_POPYRIN: 2.21,
SIMONE_BOLELLI: 1.59
}
},
{
'round': 256,
'players': [
ALEJANDRO_DAVIDOVICH_FOKINA,
DANIEL_EVANS
],
'score': [(3, 6), (6, 1), (6, 4)],
'odds': {
ALEJANDRO_DAVIDOVICH_FOKINA: 1.46,
DANIEL_EVANS: 2.40
}
},
{
'round': 256,
'players': [
SALVATORE_CARUSO,
PABLO_CUEVAS
],
'score': [(6, 4), (5, 7), (6, 4)],
# no odds
},
# 2019-04-29
{
'round': 32,
'players': [
REILLY_OPELKA,
PEDRO_SOUSA
],
'score': [(7, 6), (6, 4)],
'odds': {
REILLY_OPELKA: 2.15,
PEDRO_SOUSA: 1.71
}
},
{
'round': 32,
'players': [
YOSHIHITO_NISHIOKA,
MACKENZIE_MCDONALD
],
'score': [(6, 2), (6, 4)],
'odds': {
YOSHIHITO_NISHIOKA: 1.71,
MACKENZIE_MCDONALD: 2.10
}
},
{
'round': 32,
'players': [
GUIDO_ANDREOZZI,
HUGO_DELLIEN
],
'score': [(6, 3), (6, 3)],
'odds': {
GUIDO_ANDREOZZI: 1.91,
HUGO_DELLIEN: 1.87
}
},
{
'round': 32,
'players': [
JOAO_DOMINGUES,
ALEX_DE_MINAUR
],
'score': [(6, 2), (2, 6), (6, 2)],
'odds': {
JOAO_DOMINGUES: 2.00,
ALEX_DE_MINAUR: 1.74
}
},
# 2019-04-30
{
'round': 32,
'players': [
JOAO_SOUSA,
ALEXEI_POPYRIN
],
'score': [(6, 4), (2, 6), (6, 2)],
'odds': {
JOAO_SOUSA: 1.45,
ALEXEI_POPYRIN: 2.55
}
},
{
'round': 32,
'players': [
JOHN_MILLMAN,
BERNARD_TOMIC
],
'score': [(6, 3), (6, 0)],
'odds': {
JOHN_MILLMAN: 1.39,
BERNARD_TOMIC: 2.85
}
},
{
'round': 32,
'players': [
MALEK_JAZIRI,
NICOLAS_JARRY
],
'score': [(6, 3), (3, 6), (6, 4)],
'odds': {
MALEK_JAZIRI: 3.20,
NICOLAS_JARRY: 1.38
}
},
{
'round': 32,
'players': [
ALEJANDRO_DAVIDOVICH_FOKINA,
TAYLOR_FRITZ
],
'score': [(7, 6), (6, 4)],
'odds': {
ALEJANDRO_DAVIDOVICH_FOKINA: 1.67,
TAYLOR_FRITZ: 2.00
}
},
{
'round': 32,
'players': [
PABLO_CUEVAS,
SALVATORE_CARUSO
],
'score': [(6, 2), (6, 2)],
'odds': {
PABLO_CUEVAS: 3.25,
SALVATORE_CARUSO: 3.25
}
},
{
'round': 32,
'players': [
FRANCES_TIAFOE,
MIKHAIL_KUKUSHKIN
],
'score': [(6, 3), (7, 5)],
'odds': {
FRANCES_TIAFOE: 1.67,
MIKHAIL_KUKUSHKIN: 2.05
}
},
{
'round': 32,
'players': [
JEREMY_CHARDY,
PABLO_CARRENO_BUSTA
],
'score': [(5, 7), (6, 1), (6, 2)],
'odds': {
JEREMY_CHARDY: 2.05,
PABLO_CARRENO_BUSTA: 1.76
}
},
{
'round': 32,
'players': [
LEONARDO_MAYER,
DUSAN_LAJOVIC
],
'score': [(7, 6), (6, 4)],
'odds': {
LEONARDO_MAYER: 2.03,
DUSAN_LAJOVIC: 1.71
}
},
# 2019-05-01
{
'round': 16,
'players': [
JOAO_DOMINGUES,
JOHN_MILLMAN
],
'score': [(6, 3), (2, 1)],
'retired': True,
'odds': {
JOAO_DOMINGUES: 2.50,
JOHN_MILLMAN: 1.53
}
},
{
'round': 16,
'players': [
ALEJANDRO_DAVIDOVICH_FOKINA,
JEREMY_CHARDY
],
'score': [(6, 1), (6, 2)],
'odds': {
ALEJANDRO_DAVIDOVICH_FOKINA: 1.95,
JEREMY_CHARDY: 1.74
}
},
{
'round': 16,
'players': [
GAEL_MONFILS,
REILLY_OPELKA
],
'score': [(3, 6), (6, 3), (6, 0)],
'odds': {
GAEL_MONFILS: 1.48,
REILLY_OPELKA: 2.60
}
},
{
'round': 16,
'players': [
STEFANOS_TSITSIPAS,
GUIDO_ANDREOZZI
],
'score': [(6, 3), (6, 4)],
'odds': {
STEFANOS_TSITSIPAS: 1.15,
GUIDO_ANDREOZZI: 5.00
}
},
# 2019-05-02
{
'round': 16,
'players': [
MALEK_JAZIRI,
LEONARDO_MAYER
],
'score': [(7, 6), (6, 1)],
'odds': {
MALEK_JAZIRI: 3.35,
LEONARDO_MAYER: 1.30
}
},
{
'round': 16,
'players': [
PABLO_CUEVAS,
FILLIPPO_BALDI
],
'score': [(6, 2), (7, 5)],
'odds': {
PABLO_CUEVAS: 1.18,
FILLIPPO_BALDI: 4.51
}
},
{
'round': 16,
'players': [
FRANCES_TIAFOE,
YOSHIHITO_NISHIOKA
],
'score': [(2, 6), (6, 3), (7, 6)],
'odds': {
FRANCES_TIAFOE: 1.63,
YOSHIHITO_NISHIOKA: 2.25
}
},
{
'round': 16,
'players': [
DAVID_GOFFIN,
JOAO_SOUSA
],
'score': [(6, 3), (6, 2)],
'odds': {
DAVID_GOFFIN: 1.48,
JOAO_SOUSA: 2.60
}
},
# 2019-05-03
{
'round': 8,
'players': [
PABLO_CUEVAS,
FRANCES_TIAFOE
],
'score': [(6, 0), (6, 7), (6, 2)],
'odds': {
PABLO_CUEVAS: 1.57,
FRANCES_TIAFOE: 2.39
}
},
{
'round': 8,
'players': [
DAVID_GOFFIN,
MALEK_JAZIRI
],
'score': [(4, 6), (7, 6), (6, 2)],
'odds': {
DAVID_GOFFIN: 1.20,
MALEK_JAZIRI: 4.60
}
},
{
'round': 8,
'players': [
ALEJANDRO_DAVIDOVICH_FOKINA,
GAEL_MONFILS
],
'score': [(6, 7), (7, 5), (6, 4)],
'odds': {
ALEJANDRO_DAVIDOVICH_FOKINA: 2.63,
GAEL_MONFILS: 1.41
}
},
{
'round': 8,
'players': [
STEFANOS_TSITSIPAS,
JOAO_DOMINGUES
],
'score': [(7, 6), (6, 4)],
# no odds
},
# 2019-05-04
{
'round': 4,
'players': [
PABLO_CUEVAS,
ALEJANDRO_DAVIDOVICH_FOKINA
],
'score': [(3, 6), (6, 2), (6, 2)],
'odds': {
PABLO_CUEVAS: 1.51,
ALEJANDRO_DAVIDOVICH_FOKINA: 2.45
}
},
{
'round': 4,
'players': [
STEFANOS_TSITSIPAS,
DAVID_GOFFIN
],
'score': [(3, 6), (6, 4), (6, 4)],
'odds': {
STEFANOS_TSITSIPAS: 1.53,
DAVID_GOFFIN: 2.45
}
},
            # 2019-05-05
{
'round': 2,
'players': [
STEFANOS_TSITSIPAS,
PABLO_CUEVAS
],
'score': [(6, 3), (7, 6)],
'odds': {
STEFANOS_TSITSIPAS: 1.36,
PABLO_CUEVAS: 3.15
}
}
]
}
]
| 28.663748 | 54 | 0.263413 | from men import *
from location import *
DATA_2019_04 = [
{
'location': HOUSTON,
'date': '2019-04-14',
'matches': [
{
'round': 512,
'players': [
SANTIAGO_GIRALDO,
JAMES_WARD
],
'score': [(6, 4), (6, 4)],
'odds': {
SANTIAGO_GIRALDO: 1.27,
JAMES_WARD: 3.50
}
},
{
'round': 512,
'players': [
PEDJA_KRSTIN,
MARCOS_GIRON
],
'score': [(6, 4), (6, 1)],
'odds': {
PEDJA_KRSTIN: 1.65,
MARCOS_GIRON: 2.15
}
},
{
'round': 512,
'players': [
ROBERTO_QUIROZ,
JC_ARAGONE
],
'score': [(6, 2), (6, 0)],
'odds': {
ROBERTO_QUIROZ: 1.65,
JC_ARAGONE: 2.13
}
},
{
'round': 512,
'players': [
MITCHELL_KRUEGER,
DOMINIK_KOEPFER
],
'score': [(3, 6), (6, 3), (6, 4)],
'odds': {
MITCHELL_KRUEGER: 1.67,
DOMINIK_KOEPFER: 1.96
}
},
{
'round': 512,
'players': [
DANIEL_ELAHI_GALAN,
SEBASTIAN_OFNER
],
'score': [(6, 0), (6, 2)],
'odds': {
DANIEL_ELAHI_GALAN: 2.14,
SEBASTIAN_OFNER: 1.65
}
},
{
'round': 512,
'players': [
CHRISTOPHER_EUBANKS,
JAY_CLARKE
],
'score': [(6, 3), (6, 4)],
'odds': {
CHRISTOPHER_EUBANKS: 2.40,
JAY_CLARKE: 1.56
}
},
{
'round': 512,
'players': [
DARIAN_KING,
PETER_POLANSKY
],
'score': [(6, 4), (6, 1)],
'odds': {
DARIAN_KING: 1.63,
PETER_POLANSKY: 2.20
}
},
{
'round': 512,
'players': [
HENRI_LAAKSONEN,
TOMMY_PAUL
],
'score': [(6, 4), (6, 7), (6, 4)],
'odds': {
HENRI_LAAKSONEN: 1.93,
TOMMY_PAUL: 1.69
}
},
{
'round': 256,
'players': [
PEDJA_KRSTIN,
DARIAN_KING
],
'score': [(7, 6), (7, 5)],
'odds': {
PEDJA_KRSTIN: 1.48,
DARIAN_KING: 2.51
}
},
{
'round': 256,
'players': [
DANIEL_ELAHI_GALAN,
ROBERTO_QUIROZ
],
'score': [(4, 6), (7, 5), (6, 1)],
},
{
'round': 256,
'players': [
SANTIAGO_GIRALDO,
CHRISTOPHER_EUBANKS
],
'score': [(6, 4), (6, 4)],
},
{
'round': 256,
'players': [
HENRI_LAAKSONEN,
MITCHELL_KRUEGER
],
'score': [(6, 3), (5, 7), (6, 3)],
},
{
'round': 32,
'players': [
BERNARD_TOMIC,
DENIS_KUDLA
],
'score': [(7, 6), (7, 5)],
'odds': {
BERNARD_TOMIC: 1.77,
DENIS_KUDLA: 2.05
}
},
{
'round': 32,
'players': [
CASPER_RUUD,
HUGO_DELLIEN
],
'score': [(7, 6), (6, 4)],
'odds': {
CASPER_RUUD: 1.49,
HUGO_DELLIEN: 2.68
}
},
{
'round': 32,
'players': [
RYAN_HARRISON,
IVO_KARLOVIC
],
'score': [(6, 3), (6, 4)],
'odds': {
RYAN_HARRISON: 2.26,
IVO_KARLOVIC: 1.65
}
},
{
'round': 32,
'players': [
CHRISTIAN_GARIN,
PABLO_CUEVAS
],
'score': [(4, 6), (6, 4), (6, 2)],
'odds': {
CHRISTIAN_GARIN: 2.38,
PABLO_CUEVAS: 1.59
}
},
{
'round': 32,
'players': [
MARCEL_GRANOLLERS,
TAYLOR_FRITZ
],
'score': [(6, 2), (4, 6), (6, 2)],
'odds': {
MARCEL_GRANOLLERS: 2.20,
TAYLOR_FRITZ: 1.59
}
},
{
'round': 32,
'players': [
JANKO_TIPSAREVIC,
TENNYS_SANDGREN
],
'score': [(6, 1), (7, 6)],
'odds': {
JANKO_TIPSAREVIC: 2.71,
TENNYS_SANDGREN: 1.45
}
},
{
'round': 32,
'players': [
SANTIAGO_GIRALDO,
BRADLEY_KLAHN
],
'score': [(6, 4), (6, 4)],
'odds': {
SANTIAGO_GIRALDO: 1.43,
BRADLEY_KLAHN: 2.78
}
},
{
'round': 32,
'players': [
GUILLERMO_GARCIA_LOPEZ,
NOAH_RUBIN
],
'score': [(6, 7), (6, 3), (6, 3)],
'odds': {
GUILLERMO_GARCIA_LOPEZ: 1.57,
NOAH_RUBIN: 2.48
}
},
{
'round': 32,
'players': [
DANIEL_ELAHI_GALAN,
PAOLO_LORENZI
],
'score': [(7, 6), (6, 4)],
'odds': {
DANIEL_ELAHI_GALAN: 2.00,
PAOLO_LORENZI: 1.77
}
},
{
'round': 32,
'players': [
SAM_QUERREY,
BJORN_FRATANGELO
],
'score': [(6, 3), (6, 4)],
'odds': {
SAM_QUERREY: 1.61,
BJORN_FRATANGELO: 2.30
}
},
{
'round': 32,
'players': [
JORDAN_THOMPSON,
PEDJA_KRSTIN
],
'score': [(7, 5), (6, 2)],
'odds': {
JORDAN_THOMPSON: 1.58,
PEDJA_KRSTIN: 2.48
}
},
{
'round': 32,
'players': [
HENRI_LAAKSONEN,
MACKENZIE_MCDONALD
],
'score': [(6, 3), (6, 4)],
'odds': {
HENRI_LAAKSONEN: 1.71,
MACKENZIE_MCDONALD: 2.13
}
},
{
'round': 16,
'players': [
HENRI_LAAKSONEN,
RYAN_HARRISON
],
'score': [(6, 4), (7, 5)],
'odds': {
HENRI_LAAKSONEN: 1.93,
RYAN_HARRISON: 1.83
}
},
{
'round': 16,
'players': [
MARCEL_GRANOLLERS,
BERNARD_TOMIC
],
'score': [(6, 1), (6, 2)],
'odds': {
MARCEL_GRANOLLERS: 1.59,
BERNARD_TOMIC: 2.40
}
},
{
'round': 16,
'players': [
CASPER_RUUD,
REILLY_OPELKA
],
'score': [(4, 6), (6, 4), (6, 4)],
'odds': {
CASPER_RUUD: 1.89,
REILLY_OPELKA: 1.91
}
},
{
'round': 16,
'players': [
CHRISTIAN_GARIN,
JEREMY_CHARDY
],
'score': [(3, 6), (7, 6), (7, 6)],
'odds': {
CHRISTIAN_GARIN: 1.67,
JEREMY_CHARDY: 2.03
}
},
{
'round': 16,
'players': [
SAM_QUERREY,
GUILLERMO_GARCIA_LOPEZ
],
'score': [(6, 4), (6, 3)],
'odds': {
SAM_QUERREY: 1.44,
GUILLERMO_GARCIA_LOPEZ: 2.79
}
},
{
'round': 16,
'players': [
JORDAN_THOMPSON,
SANTIAGO_GIRALDO
],
'score': [(4, 6), (7, 6), (7, 5)],
'odds': {
JORDAN_THOMPSON: 1.59,
SANTIAGO_GIRALDO: 2.30
}
},
{
'round': 16,
'players': [
JANKO_TIPSAREVIC,
CAMERON_NORRIE
],
'score': [(6, 3), (6, 4)],
'odds': {
JANKO_TIPSAREVIC: 2.27,
CAMERON_NORRIE: 1.63
}
},
{
'round': 16,
'players': [
DANIEL_ELAHI_GALAN,
STEVE_JOHNSON
],
'score': [(6, 3), (6, 3)],
'odds': {
DANIEL_ELAHI_GALAN: 2.85,
STEVE_JOHNSON: 1.43
}
},
{
'round': 8,
'players': [
CASPER_RUUD,
MARCEL_GRANOLLERS
],
'score': [(6, 1), (6, 0)],
'odds': {
CASPER_RUUD: 1.65,
MARCEL_GRANOLLERS: 2.30
}
},
{
'round': 8,
'players': [
CHRISTIAN_GARIN,
HENRI_LAAKSONEN
],
'score': [(6, 3), (6, 2)],
'odds': {
CHRISTIAN_GARIN: 1.49,
HENRI_LAAKSONEN: 2.72
}
},
{
'round': 8,
'players': [
SAM_QUERREY,
JANKO_TIPSAREVIC
],
'score': [(7, 6), (7, 6)],
'odds': {
SAM_QUERREY: 1.40,
JANKO_TIPSAREVIC: 2.96
}
},
{
'round': 8,
'players': [
DANIEL_ELAHI_GALAN,
JORDAN_THOMPSON
],
'score': [(6, 1), (4, 6), (6, 4)],
},
{
'round': 4,
'players': [
CASPER_RUUD,
DANIEL_ELAHI_GALAN
],
'score': [(7, 5), (6, 2)],
'odds': {
CASPER_RUUD: 1.29,
DANIEL_ELAHI_GALAN: 3.60
}
},
{
'round': 4,
'players': [
CHRISTIAN_GARIN,
SAM_QUERREY
],
'score': [(7, 6), (6, 2)],
'odds': {
CHRISTIAN_GARIN: 2.00,
SAM_QUERREY: 1.81
}
},
{
'round': 2,
'players': [
CHRISTIAN_GARIN,
CASPER_RUUD
],
'score': [(7, 6), (4, 6), (6, 3)],
'odds': {
CHRISTIAN_GARIN: 1.67,
CASPER_RUUD: 2.25
}
}
]
},
{
'location': MARRAKECH,
'date': '2019-04-14',
'matches': [
{
'round': 512,
'players': [
EVGENY_KARLOVSKIY,
TIM_PUETZ
],
'score': [(7, 6), (4, 6), (6, 2)],
},
{
'round': 512,
'players': [
CARLOS_BERLOCQ,
ADAM_MOUNDIR
],
'score': [(6, 1), (6, 2)],
'odds': {
CARLOS_BERLOCQ: 1.07,
ADAM_MOUNDIR: 7.18
}
},
{
'round': 512,
'players': [
FACUNDO_BAGNIS,
VIKTOR_TROICKI
],
'score': [(7, 6), (6, 4)],
'odds': {
FACUNDO_BAGNIS: 1.69,
VIKTOR_TROICKI: 2.01
}
},
{
'round': 512,
'players': [
ADRIAN_MENENDEZ_MACEIRAS,
LAMINE_OUAHAB
],
'score': [(7, 6), (6, 7), (6, 4)],
'odds': {
ADRIAN_MENENDEZ_MACEIRAS: 2.28,
LAMINE_OUAHAB: 1.57
}
},
{
'round': 512,
'players': [
ELLIOT_BENCHETRIT,
CORENTIN_MOUTET
],
'score': [(6, 3), (7, 6)],
'odds': {
ELLIOT_BENCHETRIT: 3.24,
CORENTIN_MOUTET: 1.33
}
},
{
'round': 512,
'players': [
ALEJANDRO_DAVIDOVICH_FOKINA,
GREGOIRE_BARRERE
],
'score': [(7, 5), (3, 6), (6, 4)],
'odds': {
ALEJANDRO_DAVIDOVICH_FOKINA: 1.59,
GREGOIRE_BARRERE: 2.30
}
},
{
'round': 512,
'players': [
ELIAS_YMER,
KEVIN_KRAWIETZ
],
'score': [(7, 6), (6, 4)],
'odds': {
ELIAS_YMER: 1.31,
KEVIN_KRAWIETZ: 3.40
}
},
{
'round': 512,
'players': [
LORENZO_SONEGO,
ALEXEY_VATUTIN
],
'score': [(7, 6), (6, 4)],
'odds': {
LORENZO_SONEGO: 1.36,
ALEXEY_VATUTIN: 3.00
}
},
{
'round': 256,
'players': [
ALEJANDRO_DAVIDOVICH_FOKINA,
EVGENY_KARLOVSKIY
],
'score': [(6, 2), (6, 2)],
'odds': {
ALEJANDRO_DAVIDOVICH_FOKINA: 1.15,
EVGENY_KARLOVSKIY: 5.16
}
},
{
'round': 256,
'players': [
ADRIAN_MENENDEZ_MACEIRAS,
ELLIOT_BENCHETRIT
],
'score': [(7, 5), (7, 5)],
'odds': {
ADRIAN_MENENDEZ_MACEIRAS: 2.55,
ELLIOT_BENCHETRIT: 1.48
}
},
{
'round': 256,
'players': [
FACUNDO_BAGNIS,
ELIAS_YMER
],
'score': [(1, 6), (6, 3), (7, 5)],
'odds': {
FACUNDO_BAGNIS: 2.13,
ELIAS_YMER: 1.63
}
},
{
'round': 256,
'players': [
LORENZO_SONEGO,
CARLOS_BERLOCQ
],
'score': [(6, 4), (7, 5)],
'odds': {
LORENZO_SONEGO: 1.28,
CARLOS_BERLOCQ: 3.34
}
},
{
'round': 32,
'players': [
JO_WILFRIED_TSONGA,
CEDRIC_MARCEL_STEBE
],
'score': [(6, 1), (7, 6)],
'odds': {
JO_WILFRIED_TSONGA: 1.11,
CEDRIC_MARCEL_STEBE: 6.50
}
},
{
'round': 32,
'players': [
TARO_DANIEL,
MISCHA_ZVEREV
],
'score': [(6, 3), (6, 0)],
'odds': {
TARO_DANIEL: 1.45,
MISCHA_ZVEREV: 2.80
}
},
{
'round': 32,
'players': [
GUIDO_ANDREOZZI,
ALBERT_RAMOS_VINOLAS
],
'score': [(6, 3), (7, 6)],
'odds': {
GUIDO_ANDREOZZI: 2.40,
ALBERT_RAMOS_VINOLAS: 1.59
}
},
{
'round': 32,
'players': [
GILLES_SIMON,
JOZEF_KOVALIK
],
'score': [(6, 4), (6, 1)],
'odds': {
GILLES_SIMON: 1.38,
JOZEF_KOVALIK: 3.00
}
},
{
'round': 32,
'players': [
KYLE_EDMUND,
UGO_HUMBERT
],
'score': [(6, 3), (6, 2)],
'odds': {
KYLE_EDMUND: 1.20,
UGO_HUMBERT: 4.70
}
},
{
'round': 32,
'players': [
BENOIT_PAIRE,
ALJAZ_BEDENE
],
'score': [(3, 6), (6, 4), (7, 5)],
'odds': {
BENOIT_PAIRE: 1.95,
ALJAZ_BEDENE: 1.74
}
},
{
'round': 32,
'players': [
JAUME_MUNAR,
FACUNDO_BAGNIS
],
'score': [(6, 1), (7, 6)],
'odds': {
JAUME_MUNAR: 1.30,
FACUNDO_BAGNIS: 3.30
}
},
{
'round': 32,
'players': [
JUAN_IGNACIO_LONDERO,
CARLOS_BERLOCQ
],
'score': [(6, 2), (6, 4)],
'odds': {
JUAN_IGNACIO_LONDERO: 1.50,
CARLOS_BERLOCQ: 2.51
}
},
{
'round': 32,
'players': [
ROBIN_HAASE,
MALEK_JAZIRI
],
'score': [(6, 3), (6, 4)],
'odds': {
ROBIN_HAASE: 1.44,
MALEK_JAZIRI: 2.75
}
},
{
'round': 32,
'players': [
PABLO_ANDUJAR,
FEDERICO_DELBONIS
],
'score': [(7, 6), (6, 3)],
'odds': {
PABLO_ANDUJAR: 2.25,
FEDERICO_DELBONIS: 1.63
}
},
{
'round': 32,
'players': [
PIERRE_HUGUES_HERBERT,
THOMAS_FABBIANO
],
'score': [(6, 7), (6, 4), (6, 1)],
'odds': {
PIERRE_HUGUES_HERBERT: 1.61,
THOMAS_FABBIANO: 2.35
}
},
{
'round': 32,
'players': [
PHILIPP_KOHLSCHREIBER,
ALEJANDRO_DAVIDOVICH_FOKINA
],
'score': [(7, 6), (7, 5)],
'odds': {
PHILIPP_KOHLSCHREIBER: 1.56,
ALEJANDRO_DAVIDOVICH_FOKINA: 2.45
}
},
{
'round': 32,
'players': [
ADRIAN_MENENDEZ_MACEIRAS,
FERNANDO_VERDASCO
],
'score': [(5, 7), (6, 2), (6, 2)],
'odds': {
ADRIAN_MENENDEZ_MACEIRAS: 4.60,
FERNANDO_VERDASCO: 1.20
}
},
{
'round': 32,
'players': [
LORENZO_SONEGO,
LASLO_DJERE
],
'score': [(6, 3), (6, 3)],
'odds': {
LORENZO_SONEGO: 2.13,
LASLO_DJERE: 1.63
}
},
{
'round': 32,
'players': [
JIRI_VESELY,
FABIO_FOGNINI
],
'score': [(7, 6), (6, 4)],
'odds': {
JIRI_VESELY: 2.32,
FABIO_FOGNINI: 1.54
}
},
{
'round': 32,
'players': [
ALEXANDER_ZVEREV,
DENIS_ISTOMIN
],
'score': [(6, 4), (6, 4)],
'odds': {
ALEXANDER_ZVEREV: 1.10,
DENIS_ISTOMIN: 7.70
}
},
{
'round': 16,
'players': [
LORENZO_SONEGO,
ROBIN_HAASE
],
'score': [(7, 6), (6, 3)],
'odds': {
LORENZO_SONEGO: 1.66,
ROBIN_HAASE: 2.20
}
},
{
'round': 16,
'players': [
TARO_DANIEL,
ADRIAN_MENENDEZ_MACEIRAS
],
'score': [(6, 2), (1, 6), (6, 1)],
'odds': {
TARO_DANIEL: 1.30,
ADRIAN_MENENDEZ_MACEIRAS: 3.40
}
},
{
'round': 16,
'players': [
GILLES_SIMON,
GUIDO_ANDREOZZI
],
'score': [(6, 2), (6, 2)],
'odds': {
GILLES_SIMON: 1.59,
GUIDO_ANDREOZZI: 2.25
}
},
{
'round': 16,
'players': [
JO_WILFRIED_TSONGA,
KYLE_EDMUND
],
'score': [(7, 6), (6, 3)],
'odds': {
JO_WILFRIED_TSONGA: 2.65,
KYLE_EDMUND: 1.49
}
},
{
'round': 16,
'players': [
JIRI_VESELY,
JUAN_IGNACIO_LONDERO
],
'score': [(6, 3), (6, 4)],
'odds': {
JIRI_VESELY: 1.77,
JUAN_IGNACIO_LONDERO: 2.05
}
},
{
'round': 16,
'players': [
BENOIT_PAIRE,
PIERRE_HUGUES_HERBERT
],
'score': [(6, 4), (6, 2)],
'odds': {
BENOIT_PAIRE: 1.67,
PIERRE_HUGUES_HERBERT: 2.15
}
},
{
'round': 16,
'players': [
PABLO_ANDUJAR,
PHILIPP_KOHLSCHREIBER
],
'score': [(7, 6), (6, 4)],
'odds': {
PABLO_ANDUJAR: 1.95,
PHILIPP_KOHLSCHREIBER: 1.74
}
},
{
'round': 16,
'players': [
JAUME_MUNAR,
ALEXANDER_ZVEREV
],
'score': [(7, 6), (2, 6), (6, 3)],
'odds': {
JAUME_MUNAR: 4.11,
ALEXANDER_ZVEREV: 1.24
}
},
{
'round': 8,
'players': [
JO_WILFRIED_TSONGA,
LORENZO_SONEGO
],
'score': [(6, 3), (6, 2)],
'odds': {
JO_WILFRIED_TSONGA: 1.42,
LORENZO_SONEGO: 2.75
}
},
{
'round': 8,
'players': [
BENOIT_PAIRE,
JAUME_MUNAR
],
'score': [(6, 1), (6, 3)],
'odds': {
BENOIT_PAIRE: 2.35,
JAUME_MUNAR: 1.59
}
},
{
'round': 8,
'players': [
PABLO_ANDUJAR,
JIRI_VESELY
],
'score': [],
'retired': True,
'odds': {
PABLO_ANDUJAR: 1.77,
JIRI_VESELY: 2.05
}
},
{
'round': 8,
'players': [
GILLES_SIMON,
TARO_DANIEL
],
'score': [(6, 4), (7, 5)],
'odds': {
GILLES_SIMON: 1.36,
TARO_DANIEL: 3.00
}
},
{
'round': 4,
'players': [
BENOIT_PAIRE,
JO_WILFRIED_TSONGA
],
'score': [(2, 6), (6, 4), (6, 3)],
'odds': {
BENOIT_PAIRE: 3.00,
JO_WILFRIED_TSONGA: 1.38
}
},
{
'round': 4,
'players': [
PABLO_ANDUJAR,
GILLES_SIMON
],
'score': [(6, 1), (6, 1)],
'odds': {
PABLO_ANDUJAR: 1.91,
GILLES_SIMON: 1.80
}
},
{
'round': 2,
'players': [
BENOIT_PAIRE,
PABLO_ANDUJAR
],
'score': [(6, 2), (6, 3)],
'odds': {
BENOIT_PAIRE: 2.10,
PABLO_ANDUJAR: 1.69
}
}
]
},
{
'location': MONTE_CARLO,
'date': '2019-04-21',
'matches': [
{
'round': 512,
'players': [
ELIAS_YMER,
MIOMIR_KECMANOVIC
],
'score': [(6, 1), (6, 3)],
'odds': {
ELIAS_YMER: 2.15,
MIOMIR_KECMANOVIC: 1.65
}
},
{
'round': 512,
'players': [
THOMAS_FABBIANO,
FELICIANO_LOPEZ
],
'score': [(3, 6), (6, 4), (6, 2)],
'odds': {
THOMAS_FABBIANO: 1.83,
FELICIANO_LOPEZ: 1.82
}
},
{
'round': 512,
'players': [
MARCO_TRUNGELLITI,
PETER_GOJOWCZYK
],
'score': [(6, 4), (6, 2)],
'odds': {
MARCO_TRUNGELLITI: 1.83,
PETER_GOJOWCZYK: 1.71
}
},
{
'round': 512,
'players': [
ALBERT_RAMOS_VINOLAS,
MAXIMILIAN_MARTERER
],
'score': [(6, 2), (6, 2)],
'odds': {
ALBERT_RAMOS_VINOLAS: 1.41,
MAXIMILIAN_MARTERER: 2.70
}
},
{
'round': 512,
'players': [
ANDREY_RUBLEV,
BERNARD_TOMIC
],
'score': [(4, 6), (7, 6), (7, 6)],
'odds': {
ANDREY_RUBLEV: 1.19,
BERNARD_TOMIC: 4.44
}
},
{
'round': 512,
'players': [
GUIDO_ANDREOZZI,
ERNESTS_GULBIS
],
'score': [(6, 4), (6, 1)],
'odds': {
GUIDO_ANDREOZZI: 1.50,
ERNESTS_GULBIS: 2.40
}
},
{
'round': 512,
'players': [
TARO_DANIEL,
YANNICK_MADEN
],
'score': [(6, 4), (6, 4)],
'odds': {
TARO_DANIEL: 1.63,
YANNICK_MADEN: 2.10
}
},
{
'round': 512,
'players': [
FEDERICO_DELBONIS,
ILYA_IVASHKA
],
'score': [(6, 2), (3, 4)],
'retired': True,
'odds': {
FEDERICO_DELBONIS: 1.24,
ILYA_IVASHKA: 3.79
}
},
{
'round': 512,
'players': [
JULIAN_OCLEPPO,
MISCHA_ZVEREV
],
'score': [(7, 6), (7, 6)],
'odds': {
JULIAN_OCLEPPO: 2.78,
MISCHA_ZVEREV: 1.36
}
},
{
'round': 512,
'players': [
ALJAZ_BEDENE,
HUGO_NYS
],
'score': [(6, 2), (6, 4)],
'odds': {
ALJAZ_BEDENE: 1.07,
HUGO_NYS: 8.00
}
},
{
'round': 512,
'players': [
JUAN_IGNACIO_LONDERO,
ROMAIN_ARNEODO
],
'score': [(6, 0), (6, 4)],
'odds': {
JUAN_IGNACIO_LONDERO: 1.06,
ROMAIN_ARNEODO: 7.00
}
},
{
'round': 512,
'players': [
LORENZO_SONEGO,
YOSHIHITO_NISHIOKA
],
'score': [(6, 2), (4, 6), (6, 0)],
'odds': {
LORENZO_SONEGO: 1.33,
YOSHIHITO_NISHIOKA: 3.00
}
},
{
'round': 512,
'players': [
UGO_HUMBERT,
FLORENT_DIEP
],
'score': [(3, 6), (7, 5), (6, 3)],
'odds': {
UGO_HUMBERT: 1.05,
FLORENT_DIEP: 11.00
}
},
{
'round': 512,
'players': [
ALEXEI_POPYRIN,
LEONARDO_MAYER
],
'score': [(7, 6), (2, 6), (7, 6)],
'odds': {
ALEXEI_POPYRIN: 3.20,
LEONARDO_MAYER: 1.33
}
},
{
'round': 256,
'players': [
LORENZO_SONEGO,
MARCO_TRUNGELLITI
],
'score': [],
'retired': True,
},
{
'round': 256,
'players': [
ALEXEI_POPYRIN,
ELIAS_YMER
],
'score': [(6, 3), (7, 6)],
'odds': {
ALEXEI_POPYRIN: 2.35,
ELIAS_YMER: 1.57
}
},
{
'round': 256,
'players': [
GUIDO_ANDREOZZI,
JULIAN_OCLEPPO
],
'score': [(6, 3), (6, 1)],
'odds': {
GUIDO_ANDREOZZI: 1.07,
JULIAN_OCLEPPO: 7.43
}
},
{
'round': 256,
'players': [
FEDERICO_DELBONIS,
ALBERT_RAMOS_VINOLAS
],
'score': [(7, 5), (6, 0)],
'odds': {
FEDERICO_DELBONIS: 1.80,
ALBERT_RAMOS_VINOLAS: 1.81
}
},
{
'round': 256,
'players': [
ALJAZ_BEDENE,
TARO_DANIEL
],
'score': [(7, 6), (6, 3)],
'odds': {
ALJAZ_BEDENE: 1.54,
TARO_DANIEL: 2.39
}
},
{
'round': 256,
'players': [
JUAN_IGNACIO_LONDERO,
THOMAS_FABBIANO
],
'score': [(6, 4), (6, 1)],
'odds': {
JUAN_IGNACIO_LONDERO: 1.47,
THOMAS_FABBIANO: 2.46
}
},
{
'round': 256,
'players': [
ANDREY_RUBLEV,
UGO_HUMBERT
],
'score': [(6, 4), (6, 4)],
'odds': {
ANDREY_RUBLEV: 1.36,
UGO_HUMBERT: 3.00
}
},
{
'round': 64,
'players': [
STAN_WAWRINKA,
LUCAS_POUILLE
],
'score': [(7, 5), (6, 3)],
'odds': {
STAN_WAWRINKA: 1.39,
LUCAS_POUILLE: 3.03
}
},
{
'round': 64,
'players': [
GUIDO_PELLA,
LASLO_DJERE
],
'score': [(6, 7), (6, 2), (6, 4)],
'odds': {
GUIDO_PELLA: 1.69,
LASLO_DJERE: 2.15
}
},
{
'round': 64,
'players': [
GRIGOR_DIMITROV,
MATTEO_BERRETTINI
],
'score': [(7, 5), (6, 4)],
'odds': {
GRIGOR_DIMITROV: 1.71,
MATTEO_BERRETTINI: 2.10
}
},
{
'round': 64,
'players': [
BORNA_CORIC,
HUBERT_HURKACZ
],
'score': [(6, 4), (5, 7), (7, 5)],
'odds': {
BORNA_CORIC: 1.53,
HUBERT_HURKACZ: 2.63
}
},
{
'round': 64,
'players': [
LORENZO_SONEGO,
ANDREAS_SEPPI
],
'score': [(7, 6), (6, 4)],
'odds': {
LORENZO_SONEGO: 1.48,
ANDREAS_SEPPI: 2.70
}
},
{
'round': 64,
'players': [
JAUME_MUNAR,
LUCAS_CATARINA
],
'score': [(6, 0), (6, 3)],
'odds': {
JAUME_MUNAR: 1.04,
LUCAS_CATARINA: 12.24
}
},
{
'round': 64,
'players': [
DUSAN_LAJOVIC,
MALEK_JAZIRI
],
'score': [(6, 4), (6, 4)],
'odds': {
DUSAN_LAJOVIC: 1.37,
MALEK_JAZIRI: 3.18
}
},
{
'round': 64,
'players': [
MIKHAIL_KUKUSHKIN,
JEREMY_CHARDY
],
'score': [(6, 3), (6, 4)],
'odds': {
MIKHAIL_KUKUSHKIN: 2.60,
JEREMY_CHARDY: 1.51
}
},
{
'round': 64,
'players': [
PHILIPP_KOHLSCHREIBER,
TARO_DANIEL
],
'score': [(6, 1), (6, 3)],
'odds': {
PHILIPP_KOHLSCHREIBER: 1.36,
TARO_DANIEL: 3.22
}
},
{
'round': 64,
'players': [
MARTIN_KLIZAN,
FEDERICO_DELBONIS
],
'score': [(7, 6), (7, 5)],
'odds': {
MARTIN_KLIZAN: 2.99,
FEDERICO_DELBONIS: 1.39
}
},
{
'round': 64,
'players': [
ROBERTO_BAUTISTA_AGUT,
JOHN_MILLMAN
],
'score': [(3, 6), (6, 1), (6, 1)],
'odds': {
ROBERTO_BAUTISTA_AGUT: 1.18,
JOHN_MILLMAN: 5.16
}
},
{
'round': 64,
'players': [
RADU_ALBOT,
ALJAZ_BEDENE
],
'score': [(6, 4), (6, 2)],
'odds': {
RADU_ALBOT: 2.35,
ALJAZ_BEDENE: 1.57
}
},
{
'round': 64,
'players': [
DIEGO_SCHWARTZMAN,
KYLE_EDMUND
],
'score': [(4, 6), (6, 3), (6, 1)],
'odds': {
DIEGO_SCHWARTZMAN: 2.15,
KYLE_EDMUND: 1.69
}
},
{
'round': 64,
'players': [
DAVID_GOFFIN,
GUIDO_ANDREOZZI
],
'score': [(6, 1), (6, 4)],
'odds': {
DAVID_GOFFIN: 1.30,
GUIDO_ANDREOZZI: 3.57
}
},
{
'round': 64,
'players': [
JAN_LENNARD_STRUFF,
DENIS_SHAPOVALOV
],
'score': [(5, 7), (6, 3), (6, 1)],
'odds': {
JAN_LENNARD_STRUFF: 2.20,
DENIS_SHAPOVALOV: 1.67
}
},
{
'round': 64,
'players': [
FABIO_FOGNINI,
ANDREY_RUBLEV
],
'score': [(4, 6), (7, 5), (6, 4)],
'odds': {
FABIO_FOGNINI: 2.02,
ANDREY_RUBLEV: 1.74
}
},
{
'round': 64,
'players': [
MARTON_FUCSOVICS,
NIKOLOZ_BASILASHVILI
],
'score': [(7, 5), (3, 6), (6, 1)],
'odds': {
MARTON_FUCSOVICS: 1.75,
NIKOLOZ_BASILASHVILI: 2.05
}
},
{
'round': 64,
'players': [
MARCO_CECCHINATO,
DAMIR_DZUMHUR
],
'score': [(4, 0)],
'retired': True,
'odds': {
MARCO_CECCHINATO: 1.28,
DAMIR_DZUMHUR: 3.60
}
},
{
'round': 64,
'players': [
DANIIL_MEDVEDEV,
JOAO_SOUSA
],
'score': [(6, 1), (6, 1)],
'odds': {
DANIIL_MEDVEDEV: 1.54,
JOAO_SOUSA: 2.49
}
},
{
'round': 64,
'players': [
GILLES_SIMON,
ALEXEI_POPYRIN
],
'score': [(7, 5), (6, 1)],
'odds': {
GILLES_SIMON: 1.41,
ALEXEI_POPYRIN: 2.84
}
},
{
'round': 64,
'players': [
CAMERON_NORRIE,
ADRIAN_MANNARINO
],
'score': [(6, 4), (6, 3)],
'odds': {
CAMERON_NORRIE: 1.62,
ADRIAN_MANNARINO: 2.36
}
},
{
'round': 64,
'players': [
PIERRE_HUGUES_HERBERT,
FERNANDO_VERDASCO
],
'score': [(6, 4), (6, 4)],
'odds': {
PIERRE_HUGUES_HERBERT: 2.49,
FERNANDO_VERDASCO: 1.54
}
},
{
'round': 64,
'players': [
TAYLOR_FRITZ,
JO_WILFRIED_TSONGA
],
'score': [(6, 4), (2, 0)],
'retired': True,
'odds': {
TAYLOR_FRITZ: 5.75,
JO_WILFRIED_TSONGA: 1.14
}
},
{
'round': 64,
'players': [
FELIX_AUGER_ALIASSIME,
JUAN_IGNACIO_LONDERO
],
'score': [(7, 5), (7, 6)],
'odds': {
FELIX_AUGER_ALIASSIME: 1.48,
JUAN_IGNACIO_LONDERO: 2.75
}
},
{
'round': 32,
'players': [
MARCO_CECCHINATO,
STAN_WAWRINKA
],
'score': [(0, 6), (7, 5), (6, 3)],
'odds': {
MARCO_CECCHINATO: 2.44,
STAN_WAWRINKA: 1.57
}
},
{
'round': 32,
'players': [
BORNA_CORIC,
JAUME_MUNAR
],
'score': [(6, 7), (7, 6), (6, 4)],
'odds': {
BORNA_CORIC: 1.65,
JAUME_MUNAR: 2.25
}
},
{
'round': 32,
'players': [
LORENZO_SONEGO,
KAREN_KHACHANOV
],
'score': [(7, 6), (6, 4)],
'odds': {
LORENZO_SONEGO: 2.35,
KAREN_KHACHANOV: 1.59
}
},
{
'round': 32,
'players': [
GUIDO_PELLA,
MARIN_CILIC
],
'score': [(6, 3), (5, 7), (6, 1)],
'odds': {
GUIDO_PELLA: 2.23,
MARIN_CILIC: 1.67
}
},
{
'round': 32,
'players': [
NOVAK_DJOKOVIC,
PHILIPP_KOHLSCHREIBER
],
'score': [(6, 3), (4, 6), (6, 4)],
'odds': {
NOVAK_DJOKOVIC: 1.16,
PHILIPP_KOHLSCHREIBER: 5.00
}
},
{
'round': 32,
'players': [
CAMERON_NORRIE,
MARTON_FUCSOVICS
],
'score': [(7, 6), (6, 3)],
'odds': {
CAMERON_NORRIE: 2.95,
MARTON_FUCSOVICS: 1.41
}
},
{
'round': 32,
'players': [
TAYLOR_FRITZ,
DIEGO_SCHWARTZMAN
],
'score': [(6, 4), (6, 2)],
'odds': {
TAYLOR_FRITZ: 5.00,
DIEGO_SCHWARTZMAN: 1.16
}
},
{
'round': 32,
'players': [
GRIGOR_DIMITROV,
JAN_LENNARD_STRUFF
],
'score': [(7, 6), (6, 4)],
'odds': {
GRIGOR_DIMITROV: 1.67,
JAN_LENNARD_STRUFF: 2.20
}
},
{
'round': 32,
'players': [
DUSAN_LAJOVIC,
DAVID_GOFFIN
],
'score': [(6, 3), (6, 4)],
'odds': {
DUSAN_LAJOVIC: 3.61,
DAVID_GOFFIN: 1.27
}
},
{
'round': 32,
'players': [
FABIO_FOGNINI,
GILLES_SIMON
],
'score': [],
'retired': True,
'odds': {
FABIO_FOGNINI: 1.91,
GILLES_SIMON: 1.87
}
},
{
'round': 32,
'players': [
DANIIL_MEDVEDEV,
RADU_ALBOT
],
'score': [(6, 1), (6, 2)],
'odds': {
DANIIL_MEDVEDEV: 1.43,
RADU_ALBOT: 2.75
}
},
{
'round': 32,
'players': [
STEFANOS_TSITSIPAS,
MIKHAIL_KUKUSHKIN
],
'score': [(6, 3), (7, 5)],
'odds': {
STEFANOS_TSITSIPAS: 1.23,
MIKHAIL_KUKUSHKIN: 3.95
}
},
{
'round': 32,
'players': [
PIERRE_HUGUES_HERBERT,
KEI_NISHIKORI
],
'score': [(7, 5), (6, 4)],
'odds': {
PIERRE_HUGUES_HERBERT: 4.04,
KEI_NISHIKORI: 1.20
}
},
{
'round': 32,
'players': [
DOMINIC_THIEM,
MARTIN_KLIZAN
],
'score': [(6, 1), (6, 4)],
'odds': {
DOMINIC_THIEM: 1.21,
MARTIN_KLIZAN: 4.34
}
},
{
'round': 32,
'players': [
ALEXANDER_ZVEREV,
FELIX_AUGER_ALIASSIME
],
'score': [(6, 1), (6, 4)],
'odds': {
ALEXANDER_ZVEREV: 1.47,
FELIX_AUGER_ALIASSIME: 2.75
}
},
{
'round': 32,
'players': [
RAFAEL_NADAL,
ROBERTO_BAUTISTA_AGUT
],
'score': [(6, 1), (6, 1)],
'odds': {
RAFAEL_NADAL: 1.07,
ROBERTO_BAUTISTA_AGUT: 7.50
}
},
{
'round': 16,
'players': [
LORENZO_SONEGO,
CAMERON_NORRIE
],
'score': [(6, 2), (7, 5)],
'odds': {
LORENZO_SONEGO: 1.57,
CAMERON_NORRIE: 2.40
}
},
{
'round': 16,
'players': [
GUIDO_PELLA,
MARCO_CECCHINATO
],
'score': [(6, 4), (4, 6), (6, 4)],
'odds': {
GUIDO_PELLA: 2.49,
MARCO_CECCHINATO: 1.58
}
},
{
'round': 16,
'players': [
BORNA_CORIC,
PIERRE_HUGUES_HERBERT
],
'score': [(6, 4), (6, 2)],
'odds': {
BORNA_CORIC: 1.42,
PIERRE_HUGUES_HERBERT: 2.70
}
},
{
'round': 16,
'players': [
DANIIL_MEDVEDEV,
STEFANOS_TSITSIPAS
],
'score': [(6, 2), (1, 6), (6, 4)],
'odds': {
DANIIL_MEDVEDEV: 2.00,
STEFANOS_TSITSIPAS: 1.79
}
},
{
'round': 16,
'players': [
DUSAN_LAJOVIC,
DOMINIC_THIEM
],
'score': [(6, 3), (6, 3)],
'odds': {
DUSAN_LAJOVIC: 4.90,
DOMINIC_THIEM: 1.17
}
},
{
'round': 16,
'players': [
FABIO_FOGNINI,
ALEXANDER_ZVEREV
],
'score': [(7, 6), (6, 1)],
'odds': {
FABIO_FOGNINI: 4.25,
ALEXANDER_ZVEREV: 1.26
}
},
{
'round': 16,
'players': [
RAFAEL_NADAL,
GRIGOR_DIMITROV
],
'score': [(6, 4), (6, 1)],
'odds': {
RAFAEL_NADAL: 1.02,
GRIGOR_DIMITROV: 13.19
}
},
{
'round': 16,
'players': [
NOVAK_DJOKOVIC,
TAYLOR_FRITZ
],
'score': [(6, 3), (6, 0)],
'odds': {
NOVAK_DJOKOVIC: 1.06,
TAYLOR_FRITZ: 7.50
}
},
{
'round': 8,
'players': [
DUSAN_LAJOVIC,
LORENZO_SONEGO
],
'score': [(6, 4), (7, 5)],
'odds': {
DUSAN_LAJOVIC: 1.69,
LORENZO_SONEGO: 2.10
}
},
{
'round': 8,
'players': [
FABIO_FOGNINI,
BORNA_CORIC
],
'score': [(1, 6), (6, 3), (6, 2)],
'odds': {
FABIO_FOGNINI: 2.20,
BORNA_CORIC: 1.65
}
},
{
'round': 8,
'players': [
RAFAEL_NADAL,
GUIDO_PELLA
],
'score': [(7, 6), (6, 3)],
'odds': {
RAFAEL_NADAL: 1.02,
GUIDO_PELLA: 16.00
}
},
{
'round': 8,
'players': [
DANIIL_MEDVEDEV,
NOVAK_DJOKOVIC
],
'score': [(6, 3), (4, 6), (6, 2)],
'odds': {
DANIIL_MEDVEDEV: 4.40,
NOVAK_DJOKOVIC: 1.20
}
},
{
'round': 4,
'players': [
DUSAN_LAJOVIC,
DANIIL_MEDVEDEV
],
'score': [(7, 5), (6, 1)],
'odds': {
DUSAN_LAJOVIC: 2.75,
DANIIL_MEDVEDEV: 1.42
}
},
{
'round': 4,
'players': [
FABIO_FOGNINI,
RAFAEL_NADAL
],
'score': [(6, 4), (6, 2)],
'odds': {
FABIO_FOGNINI: 7.50,
RAFAEL_NADAL: 1.06
}
},
{
'round': 2,
'players': [
FABIO_FOGNINI,
DUSAN_LAJOVIC
],
'score': [(6, 3), (6, 4)],
'odds': {
FABIO_FOGNINI: 1.59,
DUSAN_LAJOVIC: 2.25
}
}
]
},
{
'location': BARCELONA,
'date': '2019-04-28',
'matches': [
{
'round': 512,
'players': [
GUILLERMO_GARCIA_LOPEZ,
CARLOS_BERLOCQ
],
'score': [(6, 4), (6, 3)],
},
{
'round': 512,
'players': [
PEDRO_SOUSA,
CARLOS_ALCARAZ_GARFIA
],
'score': [(6, 7), (6, 3), (6, 1)],
'odds': {
PEDRO_SOUSA: 1.27,
CARLOS_ALCARAZ_GARFIA: 3.02
}
},
{
'round': 512,
'players': [
ALEJANDRO_DAVIDOVICH_FOKINA,
DENIS_ISTOMIN
],
'score': [(6, 4), (6, 4)],
'odds': {
ALEJANDRO_DAVIDOVICH_FOKINA: 1.34,
DENIS_ISTOMIN: 2.70
}
},
{
'round': 512,
'players': [
ROBERTO_CARBALLES_BAENA,
PEDRO_MARTINEZ
],
'score': [(7, 5), (6, 1)],
'odds': {
ROBERTO_CARBALLES_BAENA: 1.36,
PEDRO_MARTINEZ: 2.85
}
},
{
'round': 512,
'players': [
ANTOINE_HOANG,
ANDREY_RUBLEV
],
'score': [(5, 7), (7, 6), (7, 6)],
'odds': {
ANTOINE_HOANG: 4.65,
ANDREY_RUBLEV: 1.18
}
},
{
'round': 512,
'players': [
MARCEL_GRANOLLERS,
DANIEL_EVANS
],
'score': [(6, 3), (4, 6), (7, 5)],
'odds': {
MARCEL_GRANOLLERS: 1.59,
DANIEL_EVANS: 2.25
}
},
{
'round': 512,
'players': [
GUIDO_ANDREOZZI,
TOMMY_ROBREDO
],
'score': [(3, 6), (6, 3), (7, 6)],
'odds': {
GUIDO_ANDREOZZI: 1.32,
TOMMY_ROBREDO: 3.30
}
},
{
'round': 512,
'players': [
NICOLAS_JARRY,
CHUN_HSIN_TSENG
],
'score': [(6, 4), (3, 6), (7, 6)],
'odds': {
NICOLAS_JARRY: 1.07,
CHUN_HSIN_TSENG: 6.25
}
},
{
'round': 512,
'players': [
ALBERT_RAMOS_VINOLAS,
ALEXEI_POPYRIN
],
'score': [(6, 2), (7, 6)],
'odds': {
ALBERT_RAMOS_VINOLAS: 1.34,
ALEXEI_POPYRIN: 3.10
}
},
{
'round': 512,
'players': [
HUGO_DELLIEN,
GREGOIRE_BARRERE
],
'score': [(6, 7), (7, 6), (7, 6)],
'odds': {
HUGO_DELLIEN: 1.67,
GREGOIRE_BARRERE: 2.10
}
},
{
'round': 512,
'players': [
FEDERICO_DELBONIS,
THIAGO_MONTEIRO
],
'score': [(7, 6), (6, 3)],
'odds': {
FEDERICO_DELBONIS: 1.24,
THIAGO_MONTEIRO: 3.81
}
},
{
'round': 512,
'players': [
DIEGO_SCHWARTZMAN,
JOZEF_KOVALIK
],
'score': [(6, 4), (6, 4)],
'odds': {
DIEGO_SCHWARTZMAN: 1.18,
JOZEF_KOVALIK: 4.52
}
},
{
'round': 256,
'players': [
PEDRO_SOUSA,
GUIDO_ANDREOZZI
],
'score': [(6, 4), (6, 2)],
},
{
'round': 256,
'players': [
MARCEL_GRANOLLERS,
NICOLAS_JARRY
],
'score': [(6, 7), (7, 5), (6, 4)],
'odds': {
MARCEL_GRANOLLERS: 1.77,
NICOLAS_JARRY: 1.91
}
},
{
'round': 256,
'players': [
ALBERT_RAMOS_VINOLAS,
ALEJANDRO_DAVIDOVICH_FOKINA
],
'score': [(7, 5), (7, 5)],
'odds': {
ALBERT_RAMOS_VINOLAS: 1.63,
ALEJANDRO_DAVIDOVICH_FOKINA: 2.20
}
},
{
'round': 256,
'players': [
HUGO_DELLIEN,
ANTOINE_HOANG
],
'score': [(6, 4), (6, 4)],
'odds': {
HUGO_DELLIEN: 1.64,
ANTOINE_HOANG: 2.05
}
},
{
'round': 256,
'players': [
FEDERICO_DELBONIS,
GUILLERMO_GARCIA_LOPEZ
],
'score': [(6, 3), (6, 0)],
'odds': {
FEDERICO_DELBONIS: 1.38,
GUILLERMO_GARCIA_LOPEZ: 2.77
}
},
{
'round': 256,
'players': [
DIEGO_SCHWARTZMAN,
ROBERTO_CARBALLES_BAENA
],
'score': [(6, 4), (6, 4)],
'odds': {
DIEGO_SCHWARTZMAN: 1.51,
ROBERTO_CARBALLES_BAENA: 2.46
}
},
{
'round': 64,
'players': [
FERNANDO_VERDASCO,
FELICIANO_LOPEZ
],
'score': [(6, 4), (6, 3)],
'odds': {
FERNANDO_VERDASCO: 1.48,
FELICIANO_LOPEZ: 2.67
}
},
{
'round': 64,
'players': [
JAN_LENNARD_STRUFF,
HUGO_DELLIEN
],
'score': [(6, 3), (6, 1)],
'odds': {
JAN_LENNARD_STRUFF: 1.50,
HUGO_DELLIEN: 2.51
}
},
{
'round': 64,
'players': [
DIEGO_SCHWARTZMAN,
YOSHIHITO_NISHIOKA
],
'score': [(4, 6), (6, 4), (6, 2)],
'odds': {
DIEGO_SCHWARTZMAN: 1.25,
YOSHIHITO_NISHIOKA: 3.80
}
},
{
'round': 64,
'players': [
BENOIT_PAIRE,
JUAN_IGNACIO_LONDERO
],
'score': [(7, 5), (6, 2)],
'odds': {
BENOIT_PAIRE: 1.54,
JUAN_IGNACIO_LONDERO: 2.40
}
},
{
'round': 64,
'players': [
JAUME_MUNAR,
PEDRO_SOUSA
],
'score': [(2, 6), (6, 4), (6, 0)],
'odds': {
JAUME_MUNAR: 1.30,
PEDRO_SOUSA: 3.60
}
},
{
'round': 64,
'players': [
MACKENZIE_MCDONALD,
TARO_DANIEL
],
'score': [(6, 2), (6, 2)],
'odds': {
MACKENZIE_MCDONALD: 3.50,
TARO_DANIEL: 1.31
}
},
{
'round': 64,
'players': [
LEONARDO_MAYER,
MARIUS_COPIL
],
'score': [(6, 3), (6, 7), (7, 5)],
'odds': {
LEONARDO_MAYER: 1.27,
MARIUS_COPIL: 3.52
}
},
{
'round': 64,
'players': [
NICOLAS_JARRY,
MARCEL_GRANOLLERS
],
'score': [(7, 5), (4, 6), (6, 4)],
'odds': {
NICOLAS_JARRY: 1.83,
MARCEL_GRANOLLERS: 1.89
}
},
{
'round': 64,
'players': [
MARTON_FUCSOVICS,
DENIS_KUDLA
],
'score': [(6, 4), (6, 1)],
'odds': {
MARTON_FUCSOVICS: 1.18,
DENIS_KUDLA: 4.80
}
},
{
'round': 64,
'players': [
TAYLOR_FRITZ,
REILLY_OPELKA
],
'score': [(6, 3), (6, 4)],
'odds': {
TAYLOR_FRITZ: 1.72,
REILLY_OPELKA: 1.97
}
},
{
'round': 64,
'players': [
ALBERT_RAMOS_VINOLAS,
CAMERON_NORRIE
],
'score': [(6, 2), (6, 2)],
'odds': {
ALBERT_RAMOS_VINOLAS: 1.56,
CAMERON_NORRIE: 2.35
}
},
{
'round': 64,
'players': [
GUIDO_PELLA,
JOAO_SOUSA
],
'score': [(3, 6), (7, 6), (6, 2)],
'odds': {
GUIDO_PELLA: 1.36,
JOAO_SOUSA: 3.15
}
},
{
'round': 64,
'players': [
NICOLA_KUHN,
FEDERICO_DELBONIS
],
'score': [(7, 6), (4, 6), (6, 2)],
'odds': {
NICOLA_KUHN: 4.90,
FEDERICO_DELBONIS: 1.16
}
},
{
'round': 64,
'players': [
MALEK_JAZIRI,
GUIDO_ANDREOZZI
],
'score': [(6, 7), (4, 6), (6, 2)],
'odds': {
MALEK_JAZIRI: 2.70,
GUIDO_ANDREOZZI: 1.44
}
},
{
'round': 64,
'players': [
CHRISTIAN_GARIN,
MARTIN_KLIZAN
],
'score': [(7, 5), (6, 4)],
'odds': {
CHRISTIAN_GARIN: 1.71,
MARTIN_KLIZAN: 2.10
}
},
{
'round': 64,
'players': [
DAVID_FERRER,
MISCHA_ZVEREV
],
'score': [(6, 3), (6, 1)],
'odds': {
DAVID_FERRER: 1.14,
MISCHA_ZVEREV: 6.00
}
},
{
'round': 32,
'players': [
JAUME_MUNAR,
FRANCES_TIAFOE
],
'score': [(6, 4), (6, 3)],
'odds': {
JAUME_MUNAR: 1.43,
FRANCES_TIAFOE: 2.70
}
},
{
'round': 32,
'players': [
JAN_LENNARD_STRUFF,
DAVID_GOFFIN
],
'score': [(7, 6), (6, 3)],
'odds': {
JAN_LENNARD_STRUFF: 2.40,
DAVID_GOFFIN: 1.56
}
},
{
'round': 32,
'players': [
STEFANOS_TSITSIPAS,
MARTON_FUCSOVICS
],
'score': [(6, 3), (6, 4)],
'odds': {
STEFANOS_TSITSIPAS: 1.31,
MARTON_FUCSOVICS: 3.44
}
},
{
'round': 32,
'players': [
KEI_NISHIKORI,
TAYLOR_FRITZ
],
'score': [(7, 5), (6, 2)],
'odds': {
KEI_NISHIKORI: 1.27,
TAYLOR_FRITZ: 3.65
}
},
{
'round': 32,
'players': [
DOMINIC_THIEM,
DIEGO_SCHWARTZMAN
],
'score': [(6, 3), (6, 3)],
'odds': {
DOMINIC_THIEM: 1.31,
DIEGO_SCHWARTZMAN: 3.45
}
},
{
'round': 32,
'players': [
NICOLAS_JARRY,
ALEXANDER_ZVEREV
],
'score': [(3, 6), (7, 5), (7, 6)],
'odds': {
NICOLAS_JARRY: 4.65,
ALEXANDER_ZVEREV: 1.20
}
},
{
'round': 32,
'players': [
ROBERTO_CARBALLES_BAENA,
NICOLA_KUHN
],
'score': [(6, 7), (6, 4), (6, 2)],
'odds': {
ROBERTO_CARBALLES_BAENA: 1.37,
NICOLA_KUHN: 3.20
}
},
{
'round': 32,
'players': [
FELIX_AUGER_ALIASSIME,
MALEK_JAZIRI
],
'score': [(6, 3), (7, 6)],
'odds': {
FELIX_AUGER_ALIASSIME: 1.19,
MALEK_JAZIRI: 4.20
}
},
{
'round': 32,
'players': [
DAVID_FERRER,
LUCAS_POUILLE
],
'score': [(6, 3), (6, 1)],
'odds': {
DAVID_FERRER: 1.62,
LUCAS_POUILLE: 2.30
}
},
{
'round': 32,
'players': [
GRIGOR_DIMITROV,
FERNANDO_VERDASCO
],
'score': [(6, 2), (6, 7), (6, 3)],
'odds': {
GRIGOR_DIMITROV: 1.65,
FERNANDO_VERDASCO: 2.20
}
},
{
'round': 32,
'players': [
BENOIT_PAIRE,
PABLO_CARRENO_BUSTA
],
'score': [(6, 4), (6, 7), (6, 1)],
'odds': {
BENOIT_PAIRE: 1.42,
PABLO_CARRENO_BUSTA: 2.85
}
},
{
'round': 32,
'players': [
MACKENZIE_MCDONALD,
GILLES_SIMON
],
'score': [(6, 3), (6, 2)],
'odds': {
MACKENZIE_MCDONALD: 2.78,
GILLES_SIMON: 1.43
}
},
{
'round': 32,
'players': [
CHRISTIAN_GARIN,
DENIS_SHAPOVALOV
],
'score': [(7, 5), (6, 2)],
'odds': {
CHRISTIAN_GARIN: 1.65,
DENIS_SHAPOVALOV: 2.10
}
},
{
'round': 32,
'players': [
DANIIL_MEDVEDEV,
ALBERT_RAMOS_VINOLAS
],
'score': [(6, 3), (2, 6), (6, 1)],
'odds': {
DANIIL_MEDVEDEV: 1.44,
ALBERT_RAMOS_VINOLAS: 2.60
}
},
{
'round': 32,
'players': [
GUIDO_PELLA,
KAREN_KHACHANOV
],
'score': [(6, 2), (7, 6)],
'odds': {
GUIDO_PELLA: 1.87,
KAREN_KHACHANOV: 1.87
}
},
{
'round': 32,
'players': [
RAFAEL_NADAL,
LEONARDO_MAYER
],
'score': [(6, 7), (6, 4), (6, 2)],
'odds': {
RAFAEL_NADAL: 1.02,
LEONARDO_MAYER: 13.84
}
},
{
'round': 16,
'players': [
GUIDO_PELLA,
BENOIT_PAIRE
],
'score': [(7, 5), (6, 3)],
'odds': {
GUIDO_PELLA: 1.61,
BENOIT_PAIRE: 2.20
}
},
{
'round': 16,
'players': [
ROBERTO_CARBALLES_BAENA,
CHRISTIAN_GARIN
],
'score': [(6, 4), (7, 6)],
'odds': {
ROBERTO_CARBALLES_BAENA: 2.76,
CHRISTIAN_GARIN: 1.42
}
},
{
'round': 16,
'players': [
NICOLAS_JARRY,
GRIGOR_DIMITROV
],
'score': [(2, 6), (6, 4), (7, 6)],
'odds': {
NICOLAS_JARRY: 2.79,
GRIGOR_DIMITROV: 1.43
}
},
{
'round': 16,
'players': [
DANIIL_MEDVEDEV,
MACKENZIE_MCDONALD
],
'score': [(6, 3), (6, 2)],
'odds': {
DANIIL_MEDVEDEV: 1.17,
MACKENZIE_MCDONALD: 4.60
}
},
{
'round': 16,
'players': [
JAN_LENNARD_STRUFF,
STEFANOS_TSITSIPAS
],
'score': [(6, 4), (3, 6), (6, 2)],
'odds': {
JAN_LENNARD_STRUFF: 3.70,
STEFANOS_TSITSIPAS: 1.28
}
},
{
'round': 16,
'players': [
KEI_NISHIKORI,
FELIX_AUGER_ALIASSIME
],
'score': [(6, 1), (6, 3)],
'odds': {
KEI_NISHIKORI: 1.63,
FELIX_AUGER_ALIASSIME: 2.25
}
},
{
'round': 16,
'players': [
DOMINIC_THIEM,
JAUME_MUNAR
],
'score': [(7, 5), (6, 1)],
'odds': {
DOMINIC_THIEM: 1.27,
JAUME_MUNAR: 3.68
}
},
{
'round': 16,
'players': [
RAFAEL_NADAL,
DAVID_FERRER
],
'score': [(6, 3), (6, 3)],
'odds': {
RAFAEL_NADAL: 1.14,
DAVID_FERRER: 5.00
}
},
{
'round': 8,
'players': [
DANIIL_MEDVEDEV,
NICOLAS_JARRY
],
'score': [(6, 3), (6, 4)],
'odds': {
DANIIL_MEDVEDEV: 1.29,
NICOLAS_JARRY: 3.83
}
},
{
'round': 8,
'players': [
KEI_NISHIKORI,
ROBERTO_CARBALLES_BAENA
],
'score': [(6, 4), (7, 5)],
'odds': {
KEI_NISHIKORI: 1.30,
ROBERTO_CARBALLES_BAENA: 3.84
}
},
{
'round': 8,
'players': [
DOMINIC_THIEM,
GUIDO_PELLA
],
'score': [(7, 5), (6, 2)],
'odds': {
DOMINIC_THIEM: 1.36,
GUIDO_PELLA: 3.15
}
},
{
'round': 8,
'players': [
RAFAEL_NADAL,
JAN_LENNARD_STRUFF
],
'score': [(7, 5), (7, 5)],
'odds': {
RAFAEL_NADAL: 1.06,
JAN_LENNARD_STRUFF: 8.50
}
},
{
'round': 4,
'players': [
DANIIL_MEDVEDEV,
KEI_NISHIKORI
],
'score': [(6, 4), (3, 6), (7, 5)],
'odds': {
DANIIL_MEDVEDEV: 1.95,
KEI_NISHIKORI: 1.80
}
},
{
'round': 4,
'players': [
DOMINIC_THIEM,
RAFAEL_NADAL
],
'score': [(6, 4), (6, 4)],
'odds': {
DOMINIC_THIEM: 3.20,
RAFAEL_NADAL: 1.33
}
},
{
'round': 2,
'players': [
DOMINIC_THIEM,
DANIIL_MEDVEDEV
],
'score': [(6, 4), (6, 0)]
}
]
},
{
'location': BUDAPEST,
'date': '2019-04-22',
'matches': [
{
'round': 512,
'players': [
EGOR_GERASIMOV,
FILLIPPO_BALDI
],
'score': [(6, 3), (6, 2)],
'odds': {
EGOR_GERASIMOV: 2.45,
FILLIPPO_BALDI: 1.43
}
},
{
'round': 512,
'players': [
JANNIK_SINNER,
LUKAS_ROSOL
],
'score': [(6, 2), (3, 0)],
'retired': True,
'odds': {
JANNIK_SINNER: 2.31,
LUKAS_ROSOL: 1.56
}
},
{
'round': 512,
'players': [
MATTHIAS_BACHINGER,
FILIP_HORANSKY
],
'score': [(6, 1), (6, 4)],
'odds': {
MATTHIAS_BACHINGER: 2.44,
FILIP_HORANSKY: 1.53
}
},
{
'round': 512,
'players': [
SERGIY_STAKHOVSKY,
DANIEL_BRANDS
],
'score': [(7, 5), (6, 1)],
'odds': {
SERGIY_STAKHOVSKY: 1.53,
DANIEL_BRANDS: 2.48
}
},
{
'round': 512,
'players': [
YANNICK_MADEN,
ZSOMBOR_PIROS
],
'score': [(6, 4), (1, 6), (6, 3)],
'odds': {
YANNICK_MADEN: 1.32,
ZSOMBOR_PIROS: 3.30
}
},
{
'round': 512,
'players': [
FILIP_KRAJINOVIC,
ROBERTO_MARCORA
],
'score': [(7, 5), (6, 2)],
'odds': {
FILIP_KRAJINOVIC: 1.13,
ROBERTO_MARCORA: 5.43
}
},
{
'round': 512,
'players': [
LLOYD_HARRIS,
DANIEL_GIMENO_TRAVER
],
'score': [(7, 6), (6, 3)],
'odds': {
LLOYD_HARRIS: 1.69,
DANIEL_GIMENO_TRAVER: 2.05
}
},
{
'round': 512,
'players': [
MIOMIR_KECMANOVIC,
ALESSANDRO_GIANNESSI
],
'score': [(6, 3), (6, 4)],
'odds': {
MIOMIR_KECMANOVIC: 1.57,
ALESSANDRO_GIANNESSI: 2.34
}
},
{
'round': 256,
'players': [
YANNICK_MADEN,
JANNIK_SINNER
],
'score': [(6, 3), (6, 4)],
'odds': {
YANNICK_MADEN: 1.46,
JANNIK_SINNER: 2.45
}
},
{
'round': 256,
'players': [
FILIP_KRAJINOVIC,
EGOR_GERASIMOV
],
'score': [(7, 6), (6, 1)],
'odds': {
FILIP_KRAJINOVIC: 1.15,
EGOR_GERASIMOV: 5.10
}
},
{
'round': 256,
'players': [
LLOYD_HARRIS,
MATTHIAS_BACHINGER
],
'score': [(7, 5), (6, 4)],
'odds': {
LLOYD_HARRIS: 1.77,
MATTHIAS_BACHINGER: 1.91
}
},
{
'round': 256,
'players': [
MIOMIR_KECMANOVIC,
SERGIY_STAKHOVSKY
],
'score': [(6, 4), (6, 4)],
},
{
'round': 32,
'players': [
FILIP_KRAJINOVIC,
ANDREAS_SEPPI
],
'score': [(6, 2), (6, 7), (7, 5)],
'odds': {
FILIP_KRAJINOVIC: 1.44,
ANDREAS_SEPPI: 2.52
}
},
{
'round': 32,
'players': [
ALJAZ_BEDENE,
BERNARD_TOMIC
],
'score': [(7, 6), (6, 4)],
'odds': {
ALJAZ_BEDENE: 1.41,
BERNARD_TOMIC: 2.63
}
},
{
'round': 32,
'players': [
RADU_ALBOT,
SERGIY_STAKHOVSKY
],
'score': [(7, 5), (6, 4)],
'odds': {
RADU_ALBOT: 1.28,
SERGIY_STAKHOVSKY: 3.55
}
},
{
'round': 32,
'players': [
MATTEO_BERRETTINI,
MIKHAIL_KUKUSHKIN
],
'score': [(6, 4), (6, 4)],
'odds': {
MATTEO_BERRETTINI: 1.57,
MIKHAIL_KUKUSHKIN: 2.30
}
},
{
'round': 32,
'players': [
PIERRE_HUGUES_HERBERT,
EGOR_GERASIMOV
],
'score': [(6, 3), 96, 2],
},
{
'round': 32,
'players': [
ROBIN_HAASE,
THOMAS_FABBIANO
],
'score': [(6, 7), (6, 3), (6, 2)],
'odds': {
ROBIN_HAASE: 1.45,
THOMAS_FABBIANO: 2.60
}
},
{
'round': 32,
'players': [
PETER_GOJOWCZYK,
LLOYD_HARRIS
],
'score': [(7, 5), (6, 4)],
'odds': {
PETER_GOJOWCZYK: 2.27,
LLOYD_HARRIS: 1.63
}
},
{
'round': 32,
'players': [
ATTILA_BALAZS,
HUBERT_HURKACZ
],
'score': [(6, 3), (6, 4)],
'odds': {
ATTILA_BALAZS: 3.35,
HUBERT_HURKACZ: 1.32
}
},
{
'round': 32,
'players': [
JOHN_MILLMAN,
MIOMIR_KECMANOVIC
],
'score': [(6, 1), (6, 2)],
'odds': {
JOHN_MILLMAN: 2.20,
MIOMIR_KECMANOVIC: 1.69
}
},
{
'round': 32,
'players': [
LASLO_DJERE,
ERNESTS_GULBIS
],
'score': [(6, 4), (6, 7), (7, 6)],
'odds': {
LASLO_DJERE: 1.35,
ERNESTS_GULBIS: 2.90
}
},
{
'round': 32,
'players': [
JANNIK_SINNER,
MATE_VALKUSZ
],
'score': [(6, 2), (0, 6), (6, 4)],
'odds': {
JANNIK_SINNER: 1.83,
MATE_VALKUSZ: 1.83
}
},
{
'round': 32,
'players': [
PABLO_CUEVAS,
YANNICK_MADEN
],
'score': [(6, 3), (3, 6), (6, 4)],
'odds': {
PABLO_CUEVAS: 1.38,
YANNICK_MADEN: 3.05
}
},
{
'round': 16,
'players': [
PIERRE_HUGUES_HERBERT,
MATTHIAS_BACHINGER
],
'score': [(7, 5), (6, 2)],
'odds': {
PIERRE_HUGUES_HERBERT: 1.26,
MATTHIAS_BACHINGER: 3.65
}
},
{
'round': 16,
'players': [
ATTILA_BALAZS,
JOHN_MILLMAN
],
'score': [(6, 4), (2, 6), (6, 2)],
'odds': {
ATTILA_BALAZS: 2.71,
JOHN_MILLMAN: 1.43
}
},
{
'round': 16,
'players': [
MATTEO_BERRETTINI,
ALJAZ_BEDENE
],
'score': [(7, 6), (6, 2)],
'odds': {
MATTEO_BERRETTINI: 1.65,
ALJAZ_BEDENE: 2.20
}
},
{
'round': 16,
'players': [
FILIP_KRAJINOVIC,
RADU_ALBOT
],
'score': [(7, 5), (6, 4)],
'odds': {
FILIP_KRAJINOVIC: 1.37,
RADU_ALBOT: 3.00
}
},
{
'round': 16,
'players': [
LASLO_DJERE,
JANNIK_SINNER
],
'score': [(6, 3), (6, 1)],
'odds': {
LASLO_DJERE: 1.20,
JANNIK_SINNER: 4.55
}
},
{
'round': 16,
'players': [
NIKOLOZ_BASILASHVILI,
PETER_GOJOWCZYK
],
'score': [(6, 3), (0, 6), (6, 3)],
'odds': {
NIKOLOZ_BASILASHVILI: 1.42,
PETER_GOJOWCZYK: 2.90
}
},
{
'round': 16,
'players': [
BORNA_CORIC,
ROBIN_HAASE
],
'score': [(6, 3), (4, 6), (6, 4)],
'odds': {
BORNA_CORIC: 1.33,
ROBIN_HAASE: 3.25
}
},
{
'round': 16,
'players': [
PABLO_CUEVAS,
MARIN_CILIC
],
'score': [(5, 7), (7, 6), (7, 6)],
'odds': {
PABLO_CUEVAS: 2.25,
MARIN_CILIC: 1.61
}
},
{
'round': 8,
'players': [
PIERRE_HUGUES_HERBERT,
ATTILA_BALAZS
],
'score': [(6, 3), (6, 4)],
'odds': {
PIERRE_HUGUES_HERBERT: 1.55,
ATTILA_BALAZS: 2.45
}
},
{
'round': 8,
'players': [
MATTEO_BERRETTINI,
PABLO_CUEVAS
],
'score': [(6, 3), (1, 6), (6, 3)],
'odds': {
MATTEO_BERRETTINI: 1.59,
PABLO_CUEVAS: 2.30
}
},
{
'round': 8,
'players': [
LASLO_DJERE,
NIKOLOZ_BASILASHVILI
],
'score': [(3, 6), (6, 2), (6, 3)],
'odds': {
LASLO_DJERE: 1.61,
NIKOLOZ_BASILASHVILI: 2.20
}
},
{
'round': 8,
'players': [
FILIP_KRAJINOVIC,
BORNA_CORIC
],
'score': [(6, 4), (7, 5)],
'odds': {
FILIP_KRAJINOVIC: 2.05,
BORNA_CORIC: 1.74
}
},
{
'round': 4,
'players': [
FILIP_KRAJINOVIC,
PIERRE_HUGUES_HERBERT
],
'score': [(6, 2), (6, 2)],
'odds': {
FILIP_KRAJINOVIC: 1.41,
PIERRE_HUGUES_HERBERT: 2.95
}
},
{
'round': 4,
'players': [
MATTEO_BERRETTINI,
LASLO_DJERE
],
'score': [(6, 4), (6, 2)],
'odds': {
MATTEO_BERRETTINI: 1.67,
LASLO_DJERE: 2.15
}
},
{
'round': 2,
'players': [
MATTEO_BERRETTINI,
FILIP_KRAJINOVIC
],
'score': [(4, 6), (6, 3), (6, 1)],
'odds': {
MATTEO_BERRETTINI: 2.10,
FILIP_KRAJINOVIC: 1.69
}
}
]
},
{
'location': MUNICH,
'date': '2019-04-29',
'matches': [
{
'round': 512,
'players': [
DENIS_ISTOMIN,
CEDRIC_MARCEL_STEBE
],
'score': [(6, 3), (7, 6)],
'odds': {
DENIS_ISTOMIN: 1.63,
CEDRIC_MARCEL_STEBE: 2.20
}
},
{
'round': 512,
'players': [
YANNICK_MADEN,
THOMAS_FABBIANO
],
'score': [(4, 6), (6, 2), (6, 2)],
'odds': {
YANNICK_MADEN: 1.42,
THOMAS_FABBIANO: 2.55
}
},
{
'round': 512,
'players': [
ANDREY_RUBLEV,
MATTHIAS_BACHINGER
],
'score': [(7, 6), (6, 2)],
'odds': {
ANDREY_RUBLEV: 1.25,
MATTHIAS_BACHINGER: 3.80
}
},
{
'round': 512,
'players': [
HENRI_LAAKSONEN,
MIOMIR_KECMANOVIC
],
'score': [(4, 6), (6, 1), (6, 4)],
'odds': {
HENRI_LAAKSONEN: 2.25,
MIOMIR_KECMANOVIC: 1.61
}
},
{
'round': 512,
'players': [
LUKAS_ROSOL,
PETER_GOJOWCZYK
],
'score': [(6, 4), (2, 6), (6, 4)],
'odds': {
LUKAS_ROSOL: 2.61,
PETER_GOJOWCZYK: 1.47
}
},
{
'round': 512,
'players': [
THIAGO_MONTEIRO,
ALBERT_RAMOS_VINOLAS
],
'score': [(6, 3), (2, 6), (6, 2)],
'odds': {
THIAGO_MONTEIRO: 3.27,
ALBERT_RAMOS_VINOLAS: 1.32
}
},
{
'round': 512,
'players': [
PRAJNESH_GUNNESWARAN,
ALEXANDER_ERLER
],
'score': [(3, 6), (7, 6), (7, 5)],
'odds': {
PRAJNESH_GUNNESWARAN: 1.05,
ALEXANDER_ERLER: 10.00
}
},
{
'round': 512,
'players': [
LORENZO_SONEGO,
YANNICK_HANFMANN
],
'score': [(7, 6), (6, 7), (6, 3)],
'odds': {
LORENZO_SONEGO: 1.23,
YANNICK_HANFMANN: 3.85
}
},
{
'round': 256,
'players': [
YANNICK_MADEN,
LUKAS_ROSOL
],
'score': [(6, 2), (6, 2)],
'odds': {
YANNICK_MADEN: 1.32,
LUKAS_ROSOL: 3.12
}
},
{
'round': 256,
'players': [
THIAGO_MONTEIRO,
ANDREY_RUBLEV
],
'score': [(6, 3), (6, 7), (6, 4)],
'odds': {
THIAGO_MONTEIRO: 2.70,
ANDREY_RUBLEV: 1.43
}
},
{
'round': 256,
'players': [
DENIS_ISTOMIN,
PRAJNESH_GUNNESWARAN
],
'score': [(4, 6), (6, 2), (6, 2)],
'odds': {
DENIS_ISTOMIN: 1.67,
PRAJNESH_GUNNESWARAN: 1.93
}
},
{
'round': 256,
'players': [
LORENZO_SONEGO,
HENRI_LAAKSONEN
],
'score': [(6, 0), (5, 7), (7, 6)],
'odds': {
LORENZO_SONEGO: 1.38,
HENRI_LAAKSONEN: 2.73
}
},
{
'round': 32,
'players': [
TARO_DANIEL,
UGO_HUMBERT,
],
'score': [(6, 4), (6, 4)],
'odds': {
TARO_DANIEL: 1.65,
UGO_HUMBERT: 2.15
}
},
{
'round': 32,
'players': [
MARTON_FUCSOVICS,
LORENZO_SONEGO
],
'score': [(7, 5), (4, 6), (7, 6)],
'odds': {
MARTON_FUCSOVICS: 1.91,
LORENZO_SONEGO: 1.80
}
},
{
'round': 32,
'players': [
THIAGO_MONTEIRO,
JAN_LENNARD_STRUFF
],
'score': [(6, 1), (6, 1)],
'odds': {
THIAGO_MONTEIRO: 3.50,
JAN_LENNARD_STRUFF: 1.29
}
},
{
'round': 32,
'players': [
RUDOLF_MOLLEKER,
MARIUS_COPIL
],
'score': [(7, 6), (4, 6), (6, 4)],
'odds': {
RUDOLF_MOLLEKER: 1.77,
MARIUS_COPIL: 1.97
}
},
{
'round': 32,
'players': [
JUAN_IGNACIO_LONDERO,
MAXIMILIAN_MARTERER
],
'score': [(6, 2), (4, 6), (6, 2)],
'odds': {
JUAN_IGNACIO_LONDERO: 1.50,
MAXIMILIAN_MARTERER: 2.53
}
},
{
'round': 32,
'players': [
PHILIPP_KOHLSCHREIBER,
ANDREAS_SEPPI
],
'score': [(6, 2), (7, 5)],
'odds': {
PHILIPP_KOHLSCHREIBER: 1.32,
ANDREAS_SEPPI: 3.30
}
},
{
'round': 32,
'players': [
MARTIN_KLIZAN,
ERNESTS_GULBIS
],
'score': [(6, 3), (7, 5)],
'odds': {
MARTIN_KLIZAN: 1.50,
ERNESTS_GULBIS: 2.54
}
},
{
'round': 32,
'players': [
CHRISTIAN_GARIN,
YANNICK_MADEN
],
'score': [(6, 4), (6, 2)],
'odds': {
CHRISTIAN_GARIN: 1.50,
YANNICK_MADEN: 2.55
}
},
{
'round': 32,
'players': [
MATTEO_BERRETTINI,
DENIS_ISTOMIN
],
'score': [(7, 6), (6, 3)],
'odds': {
MATTEO_BERRETTINI: 1.33,
DENIS_ISTOMIN: 3.10
}
},
{
'round': 32,
'players': [
GUIDO_PELLA,
MISCHA_ZVEREV
],
'score': [(6, 2), (6, 1)],
'odds': {
GUIDO_PELLA: 1.11,
MISCHA_ZVEREV: 7.04
}
},
{
'round': 32,
'players': [
DIEGO_SCHWARTZMAN,
BENOIT_PAIRE
],
'score': [(6, 4), (1, 6), (6, 1)],
'odds': {
DIEGO_SCHWARTZMAN: 1.60,
BENOIT_PAIRE: 2.30
}
},
{
'round': 32,
'players': [
DENIS_KUDLA,
KYLE_EDMUND
],
'score': [(6, 4), (6, 3)],
'odds': {
DENIS_KUDLA: 6.00,
KYLE_EDMUND: 1.11
}
},
{
'round': 16,
'players': [
MARTON_FUCSOVICS,
THIAGO_MONTEIRO
],
'score': [(6, 7), (6, 4), (6, 3)],
'odds': {
MARTON_FUCSOVICS: 1.57,
THIAGO_MONTEIRO: 2.30
}
},
{
'round': 16,
'players': [
CHRISTIAN_GARIN,
DIEGO_SCHWARTZMAN
],
'score': [(6, 1), (7, 5)],
'odds': {
CHRISTIAN_GARIN: 2.02,
DIEGO_SCHWARTZMAN: 1.74
}
},
{
'round': 16,
'players': [
MARCO_CECCHINATO,
MARTIN_KLIZAN
],
'score': [(6, 1), (6, 3)],
'odds': {
MARCO_CECCHINATO: 1.65,
MARTIN_KLIZAN: 2.31
}
},
{
'round': 16,
'players': [
ALEXANDER_ZVEREV,
JUAN_IGNACIO_LONDERO
],
'score': [(7, 5), (6, 1)],
'odds': {
ALEXANDER_ZVEREV: 1.18,
JUAN_IGNACIO_LONDERO: 5.15
}
},
{
'round': 16,
'players': [
MATTEO_BERRETTINI,
DENIS_KUDLA
],
'score': [(7, 5), (6, 3)],
'odds': {
MATTEO_BERRETTINI: 1.26,
DENIS_KUDLA: 3.85
}
},
{
'round': 16,
'players': [
GUIDO_PELLA,
TARO_DANIEL
],
'score': [(6, 1), (6, 7), (6, 3)],
'odds': {
GUIDO_PELLA: 1.17,
TARO_DANIEL: 5.00
}
},
{
'round': 16,
'players': [
ROBERTO_BAUTISTA_AGUT,
RUDOLF_MOLLEKER
],
'score': [(6, 4), (6, 2)],
'odds': {
ROBERTO_BAUTISTA_AGUT: 1.22,
RUDOLF_MOLLEKER: 4.10
}
},
{
'round': 16,
'players': [
PHILIPP_KOHLSCHREIBER,
KAREN_KHACHANOV
],
'score': [(7, 6), (6, 4)],
'odds': {
PHILIPP_KOHLSCHREIBER: 1.50,
KAREN_KHACHANOV: 2.35
}
},
{
'round': 8,
'players': [
MATTEO_BERRETTINI,
PHILIPP_KOHLSCHREIBER
],
'score': [(4, 6), (7, 5), (6, 4)],
'odds': {
MATTEO_BERRETTINI: 2.20,
PHILIPP_KOHLSCHREIBER: 1.65
}
},
{
'round': 8,
'players': [
ROBERTO_BAUTISTA_AGUT,
GUIDO_PELLA
],
'score': [(4, 6), (6, 4), (6, 0)],
'odds': {
ROBERTO_BAUTISTA_AGUT: 1.79,
GUIDO_PELLA: 1.95
}
},
{
'round': 8,
'players': [
MARCO_CECCHINATO,
MARTON_FUCSOVICS
],
'score': [(1, 6), (7, 5), (7, 5)],
'odds': {
MARCO_CECCHINATO: 1.57,
MARTON_FUCSOVICS: 2.41
}
},
{
'round': 8,
'players': [
CHRISTIAN_GARIN,
ALEXANDER_ZVEREV
],
'score': [(6, 4), (5, 7), (7, 5)],
'odds': {
CHRISTIAN_GARIN: 3.28,
ALEXANDER_ZVEREV: 1.37
}
},
{
'round': 4,
'players': [
CHRISTIAN_GARIN,
MARCO_CECCHINATO
],
'score': [(6, 2), (6, 4)],
'odds': {
CHRISTIAN_GARIN: 1.92,
MARCO_CECCHINATO: 1.83
}
},
{
'round': 4,
'players': [
MATTEO_BERRETTINI,
ROBERTO_BAUTISTA_AGUT
],
'score': [(6, 4), (6, 2)],
'odds': {
MATTEO_BERRETTINI: 2.13,
ROBERTO_BAUTISTA_AGUT: 1.69
}
},
{
'round': 2,
'players': [
CHRISTIAN_GARIN,
MATTEO_BERRETTINI
],
'score': [(6, 1), (3, 6), (7, 6)],
'odds': {
CHRISTIAN_GARIN: 1.86,
MATTEO_BERRETTINI: 1.95
}
},
]
},
{
'location': ESTORIL,
'date': '2019-04-29',
'matches': [
{
'round': 512,
'players': [
SALVATORE_CARUSO,
PEDRO_MARTINEZ
],
'score': [(6, 3), (5, 7), (7, 6)],
'odds': {
SALVATORE_CARUSO: 1.83,
PEDRO_MARTINEZ: 1.83
}
},
{
'round': 512,
'players': [
SIMONE_BOLELLI,
EGOR_GERASIMOV
],
'score': [(6, 2), (7, 6)],
'odds': {
SIMONE_BOLELLI: 1.41,
EGOR_GERASIMOV: 2.66
}
},
{
'round': 512,
'players': [
ALEJANDRO_DAVIDOVICH_FOKINA,
BJORN_FRATANGELO
],
'score': [(6, 2), (6, 4)],
'odds': {
ALEJANDRO_DAVIDOVICH_FOKINA: 1.38,
BJORN_FRATANGELO: 2.70
}
},
{
'round': 512,
'players': [
FILLIPPO_BALDI,
JOZEF_KOVALIK
],
'score': [(4, 6), (6, 3), (6, 2)],
'odds': {
FILLIPPO_BALDI: 2.40,
JOZEF_KOVALIK: 1.56
}
},
{
'round': 512,
'players': [
ALEXEI_POPYRIN,
GASTAO_ELIAS
],
'score': [(7, 5), (7, 6)],
'odds': {
ALEXEI_POPYRIN: 1.58,
GASTAO_ELIAS: 2.25
}
},
{
'round': 512,
'players': [
JOAO_DOMINGUES,
ELIAS_YMER
],
'score': [(6, 3), (7, 6)],
'odds': {
JOAO_DOMINGUES: 1.94,
ELIAS_YMER: 1.74
}
},
{
'round': 512,
'players': [
DANIEL_EVANS,
LORENZO_GIUSTINO
],
'score': [(6, 3), (7, 5)],
'odds': {
DANIEL_EVANS: 1.87,
LORENZO_GIUSTINO: 1.80
}
},
{
'round': 512,
'players': [
PABLO_CUEVAS,
DANIEL_BRANDS
],
'score': [(6, 1), (7, 6)],
'odds': {
PABLO_CUEVAS: 4.65,
DANIEL_BRANDS: 4.65
}
},
{
'round': 256,
'players': [
JOAO_DOMINGUES,
FILLIPPO_BALDI
],
'score': [(6, 2), (6, 4)],
'odds': {
JOAO_DOMINGUES: 1.40,
FILLIPPO_BALDI: 2.75
}
},
{
'round': 256,
'players': [
ALEXEI_POPYRIN,
SIMONE_BOLELLI
],
'score': [(2, 6), (6, 3), (6, 4)],
'odds': {
ALEXEI_POPYRIN: 2.21,
SIMONE_BOLELLI: 1.59
}
},
{
'round': 256,
'players': [
ALEJANDRO_DAVIDOVICH_FOKINA,
DANIEL_EVANS
],
'score': [(3, 6), (6, 1), (6, 4)],
'odds': {
ALEJANDRO_DAVIDOVICH_FOKINA: 1.46,
DANIEL_EVANS: 2.40
}
},
{
'round': 256,
'players': [
SALVATORE_CARUSO,
PABLO_CUEVAS
],
'score': [(6, 4), (5, 7), (6, 4)],
},
{
'round': 32,
'players': [
REILLY_OPELKA,
PEDRO_SOUSA
],
'score': [(7, 6), (6, 4)],
'odds': {
REILLY_OPELKA: 2.15,
PEDRO_SOUSA: 1.71
}
},
{
'round': 32,
'players': [
YOSHIHITO_NISHIOKA,
MACKENZIE_MCDONALD
],
'score': [(6, 2), (6, 4)],
'odds': {
YOSHIHITO_NISHIOKA: 1.71,
MACKENZIE_MCDONALD: 2.10
}
},
{
'round': 32,
'players': [
GUIDO_ANDREOZZI,
HUGO_DELLIEN
],
'score': [(6, 3), (6, 3)],
'odds': {
GUIDO_ANDREOZZI: 1.91,
HUGO_DELLIEN: 1.87
}
},
{
'round': 32,
'players': [
JOAO_DOMINGUES,
ALEX_DE_MINAUR
],
'score': [(6, 2), (2, 6), (6, 2)],
'odds': {
JOAO_DOMINGUES: 2.00,
ALEX_DE_MINAUR: 1.74
}
},
{
'round': 32,
'players': [
JOAO_SOUSA,
ALEXEI_POPYRIN
],
'score': [(6, 4), (2, 6), (6, 2)],
'odds': {
JOAO_SOUSA: 1.45,
ALEXEI_POPYRIN: 2.55
}
},
{
'round': 32,
'players': [
JOHN_MILLMAN,
BERNARD_TOMIC
],
'score': [(6, 3), (6, 0)],
'odds': {
JOHN_MILLMAN: 1.39,
BERNARD_TOMIC: 2.85
}
},
{
'round': 32,
'players': [
MALEK_JAZIRI,
NICOLAS_JARRY
],
'score': [(6, 3), (3, 6), (6, 4)],
'odds': {
MALEK_JAZIRI: 3.20,
NICOLAS_JARRY: 1.38
}
},
{
'round': 32,
'players': [
ALEJANDRO_DAVIDOVICH_FOKINA,
TAYLOR_FRITZ
],
'score': [(7, 6), (6, 4)],
'odds': {
ALEJANDRO_DAVIDOVICH_FOKINA: 1.67,
TAYLOR_FRITZ: 2.00
}
},
{
'round': 32,
'players': [
PABLO_CUEVAS,
SALVATORE_CARUSO
],
'score': [(6, 2), (6, 2)],
'odds': {
PABLO_CUEVAS: 3.25,
SALVATORE_CARUSO: 3.25
}
},
{
'round': 32,
'players': [
FRANCES_TIAFOE,
MIKHAIL_KUKUSHKIN
],
'score': [(6, 3), (7, 5)],
'odds': {
FRANCES_TIAFOE: 1.67,
MIKHAIL_KUKUSHKIN: 2.05
}
},
{
'round': 32,
'players': [
JEREMY_CHARDY,
PABLO_CARRENO_BUSTA
],
'score': [(5, 7), (6, 1), (6, 2)],
'odds': {
JEREMY_CHARDY: 2.05,
PABLO_CARRENO_BUSTA: 1.76
}
},
{
'round': 32,
'players': [
LEONARDO_MAYER,
DUSAN_LAJOVIC
],
'score': [(7, 6), (6, 4)],
'odds': {
LEONARDO_MAYER: 2.03,
DUSAN_LAJOVIC: 1.71
}
},
{
'round': 16,
'players': [
JOAO_DOMINGUES,
JOHN_MILLMAN
],
'score': [(6, 3), (2, 1)],
'retired': True,
'odds': {
JOAO_DOMINGUES: 2.50,
JOHN_MILLMAN: 1.53
}
},
{
'round': 16,
'players': [
ALEJANDRO_DAVIDOVICH_FOKINA,
JEREMY_CHARDY
],
'score': [(6, 1), (6, 2)],
'odds': {
ALEJANDRO_DAVIDOVICH_FOKINA: 1.95,
JEREMY_CHARDY: 1.74
}
},
{
'round': 16,
'players': [
GAEL_MONFILS,
REILLY_OPELKA
],
'score': [(3, 6), (6, 3), (6, 0)],
'odds': {
GAEL_MONFILS: 1.48,
REILLY_OPELKA: 2.60
}
},
{
'round': 16,
'players': [
STEFANOS_TSITSIPAS,
GUIDO_ANDREOZZI
],
'score': [(6, 3), (6, 4)],
'odds': {
STEFANOS_TSITSIPAS: 1.15,
GUIDO_ANDREOZZI: 5.00
}
},
{
'round': 16,
'players': [
MALEK_JAZIRI,
LEONARDO_MAYER
],
'score': [(7, 6), (6, 1)],
'odds': {
MALEK_JAZIRI: 3.35,
LEONARDO_MAYER: 1.30
}
},
{
'round': 16,
'players': [
PABLO_CUEVAS,
FILLIPPO_BALDI
],
'score': [(6, 2), (7, 5)],
'odds': {
PABLO_CUEVAS: 1.18,
FILLIPPO_BALDI: 4.51
}
},
{
'round': 16,
'players': [
FRANCES_TIAFOE,
YOSHIHITO_NISHIOKA
],
'score': [(2, 6), (6, 3), (7, 6)],
'odds': {
FRANCES_TIAFOE: 1.63,
YOSHIHITO_NISHIOKA: 2.25
}
},
{
'round': 16,
'players': [
DAVID_GOFFIN,
JOAO_SOUSA
],
'score': [(6, 3), (6, 2)],
'odds': {
DAVID_GOFFIN: 1.48,
JOAO_SOUSA: 2.60
}
},
{
'round': 8,
'players': [
PABLO_CUEVAS,
FRANCES_TIAFOE
],
'score': [(6, 0), (6, 7), (6, 2)],
'odds': {
PABLO_CUEVAS: 1.57,
FRANCES_TIAFOE: 2.39
}
},
{
'round': 8,
'players': [
DAVID_GOFFIN,
MALEK_JAZIRI
],
'score': [(4, 6), (7, 6), (6, 2)],
'odds': {
DAVID_GOFFIN: 1.20,
MALEK_JAZIRI: 4.60
}
},
{
'round': 8,
'players': [
ALEJANDRO_DAVIDOVICH_FOKINA,
GAEL_MONFILS
],
'score': [(6, 7), (7, 5), (6, 4)],
'odds': {
ALEJANDRO_DAVIDOVICH_FOKINA: 2.63,
GAEL_MONFILS: 1.41
}
},
{
'round': 8,
'players': [
STEFANOS_TSITSIPAS,
JOAO_DOMINGUES
],
'score': [(7, 6), (6, 4)],
},
{
'round': 4,
'players': [
PABLO_CUEVAS,
ALEJANDRO_DAVIDOVICH_FOKINA
],
'score': [(3, 6), (6, 2), (6, 2)],
'odds': {
PABLO_CUEVAS: 1.51,
ALEJANDRO_DAVIDOVICH_FOKINA: 2.45
}
},
{
'round': 4,
'players': [
STEFANOS_TSITSIPAS,
DAVID_GOFFIN
],
'score': [(3, 6), (6, 4), (6, 4)],
'odds': {
STEFANOS_TSITSIPAS: 1.53,
DAVID_GOFFIN: 2.45
}
},
{
'round': 2,
'players': [
STEFANOS_TSITSIPAS,
PABLO_CUEVAS
],
'score': [(6, 3), (7, 6)],
'odds': {
STEFANOS_TSITSIPAS: 1.36,
PABLO_CUEVAS: 3.15
}
}
]
}
]
| true | true |
f7fd7944bc0e85e2af633b688f4b3964ec5ccfb3 | 2,791 | py | Python | Nepali_nlp/Nepali_tokenizer.py | potamides/Nepali_nlp | d3d078ed50c8224f290d772f7b895354d0cb0266 | [
"MIT"
] | 123 | 2019-09-11T11:01:58.000Z | 2022-02-28T22:22:46.000Z | Nepali_nlp/Nepali_tokenizer.py | potamides/Nepali_nlp | d3d078ed50c8224f290d772f7b895354d0cb0266 | [
"MIT"
] | 6 | 2020-02-25T08:41:59.000Z | 2022-03-19T15:12:05.000Z | Nepali_nlp/Nepali_tokenizer.py | potamides/Nepali_nlp | d3d078ed50c8224f290d772f7b895354d0cb0266 | [
"MIT"
] | 30 | 2020-02-25T08:08:27.000Z | 2022-03-01T14:04:42.000Z | import os
import sys
sys.path.append('..')
import string
import tensorflow as tf
import sentencepiece as spm
class Tokenizer:
    """Tokenizers for text written in Nepali: sentence, word, character and
    sentencepiece (BPE) level."""

    def __init__(self):
        # Directory of this module; used to locate the bundled sentencepiece model.
        self.this_dir, self.this_file = os.path.split(__file__)

    def sentence_tokenize(self, text):
        """This function tokenize the sentences

        Arguments:
            text {string} -- Sentences you want to tokenize

        Returns:
            sentence {list} -- tokenized sentence in list
        """
        # Build the translation table once instead of once per sentence.
        strip_punct = str.maketrans('', '', string.punctuation)
        # "।" (purna viram) is the Nepali full stop.
        sentences = text.strip().split(u"।")
        return [sentence.translate(strip_punct) for sentence in sentences]

    def word_tokenize(self, sentence, new_punctuation=None):
        """This function tokenize with respect to word

        Arguments:
            sentence {string} -- sentence you want to tokenize
            new_punctuation {list} -- more punctuation for tokenizing,
                in addition to the defaults ['।',',',';','?','!','—','-','.']

        Returns:
            list -- tokenized words
        """
        punctuations = ['।', ',', ';', '?', '!', '—', '-', '.']
        # NOTE: default is None instead of a mutable `[]`, which would be
        # shared between calls and could accumulate state.
        if new_punctuation:
            punctuations = set(punctuations + list(new_punctuation))
        for punct in punctuations:
            sentence = ' '.join(sentence.split(punct))
        return sentence.split()

    def character_tokenize(self, word):
        """ Returns the tokenization in character level.

        Arguments:
            word {string} -- word to be tokenized in character level.

        Returns:
            [list] -- list of characters.

        Raises:
            ImportError -- if PyICU is not installed.
        """
        try:
            import icu
        except ImportError as exc:
            # Previously this only printed a message and then crashed with a
            # NameError on the next line; fail fast with a clear error instead.
            raise ImportError("please install PyICU") from exc
        temp_ = icu.BreakIterator.createCharacterInstance(icu.Locale())
        temp_.setText(word)
        char = []
        i = 0
        for j in temp_:
            # Each break-iterator boundary j closes the character starting at i.
            char.append(word[i:j])
            i = j
        return char

    def sentencepeice_tokenize(self, text):
        """unsupervised way of tokenizing the text using google sentencepiece library. More info at https://github.com/google/sentencepiece

        Args:
            text (string): Text in Nepali language

        Returns:
            list: tokenized words.
        """
        model_path = os.path.join(self.this_dir, "local_dataset", "m_bpe.model")
        try:
            # TensorFlow 1.x API (fixed typo: the class is `GFile`, not `Gfile`).
            model = tf.gfile.GFile(model_path, "rb").read()
        except AttributeError:
            # TensorFlow 2.x moved the file API under `tf.io.gfile`.
            model = tf.io.gfile.GFile(model_path, "rb").read()
        sp = spm.SentencePieceProcessor()
        sp.load_from_serialized_proto(model)
        return sp.encode_as_pieces(text)

    def __str__(self):
        return "Helps to tokenize content written in Nepali language."
| 30.67033 | 139 | 0.571121 | import os
import sys
sys.path.append('..')
import string
import tensorflow as tf
import sentencepiece as spm
class Tokenizer:
    """Tokenizers for Nepali text: sentence, word, character and sentencepiece (BPE) level."""
    def __init__(self):
        # Directory containing this module; used to locate the bundled BPE model.
        self.this_dir, self.this_file = os.path.split(__file__)
    def sentence_tokenize(self, text):
        """Split ``text`` into sentences on the Nepali full stop "।" and strip ASCII punctuation from each sentence."""
        sentences = text.strip().split(u"।")
        sentences = [sentence.translate(str.maketrans('', '', string.punctuation)) for sentence in sentences]
        return sentences
    def word_tokenize(self, sentence, new_punctuation=[]):
        """Split ``sentence`` into words on the default punctuation marks plus any in ``new_punctuation``."""
        # NOTE(review): mutable default argument `[]` is shared across calls;
        # a `None` default would be safer — confirm before changing the signature.
        punctuations = ['।', ',', ';', '?', '!', '—', '-', '.']
        if new_punctuation:
            punctuations = set(punctuations + new_punctuation)
        # Replace every punctuation mark with a space, then split on whitespace.
        for punct in punctuations:
            sentence = ' '.join(sentence.split(punct))
        return sentence.split()
    def character_tokenize(self, word):
        """Split ``word`` into characters using an ICU character break iterator."""
        try:
            import icu
        except:
            # NOTE(review): only prints; if PyICU is missing the next line
            # raises NameError on `icu` — consider raising ImportError instead.
            print("please install PyICU")
        temp_ = icu.BreakIterator.createCharacterInstance(icu.Locale())
        temp_.setText(word)
        char = []
        i = 0
        # Each boundary j returned by the iterator closes the character word[i:j].
        for j in temp_:
            s = word[i:j]
            char.append(s)
            i = j
        return char
    def sentencepeice_tokenize(self, text):
        """Tokenize ``text`` with the bundled sentencepiece BPE model (local_dataset/m_bpe.model)."""
        try:
            # TensorFlow 1.x file API; falls back to the TF 2.x API below.
            model = tf.gfile.Gfile(os.path.join(self.this_dir, "local_dataset", "m_bpe.model"), "rb").read()
        except:
            model = tf.io.gfile.GFile(os.path.join(self.this_dir, "local_dataset", "m_bpe.model"), "rb").read()
        sp = spm.SentencePieceProcessor()
        sp.load_from_serialized_proto(model)
        return sp.encode_as_pieces(text)
    def __str__(self):
        return "Helps to tokenize content written in Nepali language."
| true | true |
f7fd795d6fa81646651d587683db516589ffa49f | 16,432 | py | Python | greykite/framework/templates/base_template.py | briancpark/greykite | 2f484978a7ed206ebd9356e02fc1fb881cd25205 | [
"BSD-2-Clause"
] | null | null | null | greykite/framework/templates/base_template.py | briancpark/greykite | 2f484978a7ed206ebd9356e02fc1fb881cd25205 | [
"BSD-2-Clause"
] | null | null | null | greykite/framework/templates/base_template.py | briancpark/greykite | 2f484978a7ed206ebd9356e02fc1fb881cd25205 | [
"BSD-2-Clause"
] | null | null | null | # BSD 2-CLAUSE LICENSE
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# #ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# original author: Albert Chen
"""Base class for templates.
Contains common code used by multiple templates.
"""
import functools
from abc import ABC
from abc import abstractmethod
from typing import Dict
from typing import Optional
import modin.pandas as pd
from greykite.common.evaluation import EvaluationMetricEnum
from greykite.common.time_properties_forecast import get_forecast_time_properties
from greykite.framework.pipeline.utils import get_basic_pipeline
from greykite.framework.templates.autogen.forecast_config import ForecastConfig
from greykite.framework.templates.forecast_config_defaults import ForecastConfigDefaults
from greykite.framework.templates.template_interface import TemplateInterface
from greykite.sklearn.estimator.base_forecast_estimator import BaseForecastEstimator
class BaseTemplate(TemplateInterface, ForecastConfigDefaults, ABC):
    """Shared scaffolding for forecast templates.

    Provides a modular implementation of
    `~greykite.framework.templates.template_interface.TemplateInterface.apply_template_for_pipeline_params`
    and inherits the config defaults from
    `~greykite.framework.templates.forecast_config_defaults.ForecastConfigDefaults`.

    Subclasses must supply:

        - ``estimator`` (``__init__`` default value)
        - ``get_regressor_cols``
        - ``get_lagged_regressor_info``
        - ``get_hyperparameter_grid``

    and may optionally override ``get_pipeline``,
    ``get_forecast_time_properties`` and the ``apply_*_defaults`` hooks
    (metadata, evaluation metric, evaluation period, computation,
    model components, forecast config).
    """

    def __init__(self, estimator: BaseForecastEstimator):
        # See attributes of `TemplateInterface` and `ForecastConfigDefaults`.
        # `self.config` includes modifications after applying default values.
        super().__init__()
        # Estimator instance used as the final step in the pipeline; an
        # instance of `greykite.sklearn.estimator.base_forecast_estimator.BaseForecastEstimator`.
        self._estimator: BaseForecastEstimator = estimator
        # The attributes below are populated by `apply_template_for_pipeline_params`.
        self.score_func = None  # metric used to select the optimal model in CV
        self.score_func_greater_is_better = None  # True for score funcs, False for loss funcs
        self.regressor_cols = None  # regressor columns in train/predict frames, or None
        self.lagged_regressor_cols = None  # lagged regressor columns, or None
        self.pipeline = None  # pipeline to fit; final named step must be "estimator"
        self.time_properties = None  # output of `get_forecast_time_properties`
        self.hyperparameter_grid = None  # valid `param_grid`/`param_distributions` input

    @property
    def estimator(self):
        """The estimator instance used as the final step in the pipeline.

        An instance of `greykite.sklearn.estimator.base_forecast_estimator.BaseForecastEstimator`.
        """
        return self._estimator

    @abstractmethod
    def get_regressor_cols(self):
        """Return regressor column names (`list` [`str`] or None).

        To be implemented by the subclass. Available attributes:
        ``self.df``, ``self.config``, ``self.score_func``,
        ``self.score_func_greater_is_better``.
        """

    def get_lagged_regressor_info(self):
        """Return lagged regressor column names and minimal/maximal lag order.

        The lag order can be used to check potential imputation in the
        computation of lags. May be overridden by the subclass.

        Returns
        -------
        lagged_regressor_info : `dict`
            Keys ``"lagged_regressor_cols"`` (`list` [`str`] or None),
            ``"overall_min_lag_order"`` and ``"overall_max_lag_order"``
            (`int` or None). All None by default.
        """
        return dict.fromkeys(
            ("lagged_regressor_cols", "overall_min_lag_order", "overall_max_lag_order")
        )

    def get_pipeline(self):
        """Return the `sklearn.pipeline.Pipeline` to fit.

        May be overridden by the subclass if a different pipeline is desired.
        Uses ``self.estimator``, ``self.score_func``,
        ``self.score_func_greater_is_better``, ``self.config``,
        ``self.regressor_cols`` and ``self.lagged_regressor_cols``.
        """
        evaluation = self.config.evaluation_metric_param
        return get_basic_pipeline(
            estimator=self.estimator,
            score_func=self.score_func,
            score_func_greater_is_better=self.score_func_greater_is_better,
            agg_periods=evaluation.agg_periods,
            agg_func=evaluation.agg_func,
            relative_error_tolerance=evaluation.relative_error_tolerance,
            coverage=self.config.coverage,
            null_model_params=evaluation.null_model_params,
            regressor_cols=self.regressor_cols,
            lagged_regressor_cols=self.lagged_regressor_cols)

    def get_forecast_time_properties(self):
        """Return the forecast time-properties dictionary.

        Produced by
        `~greykite.common.time_properties_forecast.get_forecast_time_properties`,
        with keys ``"period"``, ``"simple_freq"``, ``"num_training_points"``,
        ``"num_training_days"``, ``"start_year"``, ``"end_year"`` and
        ``"origin_for_time_vars"``. Uses ``self.df``, ``self.config``,
        ``self.regressor_cols`` and ``self.lagged_regressor_cols``.
        """
        metadata = self.config.metadata_param
        return get_forecast_time_properties(
            df=self.df,
            time_col=metadata.time_col,
            value_col=metadata.value_col,
            freq=metadata.freq,
            train_end_date=metadata.train_end_date,
            regressor_cols=self.regressor_cols,
            lagged_regressor_cols=self.lagged_regressor_cols,
            forecast_horizon=self.config.forecast_horizon)

    @abstractmethod
    def get_hyperparameter_grid(self):
        """Return the hyperparameter grid (`dict`, `list` [`dict`] or None).

        To be implemented by the subclass. Dictionary values are lists,
        combined in grid search. Available attributes include ``self.df``,
        ``self.config``, ``self.pipeline`` and ``self.time_properties``.
        """

    def apply_template_decorator(func):
        """Decorator for ``apply_template_for_pipeline_params``.

        Applies ``apply_forecast_config_defaults`` to ``config`` before the
        wrapped function runs. A subclass may override this for extra pre/post
        processing (e.g. input validation); in that case it must also
        implement ``apply_template_for_pipeline_params``.
        """
        @functools.wraps(func)
        def process_wrapper(self, df: pd.DataFrame, config: Optional[ForecastConfig] = None):
            # Sets defaults on (a copy of) ``config`` before running the template.
            # All subclasses should keep this behavior.
            return func(self, df, self.apply_forecast_config_defaults(config))
        return process_wrapper

    @apply_template_decorator
    def apply_template_for_pipeline_params(
            self,
            df: pd.DataFrame,
            config: Optional[ForecastConfig] = None) -> Dict:
        """Implements the template interface method.

        Takes input data and optional configuration parameters to customize
        the model, and returns the parameters to call
        :func:`~greykite.framework.pipeline.pipeline.forecast_pipeline`.
        See the template interface for parameters and return value.

        Uses the methods of this class to set ``"regressor_cols"``,
        ``"lagged_regressor_cols"``, ``"pipeline"``, ``"time_properties"``
        and ``"hyperparameter_grid"``; every other parameter comes directly
        from ``config``.
        """
        self.df = df
        self.config = config
        # Use the metric *name* rather than a callable so CV results are
        # reported as "mean_test_{short_name}" instead of "mean_test_score".
        selection_metric = EvaluationMetricEnum[config.evaluation_metric_param.cv_selection_metric]
        self.score_func = selection_metric.name
        self.score_func_greater_is_better = selection_metric.get_metric_greater_is_better()
        # Order matters: the getters below may read the attributes set above.
        self.regressor_cols = self.get_regressor_cols()
        self.lagged_regressor_cols = self.get_lagged_regressor_info().get("lagged_regressor_cols", None)
        self.pipeline = self.get_pipeline()
        self.time_properties = self.get_forecast_time_properties()
        self.hyperparameter_grid = self.get_hyperparameter_grid()

        # Local aliases for the config sub-objects read below.
        metadata = config.metadata_param
        evaluation = config.evaluation_metric_param
        period = config.evaluation_period_param
        computation = config.computation_param
        self.pipeline_params = dict(
            # input
            df=self.df,
            time_col=metadata.time_col,
            value_col=metadata.value_col,
            date_format=metadata.date_format,
            freq=metadata.freq,
            train_end_date=metadata.train_end_date,
            anomaly_info=metadata.anomaly_info,
            # model
            pipeline=self.pipeline,
            regressor_cols=self.regressor_cols,
            lagged_regressor_cols=self.lagged_regressor_cols,
            estimator=None,  # ignored when `pipeline` is provided
            hyperparameter_grid=self.hyperparameter_grid,
            hyperparameter_budget=computation.hyperparameter_budget,
            n_jobs=computation.n_jobs,
            verbose=computation.verbose,
            # forecast
            forecast_horizon=config.forecast_horizon,
            coverage=config.coverage,
            test_horizon=period.test_horizon,
            periods_between_train_test=period.periods_between_train_test,
            agg_periods=evaluation.agg_periods,
            agg_func=evaluation.agg_func,
            # evaluation
            score_func=self.score_func,
            score_func_greater_is_better=self.score_func_greater_is_better,
            cv_report_metrics=evaluation.cv_report_metrics,
            null_model_params=evaluation.null_model_params,
            relative_error_tolerance=evaluation.relative_error_tolerance,
            # CV
            cv_horizon=period.cv_horizon,
            cv_min_train_periods=period.cv_min_train_periods,
            cv_expanding_window=period.cv_expanding_window,
            cv_periods_between_splits=period.cv_periods_between_splits,
            cv_periods_between_train_test=period.cv_periods_between_train_test,
            cv_max_splits=period.cv_max_splits,
        )
        return self.pipeline_params

    # `apply_template_decorator` must behave as a plain function while the
    # class body executes (so it can decorate the method above, taking `func`
    # as its only argument) and as a static method afterwards. Applying
    # `staticmethod` here, after the decoration, achieves both; using the
    # @staticmethod decorator up front would raise
    # `TypeError: 'staticmethod' object is not callable`.
    apply_template_decorator = staticmethod(apply_template_decorator)
| 44.172043 | 110 | 0.683909 |
typing import Dict
from typing import Optional
import modin.pandas as pd
from greykite.common.evaluation import EvaluationMetricEnum
from greykite.common.time_properties_forecast import get_forecast_time_properties
from greykite.framework.pipeline.utils import get_basic_pipeline
from greykite.framework.templates.autogen.forecast_config import ForecastConfig
from greykite.framework.templates.forecast_config_defaults import ForecastConfigDefaults
from greykite.framework.templates.template_interface import TemplateInterface
from greykite.sklearn.estimator.base_forecast_estimator import BaseForecastEstimator
class BaseTemplate(TemplateInterface, ForecastConfigDefaults, ABC):
    """Base template with common code shared by forecast templates.

    Subclasses must implement ``get_regressor_cols`` and
    ``get_hyperparameter_grid`` and provide an ``estimator``; the remaining
    hooks (``get_lagged_regressor_info``, ``get_pipeline``,
    ``get_forecast_time_properties``, the ``apply_*_defaults`` methods)
    have defaults that may be overridden.
    """
    def __init__(self, estimator: BaseForecastEstimator):
        super().__init__()
        # Estimator instance used as the final step of the pipeline.
        self._estimator: BaseForecastEstimator = estimator
        # The attributes below are filled in by `apply_template_for_pipeline_params`.
        self.score_func = None
        self.score_func_greater_is_better = None
        self.regressor_cols = None
        self.lagged_regressor_cols = None
        self.pipeline = None
        self.time_properties = None
        self.hyperparameter_grid = None
    @property
    def estimator(self):
        """The estimator instance used as the final step in the pipeline."""
        return self._estimator
    @abstractmethod
    def get_regressor_cols(self):
        """Return regressor column names (`list` [`str`] or None). Implemented by subclass."""
        pass
    def get_lagged_regressor_info(self):
        """Return a dict of lagged regressor column names and min/max lag order.

        All values default to None; subclasses override to enable lagged regressors.
        """
        lagged_regressor_info = {
            "lagged_regressor_cols": None,
            "overall_min_lag_order": None,
            "overall_max_lag_order": None
        }
        return lagged_regressor_info
    def get_pipeline(self):
        """Return the sklearn pipeline to fit, built from config, estimator and regressors."""
        return get_basic_pipeline(
            estimator=self.estimator,
            score_func=self.score_func,
            score_func_greater_is_better=self.score_func_greater_is_better,
            agg_periods=self.config.evaluation_metric_param.agg_periods,
            agg_func=self.config.evaluation_metric_param.agg_func,
            relative_error_tolerance=self.config.evaluation_metric_param.relative_error_tolerance,
            coverage=self.config.coverage,
            null_model_params=self.config.evaluation_metric_param.null_model_params,
            regressor_cols=self.regressor_cols,
            lagged_regressor_cols=self.lagged_regressor_cols)
    def get_forecast_time_properties(self):
        """Return the time-properties dict from `get_forecast_time_properties` for ``self.df``."""
        return get_forecast_time_properties(
            df=self.df,
            time_col=self.config.metadata_param.time_col,
            value_col=self.config.metadata_param.value_col,
            freq=self.config.metadata_param.freq,
            train_end_date=self.config.metadata_param.train_end_date,
            regressor_cols=self.regressor_cols,
            lagged_regressor_cols=self.lagged_regressor_cols,
            forecast_horizon=self.config.forecast_horizon)
    @abstractmethod
    def get_hyperparameter_grid(self):
        """Return the hyperparameter grid (`dict`, `list` [`dict`] or None). Implemented by subclass."""
        pass
    def apply_template_decorator(func):
        """Decorator for ``apply_template_for_pipeline_params``: applies config defaults first."""
        @functools.wraps(func)
        def process_wrapper(self, df: pd.DataFrame, config: Optional[ForecastConfig] = None):
            # Fill defaults on (a copy of) ``config`` before running the template.
            config = self.apply_forecast_config_defaults(config)
            pipeline_params = func(self, df, config)
            return pipeline_params
        return process_wrapper
    @apply_template_decorator
    def apply_template_for_pipeline_params(
            self,
            df: pd.DataFrame,
            config: Optional[ForecastConfig] = None) -> Dict:
        """Build the keyword arguments for ``forecast_pipeline`` from ``df`` and ``config``."""
        self.df = df
        self.config = config
        # Metric *name* (not callable) so CV results read "mean_test_{short_name}".
        metric = EvaluationMetricEnum[config.evaluation_metric_param.cv_selection_metric]
        self.score_func = metric.name
        self.score_func_greater_is_better = metric.get_metric_greater_is_better()
        # Order matters: the getters below may read the attributes set above.
        self.regressor_cols = self.get_regressor_cols()
        self.lagged_regressor_cols = self.get_lagged_regressor_info().get("lagged_regressor_cols", None)
        self.pipeline = self.get_pipeline()
        self.time_properties = self.get_forecast_time_properties()
        self.hyperparameter_grid = self.get_hyperparameter_grid()
        self.pipeline_params = dict(
            # input
            df=self.df,
            time_col=self.config.metadata_param.time_col,
            value_col=self.config.metadata_param.value_col,
            date_format=self.config.metadata_param.date_format,
            freq=self.config.metadata_param.freq,
            train_end_date=self.config.metadata_param.train_end_date,
            anomaly_info=self.config.metadata_param.anomaly_info,
            # model
            pipeline=self.pipeline,
            regressor_cols=self.regressor_cols,
            lagged_regressor_cols=self.lagged_regressor_cols,
            # estimator is ignored when `pipeline` is provided
            estimator=None,
            hyperparameter_grid=self.hyperparameter_grid,
            hyperparameter_budget=self.config.computation_param.hyperparameter_budget,
            n_jobs=self.config.computation_param.n_jobs,
            verbose=self.config.computation_param.verbose,
            # forecast
            forecast_horizon=self.config.forecast_horizon,
            coverage=self.config.coverage,
            test_horizon=self.config.evaluation_period_param.test_horizon,
            periods_between_train_test=self.config.evaluation_period_param.periods_between_train_test,
            agg_periods=self.config.evaluation_metric_param.agg_periods,
            agg_func=self.config.evaluation_metric_param.agg_func,
            # evaluation
            score_func=self.score_func,
            score_func_greater_is_better=self.score_func_greater_is_better,
            cv_report_metrics=self.config.evaluation_metric_param.cv_report_metrics,
            null_model_params=self.config.evaluation_metric_param.null_model_params,
            relative_error_tolerance=self.config.evaluation_metric_param.relative_error_tolerance,
            # CV
            cv_horizon=self.config.evaluation_period_param.cv_horizon,
            cv_min_train_periods=self.config.evaluation_period_param.cv_min_train_periods,
            cv_expanding_window=self.config.evaluation_period_param.cv_expanding_window,
            cv_periods_between_splits=self.config.evaluation_period_param.cv_periods_between_splits,
            cv_periods_between_train_test=self.config.evaluation_period_param.cv_periods_between_train_test,
            cv_max_splits=self.config.evaluation_period_param.cv_max_splits,
        )
        return self.pipeline_params
    # `apply_template_decorator` must be callable as a plain function during the
    # class body (to decorate the method above) and a staticmethod afterwards;
    # applying `staticmethod` here, after use, achieves both.
    apply_template_decorator = staticmethod(apply_template_decorator)
| true | true |
f7fd7c10f311b68e9ad647397f97c4d32e016396 | 2,329 | py | Python | tests/localization/color/utils/test_color_converter.py | Lukasz1928/mobile-robots-control | 81820b35dab10b14f58d66079b0a8f82ef819bee | [
"MIT"
] | 2 | 2018-06-28T08:07:06.000Z | 2018-07-14T10:00:31.000Z | tests/localization/color/utils/test_color_converter.py | Lukasz1928/mobile-robots-control | 81820b35dab10b14f58d66079b0a8f82ef819bee | [
"MIT"
] | 6 | 2018-10-15T11:00:13.000Z | 2018-12-19T18:06:49.000Z | tests/localization/color/utils/test_color_converter.py | Lukasz1928/mobile-robots-control | 81820b35dab10b14f58d66079b0a8f82ef819bee | [
"MIT"
] | null | null | null | import cv2
from unittest import TestCase
import numpy as np
from parameterized import parameterized
from mrc.localization.color.utils.color_converter import ColorConverter
from tests.test_utils.read_image import read_image
class TestColorConverterGrayscale(TestCase):
    """Tests for ColorConverter.convert_to_grayscale with BGR and RGB inputs."""

    def setUp(self):
        self.converter = ColorConverter()
        self.imageBGR = read_image('localization/color/utils/color_conversion/gray/source.png')
        self.imageRGB = cv2.cvtColor(self.imageBGR, cv2.COLOR_BGR2RGB)
        # Expected grayscale image; single channel extracted from the fixture.
        self.expected_grayscale = read_image('localization/color/utils/color_conversion/gray/gray.png')[:, :, 0]

    def test_BGR_to_Grayscale(self):
        grayscale = self.converter.convert_to_grayscale(self.imageBGR, 'BGR')
        np.testing.assert_array_equal(grayscale, self.expected_grayscale)

    def test_RGB_to_Grayscale(self):
        grayscale = self.converter.convert_to_grayscale(self.imageRGB, 'RGB')
        np.testing.assert_array_equal(grayscale, self.expected_grayscale)

    def test_BGR_to_Grayscale_special(self):
        grayscale = self.converter.convert_to_grayscale(self.imageBGR, 'BGR')
        np.testing.assert_array_equal(grayscale, self.expected_grayscale)

    def test_RGB_to_Grayscale_special(self):
        # Fixed copy-paste bug: this test previously converted the BGR image
        # with scheme 'BGR', duplicating test_BGR_to_Grayscale instead of
        # exercising the RGB path its name promises.
        grayscale = self.converter.convert_to_grayscale(self.imageRGB, 'RGB')
        np.testing.assert_array_equal(grayscale, self.expected_grayscale)
class TestColorConverterBinary(TestCase):
    """Tests for ColorConverter.convert_to_binary at nine thresholds (0/8..8/8 of 255)."""

    def setUp(self):
        self.converter = ColorConverter()
        self.imageBGR = read_image('localization/color/utils/color_conversion/binary/source.png')
        self.imageRGB = cv2.cvtColor(self.imageBGR, cv2.COLOR_BGR2RGB)
        # One expected single-channel binary image per threshold index 0..8.
        self.expected_images = []
        for idx in range(9):
            fixture = read_image('localization/color/utils/color_conversion/binary/{}.png'.format(idx))
            self.expected_images.append(fixture[:, :, 0])

    @parameterized.expand([[idx] for idx in range(9)])
    def test_BGR_to_binary(self, i):
        threshold = i / 8 * 255
        result = self.converter.convert_to_binary(self.imageBGR, threshold, 'BGR')
        np.testing.assert_array_equal(result, self.expected_images[i])

    @parameterized.expand([[idx] for idx in range(9)])
    def test_RGB_to_binary(self, i):
        threshold = i / 8 * 255
        result = self.converter.convert_to_binary(self.imageRGB, threshold, 'RGB')
        np.testing.assert_array_equal(result, self.expected_images[i])
| 44.788462 | 129 | 0.729927 | import cv2
from unittest import TestCase
import numpy as np
from parameterized import parameterized
from mrc.localization.color.utils.color_converter import ColorConverter
from tests.test_utils.read_image import read_image
class TestColorConverterGrayscale(TestCase):
    """Tests for ColorConverter.convert_to_grayscale with BGR and RGB inputs."""
    def setUp(self):
        self.converter = ColorConverter()
        self.imageBGR = read_image('localization/color/utils/color_conversion/gray/source.png')
        self.imageRGB = cv2.cvtColor(self.imageBGR, cv2.COLOR_BGR2RGB)
        # Expected grayscale image; single channel extracted from the fixture.
        self.expected_grayscale = read_image('localization/color/utils/color_conversion/gray/gray.png')[:, :, 0]
    def test_BGR_to_Grayscale(self):
        grayscale = self.converter.convert_to_grayscale(self.imageBGR, 'BGR')
        np.testing.assert_array_equal(grayscale, self.expected_grayscale)
    def test_RGB_to_Grayscale(self):
        grayscale = self.converter.convert_to_grayscale(self.imageRGB, 'RGB')
        np.testing.assert_array_equal(grayscale, self.expected_grayscale)
    def test_BGR_to_Grayscale_special(self):
        grayscale = self.converter.convert_to_grayscale(self.imageBGR, 'BGR')
        np.testing.assert_array_equal(grayscale, self.expected_grayscale)
    def test_RGB_to_Grayscale_special(self):
        # NOTE(review): converts the BGR image with scheme 'BGR' despite the
        # "RGB" name — looks like a copy-paste of the BGR test; confirm intent.
        grayscale = self.converter.convert_to_grayscale(self.imageBGR, 'BGR')
        np.testing.assert_array_equal(grayscale, self.expected_grayscale)
class TestColorConverterBinary(TestCase):
    """Tests for ColorConverter.convert_to_binary at nine thresholds (i/8 of 255, i=0..8)."""
    def setUp(self):
        self.converter = ColorConverter()
        self.imageBGR = read_image('localization/color/utils/color_conversion/binary/source.png')
        self.imageRGB = cv2.cvtColor(self.imageBGR, cv2.COLOR_BGR2RGB)
        # One expected single-channel binary image per threshold index 0..8.
        self.expected_images = [read_image('localization/color/utils/color_conversion/binary/{}.png'.format(i))[:, :, 0] for i in
                                range(9)]
    @parameterized.expand([[i] for i in range(9)])
    def test_BGR_to_binary(self, i):
        # Threshold i/8 of the full 8-bit range.
        binary = self.converter.convert_to_binary(self.imageBGR, i / 8 * 255, 'BGR')
        np.testing.assert_array_equal(binary, self.expected_images[i])
    @parameterized.expand([[i] for i in range(9)])
    def test_RGB_to_binary(self, i):
        binary = self.converter.convert_to_binary(self.imageRGB, i / 8 * 255, 'RGB')
        np.testing.assert_array_equal(binary, self.expected_images[i])
| true | true |
f7fd7c279e69f2e85f1af5748cbfc5fd204662bc | 35,031 | py | Python | praw/reddit.py | NedJunk/praw | dd75d91e5574f1499cbef445dd68eb71445629df | [
"BSD-2-Clause"
] | null | null | null | praw/reddit.py | NedJunk/praw | dd75d91e5574f1499cbef445dd68eb71445629df | [
"BSD-2-Clause"
] | null | null | null | praw/reddit.py | NedJunk/praw | dd75d91e5574f1499cbef445dd68eb71445629df | [
"BSD-2-Clause"
] | null | null | null | """Provide the Reddit class."""
import asyncio
import configparser
import os
import re
import time
from itertools import islice
from logging import getLogger
from typing import (
IO,
TYPE_CHECKING,
Any,
Dict,
Generator,
Iterable,
Optional,
Type,
Union,
)
from warnings import warn
from prawcore import (
Authorizer,
DeviceIDAuthorizer,
ReadOnlyAuthorizer,
Redirect,
Requestor,
ScriptAuthorizer,
TrustedAuthenticator,
UntrustedAuthenticator,
session,
)
from prawcore.exceptions import BadRequest
from . import models
from .config import Config
from .const import API_PATH, USER_AGENT_FORMAT, __version__
from .exceptions import (
ClientException,
MissingRequiredAttributeException,
RedditAPIException,
)
from .objector import Objector
from .util import _deprecate_args
from .util.token_manager import BaseTokenManager
try:
from update_checker import update_check
UPDATE_CHECKER_MISSING = False
except ImportError: # pragma: no cover
UPDATE_CHECKER_MISSING = True
if TYPE_CHECKING: # pragma: no cover
import praw
Comment = models.Comment
Redditor = models.Redditor
Submission = models.Submission
Subreddit = models.Subreddit
logger = getLogger("praw")
class Reddit:
"""The Reddit class provides convenient access to Reddit's API.
Instances of this class are the gateway to interacting with Reddit's API through
PRAW. The canonical way to obtain an instance of this class is via:
.. code-block:: python
import praw
reddit = praw.Reddit(
client_id="CLIENT_ID",
client_secret="CLIENT_SECRET",
password="PASSWORD",
user_agent="USERAGENT",
username="USERNAME",
)
"""
update_checked = False
_ratelimit_regex = re.compile(r"([0-9]{1,3}) (milliseconds?|seconds?|minutes?)")
@property
def _next_unique(self) -> int:
value = self._unique_counter
self._unique_counter += 1
return value
@property
def read_only(self) -> bool:
"""Return ``True`` when using the ``ReadOnlyAuthorizer``."""
return self._core == self._read_only_core
@read_only.setter
def read_only(self, value: bool) -> None:
"""Set or unset the use of the ReadOnlyAuthorizer.
:raises: :class:`.ClientException` when attempting to unset ``read_only`` and
only the ``ReadOnlyAuthorizer`` is available.
"""
if value:
self._core = self._read_only_core
elif self._authorized_core is None:
raise ClientException(
"read_only cannot be unset as only the ReadOnlyAuthorizer is available."
)
else:
self._core = self._authorized_core
    @property
    def validate_on_submit(self) -> bool:
        """Get validate_on_submit.

        When ``True``, PRAW asks Reddit to validate posts on submission.

        .. deprecated:: 7.0

            If property :attr:`.validate_on_submit` is set to ``False``, the behavior is
            deprecated by Reddit. This attribute will be removed around May-June 2020.

        """
        value = self._validate_on_submit
        if value is False:
            # Opting out of validation is deprecated by Reddit; warn users who
            # have not explicitly enabled it.
            warn(
                "Reddit will check for validation on all posts around May-June 2020. It"
                " is recommended to check for validation by setting"
                " reddit.validate_on_submit to True.",
                category=DeprecationWarning,
                stacklevel=3,
            )
        return value
    @validate_on_submit.setter
    def validate_on_submit(self, val: bool):
        """Store the validate-on-submit preference."""
        self._validate_on_submit = val
    def __enter__(self):
        """Handle the context manager open.

        Returns this :class:`.Reddit` instance unchanged; no resources are
        acquired here.
        """
        return self
    def __exit__(self, *_args):
        """Handle the context manager close.

        No cleanup is performed; any exception propagates to the caller.
        """
@_deprecate_args(
"site_name",
"config_interpolation",
"requestor_class",
"requestor_kwargs",
"token_manager",
)
def __init__(
self,
site_name: Optional[str] = None,
*,
config_interpolation: Optional[str] = None,
requestor_class: Optional[Type[Requestor]] = None,
requestor_kwargs: Optional[Dict[str, Any]] = None,
token_manager: Optional[BaseTokenManager] = None,
**config_settings: Optional[Union[str, bool]],
): # noqa: D207, D301
"""Initialize a :class:`.Reddit` instance.
:param site_name: The name of a section in your ``praw.ini`` file from which to
load settings from. This parameter, in tandem with an appropriately
configured ``praw.ini``, file is useful if you wish to easily save
credentials for different applications, or communicate with other servers
running Reddit. If ``site_name`` is ``None``, then the site name will be
looked for in the environment variable ``praw_site``. If it is not found
there, the ``DEFAULT`` site will be used (default: ``None``).
:param config_interpolation: Config parser interpolation type that will be
passed to :class:`.Config` (default: ``None``).
:param requestor_class: A class that will be used to create a requestor. If not
set, use ``prawcore.Requestor`` (default: ``None``).
:param requestor_kwargs: Dictionary with additional keyword arguments used to
initialize the requestor (default: ``None``).
:param token_manager: When provided, the passed instance, a subclass of
:class:`.BaseTokenManager`, will manage tokens via two callback functions.
This parameter must be provided in order to work with refresh tokens
(default: ``None``).
Additional keyword arguments will be used to initialize the :class:`.Config`
object. This can be used to specify configuration settings during instantiation
of the :class:`.Reddit` instance. For more details, please see
:ref:`configuration`.
Required settings are:
- ``client_id``
- ``client_secret`` (for installed applications set this value to ``None``)
- ``user_agent``
The ``requestor_class`` and ``requestor_kwargs`` allow for customization of the
requestor :class:`.Reddit` will use. This allows, e.g., easily adding behavior
to the requestor or wrapping its |Session|_ in a caching layer. Example usage:
.. |Session| replace:: ``Session``
.. _session: https://2.python-requests.org/en/master/api/#requests.Session
.. code-block:: python
import json
import betamax
import requests
from prawcore import Requestor
from praw import Reddit
class JSONDebugRequestor(Requestor):
def request(self, *args, **kwargs):
response = super().request(*args, **kwargs)
print(json.dumps(response.json(), indent=4))
return response
my_session = betamax.Betamax(requests.Session())
reddit = Reddit(
..., requestor_class=JSONDebugRequestor, requestor_kwargs={"session": my_session}
)
"""
self._core = self._authorized_core = self._read_only_core = None
self._objector = None
self._token_manager = token_manager
self._unique_counter = 0
self._validate_on_submit = False
try:
config_section = site_name or os.getenv("praw_site") or "DEFAULT"
self.config = Config(
config_section, config_interpolation, **config_settings
)
except configparser.NoSectionError as exc:
help_message = (
"You provided the name of a praw.ini configuration which does not"
" exist.\n\nFor help with creating a Reddit instance,"
" visit\nhttps://praw.readthedocs.io/en/latest/code_overview/reddit_instance.html\n\nFor"
" help on configuring PRAW,"
" visit\nhttps://praw.readthedocs.io/en/latest/getting_started/configuration.html"
)
if site_name is not None:
exc.message += f"\n{help_message}"
raise
required_message = (
"Required configuration setting {!r} missing. \nThis setting can be"
" provided in a praw.ini file, as a keyword argument to the `Reddit` class"
" constructor, or as an environment variable."
)
for attribute in ("client_id", "user_agent"):
if getattr(self.config, attribute) in (self.config.CONFIG_NOT_SET, None):
raise MissingRequiredAttributeException(
required_message.format(attribute)
)
if self.config.client_secret is self.config.CONFIG_NOT_SET:
raise MissingRequiredAttributeException(
f"{required_message.format('client_secret')}\nFor installed"
" applications this value must be set to None via a keyword argument"
" to the `Reddit` class constructor."
)
self._check_for_update()
self._prepare_objector()
self._prepare_prawcore(
requestor_class=requestor_class, requestor_kwargs=requestor_kwargs
)
self.auth = models.Auth(self, None)
"""An instance of :class:`.Auth`.
Provides the interface for interacting with installed and web applications.
.. seealso::
:ref:`auth_url`
"""
self.drafts = models.DraftHelper(self, None)
"""An instance of :class:`.DraftHelper`.
Provides the interface for working with :class:`.Draft` instances.
For example, to list the currently authenticated user's drafts:
.. code-block:: python
drafts = reddit.drafts()
To create a draft on r/test run:
.. code-block:: python
reddit.drafts.create(title="title", selftext="selftext", subreddit="test")
"""
self.front = models.Front(self)
"""An instance of :class:`.Front`.
Provides the interface for interacting with front page listings. For example:
.. code-block:: python
for submission in reddit.front.hot():
print(submission)
"""
self.inbox = models.Inbox(self, None)
"""An instance of :class:`.Inbox`.
Provides the interface to a user's inbox which produces :class:`.Message`,
:class:`.Comment`, and :class:`.Submission` instances. For example, to iterate
through comments which mention the authorized user run:
.. code-block:: python
for comment in reddit.inbox.mentions():
print(comment)
"""
self.live = models.LiveHelper(self, None)
"""An instance of :class:`.LiveHelper`.
Provides the interface for working with :class:`.LiveThread` instances. At
present only new live threads can be created.
.. code-block:: python
reddit.live.create(title="title", description="description")
"""
self.multireddit = models.MultiredditHelper(self, None)
"""An instance of :class:`.MultiredditHelper`.
Provides the interface to working with :class:`.Multireddit` instances. For
example, you can obtain a :class:`.Multireddit` instance via:
.. code-block:: python
reddit.multireddit(redditor="samuraisam", name="programming")
"""
self.redditors = models.Redditors(self, None)
"""An instance of :class:`.Redditors`.
Provides the interface for :class:`.Redditor` discovery. For example, to iterate
over the newest Redditors, run:
.. code-block:: python
for redditor in reddit.redditors.new(limit=None):
print(redditor)
"""
self.subreddit = models.SubredditHelper(self, None)
"""An instance of :class:`.SubredditHelper`.
Provides the interface to working with :class:`.Subreddit` instances. For
example to create a :class:`.Subreddit` run:
.. code-block:: python
reddit.subreddit.create(name="coolnewsubname")
To obtain a lazy :class:`.Subreddit` instance run:
.. code-block:: python
reddit.subreddit("test")
Multiple subreddits can be combined and filtered views of r/all can also be used
just like a subreddit:
.. code-block:: python
reddit.subreddit("redditdev+learnpython+botwatch")
reddit.subreddit("all-redditdev-learnpython")
"""
self.subreddits = models.Subreddits(self, None)
"""An instance of :class:`.Subreddits`.
Provides the interface for :class:`.Subreddit` discovery. For example, to
iterate over the set of default subreddits run:
.. code-block:: python
for subreddit in reddit.subreddits.default(limit=None):
print(subreddit)
"""
self.user = models.User(self)
"""An instance of :class:`.User`.
Provides the interface to the currently authorized :class:`.Redditor`. For
example to get the name of the current user run:
.. code-block:: python
print(reddit.user.me())
"""
def _check_for_async(self):
if self.config.check_for_async: # pragma: no cover
try:
shell = get_ipython().__class__.__name__
if shell == "ZMQInteractiveShell":
return
except NameError:
pass
in_async = False
try:
asyncio.get_running_loop()
in_async = True
except Exception: # Quietly fail if any exception occurs during the check
pass
if in_async:
logger.warning(
"It appears that you are using PRAW in an asynchronous"
" environment.\nIt is strongly recommended to use Async PRAW:"
" https://asyncpraw.readthedocs.io.\nSee"
" https://praw.readthedocs.io/en/latest/getting_started/multiple_instances.html#discord-bots-and-asynchronous-environments"
" for more info.\n",
)
def _check_for_update(self):
if UPDATE_CHECKER_MISSING:
return
if not Reddit.update_checked and self.config.check_for_updates:
update_check(__package__, __version__)
Reddit.update_checked = True
def _prepare_common_authorizer(self, authenticator):
if self._token_manager is not None:
warn(
"Token managers have been deprecated and will be removed in the near"
" future. See https://www.reddit.com/r/redditdev/comments/olk5e6/"
"followup_oauth2_api_changes_regarding_refresh/ for more details.",
category=DeprecationWarning,
stacklevel=2,
)
if self.config.refresh_token:
raise TypeError(
"``refresh_token`` setting cannot be provided when providing"
" ``token_manager``"
)
self._token_manager.reddit = self
authorizer = Authorizer(
authenticator,
post_refresh_callback=self._token_manager.post_refresh_callback,
pre_refresh_callback=self._token_manager.pre_refresh_callback,
)
elif self.config.refresh_token:
authorizer = Authorizer(
authenticator, refresh_token=self.config.refresh_token
)
else:
self._core = self._read_only_core
return
self._core = self._authorized_core = session(authorizer)
def _prepare_objector(self):
mappings = {
self.config.kinds["comment"]: models.Comment,
self.config.kinds["message"]: models.Message,
self.config.kinds["redditor"]: models.Redditor,
self.config.kinds["submission"]: models.Submission,
self.config.kinds["subreddit"]: models.Subreddit,
self.config.kinds["trophy"]: models.Trophy,
"Button": models.Button,
"Collection": models.Collection,
"Draft": models.Draft,
"DraftList": models.DraftList,
"Image": models.Image,
"LabeledMulti": models.Multireddit,
"Listing": models.Listing,
"LiveUpdate": models.LiveUpdate,
"LiveUpdateEvent": models.LiveThread,
"MenuLink": models.MenuLink,
"ModeratedList": models.ModeratedList,
"ModmailAction": models.ModmailAction,
"ModmailConversation": models.ModmailConversation,
"ModmailConversations-list": models.ModmailConversationsListing,
"ModmailMessage": models.ModmailMessage,
"Submenu": models.Submenu,
"TrophyList": models.TrophyList,
"UserList": models.RedditorList,
"UserSubreddit": models.UserSubreddit,
"button": models.ButtonWidget,
"calendar": models.Calendar,
"community-list": models.CommunityList,
"custom": models.CustomWidget,
"id-card": models.IDCard,
"image": models.ImageWidget,
"menu": models.Menu,
"modaction": models.ModAction,
"moderator-list": models.ModeratorListing,
"moderators": models.ModeratorsWidget,
"more": models.MoreComments,
"post-flair": models.PostFlairWidget,
"rule": models.Rule,
"stylesheet": models.Stylesheet,
"subreddit-rules": models.RulesWidget,
"textarea": models.TextArea,
"widget": models.Widget,
}
self._objector = Objector(self, mappings)
def _prepare_prawcore(self, *, requestor_class=None, requestor_kwargs=None):
requestor_class = requestor_class or Requestor
requestor_kwargs = requestor_kwargs or {}
requestor = requestor_class(
USER_AGENT_FORMAT.format(self.config.user_agent),
self.config.oauth_url,
self.config.reddit_url,
**requestor_kwargs,
)
if self.config.client_secret:
self._prepare_trusted_prawcore(requestor)
else:
self._prepare_untrusted_prawcore(requestor)
def _prepare_trusted_prawcore(self, requestor):
authenticator = TrustedAuthenticator(
requestor,
self.config.client_id,
self.config.client_secret,
self.config.redirect_uri,
)
read_only_authorizer = ReadOnlyAuthorizer(authenticator)
self._read_only_core = session(read_only_authorizer)
if self.config.username and self.config.password:
script_authorizer = ScriptAuthorizer(
authenticator, self.config.username, self.config.password
)
self._core = self._authorized_core = session(script_authorizer)
else:
self._prepare_common_authorizer(authenticator)
def _prepare_untrusted_prawcore(self, requestor):
authenticator = UntrustedAuthenticator(
requestor, self.config.client_id, self.config.redirect_uri
)
read_only_authorizer = DeviceIDAuthorizer(authenticator)
self._read_only_core = session(read_only_authorizer)
self._prepare_common_authorizer(authenticator)
@_deprecate_args("id", "url")
def comment(
self, # pylint: disable=invalid-name
id: Optional[str] = None, # pylint: disable=redefined-builtin
*,
url: Optional[str] = None,
):
"""Return a lazy instance of :class:`.Comment`.
:param id: The ID of the comment.
:param url: A permalink pointing to the comment.
.. note::
If you want to obtain the comment's replies, you will need to call
:meth:`~.Comment.refresh` on the returned :class:`.Comment`.
"""
return models.Comment(self, id=id, url=url)
def domain(self, domain: str):
"""Return an instance of :class:`.DomainListing`.
:param domain: The domain to obtain submission listings for.
"""
return models.DomainListing(self, domain)
@_deprecate_args("path", "params")
def get(
self,
path: str,
*,
params: Optional[Union[str, Dict[str, Union[str, int]]]] = None,
):
"""Return parsed objects returned from a GET request to ``path``.
:param path: The path to fetch.
:param params: The query parameters to add to the request (default: ``None``).
"""
return self._objectify_request(method="GET", params=params, path=path)
@_deprecate_args("fullnames", "url", "subreddits")
def info(
self,
*,
fullnames: Optional[Iterable[str]] = None,
subreddits: Optional[Iterable[Union["praw.models.Subreddit", str]]] = None,
url: Optional[str] = None,
) -> Generator[
Union["praw.models.Subreddit", "praw.models.Comment", "praw.models.Submission"],
None,
None,
]:
"""Fetch information about each item in ``fullnames``, ``url``, or ``subreddits``.
:param fullnames: A list of fullnames for comments, submissions, and/or
subreddits.
:param subreddits: A list of subreddit names or :class:`.Subreddit` objects to
retrieve subreddits from.
:param url: A url (as a string) to retrieve lists of link submissions from.
:returns: A generator that yields found items in their relative order.
Items that cannot be matched will not be generated. Requests will be issued in
batches for each 100 fullnames.
.. note::
For comments that are retrieved via this method, if you want to obtain its
replies, you will need to call :meth:`~.Comment.refresh` on the yielded
:class:`.Comment`.
.. note::
When using the URL option, it is important to be aware that URLs are treated
literally by Reddit's API. As such, the URLs ``"youtube.com"`` and
``"https://www.youtube.com"`` will provide a different set of submissions.
"""
none_count = (fullnames, url, subreddits).count(None)
if none_count != 2:
raise TypeError(
"Either `fullnames`, `url`, or `subreddits` must be provided."
)
is_using_fullnames = fullnames is not None
ids_or_names = fullnames if is_using_fullnames else subreddits
if ids_or_names is not None:
if isinstance(ids_or_names, str):
raise TypeError(
"`fullnames` and `subreddits` must be a non-str iterable."
)
api_parameter_name = "id" if is_using_fullnames else "sr_name"
def generator(names):
if is_using_fullnames:
iterable = iter(names)
else:
iterable = iter([str(item) for item in names])
while True:
chunk = list(islice(iterable, 100))
if not chunk:
break
params = {api_parameter_name: ",".join(chunk)}
for result in self.get(API_PATH["info"], params=params):
yield result
return generator(ids_or_names)
def generator(url):
params = {"url": url}
for result in self.get(API_PATH["info"], params=params):
yield result
return generator(url)
def _objectify_request(
self,
*,
data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
files: Optional[Dict[str, IO]] = None,
json: Optional[Dict[Any, Any]] = None,
method: str = "",
params: Optional[Union[str, Dict[str, str]]] = None,
path: str = "",
) -> Any:
"""Run a request through the ``Objector``.
:param data: Dictionary, bytes, or file-like object to send in the body of the
request (default: ``None``).
:param files: Dictionary, filename to file (like) object mapping (default:
``None``).
:param json: JSON-serializable object to send in the body of the request with a
Content-Type header of application/json (default: ``None``). If ``json`` is
provided, ``data`` should not be.
:param method: The HTTP method (e.g., ``"GET"``, ``"POST"``, ``"PUT"``,
``"DELETE"``).
:param params: The query parameters to add to the request (default: ``None``).
:param path: The path to fetch.
"""
return self._objector.objectify(
self.request(
data=data,
files=files,
json=json,
method=method,
params=params,
path=path,
)
)
def _handle_rate_limit(
self, exception: RedditAPIException
) -> Optional[Union[int, float]]:
for item in exception.items:
if item.error_type == "RATELIMIT":
amount_search = self._ratelimit_regex.search(item.message)
if not amount_search:
break
seconds = int(amount_search.group(1))
if amount_search.group(2).startswith("minute"):
seconds *= 60
elif amount_search.group(2).startswith("millisecond"):
seconds = 0
if seconds <= int(self.config.ratelimit_seconds):
sleep_seconds = seconds + 1
return sleep_seconds
return None
@_deprecate_args("path", "data", "json", "params")
def delete(
self,
path: str,
*,
data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
json: Optional[Dict[Any, Any]] = None,
params: Optional[Union[str, Dict[str, str]]] = None,
) -> Any:
"""Return parsed objects returned from a DELETE request to ``path``.
:param path: The path to fetch.
:param data: Dictionary, bytes, or file-like object to send in the body of the
request (default: ``None``).
:param json: JSON-serializable object to send in the body of the request with a
Content-Type header of application/json (default: ``None``). If ``json`` is
provided, ``data`` should not be.
:param params: The query parameters to add to the request (default: ``None``).
"""
return self._objectify_request(
data=data, json=json, method="DELETE", params=params, path=path
)
@_deprecate_args("path", "data", "json")
def patch(
self,
path: str,
*,
data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
json: Optional[Dict[Any, Any]] = None,
) -> Any:
"""Return parsed objects returned from a PATCH request to ``path``.
:param path: The path to fetch.
:param data: Dictionary, bytes, or file-like object to send in the body of the
request (default: ``None``).
:param json: JSON-serializable object to send in the body of the request with a
Content-Type header of application/json (default: ``None``). If ``json`` is
provided, ``data`` should not be.
"""
return self._objectify_request(data=data, json=json, method="PATCH", path=path)
@_deprecate_args("path", "data", "files", "params", "json")
def post(
self,
path: str,
*,
data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
files: Optional[Dict[str, IO]] = None,
json: Optional[Dict[Any, Any]] = None,
params: Optional[Union[str, Dict[str, str]]] = None,
) -> Any:
"""Return parsed objects returned from a POST request to ``path``.
:param path: The path to fetch.
:param data: Dictionary, bytes, or file-like object to send in the body of the
request (default: ``None``).
:param files: Dictionary, filename to file (like) object mapping (default:
``None``).
:param json: JSON-serializable object to send in the body of the request with a
Content-Type header of application/json (default: ``None``). If ``json`` is
provided, ``data`` should not be.
:param params: The query parameters to add to the request (default: ``None``).
"""
if json is None:
data = data or {}
attempts = 3
last_exception = None
while attempts > 0:
attempts -= 1
try:
return self._objectify_request(
data=data,
files=files,
json=json,
method="POST",
params=params,
path=path,
)
except RedditAPIException as exception:
last_exception = exception
seconds = self._handle_rate_limit(exception=exception)
if seconds is None:
break
second_string = "second" if seconds == 1 else "seconds"
logger.debug(f"Rate limit hit, sleeping for {seconds} {second_string}")
time.sleep(seconds)
raise last_exception
@_deprecate_args("path", "data", "json")
def put(
self,
path: str,
*,
data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
json: Optional[Dict[Any, Any]] = None,
):
"""Return parsed objects returned from a PUT request to ``path``.
:param path: The path to fetch.
:param data: Dictionary, bytes, or file-like object to send in the body of the
request (default: ``None``).
:param json: JSON-serializable object to send in the body of the request with a
Content-Type header of application/json (default: ``None``). If ``json`` is
provided, ``data`` should not be.
"""
return self._objectify_request(data=data, json=json, method="PUT", path=path)
@_deprecate_args("nsfw")
def random_subreddit(self, *, nsfw: bool = False) -> "praw.models.Subreddit":
"""Return a random lazy instance of :class:`.Subreddit`.
:param nsfw: Return a random NSFW (not safe for work) subreddit (default:
``False``).
"""
url = API_PATH["subreddit"].format(subreddit="randnsfw" if nsfw else "random")
path = None
try:
self.get(url, params={"unique": self._next_unique})
except Redirect as redirect:
path = redirect.path
return models.Subreddit(self, path.split("/")[2])
@_deprecate_args("name", "fullname")
def redditor(
self, name: Optional[str] = None, *, fullname: Optional[str] = None
) -> "praw.models.Redditor":
"""Return a lazy instance of :class:`.Redditor`.
:param name: The name of the redditor.
:param fullname: The fullname of the redditor, starting with ``t2_``.
Either ``name`` or ``fullname`` can be provided, but not both.
"""
return models.Redditor(self, name=name, fullname=fullname)
@_deprecate_args("method", "path", "params", "data", "files", "json")
def request(
self,
*,
data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
files: Optional[Dict[str, IO]] = None,
json: Optional[Dict[Any, Any]] = None,
method: str,
params: Optional[Union[str, Dict[str, Union[str, int]]]] = None,
path: str,
) -> Any:
"""Return the parsed JSON data returned from a request to URL.
:param data: Dictionary, bytes, or file-like object to send in the body of the
request (default: ``None``).
:param files: Dictionary, filename to file (like) object mapping (default:
``None``).
:param json: JSON-serializable object to send in the body of the request with a
Content-Type header of application/json (default: ``None``). If ``json`` is
provided, ``data`` should not be.
:param method: The HTTP method (e.g., ``"GET"``, ``"POST"``, ``"PUT"``,
``"DELETE"``).
:param params: The query parameters to add to the request (default: ``None``).
:param path: The path to fetch.
"""
if self.config.check_for_async:
self._check_for_async()
if data and json:
raise ClientException("At most one of `data` or `json` is supported.")
try:
return self._core.request(
data=data,
files=files,
json=json,
method=method,
params=params,
path=path,
)
except BadRequest as exception:
try:
data = exception.response.json()
except ValueError:
if exception.response.text:
data = {"reason": exception.response.text}
else:
raise exception
if set(data) == {"error", "message"}:
raise
explanation = data.get("explanation")
if "fields" in data:
assert len(data["fields"]) == 1
field = data["fields"][0]
else:
field = None
raise RedditAPIException(
[data["reason"], explanation, field]
) from exception
@_deprecate_args("id", "url")
def submission( # pylint: disable=invalid-name,redefined-builtin
self, id: Optional[str] = None, *, url: Optional[str] = None
) -> "praw.models.Submission":
"""Return a lazy instance of :class:`.Submission`.
:param id: A Reddit base36 submission ID, e.g., ``"2gmzqe"``.
:param url: A URL supported by :meth:`.Submission.id_from_url`.
Either ``id`` or ``url`` can be provided, but not both.
"""
return models.Submission(self, id=id, url=url)
def username_available(self, name: str) -> bool:
"""Check to see if the username is available.
For example, to check if the username ``bboe`` is available, try:
.. code-block:: python
reddit.username_available("bboe")
"""
return self._objectify_request(
method="GET", params={"user": name}, path=API_PATH["username_available"]
)
| 36.114433 | 143 | 0.587851 | import asyncio
import configparser
import os
import re
import time
from itertools import islice
from logging import getLogger
from typing import (
IO,
TYPE_CHECKING,
Any,
Dict,
Generator,
Iterable,
Optional,
Type,
Union,
)
from warnings import warn
from prawcore import (
Authorizer,
DeviceIDAuthorizer,
ReadOnlyAuthorizer,
Redirect,
Requestor,
ScriptAuthorizer,
TrustedAuthenticator,
UntrustedAuthenticator,
session,
)
from prawcore.exceptions import BadRequest
from . import models
from .config import Config
from .const import API_PATH, USER_AGENT_FORMAT, __version__
from .exceptions import (
ClientException,
MissingRequiredAttributeException,
RedditAPIException,
)
from .objector import Objector
from .util import _deprecate_args
from .util.token_manager import BaseTokenManager
try:
from update_checker import update_check
UPDATE_CHECKER_MISSING = False
except ImportError:
UPDATE_CHECKER_MISSING = True
if TYPE_CHECKING:
import praw
Comment = models.Comment
Redditor = models.Redditor
Submission = models.Submission
Subreddit = models.Subreddit
logger = getLogger("praw")
class Reddit:
update_checked = False
_ratelimit_regex = re.compile(r"([0-9]{1,3}) (milliseconds?|seconds?|minutes?)")
@property
def _next_unique(self) -> int:
value = self._unique_counter
self._unique_counter += 1
return value
@property
def read_only(self) -> bool:
return self._core == self._read_only_core
@read_only.setter
def read_only(self, value: bool) -> None:
if value:
self._core = self._read_only_core
elif self._authorized_core is None:
raise ClientException(
"read_only cannot be unset as only the ReadOnlyAuthorizer is available."
)
else:
self._core = self._authorized_core
@property
def validate_on_submit(self) -> bool:
value = self._validate_on_submit
if value is False:
warn(
"Reddit will check for validation on all posts around May-June 2020. It"
" is recommended to check for validation by setting"
" reddit.validate_on_submit to True.",
category=DeprecationWarning,
stacklevel=3,
)
return value
@validate_on_submit.setter
def validate_on_submit(self, val: bool):
self._validate_on_submit = val
def __enter__(self):
return self
def __exit__(self, *_args):
@_deprecate_args(
"site_name",
"config_interpolation",
"requestor_class",
"requestor_kwargs",
"token_manager",
)
def __init__(
self,
site_name: Optional[str] = None,
*,
config_interpolation: Optional[str] = None,
requestor_class: Optional[Type[Requestor]] = None,
requestor_kwargs: Optional[Dict[str, Any]] = None,
token_manager: Optional[BaseTokenManager] = None,
**config_settings: Optional[Union[str, bool]],
):
self._core = self._authorized_core = self._read_only_core = None
self._objector = None
self._token_manager = token_manager
self._unique_counter = 0
self._validate_on_submit = False
try:
config_section = site_name or os.getenv("praw_site") or "DEFAULT"
self.config = Config(
config_section, config_interpolation, **config_settings
)
except configparser.NoSectionError as exc:
help_message = (
"You provided the name of a praw.ini configuration which does not"
" exist.\n\nFor help with creating a Reddit instance,"
" visit\nhttps://praw.readthedocs.io/en/latest/code_overview/reddit_instance.html\n\nFor"
" help on configuring PRAW,"
" visit\nhttps://praw.readthedocs.io/en/latest/getting_started/configuration.html"
)
if site_name is not None:
exc.message += f"\n{help_message}"
raise
required_message = (
"Required configuration setting {!r} missing. \nThis setting can be"
" provided in a praw.ini file, as a keyword argument to the `Reddit` class"
" constructor, or as an environment variable."
)
for attribute in ("client_id", "user_agent"):
if getattr(self.config, attribute) in (self.config.CONFIG_NOT_SET, None):
raise MissingRequiredAttributeException(
required_message.format(attribute)
)
if self.config.client_secret is self.config.CONFIG_NOT_SET:
raise MissingRequiredAttributeException(
f"{required_message.format('client_secret')}\nFor installed"
" applications this value must be set to None via a keyword argument"
" to the `Reddit` class constructor."
)
self._check_for_update()
self._prepare_objector()
self._prepare_prawcore(
requestor_class=requestor_class, requestor_kwargs=requestor_kwargs
)
self.auth = models.Auth(self, None)
self.drafts = models.DraftHelper(self, None)
self.front = models.Front(self)
self.inbox = models.Inbox(self, None)
self.live = models.LiveHelper(self, None)
self.multireddit = models.MultiredditHelper(self, None)
self.redditors = models.Redditors(self, None)
self.subreddit = models.SubredditHelper(self, None)
self.subreddits = models.Subreddits(self, None)
self.user = models.User(self)
def _check_for_async(self):
if self.config.check_for_async:
try:
shell = get_ipython().__class__.__name__
if shell == "ZMQInteractiveShell":
return
except NameError:
pass
in_async = False
try:
asyncio.get_running_loop()
in_async = True
except Exception:
pass
if in_async:
logger.warning(
"It appears that you are using PRAW in an asynchronous"
" environment.\nIt is strongly recommended to use Async PRAW:"
" https://asyncpraw.readthedocs.io.\nSee"
" https://praw.readthedocs.io/en/latest/getting_started/multiple_instances.html#discord-bots-and-asynchronous-environments"
" for more info.\n",
)
def _check_for_update(self):
if UPDATE_CHECKER_MISSING:
return
if not Reddit.update_checked and self.config.check_for_updates:
update_check(__package__, __version__)
Reddit.update_checked = True
    def _prepare_common_authorizer(self, authenticator):
        # Choose the refresh-token source for the authorized session, in
        # priority order: a (deprecated) token manager, then a configured
        # ``refresh_token``, otherwise fall back to the read-only session.
        if self._token_manager is not None:
            warn(
                "Token managers have been deprecated and will be removed in the near"
                " future. See https://www.reddit.com/r/redditdev/comments/olk5e6/"
                "followup_oauth2_api_changes_regarding_refresh/ for more details.",
                category=DeprecationWarning,
                stacklevel=2,
            )
            # The two refresh-token sources are mutually exclusive.
            if self.config.refresh_token:
                raise TypeError(
                    "``refresh_token`` setting cannot be provided when providing"
                    " ``token_manager``"
                )
            self._token_manager.reddit = self
            authorizer = Authorizer(
                authenticator,
                post_refresh_callback=self._token_manager.post_refresh_callback,
                pre_refresh_callback=self._token_manager.pre_refresh_callback,
            )
        elif self.config.refresh_token:
            authorizer = Authorizer(
                authenticator, refresh_token=self.config.refresh_token
            )
        else:
            # No credentials for an authorized session: reuse the read-only
            # core (set up by the _prepare_*_prawcore callers) and stop here.
            self._core = self._read_only_core
            return
        self._core = self._authorized_core = session(authorizer)
    def _prepare_objector(self):
        """Build the Objector that maps Reddit API "kind" strings to model classes."""
        # Dynamic kinds (comment, message, ...) come from the config so they
        # can differ per site; the string keys below are fixed API kinds.
        mappings = {
            self.config.kinds["comment"]: models.Comment,
            self.config.kinds["message"]: models.Message,
            self.config.kinds["redditor"]: models.Redditor,
            self.config.kinds["submission"]: models.Submission,
            self.config.kinds["subreddit"]: models.Subreddit,
            self.config.kinds["trophy"]: models.Trophy,
            "Button": models.Button,
            "Collection": models.Collection,
            "Draft": models.Draft,
            "DraftList": models.DraftList,
            "Image": models.Image,
            "LabeledMulti": models.Multireddit,
            "Listing": models.Listing,
            "LiveUpdate": models.LiveUpdate,
            "LiveUpdateEvent": models.LiveThread,
            "MenuLink": models.MenuLink,
            "ModeratedList": models.ModeratedList,
            "ModmailAction": models.ModmailAction,
            "ModmailConversation": models.ModmailConversation,
            "ModmailConversations-list": models.ModmailConversationsListing,
            "ModmailMessage": models.ModmailMessage,
            "Submenu": models.Submenu,
            "TrophyList": models.TrophyList,
            "UserList": models.RedditorList,
            "UserSubreddit": models.UserSubreddit,
            "button": models.ButtonWidget,
            "calendar": models.Calendar,
            "community-list": models.CommunityList,
            "custom": models.CustomWidget,
            "id-card": models.IDCard,
            "image": models.ImageWidget,
            "menu": models.Menu,
            "modaction": models.ModAction,
            "moderator-list": models.ModeratorListing,
            "moderators": models.ModeratorsWidget,
            "more": models.MoreComments,
            "post-flair": models.PostFlairWidget,
            "rule": models.Rule,
            "stylesheet": models.Stylesheet,
            "subreddit-rules": models.RulesWidget,
            "textarea": models.TextArea,
            "widget": models.Widget,
        }
        self._objector = Objector(self, mappings)
def _prepare_prawcore(self, *, requestor_class=None, requestor_kwargs=None):
requestor_class = requestor_class or Requestor
requestor_kwargs = requestor_kwargs or {}
requestor = requestor_class(
USER_AGENT_FORMAT.format(self.config.user_agent),
self.config.oauth_url,
self.config.reddit_url,
**requestor_kwargs,
)
if self.config.client_secret:
self._prepare_trusted_prawcore(requestor)
else:
self._prepare_untrusted_prawcore(requestor)
def _prepare_trusted_prawcore(self, requestor):
authenticator = TrustedAuthenticator(
requestor,
self.config.client_id,
self.config.client_secret,
self.config.redirect_uri,
)
read_only_authorizer = ReadOnlyAuthorizer(authenticator)
self._read_only_core = session(read_only_authorizer)
if self.config.username and self.config.password:
script_authorizer = ScriptAuthorizer(
authenticator, self.config.username, self.config.password
)
self._core = self._authorized_core = session(script_authorizer)
else:
self._prepare_common_authorizer(authenticator)
def _prepare_untrusted_prawcore(self, requestor):
authenticator = UntrustedAuthenticator(
requestor, self.config.client_id, self.config.redirect_uri
)
read_only_authorizer = DeviceIDAuthorizer(authenticator)
self._read_only_core = session(read_only_authorizer)
self._prepare_common_authorizer(authenticator)
    @_deprecate_args("id", "url")
    def comment(
        self,
        id: Optional[str] = None,
        *,
        url: Optional[str] = None,
    ):
        """Return an instance of :class:`~praw.models.Comment` for ``id`` or ``url``.

        :param id: The ID of the comment.
        :param url: A permalink pointing to the comment.
        """
        return models.Comment(self, id=id, url=url)
    def domain(self, domain: str):
        """Return an instance of :class:`~praw.models.DomainListing`.

        :param domain: The domain to obtain submission listings for.
        """
        return models.DomainListing(self, domain)
@_deprecate_args("path", "params")
def get(
self,
path: str,
*,
params: Optional[Union[str, Dict[str, Union[str, int]]]] = None,
):
return self._objectify_request(method="GET", params=params, path=path)
    @_deprecate_args("fullnames", "url", "subreddits")
    def info(
        self,
        *,
        fullnames: Optional[Iterable[str]] = None,
        subreddits: Optional[Iterable[Union["praw.models.Subreddit", str]]] = None,
        url: Optional[str] = None,
    ) -> Generator[
        Union["praw.models.Subreddit", "praw.models.Comment", "praw.models.Submission"],
        None,
        None,
    ]:
        """Lazily fetch objects by fullname, by subreddit name, or by URL.

        Exactly one of ``fullnames``, ``subreddits``, or ``url`` must be
        provided; a generator over the fetched objects is returned.

        :raises TypeError: If not exactly one source argument is given, or if
            ``fullnames``/``subreddits`` is a plain string.
        """
        # Exactly one of the three sources must be non-None.
        none_count = (fullnames, url, subreddits).count(None)
        if none_count != 2:
            raise TypeError(
                "Either `fullnames`, `url`, or `subreddits` must be provided."
            )
        is_using_fullnames = fullnames is not None
        ids_or_names = fullnames if is_using_fullnames else subreddits
        if ids_or_names is not None:
            # A str is iterable too, so guard against the common mistake of
            # passing a single name instead of an iterable of names.
            if isinstance(ids_or_names, str):
                raise TypeError(
                    "`fullnames` and `subreddits` must be a non-str iterable."
                )
            api_parameter_name = "id" if is_using_fullnames else "sr_name"
            def generator(names):
                # Subreddit instances are converted via str(); fullnames are
                # used as-is. Requests are batched 100 names at a time.
                if is_using_fullnames:
                    iterable = iter(names)
                else:
                    iterable = iter([str(item) for item in names])
                while True:
                    chunk = list(islice(iterable, 100))
                    if not chunk:
                        break
                    params = {api_parameter_name: ",".join(chunk)}
                    for result in self.get(API_PATH["info"], params=params):
                        yield result
            return generator(ids_or_names)
        # URL lookup: single request, still exposed as a generator for a
        # uniform return type.
        def generator(url):
            params = {"url": url}
            for result in self.get(API_PATH["info"], params=params):
                yield result
        return generator(url)
def _objectify_request(
self,
*,
data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
files: Optional[Dict[str, IO]] = None,
json: Optional[Dict[Any, Any]] = None,
method: str = "",
params: Optional[Union[str, Dict[str, str]]] = None,
path: str = "",
) -> Any:
return self._objector.objectify(
self.request(
data=data,
files=files,
json=json,
method=method,
params=params,
path=path,
)
)
def _handle_rate_limit(
self, exception: RedditAPIException
) -> Optional[Union[int, float]]:
for item in exception.items:
if item.error_type == "RATELIMIT":
amount_search = self._ratelimit_regex.search(item.message)
if not amount_search:
break
seconds = int(amount_search.group(1))
if amount_search.group(2).startswith("minute"):
seconds *= 60
elif amount_search.group(2).startswith("millisecond"):
seconds = 0
if seconds <= int(self.config.ratelimit_seconds):
sleep_seconds = seconds + 1
return sleep_seconds
return None
@_deprecate_args("path", "data", "json", "params")
def delete(
self,
path: str,
*,
data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
json: Optional[Dict[Any, Any]] = None,
params: Optional[Union[str, Dict[str, str]]] = None,
) -> Any:
return self._objectify_request(
data=data, json=json, method="DELETE", params=params, path=path
)
@_deprecate_args("path", "data", "json")
def patch(
self,
path: str,
*,
data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
json: Optional[Dict[Any, Any]] = None,
) -> Any:
return self._objectify_request(data=data, json=json, method="PATCH", path=path)
    @_deprecate_args("path", "data", "files", "params", "json")
    def post(
        self,
        path: str,
        *,
        data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
        files: Optional[Dict[str, IO]] = None,
        json: Optional[Dict[Any, Any]] = None,
        params: Optional[Union[str, Dict[str, str]]] = None,
    ) -> Any:
        """Issue a POST request to ``path``, retrying on short rate limits.

        Up to 3 attempts are made; between attempts the method sleeps for the
        duration suggested by :meth:`_handle_rate_limit`. If the required
        wait exceeds the configured tolerance (``None`` returned), the last
        :class:`RedditAPIException` is re-raised immediately.
        """
        if json is None:
            # Reddit's form endpoints expect at least an empty form body.
            data = data or {}
        attempts = 3
        last_exception = None
        while attempts > 0:
            attempts -= 1
            try:
                return self._objectify_request(
                    data=data,
                    files=files,
                    json=json,
                    method="POST",
                    params=params,
                    path=path,
                )
            except RedditAPIException as exception:
                last_exception = exception
                seconds = self._handle_rate_limit(exception=exception)
                if seconds is None:
                    # Not a retryable rate limit (or wait too long): bail out
                    # and re-raise below.
                    break
                second_string = "second" if seconds == 1 else "seconds"
                logger.debug(f"Rate limit hit, sleeping for {seconds} {second_string}")
                time.sleep(seconds)
        raise last_exception
@_deprecate_args("path", "data", "json")
def put(
self,
path: str,
*,
data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
json: Optional[Dict[Any, Any]] = None,
):
return self._objectify_request(data=data, json=json, method="PUT", path=path)
    @_deprecate_args("nsfw")
    def random_subreddit(self, *, nsfw: bool = False) -> "praw.models.Subreddit":
        """Return a random lazy instance of :class:`~praw.models.Subreddit`.

        :param nsfw: Return a random NSFW (not safe for work) subreddit
            (default: ``False``).
        """
        url = API_PATH["subreddit"].format(subreddit="randnsfw" if nsfw else "random")
        path = None
        try:
            # The random-subreddit endpoint responds with a redirect to the
            # chosen subreddit, surfaced by prawcore as a Redirect exception.
            self.get(url, params={"unique": self._next_unique})
        except Redirect as redirect:
            path = redirect.path
        # NOTE(review): if the request ever returns without redirecting,
        # ``path`` stays None and the split below raises AttributeError —
        # presumably the endpoint always redirects; confirm.
        return models.Subreddit(self, path.split("/")[2])
    @_deprecate_args("name", "fullname")
    def redditor(
        self, name: Optional[str] = None, *, fullname: Optional[str] = None
    ) -> "praw.models.Redditor":
        """Return an instance of :class:`~praw.models.Redditor`.

        :param name: The name of the redditor.
        :param fullname: The fullname of the redditor.
        """
        return models.Redditor(self, name=name, fullname=fullname)
    @_deprecate_args("method", "path", "params", "data", "files", "json")
    def request(
        self,
        *,
        data: Optional[Union[Dict[str, Union[str, Any]], bytes, IO, str]] = None,
        files: Optional[Dict[str, IO]] = None,
        json: Optional[Dict[Any, Any]] = None,
        method: str,
        params: Optional[Union[str, Dict[str, Union[str, int]]]] = None,
        path: str,
    ) -> Any:
        """Make a raw request to the Reddit API via the active prawcore session.

        :param method: The HTTP method (e.g., ``"GET"``).
        :param path: The path of the request.
        :raises ClientException: If both ``data`` and ``json`` are given.
        :raises RedditAPIException: For HTTP 400 responses whose body carries
            a Reddit error payload (translated below).
        """
        if self.config.check_for_async:
            self._check_for_async()
        if data and json:
            raise ClientException("At most one of `data` or `json` is supported.")
        try:
            return self._core.request(
                data=data,
                files=files,
                json=json,
                method=method,
                params=params,
                path=path,
            )
        except BadRequest as exception:
            # Translate a 400 response body into a RedditAPIException where
            # possible. Note: ``data`` (the request body) is deliberately
            # reused here to hold the parsed error payload.
            try:
                data = exception.response.json()
            except ValueError:
                if exception.response.text:
                    data = {"reason": exception.response.text}
                else:
                    # Empty, non-JSON body: nothing to translate.
                    raise exception
            if set(data) == {"error", "message"}:
                # Generic error shape with no "reason": re-raise BadRequest.
                raise
            explanation = data.get("explanation")
            if "fields" in data:
                # NOTE(review): assumes Reddit only ever reports one offending
                # field; ``assert`` is stripped under ``python -O``.
                assert len(data["fields"]) == 1
                field = data["fields"][0]
            else:
                field = None
            raise RedditAPIException(
                [data["reason"], explanation, field]
            ) from exception
    @_deprecate_args("id", "url")
    def submission(
        self, id: Optional[str] = None, *, url: Optional[str] = None
    ) -> "praw.models.Submission":
        """Return an instance of :class:`~praw.models.Submission` for ``id`` or ``url``.

        :param id: The ID of the submission.
        :param url: A URL pointing to the submission.
        """
        return models.Submission(self, id=id, url=url)
def username_available(self, name: str) -> bool:
return self._objectify_request(
method="GET", params={"user": name}, path=API_PATH["username_available"]
)
| true | true |
f7fd7c9c4713370a2d7d19701f523b768973eb7f | 1,483 | py | Python | venv/lib/python3.8/site-packages/vsts/work_item_tracking_process_definitions/v4_1/models/work_item_state_input_model.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/vsts/work_item_tracking_process_definitions/v4_1/models/work_item_state_input_model.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/vsts/work_item_tracking_process_definitions/v4_1/models/work_item_state_input_model.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class WorkItemStateInputModel(Model):
    """Input model describing a work item state.

    :param color: Color of the state
    :type color: str
    :param name: Name of the state
    :type name: str
    :param order: Order in which state should appear
    :type order: int
    :param state_category: Category of the state
    :type state_category: str
    """

    # Serialization map consumed by msrest: attribute -> wire key and type.
    _attribute_map = {
        'color': {'key': 'color', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'order': {'key': 'order', 'type': 'int'},
        'state_category': {'key': 'stateCategory', 'type': 'str'}
    }

    def __init__(self, color=None, name=None, order=None, state_category=None):
        super(WorkItemStateInputModel, self).__init__()
        # Assign each state attribute exactly as provided by the caller.
        for attribute, value in (
            ("color", color),
            ("name", name),
            ("order", order),
            ("state_category", state_category),
        ):
            setattr(self, attribute, value)
| 39.026316 | 95 | 0.513823 |
from msrest.serialization import Model
class WorkItemStateInputModel(Model):
    """Input model describing a work item state.

    :param color: Color of the state
    :param name: Name of the state
    :param order: Order in which state should appear
    :param state_category: Category of the state
    """
    # Serialization map consumed by msrest: attribute -> wire key and type.
    _attribute_map = {
        'color': {'key': 'color', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'order': {'key': 'order', 'type': 'int'},
        'state_category': {'key': 'stateCategory', 'type': 'str'}
    }
    def __init__(self, color=None, name=None, order=None, state_category=None):
        super(WorkItemStateInputModel, self).__init__()
        self.color = color
        self.name = name
        self.order = order
        self.state_category = state_category
| true | true |
f7fd7ca8d0f5e73967a4f5b5743658a0a4422e31 | 186 | py | Python | lib/matplotlib/tri/__init__.py | pierre-haessig/matplotlib | 0d945044ca3fbf98cad55912584ef80911f330c6 | [
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | 8 | 2021-12-14T21:30:01.000Z | 2022-02-14T11:30:03.000Z | lib/matplotlib/tri/__init__.py | pierre-haessig/matplotlib | 0d945044ca3fbf98cad55912584ef80911f330c6 | [
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | lib/matplotlib/tri/__init__.py | pierre-haessig/matplotlib | 0d945044ca3fbf98cad55912584ef80911f330c6 | [
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | 3 | 2017-05-31T01:42:22.000Z | 2020-06-23T13:57:49.000Z | """
Unstructured triangular grid functions.
"""
from __future__ import print_function
from triangulation import *
from tricontour import *
from tripcolor import *
from triplot import *
| 18.6 | 39 | 0.795699 |
from __future__ import print_function
from triangulation import *
from tricontour import *
from tripcolor import *
from triplot import *
| true | true |
f7fd7cd4894f14adfd39d9d3acfd511035572d38 | 27,769 | py | Python | examples/inc/pytorch/multiple-choice/run_swag.py | michaelbenayoun/optimum | 21c5809577e2ef5687f293d31d1d3e28288e1bb7 | [
"Apache-2.0"
] | null | null | null | examples/inc/pytorch/multiple-choice/run_swag.py | michaelbenayoun/optimum | 21c5809577e2ef5687f293d31d1d3e28288e1bb7 | [
"Apache-2.0"
] | null | null | null | examples/inc/pytorch/multiple-choice/run_swag.py | michaelbenayoun/optimum | 21c5809577e2ef5687f293d31d1d3e28288e1bb7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
# Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for multiple choice.
"""
# You can also adapt this script on your own multiple choice task. Pointers for this are left as comments.
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional, Union
import datasets
import numpy as np
import torch
import transformers
from datasets import load_dataset
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.fx import symbolic_trace
import yaml
from optimum.intel.neural_compressor import (
IncOptimizer,
IncPruner,
IncPruningConfig,
IncQuantizationConfig,
IncQuantizationMode,
IncQuantizer,
IncTrainer,
)
from optimum.intel.neural_compressor.quantization import IncQuantizedModelForMultipleChoice
from optimum.intel.neural_compressor.utils import CONFIG_NAME
os.environ["CUDA_VISIBLE_DEVICES"] = ""
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.12.0")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    # Only required argument: hub identifier or local path of the base model.
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
            "with private models)."
        },
    )
@dataclass
class OptimizationArguments:
    """
    Arguments pertaining to what type of optimization we are going to apply on the model.
    """

    # Intel Neural Compressor (INC) quantization switches.
    quantize: bool = field(
        default=False,
        metadata={"help": "Whether or not to apply quantization."},
    )
    quantization_approach: Optional[str] = field(
        default=None,
        metadata={"help": "Quantization approach. Supported approach are static, dynamic and aware_training."},
    )
    # INC pruning switches.
    prune: bool = field(
        default=False,
        metadata={"help": "Whether or not to apply pruning."},
    )
    target_sparsity: Optional[float] = field(
        default=None,
        metadata={"help": "Targeted sparsity when pruning the model."},
    )
    # Paths to the YAML files driving INC's tuning behavior; a bundled
    # default config is used when these are left unset (see main()).
    quantization_config: Optional[str] = field(
        default=None,
        metadata={
            "help": "Path to the directory containing the YAML configuration file used to control the quantization and "
            "tuning behavior."
        },
    )
    pruning_config: Optional[str] = field(
        default=None,
        metadata={
            "help": "Path to the directory containing the YAML configuration file used to control the pruning behavior."
        },
    )
    tune_metric: str = field(
        default="eval_accuracy",
        metadata={"help": "Metric used for the tuning strategy."},
    )
    perf_tol: Optional[float] = field(
        default=None,
        metadata={"help": "Performance tolerance when optimizing the model."},
    )
    verify_loading: bool = field(
        default=False,
        metadata={"help": "Whether or not to verify the loading of the quantized model."},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": "The maximum total input sequence length after tokenization. If passed, sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": "Whether to pad all samples to the maximum sentence length. "
            "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
            "efficient on GPU but very bad for TPU."
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
            "value if set."
        },
    )

    @staticmethod
    def _check_file_extension(arg_name, file_path):
        """Raise ``ValueError`` unless *file_path* is None or ends in csv/json."""
        if file_path is None:
            return
        extension = file_path.split(".")[-1]
        if extension not in ("csv", "json"):
            raise ValueError(f"`{arg_name}` should be a csv or a json file.")

    def __post_init__(self):
        # Validate user-supplied dataset files at argument-parsing time rather
        # than deep inside load_dataset. ``raise`` is used instead of the
        # original ``assert``, which is silently stripped under ``python -O``.
        self._check_file_extension("train_file", self.train_file)
        self._check_file_extension("validation_file", self.validation_file)
@dataclass
class DataCollatorForMultipleChoice:
    """
    Data collator that will dynamically pad the inputs for multiple choice received.
    Args:
        tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):
            The tokenizer used for encoding the data.
        padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`True`):
            Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
            among:
            * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
              sequence if provided).
            * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
              maximum acceptable input length for the model if that argument is not provided.
            * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
              different lengths).
        max_length (:obj:`int`, `optional`):
            Maximum length of the returned list and optionally padding length (see above).
        pad_to_multiple_of (:obj:`int`, `optional`):
            If set will pad the sequence to a multiple of the provided value.
            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.5 (Volta).
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        """Collate multiple-choice features into a single padded batch.

        Each feature carries ``num_choices`` parallel encodings; they are
        flattened into ``batch_size * num_choices`` rows, padded together,
        then reshaped back to ``(batch_size, num_choices, seq_len)``.
        """
        label_name = "label" if "label" in features[0].keys() else "labels"
        # Labels are popped so the tokenizer only pads token-level fields.
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        # Flatten to one dict per (example, choice) pair. A nested
        # comprehension replaces the original ``sum(lists, [])``, which is
        # quadratic in the number of examples.
        flattened_features = [
            {k: v[i] for k, v in feature.items()}
            for feature in features
            for i in range(num_choices)
        ]
        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        # Un-flatten back to (batch_size, num_choices, seq_len).
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels.
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments, OptimizationArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args, optim_args = parser.parse_json_file(
json_file=os.path.abspath(sys.argv[1])
)
else:
model_args, data_args, training_args, optim_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.train_file.split(".")[-1]
raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
else:
# Downloading and loading the swag dataset from the hub.
raw_datasets = load_dataset("swag", "regular", cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# When using your own dataset or a different dataset from swag, you will probably need to change this.
ending_names = [f"ending{i}" for i in range(4)]
context_name = "sent1"
question_header_name = "sent2"
if data_args.max_seq_length is None:
max_seq_length = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --max_seq_length xxx."
)
max_seq_length = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Preprocessing the datasets.
def preprocess_function(examples):
first_sentences = [[context] * 4 for context in examples[context_name]]
question_headers = examples[question_header_name]
second_sentences = [
[f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
]
# Flatten out
first_sentences = sum(first_sentences, [])
second_sentences = sum(second_sentences, [])
# Tokenize
tokenized_examples = tokenizer(
first_sentences,
second_sentences,
truncation=True,
max_length=max_seq_length,
padding="max_length" if data_args.pad_to_max_length else False,
)
# Un-flatten
return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = raw_datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
with training_args.main_process_first(desc="train dataset map pre-processing"):
train_dataset = train_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
with training_args.main_process_first(desc="validation dataset map pre-processing"):
eval_dataset = eval_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
# Data collator
data_collator = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
)
# Metric
def compute_metrics(eval_predictions):
predictions, label_ids = eval_predictions
preds = np.argmax(predictions, axis=1)
return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}
# Initialize our Trainer
trainer = IncTrainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics,
)
eval_dataloader = trainer.get_eval_dataloader()
it = iter(eval_dataloader)
try:
input_names = next(it).keys()
except StopIteration:
input_names = None
logger.warning(
"Unable to determine the names of the inputs of the model to trace, input_names is set to None and "
"model.dummy_inputs().keys() will be used instead."
)
resume_from_checkpoint = training_args.resume_from_checkpoint
metric_name = optim_args.tune_metric
    def take_eval_steps(model, trainer, metric_name, save_metrics=False):
        """Evaluate `model` via `trainer` and return the value of `metric_name`.

        Args:
            model: The (possibly optimized/traced) model to evaluate.
            trainer: The IncTrainer instance used to run evaluation.
            metric_name: Key to extract from the metrics dict (e.g. "eval_accuracy").
            save_metrics: If True, persist the full metrics dict via
                `trainer.save_metrics("eval", ...)`.

        Returns:
            The metric value, or None if `metric_name` is absent from the
            metrics dict.
        """
        # Point the trainer at the model under evaluation (it may differ from
        # the one the trainer was constructed with, e.g. a traced/quantized copy).
        trainer.model = model
        metrics = trainer.evaluate()
        if save_metrics:
            trainer.save_metrics("eval", metrics)
        logger.info("{}: {}".format(metric_name, metrics.get(metric_name)))
        logger.info("Throughput: {} samples/sec".format(metrics.get("eval_samples_per_second")))
        return metrics.get(metric_name)
def eval_func(model):
return take_eval_steps(model, trainer, metric_name)
def take_train_steps(model, trainer, resume_from_checkpoint, last_checkpoint):
trainer.model_wrapped = model
trainer.model = model
checkpoint = None
if resume_from_checkpoint is not None:
checkpoint = resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(pruner, resume_from_checkpoint=checkpoint)
metrics = train_result.metrics
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
def train_func(model):
return take_train_steps(model, trainer, resume_from_checkpoint, last_checkpoint)
quantizer = None
pruner = None
num_choices = len(eval_dataset[0]["input_ids"])
if not optim_args.quantize and not optim_args.prune:
raise ValueError("quantize and prune are both set to False.")
result_baseline_model = take_eval_steps(model, trainer, metric_name)
default_config = os.path.join(os.path.abspath(os.path.join(__file__, os.path.pardir, os.path.pardir)), "config")
if optim_args.quantize:
if not training_args.do_eval:
raise ValueError("do_eval must be set to True for quantization.")
q8_config = IncQuantizationConfig.from_pretrained(
optim_args.quantization_config if optim_args.quantization_config is not None else default_config,
config_file_name="quantization.yml",
cache_dir=model_args.cache_dir,
)
# Set metric tolerance if specified
if optim_args.perf_tol is not None:
q8_config.set_tolerance(optim_args.perf_tol)
# Set quantization approach if specified
if optim_args.quantization_approach is not None:
supported_approach = {"static", "dynamic", "aware_training"}
if optim_args.quantization_approach not in supported_approach:
raise ValueError(
"Unknown quantization approach. Supported approach are " + ", ".join(supported_approach)
)
quant_approach = getattr(IncQuantizationMode, optim_args.quantization_approach.upper()).value
q8_config.set_config("quantization.approach", quant_approach)
# torch FX used for post-training quantization and quantization aware training
# dynamic quantization will be added when torch FX is more mature
if q8_config.get_config("quantization.approach") != IncQuantizationMode.DYNAMIC.value:
if not training_args.do_train:
raise ValueError("do_train must be set to True for static and aware training quantization.")
# TODO : Remove when dynamic axes support
if (
not training_args.dataloader_drop_last
and eval_dataset.shape[0] % training_args.per_device_eval_batch_size != 0
):
raise ValueError(
"The number of samples of the dataset is not a multiple of the batch size."
"Use --dataloader_drop_last to overcome."
)
if not data_args.pad_to_max_length:
raise ValueError(
"All the samples must have the same sequence length, use --pad_to_max_length to overcome."
)
q8_config.set_config("model.framework", "pytorch_fx")
model.config.save_pretrained(training_args.output_dir)
model = symbolic_trace(
model,
input_names=input_names,
batch_size=training_args.per_device_eval_batch_size,
sequence_length=max_seq_length,
num_choices=num_choices,
)
calib_dataloader = trainer.get_train_dataloader()
inc_quantizer = IncQuantizer(
model, q8_config, eval_func=eval_func, train_func=train_func, calib_dataloader=calib_dataloader
)
quantizer = inc_quantizer.fit()
if optim_args.prune:
if not training_args.do_train:
raise ValueError("do_train must be set to True for pruning.")
pruning_config = IncPruningConfig.from_pretrained(
optim_args.pruning_config if optim_args.pruning_config is not None else default_config,
config_file_name="prune.yml",
cache_dir=model_args.cache_dir,
)
# Set targeted sparsity if specified
if optim_args.target_sparsity is not None:
pruning_config.set_config(
"pruning.approach.weight_compression.target_sparsity", optim_args.target_sparsity
)
pruning_start_epoch = pruning_config.get_config("pruning.approach.weight_compression.start_epoch")
pruning_end_epoch = pruning_config.get_config("pruning.approach.weight_compression.end_epoch")
if pruning_start_epoch > training_args.num_train_epochs - 1:
logger.warning(
f"Pruning end epoch {pruning_start_epoch} is higher than the total number of training epoch "
f"{training_args.num_train_epochs}. No pruning will be applied."
)
if pruning_end_epoch > training_args.num_train_epochs - 1:
logger.warning(
f"Pruning end epoch {pruning_end_epoch} is higher than the total number of training epoch "
f"{training_args.num_train_epochs}. The target sparsity will not be reached."
)
inc_pruner = IncPruner(model, pruning_config, eval_func=eval_func, train_func=train_func)
# Creation Pruning object used for IncTrainer training loop
pruner = inc_pruner.fit()
inc_optimizer = IncOptimizer(model, quantizer=quantizer, pruner=pruner)
opt_model = inc_optimizer.fit()
_, sparsity = opt_model.report_sparsity()
result_opt_model = take_eval_steps(opt_model.model, trainer, metric_name, save_metrics=True)
trainer.save_model(training_args.output_dir)
with open(os.path.join(training_args.output_dir, CONFIG_NAME), "w") as f:
yaml.dump(opt_model.tune_cfg, f, default_flow_style=False)
logger.info(
f"Optimized model with final sparsity of {sparsity} and {metric_name} of {result_opt_model} saved to: "
f"{training_args.output_dir}. Original model had an {metric_name} of {result_baseline_model}"
)
if optim_args.quantize and optim_args.verify_loading:
# Load the model obtained after Intel Neural Compressor (INC) quantization
loaded_model = IncQuantizedModelForMultipleChoice.from_pretrained(
training_args.output_dir,
input_names=input_names,
batch_size=training_args.per_device_eval_batch_size,
sequence_length=max_seq_length,
num_choices=num_choices,
)
loaded_model.eval()
result_loaded_model = take_eval_steps(loaded_model, trainer, metric_name)
if result_loaded_model != result_opt_model:
raise ValueError("The quantized model was not successfully loaded.")
else:
logger.info(f"The quantized model was successfully loaded.")
def _mp_fn(index):
    """Per-process entry point for xla_spawn (TPU training).

    `index` is the process index assigned by xla_spawn; it is unused because
    `main()` reads all of its configuration from the command line.
    """
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 41.94713 | 132 | 0.674961 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional, Union
import datasets
import numpy as np
import torch
import transformers
from datasets import load_dataset
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.fx import symbolic_trace
import yaml
from optimum.intel.neural_compressor import (
IncOptimizer,
IncPruner,
IncPruningConfig,
IncQuantizationConfig,
IncQuantizationMode,
IncQuantizer,
IncTrainer,
)
from optimum.intel.neural_compressor.quantization import IncQuantizedModelForMultipleChoice
from optimum.intel.neural_compressor.utils import CONFIG_NAME
os.environ["CUDA_VISIBLE_DEVICES"] = ""
check_min_version("4.12.0")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
@dataclass
class OptimizationArguments:
quantize: bool = field(
default=False,
metadata={"help": "Whether or not to apply quantization."},
)
quantization_approach: Optional[str] = field(
default=None,
metadata={"help": "Quantization approach. Supported approach are static, dynamic and aware_training."},
)
prune: bool = field(
default=False,
metadata={"help": "Whether or not to apply pruning."},
)
target_sparsity: Optional[float] = field(
default=None,
metadata={"help": "Targeted sparsity when pruning the model."},
)
quantization_config: Optional[str] = field(
default=None,
metadata={
"help": "Path to the directory containing the YAML configuration file used to control the quantization and "
"tuning behavior."
},
)
pruning_config: Optional[str] = field(
default=None,
metadata={
"help": "Path to the directory containing the YAML configuration file used to control the pruning behavior."
},
)
tune_metric: str = field(
default="eval_accuracy",
metadata={"help": "Metric used for the tuning strategy."},
)
perf_tol: Optional[float] = field(
default=None,
metadata={"help": "Performance tolerance when optimizing the model."},
)
verify_loading: bool = field(
default=False,
metadata={"help": "Whether or not to verify the loading of the quantized model."},
)
@dataclass
class DataTrainingArguments:
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
max_seq_length: Optional[int] = field(
default=None,
metadata={
"help": "The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": "Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
def __post_init__(self):
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    """
    Collator that dynamically pads tokenized multiple-choice features.

    Each incoming feature holds one tokenized sequence per answer choice; the
    collator flattens the (example, choice) pairs, pads them together with
    the tokenizer, then reshapes back to (batch_size, num_choices, seq_len)
    and attaches the integer labels tensor.
    """
    # Tokenizer whose `pad` method performs the actual padding.
    tokenizer: PreTrainedTokenizerBase
    # Padding strategy forwarded to `tokenizer.pad`.
    padding: Union[bool, str, PaddingStrategy] = True
    # Target length for padding/truncation (None = pad to longest in batch).
    max_length: Optional[int] = None
    # Pad lengths to a multiple of this value (e.g. 8 for fp16 tensor cores).
    pad_to_multiple_of: Optional[int] = None
    def __call__(self, features):
        """Collate a list of per-example feature dicts into a padded batch."""
        # The label column may be named either "label" or "labels".
        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        # Flatten (example, choice) pairs into a single list so the tokenizer
        # can pad all sequences in one call.
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = sum(flattened_features, [])
        batch = self.tokenizer.pad(
            flattened_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        # Restore the (batch, choice, seq) layout the model expects.
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments, OptimizationArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# let's parse it to get our arguments.
model_args, data_args, training_args, optim_args = parser.parse_json_file(
json_file=os.path.abspath(sys.argv[1])
)
else:
model_args, data_args, training_args, optim_args = parser.parse_args_into_dataclasses()
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
set_seed(training_args.seed)
if data_args.train_file is not None or data_args.validation_file is not None:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.train_file.split(".")[-1]
raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
else:
raw_datasets = load_dataset("swag", "regular", cache_dir=model_args.cache_dir)
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
ending_names = [f"ending{i}" for i in range(4)]
context_name = "sent1"
question_header_name = "sent2"
if data_args.max_seq_length is None:
max_seq_length = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --max_seq_length xxx."
)
max_seq_length = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
def preprocess_function(examples):
first_sentences = [[context] * 4 for context in examples[context_name]]
question_headers = examples[question_header_name]
second_sentences = [
[f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
]
first_sentences = sum(first_sentences, [])
second_sentences = sum(second_sentences, [])
tokenized_examples = tokenizer(
first_sentences,
second_sentences,
truncation=True,
max_length=max_seq_length,
padding="max_length" if data_args.pad_to_max_length else False,
)
return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = raw_datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
with training_args.main_process_first(desc="train dataset map pre-processing"):
train_dataset = train_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
with training_args.main_process_first(desc="validation dataset map pre-processing"):
eval_dataset = eval_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
data_collator = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
)
def compute_metrics(eval_predictions):
predictions, label_ids = eval_predictions
preds = np.argmax(predictions, axis=1)
return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}
trainer = IncTrainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics,
)
eval_dataloader = trainer.get_eval_dataloader()
it = iter(eval_dataloader)
try:
input_names = next(it).keys()
except StopIteration:
input_names = None
logger.warning(
"Unable to determine the names of the inputs of the model to trace, input_names is set to None and "
"model.dummy_inputs().keys() will be used instead."
)
resume_from_checkpoint = training_args.resume_from_checkpoint
metric_name = optim_args.tune_metric
def take_eval_steps(model, trainer, metric_name, save_metrics=False):
trainer.model = model
metrics = trainer.evaluate()
if save_metrics:
trainer.save_metrics("eval", metrics)
logger.info("{}: {}".format(metric_name, metrics.get(metric_name)))
logger.info("Throughput: {} samples/sec".format(metrics.get("eval_samples_per_second")))
return metrics.get(metric_name)
def eval_func(model):
return take_eval_steps(model, trainer, metric_name)
def take_train_steps(model, trainer, resume_from_checkpoint, last_checkpoint):
trainer.model_wrapped = model
trainer.model = model
checkpoint = None
if resume_from_checkpoint is not None:
checkpoint = resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(pruner, resume_from_checkpoint=checkpoint)
metrics = train_result.metrics
trainer.save_model()
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
def train_func(model):
return take_train_steps(model, trainer, resume_from_checkpoint, last_checkpoint)
quantizer = None
pruner = None
num_choices = len(eval_dataset[0]["input_ids"])
if not optim_args.quantize and not optim_args.prune:
raise ValueError("quantize and prune are both set to False.")
result_baseline_model = take_eval_steps(model, trainer, metric_name)
default_config = os.path.join(os.path.abspath(os.path.join(__file__, os.path.pardir, os.path.pardir)), "config")
if optim_args.quantize:
if not training_args.do_eval:
raise ValueError("do_eval must be set to True for quantization.")
q8_config = IncQuantizationConfig.from_pretrained(
optim_args.quantization_config if optim_args.quantization_config is not None else default_config,
config_file_name="quantization.yml",
cache_dir=model_args.cache_dir,
)
if optim_args.perf_tol is not None:
q8_config.set_tolerance(optim_args.perf_tol)
if optim_args.quantization_approach is not None:
supported_approach = {"static", "dynamic", "aware_training"}
if optim_args.quantization_approach not in supported_approach:
raise ValueError(
"Unknown quantization approach. Supported approach are " + ", ".join(supported_approach)
)
quant_approach = getattr(IncQuantizationMode, optim_args.quantization_approach.upper()).value
q8_config.set_config("quantization.approach", quant_approach)
if q8_config.get_config("quantization.approach") != IncQuantizationMode.DYNAMIC.value:
if not training_args.do_train:
raise ValueError("do_train must be set to True for static and aware training quantization.")
if (
not training_args.dataloader_drop_last
and eval_dataset.shape[0] % training_args.per_device_eval_batch_size != 0
):
raise ValueError(
"The number of samples of the dataset is not a multiple of the batch size."
"Use --dataloader_drop_last to overcome."
)
if not data_args.pad_to_max_length:
raise ValueError(
"All the samples must have the same sequence length, use --pad_to_max_length to overcome."
)
q8_config.set_config("model.framework", "pytorch_fx")
model.config.save_pretrained(training_args.output_dir)
model = symbolic_trace(
model,
input_names=input_names,
batch_size=training_args.per_device_eval_batch_size,
sequence_length=max_seq_length,
num_choices=num_choices,
)
calib_dataloader = trainer.get_train_dataloader()
inc_quantizer = IncQuantizer(
model, q8_config, eval_func=eval_func, train_func=train_func, calib_dataloader=calib_dataloader
)
quantizer = inc_quantizer.fit()
if optim_args.prune:
if not training_args.do_train:
raise ValueError("do_train must be set to True for pruning.")
pruning_config = IncPruningConfig.from_pretrained(
optim_args.pruning_config if optim_args.pruning_config is not None else default_config,
config_file_name="prune.yml",
cache_dir=model_args.cache_dir,
)
if optim_args.target_sparsity is not None:
pruning_config.set_config(
"pruning.approach.weight_compression.target_sparsity", optim_args.target_sparsity
)
pruning_start_epoch = pruning_config.get_config("pruning.approach.weight_compression.start_epoch")
pruning_end_epoch = pruning_config.get_config("pruning.approach.weight_compression.end_epoch")
if pruning_start_epoch > training_args.num_train_epochs - 1:
logger.warning(
f"Pruning end epoch {pruning_start_epoch} is higher than the total number of training epoch "
f"{training_args.num_train_epochs}. No pruning will be applied."
)
if pruning_end_epoch > training_args.num_train_epochs - 1:
logger.warning(
f"Pruning end epoch {pruning_end_epoch} is higher than the total number of training epoch "
f"{training_args.num_train_epochs}. The target sparsity will not be reached."
)
inc_pruner = IncPruner(model, pruning_config, eval_func=eval_func, train_func=train_func)
pruner = inc_pruner.fit()
inc_optimizer = IncOptimizer(model, quantizer=quantizer, pruner=pruner)
opt_model = inc_optimizer.fit()
_, sparsity = opt_model.report_sparsity()
result_opt_model = take_eval_steps(opt_model.model, trainer, metric_name, save_metrics=True)
trainer.save_model(training_args.output_dir)
with open(os.path.join(training_args.output_dir, CONFIG_NAME), "w") as f:
yaml.dump(opt_model.tune_cfg, f, default_flow_style=False)
logger.info(
f"Optimized model with final sparsity of {sparsity} and {metric_name} of {result_opt_model} saved to: "
f"{training_args.output_dir}. Original model had an {metric_name} of {result_baseline_model}"
)
if optim_args.quantize and optim_args.verify_loading:
loaded_model = IncQuantizedModelForMultipleChoice.from_pretrained(
training_args.output_dir,
input_names=input_names,
batch_size=training_args.per_device_eval_batch_size,
sequence_length=max_seq_length,
num_choices=num_choices,
)
loaded_model.eval()
result_loaded_model = take_eval_steps(loaded_model, trainer, metric_name)
if result_loaded_model != result_opt_model:
raise ValueError("The quantized model was not successfully loaded.")
else:
logger.info(f"The quantized model was successfully loaded.")
def _mp_fn(index):
main()
if __name__ == "__main__":
main()
| true | true |
f7fd7cf4fb2362860c0f29984a3cc27dd564557e | 3,469 | py | Python | awx/main/dispatch/publish.py | sumit-21/awx | 966a62c6bf2ec0c672e076684341bc6bd75827af | [
"Apache-2.0"
] | 17 | 2021-04-03T01:40:17.000Z | 2022-03-03T11:45:20.000Z | awx/main/dispatch/publish.py | Saurabh-Thakre/awx | 8eb377a3ea8303c394ad4c958cc828c7239c1e11 | [
"Apache-2.0"
] | 24 | 2021-05-18T21:13:35.000Z | 2022-03-29T10:23:52.000Z | awx/main/dispatch/publish.py | hostinger/awx | dac01b14e2c04c201a162ea03ef8386d822e3923 | [
"Apache-2.0"
] | 24 | 2020-11-27T08:37:35.000Z | 2021-03-08T13:27:15.000Z | import inspect
import logging
import sys
import json
from uuid import uuid4
from django.conf import settings
from . import pg_bus_conn
logger = logging.getLogger('awx.main.dispatch')
def serialize_task(f):
    """Return the fully-qualified dotted name ("module.function") of *f*."""
    return f"{f.__module__}.{f.__name__}"
class task:
    """
    Used to decorate a function or class so that it can be run asynchronously
    via the task dispatcher. Tasks can be simple functions:
    @task()
    def add(a, b):
        return a + b
    ...or classes that define a `run` method:
    @task()
    class Adder:
        def run(self, a, b):
            return a + b
    # Tasks can be run synchronously...
    assert add(1, 1) == 2
    assert Adder().run(1, 1) == 2
    # ...or published to a queue:
    add.apply_async([1, 1])
    Adder.apply_async([1, 1])
    # Tasks can also define a specific target queue or use the special fan-out queue tower_broadcast:
    @task(queue='slow-tasks')
    def snooze():
        time.sleep(10)
    @task(queue='tower_broadcast')
    def announce():
        print("Run this everywhere!")
    """
    def __init__(self, queue=None):
        # Optional queue name (or callable returning one) used when publishing.
        self.queue = queue
    def __call__(self, fn=None):
        queue = self.queue
        class PublisherMixin(object):
            queue = None
            @classmethod
            def delay(cls, *args, **kwargs):
                # Shorthand: delay(*args, **kwargs) -> apply_async(args, kwargs).
                return cls.apply_async(args, kwargs)
            @classmethod
            def apply_async(cls, args=None, kwargs=None, queue=None, uuid=None, **kw):
                """Publish this task to the dispatcher over the pg notify bus.

                Returns a tuple of (message dict, queue name) so callers and
                tests can inspect what was (or would be) published.
                """
                # A caller-supplied uuid wins; otherwise generate a fresh one.
                task_id = uuid or str(uuid4())
                args = args or []
                kwargs = kwargs or {}
                # Explicit queue argument overrides the decorator-level queue.
                # NOTE(review): 'im_func' is a Python 2 bound-method attribute;
                # on Python 3 this getattr always falls back to cls.queue --
                # confirm the indirection can be dropped.
                queue = (
                    queue or
                    getattr(cls.queue, 'im_func', cls.queue)
                )
                if not queue:
                    msg = f'{cls.name}: Queue value required and may not be None'
                    logger.error(msg)
                    raise ValueError(msg)
                # Payload consumed by the dispatcher worker process.
                obj = {
                    'uuid': task_id,
                    'args': args,
                    'kwargs': kwargs,
                    'task': cls.name
                }
                obj.update(**kw)
                # A callable queue is resolved lazily at publish time.
                if callable(queue):
                    queue = queue()
                # Skip the actual pg NOTIFY while running under the test suite.
                if not settings.IS_TESTING(sys.argv):
                    with pg_bus_conn() as conn:
                        conn.notify(queue, json.dumps(obj))
                return (obj, queue)
        # If the object we're wrapping *is* a class (e.g., RunJob), return
        # a *new* class that inherits from the wrapped class *and* BaseTask
        # In this way, the new class returned by our decorator is the class
        # being decorated *plus* PublisherMixin so cls.apply_async() and
        # cls.delay() work
        bases = []
        ns = {'name': serialize_task(fn), 'queue': queue}
        if inspect.isclass(fn):
            bases = list(fn.__bases__)
            ns.update(fn.__dict__)
        cls = type(
            fn.__name__,
            tuple(bases + [PublisherMixin]),
            ns
        )
        if inspect.isclass(fn):
            return cls
        # if the object being decorated is *not* a class (it's a Python
        # function), make fn.apply_async and fn.delay proxy through to the
        # PublisherMixin we dynamically created above
        setattr(fn, 'name', cls.name)
        setattr(fn, 'apply_async', cls.apply_async)
        setattr(fn, 'delay', cls.delay)
        return fn
| 29.151261 | 101 | 0.533871 | import inspect
import logging
import sys
import json
from uuid import uuid4
from django.conf import settings
from . import pg_bus_conn
logger = logging.getLogger('awx.main.dispatch')
def serialize_task(f):
    """Build the dotted-path task name ("module.function") for callable *f*."""
    module, name = f.__module__, f.__name__
    return f"{module}.{name}"
class task:
    """
    Decorator that makes a function (or a class with a `run` method)
    publishable to the task dispatcher via `.delay()` / `.apply_async()`,
    optionally targeting a specific queue.
    """
    def __init__(self, queue=None):
        # Optional queue name (or callable returning one) used when publishing.
        self.queue = queue
    def __call__(self, fn=None):
        queue = self.queue
        class PublisherMixin(object):
            queue = None
            @classmethod
            def delay(cls, *args, **kwargs):
                # Shorthand: delay(*args, **kwargs) -> apply_async(args, kwargs).
                return cls.apply_async(args, kwargs)
            @classmethod
            def apply_async(cls, args=None, kwargs=None, queue=None, uuid=None, **kw):
                """Publish the task message; return (message dict, queue name)."""
                # A caller-supplied uuid wins; otherwise generate a fresh one.
                task_id = uuid or str(uuid4())
                args = args or []
                kwargs = kwargs or {}
                # Explicit queue argument overrides the decorator-level queue.
                # NOTE(review): 'im_func' is a Python 2 bound-method relic; on
                # Python 3 the getattr always falls back to cls.queue -- confirm
                # the indirection can be dropped.
                queue = (
                    queue or
                    getattr(cls.queue, 'im_func', cls.queue)
                )
                if not queue:
                    msg = f'{cls.name}: Queue value required and may not be None'
                    logger.error(msg)
                    raise ValueError(msg)
                # Payload consumed by the dispatcher worker process.
                obj = {
                    'uuid': task_id,
                    'args': args,
                    'kwargs': kwargs,
                    'task': cls.name
                }
                obj.update(**kw)
                # A callable queue is resolved lazily at publish time.
                if callable(queue):
                    queue = queue()
                # Skip the actual pg NOTIFY while running under the test suite.
                if not settings.IS_TESTING(sys.argv):
                    with pg_bus_conn() as conn:
                        conn.notify(queue, json.dumps(obj))
                return (obj, queue)
        # If the decorated object is a class, build and return
        # a *new* class that inherits from the wrapped class *and* BaseTask
        # In this way, the new class returned by our decorator is the class
        # being decorated *plus* PublisherMixin so cls.apply_async() and
        # cls.delay() work
        bases = []
        ns = {'name': serialize_task(fn), 'queue': queue}
        if inspect.isclass(fn):
            bases = list(fn.__bases__)
            ns.update(fn.__dict__)
        cls = type(
            fn.__name__,
            tuple(bases + [PublisherMixin]),
            ns
        )
        if inspect.isclass(fn):
            return cls
        # if the object being decorated is *not* a class (it's a Python
        # function), attach the mixin's publishing helpers to it directly
        setattr(fn, 'name', cls.name)
        setattr(fn, 'apply_async', cls.apply_async)
        setattr(fn, 'delay', cls.delay)
        return fn
| true | true |
f7fd7dbb7e204289f32cc1ccf0535d75693a45b0 | 3,186 | py | Python | spiral/core/extension.py | acdaniells/spiral | d78344007969d7c991216901b4a9d3ad7d768587 | [
"BSD-3-Clause"
] | null | null | null | spiral/core/extension.py | acdaniells/spiral | d78344007969d7c991216901b4a9d3ad7d768587 | [
"BSD-3-Clause"
] | 1 | 2020-04-01T18:39:48.000Z | 2020-04-01T18:39:48.000Z | spiral/core/extension.py | acdaniells/spiral | d78344007969d7c991216901b4a9d3ad7d768587 | [
"BSD-3-Clause"
] | 1 | 2020-04-01T18:36:44.000Z | 2020-04-01T18:36:44.000Z | """
Spiral core extensions module.
"""
import sys
from spiral.core.exc import SpiralError
from cement.core.extension import ExtensionInterface
from cement.core.handler import Handler
from cement.utils.misc import minimal_logger
LOG = minimal_logger(__name__)
class ExtensionHandler(ExtensionInterface, Handler):
    """
    Extension handler class.

    This handler implements the Extension Interface, which handles
    loading framework extensions. All extension handlers should sub-
    class from here, or ensure that their implementation meets the
    requirements of this base class.
    """

    class Meta:
        """
        Handler meta-data.
        """

        label = "spiral"
        """The string identifier of the handler."""

    def __init__(self, **kw):
        super().__init__(**kw)
        self.app = None
        self._loaded_extensions = []

    def get_loaded_extensions(self):
        """
        Get all loaded extensions.

        Returns
        -------
        list
            A list of loaded extensions.
        """
        return self._loaded_extensions

    def list(self):
        """
        Get all loaded extensions.

        Synonymous with ``get_loaded_extensions()``.

        Returns
        -------
        list
            A list of loaded extensions.
        """
        return self._loaded_extensions

    def load_extension(self, ext_module):
        """
        Load an extension.

        Parameters
        ----------
        ext_module : str
            The extension module name. For example: ``spiral.ext.ext_logging``.

        Raises
        ------
        SpiralError
            Raised if ``ext_module`` can not be loaded from either the
            Spiral or the Cement namespace.
        """
        # If its not a full module path then prepend our default path
        if "." not in ext_module:
            ext_module = f"spiral.ext.ext_{ext_module}"

        if ext_module in self._loaded_extensions:
            LOG.debug(f"framework extension '{ext_module}' already loaded")
            return

        LOG.debug(f"loading the '{ext_module}' framework extension")

        # try loading the extension from Spiral
        try:
            self._load_extension(ext_module)
        except ImportError:
            # try loading the equivalent extension from Cement
            try:
                self._load_extension(ext_module.replace("spiral", "cement"))
            except ImportError as e:
                # Chain the original ImportError so the underlying cause
                # stays visible in the traceback (PEP 3134).
                raise SpiralError(e.args[0]) from e

    def _load_extension(self, ext_module):
        # Import only if not already imported (keeps reloads idempotent).
        if ext_module not in sys.modules:
            __import__(ext_module, globals(), locals(), [], 0)

        # Give the extension a chance to register itself with the app.
        if hasattr(sys.modules[ext_module], "load"):
            sys.modules[ext_module].load(self.app)

        if ext_module not in self._loaded_extensions:
            self._loaded_extensions.append(ext_module)

    def load_extensions(self, ext_list):
        """
        Load extensions.

        Iterates over the list of extension modules passing each to
        ``self.load_extension()``.

        Parameters
        ----------
        ext_list : list
            A list of extension module names.
        """
        for ext in ext_list:
            self.load_extension(ext)
| 24.890625 | 79 | 0.592593 |
import sys
from spiral.core.exc import SpiralError
from cement.core.extension import ExtensionInterface
from cement.core.handler import Handler
from cement.utils.misc import minimal_logger
LOG = minimal_logger(__name__)
class ExtensionHandler(ExtensionInterface, Handler):
class Meta:
label = "spiral"
def __init__(self, **kw):
super().__init__(**kw)
self.app = None
self._loaded_extensions = []
def get_loaded_extensions(self):
return self._loaded_extensions
def list(self):
return self._loaded_extensions
def load_extension(self, ext_module):
if ext_module.find(".") == -1:
ext_module = f"spiral.ext.ext_{ext_module}"
if ext_module in self._loaded_extensions:
LOG.debug(f"framework extension '{ext_module}' already loaded")
return
LOG.debug(f"loading the '{ext_module}' framework extension")
try:
self._load_extension(ext_module)
except ImportError:
try:
self._load_extension(ext_module.replace("spiral", "cement"))
except ImportError as e:
raise SpiralError(e.args[0])
def _load_extension(self, ext_module):
if ext_module not in sys.modules:
__import__(ext_module, globals(), locals(), [], 0)
if hasattr(sys.modules[ext_module], "load"):
sys.modules[ext_module].load(self.app)
if ext_module not in self._loaded_extensions:
self._loaded_extensions.append(ext_module)
def load_extensions(self, ext_list):
for ext in ext_list:
self.load_extension(ext)
| true | true |
f7fd7eaf436bd3c969fa9c374a9d2bfdf03a940b | 1,446 | py | Python | assets/baekjoon/2667_building_site_number/python_2667.py | TakeaimK/TakeaimK.github.io | 13ef7dd7093fed5f60b16599b6b6d76190a2aaf8 | [
"MIT"
] | null | null | null | assets/baekjoon/2667_building_site_number/python_2667.py | TakeaimK/TakeaimK.github.io | 13ef7dd7093fed5f60b16599b6b6d76190a2aaf8 | [
"MIT"
] | null | null | null | assets/baekjoon/2667_building_site_number/python_2667.py | TakeaimK/TakeaimK.github.io | 13ef7dd7093fed5f60b16599b6b6d76190a2aaf8 | [
"MIT"
] | null | null | null |
def building_bfs(arr, point, n):
    """Count connected '1' regions (building sites) in the n x n grid ``arr``.

    ``point`` lists the coordinates of every cell containing 1; it is consumed
    (emptied) during the scan, and visited cells in ``arr`` are zeroed in place.

    Returns a tuple ``(site_count, sizes)`` where ``sizes`` holds the number
    of cells of each site in discovery order.
    """
    from collections import deque  # O(1) popleft for the BFS frontier

    dx = (-1, 1, 0, 0)  # 4-neighbourhood offsets (up, down, left, right)
    dy = (0, 0, -1, 1)
    site_sizes = []
    while point:
        x, y = point.pop(0)
        if arr[x][y] != 1:
            continue  # already absorbed into a previously-found site
        # breadth-first flood fill of the site starting at (x, y)
        size = 0
        frontier = deque([(x, y)])
        while frontier:
            cx, cy = frontier.popleft()
            if arr[cx][cy] != 1:
                continue  # may have been queued twice before being visited
            arr[cx][cy] = 0  # mark visited
            size += 1
            for i in range(4):
                nx = cx + dx[i]
                ny = cy + dy[i]
                if 0 <= nx < n and 0 <= ny < n and arr[nx][ny] == 1:
                    frontier.append((nx, ny))
        site_sizes.append(size)
    return len(site_sizes), site_sizes
if __name__ == "__main__":
    # Read the grid size, then n rows of '0'/'1' characters from stdin.
    n = int(input().strip())
    arr = [[0] * n for _ in range(n)]
    point = []
    for row in range(n):
        line = input().strip()  # strip removes surrounding whitespace
        for col in range(len(line)):
            if line[col] == '1':
                arr[row][col] = 1
                point.append((row, col))
    # Print the number of sites, then each site size in ascending order.
    total, site = building_bfs(arr, point, n)
    print(total)
    for count in sorted(site):
        print(count)
| 26.290909 | 77 | 0.45574 |
def building_bfs(arr, point, n):
    """Count connected '1' regions (building sites) in the n x n grid ``arr``.

    ``point`` lists the coordinates of every cell that contains 1; it is
    consumed during the scan and visited cells are zeroed in place.
    Returns ``(number_of_sites, sizes)`` with the cell count of each site
    in discovery order.
    """
    dx = [-1, 1, 0, 0]  # 4-neighbourhood offsets (up, down, left, right)
    dy = [0, 0, -1, 1]
    site_total_count = 0
    site_building = []
    first_search = False
    while point:
        x, y = point.pop(0)
        now = []
        first_search = False
        if arr[x][y] == 1:
            # unvisited cell: it starts a new site
            now.append((x, y))
            site_total_count += 1
            site_count = 0
            first_search = True
        while now:
            # breadth-first flood fill of the current site
            nowx, nowy = now.pop(0)
            if arr[nowx][nowy] == 1:
                site_count += 1
                arr[nowx][nowy] = 0  # mark visited
                for i in range(4):
                    tempx = nowx + dx[i]
                    tempy = nowy + dy[i]
                    if tempx >= 0 and tempx < n and tempy >= 0 and tempy < n:
                        if arr[tempx][tempy] == 1:
                            now.append((tempx, tempy))
        if first_search:
            site_building.append(site_count)
    return site_total_count, site_building
if __name__ == "__main__":
    # read the grid size and n rows of '0'/'1' characters from stdin
    n = int(input().strip())
    point = []
    arr = [[0 for _ in range(n)]for _ in range(n)]
    for i in range(n):
        temp = input().strip()  # strip removes surrounding whitespace
        for j in range(len(temp)):
            if temp[j] == '1':
                arr[i][j] = 1
                point.append((i, j))
    total, site = building_bfs(arr, point, n)
    # print the number of sites, then each site size in ascending order
    print(total)
    site.sort()
    for i in range(len(site)):
        print(site[i])
| true | true |
f7fd7f9142e13c51d3e44fa7d50caa28928cd3a3 | 4,619 | py | Python | JapaneseTokenizer/common/sever_handler.py | fumankaitori/JapaneseTokenizers | 3bdfb6be73de0f78e5c08f3a51376ad3efa00b6c | [
"MIT"
] | 134 | 2015-12-14T05:05:41.000Z | 2022-03-27T15:52:30.000Z | JapaneseTokenizer/common/sever_handler.py | fumankaitori/JapaneseTokenizers | 3bdfb6be73de0f78e5c08f3a51376ad3efa00b6c | [
"MIT"
] | 40 | 2016-03-29T05:41:50.000Z | 2020-07-08T08:54:50.000Z | JapaneseTokenizer/common/sever_handler.py | fumankaitori/JapaneseTokenizers | 3bdfb6be73de0f78e5c08f3a51376ad3efa00b6c | [
"MIT"
] | 22 | 2016-01-27T01:17:59.000Z | 2022-02-15T13:46:39.000Z | #! -*- coding: utf-8 -*-
import subprocess
from subprocess import Popen, PIPE, STDOUT
import multiprocessing
# socket object
import socket
# logger
from JapaneseTokenizer import init_logger
import logging
# typing
from typing import Union
# else
from six import text_type
import six
import pexpect
import shutil
import signal
import os
logger = init_logger.init_logger(logging.getLogger(init_logger.LOGGER_NAME))
class ProcessDownException(Exception):
    """Raised when the child process takes longer than the configured timeout."""
    pass
class UnixProcessHandler(object):
    """Owns a long-running unix process (spawned via pexpect) and exchanges
    lines with it, with a SIGALRM-based timeout on each query."""

    def __init__(self,
                 command,
                 option=None,
                 pattern='EOS',
                 timeout_second=10):
        # type: (text_type,text_type,text_type,int)->None
        """* Get communication with unix process using pexpect module.

        :param command: executable to spawn
        :param option: optional command-line arguments appended to ``command``
        :param pattern: line marking the end of one response (default ``EOS``)
        :param timeout_second: seconds to wait for a response before raising
        """
        self.command = command
        self.timeout_second = timeout_second
        self.pattern = pattern
        self.option = option
        self.launch_process(command)

    def __del__(self):
        # Make sure the spawned process does not outlive this handler.
        if hasattr(self, "process_analyzer"):
            self.process_analyzer.kill(sig=9)

    def _full_command(self):
        # type: ()->text_type
        """Return the command line including the optional arguments."""
        if self.option is not None:
            return self.command + " " + self.option
        return self.command

    def launch_process(self, command):
        # type: (Union[bytes,text_type])->None
        """* What you can do
            - It starts process and keep it.

        Raises Exception when ``command`` is not available on this system.
        """
        command_plus_option = self._full_command()
        if six.PY3:
            if shutil.which(command) is None:
                raise Exception("No command at {}".format(command))
        else:
            # Python 2 has no shutil.which(); probe the command via a shell call.
            doc_command_string = "echo '' | {}".format(command)
            command_check = os.system(doc_command_string)
            if not command_check == 0:
                raise Exception("No command at {}".format(command))
        self.process_analyzer = pexpect.spawnu(command_plus_option)
        self.process_id = self.process_analyzer.pid

    def restart_process(self):
        # type: ()->None
        """Kill the running process and spawn a fresh one with the same arguments."""
        self.process_analyzer.kill(sig=9)
        self.process_analyzer = pexpect.spawnu(self._full_command())
        self.process_id = self.process_analyzer.pid

    def stop_process(self):
        # type: ()->bool
        """* What you can do
            - You're able to stop the process which this instance has now.
        """
        if hasattr(self, "process_analyzer"):
            self.process_analyzer.kill(sig=9)
        return True

    def __query(self, input_string):
        # type: (text_type)->text_type
        """* What you can do
            - It takes the result of the wrapped process for ``input_string``.
            - This function monitors time which takes for getting the result.
        """
        # arm a SIGALRM so a hung process raises ProcessDownException
        signal.signal(signal.SIGALRM, self.__notify_handler)
        signal.alarm(self.timeout_second)
        self.process_analyzer.sendline(input_string)
        buffer = ""
        while True:
            line_string = self.process_analyzer.readline()  # type: text_type
            if line_string.strip() == input_string:
                # skip if the process echoes the input string back
                continue
            elif line_string.strip() == self.pattern:
                buffer += line_string
                signal.alarm(0)  # cancel the pending alarm; response complete
                return buffer
            else:
                buffer += line_string

    def __notify_handler(self, signum, frame):
        # SIGALRM handler: the child did not answer within timeout_second.
        raise ProcessDownException("""It takes longer time than {time} seconds. You're able to try,
        1. Change your setting of 'timeout_second' parameter
        2. Run restart_process() method when the exception happens.""".format(**{"time": self.timeout_second}))

    def query(self, input_string):
        # type: (text_type)->text_type
        """Send ``input_string`` to the process and return its full response."""
        return self.__query(input_string=input_string)
class JumanppHnadler(UnixProcessHandler):
    """Process handler specialised for a Juman++ analyzer process."""

    def __init__(self, jumanpp_command, option=None, pattern='EOS', timeout_second=10):
        # type: (text_type,text_type,text_type,int)->None
        """Spawn and keep a Juman++ process via the parent class."""
        super(JumanppHnadler, self).__init__(
            command=jumanpp_command,
            option=option,
            pattern=pattern,
            timeout_second=timeout_second)

    def launch_jumanpp_process(self, command):
        # type: (text_type)->None
        """Delegate to ``launch_process()``."""
        return self.launch_process(command)
| 33.715328 | 132 | 0.614852 |
import subprocess
from subprocess import Popen, PIPE, STDOUT
import multiprocessing
import socket
from JapaneseTokenizer import init_logger
import logging
from typing import Union
from six import text_type
import six
import pexpect
import shutil
import signal
import os
logger = init_logger.init_logger(logging.getLogger(init_logger.LOGGER_NAME))
class ProcessDownException(Exception):
    """Raised when the child process takes longer than the configured timeout."""
    pass
class UnixProcessHandler(object):
    """Owns a long-running unix process (via pexpect) and exchanges lines with it."""
    def __init__(self,
                 command,
                 option=None,
                 pattern='EOS',
                 timeout_second=10):
        # option holds extra command-line arguments; pattern marks end-of-response
        self.command = command
        self.timeout_second = timeout_second
        self.pattern = pattern
        self.option = option
        self.launch_process(command)
    def __del__(self):
        # make sure the spawned process does not outlive this handler
        if hasattr(self, "process_analyzer"):
            self.process_analyzer.kill(sig=9)
    def launch_process(self, command):
        """Start the child process and keep a handle to it."""
        if not self.option is None:
            command_plus_option = self.command + " " + self.option
        else:
            command_plus_option = self.command
        if six.PY3:
            if shutil.which(command) is None:
                raise Exception("No command at {}".format(command))
            else:
                self.process_analyzer = pexpect.spawnu(command_plus_option)
                self.process_id = self.process_analyzer.pid
        else:
            # Python 2 has no shutil.which(); probe the command via a shell call
            doc_command_string = "echo '' | {}".format(command)
            command_check = os.system(doc_command_string)
            if not command_check == 0:
                raise Exception("No command at {}".format(command))
            else:
                self.process_analyzer = pexpect.spawnu(command_plus_option)
                self.process_id = self.process_analyzer.pid
    def restart_process(self):
        """Kill the current child process and spawn a fresh one."""
        if not self.option is None:
            command_plus_option = self.command + " " + self.option
        else:
            command_plus_option = self.command
        self.process_analyzer.kill(sig=9)
        self.process_analyzer = pexpect.spawnu(command_plus_option)
        self.process_id = self.process_analyzer.pid
    def stop_process(self):
        """Kill the child process if one is running; always returns True."""
        if hasattr(self, "process_analyzer"):
            self.process_analyzer.kill(sig=9)
        else:
            pass
        return True
    def __query(self, input_string):
        """Send one line and collect the response up to the end pattern,
        raising ProcessDownException via SIGALRM on timeout."""
        signal.signal(signal.SIGALRM, self.__notify_handler)
        signal.alarm(self.timeout_second)
        self.process_analyzer.sendline(input_string)
        buffer = ""
        while True:
            line_string = self.process_analyzer.readline()
            if line_string.strip() == input_string:
                # skip if the process echoes the input string back
                continue
            elif line_string.strip() == self.pattern:
                buffer += line_string
                signal.alarm(0)  # cancel the pending alarm; response complete
                return buffer
            else:
                buffer += line_string
    def __notify_handler(self, signum, frame):
        # SIGALRM handler: the child did not answer within timeout_second
        raise ProcessDownException("""It takes longer time than {time} seconds. You're able to try,
        1. Change your setting of 'timeout_second' parameter
        2. Run restart_process() method when the exception happens.""".format(**{"time": self.timeout_second}))
    def query(self, input_string):
        # type: (text_type)->text_type
        """Send ``input_string`` to the process and return its full response."""
        return self.__query(input_string=input_string)
class JumanppHnadler(UnixProcessHandler):
    """Process handler specialised for a Juman++ analyzer process."""
    def __init__(self,
                 jumanpp_command,
                 option = None,
                 pattern = 'EOS',
                 timeout_second = 10):
        # type: (text_type,text_type,text_type,int)->None
        super(JumanppHnadler, self).__init__(command=jumanpp_command, option=option, pattern=pattern, timeout_second=timeout_second)
    def launch_jumanpp_process(self, command):
        # type: (text_type)->None
        """Delegate to ``launch_process()``."""
        return self.launch_process(command)
| true | true |
f7fd808584f2cabbc9eb31d6906805c3b0d4d1f5 | 1,525 | py | Python | queue_/queue_.py | ashirka/programming-2021-19fpl | d4a233ac874a9e0397b6e61559b678da8b55b274 | [
"MIT"
] | null | null | null | queue_/queue_.py | ashirka/programming-2021-19fpl | d4a233ac874a9e0397b6e61559b678da8b55b274 | [
"MIT"
] | null | null | null | queue_/queue_.py | ashirka/programming-2021-19fpl | d4a233ac874a9e0397b6e61559b678da8b55b274 | [
"MIT"
] | null | null | null | """
Programming for linguists
Implementation of the data structure "Queue"
"""
from typing import Iterable
# pylint: disable=invalid-name
class Queue_:
    """
    Queue Data Structure

    A FIFO queue backed by a list, with a fixed maximum capacity.
    """
    def __init__(self, data: Iterable = (), capacity: int = 50):
        self.data = list(data)
        self._capacity = capacity

    def put(self, element):
        """
        Add the element ‘element’ at the end of queue_
        :param element: element to add to queue_
        :raises IndexError: if the queue_ is already full
        """
        if self.full():
            raise IndexError('queue_ is full')
        self.data.append(element)

    def get(self):
        """
        Remove and return an item from queue_
        :raises IndexError: if the queue_ is empty
        """
        if self.empty():
            raise IndexError('queue_ is empty')
        return self.data.pop(0)

    def empty(self) -> bool:
        """
        Return whether queue_ is empty or not
        :return: True if queue_ does not contain any elements.
                 False if the queue_ contains elements
        """
        return not self.data

    def size(self) -> int:
        """
        Return the number of elements in queue_
        :return: Number of elements in queue_
        """
        return len(self.data)

    def top(self):
        """
        Return the element on the top of queue_
        :return: the element that is on the top of queue_
        :raises IndexError: if the queue_ is empty
        """
        if self.empty():
            raise IndexError('queue_ is empty')
        return self.data[0]

    def full(self) -> bool:
        """
        Return whether queue_ is full or not
        """
        return self.size() >= self._capacity
| 23.461538 | 64 | 0.552787 |
from typing import Iterable
class Queue_:
    """FIFO queue backed by a list, with a fixed maximum capacity."""
    def __init__(self, data: Iterable = (), capacity: int = 50):
        self.data = list(data)
        self._capacity = capacity
    def put(self, element):
        """Append ``element``; raises IndexError when the queue is full."""
        if self.full():
            raise IndexError
        self.data.append(element)
    def get(self):
        """Remove and return the oldest element; raises IndexError if empty."""
        if self.empty():
            raise IndexError
        return self.data.pop(0)
    def empty(self) -> bool:
        """Return True when the queue holds no elements."""
        return not self.data
    def size(self) -> int:
        """Return the number of stored elements."""
        return len(self.data)
    def top(self):
        """Return (without removing) the oldest element."""
        return self.data[0]
    def full(self) -> bool:
        """Return True when the element count has reached capacity."""
        if self.size() == self._capacity:
            return True
        return False
| true | true |
f7fd8131eb1abe4714e348a86b103cc2f4995887 | 12,251 | py | Python | marsyas-vamp/marsyas/src/marsyas_python/pitch_plots.py | jaouahbi/VampPlugins | 27c2248d1c717417fe4d448cdfb4cb882a8a336a | [
"Apache-2.0"
] | null | null | null | marsyas-vamp/marsyas/src/marsyas_python/pitch_plots.py | jaouahbi/VampPlugins | 27c2248d1c717417fe4d448cdfb4cb882a8a336a | [
"Apache-2.0"
] | null | null | null | marsyas-vamp/marsyas/src/marsyas_python/pitch_plots.py | jaouahbi/VampPlugins | 27c2248d1c717417fe4d448cdfb4cb882a8a336a | [
"Apache-2.0"
] | null | null | null |
#!/usr/bin/evn python
from pylab import *
from marsyas_util import *
import sys
import getopt
import os
# plot zerocrossings
def zerocrossings(frame_num, winSize, input_filename):
    """Plot the waveform of frame ``frame_num`` with its zero crossings
    marked, and save it next to ``input_filename`` as a .png file."""
    print "ZeroCrossings"
    spec = ["Series/pitchExtract",
            ["SoundFileSource/src",
             "Gain/gain",
             ]
            ]
    net = create(spec)
    fname = net.getControl("SoundFileSource/src/mrs_string/filename")
    fname.setValue_string(input_filename)
    inSamples = net.getControl("mrs_natural/inSamples")
    inSamples.setValue_natural(winSize);
    # tick the network until the requested frame is reached
    for i in range(frame_num+1):
        net.tick()
        if (i==frame_num):
            figure(1);
            waveform = control2array(net,
                                     "SoundFileSource/src/mrs_realvec/processedData").transpose();
            zcrs = zeros(winSize)
            zcrs_x = [];
            zcrs_y = [];
            num_zcrs = 0
            # collect the sample indices where the sign of the signal flips
            for j in range(1,winSize):
                if (((waveform[j-1] > 0.0) and (waveform[j] < 0.0)) or
                    ((waveform[j-1] < 0.0) and (waveform[j] > 0.0))) :
                    zcrs_x.append(j)
                    zcrs_y.append(0.0)
                    num_zcrs = num_zcrs + 1;
            title("Time Domain Zero Crossings " + "(" + str(num_zcrs) +")")
            # plot the time domain waveform
            marplot(waveform)
            # plot the zero-crossings with stars
            plot(zcrs_x, zcrs_y, 'r*', drawstyle = 'steps', markersize=8)
            # plot a line 0.0
            plot(zcrs)
            # label the axes
            xlabel("Time in Samples")
            ylabel("Amplitude")
            # save the figure
            output_filename = os.path.splitext(input_filename)[0] + ".png"
            savefig(output_filename)
# plot a spectrum
def spectrum(frame_num, winSize, input_filename):
    """Plot the power spectrum of frame ``frame_num`` and save it as .png."""
    spec = ["Series/pitchExtract",
            ["SoundFileSource/src",
             "Windowing/win",
             "Spectrum/spk",
             "PowerSpectrum/pspk",
             "Gain/gain"
             ]
            ]
    net = create(spec)
    fname = net.getControl("SoundFileSource/src/mrs_string/filename")
    fname.setValue_string(input_filename)
    inSamples = net.getControl("mrs_natural/inSamples");
    inSamples.setValue_natural(winSize)
    for i in range(frame_num+1):
        net.tick()
        if (i==frame_num):
            figure(1);
            data = net.getControl("PowerSpectrum/pspk/mrs_realvec/processedData").to_realvec()
            # restrict spectrum to first 93 bins corresponding approximately to 4000Hz
            spectrum = control2array(net, "PowerSpectrum/pspk/mrs_realvec/processedData", eo=93);
            # plot spectrum with frequency axis
            marplot(spectrum,
                    x_label="Frequency in Hz",
                    y_label="Power",
                    plot_title = "Power Spectrum",
                    ex=4000)
            output_filename = os.path.splitext(input_filename)[0] + ".png"
            savefig(output_filename)
def autocorrelation(frame_num, winSize, input_filename):
    """Plot the autocorrelation of frame ``frame_num`` and save it as .png."""
    spec = ["Series/pitchExtract",
            ["SoundFileSource/src",
             "Windowing/win",
             "AutoCorrelation/acr",
             "Gain/gain"
             ]
            ]
    net = create(spec)
    fname = net.getControl("SoundFileSource/src/mrs_string/filename")
    fname.setValue_string(input_filename)
    inSamples = net.getControl("mrs_natural/inSamples");
    inSamples.setValue_natural(winSize)
    for i in range(frame_num+1):
        net.tick()
        if (i==frame_num):
            figure(1);
            acr = control2array(net, "AutoCorrelation/acr/mrs_realvec/processedData")
            title("AutoCorrelation")
            figure(1);
            marplot(acr, x_label = "Time in samples",
                    y_label = "Correlation",
                    plot_title = "AutoCorrelation")
            figure(2);
            marplot(acr);
            output_filename = os.path.splitext(input_filename)[0] + ".png"
            savefig(output_filename)
def amdf(frame_num, winSize, input_filename):
    """Plot the average magnitude difference function of frame ``frame_num``."""
    spec = ["Series/pitchExtract",
            ["SoundFileSource/src",
             "Windowing/win",
             "AMDF/amdf",
             "Gain/gain"
             ]
            ]
    net = create(spec)
    fname = net.getControl("SoundFileSource/src/mrs_string/filename")
    fname.setValue_string(input_filename)
    inSamples = net.getControl("mrs_natural/inSamples");
    inSamples.setValue_natural(winSize)
    for i in range(frame_num+1):
        net.tick()
        if (i==frame_num):
            figure(1)
            amdf = control2array(net, "AMDF/amdf/mrs_realvec/processedData")
            marplot(amdf,
                    plot_title = "Average Magnitude Difference Function",
                    x_label = "Time in samples",
                    y_label = "Difference")
            output_filename = os.path.splitext(input_filename)[0] + ".png"
            savefig(output_filename)
def chroma(frame_num, winSize, input_filename):
    """Plot the chroma (pitch-class) profile of frame ``frame_num``."""
    spec = ["Series/pitchExtract",
            ["SoundFileSource/src",
             "Windowing/win",
             "Spectrum/spk",
             "PowerSpectrum/pspk",
             "Spectrum2Chroma/s2c",
             "Gain/gain"
             ]
            ]
    net = create(spec)
    fname = net.getControl("SoundFileSource/src/mrs_string/filename")
    fname.setValue_string(input_filename)
    inSamples = net.getControl("mrs_natural/inSamples");
    inSamples.setValue_natural(winSize)
    for i in range(frame_num+1):
        net.tick()
        if (i==frame_num):
            figure(1);
            data = net.getControl("Spectrum2Chroma/s2c/mrs_realvec/processedData").to_realvec()
            data2 = net.getControl("PowerSpectrum/pspk/mrs_realvec/processedData").to_realvec()
            print realvec2array(data2)
            spectrum = realvec2array(data)
            print spectrum
            title("Chroma Profile")
            plot(spectrum[0])
            xlabel("Pitch Class(Chroma)")
            ylabel("Average Energy")
            output_filename = os.path.splitext(input_filename)[0] + ".png"
            savefig(output_filename)
            # second figure: the raw power spectrum of the same frame
            figure(2)
            sdata = net.getControl("PowerSpectrum/pspk/mrs_realvec/processedData").to_realvec();
            sspectrum = realvec2array(sdata);
            plot(sspectrum[0])
def something_gram(net, winSize, input_filename, output_filename,
                   plot_title, colormap, start, end):
    """Run the given accumulator network over [start, end] seconds of the
    input file and save the resulting time/lag image to ``output_filename``."""
    fname = net.getControl("Series/pitchExtract/SoundFileSource/src/mrs_string/filename")
    fname.setValue_string(input_filename)
    inSamples = net.getControl("mrs_natural/inSamples")
    inSamples.setValue_natural(winSize)
    srate = net.getControl("Series/pitchExtract/SoundFileSource/src/mrs_real/osrate").to_real()
    nTimes = net.getControl("mrs_natural/nTimes")
    fsize = net.getControl("Series/pitchExtract/SoundFileSource/src/mrs_natural/size").to_natural()
    pos = net.getControl("Series/pitchExtract/SoundFileSource/src/mrs_natural/pos")
    # seek to the requested start time (seconds -> samples)
    spos = float(start) * srate
    pos.setValue_natural(int(spos))
    if (end == None):
        fend = fsize
    else:
        fend = (float(end) - float(start)) * srate
    nTicks = int(fend / winSize)
    nTimes.setValue_natural(nTicks)
    duration = nTicks * winSize / srate
    print start
    print duration
    net.tick()
    figure(1);
    # use eo, so to limit the y-axis
    correlogram = control2array(net, "mrs_realvec/processedData")
    # marplot(correlogram, colormap, 'auto', plot_title = plot_title, x_label = "Time(seconds)",
    #        ey=duration, y_label = "Lag (samples)")
    marplot(correlogram, aspect='auto', cmap=colormap, plot_title = plot_title, x_label= "Time (seconds)", y_label = "Lag(samples)",
            sy = float(start),ey = float(start)+duration)
    print "Writing " + output_filename
    savefig(output_filename)
def chromagram():
    """Compute and save a chromagram of the file given in sys.argv[1]."""
    pitch_spec = ["Series/pitchExtract",
                  ["SoundFileSource/src",
                   "Windowing/win",
                   "Spectrum/spk",
                   "PowerSpectrum/pspk",
                   "Spectrum2Chroma/s2c"
                   ]
                  ]
    accum_spec = ["Accumulator/accum",
                  [pitch_spec]]
    mean_spec = ["Series/mean",
                 [accum_spec,
                  "Mean/mean"]]
    pitchnet = create(mean_spec)
    fname = pitchnet.getControl("Accumulator/accum/Series/pitchExtract/SoundFileSource/src/mrs_string/filename")
    fname.setValue_string(sys.argv[1])
    inSamples = pitchnet.getControl("mrs_natural/inSamples")
    inSamples.setValue_natural(1024)
    nTimes = pitchnet.getControl("Accumulator/accum/mrs_natural/nTimes")
    nTimes.setValue_natural(600)
    pitchnet.tick()
    figure(1);
    spectrum = control2array(pitchnet, "Accumulator/accum/mrs_realvec/processedData")
    print spectrum.shape
    marplot(spectrum,'jet', 'auto')
    savefig("chromagram.png")
    # second figure: the mean chroma vector over all accumulated frames
    figure(2)
    mean_chroma = pitchnet.getControl("mrs_realvec/processedData").to_realvec();
    plot(mean_chroma)
def usage():
    """Print the command-line options understood by main()."""
    print "Available options:"
    print "\tcolormap:string (valid colormaps: jet, bone, bone_r, spectral, hot)"
    print "\tstart:float (start of plot in seconds)"
    print "\tend:float (start of plot in seconds)"
    print "\tinput:string (input file .wav)"
    print "\touput:string (output file .png)"
    print "\tmethod:string (valid methods: correlogram, spectrogram, amdfogram)"
    print "\tplot_title:string"
    print "\twin_size:int"
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "hc:e:i:m:o:p:s:w:v", ["help","colormap=","end=","input=","method=","output=",
"plot_title=","start=","win_size="])
except:
print str(err)
usage()
sys.exit(2)
input_file = None
method = None
output_file = None
verbose = False
colormap = 'jet'
plot_title = 'Marsyas plot'
start = 0
end = None
win_size = 1024
for o, a in opts:
if o == "-v":
verbose = True
elif o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-o", "--output"):
output_file = a
elif o in ("-i", "--input"):
input_file = a
elif o in ("-m", "--method"):
method = a
elif o in ("-c", "--colormap"):
colormap = a
elif o in ("-p", "--plot_title"):
plot_title = a
elif o in ("-s", "--start"):
start = a
elif o in ("-e", "--end"):
end = a
elif o in ("-w", "--win_size"):
win_size = int(a)
else:
assert False, "unhandled option"
if (input_file == None):
print "No input .wav file specified"
sys.exit(2)
if (output_file == None):
output_file = os.path.splitext(input_file)[0] + ".png"
if (method == None):
method = "spectrogram"
print "start"+str(start)
spec = ["Series/pitchExtract",
["SoundFileSource/src",
"Windowing/win",
"AutoCorrelation/acr",
"Transposer/transpose"
]
]
accum_spec = ["Accumulator/acum",
[spec]]
correlogram_net = create(accum_spec)
spec = ["Series/pitchExtract",
["SoundFileSource/src",
"Windowing/win",
"Spectrum/spk",
"PowerSpectrum/pspk",
"Gain/gain"
]
]
accum_spec = ["Accumulator/acum",
[spec]]
spectrogram_net = create(accum_spec)
spec = ["Series/pitchExtract",
["SoundFileSource/src",
"Windowing/win",
"AMDF/amdf",
"Transposer/transpose"
]
]
accum_spec = ["Accumulator/acum",
[spec]]
amdfogram_net = create(accum_spec)
if (method == "zerocrossings"):
zerocrossings(10, 512, input_file)
elif (method == "spectrum"):
spectrum(5, 1024, input_file)
elif (method == "autocorrelation"):
autocorrelation(5, 1024, input_file)
elif (method == "amdf"):
amdf(5, 1024, input_file)
elif (method == "chroma"):
chroma(5, 4096, input_file)
elif (method == "spectrogram"):
something_gram(spectrogram_net, win_size, input_file, output_file,
plot_title, colormap, start, end)
elif (method == "correlogram"):
something_gram(correlogram_net, win_size, input_file, output_file,
plot_title, colormap, start, end)
elif (method == "amdfogram"):
something_gram(amdfogram_net, win_size, input_file, output_file,
plot_title, colormap, start, end)
show()
return 0
if __name__ == "__main__":
main()
| 29.663438 | 130 | 0.605746 |
from pylab import *
from marsyas_util import *
import sys
import getopt
import os
def zerocrossings(frame_num, winSize, input_filename):
print "ZeroCrossings"
spec = ["Series/pitchExtract",
["SoundFileSource/src",
"Gain/gain",
]
]
net = create(spec)
fname = net.getControl("SoundFileSource/src/mrs_string/filename")
fname.setValue_string(input_filename)
inSamples = net.getControl("mrs_natural/inSamples")
inSamples.setValue_natural(winSize);
for i in range(frame_num+1):
net.tick()
if (i==frame_num):
figure(1);
waveform = control2array(net,
"SoundFileSource/src/mrs_realvec/processedData").transpose();
zcrs = zeros(winSize)
zcrs_x = [];
zcrs_y = [];
num_zcrs = 0
for j in range(1,winSize):
if (((waveform[j-1] > 0.0) and (waveform[j] < 0.0)) or
((waveform[j-1] < 0.0) and (waveform[j] > 0.0))) :
zcrs_x.append(j)
zcrs_y.append(0.0)
num_zcrs = num_zcrs + 1;
title("Time Domain Zero Crossings " + "(" + str(num_zcrs) +")")
marplot(waveform)
plot(zcrs_x, zcrs_y, 'r*', drawstyle = 'steps', markersize=8)
plot(zcrs)
xlabel("Time in Samples")
ylabel("Amplitude")
output_filename = os.path.splitext(input_filename)[0] + ".png"
savefig(output_filename)
def spectrum(frame_num, winSize, input_filename):
spec = ["Series/pitchExtract",
["SoundFileSource/src",
"Windowing/win",
"Spectrum/spk",
"PowerSpectrum/pspk",
"Gain/gain"
]
]
net = create(spec)
fname = net.getControl("SoundFileSource/src/mrs_string/filename")
fname.setValue_string(input_filename)
inSamples = net.getControl("mrs_natural/inSamples");
inSamples.setValue_natural(winSize)
for i in range(frame_num+1):
net.tick()
if (i==frame_num):
figure(1);
data = net.getControl("PowerSpectrum/pspk/mrs_realvec/processedData").to_realvec()
spectrum = control2array(net, "PowerSpectrum/pspk/mrs_realvec/processedData", eo=93);
marplot(spectrum,
x_label="Frequency in Hz",
y_label="Power",
plot_title = "Power Spectrum",
ex=4000)
output_filename = os.path.splitext(input_filename)[0] + ".png"
savefig(output_filename)
def autocorrelation(frame_num, winSize, input_filename):
spec = ["Series/pitchExtract",
["SoundFileSource/src",
"Windowing/win",
"AutoCorrelation/acr",
"Gain/gain"
]
]
net = create(spec)
fname = net.getControl("SoundFileSource/src/mrs_string/filename")
fname.setValue_string(input_filename)
inSamples = net.getControl("mrs_natural/inSamples");
inSamples.setValue_natural(winSize)
for i in range(frame_num+1):
net.tick()
if (i==frame_num):
figure(1);
acr = control2array(net, "AutoCorrelation/acr/mrs_realvec/processedData")
title("AutoCorrelation")
figure(1);
marplot(acr, x_label = "Time in samples",
y_label = "Correlation",
plot_title = "AutoCorrelation")
figure(2);
marplot(acr);
output_filename = os.path.splitext(input_filename)[0] + ".png"
savefig(output_filename)
def amdf(frame_num, winSize, input_filename):
spec = ["Series/pitchExtract",
["SoundFileSource/src",
"Windowing/win",
"AMDF/amdf",
"Gain/gain"
]
]
net = create(spec)
fname = net.getControl("SoundFileSource/src/mrs_string/filename")
fname.setValue_string(input_filename)
inSamples = net.getControl("mrs_natural/inSamples");
inSamples.setValue_natural(winSize)
for i in range(frame_num+1):
net.tick()
if (i==frame_num):
figure(1)
amdf = control2array(net, "AMDF/amdf/mrs_realvec/processedData")
marplot(amdf,
plot_title = "Average Magnitude Difference Function",
x_label = "Time in samples",
y_label = "Difference")
output_filename = os.path.splitext(input_filename)[0] + ".png"
savefig(output_filename)
def chroma(frame_num, winSize, input_filename):
spec = ["Series/pitchExtract",
["SoundFileSource/src",
"Windowing/win",
"Spectrum/spk",
"PowerSpectrum/pspk",
"Spectrum2Chroma/s2c",
"Gain/gain"
]
]
net = create(spec)
fname = net.getControl("SoundFileSource/src/mrs_string/filename")
fname.setValue_string(input_filename)
inSamples = net.getControl("mrs_natural/inSamples");
inSamples.setValue_natural(winSize)
for i in range(frame_num+1):
net.tick()
if (i==frame_num):
figure(1);
data = net.getControl("Spectrum2Chroma/s2c/mrs_realvec/processedData").to_realvec()
data2 = net.getControl("PowerSpectrum/pspk/mrs_realvec/processedData").to_realvec()
print realvec2array(data2)
spectrum = realvec2array(data)
print spectrum
title("Chroma Profile")
plot(spectrum[0])
xlabel("Pitch Class(Chroma)")
ylabel("Average Energy")
output_filename = os.path.splitext(input_filename)[0] + ".png"
savefig(output_filename)
figure(2)
sdata = net.getControl("PowerSpectrum/pspk/mrs_realvec/processedData").to_realvec();
sspectrum = realvec2array(sdata);
plot(sspectrum[0])
def something_gram(net, winSize, input_filename, output_filename,
plot_title, colormap, start, end):
fname = net.getControl("Series/pitchExtract/SoundFileSource/src/mrs_string/filename")
fname.setValue_string(input_filename)
inSamples = net.getControl("mrs_natural/inSamples")
inSamples.setValue_natural(winSize)
srate = net.getControl("Series/pitchExtract/SoundFileSource/src/mrs_real/osrate").to_real()
nTimes = net.getControl("mrs_natural/nTimes")
fsize = net.getControl("Series/pitchExtract/SoundFileSource/src/mrs_natural/size").to_natural()
pos = net.getControl("Series/pitchExtract/SoundFileSource/src/mrs_natural/pos")
spos = float(start) * srate
pos.setValue_natural(int(spos))
if (end == None):
fend = fsize
else:
fend = (float(end) - float(start)) * srate
nTicks = int(fend / winSize)
nTimes.setValue_natural(nTicks)
duration = nTicks * winSize / srate
print start
print duration
net.tick()
figure(1);
correlogram = control2array(net, "mrs_realvec/processedData")
marplot(correlogram, aspect='auto', cmap=colormap, plot_title = plot_title, x_label= "Time (seconds)", y_label = "Lag(samples)",
sy = float(start),ey = float(start)+duration)
print "Writing " + output_filename
savefig(output_filename)
def chromagram():
pitch_spec = ["Series/pitchExtract",
["SoundFileSource/src",
"Windowing/win",
"Spectrum/spk",
"PowerSpectrum/pspk",
"Spectrum2Chroma/s2c"
]
]
accum_spec = ["Accumulator/accum",
[pitch_spec]]
mean_spec = ["Series/mean",
[accum_spec,
"Mean/mean"]]
pitchnet = create(mean_spec)
fname = pitchnet.getControl("Accumulator/accum/Series/pitchExtract/SoundFileSource/src/mrs_string/filename")
fname.setValue_string(sys.argv[1])
inSamples = pitchnet.getControl("mrs_natural/inSamples")
inSamples.setValue_natural(1024)
nTimes = pitchnet.getControl("Accumulator/accum/mrs_natural/nTimes")
nTimes.setValue_natural(600)
pitchnet.tick()
figure(1);
spectrum = control2array(pitchnet, "Accumulator/accum/mrs_realvec/processedData")
print spectrum.shape
marplot(spectrum,'jet', 'auto')
savefig("chromagram.png")
figure(2)
mean_chroma = pitchnet.getControl("mrs_realvec/processedData").to_realvec();
plot(mean_chroma)
def usage():
    """Print command-line usage information for the plotting script."""
    # Parenthesized single-argument print works identically under
    # Python 2 and Python 3.
    print("Available options:")
    print("\tcolormap:string (valid colormaps: jet, bone, bone_r, spectral, hot)")
    print("\tstart:float (start of plot in seconds)")
    # Fixed: this option previously claimed to be the *start* of the plot.
    print("\tend:float (end of plot in seconds)")
    print("\tinput:string (input file .wav)")
    # Fixed typo: "ouput" -> "output".
    print("\toutput:string (output file .png)")
    print("\tmethod:string (valid methods: correlogram, spectrogram, amdfogram)")
    print("\tplot_title:string")
    print("\twin_size:int")
def main():
    """Parse command-line options and dispatch to the selected
    analysis/plotting method (spectrogram by default).

    Returns 0 on success; exits with status 2 on bad arguments.
    """
    try:
        opts, args = getopt.getopt(
            sys.argv[1:], "hc:e:i:m:o:p:s:w:v",
            ["help", "colormap=", "end=", "input=", "method=", "output=",
             "plot_title=", "start=", "win_size="])
    except getopt.GetoptError as err:
        # Bug fix: the original bare "except:" then printed an undefined
        # name "err", which itself raised NameError.  Catch the specific
        # getopt error and bind it so the message can be shown.
        print(str(err))
        usage()
        sys.exit(2)
    # Option defaults.
    input_file = None
    method = None
    output_file = None
    verbose = False
    colormap = 'jet'
    plot_title = 'Marsyas plot'
    start = 0
    end = None
    win_size = 1024
    for o, a in opts:
        if o == "-v":
            verbose = True
        elif o in ("-h", "--help"):
            usage()
            sys.exit()
        elif o in ("-o", "--output"):
            output_file = a
        elif o in ("-i", "--input"):
            input_file = a
        elif o in ("-m", "--method"):
            method = a
        elif o in ("-c", "--colormap"):
            colormap = a
        elif o in ("-p", "--plot_title"):
            plot_title = a
        elif o in ("-s", "--start"):
            start = a
        elif o in ("-e", "--end"):
            end = a
        elif o in ("-w", "--win_size"):
            win_size = int(a)
        else:
            assert False, "unhandled option"
    if input_file is None:
        print("No input .wav file specified")
        sys.exit(2)
    if output_file is None:
        # Default output name: same basename as the input, .png extension.
        output_file = os.path.splitext(input_file)[0] + ".png"
    if method is None:
        method = "spectrogram"
    print("start" + str(start))
    # Build the three candidate Marsyas networks up front; only the one
    # matching the requested method is actually run.
    spec = ["Series/pitchExtract",
            ["SoundFileSource/src",
             "Windowing/win",
             "AutoCorrelation/acr",
             "Transposer/transpose"
            ]
            ]
    accum_spec = ["Accumulator/acum",
                  [spec]]
    correlogram_net = create(accum_spec)
    spec = ["Series/pitchExtract",
            ["SoundFileSource/src",
             "Windowing/win",
             "Spectrum/spk",
             "PowerSpectrum/pspk",
             "Gain/gain"
            ]
            ]
    accum_spec = ["Accumulator/acum",
                  [spec]]
    spectrogram_net = create(accum_spec)
    spec = ["Series/pitchExtract",
            ["SoundFileSource/src",
             "Windowing/win",
             "AMDF/amdf",
             "Transposer/transpose"
            ]
            ]
    accum_spec = ["Accumulator/acum",
                  [spec]]
    amdfogram_net = create(accum_spec)
    # Dispatch to the requested analysis; the *_gram methods share the
    # generic something_gram driver with their prebuilt network.
    if method == "zerocrossings":
        zerocrossings(10, 512, input_file)
    elif method == "spectrum":
        spectrum(5, 1024, input_file)
    elif method == "autocorrelation":
        autocorrelation(5, 1024, input_file)
    elif method == "amdf":
        amdf(5, 1024, input_file)
    elif method == "chroma":
        chroma(5, 4096, input_file)
    elif method == "spectrogram":
        something_gram(spectrogram_net, win_size, input_file, output_file,
                       plot_title, colormap, start, end)
    elif method == "correlogram":
        something_gram(correlogram_net, win_size, input_file, output_file,
                       plot_title, colormap, start, end)
    elif method == "amdfogram":
        something_gram(amdfogram_net, win_size, input_file, output_file,
                       plot_title, colormap, start, end)
    show()
    return 0
# Script entry point: run the CLI when this file is executed directly.
if __name__ == "__main__":
    main()
| false | true |
f7fd814c32684300b0fa5e5ddcf529bc340b8224 | 4,006 | py | Python | jackett-to-sonarr.py | marcus-crane/scripts | e349806d8494882d5cd45bc77b0445592c400d89 | [
"MIT"
] | 1 | 2021-06-30T06:19:37.000Z | 2021-06-30T06:19:37.000Z | jackett-to-sonarr.py | marcus-crane/scripts | e349806d8494882d5cd45bc77b0445592c400d89 | [
"MIT"
] | null | null | null | jackett-to-sonarr.py | marcus-crane/scripts | e349806d8494882d5cd45bc77b0445592c400d89 | [
"MIT"
] | null | null | null | """
Jackett to Sonarr
An extremely sloppy script for inserting Jackett indexes into Sonarr.
If you don't know what that is, you probably don't need it!
I've manually done this setup in the past and it's very tedious
so I decided to automate it once and for all
You can get a Sonarr API key by using the dev tools and checking
the request headers
I might clean this up in future but for now, you'll just have to
manually flip some bits
"""
import json
import requests
# Endpoints and credentials for the local Sonarr and Jackett instances.
sonarr_url = "http://192.168.1.xx:8989"
sonarr_api_key = "<api_key>"
jackett_url = "http://192.168.1.xx:9117"
jackett_api_key = "<api_key>"
jackett_api_url = f"{jackett_url}/api/v2.0/indexers"
# Sonarr v3 API: "test" validates indexer settings without saving them,
# the bare indexer endpoint actually creates the indexer.
test_url = f"{sonarr_url}/api/v3/indexer/test"
submit_url = f"{sonarr_url}/api/v3/indexer"
headers = {
    "Content-Type": "application/json",
    "User-Agent": "Sonarr Jackett Sync Script/1.0",
    "X-Api-Key": sonarr_api_key
}
# Base payload shared by every indexer created in Sonarr; each entry is
# a Torznab indexer backed by Jackett.
payload = {
    "configContract": "TorznabSettings",
    "enableAutomaticSearch": True,
    "enableInteractiveSearch": True,
    "enableRss": True,
    "implementation": "Torznab",
    "implementationName": "Torznab",
    "infoLink": "https://wiki.servarr.com/Sonarr_Supported_Indexers",
    "priority": 25,
    "protocol": "torrent",
    "supportsRss": True,
    "supportsSearch": True,
    "tags": []
}
# Common form fields; apiPath and the category lists are appended per
# indexer in submit_indexer().
fields = [
    { "name": "baseUrl", "value": jackett_url },
    { "name": "apiKey", "value": jackett_api_key },
    { "name": "additionalParameters" },
    { "name": "minimumSeeders", "value": 1 },
    { "name": "seedCriteria.seedRatio" },
    { "name": "seedCriteria.seedTime" },
    { "name": "seedCriteria.seasonPackSeedTime" }
]
# Fetch every indexer known to Jackett (configured or not).
r = requests.get(jackett_api_url)
print(r.status_code)
indexers = r.json()
active_indexers = []
# Torznab capability IDs that should be mapped to Sonarr's TV categories...
tv_numbers = [5000, 5030, 5040, 5080, 100074, 100006, 100009, 100005, 100041, 100071, 100075, 100007, 108346, 120797, 105867, 105503]
# ...and to its anime categories.
anime_numbers = [5070, 100028, 100078, 100079, 100080, 100001, 142158, 122266, 152237, 147671, 120797, 105867, 105503]
def submit_indexer(indexer: dict, include_tv_categories=True, include_anime_categories=True, dryrun=True):
    """Register a single Jackett indexer with Sonarr.

    When ``dryrun`` is true the settings are only validated against
    Sonarr's test endpoint and True/False is returned; otherwise the
    indexer is actually created (201 prints a success message).
    """
    indexer_id = indexer['id']
    indexer_name = indexer['name']
    # Map the indexer's Torznab capability IDs onto Sonarr's TV and
    # anime category lists (order preserved, as advertised by Jackett).
    cap_ids = [int(cap['ID']) for cap in indexer['caps']]
    valid_tv_numbers = [c for c in cap_ids if include_tv_categories and c in tv_numbers]
    valid_anime_numbers = [c for c in cap_ids if include_anime_categories and c in anime_numbers]
    # Assemble the request body from the shared templates without
    # mutating the module-level payload/fields.
    index_payload = dict(payload, name=indexer_name)
    index_fields = fields + [
        {
            "name": "apiPath",
            "value": f"/api/v2.0/indexers/{indexer_id}/results/torznab/"
        },
        {
            "name": "categories",
            "value": valid_tv_numbers
        },
        {
            "name": "animeCategories",
            "value": valid_anime_numbers
        },
    ]
    index_payload['fields'] = index_fields
    if dryrun:
        resp = requests.post(test_url, data=json.dumps(index_payload), headers=headers)
        if resp.status_code == 200:
            print(f"Settings for {indexer_name} are valid.")
            return True
        if resp.status_code == 400:
            print(f"{indexer_name} threw an error. Perhaps it already exists?")
            print(resp.json())
        return False
    resp = requests.post(submit_url, data=json.dumps(index_payload), headers=headers)
    if resp.status_code == 201:
        print(f"Successfully added {indexer_name}")
# Sync every indexer that is configured in Jackett: validate it against
# Sonarr first, and only create it for real when the dry run succeeds.
for configured_indexer in indexers:
    if not configured_indexer['configured']:
        continue
    active_indexers.append(configured_indexer)
    dry_run_ok = submit_indexer(
        configured_indexer, include_tv_categories=True,
        include_anime_categories=True, dryrun=True
    )
    if dry_run_ok:
        submit_indexer(
            configured_indexer, include_tv_categories=True,
            include_anime_categories=True, dryrun=False)
| 33.107438 | 133 | 0.667 |
import json
import requests
sonarr_url = "http://192.168.1.xx:8989"
sonarr_api_key = "<api_key>"
jackett_url = "http://192.168.1.xx:9117"
jackett_api_key = "<api_key>"
jackett_api_url = f"{jackett_url}/api/v2.0/indexers"
test_url = f"{sonarr_url}/api/v3/indexer/test"
submit_url = f"{sonarr_url}/api/v3/indexer"
headers = {
"Content-Type": "application/json",
"User-Agent": "Sonarr Jackett Sync Script/1.0",
"X-Api-Key": sonarr_api_key
}
payload = {
"configContract": "TorznabSettings",
"enableAutomaticSearch": True,
"enableInteractiveSearch": True,
"enableRss": True,
"implementation": "Torznab",
"implementationName": "Torznab",
"infoLink": "https://wiki.servarr.com/Sonarr_Supported_Indexers",
"priority": 25,
"protocol": "torrent",
"supportsRss": True,
"supportsSearch": True,
"tags": []
}
fields = [
{ "name": "baseUrl", "value": jackett_url },
{ "name": "apiKey", "value": jackett_api_key },
{ "name": "additionalParameters" },
{ "name": "minimumSeeders", "value": 1 },
{ "name": "seedCriteria.seedRatio" },
{ "name": "seedCriteria.seedTime" },
{ "name": "seedCriteria.seasonPackSeedTime" }
]
r = requests.get(jackett_api_url)
print(r.status_code)
indexers = r.json()
active_indexers = []
tv_numbers = [5000, 5030, 5040, 5080, 100074, 100006, 100009, 100005, 100041, 100071, 100075, 100007, 108346, 120797, 105867, 105503]
anime_numbers = [5070, 100028, 100078, 100079, 100080, 100001, 142158, 122266, 152237, 147671, 120797, 105867, 105503]
def submit_indexer(indexer: dict, include_tv_categories=True, include_anime_categories=True, dryrun=True):
indexer_id = indexer['id']
indexer_name = indexer['name']
valid_anime_numbers = list()
valid_tv_numbers = list()
for category in indexer['caps']:
category_id = int(category['ID'])
if category_id in tv_numbers and include_tv_categories:
valid_tv_numbers.append(category_id)
if category_id in anime_numbers and include_anime_categories:
valid_anime_numbers.append(category_id)
index_payload = payload.copy()
index_payload['name'] = indexer_name
index_fields = fields.copy()
index_fields.append({
"name": "apiPath",
"value": f"/api/v2.0/indexers/{indexer_id}/results/torznab/"
})
index_fields.append({
"name": "categories",
"value": valid_tv_numbers
})
index_fields.append({
"name": "animeCategories",
"value": valid_anime_numbers
})
index_payload['fields'] = index_fields
if dryrun:
r = requests.post(test_url, data=json.dumps(index_payload), headers=headers)
if r.status_code == 200:
print(f"Settings for {indexer_name} are valid.")
return True
if r.status_code == 400:
print(f"{indexer_name} threw an error. Perhaps it already exists?")
print(r.json())
return False
return False
r = requests.post(submit_url, data=json.dumps(index_payload), headers=headers)
if r.status_code == 201:
print(f"Successfully added {indexer_name}")
for indexer in indexers:
if indexer['configured']:
active_indexers.append(indexer)
test_result = submit_indexer(
indexer, include_tv_categories=True,
include_anime_categories=True, dryrun=True
)
if test_result:
submit_indexer(
indexer, include_tv_categories=True,
include_anime_categories=True, dryrun=False)
| true | true |
f7fd81b4a06a9d7940495a8813bfd7850d55d428 | 6,577 | py | Python | torchreid/engine/image/viewpoint_aware.py | iankuoli/OSNet-TopDrop | 3ab57ba507e9f8939762e27834137172375cd91c | [
"MIT"
] | null | null | null | torchreid/engine/image/viewpoint_aware.py | iankuoli/OSNet-TopDrop | 3ab57ba507e9f8939762e27834137172375cd91c | [
"MIT"
] | null | null | null | torchreid/engine/image/viewpoint_aware.py | iankuoli/OSNet-TopDrop | 3ab57ba507e9f8939762e27834137172375cd91c | [
"MIT"
] | null | null | null | from __future__ import division, print_function, absolute_import
from ... import metrics
from ...engine.engine import Engine
from ...losses import FocalLoss, CrossEntropyLoss, ALSRLoss
from .vat import VATLoss
import math
import torch
from torch.nn import Parameter
import torch.nn.functional as F
import torch.nn as nn
class ImageVAReIDEngine(Engine):
    r"""Viewpoint-Aware Loss with Angular Regularization engine for image-reid.

    Ref: Viewpoint-Aware Loss with Angular Regularization for Person
    Re-Identification. AAAI, 2020. https://arxiv.org/pdf/1912.01300v1.pdf

    (The previous Examples block of this docstring contained an unrelated,
    accidentally pasted code fragment and has been removed.)

    Args:
        datamanager (DataManager): an instance of ``deepreid.data.ImageDataManager``
            or ``deepreid.data.VideoDataManager``.
        model (nn.Module): model instance.
        arc_margin_y: angular-margin embedding head for identity labels.
        arc_margin_v: angular-margin embedding head for (identity, viewpoint)
            pairs; it is evaluated against the running class centers.
        optimizer (Optimizer): an Optimizer.
        gamma (float, optional): focusing parameter of the focal loss. Default is 2.
        weight_f (float, optional): weight for focal loss. Default is 1.
        weight_x (float, optional): weight for softmax loss. Default is 1.
        weight_v (float, optional): weight for the viewpoint (ALSR) loss. Default is 1.
        scheduler (LRScheduler, optional): if None, no learning rate decay will be performed.
        use_gpu (bool, optional): use gpu. Default is True.
    """

    def __init__(
        self,
        datamanager,
        model,
        arc_margin_y,
        arc_margin_v,
        optimizer,
        gamma=2,
        weight_f=1,
        weight_x=1,
        weight_v=1,
        scheduler=None,
        use_gpu=True,
    ):
        super(ImageVAReIDEngine, self).__init__(datamanager, use_gpu)
        self.model = model
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.register_model('model', model, optimizer, scheduler)
        # Loss-term weights: focal (f), softmax (x) and viewpoint (v).
        self.weight_f = weight_f
        self.weight_x = weight_x
        self.weight_v = weight_v
        # Angular-margin heads: identity-only (y) and identity-viewpoint (v).
        self.arc_embed_y = arc_margin_y
        self.arc_embed_v = arc_margin_v
        self.criterion_x = CrossEntropyLoss(num_classes=self.datamanager.num_train_pids,
                                            use_gpu=self.use_gpu,
                                            label_smooth=True)
        self.criterion_f = FocalLoss(gamma=gamma)
        self.criterion_v = ALSRLoss(num_classes=self.datamanager.num_train_pids,
                                    use_gpu=self.use_gpu,
                                    label_smooth=True)
        # Running feature centers per (identity, viewpoint): 3 viewpoints,
        # 512-dim features; counts_yv records how many samples contributed
        # to each center.
        self.centers_yv = torch.zeros(self.datamanager.num_train_pids, 3, 512)
        self.counts_yv = torch.zeros(self.datamanager.num_train_pids, 3)

    def forward_backward(self, data):
        """Run one optimization step on a batch and return a loss summary."""
        imgs, pids, vids = self.parse_data_for_train(data)
        if self.use_gpu:
            imgs = imgs.cuda()
            pids = pids.cuda()
            # Bug fix: the original assigned pids.cuda() to vids, silently
            # replacing the viewpoint labels with the identity labels.
            vids = vids.cuda()
        outputs, features = self.model(imgs)
        embeddings_y = self.arc_embed_y(features, pids)
        # Each identity owns 3 viewpoint centers, hence the pids*3+vids index
        # into the flattened (num_pids*3, 512) center matrix.
        embeddings_v = self.arc_embed_v(features, pids * 3 + vids,
                                        weight=self.centers_yv.view(-1, 512))
        loss_x = self.compute_loss(self.criterion_x, outputs, pids)
        loss_f = self.compute_loss(self.criterion_f, embeddings_y, pids)
        loss_v = self.compute_loss(self.criterion_v, embeddings_v, (pids, vids))
        loss = self.weight_f * loss_f + self.weight_x * loss_x + self.weight_v * loss_v
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        # Update the running mean of each (identity, viewpoint) feature center.
        for i in range(pids.size(0)):
            self.counts_yv[pids[i], vids[i]] += 1
            n = self.counts_yv[pids[i], vids[i]]
            # Bug fix: "(tmp-1/tmp)" parsed as tmp - (1/tmp); the running
            # mean requires the factor (n-1)/n.
            self.centers_yv[pids[i], vids[i]] = (
                (n - 1) / n * self.centers_yv[pids[i], vids[i]]
                + 1 / n * features[i]
            )
            # NOTE(review): centers_yv is allocated on the CPU while
            # features may live on the GPU and carry autograd history;
            # this assignment likely needs features[i].detach().cpu() --
            # confirm against an actual training run.
        loss_summary = {'loss_x': loss_x.item(),
                        'loss_f': loss_f.item(),
                        'loss_v': loss_v.item(),
                        'acc_x': metrics.accuracy(outputs, pids)[0].item(),
                        'acc_f': metrics.accuracy(embeddings_y, pids)[0].item(),
                        }
        return loss_summary

    def forward(self, imgs, pids):
        """Evaluate losses/accuracies on a batch without updating weights.

        Returns (loss_x, loss_f, loss_v, acc_x, acc_f), all Python floats,
        or five Nones when no sample survives the label filter.
        """
        # Keep only samples whose label fits the identity embedding head.
        # Bug fix: the original referenced self.arc_embed, an attribute
        # that does not exist (only arc_embed_y / arc_embed_v are defined).
        indexs = torch.where(pids < self.arc_embed_y.out_features)
        imgs, pids = imgs[indexs], pids[indexs]
        if self.use_gpu:
            imgs = imgs.cuda()
            pids = pids.cuda()
        if imgs.shape[0] == 0:
            # Bug fix: callers unpack five values; the original returned four.
            return None, None, None, None, None
        outputs, features = self.model(imgs)
        embeddings_y = self.arc_embed_y(features, pids)
        embeddings_v = self.arc_embed_v(features, pids)
        loss_x = self.compute_loss(self.criterion_x, outputs, pids).item()
        loss_f = self.compute_loss(self.criterion_f, embeddings_y, pids).item()
        # NOTE(review): viewpoint labels are unavailable here, so the focal
        # criterion is reused for the viewpoint embeddings, as in the
        # original -- confirm this is intentional.
        loss_v = self.compute_loss(self.criterion_f, embeddings_v, pids).item()
        acc_x = metrics.accuracy(outputs, pids)[0].item()
        acc_f = metrics.accuracy(embeddings_y, pids)[0].item()
        return loss_x, loss_f, loss_v, acc_x, acc_f
| 38.238372 | 123 | 0.593432 | from __future__ import division, print_function, absolute_import
from ... import metrics
from ...engine.engine import Engine
from ...losses import FocalLoss, CrossEntropyLoss, ALSRLoss
from .vat import VATLoss
import math
import torch
from torch.nn import Parameter
import torch.nn.functional as F
import torch.nn as nn
class ImageVAReIDEngine(Engine):
def __init__(
self,
datamanager,
model,
arc_margin_y,
arc_margin_v,
optimizer,
gamma=2,
weight_f=1,
weight_x=1,
weight_v=1,
scheduler=None,
use_gpu=True,
):
super(ImageVAReIDEngine, self).__init__(datamanager, use_gpu)
self.model = model
self.optimizer = optimizer
self.scheduler = scheduler
self.register_model('model', model, optimizer, scheduler)
self.weight_f = weight_f
self.weight_x = weight_x
self.weight_v = weight_v
self.arc_embed_y = arc_margin_y
self.arc_embed_v = arc_margin_v
self.criterion_x = CrossEntropyLoss(num_classes=self.datamanager.num_train_pids,
use_gpu=self.use_gpu,
label_smooth=True)
self.criterion_f = FocalLoss(gamma=gamma)
self.criterion_v = ALSRLoss(num_classes=self.datamanager.num_train_pids,
use_gpu=self.use_gpu,
label_smooth=True)
self.centers_yv = torch.zeros(self.datamanager.num_train_pids, 3, 512)
self.counts_yv = torch.zeros(self.datamanager.num_train_pids, 3)
def forward_backward(self, data):
imgs, pids, vids = self.parse_data_for_train(data)
if self.use_gpu:
imgs = imgs.cuda()
pids = pids.cuda()
vids = pids.cuda()
outputs, features = self.model(imgs)
embeddings_y = self.arc_embed_y(features, pids)
embeddings_v = self.arc_embed_v(features, pids*3+vids, weight=self.centers_yv.view(-1, 512))
loss_x = self.compute_loss(self.criterion_x, outputs, pids)
loss_f = self.compute_loss(self.criterion_f, embeddings_y, pids)
loss_v = self.compute_loss(self.criterion_v, embeddings_v, (pids, vids))
loss = self.weight_f * loss_f + self.weight_x * loss_x + self.weight_v * loss_v
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
for i in range(pids.size(0)):
self.counts_yv[pids[i], vids[i]] += 1
tmp = self.counts_yv[pids[i], vids[i]]
self.centers_yv[pids[i], vids[i]] = (tmp-1/tmp) * self.centers_yv[pids[i], vids[i]] + 1/tmp * features[i]
loss_summary = {'loss_x': loss_x.item(),
'loss_f': loss_f.item(),
'loss_v': loss_v.item(),
'acc_x': metrics.accuracy(outputs, pids)[0].item(),
'acc_f': metrics.accuracy(embeddings_y, pids)[0].item(),
}
return loss_summary
def forward(self, imgs, pids):
indexs = torch.where(pids < self.arc_embed.out_features)
imgs, pids = imgs[indexs], pids[indexs]
if self.use_gpu:
imgs = imgs.cuda()
pids = pids.cuda()
if imgs.shape[0] == 0:
return None, None, None, None
outputs, features = self.model(imgs)
embeddings_y = self.arc_embed_y(features, pids)
embeddings_v = self.arc_embed_v(features, pids)
loss_x = self.compute_loss(self.criterion_x, outputs, pids).item()
loss_f = self.compute_loss(self.criterion_f, embeddings_y, pids).item()
loss_v = self.compute_loss(self.criterion_f, embeddings_v, pids).item()
acc_x = metrics.accuracy(outputs, pids)[0].item()
acc_f = metrics.accuracy(embeddings_y, pids)[0].item()
return loss_x, loss_f, loss_v, acc_x, acc_f
| true | true |
f7fd81c9a90a4731cceb83fc325a7b44197d0c7a | 613 | py | Python | ensconce/cli.py | netwrkr/ensconce | eda938c67eb0af8fb7d3ccf668e07d2f76485aa5 | [
"BSD-3-Clause"
] | 1 | 2021-05-05T13:52:44.000Z | 2021-05-05T13:52:44.000Z | ensconce/cli.py | netwrkr/ensconce | eda938c67eb0af8fb7d3ccf668e07d2f76485aa5 | [
"BSD-3-Clause"
] | null | null | null | ensconce/cli.py | netwrkr/ensconce | eda938c67eb0af8fb7d3ccf668e07d2f76485aa5 | [
"BSD-3-Clause"
] | null | null | null | import sys
import optparse
from ensconce.config import init_app, config
from ensconce import server
def run_server(argv=None):
    """Parse command-line options and start the ensconce CherryPy server.

    :param argv: full argument vector (defaults to ``sys.argv``); only
        the elements after the program name are parsed.
    """
    if argv is None:
        argv = sys.argv
    parser = optparse.OptionParser(description='Run the ensconce cherrypy server.')
    init_app()
    parser.add_option('-d', '--debug',
                      default=config.get('debug', False),
                      action="store_true",
                      help='Run in debug mode?')
    # Bug fix: parse the argv that was passed in rather than always
    # falling back to sys.argv, so callers can supply their own args.
    # Behavior with the default argv is unchanged.
    (options, args) = parser.parse_args(argv[1:])
    config['debug'] = options.debug
    server.configure()
    server.serve_forever()
import optparse
from ensconce.config import init_app, config
from ensconce import server
def run_server(argv=None):
if argv is None:
argv = sys.argv
parser = optparse.OptionParser(description='Run the ensconce cherrypy server.')
init_app()
parser.add_option('-d', '--debug',
default=config.get('debug', False),
action="store_true",
help='Run in debug mode?')
(options, args) = parser.parse_args()
config['debug'] = options.debug
server.configure()
server.serve_forever() | true | true |
f7fd8222b161847ddcd7f5749f69a02e23c18396 | 30,040 | py | Python | src/onecontainer_api/routers/tests/test_media.py | intel/stacks-api | 904eeeb0eedee9d9b9cced32dcbf9d4b3871bc87 | [
"BSD-3-Clause"
] | 4 | 2020-12-08T19:06:41.000Z | 2021-08-13T09:32:21.000Z | src/onecontainer_api/routers/tests/test_media.py | intel/stacks-api | 904eeeb0eedee9d9b9cced32dcbf9d4b3871bc87 | [
"BSD-3-Clause"
] | 2 | 2020-12-15T20:35:39.000Z | 2021-01-05T17:37:12.000Z | src/onecontainer_api/routers/tests/test_media.py | intel/stacks-api | 904eeeb0eedee9d9b9cced32dcbf9d4b3871bc87 | [
"BSD-3-Clause"
] | 4 | 2020-12-04T20:39:23.000Z | 2021-01-04T10:26:33.000Z | # SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2020 Intel Corporation
import os
import time
from fastapi.testclient import TestClient
from onecontainer_api import models, schemas, config, startup_svc
from onecontainer_api.frontend import app
web_server_port = 80
rtmp_server_port = 1935
for svc in config.INITIAL_SERVICES:
if svc["image"] == "web-rtmp":
web_server_port = svc["port"]["80/tcp"]
rtmp_server_port = svc["port"]["1935/tcp"]
break
video_0 = f"http://{config.BACKEND_NETWORK_GATEWAY}:{web_server_port}/sample-videos/fruit-and-vegetable-detection.mp4"
video_1 = f"http://{config.BACKEND_NETWORK_GATEWAY}:{web_server_port}/sample-videos/bottle-detection.mp4"
video_2 = f"http://{config.BACKEND_NETWORK_GATEWAY}:{web_server_port}/sample-videos/face-demographics-walking.mp4"
rtmp_ip = f"{config.BACKEND_NETWORK_GATEWAY}:{rtmp_server_port}"
input_data = {
"source": video_0
}
probe_input = {'streams': [{'index': 0, 'codec_name': 'h264', 'codec_long_name': 'H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10', 'profile': 'High', 'codec_type': 'video', 'codec_time_base': '1001/120000', 'codec_tag_string': 'avc1', 'codec_tag': '0x31637661', 'width': 960, 'height': 540, 'coded_width': 960, 'coded_height': 544, 'closed_captions': 0, 'has_b_frames': 0, 'sample_aspect_ratio': '1:1', 'display_aspect_ratio': '16:9', 'pix_fmt': 'yuv420p', 'level': 32, 'color_range': 'tv', 'color_space': 'bt709', 'color_transfer': 'bt709', 'color_primaries': 'bt709', 'chroma_location': 'left', 'field_order': 'progressive', 'refs': 1, 'is_avc': 'true', 'nal_length_size': '4', 'r_frame_rate': '60000/1001', 'avg_frame_rate': '60000/1001', 'time_base': '1/60000', 'start_pts': 0, 'start_time': '0.000000', 'duration_ts': 3636633, 'duration': '60.610550', 'bit_rate': '2335818', 'bits_per_raw_sample': '8', 'nb_frames': '3633', 'disposition': {'default': 1, 'dub': 0, 'original': 0, 'comment': 0, 'lyrics': 0, 'karaoke': 0, 'forced': 0, 'hearing_impaired': 0, 'visual_impaired': 0, 'clean_effects': 0, 'attached_pic': 0, 'timed_thumbnails': 0}, 'tags': {'creation_time': '2018-06-15T21:05:12.000000Z', 'language': 'und', 'handler_name': 'Core Media Video'}}], 'format': {'filename': 'http://172.17.0.1:5553/sample-videos/fruit-and-vegetable-detection.mp4', 'nb_streams': 1, 'nb_programs': 0, 'format_name': 'mov,mp4,m4a,3gp,3g2,mj2', 'format_long_name': 'QuickTime / MOV', 'start_time': '0.000000', 'duration': '60.610550', 'size': '17760065', 'bit_rate': '2344154', 'probe_score': 100, 'tags': {'major_brand': 'mp42', 'minor_version': '1', 'compatible_brands': 'mp41mp42isom', 'creation_time': '2018-06-15T21:05:12.000000Z'}}}
supported_containers = ["mkv", "mp4", "mov", "m4a", "avi", "webm", "wmv", "vob"]
supported_audio_codecs = {
"aac": "aac",
"ogg": "libvorbis",
"wav": "pcm_s16le",
"flac": "flac",
"ac3": "ac3",
"wma": "wmav2",
}
supported_gpu_codecs = {
"mp4": "h264_vaapi",
"mkv": "hevc_vaapi",
"mov": "mjpeg_vaapi",
"webm": "vp8_vaapi"
}
pipeline_codecs = {
"input_file": {
"source": video_1
},
"outputs": [
{
"container": "mp4",
"channels": [
{
"stream_type": "video",
"codec": "libx264"
}
]
}
]
}
pipeline_h264 = {
"input_file": {
"source": video_1
},
"outputs": [
{
"container": "mkv",
"channels": [
{
"stream_type": "video",
"codec": "libx264",
"codec_params": {
"preset": "ultrafast",
"tune": "film",
"crf": "30"
}
}
]
}
]
}
pipeline_mpegts = {
"input_file": {
"source": video_1,
"params": {
"re": None
}
},
"outputs": [
{
"container": "mpegts",
"channels": [
{
"stream_type": "video",
"codec": "libx264",
"codec_params": {
"preset": "fast",
"crf": "30"
}
}
]
}
]
}
pipeline_rtmp = {
"input_file": {
"source": video_1,
"params": {
"re": None
}
},
"outputs": [
{
"container": "flv",
"rtmp_ip": rtmp_ip,
"rtmp_path": "live",
"channels": [
{
"stream_type": "video",
"codec": "libx264",
"codec_params": {
"preset": "fast",
"crf": "30"
}
}
]
}
]
}
pipeline_filters = {
"input_file": {
"source": video_2
},
"outputs": [
{
"container": "mkv",
"channels": [
{
"stream_type": "video",
"filters": {
"scale": {
"w": "iw/2",
"h": -1
},
"deflicker": {
"mode": "pm",
"size": 10
},
"reverse": {},
"hue": {
"s": 0
}
}
},
{
"stream_type": "audio",
"filters": {
"atrim": {
"start": 1
},
"asetpts": "PTS-STARTPTS",
"volume": {
"volume": 0.8
},
"areverse": {},
"aphaser": {}
}
}
]
}
]
}
pipeline_copy = {
"input_file": {
"source": video_2
},
"outputs": [
{
"container": "mp4",
"channels": [
{
"stream_type": "video",
"codec": "copy"
},
{
"stream_type": "audio",
"codec": "copy"
}
]
}
]
}
pipeline_empty = {
"input_file": {
"source": video_2
},
"outputs": [
{
"container": "mp4"
}
]
}
pipeline_mkv = {
"input_file": {
"source": video_1
},
"outputs": [
{
"container": "mkv",
"params": {
"metadata": "stereo_mode=left_right",
"default_mode": "infer_no_subs"
}
}
]
}
pipeline_mp4 = {
"input_file": {
"source":video_1
},
"outputs": [
{
"container": "mp4",
"params": {
"movflags": "isml+frag_keyframe"
}
}
]
}
pipeline_aac = {
"input_file": {
"source": video_2
},
"outputs": [
{
"container": "aac",
"channels": [
{
"stream_type": "audio",
"codec": "aac",
"codec_params": {
"ab": 192000,
"profile": "aac_ltp",
"strict": "-2",
}
},
{
"stream_type": "video",
"params": {
"vn": None
}
}
]
}
]
}
class TestMedia():
    def setup_method(self):
        # Create all ORM tables in a fresh database before each test.
        models.Base.metadata.create_all(bind=models.engine)
    def teardown_method(self):
        # Remove the on-disk SQLite file after each test
        # (DATABASE_URL is "sqlite:///<path>"; take the path part).
        os.remove(config.DATABASE_URL.split("///")[1])
    def test_probe(self):
        # The synchronous probe endpoint must return the ffprobe metadata
        # of a known sample video verbatim (compared against probe_input).
        with TestClient(app) as client:
            response = client.get("/service")
            # Pick the mers-ffmpeg service registered at startup.
            data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
            svc_id = data.pop("id")
            response = client.post(f"/media/{svc_id}/probe?sync=true", json=input_data)
            assert response.status_code == 200
            assert response.json() == probe_input
    def test_probe_missing_fields(self):
        # An empty request body must be rejected with a validation error
        # naming the missing "source" field.
        with TestClient(app) as client:
            response = client.get("/service")
            data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
            svc_id = data.pop("id")
            response = client.post(f"/media/{svc_id}/probe?sync=true", json={})
            assert response.status_code == 400
            assert response.json().get("status") == "InputFile field required: source"
    def test_probe_wrong_data(self):
        # Invalid "source" values: nonexistent paths surface ffprobe's
        # "No such file or directory" in the error detail, while a null
        # source fails schema validation before reaching ffprobe.
        with TestClient(app) as client:
            response = client.get("/service")
            data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
            svc_id = data.pop("id")
            response = client.post(f"/media/{svc_id}/probe?sync=true", json={"source": "wrong"})
            assert response.status_code == 400
            assert response.json().get("status", {}).get('detail', {}).get('description') == ["wrong: No such file or directory"]
            response = client.post(f"/media/{svc_id}/probe?sync=true", json={"source": ""})
            assert response.status_code == 400
            assert response.json().get("status", {}).get('detail', {}).get("description") == [": No such file or directory"]
            response = client.post(f"/media/{svc_id}/probe?sync=true", json={"source": None})
            assert response.status_code == 400
            assert response.json().get("status") == "InputFile none is not an allowed value: source"
            response = client.post(f"/media/{svc_id}/probe?sync=true", json={"source": 1})
            assert response.status_code == 400
            assert response.json().get("status", {}).get('detail', {}).get("description") == ["1: No such file or directory"]
    def test_pipeline_missing_fields(self):
        # Each structurally invalid pipeline body must be rejected with a
        # validation error naming the missing field.
        # NOTE: the shallow copy is safe here because "outputs" is rebound
        # to a fresh list before any nested mutation.
        with TestClient(app) as client:
            response = client.get("/service")
            data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
            svc_id = data.pop("id")
            json_data = pipeline_codecs.copy()
            json_data["outputs"] = [{}]
            response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
            assert response.status_code == 400
            assert response.json().get("status") == "Pipeline field required: outputs,0,container"
            json_data["outputs"][0] = {"container": "test", "channels": [{}]}
            response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
            assert response.status_code == 400
            assert response.json().get("status") == "Pipeline field required: outputs,0,channels,0,stream_type"
            json_data["outputs"] = []
            response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
            assert response.status_code == 400
            assert response.json().get("status", {}).get('detail', {}).get('description') == "No outputs specified"
            json_data.pop("input_file")
            response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
            assert response.status_code == 400
            assert response.json().get("status") == "Pipeline field required: input_file"
def test_pipeline_unsupported_data(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
json_data = pipeline_codecs.copy()
json_data["outputs"][0]["container"] = "wrong"
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 200
pipeline_id = response.json()['id']
time.sleep(3)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
for output in response.json():
assert output['status'] == 'error'
assert output['command_output'][-1].strip() == f"{output.get('id')}.wrong: Invalid argument"
json_data["outputs"][0]["container"] = "mkv"
json_data["outputs"][0]["channels"][0]["codec"] = "wrong"
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 200
pipeline_id = response.json()['id']
time.sleep(2)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
for output in response.json():
assert output['status'] == 'error'
assert output['command_output'][-1].strip() == "Unknown encoder 'wrong'"
json_data["outputs"][0]["channels"][0]["codec"] = "libx264"
json_data["outputs"][0]["channels"][0]["stream_type"] = "wrong"
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
assert result[index]['command'] == f"ffmpeg -i {video_1} -map 0:v {outputs[index]}"
    def test_pipeline_copy(self):
        # Stream-copy pipelines ("codec": "copy" for audio and video)
        # must produce an ffmpeg command using -acodec/-vcodec copy.
        with TestClient(app) as client:
            response = client.get("/service")
            data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
            svc_id = data.pop("id")
            response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_copy)
            assert response.status_code == 200
            pipeline_id = response.json()['id']
            outputs = response.json().get("outputs", [])
            response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
            assert response.status_code == 200
            result = response.json()
            for index in range(len(result)):
                assert result[index]['status'] != 'error'
                assert result[index]['command'] == f"ffmpeg -i {video_2} -map 0:v -map 0:a -acodec copy -vcodec copy {outputs[index]}"
def test_pipeline_empty(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_empty)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
assert result[index]['command'] == f"ffmpeg -i {video_2} -map 0:v -map 0:a {outputs[index]}"
def test_pipeline_mkv(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_mkv)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
assert result[index]['command'] == f"ffmpeg -i {video_1} -map 0:v -default_mode infer_no_subs -metadata stereo_mode=left_right {outputs[index]}"
def test_pipeline_mp4(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_mp4)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
assert result[index]['command'] == f"ffmpeg -i {video_1} -map 0:v -movflags isml+frag_keyframe {outputs[index]}"
def test_pipeline_aac(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_aac)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
assert result[index]['command'] == f"ffmpeg -i {video_2} -map 0:v -map 0:a -ab 192000 -acodec aac -profile:a aac_ltp -strict -2 -vn {outputs[index]}"
def test_pipeline_h264(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_h264)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
time.sleep(2)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] == 'finished'
assert result[index]['command'] == f"ffmpeg -i {video_1} -map 0:v -crf 30 -preset ultrafast -tune film -vcodec libx264 {outputs[index]}"
def test_pipeline_filters(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_filters)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
time.sleep(5)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] == 'finished'
assert result[index]['command'] == f"ffmpeg -i {video_2} -filter_complex [0:v]scale=h=-1:w=iw/2[s0];[s0]deflicker=mode=pm:size=10[s1];[s1]reverse[s2];[s2]hue=s=0[s3];[0:a]atrim=start=1[s4];[s4]asetpts=PTS-STARTPTS[s5];[s5]volume=volume=0.8[s6];[s6]areverse[s7];[s7]aphaser[s8] -map [s3] -map [s8] {outputs[index]}"
def test_pipeline_supported_containers(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
json_data = pipeline_empty.copy()
for container in supported_containers:
json_data["outputs"][0]["container"] = container
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
timeout = 15
finished = False
while not finished and timeout:
time.sleep(3)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
if result[index]['status'] == 'finished':
assert result[index]['command_retcode'] == 0
assert result[index]['command'] == f"ffmpeg -i {video_2} -map 0:v -map 0:a {outputs[index]}"
finished = True
timeout -= 1
if not finished:
assert False
def test_pipeline_supported_audio_codecs(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
json_data = pipeline_empty.copy()
for extension, codec in supported_audio_codecs.items():
json_data["outputs"][0]["container"] = extension
json_data["outputs"][0]["channels"] = [{"stream_type": "audio", "codec": codec}, {"stream_type": "video", "params": {"vn": None}}]
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
timeout = 15
finished = False
while not finished and timeout:
time.sleep(3)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
if result[index]['status'] == 'finished':
assert result[index]['command_retcode'] == 0
assert result[index]['command'] == f"ffmpeg -i {video_2} -map 0:v -map 0:a -acodec {codec} -vn {outputs[index]}"
finished = True
timeout -= 1
if not finished:
assert False
def test_pipeline_supported_gpu_codecs(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
json_data = pipeline_empty.copy()
for extension, codec in supported_gpu_codecs.items():
json_data["outputs"][0]["container"] = extension
json_data["outputs"][0]["params"] = {"vaapi_device": "/dev/dri/renderD128"}
json_data["outputs"][0]["channels"] = [{"stream_type": "video", "codec": codec, "params": {"vf":"format=nv12,hwupload"}}]
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
timeout = 15
finished = False
while not finished or timeout == 0:
time.sleep(3)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
if result[index]['status'] == 'finished':
assert result[index]['command_retcode'] == 0
assert result[index]['command'] == f"ffmpeg -i {video_2} -map 0:v -map 0:a -vaapi_device /dev/dri/renderD128 -vcodec {codec} -vf format=nv12,hwupload {outputs[index]}"
finished = True
timeout -= 1
if not finished:
assert False
def test_pipeline_ttl(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
json_data = pipeline_copy.copy()
json_data["ttl"] = 5
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 200
result = response.json()
time.sleep(6)
response = client.get(f"/media/{svc_id}/pipeline/{result['id']}?sync=true")
assert response.status_code == 400
assert response.json().get("status", {}).get('detail', {}).get("description") == f"Pipeline {result['id']} doesn't exist"
def test_pipeline_azure_upload(self):
ks = os.getenv("AZURE_STORAGE_CONNECTION_STRING")
bucket = os.getenv("CLOUD_STORAGE_BUCKET")
if ks and bucket:
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
json_data = pipeline_copy.copy()
json_data["outputs"][0]["storage"] = [{
"name": "azure",
"bucket": bucket,
"env": {
"AZURE_STORAGE_CONNECTION_STRING": ks
}
}]
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 200
# response = client.get(f"/media/{svc_id}/pipeline/{result['id']}?sync=true")
def test_pipeline_mpegts(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_mpegts)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
time.sleep(30)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] == 'running'
assert result[index]['command'] == f"ffmpeg -re -i {video_1} -map 0:v -f mpegts -crf 30 -preset fast -vcodec libx264 {outputs[index]}"
time.sleep(15)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] == 'finished'
def test_pipeline_stop(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_mpegts)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
time.sleep(2)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] == 'running'
assert result[index]['command'] == f"ffmpeg -re -i {video_1} -map 0:v -f mpegts -crf 30 -preset fast -vcodec libx264 {outputs[index]}"
time.sleep(2)
response = client.delete(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] == 'finished'
def test_pipeline_rtmp(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_rtmp)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
time.sleep(30)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert outputs[index] == f"rtmp://{rtmp_ip}/live"
assert result[index]['status'] == 'running'
assert result[index]['command'] == f"ffmpeg -re -i {video_1} -map 0:v -f flv -crf 30 -preset fast -vcodec libx264 {outputs[index]}"
time.sleep(15)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] == 'finished' | 43.854015 | 1,723 | 0.54737 |
import os
import time
from fastapi.testclient import TestClient
from onecontainer_api import models, schemas, config, startup_svc
from onecontainer_api.frontend import app
web_server_port = 80
rtmp_server_port = 1935
for svc in config.INITIAL_SERVICES:
if svc["image"] == "web-rtmp":
web_server_port = svc["port"]["80/tcp"]
rtmp_server_port = svc["port"]["1935/tcp"]
break
video_0 = f"http://{config.BACKEND_NETWORK_GATEWAY}:{web_server_port}/sample-videos/fruit-and-vegetable-detection.mp4"
video_1 = f"http://{config.BACKEND_NETWORK_GATEWAY}:{web_server_port}/sample-videos/bottle-detection.mp4"
video_2 = f"http://{config.BACKEND_NETWORK_GATEWAY}:{web_server_port}/sample-videos/face-demographics-walking.mp4"
rtmp_ip = f"{config.BACKEND_NETWORK_GATEWAY}:{rtmp_server_port}"
input_data = {
"source": video_0
}
probe_input = {'streams': [{'index': 0, 'codec_name': 'h264', 'codec_long_name': 'H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10', 'profile': 'High', 'codec_type': 'video', 'codec_time_base': '1001/120000', 'codec_tag_string': 'avc1', 'codec_tag': '0x31637661', 'width': 960, 'height': 540, 'coded_width': 960, 'coded_height': 544, 'closed_captions': 0, 'has_b_frames': 0, 'sample_aspect_ratio': '1:1', 'display_aspect_ratio': '16:9', 'pix_fmt': 'yuv420p', 'level': 32, 'color_range': 'tv', 'color_space': 'bt709', 'color_transfer': 'bt709', 'color_primaries': 'bt709', 'chroma_location': 'left', 'field_order': 'progressive', 'refs': 1, 'is_avc': 'true', 'nal_length_size': '4', 'r_frame_rate': '60000/1001', 'avg_frame_rate': '60000/1001', 'time_base': '1/60000', 'start_pts': 0, 'start_time': '0.000000', 'duration_ts': 3636633, 'duration': '60.610550', 'bit_rate': '2335818', 'bits_per_raw_sample': '8', 'nb_frames': '3633', 'disposition': {'default': 1, 'dub': 0, 'original': 0, 'comment': 0, 'lyrics': 0, 'karaoke': 0, 'forced': 0, 'hearing_impaired': 0, 'visual_impaired': 0, 'clean_effects': 0, 'attached_pic': 0, 'timed_thumbnails': 0}, 'tags': {'creation_time': '2018-06-15T21:05:12.000000Z', 'language': 'und', 'handler_name': 'Core Media Video'}}], 'format': {'filename': 'http://172.17.0.1:5553/sample-videos/fruit-and-vegetable-detection.mp4', 'nb_streams': 1, 'nb_programs': 0, 'format_name': 'mov,mp4,m4a,3gp,3g2,mj2', 'format_long_name': 'QuickTime / MOV', 'start_time': '0.000000', 'duration': '60.610550', 'size': '17760065', 'bit_rate': '2344154', 'probe_score': 100, 'tags': {'major_brand': 'mp42', 'minor_version': '1', 'compatible_brands': 'mp41mp42isom', 'creation_time': '2018-06-15T21:05:12.000000Z'}}}
supported_containers = ["mkv", "mp4", "mov", "m4a", "avi", "webm", "wmv", "vob"]
supported_audio_codecs = {
"aac": "aac",
"ogg": "libvorbis",
"wav": "pcm_s16le",
"flac": "flac",
"ac3": "ac3",
"wma": "wmav2",
}
supported_gpu_codecs = {
"mp4": "h264_vaapi",
"mkv": "hevc_vaapi",
"mov": "mjpeg_vaapi",
"webm": "vp8_vaapi"
}
pipeline_codecs = {
"input_file": {
"source": video_1
},
"outputs": [
{
"container": "mp4",
"channels": [
{
"stream_type": "video",
"codec": "libx264"
}
]
}
]
}
pipeline_h264 = {
"input_file": {
"source": video_1
},
"outputs": [
{
"container": "mkv",
"channels": [
{
"stream_type": "video",
"codec": "libx264",
"codec_params": {
"preset": "ultrafast",
"tune": "film",
"crf": "30"
}
}
]
}
]
}
pipeline_mpegts = {
"input_file": {
"source": video_1,
"params": {
"re": None
}
},
"outputs": [
{
"container": "mpegts",
"channels": [
{
"stream_type": "video",
"codec": "libx264",
"codec_params": {
"preset": "fast",
"crf": "30"
}
}
]
}
]
}
pipeline_rtmp = {
"input_file": {
"source": video_1,
"params": {
"re": None
}
},
"outputs": [
{
"container": "flv",
"rtmp_ip": rtmp_ip,
"rtmp_path": "live",
"channels": [
{
"stream_type": "video",
"codec": "libx264",
"codec_params": {
"preset": "fast",
"crf": "30"
}
}
]
}
]
}
pipeline_filters = {
"input_file": {
"source": video_2
},
"outputs": [
{
"container": "mkv",
"channels": [
{
"stream_type": "video",
"filters": {
"scale": {
"w": "iw/2",
"h": -1
},
"deflicker": {
"mode": "pm",
"size": 10
},
"reverse": {},
"hue": {
"s": 0
}
}
},
{
"stream_type": "audio",
"filters": {
"atrim": {
"start": 1
},
"asetpts": "PTS-STARTPTS",
"volume": {
"volume": 0.8
},
"areverse": {},
"aphaser": {}
}
}
]
}
]
}
pipeline_copy = {
"input_file": {
"source": video_2
},
"outputs": [
{
"container": "mp4",
"channels": [
{
"stream_type": "video",
"codec": "copy"
},
{
"stream_type": "audio",
"codec": "copy"
}
]
}
]
}
pipeline_empty = {
"input_file": {
"source": video_2
},
"outputs": [
{
"container": "mp4"
}
]
}
pipeline_mkv = {
"input_file": {
"source": video_1
},
"outputs": [
{
"container": "mkv",
"params": {
"metadata": "stereo_mode=left_right",
"default_mode": "infer_no_subs"
}
}
]
}
pipeline_mp4 = {
"input_file": {
"source":video_1
},
"outputs": [
{
"container": "mp4",
"params": {
"movflags": "isml+frag_keyframe"
}
}
]
}
pipeline_aac = {
"input_file": {
"source": video_2
},
"outputs": [
{
"container": "aac",
"channels": [
{
"stream_type": "audio",
"codec": "aac",
"codec_params": {
"ab": 192000,
"profile": "aac_ltp",
"strict": "-2",
}
},
{
"stream_type": "video",
"params": {
"vn": None
}
}
]
}
]
}
class TestMedia():
def setup_method(self):
models.Base.metadata.create_all(bind=models.engine)
def teardown_method(self):
os.remove(config.DATABASE_URL.split("///")[1])
def test_probe(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/probe?sync=true", json=input_data)
assert response.status_code == 200
assert response.json() == probe_input
def test_probe_missing_fields(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/probe?sync=true", json={})
assert response.status_code == 400
assert response.json().get("status") == "InputFile field required: source"
def test_probe_wrong_data(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/probe?sync=true", json={"source": "wrong"})
assert response.status_code == 400
assert response.json().get("status", {}).get('detail', {}).get('description') == ["wrong: No such file or directory"]
response = client.post(f"/media/{svc_id}/probe?sync=true", json={"source": ""})
assert response.status_code == 400
assert response.json().get("status", {}).get('detail', {}).get("description") == [": No such file or directory"]
response = client.post(f"/media/{svc_id}/probe?sync=true", json={"source": None})
assert response.status_code == 400
assert response.json().get("status") == "InputFile none is not an allowed value: source"
response = client.post(f"/media/{svc_id}/probe?sync=true", json={"source": 1})
assert response.status_code == 400
assert response.json().get("status", {}).get('detail', {}).get("description") == ["1: No such file or directory"]
def test_pipeline_missing_fields(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
json_data = pipeline_codecs.copy()
json_data["outputs"] = [{}]
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 400
assert response.json().get("status") == "Pipeline field required: outputs,0,container"
json_data["outputs"][0] = {"container": "test", "channels": [{}]}
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 400
assert response.json().get("status") == "Pipeline field required: outputs,0,channels,0,stream_type"
json_data["outputs"] = []
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 400
assert response.json().get("status", {}).get('detail', {}).get('description') == "No outputs specified"
json_data.pop("input_file")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 400
assert response.json().get("status") == "Pipeline field required: input_file"
def test_pipeline_unsupported_data(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
json_data = pipeline_codecs.copy()
json_data["outputs"][0]["container"] = "wrong"
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 200
pipeline_id = response.json()['id']
time.sleep(3)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
for output in response.json():
assert output['status'] == 'error'
assert output['command_output'][-1].strip() == f"{output.get('id')}.wrong: Invalid argument"
json_data["outputs"][0]["container"] = "mkv"
json_data["outputs"][0]["channels"][0]["codec"] = "wrong"
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 200
pipeline_id = response.json()['id']
time.sleep(2)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
for output in response.json():
assert output['status'] == 'error'
assert output['command_output'][-1].strip() == "Unknown encoder 'wrong'"
json_data["outputs"][0]["channels"][0]["codec"] = "libx264"
json_data["outputs"][0]["channels"][0]["stream_type"] = "wrong"
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
assert result[index]['command'] == f"ffmpeg -i {video_1} -map 0:v {outputs[index]}"
def test_pipeline_copy(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_copy)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
assert result[index]['command'] == f"ffmpeg -i {video_2} -map 0:v -map 0:a -acodec copy -vcodec copy {outputs[index]}"
def test_pipeline_empty(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_empty)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
assert result[index]['command'] == f"ffmpeg -i {video_2} -map 0:v -map 0:a {outputs[index]}"
def test_pipeline_mkv(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_mkv)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
assert result[index]['command'] == f"ffmpeg -i {video_1} -map 0:v -default_mode infer_no_subs -metadata stereo_mode=left_right {outputs[index]}"
def test_pipeline_mp4(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_mp4)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
assert result[index]['command'] == f"ffmpeg -i {video_1} -map 0:v -movflags isml+frag_keyframe {outputs[index]}"
def test_pipeline_aac(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_aac)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
assert result[index]['command'] == f"ffmpeg -i {video_2} -map 0:v -map 0:a -ab 192000 -acodec aac -profile:a aac_ltp -strict -2 -vn {outputs[index]}"
def test_pipeline_h264(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_h264)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
time.sleep(2)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] == 'finished'
assert result[index]['command'] == f"ffmpeg -i {video_1} -map 0:v -crf 30 -preset ultrafast -tune film -vcodec libx264 {outputs[index]}"
def test_pipeline_filters(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_filters)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
time.sleep(5)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] == 'finished'
assert result[index]['command'] == f"ffmpeg -i {video_2} -filter_complex [0:v]scale=h=-1:w=iw/2[s0];[s0]deflicker=mode=pm:size=10[s1];[s1]reverse[s2];[s2]hue=s=0[s3];[0:a]atrim=start=1[s4];[s4]asetpts=PTS-STARTPTS[s5];[s5]volume=volume=0.8[s6];[s6]areverse[s7];[s7]aphaser[s8] -map [s3] -map [s8] {outputs[index]}"
def test_pipeline_supported_containers(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
json_data = pipeline_empty.copy()
for container in supported_containers:
json_data["outputs"][0]["container"] = container
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
timeout = 15
finished = False
while not finished and timeout:
time.sleep(3)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
if result[index]['status'] == 'finished':
assert result[index]['command_retcode'] == 0
assert result[index]['command'] == f"ffmpeg -i {video_2} -map 0:v -map 0:a {outputs[index]}"
finished = True
timeout -= 1
if not finished:
assert False
def test_pipeline_supported_audio_codecs(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
json_data = pipeline_empty.copy()
for extension, codec in supported_audio_codecs.items():
json_data["outputs"][0]["container"] = extension
json_data["outputs"][0]["channels"] = [{"stream_type": "audio", "codec": codec}, {"stream_type": "video", "params": {"vn": None}}]
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
timeout = 15
finished = False
while not finished and timeout:
time.sleep(3)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
if result[index]['status'] == 'finished':
assert result[index]['command_retcode'] == 0
assert result[index]['command'] == f"ffmpeg -i {video_2} -map 0:v -map 0:a -acodec {codec} -vn {outputs[index]}"
finished = True
timeout -= 1
if not finished:
assert False
def test_pipeline_supported_gpu_codecs(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
json_data = pipeline_empty.copy()
for extension, codec in supported_gpu_codecs.items():
json_data["outputs"][0]["container"] = extension
json_data["outputs"][0]["params"] = {"vaapi_device": "/dev/dri/renderD128"}
json_data["outputs"][0]["channels"] = [{"stream_type": "video", "codec": codec, "params": {"vf":"format=nv12,hwupload"}}]
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
timeout = 15
finished = False
while not finished or timeout == 0:
time.sleep(3)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] != 'error'
if result[index]['status'] == 'finished':
assert result[index]['command_retcode'] == 0
assert result[index]['command'] == f"ffmpeg -i {video_2} -map 0:v -map 0:a -vaapi_device /dev/dri/renderD128 -vcodec {codec} -vf format=nv12,hwupload {outputs[index]}"
finished = True
timeout -= 1
if not finished:
assert False
def test_pipeline_ttl(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
json_data = pipeline_copy.copy()
json_data["ttl"] = 5
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 200
result = response.json()
time.sleep(6)
response = client.get(f"/media/{svc_id}/pipeline/{result['id']}?sync=true")
assert response.status_code == 400
assert response.json().get("status", {}).get('detail', {}).get("description") == f"Pipeline {result['id']} doesn't exist"
def test_pipeline_azure_upload(self):
ks = os.getenv("AZURE_STORAGE_CONNECTION_STRING")
bucket = os.getenv("CLOUD_STORAGE_BUCKET")
if ks and bucket:
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
json_data = pipeline_copy.copy()
json_data["outputs"][0]["storage"] = [{
"name": "azure",
"bucket": bucket,
"env": {
"AZURE_STORAGE_CONNECTION_STRING": ks
}
}]
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=json_data)
assert response.status_code == 200
# response = client.get(f"/media/{svc_id}/pipeline/{result['id']}?sync=true")
def test_pipeline_mpegts(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_mpegts)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
time.sleep(30)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] == 'running'
assert result[index]['command'] == f"ffmpeg -re -i {video_1} -map 0:v -f mpegts -crf 30 -preset fast -vcodec libx264 {outputs[index]}"
time.sleep(15)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] == 'finished'
def test_pipeline_stop(self):
with TestClient(app) as client:
response = client.get("/service")
data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
svc_id = data.pop("id")
response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_mpegts)
assert response.status_code == 200
pipeline_id = response.json()['id']
outputs = response.json().get("outputs", [])
time.sleep(2)
response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] == 'running'
assert result[index]['command'] == f"ffmpeg -re -i {video_1} -map 0:v -f mpegts -crf 30 -preset fast -vcodec libx264 {outputs[index]}"
time.sleep(2)
response = client.delete(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
assert response.status_code == 200
result = response.json()
for index in range(len(result)):
assert result[index]['status'] == 'finished'
    def test_pipeline_rtmp(self):
        """Stream via RTMP (flv): jobs must target rtmp://<ip>/live, run, then finish."""
        with TestClient(app) as client:
            # Locate the mers-ffmpeg service id.
            response = client.get("/service")
            data = list(filter(lambda x: x['app'] == 'mers-ffmpeg', response.json()))[0]
            svc_id = data.pop("id")
            response = client.post(f"/media/{svc_id}/pipeline?sync=true", json=pipeline_rtmp)
            assert response.status_code == 200
            pipeline_id = response.json()['id']
            outputs = response.json().get("outputs", [])
            # Mid-stream: every job should still be running with the expected command.
            time.sleep(30)
            response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
            assert response.status_code == 200
            result = response.json()
            for index in range(len(result)):
                # Each job must stream to the expected RTMP endpoint.
                assert outputs[index] == f"rtmp://{rtmp_ip}/live"
                assert result[index]['status'] == 'running'
                assert result[index]['command'] == f"ffmpeg -re -i {video_1} -map 0:v -f flv -crf 30 -preset fast -vcodec libx264 {outputs[index]}"
            # After the source ends, all jobs should have finished.
            time.sleep(15)
            response = client.get(f"/media/{svc_id}/pipeline/{pipeline_id}?sync=true")
            assert response.status_code == 200
            result = response.json()
            for index in range(len(result)):
assert result[index]['status'] == 'finished' | true | true |
f7fd8351f039aea1f7dd655273f34851265e3b5c | 960 | py | Python | application/bus_exp/code/collect_metrics.py | UCY-LINC-LAB/5G-Slicer-demo | 0d2c7ddabb339a54591bc3f58769c88d2ff4c42a | [
"Apache-2.0"
] | null | null | null | application/bus_exp/code/collect_metrics.py | UCY-LINC-LAB/5G-Slicer-demo | 0d2c7ddabb339a54591bc3f58769c88d2ff4c42a | [
"Apache-2.0"
] | null | null | null | application/bus_exp/code/collect_metrics.py | UCY-LINC-LAB/5G-Slicer-demo | 0d2c7ddabb339a54591bc3f58769c88d2ff4c42a | [
"Apache-2.0"
] | null | null | null | import logging
from threading import Thread
from time import sleep
import requests
from cachetools import TTLCache
from utils.edge_fuctionality import get_random_metrics, propagate_to_edge, get_url_from_mobility
cache = TTLCache(maxsize=128, ttl=5)
# from utils.weather import Weather
# w = Weather()
# t = w.retrieve_all_raw()
data_list = []
def helping_function(data_list=None):
    """Collect one metrics sample and try to flush the batch to the closest edge node.

    Returns the (possibly emptied) batch so the caller can carry it over to the
    next iteration when the edge node is unreachable.

    Bug fix: the original used a mutable default argument (``data_list=[]``),
    which is shared between calls; ``None`` is used as the sentinel instead.
    """
    if data_list is None:
        data_list = []
    # The edge URL is cached for the TTL window to avoid re-querying mobility.
    url = cache.get('url')
    if url is None:
        cache['url'] = get_url_from_mobility()
    closest_base_station = cache.get('url')
    data = get_random_metrics()
    data_list.append(data)
    try:
        print("data length", len(data_list))
        propagate_to_edge(data_list, closest_base_station)
        # Batch delivered; start a fresh one.
        data_list = []
    except requests.exceptions.Timeout:
        # Keep the batch so it is retried on the next call.
        print("Node is not connected to the network")
    return data_list
# Main collection loop: keep sampling/flushing forever; any unexpected error
# is printed and the loop continues with the batch accumulated so far.
while True:
    # sleep(5)
    try:
        data_list = helping_function(data_list)
    except Exception as ex:
print(ex) | 29.090909 | 96 | 0.704167 | import logging
from threading import Thread
from time import sleep
import requests
from cachetools import TTLCache
from utils.edge_fuctionality import get_random_metrics, propagate_to_edge, get_url_from_mobility
cache = TTLCache(maxsize=128, ttl=5)
data_list = []
def helping_function(data_list=None):
    """Collect one metrics sample and try to flush the batch to the closest edge node.

    Returns the (possibly emptied) batch so the caller can carry it over when
    the edge node times out.

    Bug fix: replaces the mutable default argument (``data_list=[]``), which is
    shared between calls, with a ``None`` sentinel.
    """
    if data_list is None:
        data_list = []
    # The edge URL is cached for the TTL window to avoid re-querying mobility.
    url = cache.get('url')
    if url is None:
        cache['url'] = get_url_from_mobility()
    closest_base_station = cache.get('url')
    data = get_random_metrics()
    data_list.append(data)
    try:
        print("data length", len(data_list))
        propagate_to_edge(data_list, closest_base_station)
        # Batch delivered; start a fresh one.
        data_list = []
    except requests.exceptions.Timeout:
        # Keep the batch so it is retried on the next call.
        print("Node is not connected to the network")
    return data_list
# Main collection loop: keep sampling/flushing forever; any unexpected error
# is printed and the loop continues with the batch accumulated so far.
while True:
    try:
        data_list = helping_function(data_list)
    except Exception as ex:
print(ex) | true | true |
f7fd83f08e7be8ce6bb5f9396e3aeae3e22ccb1b | 11,431 | py | Python | stanza/models/parser.py | de9uch1/stanza | cafb7d5004842cd3c8a3ac334ce7649bac928830 | [
"Apache-2.0"
] | 1 | 2021-05-23T12:44:34.000Z | 2021-05-23T12:44:34.000Z | stanza/models/parser.py | de9uch1/stanza | cafb7d5004842cd3c8a3ac334ce7649bac928830 | [
"Apache-2.0"
] | null | null | null | stanza/models/parser.py | de9uch1/stanza | cafb7d5004842cd3c8a3ac334ce7649bac928830 | [
"Apache-2.0"
] | null | null | null | """
Entry point for training and evaluating a dependency parser.
This implementation combines a deep biaffine graph-based parser with linearization and distance features.
For details please refer to paper: https://nlp.stanford.edu/pubs/qi2018universal.pdf.
"""
"""
Training and evaluation for the parser.
"""
import sys
import os
import shutil
import time
from datetime import datetime
import argparse
import numpy as np
import random
import torch
from torch import nn, optim
from stanza.models.depparse.data import DataLoader
from stanza.models.depparse.trainer import Trainer
from stanza.models.depparse import scorer
from stanza.models.common import utils
from stanza.models.common.pretrain import Pretrain
from stanza.models.common.doc import *
from stanza.utils.conll import CoNLL
from stanza.models import _training_logging
def parse_args():
    """Build and parse command-line options for the dependency parser.

    Returns:
        argparse.Namespace with all data, model, optimization and runtime options.

    Fixes two copy-paste help texts: ``--data_dir`` described itself as a model
    save dir, and ``--gold_file`` as an output file (it is the gold input used
    for scoring).
    """
    parser = argparse.ArgumentParser()
    # Data and file locations.
    parser.add_argument('--data_dir', type=str, default='data/depparse', help='Root dir for input data.')
    parser.add_argument('--wordvec_dir', type=str, default='extern_data/word2vec', help='Directory of word vectors.')
    parser.add_argument('--wordvec_file', type=str, default=None, help='Word vectors filename.')
    parser.add_argument('--train_file', type=str, default=None, help='Input file for data loader.')
    parser.add_argument('--eval_file', type=str, default=None, help='Input file for data loader.')
    parser.add_argument('--output_file', type=str, default=None, help='Output CoNLL-U file.')
    parser.add_argument('--gold_file', type=str, default=None, help='Gold CoNLL-U file used for scoring.')
    parser.add_argument('--mode', default='train', choices=['train', 'predict'])
    parser.add_argument('--lang', type=str, help='Language')
    parser.add_argument('--shorthand', type=str, help="Treebank shorthand")
    # Model architecture.
    parser.add_argument('--hidden_dim', type=int, default=400)
    parser.add_argument('--char_hidden_dim', type=int, default=400)
    parser.add_argument('--deep_biaff_hidden_dim', type=int, default=400)
    parser.add_argument('--composite_deep_biaff_hidden_dim', type=int, default=100)
    parser.add_argument('--word_emb_dim', type=int, default=75)
    parser.add_argument('--char_emb_dim', type=int, default=100)
    parser.add_argument('--tag_emb_dim', type=int, default=50)
    parser.add_argument('--transformed_dim', type=int, default=125)
    parser.add_argument('--num_layers', type=int, default=3)
    parser.add_argument('--char_num_layers', type=int, default=1)
    parser.add_argument('--pretrain_max_vocab', type=int, default=250000)
    # Regularization.
    parser.add_argument('--word_dropout', type=float, default=0.33)
    parser.add_argument('--dropout', type=float, default=0.5)
    parser.add_argument('--rec_dropout', type=float, default=0, help="Recurrent dropout")
    parser.add_argument('--char_rec_dropout', type=float, default=0, help="Recurrent dropout")
    # Feature toggles (store_false flags default to enabled).
    parser.add_argument('--no_char', dest='char', action='store_false', help="Turn off character model.")
    parser.add_argument('--no_pretrain', dest='pretrain', action='store_false', help="Turn off pretrained embeddings.")
    parser.add_argument('--no_linearization', dest='linearization', action='store_false', help="Turn off linearization term.")
    parser.add_argument('--no_distance', dest='distance', action='store_false', help="Turn off distance term.")
    parser.add_argument('--sample_train', type=float, default=1.0, help='Subsample training data.')
    # Optimization.
    parser.add_argument('--optim', type=str, default='adam', help='sgd, adagrad, adam or adamax.')
    parser.add_argument('--lr', type=float, default=3e-3, help='Learning rate')
    parser.add_argument('--beta2', type=float, default=0.95)
    parser.add_argument('--max_steps', type=int, default=50000)
    parser.add_argument('--eval_interval', type=int, default=100)
    parser.add_argument('--max_steps_before_stop', type=int, default=3000)
    parser.add_argument('--batch_size', type=int, default=5000)
    parser.add_argument('--max_grad_norm', type=float, default=1.0, help='Gradient clipping.')
    # Logging, saving and hardware.
    parser.add_argument('--log_step', type=int, default=20, help='Print log every k steps.')
    parser.add_argument('--save_dir', type=str, default='saved_models/depparse', help='Root dir for saving models.')
    parser.add_argument('--save_name', type=str, default=None, help="File name to save the model")
    parser.add_argument('--seed', type=int, default=1234)
    parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())
    parser.add_argument('--cpu', action='store_true', help='Ignore CUDA.')
    args = parser.parse_args()
    return args
def main():
    """Entry point: parse CLI args, seed all RNGs, then dispatch to train/evaluate."""
    parsed = parse_args()
    # Seed every RNG source for reproducibility.
    torch.manual_seed(parsed.seed)
    np.random.seed(parsed.seed)
    random.seed(parsed.seed)
    if parsed.cpu:
        parsed.cuda = False
    elif parsed.cuda:
        torch.cuda.manual_seed(parsed.seed)
    args = vars(parsed)
    print("Running parser in {} mode".format(args['mode']))
    dispatch = train if args['mode'] == 'train' else evaluate
    dispatch(args)
def train(args):
    """Train the biaffine dependency parser described by ``args``.

    Loads pretrained embeddings and CoNLL-U train/dev data, runs the
    optimization loop, periodically evaluates on dev, and saves the best
    checkpoint. On a dev-score plateau it first switches to AMSGrad, and
    stops after a second plateau or after ``max_steps`` updates.
    """
    utils.ensure_dir(args['save_dir'])
    model_file = args['save_dir'] + '/' + args['save_name'] if args['save_name'] is not None \
        else '{}/{}_parser.pt'.format(args['save_dir'], args['shorthand'])
    # load pretrained vectors if needed
    pretrain = None
    if args['pretrain']:
        vec_file = args['wordvec_file'] if args['wordvec_file'] else utils.get_wordvec_file(args['wordvec_dir'], args['shorthand'])
        pretrain_file = '{}/{}.pretrain.pt'.format(args['save_dir'], args['shorthand'])
        pretrain = Pretrain(pretrain_file, vec_file, args['pretrain_max_vocab'])
    # load data
    print("Loading data with batch size {}...".format(args['batch_size']))
    train_doc = Document(CoNLL.conll2dict(input_file=args['train_file']))
    train_batch = DataLoader(train_doc, args['batch_size'], args, pretrain, evaluation=False)
    vocab = train_batch.vocab
    # Dev loader reuses the training vocab so indices stay consistent.
    dev_doc = Document(CoNLL.conll2dict(input_file=args['eval_file']))
    dev_batch = DataLoader(dev_doc, args['batch_size'], args, pretrain, vocab=vocab, evaluation=True, sort_during_eval=True)
    # pred and gold path
    system_pred_file = args['output_file']
    gold_file = args['gold_file']
    # skip training if the language does not have training or dev data
    if len(train_batch) == 0 or len(dev_batch) == 0:
        print("Skip training because no data available...")
        sys.exit(0)
    print("Training parser...")
    trainer = Trainer(args=args, vocab=vocab, pretrain=pretrain, use_cuda=args['cuda'])
    global_step = 0
    max_steps = args['max_steps']
    dev_score_history = []
    best_dev_preds = []
    current_lr = args['lr']
    global_start_time = time.time()
    format_str = '{}: step {}/{}, loss = {:.6f} ({:.3f} sec/batch), lr: {:.6f}'
    using_amsgrad = False
    last_best_step = 0
    # start training
    train_loss = 0
    while True:
        do_break = False
        for i, batch in enumerate(train_batch):
            start_time = time.time()
            global_step += 1
            loss = trainer.update(batch, eval=False) # update step
            train_loss += loss
            if global_step % args['log_step'] == 0:
                duration = time.time() - start_time
                print(format_str.format(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), global_step,\
                        max_steps, loss, duration, current_lr))
            if global_step % args['eval_interval'] == 0:
                # eval on dev
                print("Evaluating on dev set...")
                dev_preds = []
                for batch in dev_batch:
                    preds = trainer.predict(batch)
                    dev_preds += preds
                # Restore original sentence order before writing predictions.
                dev_preds = utils.unsort(dev_preds, dev_batch.data_orig_idx)
                dev_batch.doc.set([HEAD, DEPREL], [y for x in dev_preds for y in x])
                CoNLL.dict2conll(dev_batch.doc.to_dict(), system_pred_file)
                _, _, dev_score = scorer.score(system_pred_file, gold_file)
                train_loss = train_loss / args['eval_interval'] # avg loss per batch
                print("step {}: train_loss = {:.6f}, dev_score = {:.4f}".format(global_step, train_loss, dev_score))
                train_loss = 0
                # save best model
                if len(dev_score_history) == 0 or dev_score > max(dev_score_history):
                    last_best_step = global_step
                    trainer.save(model_file)
                    print("new best model saved.")
                    best_dev_preds = dev_preds
                dev_score_history += [dev_score]
                print("")
            if global_step - last_best_step >= args['max_steps_before_stop']:
                # First plateau: switch optimizer to AMSGrad; second plateau: stop.
                if not using_amsgrad:
                    print("Switching to AMSGrad")
                    last_best_step = global_step
                    using_amsgrad = True
                    trainer.optimizer = optim.Adam(trainer.model.parameters(), amsgrad=True, lr=args['lr'], betas=(.9, args['beta2']), eps=1e-6)
                else:
                    do_break = True
                    break
            if global_step >= args['max_steps']:
                do_break = True
                break
        if do_break: break
        train_batch.reshuffle()
    print("Training ended with {} steps.".format(global_step))
    best_f, best_eval = max(dev_score_history)*100, np.argmax(dev_score_history)+1
    print("Best dev F1 = {:.2f}, at iteration = {}".format(best_f, best_eval * args['eval_interval']))
def evaluate(args):
    """Evaluate a saved parser on ``args['eval_file']``.

    Writes predictions to ``args['output_file']`` and, when a gold file is
    given, prints the score computed by the scorer.
    """
    # file paths
    system_pred_file = args['output_file']
    gold_file = args['gold_file']
    model_file = args['save_dir'] + '/' + args['save_name'] if args['save_name'] is not None \
            else '{}/{}_parser.pt'.format(args['save_dir'], args['shorthand'])
    # load pretrain; note that we allow the pretrain_file to be non-existent
    pretrain_file = '{}/{}.pretrain.pt'.format(args['save_dir'], args['shorthand'])
    pretrain = Pretrain(pretrain_file)
    # load model
    print("Loading model from: {}".format(model_file))
    use_cuda = args['cuda'] and not args['cpu']
    trainer = Trainer(pretrain=pretrain, model_file=model_file, use_cuda=use_cuda)
    loaded_args, vocab = trainer.args, trainer.vocab
    # load config: override path/mode options saved with the model with the CLI values
    for k in args:
        if k.endswith('_dir') or k.endswith('_file') or k in ['shorthand'] or k == 'mode':
            loaded_args[k] = args[k]
    # load data
    print("Loading data with batch size {}...".format(args['batch_size']))
    doc = Document(CoNLL.conll2dict(input_file=args['eval_file']))
    batch = DataLoader(doc, args['batch_size'], loaded_args, pretrain, vocab=vocab, evaluation=True, sort_during_eval=True)
    if len(batch) > 0:
        print("Start evaluation...")
        preds = []
        for i, b in enumerate(batch):
            preds += trainer.predict(b)
    else:
        # skip eval if dev data does not exist
        preds = []
    # Restore original sentence order before writing predictions.
    preds = utils.unsort(preds, batch.data_orig_idx)
    # write to file and score
    batch.doc.set([HEAD, DEPREL], [y for x in preds for y in x])
    CoNLL.dict2conll(batch.doc.to_dict(), system_pred_file)
    if gold_file is not None:
        _, _, score = scorer.score(system_pred_file, gold_file)
        print("Parser score:")
        print("{} {:.2f}".format(args['shorthand'], score*100))
if __name__ == '__main__':
main()
| 43.965385 | 144 | 0.655148 |
import sys
import os
import shutil
import time
from datetime import datetime
import argparse
import numpy as np
import random
import torch
from torch import nn, optim
from stanza.models.depparse.data import DataLoader
from stanza.models.depparse.trainer import Trainer
from stanza.models.depparse import scorer
from stanza.models.common import utils
from stanza.models.common.pretrain import Pretrain
from stanza.models.common.doc import *
from stanza.utils.conll import CoNLL
from stanza.models import _training_logging
def parse_args():
    """Build and parse command-line options for the dependency parser.

    Returns:
        argparse.Namespace with all data, model, optimization and runtime options.

    Fixes two copy-paste help texts: ``--data_dir`` described itself as a model
    save dir, and ``--gold_file`` as an output file (it is the gold input used
    for scoring).
    """
    parser = argparse.ArgumentParser()
    # Data and file locations.
    parser.add_argument('--data_dir', type=str, default='data/depparse', help='Root dir for input data.')
    parser.add_argument('--wordvec_dir', type=str, default='extern_data/word2vec', help='Directory of word vectors.')
    parser.add_argument('--wordvec_file', type=str, default=None, help='Word vectors filename.')
    parser.add_argument('--train_file', type=str, default=None, help='Input file for data loader.')
    parser.add_argument('--eval_file', type=str, default=None, help='Input file for data loader.')
    parser.add_argument('--output_file', type=str, default=None, help='Output CoNLL-U file.')
    parser.add_argument('--gold_file', type=str, default=None, help='Gold CoNLL-U file used for scoring.')
    parser.add_argument('--mode', default='train', choices=['train', 'predict'])
    parser.add_argument('--lang', type=str, help='Language')
    parser.add_argument('--shorthand', type=str, help="Treebank shorthand")
    # Model architecture.
    parser.add_argument('--hidden_dim', type=int, default=400)
    parser.add_argument('--char_hidden_dim', type=int, default=400)
    parser.add_argument('--deep_biaff_hidden_dim', type=int, default=400)
    parser.add_argument('--composite_deep_biaff_hidden_dim', type=int, default=100)
    parser.add_argument('--word_emb_dim', type=int, default=75)
    parser.add_argument('--char_emb_dim', type=int, default=100)
    parser.add_argument('--tag_emb_dim', type=int, default=50)
    parser.add_argument('--transformed_dim', type=int, default=125)
    parser.add_argument('--num_layers', type=int, default=3)
    parser.add_argument('--char_num_layers', type=int, default=1)
    parser.add_argument('--pretrain_max_vocab', type=int, default=250000)
    # Regularization.
    parser.add_argument('--word_dropout', type=float, default=0.33)
    parser.add_argument('--dropout', type=float, default=0.5)
    parser.add_argument('--rec_dropout', type=float, default=0, help="Recurrent dropout")
    parser.add_argument('--char_rec_dropout', type=float, default=0, help="Recurrent dropout")
    # Feature toggles (store_false flags default to enabled).
    parser.add_argument('--no_char', dest='char', action='store_false', help="Turn off character model.")
    parser.add_argument('--no_pretrain', dest='pretrain', action='store_false', help="Turn off pretrained embeddings.")
    parser.add_argument('--no_linearization', dest='linearization', action='store_false', help="Turn off linearization term.")
    parser.add_argument('--no_distance', dest='distance', action='store_false', help="Turn off distance term.")
    parser.add_argument('--sample_train', type=float, default=1.0, help='Subsample training data.')
    # Optimization.
    parser.add_argument('--optim', type=str, default='adam', help='sgd, adagrad, adam or adamax.')
    parser.add_argument('--lr', type=float, default=3e-3, help='Learning rate')
    parser.add_argument('--beta2', type=float, default=0.95)
    parser.add_argument('--max_steps', type=int, default=50000)
    parser.add_argument('--eval_interval', type=int, default=100)
    parser.add_argument('--max_steps_before_stop', type=int, default=3000)
    parser.add_argument('--batch_size', type=int, default=5000)
    parser.add_argument('--max_grad_norm', type=float, default=1.0, help='Gradient clipping.')
    # Logging, saving and hardware.
    parser.add_argument('--log_step', type=int, default=20, help='Print log every k steps.')
    parser.add_argument('--save_dir', type=str, default='saved_models/depparse', help='Root dir for saving models.')
    parser.add_argument('--save_name', type=str, default=None, help="File name to save the model")
    parser.add_argument('--seed', type=int, default=1234)
    parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())
    parser.add_argument('--cpu', action='store_true', help='Ignore CUDA.')
    args = parser.parse_args()
    return args
def main():
    """Entry point: parse CLI args, seed all RNGs, then dispatch to train/evaluate."""
    parsed = parse_args()
    # Seed every RNG source for reproducibility.
    torch.manual_seed(parsed.seed)
    np.random.seed(parsed.seed)
    random.seed(parsed.seed)
    if parsed.cpu:
        parsed.cuda = False
    elif parsed.cuda:
        torch.cuda.manual_seed(parsed.seed)
    args = vars(parsed)
    print("Running parser in {} mode".format(args['mode']))
    dispatch = train if args['mode'] == 'train' else evaluate
    dispatch(args)
def train(args):
    """Train the biaffine dependency parser described by ``args``.

    Loads pretrained embeddings and CoNLL-U train/dev data, runs the
    optimization loop, periodically evaluates on dev, and saves the best
    checkpoint. On a dev-score plateau it first switches to AMSGrad, and
    stops after a second plateau or after ``max_steps`` updates.
    """
    utils.ensure_dir(args['save_dir'])
    model_file = args['save_dir'] + '/' + args['save_name'] if args['save_name'] is not None \
        else '{}/{}_parser.pt'.format(args['save_dir'], args['shorthand'])
    # Load pretrained word vectors if enabled.
    pretrain = None
    if args['pretrain']:
        vec_file = args['wordvec_file'] if args['wordvec_file'] else utils.get_wordvec_file(args['wordvec_dir'], args['shorthand'])
        pretrain_file = '{}/{}.pretrain.pt'.format(args['save_dir'], args['shorthand'])
        pretrain = Pretrain(pretrain_file, vec_file, args['pretrain_max_vocab'])
    # Load train/dev data; the dev loader reuses the training vocab.
    print("Loading data with batch size {}...".format(args['batch_size']))
    train_doc = Document(CoNLL.conll2dict(input_file=args['train_file']))
    train_batch = DataLoader(train_doc, args['batch_size'], args, pretrain, evaluation=False)
    vocab = train_batch.vocab
    dev_doc = Document(CoNLL.conll2dict(input_file=args['eval_file']))
    dev_batch = DataLoader(dev_doc, args['batch_size'], args, pretrain, vocab=vocab, evaluation=True, sort_during_eval=True)
    # Prediction and gold paths used by the scorer.
    system_pred_file = args['output_file']
    gold_file = args['gold_file']
    # Skip training when the language has no training or dev data.
    if len(train_batch) == 0 or len(dev_batch) == 0:
        print("Skip training because no data available...")
        sys.exit(0)
    print("Training parser...")
    trainer = Trainer(args=args, vocab=vocab, pretrain=pretrain, use_cuda=args['cuda'])
    global_step = 0
    max_steps = args['max_steps']
    dev_score_history = []
    best_dev_preds = []
    current_lr = args['lr']
    global_start_time = time.time()
    format_str = '{}: step {}/{}, loss = {:.6f} ({:.3f} sec/batch), lr: {:.6f}'
    using_amsgrad = False
    last_best_step = 0
    # Main training loop.
    train_loss = 0
    while True:
        do_break = False
        for i, batch in enumerate(train_batch):
            start_time = time.time()
            global_step += 1
            # One optimization step on this batch.
            loss = trainer.update(batch, eval=False)
            train_loss += loss
            if global_step % args['log_step'] == 0:
                duration = time.time() - start_time
                print(format_str.format(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), global_step,\
                        max_steps, loss, duration, current_lr))
            if global_step % args['eval_interval'] == 0:
                # Evaluate on the dev set.
                print("Evaluating on dev set...")
                dev_preds = []
                for batch in dev_batch:
                    preds = trainer.predict(batch)
                    dev_preds += preds
                # Restore original sentence order before writing predictions.
                dev_preds = utils.unsort(dev_preds, dev_batch.data_orig_idx)
                dev_batch.doc.set([HEAD, DEPREL], [y for x in dev_preds for y in x])
                CoNLL.dict2conll(dev_batch.doc.to_dict(), system_pred_file)
                _, _, dev_score = scorer.score(system_pred_file, gold_file)
                # Average loss per batch over the eval interval.
                train_loss = train_loss / args['eval_interval']
                print("step {}: train_loss = {:.6f}, dev_score = {:.4f}".format(global_step, train_loss, dev_score))
                train_loss = 0
                # Save the checkpoint whenever the dev score improves.
                if len(dev_score_history) == 0 or dev_score > max(dev_score_history):
                    last_best_step = global_step
                    trainer.save(model_file)
                    print("new best model saved.")
                    best_dev_preds = dev_preds
                dev_score_history += [dev_score]
                print("")
            if global_step - last_best_step >= args['max_steps_before_stop']:
                # First plateau: switch optimizer to AMSGrad; second plateau: stop.
                if not using_amsgrad:
                    print("Switching to AMSGrad")
                    last_best_step = global_step
                    using_amsgrad = True
                    trainer.optimizer = optim.Adam(trainer.model.parameters(), amsgrad=True, lr=args['lr'], betas=(.9, args['beta2']), eps=1e-6)
                else:
                    do_break = True
                    break
            if global_step >= args['max_steps']:
                do_break = True
                break
        if do_break: break
        train_batch.reshuffle()
    print("Training ended with {} steps.".format(global_step))
    best_f, best_eval = max(dev_score_history)*100, np.argmax(dev_score_history)+1
    print("Best dev F1 = {:.2f}, at iteration = {}".format(best_f, best_eval * args['eval_interval']))
def evaluate(args):
    """Evaluate a saved parser on ``args['eval_file']``.

    Writes predictions to ``args['output_file']`` and, when a gold file is
    given, prints the score computed by the scorer.
    """
    # File paths for predictions and gold annotations.
    system_pred_file = args['output_file']
    gold_file = args['gold_file']
    model_file = args['save_dir'] + '/' + args['save_name'] if args['save_name'] is not None \
            else '{}/{}_parser.pt'.format(args['save_dir'], args['shorthand'])
    # Load pretrain; the pretrain file is allowed to be non-existent.
    pretrain_file = '{}/{}.pretrain.pt'.format(args['save_dir'], args['shorthand'])
    pretrain = Pretrain(pretrain_file)
    # Load the saved model.
    print("Loading model from: {}".format(model_file))
    use_cuda = args['cuda'] and not args['cpu']
    trainer = Trainer(pretrain=pretrain, model_file=model_file, use_cuda=use_cuda)
    loaded_args, vocab = trainer.args, trainer.vocab
    # Override path/mode options saved with the model with the CLI values.
    for k in args:
        if k.endswith('_dir') or k.endswith('_file') or k in ['shorthand'] or k == 'mode':
            loaded_args[k] = args[k]
    # Load evaluation data.
    print("Loading data with batch size {}...".format(args['batch_size']))
    doc = Document(CoNLL.conll2dict(input_file=args['eval_file']))
    batch = DataLoader(doc, args['batch_size'], loaded_args, pretrain, vocab=vocab, evaluation=True, sort_during_eval=True)
    if len(batch) > 0:
        print("Start evaluation...")
        preds = []
        for i, b in enumerate(batch):
            preds += trainer.predict(b)
    else:
        # Skip evaluation when the eval data is empty.
        preds = []
    # Restore original sentence order before writing predictions.
    preds = utils.unsort(preds, batch.data_orig_idx)
    # Write predictions to file and score against gold if available.
    batch.doc.set([HEAD, DEPREL], [y for x in preds for y in x])
    CoNLL.dict2conll(batch.doc.to_dict(), system_pred_file)
    if gold_file is not None:
        _, _, score = scorer.score(system_pred_file, gold_file)
        print("Parser score:")
        print("{} {:.2f}".format(args['shorthand'], score*100))
if __name__ == '__main__':
main()
| true | true |
f7fd8444f89d3b502461e65a9d2c65e3f0d16f50 | 4,481 | py | Python | cyllene/m_magics.py | 28left/psumathnotebooks | ec948216304e5f234a2f4d0f6bdcfaa1a10c435d | [
"MIT"
] | 1 | 2021-05-04T14:09:51.000Z | 2021-05-04T14:09:51.000Z | cyllene/m_magics.py | 28left/psumathnotebooks | ec948216304e5f234a2f4d0f6bdcfaa1a10c435d | [
"MIT"
] | null | null | null | cyllene/m_magics.py | 28left/psumathnotebooks | ec948216304e5f234a2f4d0f6bdcfaa1a10c435d | [
"MIT"
] | null | null | null | # problem and answer magics
from os import path
from IPython.core.magic import (register_line_magic, register_cell_magic,
register_line_cell_magic)
from IPython import get_ipython
from cyllene.p_problem import ProbStack
ip = get_ipython()
@register_line_magic
def initialize(line):
    """Line magic: run an initialization ``.py`` file via IPython's ``%run``.

    ``%initialize`` with no argument runs ``init.py``; ``%initialize name``
    runs ``name.py``. The file is expected to define problems and helpers in
    the user namespace.
    """
    # Default to init.py when no file name is given on the magic line.
    init_file = (line + '.py') if line else 'init.py'
    try:
        # Run the init file to load problems etc.
        ip.magic('run ' + init_file)
        print("Initialization successful!")
    except Exception:
        # Narrowed from a bare except so Ctrl-C / SystemExit still propagate.
        print("Error loading initialization file.")
@register_line_magic
def Problem(line):
    """Line magic: look up a problem by name on the stack and display it."""
    try:
        ProbStack.stack[line].state_problem()
    except:
        print("Could not add problem to problem stack.")
@register_line_magic
def problem(line):
    """Line magic (lowercase alias of %Problem): display a stacked problem."""
    try:
        chosen = ProbStack.stack[line]
        chosen.state_problem()
    except:
        print("Could not add problem to problem stack.")
@register_cell_magic
def answer(line, cell):
    """Cell magic: parse a student's answer cell and check it against a problem.

    The magic line is expected to look like ``%%answer problem <name>``
    (the problem name is taken from ``line[8:]``). The cell body holds one
    answer per input, either prefixed ``(1):``/``(1)`` ... ``(n)`` or given
    in order without prefixes.
    """
    # Given this is an answer block, check the top line is written in a valid way
    try:
        problem = ProbStack.stack[line[8:]]
    except:
        print("Oops! Something in the top line won't let us find the right problem.")
        return None
    # PREWORK FOR CELL:
    # if for some reason cell is not defined, stop the process
    if(cell is None):
        print("Your answer seems to be empty. Please try again.")
        return None
    # eliminate any line from cell that is "essentially empty", i.e., only whitepace
    cell = "\n".join([ll.rstrip() for ll in cell.splitlines() if ll.strip()])
    # strip the empty space from the beginning of each line
    cell = "\n".join([ll.lstrip() for ll in cell.splitlines()])
    # check if cell is essentially empty
    if(cell == ""):
        print("Your answer is empty. Please try again!")
        return None
    # PARSING ANSWER DEPENDS ON NUM_INPUTS
    answer = []
    try:
        n = problem.num_inputs
    except:
        print("problem.num_inputs has not yet been defined. Please check problem's encoding")
        return None
    if(n < 1):
        print("This problem was coded with too few num_inputs.")
        return None
    elif(n == 1):
        # there are no "(i): " prefix strings in this case, so can just use the first meaningful line in code as our answer
        answer.append(cell.split('\n')[0])
    else: # num_inputs > 1
        # By default, let answer[i] = ""
        for i in range(n):
            answer.append("")
        # TWO CASES for parsing answer blocks for problems with multiple inputs
        # Distinguish between the two cases by the truth value of prefix_found
        prefix_found = False
        prefix_string = ""
        prefix_index = 0
        while(prefix_index < n and not prefix_found):
            prefix_string = "(" + str(prefix_index + 1) + ")"
            for cell_line in cell.splitlines():
                if cell_line.startswith(prefix_string):
                    prefix_found = True
                    break
            prefix_index += 1
        if(prefix_found):
            # CASE 1: there are "(i): " prefices from (1),...,(n)
            # iterate through each line in code to update answer[i] if line begins with "(i):""
            # ignore any lines with an (i) for i > n
            for cell_line in cell.splitlines():
                # could be improved
                for i in range(n):
                    # "(i):" must be tried before the shorter "(i)" prefix.
                    if cell_line.startswith("(" + str(i + 1) + "):"):
                        answer[i] = cell_line[len("(" + str(i + 1) + "):"):].strip()
                        break
                    if cell_line.startswith("(" + str(i + 1) + ")"):
                        answer[i] = cell_line[len("(" + str(i + 1) + ")"):].strip()
                        break
        else:
            # CASE 2: submission assumed to be written in order without the prefices (1),...,(n)
            i = 0
            for cell_line in cell.splitlines():
                if(i >= n):
                    break
                answer[i] = cell_line.strip()
                i += 1
problem.check_answer(answer) | 34.736434 | 123 | 0.561928 |
from os import path
from IPython.core.magic import (register_line_magic, register_cell_magic,
register_line_cell_magic)
from IPython import get_ipython
from cyllene.p_problem import ProbStack
ip = get_ipython()
@register_line_magic
def initialize(line):
    """Line magic: run an initialization ``.py`` file via IPython's ``%run``.

    ``%initialize`` with no argument runs ``init.py``; ``%initialize name``
    runs ``name.py``.
    """
    # Default to init.py when no file name is given on the magic line.
    init_file = (line + '.py') if line else 'init.py'
    try:
        ip.magic('run ' + init_file)
        print("Initialization successful!")
    except Exception:
        # Narrowed from a bare except so Ctrl-C / SystemExit still propagate.
        print("Error loading initialization file.")
@register_line_magic
def Problem(line):
    """Line magic: look up a problem by name on the stack and display it."""
    try:
        ProbStack.stack[line].state_problem()
    except:
        print("Could not add problem to problem stack.")
@register_line_magic
def problem(line):
    """Line magic (lowercase alias of %Problem): display a stacked problem."""
    try:
        chosen = ProbStack.stack[line]
        chosen.state_problem()
    except:
        print("Could not add problem to problem stack.")
@register_cell_magic
def answer(line, cell):
    """Cell magic: parse a student's answer cell and check it against a problem.

    The problem name is taken from ``line[8:]``. The cell body holds one
    answer per input, either prefixed ``(1):``/``(1)`` ... ``(n)`` or given
    in order without prefixes.
    """
    try:
        problem = ProbStack.stack[line[8:]]
    except:
        print("Oops! Something in the top line won't let us find the right problem.")
        return None
    # PREWORK FOR CELL:
    # if for some reason cell is not defined, stop the process
    if(cell is None):
        print("Your answer seems to be empty. Please try again.")
        return None
    # eliminate any line from cell that is "essentially empty", i.e., only whitepace
    cell = "\n".join([ll.rstrip() for ll in cell.splitlines() if ll.strip()])
    # strip the empty space from the beginning of each line
    cell = "\n".join([ll.lstrip() for ll in cell.splitlines()])
    # check if cell is essentially empty
    if(cell == ""):
        print("Your answer is empty. Please try again!")
        return None
    # PARSING ANSWER DEPENDS ON NUM_INPUTS
    answer = []
    try:
        n = problem.num_inputs
    except:
        print("problem.num_inputs has not yet been defined. Please check problem's encoding")
        return None
    if(n < 1):
        print("This problem was coded with too few num_inputs.")
        return None
    elif(n == 1):
        # Single input: the first meaningful line is the answer.
        answer.append(cell.split('\n')[0])
    else:
        # Multiple inputs: default every answer to "".
        for i in range(n):
            answer.append("")
        # Detect whether the cell uses "(i)" prefixes at all.
        prefix_found = False
        prefix_string = ""
        prefix_index = 0
        while(prefix_index < n and not prefix_found):
            prefix_string = "(" + str(prefix_index + 1) + ")"
            for cell_line in cell.splitlines():
                if cell_line.startswith(prefix_string):
                    prefix_found = True
                    break
            prefix_index += 1
        if(prefix_found):
            # ignore any lines with an (i) for i > n
            for cell_line in cell.splitlines():
                # could be improved
                for i in range(n):
                    # "(i):" must be tried before the shorter "(i)" prefix.
                    if cell_line.startswith("(" + str(i + 1) + "):"):
                        answer[i] = cell_line[len("(" + str(i + 1) + "):"):].strip()
                        break
                    if cell_line.startswith("(" + str(i + 1) + ")"):
                        answer[i] = cell_line[len("(" + str(i + 1) + ")"):].strip()
                        break
        else:
            # CASE 2: submission assumed to be written in order without the prefices (1),...,(n)
            i = 0
            for cell_line in cell.splitlines():
                if(i >= n):
                    break
                answer[i] = cell_line.strip()
                i += 1
problem.check_answer(answer) | true | true |
f7fd8639164634212df3126522a346d82f6d902a | 2,420 | py | Python | backend/output/reports/MaturityAssessmentReport.py | alexafshar/config-assessment-tool | b8956f4de2aa4fa3ba80f98362fc397f6d195b1c | [
"Apache-2.0"
] | null | null | null | backend/output/reports/MaturityAssessmentReport.py | alexafshar/config-assessment-tool | b8956f4de2aa4fa3ba80f98362fc397f6d195b1c | [
"Apache-2.0"
] | null | null | null | backend/output/reports/MaturityAssessmentReport.py | alexafshar/config-assessment-tool | b8956f4de2aa4fa3ba80f98362fc397f6d195b1c | [
"Apache-2.0"
] | null | null | null | import logging
from openpyxl import Workbook
from output.ReportBase import ReportBase
from util.xcel_utils import addFilterAndFreeze, resizeColumnWidth, writeColoredRow, writeSummarySheet, writeUncoloredRow
class MaturityAssessmentReport(ReportBase):
    """Builds one Maturity Assessment Excel workbook per component type (apm/brum/mrum)."""

    def createWorkbook(self, jobs, controllerData, jobFileName):
        """Create and save a MaturityAssessment workbook for each component type.

        Each workbook gets a "Summary" sheet plus an "Analysis" sheet holding one
        row per component with the computed result of every matching job step.

        :param jobs: all job steps; filtered per component type via ``componentType``
        :param controllerData: per-host controller data keyed by host
        :param jobFileName: base name used for the output directory and file names
        """
        for reportType in ["apm", "brum", "mrum"]:
            logging.info(f"Creating {reportType} Maturity Assessment Report Workbook")
            # Create report with raw data.
            workbook = Workbook()
            summarySheet = workbook["Sheet"]
            summarySheet.title = "Summary"
            analysisSheet = workbook.create_sheet("Analysis")

            filteredJobs = [job for job in jobs if job.componentType == reportType]
            # Compute the job-step class names once; the original rebuilt this list
            # for every component row inside the nested loop below.
            stepNames = [type(jobStep).__name__ for jobStep in filteredJobs]

            # Column headers follow the job-step class names, except that every
            # OverallAssessment* variant is collapsed under one shared header.
            jobNameCols = []
            for jobStep, name in zip(filteredJobs, stepNames):
                jobNameCols.append(name if not name.startswith("OverallAssessment") else "OverallAssessment")
                jobStep.reportData(workbook, controllerData, name)

            # Write headers.
            writeUncoloredRow(
                analysisSheet,
                1,
                [
                    "controller",
                    "componentType",
                    "name",
                    *jobNameCols,
                ],
            )

            rowIdx = 2
            # Only the host info (values) is needed; the host key itself is unused.
            for hostInfo in controllerData.values():
                for component in hostInfo[reportType].values():
                    writeColoredRow(
                        analysisSheet,
                        rowIdx,
                        [
                            (hostInfo["controller"].host, None),
                            (reportType, None),
                            (component["name"], None),
                            *[component[stepName]["computed"] for stepName in stepNames],
                        ],
                    )
                    rowIdx += 1

            addFilterAndFreeze(analysisSheet)
            resizeColumnWidth(analysisSheet)
            # Now that we have the data, populate the summary sheet with headers.
            writeSummarySheet(summarySheet)
            logging.debug(f"Saving MaturityAssessment-{reportType} Workbook")
            workbook.save(f"output/{jobFileName}/{jobFileName}-MaturityAssessment-{reportType}.xlsx")
| 37.8125 | 130 | 0.551653 | import logging
from openpyxl import Workbook
from output.ReportBase import ReportBase
from util.xcel_utils import addFilterAndFreeze, resizeColumnWidth, writeColoredRow, writeSummarySheet, writeUncoloredRow
class MaturityAssessmentReport(ReportBase):
def createWorkbook(self, jobs, controllerData, jobFileName):
for reportType in ["apm", "brum", "mrum"]:
logging.info(f"Creating {reportType} Maturity Assessment Report Workbook")
workbook = Workbook()
summarySheet = workbook["Sheet"]
summarySheet.title = "Summary"
analysisSheet = workbook.create_sheet(f"Analysis")
filteredJobs = [job for job in jobs if job.componentType == reportType]
jobNameCols = []
for jobStep in filteredJobs:
name = type(jobStep).__name__
jobNameCols.append(name if not name.startswith("OverallAssessment") else "OverallAssessment")
jobStep.reportData(workbook, controllerData, name)
writeUncoloredRow(
analysisSheet,
1,
[
"controller",
"componentType",
"name",
*jobNameCols,
],
)
rowIdx = 2
for host, hostInfo in controllerData.items():
for component in hostInfo[reportType].values():
writeColoredRow(
analysisSheet,
rowIdx,
[
(hostInfo["controller"].host, None),
(reportType, None),
(component["name"], None),
*[component[jobStep]["computed"] for jobStep in [type(jobStep).__name__ for jobStep in filteredJobs]],
],
)
rowIdx += 1
addFilterAndFreeze(analysisSheet)
resizeColumnWidth(analysisSheet)
writeSummarySheet(summarySheet)
logging.debug(f"Saving MaturityAssessment-{reportType} Workbook")
workbook.save(f"output/{jobFileName}/{jobFileName}-MaturityAssessment-{reportType}.xlsx")
| true | true |
f7fd863f1cedcc790d138ac7e4fa33f473c855df | 17,121 | py | Python | owtf/managers/poutput.py | Lonewolf-Information-systems/owtf | 65355ce8bf4a4ea0177e24ee106f77e2f87c17fa | [
"BSD-3-Clause"
] | 1 | 2018-02-05T12:10:28.000Z | 2018-02-05T12:10:28.000Z | owtf/managers/poutput.py | Lonewolf-Information-systems/owtf | 65355ce8bf4a4ea0177e24ee106f77e2f87c17fa | [
"BSD-3-Clause"
] | 2 | 2021-03-11T03:35:23.000Z | 2022-02-10T23:40:23.000Z | owtf/managers/poutput.py | Lonewolf-Information-systems/owtf | 65355ce8bf4a4ea0177e24ee106f77e2f87c17fa | [
"BSD-3-Clause"
] | null | null | null | """
owtf.db.poutput_manager
~~~~~~~~~~~~~~~~~~~~~~~
"""
import os
import json
from sqlalchemy.exc import SQLAlchemyError
from owtf.dependency_management.dependency_resolver import BaseComponent
from owtf.dependency_management.interfaces import PluginOutputInterface
from owtf.managers.target import target_required
from owtf.managers.session import session_required
from owtf.lib.exceptions import InvalidParameterType
from owtf.db import models
from owtf.utils import FileOperations
class POutputDB(BaseComponent, PluginOutputInterface):
    """Database access layer for per-target plugin output records."""

    COMPONENT_NAME = "plugin_output"

    def __init__(self):
        self.register_in_service_locator()
        self.config = self.get_component("config")
        self.plugin_handler = self.get_component("plugin_handler")
        self.reporter = self.get_component("reporter")
        self.target = self.get_component("target")
        self.db_config = self.get_component("db_config")
        self.timer = self.get_component("timer")
        self.db = self.get_component("db")

    def plugin_output_exists(self, plugin_key, target_id):
        """Check if output exists for a (target, plugin key) pair.

        :param plugin_key: plugin key
        :type plugin_key: `str`
        :param target_id: Target id
        :type target_id: `int`
        :return: True if at least one matching row exists
        :rtype: `bool`
        """
        count = self.db.session.query(models.PluginOutput).filter_by(
            target_id=target_id, plugin_key=plugin_key).count()
        return count > 0

    def plugin_count_output(self):
        """Get completed/remaining plugin counts.

        :return: Count stats
        :rtype: `dict`
        """
        complete_count = self.db.session.query(models.PluginOutput).count()
        left_count = self.db.session.query(models.Work).count()
        # Work currently being executed is not in the queue any more but is
        # still outstanding, so add the busy workers.
        left_count += self.get_component("worker_manager").get_busy_workers()
        return {'complete_count': complete_count, 'left_count': left_count}

    def get_html_output(self, plugin_output):
        """Render plugin output items to HTML via the reporter component.

        :param plugin_output: Plugin output items ({"type": ..., "output": ...})
        :type plugin_output: `list`
        :return: HTML string
        :rtype: `str`
        """
        content = ''
        for item in plugin_output:
            # Each item's "type" names a reporter method; "output" holds its kwargs.
            content += getattr(self.reporter, item["type"])(**item["output"])
        return content

    @target_required
    def get_output_dict(self, obj, target_id=None, inc_output=False):
        """Convert a PluginOutput object into a plain dict.

        :param obj: output object
        :param target_id: target ID
        :type target_id: `int`
        :param inc_output: include the rendered HTML output?
        :type inc_output: `bool`
        :return: Plugin output as a dict
        :rtype: `dict`
        """
        if target_id:
            self.target.set_target(target_id)
        if obj:
            pdict = dict(obj.__dict__)
            pdict.pop("_sa_instance_state", None)
            pdict.pop("date_time")
            # If output is requested and present, json-decode and render it.
            if inc_output:
                if pdict.get("output", None):
                    pdict["output"] = self.get_html_output(json.loads(pdict["output"]))
            else:
                pdict.pop("output")
            pdict["start_time"] = obj.start_time.strftime(self.db_config.get("DATE_TIME_FORMAT"))
            pdict["end_time"] = obj.end_time.strftime(self.db_config.get("DATE_TIME_FORMAT"))
            pdict["run_time"] = self.timer.get_time_as_str(obj.run_time)
            return pdict

    @target_required
    def get_output_dicts(self, obj_list, target_id=None, inc_output=False):
        """Convert a list of PluginOutput objects into dicts.

        :param obj_list: List of objects
        :type obj_list: `list`
        :param target_id: target ID
        :type target_id: `int`
        :param inc_output: include the rendered HTML output?
        :type inc_output: `bool`
        :return: List of output dicts
        :rtype: `list`
        """
        if target_id:
            self.target.set_target(target_id)
        dict_list = []
        for obj in obj_list:
            dict_list.append(self.get_output_dict(obj, target_id=target_id, inc_output=inc_output))
        return dict_list

    def gen_query(self, filter_data, target_id, for_delete=False):
        """Generate a PluginOutput query from filter criteria.

        :param filter_data: Filter criteria
        :type filter_data: `dict`
        :param target_id: target ID
        :type target_id: `int`
        :param for_delete: skip ordering and pagination when building a delete query
        :type for_delete: `bool`
        :return: the composed query
        """
        query = self.db.session.query(models.PluginOutput).filter_by(target_id=target_id)
        if filter_data.get("target_id", None):
            # BUGFIX: Query objects are immutable, filter_by() returns a new
            # query. The original discarded the result, so this filter was
            # silently never applied.
            query = query.filter_by(target_id=filter_data["target_id"])
        if filter_data.get("plugin_key", None):
            if isinstance(filter_data.get("plugin_key"), str):
                query = query.filter_by(plugin_key=filter_data["plugin_key"])
            if isinstance(filter_data.get("plugin_key"), list):
                query = query.filter(models.PluginOutput.plugin_key.in_(filter_data["plugin_key"]))
        if filter_data.get("plugin_type", None):
            if isinstance(filter_data.get("plugin_type"), str):
                query = query.filter_by(plugin_type=filter_data["plugin_type"])
            if isinstance(filter_data.get("plugin_type"), list):
                query = query.filter(models.PluginOutput.plugin_type.in_(filter_data["plugin_type"]))
        if filter_data.get("plugin_group", None):
            if isinstance(filter_data.get("plugin_group"), str):
                query = query.filter_by(plugin_group=filter_data["plugin_group"])
            if isinstance(filter_data.get("plugin_group"), list):
                query = query.filter(models.PluginOutput.plugin_group.in_(filter_data["plugin_group"]))
        if filter_data.get("plugin_code", None):
            if isinstance(filter_data.get("plugin_code"), str):
                query = query.filter_by(plugin_code=filter_data["plugin_code"])
            if isinstance(filter_data.get("plugin_code"), list):
                query = query.filter(models.PluginOutput.plugin_code.in_(filter_data["plugin_code"]))
        if filter_data.get("status", None):
            if isinstance(filter_data.get("status"), str):
                query = query.filter_by(status=filter_data["status"])
            if isinstance(filter_data.get("status"), list):
                query = query.filter(models.PluginOutput.status.in_(filter_data["status"]))
        try:
            # Rank filters arrive as strings from HTTP parameters; coerce to int.
            if filter_data.get("user_rank", None):
                if isinstance(filter_data.get("user_rank"), str):
                    query = query.filter_by(user_rank=int(filter_data["user_rank"]))
                if isinstance(filter_data.get("user_rank"), list):
                    numbers_list = [int(x) for x in filter_data["user_rank"]]
                    query = query.filter(models.PluginOutput.user_rank.in_(numbers_list))
            if filter_data.get("owtf_rank", None):
                if isinstance(filter_data.get("owtf_rank"), str):
                    query = query.filter_by(owtf_rank=int(filter_data["owtf_rank"]))
                if isinstance(filter_data.get("owtf_rank"), list):
                    numbers_list = [int(x) for x in filter_data["owtf_rank"]]
                    query = query.filter(models.PluginOutput.owtf_rank.in_(numbers_list))
        except ValueError:
            raise InvalidParameterType("Integer has to be provided for integer fields")
        if not for_delete:
            query = query.order_by(models.PluginOutput.plugin_key.asc())
        try:
            if filter_data.get("offset", None):
                if isinstance(filter_data.get("offset"), list):
                    query = query.offset(int(filter_data["offset"][0]))
            if filter_data.get("limit", None):
                if isinstance(filter_data.get("limit"), list):
                    query = query.limit(int(filter_data["limit"][0]))
        except ValueError:
            raise InvalidParameterType("Integer has to be provided for integer fields")
        return query

    @target_required
    def get_all(self, filter_data=None, target_id=None, inc_output=False):
        """Get all output dicts matching the criteria.

        :param filter_data: Filter data
        :type filter_data: `dict`
        :param target_id: target ID
        :type target_id: `int`
        :param inc_output: include the rendered HTML output?
        :type inc_output: `bool`
        :return: list of output dicts
        :rtype: `list`
        """
        if not filter_data:
            filter_data = {}
        self.target.set_target(target_id)
        query = self.gen_query(filter_data, target_id)
        results = query.all()
        return self.get_output_dicts(results, target_id=target_id, inc_output=inc_output)

    @target_required
    def get_unique(self, target_id=None):
        """Return distinct values of some columns, useful for advanced filters.

        :param target_id: target ID
        :type target_id: `int`
        :return: column name -> list of distinct values
        :rtype: `dict`
        """
        unique_data = {
            "plugin_type": [i[0] for i in self.db.session.query(models.PluginOutput.plugin_type).filter_by(
                target_id=target_id).distinct().all()],
            "plugin_group": [i[0] for i in self.db.session.query(models.PluginOutput.plugin_group).filter_by(
                target_id=target_id).distinct().all()],
            "status": [i[0] for i in self.db.session.query(models.PluginOutput.status).filter_by(
                target_id=target_id).distinct().all()],
            "user_rank": [i[0] for i in self.db.session.query(models.PluginOutput.user_rank).filter_by(
                target_id=target_id).distinct().all()],
            "owtf_rank": [i[0] for i in self.db.session.query(models.PluginOutput.owtf_rank).filter_by(
                target_id=target_id).distinct().all()],
        }
        return unique_data

    @target_required
    def delete_all(self, filter_data, target_id=None):
        """Delete plugin output rows (and their on-disk folders).

        .note::
            Here keeping filter_data optional is very risky

        :param filter_data: Filter data
        :type filter_data: `dict`
        :param target_id: target ID
        :type target_id: `int`
        :return: None
        :rtype: None
        """
        # for_delete=True: an empty filter dict will match all results.
        query = self.gen_query(filter_data, target_id, for_delete=True)
        # Delete the folders created for these plugins first.
        for plugin in query.all():
            # Only attempt removal if a path was recorded in the db.
            if plugin.output_path:
                output_path = os.path.join(self.config.get_output_dir_target(), plugin.output_path)
                if os.path.exists(output_path):
                    FileOperations.rm_tree(output_path)
        # Once the folders are removed, delete the rows themselves.
        query.delete()
        self.db.session.commit()

    @target_required
    def update(self, plugin_group, plugin_type, plugin_code, patch_data, target_id=None):
        """Patch a single output row (user rank / user notes).

        :param plugin_group: Plugin group
        :type plugin_group: `str`
        :param plugin_type: Plugin type
        :type plugin_type: `str`
        :param plugin_code: Plugin code
        :type plugin_code: `str`
        :param patch_data: Patched data
        :type patch_data: `dict`
        :param target_id: target ID
        :type target_id: `int`
        :return: None
        :rtype: None
        """
        plugin_dict = {"plugin_group": plugin_group, "plugin_type": plugin_type, "plugin_code": plugin_code}
        query = self.gen_query(plugin_dict, target_id)
        obj = query.first()
        if obj:
            try:
                if patch_data.get("user_rank", None):
                    if isinstance(patch_data["user_rank"], list):
                        patch_data["user_rank"] = patch_data["user_rank"][0]
                    obj.user_rank = int(patch_data["user_rank"])
                    # A manual rank overrides the automatic one.
                    obj.owtf_rank = -1
                if patch_data.get("user_notes", None):
                    if isinstance(patch_data["user_notes"], list):
                        patch_data["user_notes"] = patch_data["user_notes"][0]
                    obj.user_notes = patch_data["user_notes"]
                self.db.session.merge(obj)
                self.db.session.commit()
            except ValueError:
                raise InvalidParameterType("Integer has to be provided for integer fields")

    def plugin_already_run(self, plugin_info, target_id=None):
        """Check if a plugin has already run against a target.

        :param plugin_info: Plugin info
        :type plugin_info: `dict`
        :param target_id: target ID
        :type target_id: `int`
        :return: True if already ran
        :rtype: `bool`
        """
        plugin_output_count = self.db.session.query(models.PluginOutput).filter_by(
            target_id=target_id,
            plugin_code=plugin_info["code"],
            plugin_type=plugin_info["type"],
            plugin_group=plugin_info["group"]).count()
        # True iff at least one output row was already recorded for this plugin.
        return plugin_output_count > 0

    def _save_output(self, plugin, output, target_id, **extra):
        """Merge and commit one PluginOutput row for `plugin`.

        Shared by :meth:`save_plugin_output` and :meth:`save_partial_output`,
        which differ only in the extra ``error`` column.

        :param plugin: Plugin dict
        :type plugin: `dict`
        :param output: Plugin output (json-serialisable)
        :param target_id: target ID
        :type target_id: `int`
        :param extra: additional column values (e.g. ``error=message``)
        :return: None
        :rtype: None
        """
        self.db.session.merge(models.PluginOutput(
            plugin_key=plugin["key"],
            plugin_code=plugin["code"],
            plugin_group=plugin["group"],
            plugin_type=plugin["type"],
            output=json.dumps(output),
            start_time=plugin["start"],
            end_time=plugin["end"],
            status=plugin["status"],
            target_id=target_id,
            # Save path only if it exists, i.e. if some files were stored there.
            output_path=(plugin["output_path"] if os.path.exists(
                self.plugin_handler.get_plugin_output_dir(plugin)) else None),
            owtf_rank=plugin['owtf_rank'],
            **extra)
        )
        try:
            self.db.session.commit()
        except SQLAlchemyError as e:
            self.db.session.rollback()
            raise e

    @target_required
    def save_plugin_output(self, plugin, output, target_id=None):
        """Save into the database the command output of the plugin.

        :param plugin: Plugin dict
        :type plugin: `dict`
        :param output: Plugin output
        :type output: `str`
        :param target_id: target ID
        :type target_id: `int`
        :return: None
        :rtype: None
        """
        self._save_output(plugin, output, target_id)

    @target_required
    def save_partial_output(self, plugin, output, message, target_id=None):
        """Save partial plugin output together with its error message.

        :param plugin: Plugin dict
        :type plugin: `dict`
        :param output: Output
        :type output: `str`
        :param message: Error message
        :type message: `str`
        :param target_id: target ID
        :type target_id: `int`
        :return: None
        :rtype: None
        """
        self._save_output(plugin, output, target_id, error=message)

    @session_required
    def get_severity_freq(self, session_id=None):
        """Get severity frequencies for the analytics.

        :param session_id: session ID
        :type session_id: `int`
        :return: Frequency data
        :rtype: `dict`
        """
        severity_frequency = [
            {"id": 0, "label": "Passing", "value": 0},
            {"id": 1, "label": "Info", "value": 0},
            {"id": 2, "label": "Low", "value": 0},
            {"id": 3, "label": "Medium", "value": 0},
            {"id": 4, "label": "High", "value": 0},
            {"id": 5, "label": "Critical", "value": 0},
        ]
        targets = []
        target_objs = self.db.session.query(models.Target.id).filter(models.Target.sessions.any(id=session_id)).all()
        for target_obj in target_objs:
            targets.append(target_obj.id)
        plugin_objs = self.db.session.query(models.PluginOutput).all()
        for plugin_obj in plugin_objs:
            if plugin_obj.target_id in targets:
                # A manual user rank takes precedence over the automatic one;
                # -1 means "not ranked" and is excluded from the histogram.
                if plugin_obj.user_rank != -1:
                    severity_frequency[plugin_obj.user_rank]["value"] += 1
                else:
                    if plugin_obj.owtf_rank != -1:
                        severity_frequency[plugin_obj.owtf_rank]["value"] += 1
        # Reverse so the most severe bucket comes first.
        return {"data": severity_frequency[::-1]}
| 40.764286 | 120 | 0.600899 |
import os
import json
from sqlalchemy.exc import SQLAlchemyError
from owtf.dependency_management.dependency_resolver import BaseComponent
from owtf.dependency_management.interfaces import PluginOutputInterface
from owtf.managers.target import target_required
from owtf.managers.session import session_required
from owtf.lib.exceptions import InvalidParameterType
from owtf.db import models
from owtf.utils import FileOperations
class POutputDB(BaseComponent, PluginOutputInterface):
COMPONENT_NAME = "plugin_output"
def __init__(self):
self.register_in_service_locator()
self.config = self.get_component("config")
self.plugin_handler = self.get_component("plugin_handler")
self.reporter = self.get_component("reporter")
self.target = self.get_component("target")
self.db_config = self.get_component("db_config")
self.timer = self.get_component("timer")
self.db = self.get_component("db")
def plugin_output_exists(self, plugin_key, target_id):
count = self.db.session.query(models.PluginOutput).filter_by(target_id=target_id, plugin_key=plugin_key).count()
return (count > 0)
def plugin_count_output(self):
complete_count = self.db.session.query(models.PluginOutput).count()
left_count = self.db.session.query(models.Work).count()
left_count += self.get_component("worker_manager").get_busy_workers()
results = {'complete_count': complete_count, 'left_count': left_count}
return results
def get_html_output(self, plugin_output):
content = ''
for item in plugin_output:
content += getattr(self.reporter, item["type"])(**item["output"])
return content
@target_required
def get_output_dict(self, obj, target_id=None, inc_output=False):
if target_id:
self.target.set_target(target_id)
if obj:
pdict = dict(obj.__dict__)
pdict.pop("_sa_instance_state", None)
pdict.pop("date_time")
if inc_output:
if pdict.get("output", None):
pdict["output"] = self.get_html_output(json.loads(pdict["output"]))
else:
pdict.pop("output")
pdict["start_time"] = obj.start_time.strftime(self.db_config.get("DATE_TIME_FORMAT"))
pdict["end_time"] = obj.end_time.strftime(self.db_config.get("DATE_TIME_FORMAT"))
pdict["run_time"] = self.timer.get_time_as_str(obj.run_time)
return pdict
@target_required
def get_output_dicts(self, obj_list, target_id=None, inc_output=False):
if target_id:
self.target.set_target(target_id)
dict_list = []
for obj in obj_list:
dict_list.append(self.get_output_dict(obj, target_id=target_id, inc_output=inc_output))
return dict_list
def gen_query(self, filter_data, target_id, for_delete=False):
query = self.db.session.query(models.PluginOutput).filter_by(target_id=target_id)
if filter_data.get("target_id", None):
query.filter_by(target_id=filter_data["target_id"])
if filter_data.get("plugin_key", None):
if isinstance(filter_data.get("plugin_key"), str):
query = query.filter_by(plugin_key=filter_data["plugin_key"])
if isinstance(filter_data.get("plugin_key"), list):
query = query.filter(models.PluginOutput.plugin_key.in_(filter_data["plugin_key"]))
if filter_data.get("plugin_type", None):
if isinstance(filter_data.get("plugin_type"), str):
query = query.filter_by(plugin_type=filter_data["plugin_type"])
if isinstance(filter_data.get("plugin_type"), list):
query = query.filter(models.PluginOutput.plugin_type.in_(filter_data["plugin_type"]))
if filter_data.get("plugin_group", None):
if isinstance(filter_data.get("plugin_group"), str):
query = query.filter_by(plugin_group=filter_data["plugin_group"])
if isinstance(filter_data.get("plugin_group"), list):
query = query.filter(models.PluginOutput.plugin_group.in_(filter_data["plugin_group"]))
if filter_data.get("plugin_code", None):
if isinstance(filter_data.get("plugin_code"), str):
query = query.filter_by(plugin_code=filter_data["plugin_code"])
if isinstance(filter_data.get("plugin_code"), list):
query = query.filter(models.PluginOutput.plugin_code.in_(filter_data["plugin_code"]))
if filter_data.get("status", None):
if isinstance(filter_data.get("status"), str):
query = query.filter_by(status=filter_data["status"])
if isinstance(filter_data.get("status"), list):
query = query.filter(models.PluginOutput.status.in_(filter_data["status"]))
try:
if filter_data.get("user_rank", None):
if isinstance(filter_data.get("user_rank"), str):
query = query.filter_by(user_rank=int(filter_data["user_rank"]))
if isinstance(filter_data.get("user_rank"), list):
numbers_list = [int(x) for x in filter_data["user_rank"]]
query = query.filter(models.PluginOutput.user_rank.in_(numbers_list))
if filter_data.get("owtf_rank", None):
if isinstance(filter_data.get("owtf_rank"), str):
query = query.filter_by(owtf_rank=int(filter_data["owtf_rank"]))
if isinstance(filter_data.get("owtf_rank"), list):
numbers_list = [int(x) for x in filter_data["owtf_rank"]]
query = query.filter(models.PluginOutput.owtf_rank.in_(numbers_list))
except ValueError:
raise InvalidParameterType("Integer has to be provided for integer fields")
if not for_delete:
query = query.order_by(models.PluginOutput.plugin_key.asc())
try:
if filter_data.get("offset", None):
if isinstance(filter_data.get("offset"), list):
query = query.offset(int(filter_data["offset"][0]))
if filter_data.get("limit", None):
if isinstance(filter_data.get("limit"), list):
query = query.limit(int(filter_data["limit"][0]))
except ValueError:
raise InvalidParameterType("Integer has to be provided for integer fields")
return query
@target_required
def get_all(self, filter_data=None, target_id=None, inc_output=False):
if not filter_data:
filter_data = {}
self.target.set_target(target_id)
query = self.gen_query(filter_data, target_id)
results = query.all()
return self.get_output_dicts(results, target_id=target_id, inc_output=inc_output)
@target_required
def get_unique(self, target_id=None):
unique_data = {
"plugin_type": [i[0] for i in self.db.session.query(models.PluginOutput.plugin_type).filter_by(
target_id=target_id).distinct().all()],
"plugin_group": [i[0] for i in self.db.session.query(models.PluginOutput.plugin_group).filter_by(
target_id=target_id).distinct().all()],
"status": [i[0] for i in self.db.session.query(models.PluginOutput.status).filter_by(
target_id=target_id).distinct().all()],
"user_rank": [i[0] for i in self.db.session.query(models.PluginOutput.user_rank).filter_by(
target_id=target_id).distinct().all()],
"owtf_rank": [i[0] for i in self.db.session.query(models.PluginOutput.owtf_rank).filter_by(
target_id=target_id).distinct().all()],
}
return unique_data
@target_required
def delete_all(self, filter_data, target_id=None):
query = self.gen_query(filter_data, target_id, for_delete=True)
for plugin in query.all():
if plugin.output_path:
output_path = os.path.join(self.config.get_output_dir_target(), plugin.output_path)
if os.path.exists(output_path):
FileOperations.rm_tree(output_path)
results = query.delete()
self.db.session.commit()
@target_required
def update(self, plugin_group, plugin_type, plugin_code, patch_data, target_id=None):
plugin_dict = {"plugin_group": plugin_group, "plugin_type": plugin_type, "plugin_code": plugin_code}
query = self.gen_query(plugin_dict, target_id)
obj = query.first()
if obj:
try:
if patch_data.get("user_rank", None):
if isinstance(patch_data["user_rank"], list):
patch_data["user_rank"] = patch_data["user_rank"][0]
obj.user_rank = int(patch_data["user_rank"])
obj.owtf_rank = -1
if patch_data.get("user_notes", None):
if isinstance(patch_data["user_notes"], list):
patch_data["user_notes"] = patch_data["user_notes"][0]
obj.user_notes = patch_data["user_notes"]
self.db.session.merge(obj)
self.db.session.commit()
except ValueError:
raise InvalidParameterType("Integer has to be provided for integer fields")
def plugin_already_run(self, plugin_info, target_id=None):
plugin_output_count = self.db.session.query(models.PluginOutput).filter_by(
target_id=target_id,
plugin_code=plugin_info["code"],
plugin_type=plugin_info["type"],
plugin_group=plugin_info["group"]).count()
return plugin_output_count > 0
@target_required
def save_plugin_output(self, plugin, output, target_id=None):
self.db.session.merge(models.PluginOutput(
plugin_key=plugin["key"],
plugin_code=plugin["code"],
plugin_group=plugin["group"],
plugin_type=plugin["type"],
output=json.dumps(output),
start_time=plugin["start"],
end_time=plugin["end"],
status=plugin["status"],
target_id=target_id,
output_path=(plugin["output_path"] if os.path.exists(
self.plugin_handler.get_plugin_output_dir(plugin)) else None),
owtf_rank=plugin['owtf_rank'])
)
try:
self.db.session.commit()
except SQLAlchemyError as e:
self.db.session.rollback()
raise e
@target_required
def save_partial_output(self, plugin, output, message, target_id=None):
self.db.session.merge(models.PluginOutput(
plugin_key=plugin["key"],
plugin_code=plugin["code"],
plugin_group=plugin["group"],
plugin_type=plugin["type"],
output=json.dumps(output),
error=message,
start_time=plugin["start"],
end_time=plugin["end"],
status=plugin["status"],
target_id=target_id,
output_path=(plugin["output_path"] if os.path.exists(
self.plugin_handler.get_plugin_output_dir(plugin)) else None),
owtf_rank=plugin['owtf_rank'])
)
try:
self.db.session.commit()
except SQLAlchemyError as e:
self.db.session.rollback()
raise e
@session_required
def get_severity_freq(self, session_id=None):
severity_frequency = [
{"id": 0, "label": "Passing", "value": 0},
{"id": 1, "label": "Info", "value": 0},
{"id": 2, "label": "Low", "value": 0},
{"id": 3, "label": "Medium", "value": 0},
{"id": 4, "label": "High", "value": 0},
{"id": 5, "label": "Critical", "value": 0},
]
targets = []
target_objs = self.db.session.query(models.Target.id).filter(models.Target.sessions.any(id=session_id)).all()
for target_obj in target_objs:
targets.append(target_obj.id)
plugin_objs = self.db.session.query(models.PluginOutput).all()
for plugin_obj in plugin_objs:
if plugin_obj.target_id in targets:
if plugin_obj.user_rank != -1:
severity_frequency[plugin_obj.user_rank]["value"] += 1
else:
if plugin_obj.owtf_rank != -1:
severity_frequency[plugin_obj.owtf_rank]["value"] += 1
return {"data": severity_frequency[::-1]}
| true | true |
f7fd865bee772808f0f9dc4cfad226b44038465f | 1,252 | py | Python | grr/core/grr_response_core/lib/parsers/cron_file_parser.py | magnologan/grr | 06eeb071e9a925b34f67caf776c3330b39154850 | [
"Apache-2.0"
] | null | null | null | grr/core/grr_response_core/lib/parsers/cron_file_parser.py | magnologan/grr | 06eeb071e9a925b34f67caf776c3330b39154850 | [
"Apache-2.0"
] | null | null | null | grr/core/grr_response_core/lib/parsers/cron_file_parser.py | magnologan/grr | 06eeb071e9a925b34f67caf776c3330b39154850 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Simple parsers for cron type files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import crontab
from future.builtins import str
from grr_response_core.lib import parser
from grr_response_core.lib.rdfvalues import cronjobs as rdf_cronjobs
class CronTabParser(parser.FileParser):
  """Extracts cron job entries from crontab-formatted files."""

  output_types = [rdf_cronjobs.CronTabFile]
  supported_artifacts = ["LinuxCronTabs", "MacOSCronTabs"]

  def Parse(self, stat, file_object, knowledge_base):
    """Yield a single CronTabFile holding every job found in the file."""
    del knowledge_base  # Unused by this parser.

    tab = crontab.CronTab(tab=file_object.read().decode("utf-8"))
    entries = [
        rdf_cronjobs.CronTabEntry(
            minute=str(job.minute),
            hour=str(job.hour),
            dayofmonth=str(job.dom),
            month=str(job.month),
            dayofweek=str(job.dow),
            command=str(job.command),
            comment=str(job.comment)) for job in tab
    ]

    # Not every file-like object carries a URN; fall back to None.
    source_urn = getattr(file_object, "urn", None)
    yield rdf_cronjobs.CronTabFile(aff4path=source_urn, jobs=entries)
| 26.083333 | 69 | 0.679712 |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import crontab
from future.builtins import str
from grr_response_core.lib import parser
from grr_response_core.lib.rdfvalues import cronjobs as rdf_cronjobs
class CronTabParser(parser.FileParser):
output_types = [rdf_cronjobs.CronTabFile]
supported_artifacts = ["LinuxCronTabs", "MacOSCronTabs"]
def Parse(self, stat, file_object, knowledge_base):
_ = knowledge_base
entries = []
crondata = file_object.read().decode("utf-8")
jobs = crontab.CronTab(tab=crondata)
for job in jobs:
entries.append(
rdf_cronjobs.CronTabEntry(
minute=str(job.minute),
hour=str(job.hour),
dayofmonth=str(job.dom),
month=str(job.month),
dayofweek=str(job.dow),
command=str(job.command),
comment=str(job.comment)))
try:
source_urn = file_object.urn
except AttributeError:
source_urn = None
yield rdf_cronjobs.CronTabFile(aff4path=source_urn, jobs=entries)
| true | true |
f7fd8725e0a57737aa8294f7d1389060706697fe | 1,073 | py | Python | tensorflow/contrib/py2tf/__init__.py | harunpehlivan/tensorflow | 376e2cfdab31f4da251ea2e50992a9bf97fd171b | [
"Apache-2.0"
] | 2 | 2020-05-18T03:08:51.000Z | 2020-09-25T03:11:50.000Z | tensorflow/contrib/py2tf/__init__.py | hamzabekkouri/tensorflow | d87a9fbbc5f49ec5ae8eb52c62628f0b1a0bf67f | [
"Apache-2.0"
] | null | null | null | tensorflow/contrib/py2tf/__init__.py | hamzabekkouri/tensorflow | d87a9fbbc5f49ec5ae8eb52c62628f0b1a0bf67f | [
"Apache-2.0"
] | 1 | 2021-07-20T16:07:01.000Z | 2021-07-20T16:07:01.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Py2TF compiles Python code into equivalent TensorFlow code.
Equivalent here means that they have the same effect when executed.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.util.all_util import remove_undocumented
# No symbols are whitelisted yet, so remove_undocumented() strips every
# undocumented name from this module's public namespace.
_allowed_symbols = []
remove_undocumented(__name__, _allowed_symbols)
| 35.766667 | 80 | 0.732526 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = []
remove_undocumented(__name__, _allowed_symbols)
| true | true |
f7fd881b232ce7cc66123714aacdd5e1a725e137 | 1,989 | py | Python | kornia/losses/psnr.py | pmeier/kornia | 57f5aeb605d0c69de88a0a1aa1563cee52d4bfaf | [
"ECL-2.0",
"Apache-2.0"
] | 5 | 2021-04-15T01:20:01.000Z | 2022-01-12T14:12:54.000Z | kornia/losses/psnr.py | pmeier/kornia | 57f5aeb605d0c69de88a0a1aa1563cee52d4bfaf | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | kornia/losses/psnr.py | pmeier/kornia | 57f5aeb605d0c69de88a0a1aa1563cee52d4bfaf | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-10-20T06:57:07.000Z | 2020-10-20T06:57:07.000Z | import torch
import torch.nn as nn
from torch.nn.functional import mse_loss
class PSNRLoss(nn.Module):
    r"""Criterion computing the Peak Signal-to-Noise Ratio between two images.

    For an :math:`m \times n` image the PSNR is defined as

    .. math::

        \text{PSNR} = 10 \log_{10} \bigg(\frac{\text{MAX}_I^2}{MSE(I,T)}\bigg)

    with the mean squared error

    .. math::

        \text{MSE}(I,T) = \frac{1}{mn}\sum_{i=0}^{m-1}\sum_{j=0}^{n-1} [I(i,j) - T(i,j)]^2

    and :math:`\text{MAX}_I` the maximum possible input value
    (e.g. :math:`\text{MAX}_I=1` for floating point images).

    Arguments:
        max_val (float): Maximum value of input

    Shape:
        - input: :math:`(*)`
        - approximation: :math:`(*)` same shape as input
        - output: :math:`()` a scalar

    Reference:
        https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio#Definition
    """

    def __init__(self, max_val: float) -> None:
        super().__init__()
        # Upper bound of the input dynamic range, used in the PSNR formula.
        self.max_val: float = max_val

    def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:  # type: ignore
        # Delegate to the functional implementation.
        return psnr_loss(input, target, self.max_val)
def psnr_loss(input: torch.Tensor, target: torch.Tensor, max_val: float) -> torch.Tensor:
    r"""Compute the PSNR between ``input`` and ``target``.

    See :class:`~kornia.losses.PSNRLoss` for details.
    """
    # Both arguments have to be tensors of identical shape.
    if not (torch.is_tensor(input) and torch.is_tensor(target)):
        raise TypeError(f"Expected 2 torch tensors but got {type(input)} and {type(target)}")

    if input.shape != target.shape:
        raise TypeError(f"Expected tensors of equal shapes, but got {input.shape} and {target.shape}")

    # Move the scalar peak value onto the same device/dtype as the input.
    peak: torch.Tensor = torch.tensor(max_val).to(input.device).to(input.dtype)
    mse = mse_loss(input, target, reduction='mean')
    return 10 * torch.log10(peak * peak / mse)
| 33.15 | 105 | 0.64002 | import torch
import torch.nn as nn
from torch.nn.functional import mse_loss
class PSNRLoss(nn.Module):
def __init__(self, max_val: float) -> None:
super(PSNRLoss, self).__init__()
self.max_val: float = max_val
def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
return psnr_loss(input, target, self.max_val)
def psnr_loss(input: torch.Tensor, target: torch.Tensor, max_val: float) -> torch.Tensor:
if not torch.is_tensor(input) or not torch.is_tensor(target):
raise TypeError(f"Expected 2 torch tensors but got {type(input)} and {type(target)}")
if input.shape != target.shape:
raise TypeError(f"Expected tensors of equal shapes, but got {input.shape} and {target.shape}")
mse_val = mse_loss(input, target, reduction='mean')
max_val_tensor: torch.Tensor = torch.tensor(max_val).to(input.device).to(input.dtype)
return 10 * torch.log10(max_val_tensor * max_val_tensor / mse_val)
| true | true |
f7fd8a82dbd8001e68bbadbbea1dcf53182a86fc | 4,131 | py | Python | Plots/Skew-T/NCL_skewt_3_2.py | learn2free/GeoCAT-examples | 3ac152a767e78a362a8ebb6f677005f3de320ca6 | [
"Apache-2.0"
] | 1 | 2021-05-09T02:54:10.000Z | 2021-05-09T02:54:10.000Z | Plots/Skew-T/NCL_skewt_3_2.py | learn2free/GeoCAT-examples | 3ac152a767e78a362a8ebb6f677005f3de320ca6 | [
"Apache-2.0"
] | null | null | null | Plots/Skew-T/NCL_skewt_3_2.py | learn2free/GeoCAT-examples | 3ac152a767e78a362a8ebb6f677005f3de320ca6 | [
"Apache-2.0"
] | null | null | null | """
NCL_skewt_3_2.py
=================
This script illustrates the following concepts:
- Drawing Skew-T plots
- Thinning the wind barbs in a Skew-T plot
- Customizing the background of a Skew_T plot
See following URLs to see the reproduced NCL plot & script:
- Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/skewt_3.ncl
- Original NCL plot: https://www.ncl.ucar.edu/Applications/Images/skewt_3_2_lg.png
"""
###############################################################################
# Import packages:
import geocat.datafiles as gdf
import matplotlib.lines as mlines
import matplotlib.pyplot as plt
import metpy.calc as mpcalc
import numpy as np
import pandas as pd
from geocat.viz import util as gvutil
from metpy.plots import SkewT
from metpy.units import units
###############################################################################
# Read in data:
# Open a netCDF data file using xarray default engine and load the data into xarrays
ds = pd.read_csv(gdf.get('ascii_files/sounding_ATS.csv'), header=None)
# Extract the data
p = ds[0].values * units.hPa # Pressure [mb/hPa]
tc = ds[1].values * units.degC # Temperature [C]
tdc = ds[2].values * units.degC # Dew pt temp [C]
wspd = ds[5].values * units.knots # Wind speed [knots or m/s]
wdir = ds[6].values * units.degrees # Meteorological wind dir
u, v = mpcalc.wind_components(wspd, wdir) # Calculate wind components
###############################################################################
# Plot
fig = plt.figure(figsize=(12, 12))
# Adding the "rotation" kwarg will over-ride the default MetPy rotation of
# 30 degrees for the 45 degree default found in NCL Skew-T plots
skew = SkewT(fig, rotation=45)
ax = skew.ax
# Shade every other section between isotherms
x1 = np.linspace(-100, 40, 8) # The starting x values for the shaded regions
x2 = np.linspace(-90, 50, 8) # The ending x values for the shaded regions
y = [1050, 100] # The range of y values that the shaded regions should cover
for i in range(0, 8):
skew.shade_area(y=y,
x1=x1[i],
x2=x2[i],
color='limegreen',
alpha=0.25,
zorder=1)
skew.plot(p, tc, 'black')
skew.plot(p, tdc, 'blue')
# Plot only every third windbarb
skew.plot_barbs(pressure=p[::3],
u=u[::3],
v=v[::3],
xloc=1.05,
fill_empty=True,
sizes=dict(emptybarb=0.075, width=0.1, height=0.2))
# Draw line underneath wind barbs
line = mlines.Line2D([1.05, 1.05], [0, 1],
color='gray',
linewidth=0.5,
transform=ax.transAxes,
dash_joinstyle='round',
clip_on=False,
zorder=0)
ax.add_line(line)
# Add relevant special lines
# Choose starting temperatures in Kelvin for the dry adiabats
t0 = units.K * np.arange(243.15, 473.15, 10)
skew.plot_dry_adiabats(t0=t0, linestyles='solid', colors='gray', linewidth=1.5)
# Choose temperatures for moist adiabats
t0 = units.K * np.arange(281.15, 306.15, 4)
msa = skew.plot_moist_adiabats(t0=t0,
linestyles='solid',
colors='lime',
linewidths=1.5)
# Choose mixing ratios
w = np.array([0.001, 0.002, 0.003, 0.005, 0.008, 0.012, 0.020]).reshape(-1, 1)
# Choose the range of pressures that the mixing ratio lines are drawn over
p_levs = units.hPa * np.linspace(1000, 400, 7)
skew.plot_mixing_lines(mixing_ratio=w, pressure=p_levs, colors='lime')
skew.ax.set_ylim(1000, 100)
gvutil.set_titles_and_labels(ax, maintitle="ATS Rawinsonde: degC + Thin wind")
# Set axes limits and ticks
gvutil.set_axes_limits_and_ticks(
ax=ax,
xlim=[-30, 50],
yticks=[1000, 850, 700, 500, 400, 300, 250, 200, 150, 100])
# Change the style of the gridlines
plt.grid(True,
which='major',
axis='both',
color='tan',
linewidth=1.5,
alpha=0.5)
plt.xlabel("Temperature (C)")
plt.ylabel("P (hPa)")
plt.show()
| 33.585366 | 86 | 0.597434 | true | true | |
f7fd8af0f84ec714ff8e6274f3c52715d339784f | 9,608 | py | Python | STL_Py/venv/Version_Extended/ExtendedOutputDemo.py | pb-10/Smart-Traffic-Light | 334ba878f42723b72ea2a23fe912e429763ba3af | [
"MIT"
] | 3 | 2021-05-19T04:59:08.000Z | 2021-08-23T20:35:54.000Z | STL_Py/venv/Version_Extended/ExtendedOutputDemo.py | pb-10/Smart-Traffic-Light | 334ba878f42723b72ea2a23fe912e429763ba3af | [
"MIT"
] | null | null | null | STL_Py/venv/Version_Extended/ExtendedOutputDemo.py | pb-10/Smart-Traffic-Light | 334ba878f42723b72ea2a23fe912e429763ba3af | [
"MIT"
] | 3 | 2022-02-16T04:56:58.000Z | 2022-02-25T09:51:38.000Z | from turtle import Turtle
import turtle
from turtle import Screen
def HeadText():
turtle.color('black')
style = ('Courier', 14,)
turtle.speed(1000)
turtle.penup()
turtle.setposition(-198, 285)
turtle.write('Side 1', font=style, align='center')
turtle.penup()
turtle.setposition(-48, 285)
turtle.write('Side 2', font=style, align='center')
turtle.penup()
turtle.setposition(102, 285)
turtle.write('Side 3', font=style, align='center')
turtle.penup()
turtle.setposition(252, 285)
turtle.write('Side 4', font=style, align='center')
turtle.setposition(-245, 140)
turtle.write('Left ', font=style, align='center')
turtle.penup()
turtle.setposition(-260, 90)
turtle.write('Straight ', font=style, align='center')
turtle.penup()
turtle.setposition(-250, 40)
turtle.write('Right ', font=style, align='center')
turtle.penup()
turtle.hideturtle()
def Back():
    """Draw the four grey backboards, one behind each signal column."""
    for column in range(4):
        board = Turtle(shape='square')
        board.color('white')
        board.shapesize(12.65, 2.5)
        board.speed(100)
        board.color('grey')
        board.penup()
        board.sety(150)
        board.setx(column * 150 - 200)
def Pole():
    """Draw the four grey poles that carry the signal heads."""
    for column in range(4):
        pole = Turtle(shape='square')
        pole.shapesize(9, 1)
        pole.color('white')
        pole.speed(100)
        pole.penup()
        pole.sety(-65)
        pole.setx(column * 150 - 200)
        pole.color('grey')
def Base():
    """Draw the pole bases and the row captions of the statistics table."""
    for column in range(4):
        base = Turtle(shape='square')
        base.color('white')
        base.penup()
        base.speed(100)
        base.sety(-150)
        base.setx(column * 150 - 200)
        base.shapesize(1, 2)
        base.color('grey')
    # Captions for the statistics rows written below the lights.
    style = ('Courier', 14,)
    turtle.color('black')
    turtle.speed(1000)
    captions = (('Total Cars :', -320, -207),
                ('Passing Cars :', -329, -227),
                ('Time :', -297, -247))
    for text, x, y in captions:
        turtle.penup()
        turtle.setposition(x, y)
        turtle.write(text, font=style, align='center')
    turtle.penup()
    turtle.hideturtle()
def Red(Num):
    """Light only the red (top) lamp of signal ``Num`` (1-based side index)."""
    x = (Num - 1) * 150 - 200
    # (y position, lamp colour, drawing speed) for the five circles, top down.
    lamps = ((250, 'red', 100), (200, 'white', 100),
             (150, 'white', 165), (100, 'white', 165), (50, 'white', 165))
    for y, colour, speed in lamps:
        lamp = Turtle(shape='circle')
        lamp.color('white')
        lamp.speed(speed)
        lamp.shapesize(2)
        lamp.color(colour)
        lamp.penup()
        lamp.sety(y)
        lamp.setx(x)
def Yellow(Num):
    """Light only the yellow (second) lamp of signal ``Num`` (1-based)."""
    x = (Num - 1) * 150 - 200
    # (y position, lamp colour, drawing speed) for the five circles, top down.
    lamps = ((250, 'white', 100), (200, 'yellow', 100),
             (150, 'white', 165), (100, 'white', 165), (50, 'white', 165))
    for y, colour, speed in lamps:
        lamp = Turtle(shape='circle')
        lamp.color('white')
        lamp.speed(speed)
        lamp.shapesize(2)
        lamp.color(colour)
        lamp.penup()
        lamp.sety(y)
        lamp.setx(x)
def GreenL(Num, TCars, PCars, Time):
    """Light the green "Left" lamp (y=150) of signal ``Num`` and write its
    statistics in the left column under that signal."""
    x = (Num - 1) * 150 - 200
    # Top three lamps; only the Left-row lamp is green.
    lamps = ((250, 'white', 100), (200, 'white', 100), (150, 'green', 165))
    for y, colour, speed in lamps:
        lamp = Turtle(shape='circle')
        lamp.color('white')
        lamp.speed(speed)
        lamp.shapesize(2)
        lamp.color(colour)
        lamp.penup()
        lamp.sety(y)
        lamp.setx(x)
    # Blank the previous statistics with white squares, then write the new
    # values (total cars, passing cars, elapsed time) top to bottom.
    style = ('Courier', 14,)
    turtle.color('black')
    turtle.speed(1000)
    turtle.penup()
    text_x = (Num - 1) * 150 - 230
    for y, value in ((-207, TCars), (-227, PCars), (-247, Time)):
        eraser = Turtle(shape='square')
        eraser.color('white')
        eraser.speed(100)
        eraser.shapesize(1)
        eraser.color('white')
        eraser.penup()
        eraser.sety(y)
        eraser.setx(text_x)
        turtle.setposition(text_x, y)
        turtle.write(value, font=style, align='center')
        turtle.penup()
    turtle.hideturtle()
def GreenM(Num, TCars, PCars, Time):
    """Light the green "Straight" lamp (y=100) of signal ``Num`` and write
    its statistics in the middle column under that signal."""
    x = (Num - 1) * 150 - 200
    # Top three lamps; only the Straight-row lamp is green.
    lamps = ((250, 'white', 100), (200, 'white', 100), (100, 'green', 165))
    for y, colour, speed in lamps:
        lamp = Turtle(shape='circle')
        lamp.color('white')
        lamp.speed(speed)
        lamp.shapesize(2)
        lamp.color(colour)
        lamp.penup()
        lamp.sety(y)
        lamp.setx(x)
    # Blank the previous statistics with white squares, then write the new
    # values (total cars, passing cars, elapsed time) top to bottom.
    style = ('Courier', 14,)
    turtle.color('black')
    turtle.speed(1000)
    turtle.penup()
    text_x = (Num - 1) * 150 - 200
    for y, value in ((-207, TCars), (-227, PCars), (-247, Time)):
        eraser = Turtle(shape='square')
        eraser.color('white')
        eraser.speed(100)
        eraser.shapesize(1)
        eraser.color('white')
        eraser.penup()
        eraser.sety(y)
        eraser.setx(text_x)
        turtle.setposition(text_x, y)
        turtle.write(value, font=style, align='center')
        turtle.penup()
    turtle.hideturtle()
def GreenR(Num, TCars, PCars, Time):
    """Light the green "Right" lamp (y=50) of signal ``Num`` and write its
    statistics in the right column under that signal."""
    x = (Num - 1) * 150 - 200
    # Top three lamps; only the Right-row lamp is green.
    lamps = ((250, 'white', 100), (200, 'white', 100), (50, 'green', 165))
    for y, colour, speed in lamps:
        lamp = Turtle(shape='circle')
        lamp.color('white')
        lamp.speed(speed)
        lamp.shapesize(2)
        lamp.color(colour)
        lamp.penup()
        lamp.sety(y)
        lamp.setx(x)
    # Blank the previous statistics with white squares, then write the new
    # values (total cars, passing cars, elapsed time) top to bottom.
    style = ('Courier', 14,)
    turtle.color('black')
    turtle.speed(1000)
    turtle.penup()
    text_x = (Num - 1) * 150 - 170
    for y, value in ((-207, TCars), (-227, PCars), (-247, Time)):
        eraser = Turtle(shape='square')
        eraser.color('white')
        eraser.speed(100)
        eraser.shapesize(1)
        eraser.color('white')
        eraser.penup()
        eraser.sety(y)
        eraser.setx(text_x)
        turtle.setposition(text_x, y)
        turtle.write(value, font=style, align='center')
        turtle.penup()
    turtle.hideturtle()
def RightOff(Num):
    """Paint the bottom (right-turn) lamp of signal ``Num`` white again."""
    lamp = Turtle(shape='circle')
    lamp.color('white')
    lamp.speed(165)
    lamp.shapesize(2)
    lamp.color('white')
    lamp.penup()
    lamp.sety(50)
    lamp.setx((Num - 1) * 150 - 200)
def Reset():
    """Switch every one of the four signals back to yellow."""
    for side in (1, 2, 3, 4):
        Yellow(side)
'''
screen=Screen()
screen.setup(1000,1000)
Base()
Pole()
Back()
HeadText()
GreenR(1,12,12,123)
RightOff(1)
#Reset()
screen.mainloop()
'''
| 23.434146 | 62 | 0.583264 | from turtle import Turtle
import turtle
from turtle import Screen
def HeadText():
turtle.color('black')
style = ('Courier', 14,)
turtle.speed(1000)
turtle.penup()
turtle.setposition(-198, 285)
turtle.write('Side 1', font=style, align='center')
turtle.penup()
turtle.setposition(-48, 285)
turtle.write('Side 2', font=style, align='center')
turtle.penup()
turtle.setposition(102, 285)
turtle.write('Side 3', font=style, align='center')
turtle.penup()
turtle.setposition(252, 285)
turtle.write('Side 4', font=style, align='center')
turtle.setposition(-245, 140)
turtle.write('Left ', font=style, align='center')
turtle.penup()
turtle.setposition(-260, 90)
turtle.write('Straight ', font=style, align='center')
turtle.penup()
turtle.setposition(-250, 40)
turtle.write('Right ', font=style, align='center')
turtle.penup()
turtle.hideturtle()
def Back():
for i in range(0,4):
pen9 = Turtle(shape='square')
pen9.color('white')
pen9.shapesize(12.65, 2.5)
pen9.speed(100)
pen9.color('grey')
pen9.penup()
pen9.sety(150)
pen9.setx(-200+(i*150))
def Pole():
for i in range(0, 4):
pen9 = Turtle(shape='square')
pen9.shapesize(9, 1)
pen9.color('white')
pen9.speed(100)
pen9.penup()
pen9.sety(-65)
pen9.setx(-200+(i*150))
pen9.color('grey')
def Base():
for i in range(0, 4):
pen9 = Turtle(shape='square')
pen9.color('white')
pen9.penup()
pen9.speed(100)
pen9.sety(-150)
pen9.setx(-200+(i*150))
pen9.shapesize(1, 2)
pen9.color('grey')
turtle.color('black')
style = ('Courier', 14,)
turtle.speed(1000)
turtle.penup()
turtle.setposition(-320, -207)
turtle.write('Total Cars :', font=style, align='center')
turtle.penup()
turtle.setposition(-329, -227)
turtle.write('Passing Cars :', font=style, align='center')
turtle.penup()
turtle.setposition(-297, -247)
turtle.write('Time :', font=style, align='center')
turtle.penup()
turtle.hideturtle()
def Red(Num):
i=Num-1
pen1 = Turtle(shape='circle')
pen1.color('white')
pen1.speed(100)
pen1.shapesize(2)
pen1.color('red')
pen1.penup()
pen1.sety(250)
pen1.setx(-200 + (i * 150))
pen2 = Turtle(shape='circle')
pen2.color('white')
pen2.speed(100)
pen2.shapesize(2)
pen2.color('white')
pen2.penup()
pen2.sety(200)
pen2.setx(-200 + (i * 150))
pen3 = Turtle(shape='circle')
pen3.color('white')
pen3.speed(165)
pen3.shapesize(2)
pen3.color('white')
pen3.penup()
pen3.sety(150)
pen3.setx(-200 + (i * 150))
pen3 = Turtle(shape='circle')
pen3.color('white')
pen3.speed(165)
pen3.shapesize(2)
pen3.color('white')
pen3.penup()
pen3.sety(100)
pen3.setx(-200 + (i * 150))
pen3 = Turtle(shape='circle')
pen3.color('white')
pen3.speed(165)
pen3.shapesize(2)
pen3.color('white')
pen3.penup()
pen3.sety(50)
pen3.setx(-200 + (i * 150))
def Yellow(Num):
i=Num-1
pen1 = Turtle(shape='circle')
pen1.color('white')
pen1.speed(100)
pen1.shapesize(2)
pen1.color('white')
pen1.penup()
pen1.sety(250)
pen1.setx(-200 + (i * 150))
pen2 = Turtle(shape='circle')
pen2.color('white')
pen2.speed(100)
pen2.shapesize(2)
pen2.color('yellow')
pen2.penup()
pen2.sety(200)
pen2.setx(-200 + (i * 150))
pen3 = Turtle(shape='circle')
pen3.color('white')
pen3.speed(165)
pen3.shapesize(2)
pen3.color('white')
pen3.penup()
pen3.sety(150)
pen3.setx(-200 + (i * 150))
pen3 = Turtle(shape='circle')
pen3.color('white')
pen3.speed(165)
pen3.shapesize(2)
pen3.color('white')
pen3.penup()
pen3.sety(100)
pen3.setx(-200 + (i * 150))
pen3 = Turtle(shape='circle')
pen3.color('white')
pen3.speed(165)
pen3.shapesize(2)
pen3.color('white')
pen3.penup()
pen3.sety(50)
pen3.setx(-200 + (i * 150))
def GreenL(Num,TCars,PCars,Time):
i=Num-1
pen1 = Turtle(shape='circle')
pen1.color('white')
pen1.speed(100)
pen1.shapesize(2)
pen1.color('white')
pen1.penup()
pen1.sety(250)
pen1.setx(-200 + (i * 150))
pen2 = Turtle(shape='circle')
pen2.color('white')
pen2.speed(100)
pen2.shapesize(2)
pen2.color('white')
pen2.penup()
pen2.sety(200)
pen2.setx(-200 + (i * 150))
pen3 = Turtle(shape='circle')
pen3.color('white')
pen3.speed(165)
pen3.shapesize(2)
pen3.color('green')
pen3.penup()
pen3.sety(150)
pen3.setx(-200 + (i * 150))
turtle.color('black')
style = ('Courier', 14,)
turtle.speed(1000)
turtle.penup()
pen3 = Turtle(shape='square')
pen3.color('white')
pen3.speed(100)
pen3.shapesize(1)
pen3.color('white')
pen3.penup()
pen3.sety(-207)
pen3.setx(-230 + ((i) * 150))
turtle.setposition(-230 + (i * 150), -207)
turtle.write(TCars, font=style, align='center')
turtle.penup()
pen3 = Turtle(shape='square')
pen3.color('white')
pen3.speed(100)
pen3.shapesize(1)
pen3.color('white')
pen3.penup()
pen3.sety(-227)
pen3.setx(-230 + ((i) * 150))
turtle.setposition(-230 + (i * 150), -227)
turtle.write(PCars, font=style, align='center')
turtle.penup()
pen3 = Turtle(shape='square')
pen3.color('white')
pen3.speed(100)
pen3.shapesize(1)
pen3.color('white')
pen3.penup()
pen3.sety(-247)
pen3.setx(-230 + ((i) * 150))
turtle.setposition(-230 + (i * 150), -247)
turtle.write(Time, font=style, align='center')
turtle.hideturtle()
def GreenM(Num,TCars,PCars,Time):
i=Num-1
pen1 = Turtle(shape='circle')
pen1.color('white')
pen1.speed(100)
pen1.shapesize(2)
pen1.color('white')
pen1.penup()
pen1.sety(250)
pen1.setx(-200 + (i * 150))
pen2 = Turtle(shape='circle')
pen2.color('white')
pen2.speed(100)
pen2.shapesize(2)
pen2.color('white')
pen2.penup()
pen2.sety(200)
pen2.setx(-200 + (i * 150))
pen3 = Turtle(shape='circle')
pen3.color('white')
pen3.speed(165)
pen3.shapesize(2)
pen3.color('green')
pen3.penup()
pen3.sety(100)
pen3.setx(-200 + (i * 150))
turtle.color('black')
style = ('Courier', 14,)
turtle.speed(1000)
turtle.penup()
pen3 = Turtle(shape='square')
pen3.color('white')
pen3.speed(100)
pen3.shapesize(1)
pen3.color('white')
pen3.penup()
pen3.sety(-207)
pen3.setx(-200 + ((i) * 150))
turtle.setposition(-200 + (i * 150), -207)
turtle.write(TCars, font=style, align='center')
turtle.penup()
pen3 = Turtle(shape='square')
pen3.color('white')
pen3.speed(100)
pen3.shapesize(1)
pen3.color('white')
pen3.penup()
pen3.sety(-227)
pen3.setx(-200 + ((i) * 150))
turtle.setposition(-200 + (i * 150), -227)
turtle.write(PCars, font=style, align='center')
turtle.penup()
pen3 = Turtle(shape='square')
pen3.color('white')
pen3.speed(100)
pen3.shapesize(1)
pen3.color('white')
pen3.penup()
pen3.sety(-247)
pen3.setx(-200 + ((i) * 150))
turtle.setposition(-200 + (i * 150), -247)
turtle.write(Time, font=style, align='center')
turtle.hideturtle()
def GreenR(Num,TCars,PCars,Time):
i=Num-1
pen1 = Turtle(shape='circle')
pen1.color('white')
pen1.speed(100)
pen1.shapesize(2)
pen1.color('white')
pen1.penup()
pen1.sety(250)
pen1.setx(-200 + (i * 150))
pen2 = Turtle(shape='circle')
pen2.color('white')
pen2.speed(100)
pen2.shapesize(2)
pen2.color('white')
pen2.penup()
pen2.sety(200)
pen2.setx(-200 + (i * 150))
pen3 = Turtle(shape='circle')
pen3.color('white')
pen3.speed(165)
pen3.shapesize(2)
pen3.color('green')
pen3.penup()
pen3.sety(50)
pen3.setx(-200 + (i * 150))
turtle.color('black')
style = ('Courier', 14,)
turtle.speed(1000)
turtle.penup()
pen3 = Turtle(shape='square')
pen3.color('white')
pen3.speed(100)
pen3.shapesize(1)
pen3.color('white')
pen3.penup()
pen3.sety(-207)
pen3.setx(-170 + ((i) * 150))
turtle.setposition(-170 + (i * 150), -207)
turtle.write(TCars, font=style, align='center')
turtle.penup()
pen3 = Turtle(shape='square')
pen3.color('white')
pen3.speed(100)
pen3.shapesize(1)
pen3.color('white')
pen3.penup()
pen3.sety(-227)
pen3.setx(-170 + ((i) * 150))
turtle.setposition(-170 + (i * 150), -227)
turtle.write(PCars, font=style, align='center')
turtle.penup()
pen3 = Turtle(shape='square')
pen3.color('white')
pen3.speed(100)
pen3.shapesize(1)
pen3.color('white')
pen3.penup()
pen3.sety(-247)
pen3.setx(-170 + ((i) * 150))
turtle.setposition(-170 + (i * 150), -247)
turtle.write(Time, font=style, align='center')
turtle.hideturtle()
def RightOff(Num):
i=Num-1
pen3 = Turtle(shape='circle')
pen3.color('white')
pen3.speed(165)
pen3.shapesize(2)
pen3.color('white')
pen3.penup()
pen3.sety(50)
pen3.setx(-200 + (i * 150))
def Reset():
Yellow(1)
Yellow(2)
Yellow(3)
Yellow(4)
| true | true |
f7fd8b209317e2cf8c85b59d2fc7c232fc74fd93 | 322 | py | Python | cedar_settings/utils/datetime.py | stewardshiptools/stewardshiptools | ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e | [
"MIT"
] | null | null | null | cedar_settings/utils/datetime.py | stewardshiptools/stewardshiptools | ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e | [
"MIT"
] | 11 | 2020-03-24T15:29:46.000Z | 2022-03-11T23:14:48.000Z | cedar_settings/utils/datetime.py | stewardshiptools/stewardshiptools | ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e | [
"MIT"
] | null | null | null | import pytz
from django.conf import settings
def localize_datetime(dt):
    """Attach the project time zone (settings.TIME_ZONE) to a naive datetime.

    :param dt: datetime object
    :return: Timezone aware datetime object
    """
    # pytz time zones must be attached via localize() rather than tzinfo=.
    return pytz.timezone(settings.TIME_ZONE).localize(dt)
| 23 | 92 | 0.726708 | import pytz
from django.conf import settings
def localize_datetime(dt):
tz = pytz.timezone(settings.TIME_ZONE)
return tz.localize(dt)
| true | true |
f7fd8bf9ca810018cfe75f333dbf3fcc7251274f | 2,306 | py | Python | setup.py | caseypw/m2g | be29587322ab1fafb96f6afb726efbdb39b64b66 | [
"Apache-2.0"
] | null | null | null | setup.py | caseypw/m2g | be29587322ab1fafb96f6afb726efbdb39b64b66 | [
"Apache-2.0"
] | null | null | null | setup.py | caseypw/m2g | be29587322ab1fafb96f6afb726efbdb39b64b66 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
setup.py
~~~~~~~~
on package install:
- generates metadata
- installs json files for use in m2g_cloud
- installs `m2g` script keywords to the command line
- ensures python version
- installs m2g dependencies
Use `pip install .` to install the package.
Use `pip install -e .` to install the package in developer mode.
See our README for more details on package installation : https://github.com/neurodata/m2g/blob/staging/README.md
"""
from setuptools import setup, find_packages
from m2g import __version__
# initial setup
# Collect all setuptools arguments in one dict, built up in stages below
# (metadata, packaging/entry points, requirements) and passed to setup() once.
kwargs = {}

# add metadata
kwargs.update(
    dict(
        name="m2g",
        version=__version__,
        description="Neuro Data MRI to Graphs Pipeline",
        author="Derek Pisner, Alex Loftus, Greg Kiar, Eric Bridgeford, and Will Gray Roncal",
        author_email="dpisner@utexas.edu, aloftus2@jhu.edu, gkiar@jhu.edu, wgr@jhu.edu, ebridge2@jhu.edu",
        url="https://github.com/neurodata/m2g",
        download_url="https://github.com/neurodata/m2g/tarball/" + __version__,
        keywords=["connectome", "mri", "pipeline"],
        classifiers=["Programming Language :: Python :: 3.6"],
    )
)

# add utility info: discovered packages, template data files, and the
# command-line entry points installed with the package
kwargs.update(
    dict(
        packages=find_packages(),
        package_data={"templates": ["*.json"]},
        include_package_data=False,  # only include the m2g_cloud template jsons
        entry_points={
            "console_scripts": [
                "m2g=m2g.scripts.m2g_bids:main",
                "m2g_dwi_pipeline=m2g.scripts.m2g_dwi_pipeline:main",
                "m2g_cloud=m2g.scripts.m2g_cloud:main",
                "m2g_bids=m2g.scripts.m2g_bids:main",  # for backwards compatibility
            ]
        },
        python_requires=">=3.6",
    )
)

# add requirements (third-party runtime dependencies installed by pip)
kwargs.update(
    dict(
        install_requires=[
            "nibabel",
            "numpy",
            "dipy>=1.0.0",
            "scipy",
            "boto3",
            "awscli",
            "matplotlib",
            "nilearn",
            "vtk",
            "pyvtk",
            "fury",
            "requests",
            "plotly",
            "pybids>=0.9.0",
            "scikit-image",
            "networkx>=2.4",
            "configparser>=3.7.4",
            "pytest",
        ]
    )
)

# run setup
setup(**kwargs)
| 27.129412 | 113 | 0.582827 |
from setuptools import setup, find_packages
from m2g import __version__
kwargs = {}
kwargs.update(
dict(
name="m2g",
version=__version__,
description="Neuro Data MRI to Graphs Pipeline",
author="Derek Pisner, Alex Loftus, Greg Kiar, Eric Bridgeford, and Will Gray Roncal",
author_email="dpisner@utexas.edu, aloftus2@jhu.edu, gkiar@jhu.edu, wgr@jhu.edu, ebridge2@jhu.edu",
url="https://github.com/neurodata/m2g",
download_url="https://github.com/neurodata/m2g/tarball/" + __version__,
keywords=["connectome", "mri", "pipeline"],
classifiers=["Programming Language :: Python :: 3.6"],
)
)
kwargs.update(
dict(
packages=find_packages(),
package_data={"templates": ["*.json"]},
include_package_data=False,
entry_points={
"console_scripts": [
"m2g=m2g.scripts.m2g_bids:main",
"m2g_dwi_pipeline=m2g.scripts.m2g_dwi_pipeline:main",
"m2g_cloud=m2g.scripts.m2g_cloud:main",
"m2g_bids=m2g.scripts.m2g_bids:main",
]
},
python_requires=">=3.6",
)
)
kwargs.update(
dict(
install_requires=[
"nibabel",
"numpy",
"dipy>=1.0.0",
"scipy",
"boto3",
"awscli",
"matplotlib",
"nilearn",
"vtk",
"pyvtk",
"fury",
"requests",
"plotly",
"pybids>=0.9.0",
"scikit-image",
"networkx>=2.4",
"configparser>=3.7.4",
"pytest",
]
)
)
setup(**kwargs)
| true | true |
f7fd8c191bd9b665e91705fe3371b26bde803c75 | 23,120 | py | Python | ddganAE/architectures/cae/D2/cae.py | Zeff020/Adversarial_ROM | 8c9e7ff86250e9370e5fdd2018f9ad04ded5f122 | [
"MIT"
] | 1 | 2021-12-27T06:14:32.000Z | 2021-12-27T06:14:32.000Z | ddganAE/architectures/cae/D2/cae.py | Zeff020/Adversarial_ROM | 8c9e7ff86250e9370e5fdd2018f9ad04ded5f122 | [
"MIT"
] | null | null | null | ddganAE/architectures/cae/D2/cae.py | Zeff020/Adversarial_ROM | 8c9e7ff86250e9370e5fdd2018f9ad04ded5f122 | [
"MIT"
] | 3 | 2021-08-05T11:17:37.000Z | 2021-09-02T02:37:44.000Z | """
Collection of encoders and decoders that can readily be imported
and used by the 2D adversarial and convolutional autoencoder and predictive
models.
Note that these models are currently adjusted to a 55 by 42 input shape.
"""
from keras.layers import Dense, Flatten, Reshape, Conv2D, UpSampling2D, \
Cropping2D, MaxPool2D
from keras.models import Sequential
__author__ = "Zef Wolffs"
__credits__ = []
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Zef Wolffs"
__email__ = "zefwolffs@gmail.com"
__status__ = "Development"
def build_custom_conv_encoder(input_shape, latent_dim, initializer,
info=False):
"""
Builds a 2D convolutional encoder
Args:
input_shape (tuple): Shape tuple of input grids
latent_dim (int): Number of latent variables
initializer (tf.keras.initializers.Initializer): Weights initializer
info (bool, optional): Whether to print info. Defaults to False.
Returns:
tf.keras.Model: encoder
"""
encoder = Sequential()
encoder.add(Conv2D(32, (5, 5), padding="same", activation="relu",
input_shape=input_shape,
kernel_initializer=initializer))
encoder.add(Conv2D(64, (5, 5), strides=(2, 2), activation="relu",
padding="same", kernel_initializer=initializer))
encoder.add(Conv2D(128, (5, 5), strides=(2, 2), activation="relu",
padding="same", kernel_initializer=initializer))
encoder.add(Flatten())
encoder.add(Dense(latent_dim, activation="linear"))
if info:
print(encoder.summary())
return encoder
def build_custom_conv_decoder(latent_dim, initializer, info=False):
"""
Builds a 2D convolutional decoder
Args:
latent_dim (int): Number of latent variables
initializer (tf.keras.initializers.Initializer): Weights initializer
info (bool, optional): Whether to print info. Defaults to False.
Returns:
tf.keras.Model: encoder
"""
decoder = Sequential()
decoder.add(Dense(78848, input_dim=latent_dim,
kernel_initializer=initializer))
decoder.add(Reshape((56, 11, 128)))
decoder.add(Conv2D(64, (5, 5), activation="relu", padding="same",
kernel_initializer=initializer))
decoder.add(UpSampling2D())
decoder.add(Conv2D(32, (5, 5), activation="relu", padding="same",
kernel_initializer=initializer))
decoder.add(UpSampling2D())
decoder.add(Conv2D(2, (5, 5), activation="sigmoid", padding="same",
kernel_initializer=initializer))
decoder.add(Cropping2D(cropping=((2, 1), (1, 1))))
if info:
print(decoder.summary())
return decoder
def build_omata_encoder_decoder(input_shape, latent_dim, initializer,
                                info=False, act="elu", dense_act=None):
    """
    This encoder-decoder pair currently works for 55 by 42 grids

    Args:
        input_shape (tuple): Shape tuple of input grids
        latent_dim (int): Number of latent variables
        initializer (tf.keras.initializers.Initializer): Weights initializer
        info (bool, optional): Whether to print info. Defaults to False.
        act (str, optional): Activation function to use. Defaults to "elu".
        dense_act (str, optional): Dense layer activation function to use.
                                   Defaults to None.

    Returns:
        tuple: encoder, decoder pair
    """
    encoder = Sequential()
    encoder.add(Conv2D(16, (3, 3), padding="same", activation=act,
                       input_shape=input_shape,
                       kernel_initializer=initializer))
    encoder.add(MaxPool2D(padding="same"))
    encoder.add(Conv2D(8, (3, 3), activation=act,
                       padding="same", kernel_initializer=initializer))
    encoder.add(MaxPool2D(padding="same"))
    encoder.add(Conv2D(8, (3, 3), activation=act,
                       padding="same", kernel_initializer=initializer))
    encoder.add(MaxPool2D(padding="same"))
    encoder.add(Flatten())
    encoder.add(Dense(latent_dim, activation="linear"))

    if info:
        print(encoder.summary())

    # BUGFIX: for a 55x42 input the last encoder feature map is 7x6x8, i.e.
    # 336 values. The previous code used Dense(392) (= 7*7*8), reshaped with
    # input_shape[1] for *both* spatial dims (square maps only) and cropped
    # the width by (1, 1), producing a 55x54 output instead of 55x42. Fixed
    # for consistency with build_wider_omata_encoder_decoder: the dense size
    # matches the flattened map, Reshape uses both spatial dims, and the
    # width crop is (3, 3) (48 -> 42).
    decoder = Sequential()
    decoder.add(Dense(336, input_dim=latent_dim,
                      kernel_initializer=initializer,
                      activation=dense_act))
    decoder.add(Reshape((encoder.layers[6].input_shape[1],
                         encoder.layers[6].input_shape[2], 8)))
    decoder.add(Conv2D(8, (3, 3), activation=act, padding="same",
                       kernel_initializer=initializer))
    decoder.add(UpSampling2D())
    decoder.add(Conv2D(8, (3, 3), activation=act, padding="same",
                       kernel_initializer=initializer))
    decoder.add(UpSampling2D())
    decoder.add(Conv2D(16, (3, 3), activation=act, padding="same",
                       kernel_initializer=initializer))
    decoder.add(UpSampling2D())
    decoder.add(Conv2D(2, (3, 3), activation="linear", padding="same",
                       kernel_initializer=initializer))
    decoder.add(Cropping2D(cropping=((1, 0), (3, 3))))

    if info:
        print(decoder.summary())

    return encoder, decoder
def build_wider_omata_encoder_decoder(input_shape, latent_dim, initializer,
                                      info=False, act="elu", dense_act=None):
    """
    This encoder-decoder pair currently works for 55 by 42 grids

    Args:
        input_shape (tuple): Shape tuple of input grids
        latent_dim (int): Number of latent variables
        initializer (tf.keras.initializers.Initializer): Weights initializer
        info (bool, optional): Whether to print info. Defaults to False.
        act (str, optional): Activation function to use. Defaults to "elu".
        dense_act (str, optional): Dense layer activation function to use.
                                   Defaults to None.

    Returns:
        tuple: encoder, decoder pair
    """
    # Encoder: three conv/pool stages widening to 64 channels, then a
    # linear projection onto the latent space.
    encoder_stack = [
        Conv2D(16, (5, 5), padding="same", activation=act,
               input_shape=input_shape, kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Conv2D(32, (3, 3), activation=act,
               padding="same", kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Conv2D(64, (3, 3), activation=act,
               padding="same", kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Flatten(),
        Dense(latent_dim, activation="linear"),
    ]
    encoder = Sequential(encoder_stack)

    if info:
        print(encoder.summary())

    # Decoder mirrors the encoder; the Reshape target is read from the
    # encoder's Flatten input so the two stay in sync (7 x 6 x 64 = 2688).
    bottleneck_shape = (encoder.layers[6].input_shape[1],
                        encoder.layers[6].input_shape[2], 64)
    decoder_stack = [
        Dense(2688, input_dim=latent_dim,
              kernel_initializer=initializer, activation=dense_act),
        Reshape(bottleneck_shape),
        Conv2D(64, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(32, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(16, (5, 5), activation=act, padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(2, (3, 3), activation="linear", padding="same",
               kernel_initializer=initializer),
        Cropping2D(cropping=((1, 0), (3, 3))),  # 56x48 -> 55x42
    ]
    decoder = Sequential(decoder_stack)

    if info:
        print(decoder.summary())

    return encoder, decoder
def build_wide_omata_encoder_decoder(input_shape, latent_dim, initializer,
                                     info=False, act="elu", dense_act=None):
    """
    This encoder-decoder pair currently works for 55 by 42 grids

    Args:
        input_shape (tuple): Shape tuple of input grids
        latent_dim (int): Number of latent variables
        initializer (tf.keras.initializers.Initializer): Weights initializer
        info (bool, optional): Whether to print info. Defaults to False.
        act (str, optional): Activation function to use. Defaults to "elu".
        dense_act (str, optional): Dense layer activation function to use.
                                   Defaults to None.

    Returns:
        tuple: encoder, decoder pair
    """
    # Encoder: conv/pool stages 32 -> 64 -> 128 channels, then a linear
    # projection onto the latent space.
    encoder = Sequential([
        Conv2D(32, (5, 5), padding="same", activation=act,
               input_shape=input_shape, kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Conv2D(64, (3, 3), activation=act,
               padding="same", kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Conv2D(128, (3, 3), activation=act,
               padding="same", kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Flatten(),
        Dense(latent_dim, activation="linear"),
    ])

    if info:
        print(encoder.summary())

    # Decoder mirrors the encoder; the bottleneck shape is read from the
    # encoder's Flatten input (7 x 6 x 128 = 5376).
    flat_input = encoder.layers[6].input_shape
    decoder = Sequential([
        Dense(5376, input_dim=latent_dim,
              kernel_initializer=initializer, activation=dense_act),
        Reshape((flat_input[1], flat_input[2], 128)),
        Conv2D(128, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(64, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(32, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(2, (3, 3), activation="linear", padding="same",
               kernel_initializer=initializer),
        Cropping2D(cropping=((1, 0), (3, 3))),  # 56x48 -> 55x42
    ])

    if info:
        print(decoder.summary())

    return encoder, decoder
def build_deeper_omata_encoder_decoder(input_shape, latent_dim, initializer,
                                       info=False, act="elu", dense_act=None):
    """
    This encoder-decoder pair currently works for 55 by 42 grids

    Args:
        input_shape (tuple): Shape tuple of input grids
        latent_dim (int): Number of latent variables
        initializer (tf.keras.initializers.Initializer): Weights initializer
        info (bool, optional): Whether to print info. Defaults to False.
        act (str, optional): Activation function to use. Defaults to "elu".
        dense_act (str, optional): Dense layer activation function to use.
                                   Defaults to None.

    Returns:
        tuple: encoder, decoder pair
    """
    # Encoder: four conv/pool stages (32 -> 64 -> 64 -> 128 channels)
    # followed by a linear projection onto the latent space.
    encoder = Sequential([
        Conv2D(32, (5, 5), padding="same", activation=act,
               input_shape=input_shape, kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Conv2D(64, (3, 3), activation=act,
               padding="same", kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Conv2D(64, (3, 3), activation=act,
               padding="same", kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Conv2D(128, (3, 3), activation=act,
               padding="same", kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Flatten(),
        Dense(latent_dim, activation="linear"),
    ])

    if info:
        print(encoder.summary())

    # Bottleneck shape comes from the encoder's Flatten input
    # (4 x 3 x 128 = 1536); the two "valid"-padded convs near the output
    # shrink the upsampled map so the final crop lands on 55 x 42.
    flat_input = encoder.layers[8].input_shape
    decoder = Sequential([
        Dense(1536, input_dim=latent_dim,
              kernel_initializer=initializer, activation=dense_act),
        Reshape((flat_input[1], flat_input[2], 128)),
        Conv2D(128, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(64, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(64, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(32, (3, 3), activation=act, padding="valid",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(2, (3, 3), activation="linear", padding="valid",
               kernel_initializer=initializer),
        Cropping2D(cropping=((2, 1), (0, 0))),
    ])

    if info:
        print(decoder.summary())

    return encoder, decoder
def build_denser_omata_encoder_decoder(input_shape, latent_dim, initializer,
                                       info=False, act="elu", dense_act=None):
    """
    This encoder-decoder pair currently works for 55 by 42 grids

    Args:
        input_shape (tuple): Shape tuple of input grids
        latent_dim (int): Number of latent variables
        initializer (tf.keras.initializers.Initializer): Weights initializer
        info (bool, optional): Whether to print info. Defaults to False.
        act (str, optional): Activation function to use. Defaults to "elu".
        dense_act (str, optional): Dense layer activation function to use.
                                   Defaults to None.

    Returns:
        tuple: encoder, decoder pair
    """
    half_flat = int(5376 / 2)  # intermediate dense width (half of 7*6*128)

    # Encoder: three conv/pool stages plus an extra dense layer before the
    # latent projection (hence "denser").
    encoder = Sequential([
        Conv2D(32, (5, 5), padding="same", activation=act,
               input_shape=input_shape, kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Conv2D(64, (3, 3), activation=act,
               padding="same", kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Conv2D(128, (3, 3), activation=act,
               padding="same", kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Flatten(),
        Dense(half_flat, kernel_initializer=initializer,
              activation=dense_act),
        Dense(latent_dim, activation="linear"),
    ])

    if info:
        print(encoder.summary())

    # Decoder mirrors the encoder, expanding latent -> 2688 -> 5376 before
    # reshaping to the encoder's bottleneck map (7 x 6 x 128).
    flat_input = encoder.layers[6].input_shape
    decoder = Sequential([
        Dense(half_flat, kernel_initializer=initializer,
              activation=dense_act, input_shape=(latent_dim,)),
        Dense(5376, kernel_initializer=initializer,
              activation=dense_act, input_shape=(half_flat,)),
        Reshape((flat_input[1], flat_input[2], 128)),
        Conv2D(128, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(64, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(32, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(2, (3, 3), activation="linear", padding="same",
               kernel_initializer=initializer),
        Cropping2D(cropping=((1, 0), (3, 3))),  # 56x48 -> 55x42
    ])
    decoder.build(input_shape)

    if info:
        print(decoder.summary())

    return encoder, decoder
def build_densest_omata_encoder_decoder(input_shape, latent_dim, initializer,
                                        info=False, act="elu", dense_act=None):
    """
    This encoder-decoder pair currently works for 55 by 42 grids

    Args:
        input_shape (tuple): Shape tuple of input grids
        latent_dim (int): Number of latent variables
        initializer (tf.keras.initializers.Initializer): Weights initializer
        info (bool, optional): Whether to print info. Defaults to False.
        act (str, optional): Activation function to use. Defaults to "elu".
        dense_act (str, optional): Dense layer activation function to use.
                                   Defaults to None.

    Returns:
        tuple: encoder, decoder pair
    """
    flat_size = int(9856)       # 14 * 11 * 64, the flattened bottleneck
    half_flat = int(9856 / 2)

    # Encoder: only two conv/pool stages, then a deep dense funnel down to
    # the latent space (hence "densest").
    encoder = Sequential([
        Conv2D(32, (5, 5), padding="same", activation=act,
               input_shape=input_shape, kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Conv2D(64, (3, 3), activation=act,
               padding="same", kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Flatten(),
        Dense(flat_size, kernel_initializer=initializer,
              activation=dense_act),
        Dense(half_flat, kernel_initializer=initializer,
              activation=dense_act),
        Dense(latent_dim, activation="linear"),
    ])

    if info:
        print(encoder.summary())

    # Decoder mirrors the dense funnel in reverse before reshaping to the
    # encoder's bottleneck map (14 x 11 x 64).
    flat_input = encoder.layers[4].input_shape
    decoder = Sequential([
        Dense(half_flat, kernel_initializer=initializer,
              activation=dense_act, input_shape=(latent_dim,)),
        Dense(9856, kernel_initializer=initializer,
              activation=dense_act, input_shape=(half_flat,)),
        Dense(9856, kernel_initializer=initializer,
              activation=dense_act, input_shape=(flat_size,)),
        Reshape((flat_input[1], flat_input[2], 64)),
        Conv2D(64, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(32, (3, 3), activation=act, padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(2, (3, 3), activation="linear", padding="same",
               kernel_initializer=initializer),
        Cropping2D(cropping=((1, 0), (1, 1))),  # 56x44 -> 55x42
    ])
    decoder.build(input_shape)

    if info:
        print(decoder.summary())

    return encoder, decoder
def build_agostini_encoder_decoder(input_shape, latent_dim, initializer,
                                   info=False):
    """
    This encoder-decoder pair currently works for 221 by 42 grids

    Args:
        input_shape (tuple): Shape tuple of input grids
        latent_dim (int): Number of latent variables
        initializer (tf.keras.initializers.Initializer): Weights initializer
        info (bool, optional): Whether to print info. Defaults to False.

    Returns:
        tuple: encoder, decoder pair
    """
    # Encoder: three relu conv/pool stages (16 -> 32 -> 64 channels) and a
    # linear projection onto the latent space.
    encoder = Sequential([
        Conv2D(16, (5, 5), padding="same", activation="relu",
               input_shape=input_shape, kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Conv2D(32, (3, 3), activation="relu",
               padding="same", kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Conv2D(64, (3, 3), activation="relu",
               padding="same", kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Flatten(),
        Dense(latent_dim, activation="linear"),
    ])

    if info:
        print(encoder.summary())

    # Reshape target read from the third MaxPool's input (56 x 11);
    # 56 * 11 * 16 = 9856 matches the dense expansion.
    pool_input = encoder.layers[5].input_shape
    decoder = Sequential([
        Dense(9856, input_dim=latent_dim, kernel_initializer=initializer),
        Reshape((pool_input[1], pool_input[2], 16)),
        Conv2D(64, (3, 3), activation="relu", padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(32, (3, 3), activation="relu", padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(16, (5, 5), activation="sigmoid", padding="same",
               kernel_initializer=initializer),
        Conv2D(2, (3, 3), activation="sigmoid", padding="same",
               kernel_initializer=initializer),
        Cropping2D(cropping=((1, 2), (1, 1))),  # 224x44 -> 221x42
    ])

    if info:
        print(decoder.summary())

    return encoder, decoder
def build_mnist_wide_omata_encoder_decoder(input_shape, latent_dim,
                                           initializer, info=False):
    """
    This encoder-decoder pair currently works for 28 by 28 grids so can work
    on MNIST dataset as a test

    Args:
        input_shape (tuple): Shape tuple of input grids
        latent_dim (int): Number of latent variables
        initializer (tf.keras.initializers.Initializer): Weights initializer
        info (bool, optional): Whether to print info. Defaults to False.

    Returns:
        tuple: encoder, decoder pair
    """
    # Encoder: conv/pool stages narrowing 128 -> 64 -> 32 channels, then a
    # linear projection onto the latent space.
    encoder = Sequential([
        Conv2D(128, (3, 3), padding="same", activation="relu",
               input_shape=input_shape, kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Conv2D(64, (3, 3), activation="relu",
               padding="same", kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Conv2D(32, (3, 3), activation="relu",
               padding="same", kernel_initializer=initializer),
        MaxPool2D(padding="same"),
        Flatten(),
        Dense(latent_dim, activation="linear"),
    ])

    if info:
        print(encoder.summary())

    # Reshape target read from the third MaxPool's input (7 x 7);
    # 7 * 7 * 16 = 784 matches the dense expansion and the MNIST pixel count.
    pool_input = encoder.layers[5].input_shape
    decoder = Sequential([
        Dense(784, input_dim=latent_dim, kernel_initializer=initializer),
        Reshape((pool_input[1], pool_input[2], 16)),
        Conv2D(32, (3, 3), activation="relu", padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(64, (3, 3), activation="relu", padding="same",
               kernel_initializer=initializer),
        UpSampling2D(),
        Conv2D(1, (3, 3), activation="sigmoid", padding="same",
               kernel_initializer=initializer),
        Cropping2D(cropping=((0, 0), (0, 0))),  # no-op crop, kept for symmetry
    ])

    if info:
        print(decoder.summary())

    return encoder, decoder
| 40.41958 | 79 | 0.611289 |
from keras.layers import Dense, Flatten, Reshape, Conv2D, UpSampling2D, \
Cropping2D, MaxPool2D
from keras.models import Sequential
__author__ = "Zef Wolffs"
__credits__ = []
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Zef Wolffs"
__email__ = "zefwolffs@gmail.com"
__status__ = "Development"
def build_custom_conv_encoder(input_shape, latent_dim, initializer,
info=False):
encoder = Sequential()
encoder.add(Conv2D(32, (5, 5), padding="same", activation="relu",
input_shape=input_shape,
kernel_initializer=initializer))
encoder.add(Conv2D(64, (5, 5), strides=(2, 2), activation="relu",
padding="same", kernel_initializer=initializer))
encoder.add(Conv2D(128, (5, 5), strides=(2, 2), activation="relu",
padding="same", kernel_initializer=initializer))
encoder.add(Flatten())
encoder.add(Dense(latent_dim, activation="linear"))
if info:
print(encoder.summary())
return encoder
def build_custom_conv_decoder(latent_dim, initializer, info=False):
decoder = Sequential()
decoder.add(Dense(78848, input_dim=latent_dim,
kernel_initializer=initializer))
decoder.add(Reshape((56, 11, 128)))
decoder.add(Conv2D(64, (5, 5), activation="relu", padding="same",
kernel_initializer=initializer))
decoder.add(UpSampling2D())
decoder.add(Conv2D(32, (5, 5), activation="relu", padding="same",
kernel_initializer=initializer))
decoder.add(UpSampling2D())
decoder.add(Conv2D(2, (5, 5), activation="sigmoid", padding="same",
kernel_initializer=initializer))
decoder.add(Cropping2D(cropping=((2, 1), (1, 1))))
if info:
print(decoder.summary())
return decoder
def build_omata_encoder_decoder(input_shape, latent_dim, initializer,
info=False, act="elu", dense_act=None):
encoder = Sequential()
encoder.add(Conv2D(16, (3, 3), padding="same", activation=act,
input_shape=input_shape,
kernel_initializer=initializer))
encoder.add(MaxPool2D(padding="same"))
encoder.add(Conv2D(8, (3, 3), activation=act,
padding="same", kernel_initializer=initializer))
encoder.add(MaxPool2D(padding="same"))
encoder.add(Conv2D(8, (3, 3), activation=act,
padding="same", kernel_initializer=initializer))
encoder.add(MaxPool2D(padding="same"))
encoder.add(Flatten())
encoder.add(Dense(latent_dim, activation="linear"))
if info:
print(encoder.summary())
decoder = Sequential()
decoder.add(Dense(392, input_dim=latent_dim,
kernel_initializer=initializer,
activation=dense_act))
decoder.add(Reshape((encoder.layers[6].input_shape[1],
encoder.layers[6].input_shape[1], 8)))
decoder.add(Conv2D(8, (3, 3), activation=act, padding="same",
kernel_initializer=initializer))
decoder.add(UpSampling2D())
decoder.add(Conv2D(8, (3, 3), activation=act, padding="same",
kernel_initializer=initializer))
decoder.add(UpSampling2D())
decoder.add(Conv2D(16, (3, 3), activation=act, padding="same",
kernel_initializer=initializer))
decoder.add(UpSampling2D())
decoder.add(Conv2D(2, (3, 3), activation="linear", padding="same",
kernel_initializer=initializer))
decoder.add(Cropping2D(cropping=((1, 0), (1, 1))))
if info:
print(decoder.summary())
return encoder, decoder
def build_wider_omata_encoder_decoder(input_shape, latent_dim, initializer,
info=False, act="elu", dense_act=None):
encoder = Sequential()
encoder.add(Conv2D(16, (5, 5), padding="same", activation=act,
input_shape=input_shape,
kernel_initializer=initializer))
encoder.add(MaxPool2D(padding="same"))
encoder.add(Conv2D(32, (3, 3), activation=act,
padding="same", kernel_initializer=initializer))
encoder.add(MaxPool2D(padding="same"))
encoder.add(Conv2D(64, (3, 3), activation=act,
padding="same", kernel_initializer=initializer))
encoder.add(MaxPool2D(padding="same"))
encoder.add(Flatten())
encoder.add(Dense(latent_dim, activation="linear"))
if info:
print(encoder.summary())
decoder = Sequential()
decoder.add(Dense(2688, input_dim=latent_dim,
kernel_initializer=initializer,
activation=dense_act))
decoder.add(Reshape((encoder.layers[6].input_shape[1],
encoder.layers[6].input_shape[2], 64)))
decoder.add(Conv2D(64, (3, 3), activation=act, padding="same",
kernel_initializer=initializer))
decoder.add(UpSampling2D())
decoder.add(Conv2D(32, (3, 3), activation=act, padding="same",
kernel_initializer=initializer))
decoder.add(UpSampling2D())
decoder.add(Conv2D(16, (5, 5), activation=act, padding="same",
kernel_initializer=initializer))
decoder.add(UpSampling2D())
decoder.add(Conv2D(2, (3, 3), activation="linear", padding="same",
kernel_initializer=initializer))
decoder.add(Cropping2D(cropping=((1, 0), (3, 3))))
if info:
print(decoder.summary())
return encoder, decoder
def build_wide_omata_encoder_decoder(input_shape, latent_dim, initializer,
info=False, act="elu", dense_act=None):
encoder = Sequential()
encoder.add(Conv2D(32, (5, 5), padding="same", activation=act,
input_shape=input_shape,
kernel_initializer=initializer))
encoder.add(MaxPool2D(padding="same"))
encoder.add(Conv2D(64, (3, 3), activation=act,
padding="same", kernel_initializer=initializer))
encoder.add(MaxPool2D(padding="same"))
encoder.add(Conv2D(128, (3, 3), activation=act,
padding="same", kernel_initializer=initializer))
encoder.add(MaxPool2D(padding="same"))
encoder.add(Flatten())
encoder.add(Dense(latent_dim, activation="linear"))
if info:
print(encoder.summary())
decoder = Sequential()
decoder.add(Dense(5376, input_dim=latent_dim,
kernel_initializer=initializer,
activation=dense_act))
decoder.add(Reshape((encoder.layers[6].input_shape[1],
encoder.layers[6].input_shape[2], 128)))
decoder.add(Conv2D(128, (3, 3), activation=act, padding="same",
kernel_initializer=initializer))
decoder.add(UpSampling2D())
decoder.add(Conv2D(64, (3, 3), activation=act, padding="same",
kernel_initializer=initializer))
decoder.add(UpSampling2D())
decoder.add(Conv2D(32, (3, 3), activation=act, padding="same",
kernel_initializer=initializer))
decoder.add(UpSampling2D())
decoder.add(Conv2D(2, (3, 3), activation="linear", padding="same",
kernel_initializer=initializer))
decoder.add(Cropping2D(cropping=((1, 0), (3, 3))))
if info:
print(decoder.summary())
return encoder, decoder
def build_deeper_omata_encoder_decoder(input_shape, latent_dim, initializer,
info=False, act="elu", dense_act=None):
encoder = Sequential()
encoder.add(Conv2D(32, (5, 5), padding="same", activation=act,
input_shape=input_shape,
kernel_initializer=initializer))
encoder.add(MaxPool2D(padding="same"))
encoder.add(Conv2D(64, (3, 3), activation=act,
padding="same", kernel_initializer=initializer))
encoder.add(MaxPool2D(padding="same"))
encoder.add(Conv2D(64, (3, 3), activation=act,
padding="same", kernel_initializer=initializer))
encoder.add(MaxPool2D(padding="same"))
encoder.add(Conv2D(128, (3, 3), activation=act,
padding="same", kernel_initializer=initializer))
encoder.add(MaxPool2D(padding="same"))
encoder.add(Flatten())
encoder.add(Dense(latent_dim, activation="linear"))
if info:
print(encoder.summary())
decoder = Sequential()
decoder.add(Dense(1536, input_dim=latent_dim,
kernel_initializer=initializer,
activation=dense_act))
decoder.add(Reshape((encoder.layers[8].input_shape[1],
encoder.layers[8].input_shape[2], 128)))
decoder.add(Conv2D(128, (3, 3), activation=act, padding="same",
kernel_initializer=initializer))
decoder.add(UpSampling2D())
decoder.add(Conv2D(64, (3, 3), activation=act, padding="same",
kernel_initializer=initializer))
decoder.add(UpSampling2D())
decoder.add(Conv2D(64, (3, 3), activation=act, padding="same",
kernel_initializer=initializer))
decoder.add(UpSampling2D())
decoder.add(Conv2D(32, (3, 3), activation=act, padding="valid",
kernel_initializer=initializer))
decoder.add(UpSampling2D())
decoder.add(Conv2D(2, (3, 3), activation="linear", padding="valid",
kernel_initializer=initializer))
decoder.add(Cropping2D(cropping=((2, 1), (0, 0))))
if info:
print(decoder.summary())
return encoder, decoder
def build_denser_omata_encoder_decoder(input_shape, latent_dim, initializer,
info=False, act="elu", dense_act=None):
encoder = Sequential()
encoder.add(Conv2D(32, (5, 5), padding="same", activation=act,
input_shape=input_shape,
kernel_initializer=initializer))
encoder.add(MaxPool2D(padding="same"))
encoder.add(Conv2D(64, (3, 3), activation=act,
padding="same", kernel_initializer=initializer))
encoder.add(MaxPool2D(padding="same"))
encoder.add(Conv2D(128, (3, 3), activation=act,
padding="same", kernel_initializer=initializer))
encoder.add(MaxPool2D(padding="same"))
encoder.add(Flatten())
encoder.add(Dense(int(5376/2),
kernel_initializer=initializer,
activation=dense_act))
encoder.add(Dense(latent_dim, activation="linear"))
if info:
print(encoder.summary())
decoder = Sequential()
decoder.add(Dense(int(5376/2),
kernel_initializer=initializer,
activation=dense_act,
input_shape=(latent_dim,)))
decoder.add(Dense(5376,
kernel_initializer=initializer,
activation=dense_act,
input_shape=(int(5376/2),)))
decoder.add(Reshape((encoder.layers[6].input_shape[1],
encoder.layers[6].input_shape[2], 128)))
decoder.add(Conv2D(128, (3, 3), activation=act, padding="same",
kernel_initializer=initializer))
decoder.add(UpSampling2D())
decoder.add(Conv2D(64, (3, 3), activation=act, padding="same",
kernel_initializer=initializer))
decoder.add(UpSampling2D())
decoder.add(Conv2D(32, (3, 3), activation=act, padding="same",
kernel_initializer=initializer))
decoder.add(UpSampling2D())
decoder.add(Conv2D(2, (3, 3), activation="linear", padding="same",
kernel_initializer=initializer))
decoder.add(Cropping2D(cropping=((1, 0), (3, 3))))
decoder.build(input_shape)
if info:
print(decoder.summary())
return encoder, decoder
def build_densest_omata_encoder_decoder(input_shape, latent_dim, initializer,
info=False, act="elu", dense_act=None):
encoder = Sequential()
encoder.add(Conv2D(32, (5, 5), padding="same", activation=act,
input_shape=input_shape,
kernel_initializer=initializer))
encoder.add(MaxPool2D(padding="same"))
encoder.add(Conv2D(64, (3, 3), activation=act,
padding="same", kernel_initializer=initializer))
encoder.add(MaxPool2D(padding="same"))
encoder.add(Flatten())
encoder.add(Dense(int(9856),
kernel_initializer=initializer,
activation=dense_act))
encoder.add(Dense(int(9856/2),
kernel_initializer=initializer,
activation=dense_act))
encoder.add(Dense(latent_dim, activation="linear"))
if info:
print(encoder.summary())
decoder = Sequential()
decoder.add(Dense(int(9856/2),
kernel_initializer=initializer,
activation=dense_act,
input_shape=(latent_dim,)))
decoder.add(Dense(9856,
kernel_initializer=initializer,
activation=dense_act,
input_shape=(int(9856/2),)))
decoder.add(Dense(9856,
kernel_initializer=initializer,
activation=dense_act,
input_shape=(int(9856),)))
decoder.add(Reshape((encoder.layers[4].input_shape[1],
encoder.layers[4].input_shape[2], 64)))
decoder.add(Conv2D(64, (3, 3), activation=act, padding="same",
kernel_initializer=initializer))
decoder.add(UpSampling2D())
decoder.add(Conv2D(32, (3, 3), activation=act, padding="same",
kernel_initializer=initializer))
decoder.add(UpSampling2D())
decoder.add(Conv2D(2, (3, 3), activation="linear", padding="same",
kernel_initializer=initializer))
decoder.add(Cropping2D(cropping=((1, 0), (1, 1))))
decoder.build(input_shape)
if info:
print(decoder.summary())
return encoder, decoder
def build_agostini_encoder_decoder(input_shape, latent_dim, initializer,
info=False):
encoder = Sequential()
encoder.add(Conv2D(16, (5, 5), padding="same", activation="relu",
input_shape=input_shape,
kernel_initializer=initializer))
encoder.add(MaxPool2D(padding="same"))
encoder.add(Conv2D(32, (3, 3), activation="relu",
padding="same", kernel_initializer=initializer))
encoder.add(MaxPool2D(padding="same"))
encoder.add(Conv2D(64, (3, 3), activation="relu",
padding="same", kernel_initializer=initializer))
encoder.add(MaxPool2D(padding="same"))
encoder.add(Flatten())
encoder.add(Dense(latent_dim, activation="linear"))
if info:
print(encoder.summary())
decoder = Sequential()
decoder.add(Dense(9856, input_dim=latent_dim,
kernel_initializer=initializer))
decoder.add(Reshape((encoder.layers[5].input_shape[1],
encoder.layers[5].input_shape[2], 16)))
decoder.add(Conv2D(64, (3, 3), activation="relu", padding="same",
kernel_initializer=initializer))
decoder.add(UpSampling2D())
decoder.add(Conv2D(32, (3, 3), activation="relu", padding="same",
kernel_initializer=initializer))
decoder.add(UpSampling2D())
decoder.add(Conv2D(16, (5, 5), activation="sigmoid", padding="same",
kernel_initializer=initializer))
decoder.add(Conv2D(2, (3, 3), activation="sigmoid", padding="same",
kernel_initializer=initializer))
decoder.add(Cropping2D(cropping=((1, 2), (1, 1))))
if info:
print(decoder.summary())
return encoder, decoder
def build_mnist_wide_omata_encoder_decoder(input_shape, latent_dim,
initializer, info=False):
encoder = Sequential()
encoder.add(Conv2D(128, (3, 3), padding="same", activation="relu",
input_shape=input_shape,
kernel_initializer=initializer))
encoder.add(MaxPool2D(padding="same"))
encoder.add(Conv2D(64, (3, 3), activation="relu",
padding="same", kernel_initializer=initializer))
encoder.add(MaxPool2D(padding="same"))
encoder.add(Conv2D(32, (3, 3), activation="relu",
padding="same", kernel_initializer=initializer))
encoder.add(MaxPool2D(padding="same"))
encoder.add(Flatten())
encoder.add(Dense(latent_dim, activation="linear"))
if info:
print(encoder.summary())
decoder = Sequential()
decoder.add(Dense(784, input_dim=latent_dim,
kernel_initializer=initializer))
decoder.add(Reshape((encoder.layers[5].input_shape[1],
encoder.layers[5].input_shape[2], 16)))
decoder.add(Conv2D(32, (3, 3), activation="relu", padding="same",
kernel_initializer=initializer))
decoder.add(UpSampling2D())
decoder.add(Conv2D(64, (3, 3), activation="relu", padding="same",
kernel_initializer=initializer))
decoder.add(UpSampling2D())
decoder.add(Conv2D(1, (3, 3), activation="sigmoid", padding="same",
kernel_initializer=initializer))
decoder.add(Cropping2D(cropping=((0, 0), (0, 0))))
if info:
print(decoder.summary())
return encoder, decoder
| true | true |
f7fd8d118cefc62e3e3570851035ec70d87acec4 | 6,400 | py | Python | cea/tests/create_unittest_data.py | justinfmccarty/CityEnergyAnalyst_bigmacc | a7f2d6085e83730bdc4bcb2321e1613070372027 | [
"MIT"
] | null | null | null | cea/tests/create_unittest_data.py | justinfmccarty/CityEnergyAnalyst_bigmacc | a7f2d6085e83730bdc4bcb2321e1613070372027 | [
"MIT"
] | null | null | null | cea/tests/create_unittest_data.py | justinfmccarty/CityEnergyAnalyst_bigmacc | a7f2d6085e83730bdc4bcb2321e1613070372027 | [
"MIT"
] | null | null | null | """
Create the data for cea/tests/test_calc_thermal_loads.py
Run this script when the core algorithms get updated and the unittests in ``test_calc_thermal_loads.py`` stop working.
The script overwrites the file ``cea/tests/test_calc_thermal_loads.config`` which contains the data used for the
unit tests. You can safely ignore the output printed to STDOUT - it is used for debugging purposes only.
NOTE: Check first to make sure the core algorithms are correct, i.e. the changes to the outputs behave as expected.
"""
import configparser
import json
import os
import tempfile
import zipfile
import pandas as pd
from cea.demand.building_properties import BuildingProperties
from cea.demand.schedule_maker.schedule_maker import schedule_maker_main
from cea.demand.thermal_loads import calc_thermal_loads
from cea.inputlocator import InputLocator
from cea.utilities import epwreader
from cea.utilities.date import get_date_range_hours_from_year
def main(output_file):
    """Run the CEA demand calculation on the bundled ``reference-case-open``
    scenario and write the resulting reference values to ``output_file``
    (an INI-style config file read by ``test_calc_thermal_loads.py``).

    :param output_file: path of the config file to create or overwrite.
    """
    # FIX: `cea.config` is used below but was never imported; importing
    # `cea.examples` alone does not bind the `config` submodule.
    import cea.config
    import cea.examples

    # Extract the bundled reference case into the system temp directory.
    archive = zipfile.ZipFile(os.path.join(os.path.dirname(cea.examples.__file__), 'reference-case-open.zip'))
    archive.extractall(tempfile.gettempdir())
    reference_case = os.path.join(tempfile.gettempdir(), 'reference-case-open', 'baseline')

    locator = InputLocator(reference_case)
    config = cea.config.Configuration(cea.config.DEFAULT_CONFIG)
    weather_path = locator.get_weather('Zug_inducity_2009')
    weather_data = epwreader.epw_reader(weather_path)[
        ['year', 'drybulb_C', 'wetbulb_C', 'relhum_percent', 'windspd_ms', 'skytemp_C']]

    # Run the archetypes mapper to generate the building properties inputs.
    import cea.datamanagement.archetypes_mapper
    cea.datamanagement.archetypes_mapper.archetypes_mapper(locator, True, True, True, True, True, True, [])

    year = weather_data['year'][0]
    date_range = get_date_range_hours_from_year(year)
    resolution_outputs = config.demand.resolution_output
    loads_output = config.demand.loads_output
    massflows_output = config.demand.massflows_output
    temperatures_output = config.demand.temperatures_output
    use_dynamic_infiltration_calculation = config.demand.use_dynamic_infiltration_calculation
    debug = config.debug
    building_properties = BuildingProperties(locator)

    print("data for test_calc_thermal_loads:")
    print(building_properties.list_building_names())

    schedule_maker_main(locator, config, building='B1011')
    bpr = building_properties['B1011']
    # Results are written to disk; the return value is not needed here
    # (the original bound it to an unused `result` variable).
    calc_thermal_loads('B1011', bpr, weather_data, date_range, locator,
                       use_dynamic_infiltration_calculation, resolution_outputs, loads_output,
                       massflows_output, temperatures_output, config,
                       debug)

    # Read back the per-building demand results csv and record the totals.
    df = pd.read_csv(locator.get_demand_results_file('B1011'))
    expected_columns = list(df.columns)
    print("expected_columns = %s" % repr(expected_columns))

    test_config = configparser.ConfigParser()
    test_config.read(output_file)

    value_columns = [u"E_sys_kWh", u"Qcdata_sys_kWh", u"Qcre_sys_kWh", u"Qcs_sys_kWh", u"Qhs_sys_kWh", u"Qww_sys_kWh",
                     u"Tcs_sys_re_C", u"Ths_sys_re_C", u"Tww_sys_re_C", u"Tcs_sys_sup_C", u"Ths_sys_sup_C",
                     u"Tww_sys_sup_C"]
    values = [float(df[column].sum()) for column in value_columns]
    print("values = %s " % repr(values))

    if not test_config.has_section("test_calc_thermal_loads"):
        test_config.add_section("test_calc_thermal_loads")
    test_config.set("test_calc_thermal_loads", "value_columns", json.dumps(value_columns))
    print(values)
    test_config.set("test_calc_thermal_loads", "values", json.dumps(values))

    print("data for test_calc_thermal_loads_other_buildings:")
    buildings = ['B1013', 'B1012', 'B1010', 'B1000', 'B1009',
                 'B1011', 'B1006', 'B1003', 'B1004', 'B1001',
                 'B1002', 'B1005', 'B1008', 'B1007', 'B1014']
    results = {}
    for building in buildings:
        bpr = building_properties[building]
        b, qhs_sys_kwh, qcs_sys_kwh, qww_sys_kwh = run_for_single_building(
            building, bpr, weather_data, date_range, locator,
            use_dynamic_infiltration_calculation, resolution_outputs, loads_output,
            massflows_output, temperatures_output, config, debug)
        print("'%(b)s': (%(qhs_sys_kwh).5f, %(qcs_sys_kwh).5f, %(qww_sys_kwh).5f)," % locals())
        results[building] = (qhs_sys_kwh, qcs_sys_kwh, qww_sys_kwh)
    if not test_config.has_section("test_calc_thermal_loads_other_buildings"):
        test_config.add_section("test_calc_thermal_loads_other_buildings")
    test_config.set("test_calc_thermal_loads_other_buildings", "results", json.dumps(results))
    with open(output_file, 'w') as f:
        test_config.write(f)
    print("Wrote output to %(output_file)s" % locals())
def run_for_single_building(building, bpr, weather_data, date_range, locator,
                            use_dynamic_infiltration_calculation, resolution_outputs, loads_output,
                            massflows_output, temperatures_output, config, debug):
    """Calculate the thermal loads for one building and return its yearly totals.

    :return: tuple ``(building, Qhs_sys_kWh, Qcs_sys_kWh, Qww_sys_kWh)`` where the
        three sums are read back from the demand results file as plain floats.
    """
    # Run the schedule maker in-process so the results are reproducible.
    config.multiprocessing = False
    schedule_maker_main(locator, config, building=building)
    calc_thermal_loads(building, bpr, weather_data, date_range, locator,
                       use_dynamic_infiltration_calculation, resolution_outputs, loads_output, massflows_output,
                       temperatures_output, config, debug)
    df = pd.read_csv(locator.get_demand_results_file(building))
    # FIX: cast the Qcs_sys_kWh sum to float as well, for consistency with the
    # other two sums (the original returned a numpy scalar for this one only).
    return (building,
            float(df['Qhs_sys_kWh'].sum()),
            float(df['Qcs_sys_kWh'].sum()),
            float(df['Qww_sys_kWh'].sum()))
if __name__ == "__main__":
    # Regenerate the unit-test fixture file that lives next to this script.
    output_file = os.path.join(os.path.dirname(__file__), 'test_calc_thermal_loads.config')
    main(output_file)
| 44.755245 | 118 | 0.654688 |
import configparser
import json
import os
import tempfile
import zipfile
import pandas as pd
from cea.demand.building_properties import BuildingProperties
from cea.demand.schedule_maker.schedule_maker import schedule_maker_main
from cea.demand.thermal_loads import calc_thermal_loads
from cea.inputlocator import InputLocator
from cea.utilities import epwreader
from cea.utilities.date import get_date_range_hours_from_year
def main(output_file):
    """Run the CEA demand calculation on the bundled ``reference-case-open``
    scenario and write the resulting reference values to ``output_file``
    (an INI-style config file read by ``test_calc_thermal_loads.py``).

    :param output_file: path of the config file to create or overwrite.
    """
    # FIX: `cea.config` is used below but was never imported; importing
    # `cea.examples` alone does not bind the `config` submodule.
    import cea.config
    import cea.examples

    # Extract the bundled reference case into the system temp directory.
    archive = zipfile.ZipFile(os.path.join(os.path.dirname(cea.examples.__file__), 'reference-case-open.zip'))
    archive.extractall(tempfile.gettempdir())
    reference_case = os.path.join(tempfile.gettempdir(), 'reference-case-open', 'baseline')

    locator = InputLocator(reference_case)
    config = cea.config.Configuration(cea.config.DEFAULT_CONFIG)
    weather_path = locator.get_weather('Zug_inducity_2009')
    weather_data = epwreader.epw_reader(weather_path)[
        ['year', 'drybulb_C', 'wetbulb_C', 'relhum_percent', 'windspd_ms', 'skytemp_C']]

    # Run the archetypes mapper to generate the building properties inputs.
    import cea.datamanagement.archetypes_mapper
    cea.datamanagement.archetypes_mapper.archetypes_mapper(locator, True, True, True, True, True, True, [])

    year = weather_data['year'][0]
    date_range = get_date_range_hours_from_year(year)
    resolution_outputs = config.demand.resolution_output
    loads_output = config.demand.loads_output
    massflows_output = config.demand.massflows_output
    temperatures_output = config.demand.temperatures_output
    use_dynamic_infiltration_calculation = config.demand.use_dynamic_infiltration_calculation
    debug = config.debug
    building_properties = BuildingProperties(locator)

    print("data for test_calc_thermal_loads:")
    print(building_properties.list_building_names())

    schedule_maker_main(locator, config, building='B1011')
    bpr = building_properties['B1011']
    # Results are written to disk; the return value is not needed here.
    calc_thermal_loads('B1011', bpr, weather_data, date_range, locator,
                       use_dynamic_infiltration_calculation, resolution_outputs, loads_output,
                       massflows_output, temperatures_output, config,
                       debug)

    # Read back the per-building demand results csv and record the totals.
    df = pd.read_csv(locator.get_demand_results_file('B1011'))
    expected_columns = list(df.columns)
    print("expected_columns = %s" % repr(expected_columns))

    test_config = configparser.ConfigParser()
    test_config.read(output_file)

    value_columns = [u"E_sys_kWh", u"Qcdata_sys_kWh", u"Qcre_sys_kWh", u"Qcs_sys_kWh", u"Qhs_sys_kWh", u"Qww_sys_kWh",
                     u"Tcs_sys_re_C", u"Ths_sys_re_C", u"Tww_sys_re_C", u"Tcs_sys_sup_C", u"Ths_sys_sup_C",
                     u"Tww_sys_sup_C"]
    values = [float(df[column].sum()) for column in value_columns]
    print("values = %s " % repr(values))

    if not test_config.has_section("test_calc_thermal_loads"):
        test_config.add_section("test_calc_thermal_loads")
    test_config.set("test_calc_thermal_loads", "value_columns", json.dumps(value_columns))
    print(values)
    test_config.set("test_calc_thermal_loads", "values", json.dumps(values))

    print("data for test_calc_thermal_loads_other_buildings:")
    buildings = ['B1013', 'B1012', 'B1010', 'B1000', 'B1009',
                 'B1011', 'B1006', 'B1003', 'B1004', 'B1001',
                 'B1002', 'B1005', 'B1008', 'B1007', 'B1014']
    results = {}
    for building in buildings:
        bpr = building_properties[building]
        b, qhs_sys_kwh, qcs_sys_kwh, qww_sys_kwh = run_for_single_building(
            building, bpr, weather_data, date_range, locator,
            use_dynamic_infiltration_calculation, resolution_outputs, loads_output,
            massflows_output, temperatures_output, config, debug)
        print("'%(b)s': (%(qhs_sys_kwh).5f, %(qcs_sys_kwh).5f, %(qww_sys_kwh).5f)," % locals())
        results[building] = (qhs_sys_kwh, qcs_sys_kwh, qww_sys_kwh)
    if not test_config.has_section("test_calc_thermal_loads_other_buildings"):
        test_config.add_section("test_calc_thermal_loads_other_buildings")
    test_config.set("test_calc_thermal_loads_other_buildings", "results", json.dumps(results))
    with open(output_file, 'w') as f:
        test_config.write(f)
    print("Wrote output to %(output_file)s" % locals())
def run_for_single_building(building, bpr, weather_data, date_range, locator,
                            use_dynamic_infiltration_calculation, resolution_outputs, loads_output,
                            massflows_output, temperatures_output, config, debug):
    """Calculate the thermal loads for one building and return its yearly totals.

    :return: tuple ``(building, Qhs_sys_kWh, Qcs_sys_kWh, Qww_sys_kWh)`` where the
        three sums are read back from the demand results file as plain floats.
    """
    # Run the schedule maker in-process so the results are reproducible.
    config.multiprocessing = False
    schedule_maker_main(locator, config, building=building)
    calc_thermal_loads(building, bpr, weather_data, date_range, locator,
                       use_dynamic_infiltration_calculation, resolution_outputs, loads_output, massflows_output,
                       temperatures_output, config, debug)
    df = pd.read_csv(locator.get_demand_results_file(building))
    # FIX: cast the Qcs_sys_kWh sum to float as well, for consistency with the
    # other two sums (the original returned a numpy scalar for this one only).
    return (building,
            float(df['Qhs_sys_kWh'].sum()),
            float(df['Qcs_sys_kWh'].sum()),
            float(df['Qww_sys_kWh'].sum()))
if __name__ == "__main__":
    # Regenerate the unit-test fixture file that lives next to this script.
    output_file = os.path.join(os.path.dirname(__file__), 'test_calc_thermal_loads.config')
    main(output_file)
| true | true |
f7fd8e015e75b14ad1c05d0881e176eb9503c862 | 422 | py | Python | aliyun/api/rest/Ecs20140526DescribeEipMonitorDataRequest.py | snowyxx/aliyun-python-demo | ed40887ddff440b85b77f9b2a1fcda11cca55c8b | [
"Apache-2.0"
] | null | null | null | aliyun/api/rest/Ecs20140526DescribeEipMonitorDataRequest.py | snowyxx/aliyun-python-demo | ed40887ddff440b85b77f9b2a1fcda11cca55c8b | [
"Apache-2.0"
] | null | null | null | aliyun/api/rest/Ecs20140526DescribeEipMonitorDataRequest.py | snowyxx/aliyun-python-demo | ed40887ddff440b85b77f9b2a1fcda11cca55c8b | [
"Apache-2.0"
] | null | null | null | '''
Created by auto_sdk on 2015.02.10
'''
from aliyun.api.base import RestApi
class Ecs20140526DescribeEipMonitorDataRequest(RestApi):
    """Request object for the ECS ``DescribeEipMonitorData`` API (version 2014-05-26).

    Query parameters are exposed as attributes and start out unset (``None``);
    the caller fills them in before sending the request.
    """

    def __init__(self, domain='ecs.aliyuncs.com', port=80):
        RestApi.__init__(self, domain, port)
        # API query parameters (names match the wire-level parameter names).
        self.AllocationId = None
        self.EndTime = None
        self.Period = None
        self.StartTime = None

    def getapiname(self):
        """Return the fully qualified API name used for request routing."""
        return 'ecs.aliyuncs.com.DescribeEipMonitorData.2014-05-26'
| 28.133333 | 62 | 0.739336 | from aliyun.api.base import RestApi
class Ecs20140526DescribeEipMonitorDataRequest(RestApi):
    """Request object for the ECS ``DescribeEipMonitorData`` API (version 2014-05-26).

    Query parameters are exposed as attributes and start out unset (``None``);
    the caller fills them in before sending the request.
    """

    def __init__(self, domain='ecs.aliyuncs.com', port=80):
        RestApi.__init__(self, domain, port)
        # API query parameters (names match the wire-level parameter names).
        self.AllocationId = None
        self.EndTime = None
        self.Period = None
        self.StartTime = None

    def getapiname(self):
        """Return the fully qualified API name used for request routing."""
        return 'ecs.aliyuncs.com.DescribeEipMonitorData.2014-05-26'
| true | true |
f7fd8f9438f124c05d135be072ef0ebc0cd67ec4 | 5,007 | py | Python | models/cifar/resnet.py | awwong1/ml-research | 6f0bb585fef0c4567a5f02937fea62726b9c88dd | [
"MIT"
] | null | null | null | models/cifar/resnet.py | awwong1/ml-research | 6f0bb585fef0c4567a5f02937fea62726b9c88dd | [
"MIT"
] | null | null | null | models/cifar/resnet.py | awwong1/ml-research | 6f0bb585fef0c4567a5f02937fea62726b9c88dd | [
"MIT"
] | null | null | null | from __future__ import absolute_import
import torch.nn as nn
import math
__all__ = ["resnet"]
class BasicBlock(nn.Module):
    """3x3 + 3x3 residual block for CIFAR ResNets (channel expansion 1).

    The spatial stride is applied only by the first convolution; the second
    convolution always uses stride 1 so the block output matches the
    (optionally downsampled) residual branch.
    """

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        """
        :param inplanes: number of input channels
        :param planes: number of output channels
        :param stride: spatial stride of the block (applied in ``conv1`` only)
        :param downsample: optional module projecting the residual to the output
            shape when ``stride != 1`` or the channel count changes
        """
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(
            inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False
        )
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        # FIX: conv2 must use stride=1 (the original passed `stride=stride`).
        # With stride > 1 the block downsampled twice while the shortcut
        # downsampled once, so the residual addition raised a shape mismatch.
        self.conv2 = nn.Conv2d(
            planes, planes, kernel_size=3, stride=1, padding=1, bias=False
        )
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (channel expansion 4).

    The 3x3 convolution carries the spatial stride; the two 1x1 convolutions
    change the channel count only. The shortcut is added before the final ReLU.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: identity, or a projection when shapes differ.
        shortcut = x if self.downsample is None else self.downsample(x)
        # Main branch: conv-bn-relu, conv-bn-relu, conv-bn.
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        # Residual addition followed by the final activation.
        y += shortcut
        return self.relu(y)
class ResNet(nn.Module):
    """ResNet for CIFAR10/100 dataset.

    Three stages of residual blocks over 32x32 inputs (16/32/64 base channels),
    global 8x8 average pooling and a final linear classifier.
    `depth` must be 6n+2 for BasicBlock or 9n+2 for Bottleneck.
    """

    def __init__(self, depth, num_classes=1000, block_name="BasicBlock"):
        super(ResNet, self).__init__()
        # Model type specifies number of layers for CIFAR-10 model
        if block_name.lower() == "basicblock":
            assert (
                depth - 2
            ) % 6 == 0, "When use basicblock, depth should be 6n+2, e.g. 20, 32, 44, 56, 110, 1202"
            n = (depth - 2) // 6
            block = BasicBlock
        elif block_name.lower() == "bottleneck":
            assert (
                depth - 2
            ) % 9 == 0, "When use bottleneck, depth should be 9n+2, e.g. 20, 29, 47, 56, 110, 1199"
            n = (depth - 2) // 9
            block = Bottleneck
        else:
            raise ValueError("block_name shoule be Basicblock or Bottleneck")
        # Stem: single 3x3 conv; stages 2 and 3 halve the spatial size.
        self.inplanes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 16, n)
        self.layer2 = self._make_layer(block, 32, n, stride=2)
        self.layer3 = self._make_layer(block, 64, n, stride=2)
        self.avgpool = nn.AvgPool2d(8)
        self.fc = nn.Linear(64 * block.expansion, num_classes)
        # He (fan-out) initialization for convolutions; BatchNorm starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        # Add a 1x1 projection shortcut when the first block of the stage
        # changes the spatial size or the channel count.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=False,
                ),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)  # 32x32

        x = self.layer1(x)  # 32x32
        x = self.layer2(x)  # 16x16
        x = self.layer3(x)  # 8x8

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, 64 * expansion)
        x = self.fc(x)

        return x
def resnet(**kwargs):
    """Factory wrapper: build a CIFAR :class:`ResNet` from keyword arguments
    (``depth``, ``num_classes``, ``block_name``)."""
    model = ResNet(**kwargs)
    return model
| 29.982036 | 99 | 0.551628 | from __future__ import absolute_import
import torch.nn as nn
import math
__all__ = ["resnet"]
class BasicBlock(nn.Module):
    """3x3 + 3x3 residual block for CIFAR ResNets (channel expansion 1).

    The spatial stride is applied only by the first convolution; the second
    convolution always uses stride 1 so the block output matches the
    (optionally downsampled) residual branch.
    """

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        """
        :param inplanes: number of input channels
        :param planes: number of output channels
        :param stride: spatial stride of the block (applied in ``conv1`` only)
        :param downsample: optional module projecting the residual to the output
            shape when ``stride != 1`` or the channel count changes
        """
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(
            inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False
        )
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        # FIX: conv2 must use stride=1 (the original passed `stride=stride`).
        # With stride > 1 the block downsampled twice while the shortcut
        # downsampled once, so the residual addition raised a shape mismatch.
        self.conv2 = nn.Conv2d(
            planes, planes, kernel_size=3, stride=1, padding=1, bias=False
        )
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (channel expansion 4).

    The 3x3 convolution carries the spatial stride; the two 1x1 convolutions
    change the channel count only. The shortcut is added before the final ReLU.
    """
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(
            planes, planes, kernel_size=3, stride=stride, padding=1, bias=False
        )
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        # downsample projects the shortcut when shapes differ; None = identity.
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        # residual addition before the final activation
        out += residual
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    """ResNet for the CIFAR-10/100 image sizes: three stages of residual blocks
    over 32x32 inputs (16/32/64 base channels), global 8x8 average pooling and
    a final linear classifier. `depth` must be 6n+2 (BasicBlock) or 9n+2
    (Bottleneck)."""
    def __init__(self, depth, num_classes=1000, block_name="BasicBlock"):
        super(ResNet, self).__init__()
        # derive the per-stage block count n from the requested depth
        if block_name.lower() == "basicblock":
            assert (
                depth - 2
            ) % 6 == 0, "When use basicblock, depth should be 6n+2, e.g. 20, 32, 44, 56, 110, 1202"
            n = (depth - 2) // 6
            block = BasicBlock
        elif block_name.lower() == "bottleneck":
            assert (
                depth - 2
            ) % 9 == 0, "When use bottleneck, depth should be 9n+2, e.g. 20, 29, 47, 56, 110, 1199"
            n = (depth - 2) // 9
            block = Bottleneck
        else:
            raise ValueError("block_name shoule be Basicblock or Bottleneck")
        # Stem: single 3x3 conv; stages 2 and 3 halve the spatial size.
        self.inplanes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 16, n)
        self.layer2 = self._make_layer(block, 32, n, stride=2)
        self.layer3 = self._make_layer(block, 64, n, stride=2)
        self.avgpool = nn.AvgPool2d(8)
        self.fc = nn.Linear(64 * block.expansion, num_classes)
        # He (fan-out) initialization for convolutions; BatchNorm starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        # Add a 1x1 projection shortcut when the first block of the stage
        # changes the spatial size or the channel count.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(
                    self.inplanes,
                    planes * block.expansion,
                    kernel_size=1,
                    stride=stride,
                    bias=False,
                ),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, 64 * expansion)
        x = self.fc(x)
        return x
def resnet(**kwargs):
    """Construct a CIFAR :class:`ResNet` from keyword arguments
    (``depth``, ``num_classes``, ``block_name``)."""
    return ResNet(**kwargs)
| true | true |
f7fd8fa543efd1a5b16d1377ea04cd2f6d555a6b | 34,003 | py | Python | greykite/tests/framework/output/test_univariate_forecast.py | goncaloperes/greykite | 160bb3ada71e3c778e1fb3d242676c42ff619e3a | [
"BSD-2-Clause"
] | 1 | 2021-11-17T03:02:24.000Z | 2021-11-17T03:02:24.000Z | greykite/tests/framework/output/test_univariate_forecast.py | goncaloperes/greykite | 160bb3ada71e3c778e1fb3d242676c42ff619e3a | [
"BSD-2-Clause"
] | null | null | null | greykite/tests/framework/output/test_univariate_forecast.py | goncaloperes/greykite | 160bb3ada71e3c778e1fb3d242676c42ff619e3a | [
"BSD-2-Clause"
] | null | null | null | import datetime
import math
import sys
from functools import partial
import numpy as np
import pandas as pd
import pytest
from pandas.util.testing import assert_frame_equal
from pandas.util.testing import assert_series_equal
from sklearn.pipeline import Pipeline
from greykite.common import constants as cst
from greykite.common.evaluation import ElementwiseEvaluationMetricEnum
from greykite.common.evaluation import EvaluationMetricEnum
from greykite.common.python_utils import assert_equal
from greykite.common.testing_utils import gen_sliced_df
from greykite.framework.input.univariate_time_series import UnivariateTimeSeries
from greykite.framework.output.univariate_forecast import UnivariateForecast
from greykite.framework.pipeline.utils import get_forecast
from greykite.sklearn.estimator.prophet_estimator import ProphetEstimator
from greykite.sklearn.estimator.silverkite_estimator import SilverkiteEstimator
try:
import fbprophet # noqa
except ModuleNotFoundError:
pass
@pytest.fixture
def df():
    """Small 4-row forecast result frame shared by the tests: actual values,
    point predictions, lower/upper prediction bounds and a constant
    null-model baseline forecast."""
    timestamps = [datetime.datetime(2018, 1, day) for day in range(1, 5)]
    columns = {
        cst.TIME_COL: timestamps,
        cst.ACTUAL_COL: [1, 2, 3, 4],
        cst.PREDICTED_COL: [1, 4, 1, 2],
        cst.PREDICTED_LOWER_COL: [1, 1, 1, 1],
        cst.PREDICTED_UPPER_COL: [4, 5, 4, 4],
        cst.NULL_PREDICTED_COL: [1.5, 1.5, 1.5, 1.5],
    }
    return pd.DataFrame(columns)
@pytest.fixture
def df2():
    """7-row daily forecast result frame (float-valued) used by the
    grouped-evaluation tests."""
    columns = {
        cst.TIME_COL: pd.date_range(start="2018-01-01", periods=7),
        cst.ACTUAL_COL: [float(v) for v in range(1, 8)],
        cst.PREDICTED_COL: [1.0, 4.0, 3.0, 2.0, 3.0, 4.0, 8.0],
        cst.PREDICTED_LOWER_COL: [1.0] * 7,
        cst.PREDICTED_UPPER_COL: [4.0, 5.0, 4.0, 4.0, 5.0, 6.0, 9.0],
        cst.NULL_PREDICTED_COL: [1.5] * 7,
    }
    return pd.DataFrame(columns)
def test_univariate_forecast(df):
    """Checks univariate forecast class: split sizes, evaluation metrics and
    prediction-band validation metrics, with and without an explicit
    ``test_start_date`` / ``relative_error_tolerance``."""
    # Without test_start_date
    forecast = UnivariateForecast(
        df,
        train_end_date=datetime.datetime(2018, 1, 2),
        test_start_date=None,
        forecast_horizon=2)
    assert forecast.forecast_horizon == 2
    assert forecast.df_train.shape == (2, 6)
    assert forecast.df_test.shape == (2, 6)
    assert forecast.relative_error_tolerance is None
    # evaluation metrics
    enum = EvaluationMetricEnum.Correlation
    assert forecast.train_evaluation[enum.get_metric_name()] == 1.0
    assert forecast.test_evaluation[enum.get_metric_name()] == 1.0
    enum = EvaluationMetricEnum.MeanAbsoluteError
    assert forecast.train_evaluation[enum.get_metric_name()] == 1.0
    assert forecast.test_evaluation[enum.get_metric_name()] == 2.0
    enum = EvaluationMetricEnum.RootMeanSquaredError
    assert forecast.train_evaluation[enum.get_metric_name()] == math.sqrt(2)
    assert forecast.test_evaluation[enum.get_metric_name()] == 2.0
    enum = EvaluationMetricEnum.MedianAbsoluteError
    assert forecast.train_evaluation[enum.get_metric_name()] == 1.0
    assert forecast.test_evaluation[enum.get_metric_name()] == 2.0
    enum = EvaluationMetricEnum.MeanAbsolutePercentError
    assert forecast.train_evaluation[enum.get_metric_name()] == 50.0
    assert forecast.test_evaluation[enum.get_metric_name()] == pytest.approx(58.33333, 1e-4)
    assert forecast.train_evaluation[cst.R2_null_model_score] == -7.0
    assert forecast.test_evaluation[cst.R2_null_model_score] == pytest.approx(0.058824, 1e-4)
    # no relative_error_tolerance was given, so this metric is not computed
    assert forecast.train_evaluation[cst.FRACTION_OUTSIDE_TOLERANCE] is None
    assert forecast.test_evaluation[cst.FRACTION_OUTSIDE_TOLERANCE] is None
    # validation metrics
    assert forecast.train_evaluation[cst.PREDICTION_BAND_WIDTH] == 250.0
    assert forecast.test_evaluation[cst.PREDICTION_BAND_WIDTH] == 87.5
    assert forecast.train_evaluation[cst.PREDICTION_BAND_COVERAGE] == 0.5
    assert forecast.test_evaluation[cst.PREDICTION_BAND_COVERAGE] == 0.5
    assert forecast.train_evaluation[cst.LOWER_BAND_COVERAGE] == 0.5
    assert forecast.test_evaluation[cst.LOWER_BAND_COVERAGE] == 0.0
    assert forecast.train_evaluation[cst.UPPER_BAND_COVERAGE] == 0.0
    assert forecast.test_evaluation[cst.UPPER_BAND_COVERAGE] == 0.5
    assert forecast.train_evaluation[cst.COVERAGE_VS_INTENDED_DIFF] == pytest.approx(-0.45)
    assert forecast.test_evaluation[cst.COVERAGE_VS_INTENDED_DIFF] == pytest.approx(-0.45)
    # With test_start_date, relative_error_tolerance
    with pytest.warns(UserWarning):
        forecast = UnivariateForecast(
            df,
            train_end_date=datetime.datetime(2018, 1, 2),
            test_start_date=datetime.datetime(2018, 1, 4),
            relative_error_tolerance=0.05)
    assert forecast.forecast_horizon is None
    assert forecast.df_train.shape == (2, 6)
    assert forecast.df_test.shape == (1, 6)
    assert forecast.relative_error_tolerance == 0.05
    # evaluation metrics (train_metrics remain the same, test_metrics change)
    enum = EvaluationMetricEnum.Correlation
    assert forecast.train_evaluation[enum.get_metric_name()] == 1.0
    assert forecast.test_evaluation[enum.get_metric_name()] is None
    enum = EvaluationMetricEnum.MeanAbsoluteError
    assert forecast.train_evaluation[enum.get_metric_name()] == 1.0
    assert forecast.test_evaluation[enum.get_metric_name()] == 2.0
    enum = EvaluationMetricEnum.RootMeanSquaredError
    assert forecast.train_evaluation[enum.get_metric_name()] == math.sqrt(2)
    assert forecast.test_evaluation[enum.get_metric_name()] == 2.0
    enum = EvaluationMetricEnum.MedianAbsoluteError
    assert forecast.train_evaluation[enum.get_metric_name()] == 1.0
    assert forecast.test_evaluation[enum.get_metric_name()] == 2.0
    enum = EvaluationMetricEnum.MeanAbsolutePercentError
    assert forecast.train_evaluation[enum.get_metric_name()] == 50.0
    assert forecast.test_evaluation[enum.get_metric_name()] == 50.0
    assert forecast.train_evaluation[cst.R2_null_model_score] == -7.0
    assert forecast.test_evaluation[cst.R2_null_model_score] == 0.36
    assert forecast.train_evaluation[cst.FRACTION_OUTSIDE_TOLERANCE] == 0.5
    assert forecast.test_evaluation[cst.FRACTION_OUTSIDE_TOLERANCE] == 1.0
    # validation metrics
    assert forecast.train_evaluation[cst.PREDICTION_BAND_WIDTH] == 250.0
    assert forecast.test_evaluation[cst.PREDICTION_BAND_WIDTH] == 75.0
    assert forecast.train_evaluation[cst.PREDICTION_BAND_COVERAGE] == 0.5
    assert forecast.test_evaluation[cst.PREDICTION_BAND_COVERAGE] == 0.0
    assert forecast.train_evaluation[cst.LOWER_BAND_COVERAGE] == 0.5
    assert forecast.test_evaluation[cst.LOWER_BAND_COVERAGE] == 0.0
    assert forecast.train_evaluation[cst.UPPER_BAND_COVERAGE] == 0.0
    assert forecast.test_evaluation[cst.UPPER_BAND_COVERAGE] == 0.0
    assert forecast.train_evaluation[cst.COVERAGE_VS_INTENDED_DIFF] == pytest.approx(-0.45)
    assert forecast.test_evaluation[cst.COVERAGE_VS_INTENDED_DIFF] == pytest.approx(-0.95)
def test_subset_columns(df):
    """Tests if intervals and null prediction are truly optional,
    and relative_error_tolerance parameter"""
    # Only time/actual/predicted columns provided: interval- and null-model
    # based metrics must come back as None, everything else unchanged.
    forecast = UnivariateForecast(df[[cst.TIME_COL, cst.ACTUAL_COL, cst.PREDICTED_COL]],
                                  predicted_lower_col=None,
                                  predicted_upper_col=None,
                                  null_model_predicted_col=None,
                                  train_end_date=datetime.datetime(2018, 1, 2),
                                  relative_error_tolerance=0.7)
    forecast_full = UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 2))
    # point-forecast metrics agree with the fully-specified forecast
    for enum in EvaluationMetricEnum:
        assert forecast.train_evaluation[enum.get_metric_name()] == forecast_full.train_evaluation[enum.get_metric_name()]
        assert forecast.test_evaluation[enum.get_metric_name()] == forecast_full.test_evaluation[enum.get_metric_name()]
    # metrics that need bounds / null model are skipped
    for metric in [cst.R2_null_model_score, cst.PREDICTION_BAND_WIDTH, cst.PREDICTION_BAND_COVERAGE, cst.LOWER_BAND_COVERAGE,
                   cst.UPPER_BAND_COVERAGE, cst.COVERAGE_VS_INTENDED_DIFF]:
        assert forecast.train_evaluation[metric] is None
        assert forecast.test_evaluation[metric] is None
    assert forecast.relative_error_tolerance == 0.7
    assert forecast.train_evaluation[cst.FRACTION_OUTSIDE_TOLERANCE] == 0.5
    assert forecast.test_evaluation[cst.FRACTION_OUTSIDE_TOLERANCE] == 0.0
def test_input_validation(df):
    """Tests input validation: invalid coverage, out-of-range train_end_date
    and unknown column names must raise ValueError."""
    with pytest.raises(ValueError, match="`coverage` must be provided"):
        UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 2), coverage=None)
    with pytest.raises(ValueError, match="`coverage` must be between 0.0 and 1.0"):
        UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 2), coverage=80.0)
    with pytest.raises(ValueError, match="2018-01-05 is not found in time column"):
        UnivariateForecast(df, train_end_date="2018-01-05")
    with pytest.raises(ValueError, match="Column not found in data frame"):
        UnivariateForecast(df, actual_col="not_a_column")
def test_no_train_end_date(df):
    """Tests if train end date can be None: the whole frame becomes the train
    set, equivalent to passing the last timestamp explicitly, and there is
    no test evaluation."""
    forecast = UnivariateForecast(
        df,
        train_end_date=None)
    forecast2 = UnivariateForecast(
        df,
        train_end_date=datetime.datetime(2018, 1, 4))  # last timestamp in `df`
    assert_equal(forecast.train_evaluation, forecast2.train_evaluation)
    assert forecast.test_evaluation is None
def test_partial_test_data():
    """Tests if forecast evaluation can handle partially missing data:
    NA actuals in the test period are counted, warned about, and omitted
    from the metric calculation."""
    df = pd.DataFrame({
        # time column accepts mixed string/datetime values
        cst.TIME_COL: ["2018-01-01", datetime.datetime(2018, 1, 2), "2018-01-03", "2018-01-04", "2018-01-05"],
        cst.ACTUAL_COL: [1, 2, 3, 2, np.nan],  # last actual is missing
        cst.PREDICTED_COL: [1, 4, 1, 2, 4],
        cst.PREDICTED_LOWER_COL: [1, 1, 1, 1, 2],
        cst.PREDICTED_UPPER_COL: [4, 5, 4, 4, 6],
        cst.NULL_PREDICTED_COL: [1.5, 1.5, 1.5, 1.5, 1.5]
    })
    with pytest.warns(UserWarning) as record:
        forecast = UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 2))
        # same data without the NA row; evaluation results must match
        forecast2 = UnivariateForecast(df.iloc[:4, ], train_end_date=datetime.datetime(2018, 1, 2))
    assert forecast.test_na_count == 1
    assert "1 value(s) in y_true were NA or infinite and are omitted in error calc." in record[0].message.args[0:2]
    assert_equal(forecast.train_evaluation, forecast2.train_evaluation)
    assert_equal(forecast.test_evaluation, forecast2.test_evaluation)
def test_no_test_data():
    """Tests if test evaluation is skipped when there are no test data
    (all actuals after the train end date are NA)."""
    df = pd.DataFrame({
        cst.TIME_COL: ["2018-01-01", datetime.datetime(2018, 1, 2), "2018-01-03", "2018-01-04"],
        cst.ACTUAL_COL: [1, 2, np.nan, np.nan],  # test period has no observed values
        cst.PREDICTED_COL: [1, 4, 1, 2],
        cst.PREDICTED_LOWER_COL: [1, 1, 1, 1],
        cst.PREDICTED_UPPER_COL: [4, 5, 4, 4],
        cst.NULL_PREDICTED_COL: [1.5, 1.5, 1.5, 1.5]
    })
    forecast = UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 2))
    assert forecast.test_na_count == 2
    assert forecast.train_evaluation is not None
    assert forecast.test_evaluation is None
def test_custom_loss_function(df):
    """Tests the custom loss function argument used for the R2 null model score."""
    def custom_loss(y_pred, y_true):
        """Square root of the sum of absolute errors."""
        return np.sqrt(np.sum(np.abs(np.array(y_pred) - np.array(y_true))))
    forecast = UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 2), r2_loss_function=custom_loss)
    # train: loss(pred)=sqrt(2), loss(null)=1 -> score = 1 - sqrt(2)
    assert forecast.train_evaluation[cst.R2_null_model_score] == 1 - math.sqrt(2)
    # test: loss(pred)=2, loss(null)=2 -> score = 0
    assert forecast.test_evaluation[cst.R2_null_model_score] == 0
def test_plot(df):
    """Tests plot function: a figure is returned both with a proper
    train/test split and when all data are in the train set."""
    forecast = UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 2))
    fig = forecast.plot()
    assert fig is not None
    forecast = UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 4))
    fig = forecast.plot(vertical_line_color="green")  # custom split-marker color
    assert fig is not None
def test_get_grouping_evaluation(df2):
    """Tests get_grouping_evaluation function: grouping by a time feature,
    by a sliding window, and by a custom column, on train and test sets."""
    forecast = UnivariateForecast(df2, train_end_date=datetime.datetime(2018, 1, 5))
    # MAPE, groupby_time_feature, train set
    metric = EvaluationMetricEnum.MeanAbsolutePercentError
    metric_name = metric.get_metric_name()
    grouped_df = forecast.get_grouping_evaluation(
        score_func=metric.get_metric_func(),
        score_func_name=metric_name,
        which="train",
        groupby_time_feature="dow")
    expected = pd.DataFrame({
        "dow": [1, 2, 3, 4, 5],  # Monday, Tuesday, etc. Time feature is used as column name
        f"train {metric_name}": [0.0, 100.0, 0.0, 50.0, 40.0]
    })
    assert_equal(grouped_df, expected)
    # MSE, groupby_sliding_window_size
    metric = EvaluationMetricEnum.MeanSquaredError
    metric_name = metric.get_metric_name()
    grouped_df = forecast.get_grouping_evaluation(
        score_func=metric.get_metric_func(),
        score_func_name=metric_name,
        which="train",
        groupby_sliding_window_size=2)
    expected = pd.DataFrame({
        # window start dates label each group
        f"{cst.TIME_COL}_downsample": [
            datetime.datetime(2018, 1, 1),
            datetime.datetime(2018, 1, 3),
            datetime.datetime(2018, 1, 5)],
        f"train {metric_name}": [0.0, 2.0, 4.0]
    })
    assert_equal(grouped_df, expected)
    # MAE, groupby_custom_column, test set
    forecast = UnivariateForecast(df2, train_end_date=datetime.datetime(2018, 1, 2))
    metric = EvaluationMetricEnum.MeanAbsoluteError
    custom_groups = pd.Series(["g1", "g2", "g1", "g3", "g2"], name="custom_groups")
    grouped_df = forecast.get_grouping_evaluation(
        score_func=metric.get_metric_func(),
        score_func_name=None,  # None -> generic "metric" label in the column name
        which="test",
        groupby_custom_column=custom_groups)
    expected = pd.DataFrame({
        "custom_groups": ["g1", "g2", "g3"],
        "test metric": [1.0, 1.5, 2.0]
    })
    assert_equal(grouped_df, expected)
def test_plot_grouping_evaluation(df2):
    """Tests `plot_grouping_evaluation`.

    Verifies trace name, axis titles, figure title, and number of x values
    for each grouping mode, plus custom xlabel/ylabel/title overrides.
    """
    forecast = UnivariateForecast(df2, train_end_date=datetime.datetime(2018, 1, 5))
    # MAPE, groupby_time_feature, train set
    metric = EvaluationMetricEnum.MeanAbsolutePercentError
    metric_name = metric.get_metric_name()
    fig = forecast.plot_grouping_evaluation(
        score_func=metric.get_metric_func(),
        score_func_name=metric_name,
        which="train",
        groupby_time_feature="dow")
    assert fig.data[0].name == f"train {metric_name}"
    assert fig.layout.xaxis.title.text == "dow"
    assert fig.layout.yaxis.title.text == f"train {metric_name}"
    assert fig.layout.title.text == f"train {metric_name} vs dow"
    assert fig.data[0].x.shape[0] == 5
    # MSE, groupby_sliding_window_size, train set
    metric = EvaluationMetricEnum.MeanSquaredError
    metric_name = metric.get_metric_name()
    fig = forecast.plot_grouping_evaluation(
        score_func=metric.get_metric_func(),
        score_func_name=metric_name,
        which="train",
        groupby_sliding_window_size=2)  # there are 5 training points, so this creates groups of size (1, 2, 2)
    assert fig.data[0].name == f"train {metric_name}"
    assert fig.layout.xaxis.title.text == f"{cst.TIME_COL}_downsample"
    assert fig.layout.yaxis.title.text == f"train {metric_name}"
    assert fig.layout.title.text == f"train {metric_name} vs {cst.TIME_COL}_downsample"
    assert fig.data[0].x.shape[0] == 3
    # MAE, groupby_custom_column, test set
    forecast = UnivariateForecast(df2, train_end_date=datetime.datetime(2018, 1, 2))
    metric = EvaluationMetricEnum.MeanAbsoluteError
    metric_name = metric.get_metric_name()
    custom_groups = pd.Series(["g1", "g2", "g1", "g3", "g2"], name="custom_groups")
    fig = forecast.plot_grouping_evaluation(
        groupby_custom_column=custom_groups,
        score_func=metric.get_metric_func(),
        score_func_name=metric_name,
        which="test",
        title=None)
    assert fig.data[0].name == f"test {metric_name}"
    assert fig.layout.xaxis.title.text == "custom_groups"
    assert fig.layout.yaxis.title.text == f"test {metric_name}"
    assert fig.layout.title.text == f"test {metric_name} vs custom_groups"
    assert fig.data[0].x.shape[0] == 3
    # custom xlabel, ylabel, title
    fig = forecast.plot_grouping_evaluation(
        groupby_custom_column=custom_groups,
        score_func=metric.get_metric_func(),
        score_func_name=metric_name,
        which="test",
        xlabel="Custom labels",
        ylabel="Mean Absolute Error of y",
        title="Mean Absolute Error of y by Custom labels")
    assert fig.layout.xaxis.title.text == "Custom labels"
    assert fig.layout.yaxis.title.text == "Mean Absolute Error of y"
    assert fig.layout.title.text == "Mean Absolute Error of y by Custom labels"
def test_autocomplete_map_func_dict(df2):
    """Tests `autocomplete_map_func_dict`.

    Metric-enum names should be expanded into row-wise callables bound to the
    forecast's column names, while user-supplied callables pass through
    untouched; unknown names raise ``ValueError``.
    """
    map_func_dict = {
        "residual": ElementwiseEvaluationMetricEnum.Residual.name,
        "squared_error": ElementwiseEvaluationMetricEnum.SquaredError.name,
        "coverage": ElementwiseEvaluationMetricEnum.Coverage.name,
        "custom_metric": lambda row: (row[cst.ACTUAL_COL] - row[cst.PREDICTED_COL])**4
    }
    # NOTE(review): `rename` without `columns=`/`axis=1` maps index labels, so
    # `df_renamed` keeps the default column names — likely the intent was
    # `df2.rename(columns={...})`; verify against the original test design.
    df_renamed = df2.rename({
        cst.TIME_COL: "custom_time_col",
        cst.ACTUAL_COL: "custom_actual_col",
        cst.PREDICTED_COL: "custom_predicted_col",
        cst.PREDICTED_LOWER_COL: "custom_predicted_lower_col",
        cst.PREDICTED_UPPER_COL: "custom_predicted_upper_col",
        cst.NULL_PREDICTED_COL: "custom_null_predicted_col",
    })
    forecast = UnivariateForecast(df_renamed, train_end_date=datetime.datetime(2018, 1, 5))
    map_func_dict = forecast.autocomplete_map_func_dict(map_func_dict)
    # Each autocompleted function, applied row-wise, matches the direct formula.
    actual = df2.apply(map_func_dict["residual"], axis=1)
    expected = (df2[cst.ACTUAL_COL] - df2[cst.PREDICTED_COL])
    assert_series_equal(actual, expected)
    actual = df2.apply(map_func_dict["squared_error"], axis=1)
    expected = (df2[cst.ACTUAL_COL] - df2[cst.PREDICTED_COL]).pow(2)
    assert_series_equal(actual, expected)
    actual = df2.apply(map_func_dict["coverage"], axis=1)
    expected = ((df2[cst.ACTUAL_COL] > df2[cst.PREDICTED_LOWER_COL]) & (df2[cst.ACTUAL_COL] < df2[cst.PREDICTED_UPPER_COL])).astype('float')
    assert_series_equal(actual, expected)
    actual = df2.apply(map_func_dict["custom_metric"], axis=1)
    expected = (df2[cst.ACTUAL_COL] - df2[cst.PREDICTED_COL]).pow(4)
    assert_series_equal(actual, expected)
    # None passes through unchanged
    assert forecast.autocomplete_map_func_dict(None) is None
    valid_names = ", ".join(ElementwiseEvaluationMetricEnum.__dict__["_member_names_"])
    with pytest.raises(ValueError, match=f"unknown_func is not a recognized elementwise "
                                         f"evaluation metric. Must be one of: {valid_names}"):
        map_func_dict = {"unknown_func": "unknown_func"}
        forecast.autocomplete_map_func_dict(map_func_dict)
def test_get_flexible_grouping_evaluation(df2):
    """Tests `get_flexible_grouping_evaluation`.

    Covers three equivalent ways to define `map_func_dict` (enum names,
    enum metric functions, raw lambdas), plus sliding-window and
    custom-column grouping on the test set.
    """
    forecast = UnivariateForecast(df2, train_end_date=datetime.datetime(2018, 1, 5))
    # Checks residual quantiles, MSE/median squared error, and coverage
    map_func_dict = {
        "residual": ElementwiseEvaluationMetricEnum.Residual.name,
        "squared_error": ElementwiseEvaluationMetricEnum.SquaredError.name,
        "coverage": ElementwiseEvaluationMetricEnum.Coverage.name
    }
    agg_kwargs = {
        "residual_mean": pd.NamedAgg(column="residual", aggfunc=np.nanmean),
        "residual_q05": pd.NamedAgg(column="residual", aggfunc=partial(np.nanquantile, q=0.05)),
        "residual_q95": pd.NamedAgg(column="residual", aggfunc=partial(np.nanquantile, q=0.95)),
        "MSE": pd.NamedAgg(column="squared_error", aggfunc=np.nanmean),
        "median_squared_error": pd.NamedAgg(column="squared_error", aggfunc=np.nanmedian),
        "coverage": pd.NamedAgg(column="coverage", aggfunc=np.nanmean),
    }
    result = forecast.get_flexible_grouping_evaluation(
        which="train",
        groupby_time_feature="dow",
        groupby_sliding_window_size=None,
        groupby_custom_column=None,
        map_func_dict=map_func_dict,
        agg_kwargs=agg_kwargs,
        extend_col_names=False)
    expected = pd.DataFrame({
        # Only one value per group, so the mean/median/quantiles are the same
        "residual_mean": [0.0, -2.0, 0.0, 2.0, 2.0],
        "residual_q05": [0.0, -2.0, 0.0, 2.0, 2.0],
        "residual_q95": [0.0, -2.0, 0.0, 2.0, 2.0],
        "MSE": [0.0, 4.0, 0.0, 4.0, 4.0],
        "median_squared_error": [0.0, 4.0, 0.0, 4.0, 4.0],
        "coverage": [0.0, 1.0, 1.0, 0.0, 0.0],
    }, index=pd.Series([1, 2, 3, 4, 5], name="dow"))
    assert_frame_equal(result, expected)
    # Equivalent way to specify `map_func_dict` (without autocomplete):
    # call the enum's metric function explicitly on the forecast's columns.
    map_func_dict = {
        "residual": lambda row: ElementwiseEvaluationMetricEnum.Residual.get_metric_func()(
            row[forecast.actual_col],
            row[forecast.predicted_col]),
        "squared_error": lambda row: ElementwiseEvaluationMetricEnum.SquaredError.get_metric_func()(
            row[forecast.actual_col],
            row[forecast.predicted_col]),
        "coverage": lambda row: ElementwiseEvaluationMetricEnum.Coverage.get_metric_func()(
            row[forecast.actual_col],
            row[forecast.predicted_lower_col],
            row[forecast.predicted_upper_col]),
    }
    result = forecast.get_flexible_grouping_evaluation(
        which="train",
        groupby_time_feature="dow",
        groupby_sliding_window_size=None,
        groupby_custom_column=None,
        map_func_dict=map_func_dict,
        agg_kwargs=agg_kwargs,
        extend_col_names=False)
    assert_frame_equal(result, expected)
    # Equivalent way to specify `map_func_dict` (without autocomplete):
    # raw lambdas computing the same quantities directly.
    map_func_dict = {
        "residual": lambda row: row[cst.ACTUAL_COL] - row[cst.PREDICTED_COL],
        "squared_error": lambda row: (row[cst.ACTUAL_COL] - row[cst.PREDICTED_COL])**2,
        "coverage": lambda row: 1.0 if row[cst.PREDICTED_LOWER_COL] < row[cst.ACTUAL_COL] < row[cst.PREDICTED_UPPER_COL] else 0.0
    }
    result = forecast.get_flexible_grouping_evaluation(
        which="train",
        groupby_time_feature="dow",
        groupby_sliding_window_size=None,
        groupby_custom_column=None,
        map_func_dict=map_func_dict,
        agg_kwargs=agg_kwargs,
        extend_col_names=False)
    assert_frame_equal(result, expected)
    # Groupby sliding window
    result = forecast.get_flexible_grouping_evaluation(
        which="train",
        groupby_time_feature=None,
        groupby_sliding_window_size=3,
        groupby_custom_column=None,
        map_func_dict=map_func_dict,
        agg_kwargs=agg_kwargs,
        extend_col_names=False)
    expected = pd.DataFrame({
        "residual_mean": [-1.0, 4/3],
        "residual_q05": [-1.9, 0.2],
        "residual_q95": [-0.1, 2.0],
        "MSE": [2.0, 2.0 + 2/3],
        "median_squared_error": [2.0, 4.0],
        "coverage": [0.5, 1/3],
    }, index=pd.DatetimeIndex(["2018-01-01", "2018-01-04"], name="ts_downsample"))
    assert_frame_equal(result, expected)
    # On test set with custom groupby column
    custom_groups = pd.Series(["val1"], name="value_group").repeat(forecast.df_test.shape[0])
    result = forecast.get_flexible_grouping_evaluation(
        which="test",
        groupby_time_feature=None,
        groupby_sliding_window_size=None,
        groupby_custom_column=custom_groups,
        map_func_dict=map_func_dict,
        agg_kwargs=agg_kwargs)
    colindex = pd.Index(
        ["residual_mean", "residual_q05", "residual_q95",
         "MSE", "median_squared_error", "coverage"])
    expected = pd.DataFrame(
        [[0.5, -0.85, 1.85, 2.5, 2.5, 0.5]],
        columns=colindex,
        index=pd.Series(["val1"], name=custom_groups.name))
    assert_frame_equal(result, expected)
def test_plot_flexible_grouping_evaluation():
    """Tests `plot_flexible_grouping_evaluation`.

    Checks trace names/order, axis titles, and styling for the
    "auto-fill", "auto", "plotly", and custom `y_col_style_dict` modes
    on a larger generated dataset.
    """
    df = gen_sliced_df(sample_size_dict={"a": 300, "b": 200, "c": 300, "d": 80, "e": 300})
    actual_col = "y"
    predicted_col = "y_hat"
    groupby_col = "x"
    groupby_col2 = "z"
    df = df[[actual_col, predicted_col, groupby_col, groupby_col2]]
    df[cst.TIME_COL] = pd.date_range(start="2020-01-01", periods=df.shape[0], freq="D")
    end_index = math.floor(df.shape[0] * 0.8)  # 80/20 train/test split
    forecast = UnivariateForecast(
        df,
        train_end_date=df[cst.TIME_COL][end_index],
        time_col=cst.TIME_COL,
        actual_col=actual_col,
        predicted_col=predicted_col,
        predicted_lower_col=None,
        predicted_upper_col=None,
        null_model_predicted_col=None)
    # MSE and quantiles of squared error
    metric_col = "squared_err"
    map_func_dict = {metric_col: ElementwiseEvaluationMetricEnum.SquaredError.name}
    agg_kwargs = {f"Q{quantile}": pd.NamedAgg(column=metric_col, aggfunc=partial(np.nanquantile, q=quantile)) for quantile in [0.1, 0.9]}
    agg_kwargs.update({"mean": pd.NamedAgg(column=metric_col, aggfunc=np.nanmean)})
    # group by "dom", "auto-fill" styling
    fig = forecast.plot_flexible_grouping_evaluation(
        which="train",
        groupby_time_feature="dom",
        groupby_sliding_window_size=None,
        groupby_custom_column=None,
        map_func_dict=map_func_dict,
        agg_kwargs=agg_kwargs,
        extend_col_names=False,
        y_col_style_dict="auto-fill",
        default_color="rgba(0, 145, 202, 1.0)",
        xlabel=None,
        ylabel=metric_col,
        title=None,
        showlegend=True)
    assert [fig.data[i].name for i in range(len(fig.data))] == ["Q0.1", "mean", "Q0.9"]
    assert fig.layout.xaxis.title.text == "dom"
    assert fig.layout.yaxis.title.text == metric_col
    assert fig.layout.title.text == f"{metric_col} vs dom"
    assert fig.data[0].x.shape[0] == 31  # 31 unique days in month
    assert fig.data[1].line["color"] == "rgba(0, 145, 202, 1.0)"
    assert fig.data[1].fill == "tonexty"  # from auto-fill
    assert fig.layout.showlegend
    # group by sliding window, "auto" styling
    # provide default color, xlabel, hide legend
    fig = forecast.plot_flexible_grouping_evaluation(
        which="train",
        groupby_time_feature=None,
        groupby_sliding_window_size=7,
        groupby_custom_column=None,
        map_func_dict=map_func_dict,
        agg_kwargs=agg_kwargs,
        extend_col_names=False,
        y_col_style_dict="auto",
        default_color="rgba(145, 0, 202, 1.0)",
        xlabel="ts",
        ylabel=None,
        title=None,
        showlegend=False)
    assert [fig.data[i].name for i in range(len(fig.data))] == ["Q0.1", "mean", "Q0.9"]
    assert fig.layout.xaxis.title.text == "ts"
    assert fig.layout.yaxis.title.text is None
    assert fig.layout.title.text is None
    assert fig.data[0].x[0] == datetime.datetime(2020, 1, 1, 0, 0)
    assert fig.data[1].line["color"] == "rgba(145, 0, 202, 1.0)"
    assert fig.data[1].fill is None  # "auto" mode does not fill between traces
    assert not fig.layout.showlegend
    # custom groups, "plotly" styling, provide ylabel, title
    fig = forecast.plot_flexible_grouping_evaluation(
        which="train",
        groupby_time_feature=None,
        groupby_sliding_window_size=None,
        groupby_custom_column=forecast.df_train["x"],
        map_func_dict=map_func_dict,
        agg_kwargs=agg_kwargs,
        extend_col_names=False,
        y_col_style_dict="plotly",
        default_color=None,
        xlabel=None,
        ylabel=metric_col,
        title="custom title",
        showlegend=True)
    assert [fig.data[i].name for i in range(len(fig.data))] == ["Q0.1", "Q0.9", "mean"]  # not sorted
    assert fig.layout.xaxis.title.text == "x"
    assert fig.layout.yaxis.title.text == metric_col
    assert fig.layout.title.text == "custom title"
    assert list(fig.data[0].x) == list("abcde")
    assert fig.data[0].line["color"] is None  # color is up to plotly
    assert fig.data[1].fill is None
    assert fig.layout.showlegend
    # test set, absolute percent error, custom `y_col_style_dict` styling
    # NOTE(review): the name "squared_error" is only a dict key here; the
    # mapped metric is AbsolutePercentError, so the name is misleading.
    metric_col = "squared_error"
    map_func_dict = {
        metric_col: ElementwiseEvaluationMetricEnum.AbsolutePercentError.name
    }
    agg_kwargs = {
        "median": pd.NamedAgg(column=metric_col, aggfunc=np.nanmedian),
        "mean": pd.NamedAgg(column=metric_col, aggfunc=np.nanmean),
    }
    y_col_style_dict = {
        "median": {
            "mode": "lines+markers",
            "line": {
                "color": "rgba(202, 145, 0, 0.5)"
            }
        },
        "mean": {
            "mode": "lines+markers",
            "line": {
                "color": "rgba(0, 145, 202, 1.0)"
            }
        },
    }
    with pytest.warns(UserWarning, match="true_val is less than 1e-8"):
        fig = forecast.plot_flexible_grouping_evaluation(
            which="test",
            groupby_time_feature="dow",
            groupby_sliding_window_size=None,
            groupby_custom_column=None,
            map_func_dict=map_func_dict,
            agg_kwargs=agg_kwargs,
            extend_col_names=False,
            y_col_style_dict=y_col_style_dict,
            xlabel="x value",
            ylabel="y value",
            title="error plot",
            showlegend=True)
    assert [fig.data[i].name for i in range(len(fig.data))] == ["median", "mean"]  # not sorted
    assert fig.layout.xaxis.title.text == "x value"
    assert fig.layout.yaxis.title.text == "y value"
    assert fig.layout.title.text == "error plot"
    assert len(fig.data[0].x) == 7
    assert fig.data[0].mode == "lines+markers"
    assert fig.data[1].mode == "lines+markers"
    assert fig.data[0].line["color"] == y_col_style_dict["median"]["line"]["color"]
    assert fig.data[1].line["color"] == y_col_style_dict["mean"]["line"]["color"]
    assert fig.data[1].fill is None
    assert fig.layout.showlegend
    # median actual vs forecast value by group
    agg_kwargs = {
        "y_median": pd.NamedAgg(column="y", aggfunc=np.nanmedian),
        "y_hat_median": pd.NamedAgg(column="y_hat", aggfunc=np.nanmedian),
    }
    fig = forecast.plot_flexible_grouping_evaluation(
        which="train",
        groupby_time_feature="dow",
        groupby_sliding_window_size=None,
        groupby_custom_column=None,
        map_func_dict=None,
        agg_kwargs=agg_kwargs,
        extend_col_names=True,
        y_col_style_dict="plotly",
        xlabel=None,
        ylabel=forecast.ylabel,
        title="true vs actual by dow",
        showlegend=True)
    assert [fig.data[i].name for i in range(len(fig.data))] == ["y_median", "y_hat_median"]
    assert fig.layout.xaxis.title.text == "dow"
    assert fig.layout.yaxis.title.text == "y"
    assert fig.layout.title.text == "true vs actual by dow"
    assert len(fig.data[0].x) == 7
    assert fig.layout.showlegend
def test_make_univariate_time_series(df):
    """Tests `make_univariate_time_series`: the returned time series data
    should equal the forecast's predicted values, keyed by the time column."""
    forecast = UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 2))
    # Build the expected time series from the predictions by hand.
    expected_df = pd.DataFrame({
        cst.TIME_COL: df[cst.TIME_COL],
        cst.VALUE_COL: df[cst.PREDICTED_COL]
    })
    expected_ts = UnivariateTimeSeries()
    expected_ts.load_data(expected_df, cst.TIME_COL, cst.VALUE_COL)
    assert forecast.make_univariate_time_series().df.equals(expected_ts.df)
def test_plot_components():
    """Test plot_components of UnivariateForecast class.

    Fits a Silverkite pipeline on a tiny series, then checks the component
    plot's rows, trace names, axis titles, and the warning for a component
    name ("DUMMY") that the model does not contain.
    """
    X = pd.DataFrame({
        cst.TIME_COL: pd.date_range("2018-01-01", periods=10, freq="D"),
        cst.VALUE_COL: np.arange(1, 11)
    })
    coverage = 0.95
    # Test Silverkite
    trained_model = Pipeline([("estimator", SilverkiteEstimator(coverage=coverage))])
    with pytest.warns(Warning) as record:
        trained_model.fit(X, X[cst.VALUE_COL])
        # Tiny dataset triggers a sample-size warning during fit.
        assert "No slice had sufficient sample size" in record[0].message.args[0]
    forecast = get_forecast(X, trained_model)
    with pytest.warns(Warning) as record:
        title = "Custom component plot"
        fig = forecast.plot_components(names=["trend", "YEARLY_SEASONALITY", "DUMMY"], title=title)
        expected_rows = 3
        assert len(fig.data) == expected_rows
        assert [fig.data[i].name for i in range(expected_rows)] == \
            [cst.VALUE_COL, "trend", "YEARLY_SEASONALITY"]
        assert fig.layout.xaxis.title["text"] == cst.TIME_COL
        assert fig.layout.xaxis2.title["text"] == cst.TIME_COL
        assert fig.layout.xaxis3.title["text"] == "Time of year"
        assert fig.layout.yaxis.title["text"] == cst.VALUE_COL
        assert fig.layout.yaxis2.title["text"] == "trend"
        assert fig.layout.yaxis3.title["text"] == "yearly"
        assert fig.layout.title["text"] == title
        # Unknown component names are skipped with a warning, not an error.
        assert f"The following components have not been specified in the model: " \
               f"{{'DUMMY'}}, plotting the rest." in record[0].message.args[0]
@pytest.mark.skipif("fbprophet" not in sys.modules,
                    reason="Module 'fbprophet' not installed, pytest for 'ProphetTemplate' skipped.")
def test_plot_components_prophet():
    """Smoke test: `plot_components` returns a figure for a Prophet pipeline."""
    X = pd.DataFrame({
        cst.TIME_COL: pd.date_range("2018-01-01", periods=10, freq="D"),
        cst.VALUE_COL: np.arange(1, 11)
    })
    coverage = 0.95
    # Test Prophet
    trained_model = Pipeline([("estimator", ProphetEstimator(coverage=coverage))])
    trained_model.fit(X, X[cst.VALUE_COL])
    forecast = get_forecast(X, trained_model)
    fig = forecast.plot_components()
    assert fig is not None
import datetime
import math
import sys
from functools import partial
import numpy as np
import pandas as pd
import pytest
from pandas.util.testing import assert_frame_equal
from pandas.util.testing import assert_series_equal
from sklearn.pipeline import Pipeline
from greykite.common import constants as cst
from greykite.common.evaluation import ElementwiseEvaluationMetricEnum
from greykite.common.evaluation import EvaluationMetricEnum
from greykite.common.python_utils import assert_equal
from greykite.common.testing_utils import gen_sliced_df
from greykite.framework.input.univariate_time_series import UnivariateTimeSeries
from greykite.framework.output.univariate_forecast import UnivariateForecast
from greykite.framework.pipeline.utils import get_forecast
from greykite.sklearn.estimator.prophet_estimator import ProphetEstimator
from greykite.sklearn.estimator.silverkite_estimator import SilverkiteEstimator
try:
import fbprophet
except ModuleNotFoundError:
pass
@pytest.fixture
def df():
    """4-row forecast result fixture with actuals, predictions,
    prediction bands, and a constant null-model prediction."""
    return pd.DataFrame({
        cst.TIME_COL: [
            datetime.datetime(2018, 1, 1),
            datetime.datetime(2018, 1, 2),
            datetime.datetime(2018, 1, 3),
            datetime.datetime(2018, 1, 4)],
        cst.ACTUAL_COL: [1, 2, 3, 4],
        cst.PREDICTED_COL: [1, 4, 1, 2],
        cst.PREDICTED_LOWER_COL: [1, 1, 1, 1],
        cst.PREDICTED_UPPER_COL: [4, 5, 4, 4],
        cst.NULL_PREDICTED_COL: [1.5, 1.5, 1.5, 1.5]
    })
@pytest.fixture
def df2():
    """7-row daily forecast result fixture (floats), used by the
    grouping-evaluation tests."""
    return pd.DataFrame({
        cst.TIME_COL: pd.date_range(start="2018-01-01", periods=7),
        cst.ACTUAL_COL:
            [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
        cst.PREDICTED_COL:
            [1.0, 4.0, 3.0, 2.0, 3.0, 4.0, 8.0],
        cst.PREDICTED_LOWER_COL:
            [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
        cst.PREDICTED_UPPER_COL:
            [4.0, 5.0, 4.0, 4.0, 5.0, 6.0, 9.0],
        cst.NULL_PREDICTED_COL:
            [1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5]
    })
def test_univariate_forecast(df):
    """Tests UnivariateForecast train/test split and evaluation metrics.

    First with `forecast_horizon=2` (implicit test start), then with an
    explicit `test_start_date` that leaves a gap after the train set and a
    `relative_error_tolerance` (the latter emits a UserWarning).
    """
    forecast = UnivariateForecast(
        df,
        train_end_date=datetime.datetime(2018, 1, 2),
        test_start_date=None,
        forecast_horizon=2)
    assert forecast.forecast_horizon == 2
    assert forecast.df_train.shape == (2, 6)
    assert forecast.df_test.shape == (2, 6)
    assert forecast.relative_error_tolerance is None
    # Metric values below are hand-computed from the `df` fixture.
    enum = EvaluationMetricEnum.Correlation
    assert forecast.train_evaluation[enum.get_metric_name()] == 1.0
    assert forecast.test_evaluation[enum.get_metric_name()] == 1.0
    enum = EvaluationMetricEnum.MeanAbsoluteError
    assert forecast.train_evaluation[enum.get_metric_name()] == 1.0
    assert forecast.test_evaluation[enum.get_metric_name()] == 2.0
    enum = EvaluationMetricEnum.RootMeanSquaredError
    assert forecast.train_evaluation[enum.get_metric_name()] == math.sqrt(2)
    assert forecast.test_evaluation[enum.get_metric_name()] == 2.0
    enum = EvaluationMetricEnum.MedianAbsoluteError
    assert forecast.train_evaluation[enum.get_metric_name()] == 1.0
    assert forecast.test_evaluation[enum.get_metric_name()] == 2.0
    enum = EvaluationMetricEnum.MeanAbsolutePercentError
    assert forecast.train_evaluation[enum.get_metric_name()] == 50.0
    assert forecast.test_evaluation[enum.get_metric_name()] == pytest.approx(58.33333, 1e-4)
    assert forecast.train_evaluation[cst.R2_null_model_score] == -7.0
    assert forecast.test_evaluation[cst.R2_null_model_score] == pytest.approx(0.058824, 1e-4)
    # No tolerance configured, so the outside-tolerance fraction is undefined.
    assert forecast.train_evaluation[cst.FRACTION_OUTSIDE_TOLERANCE] is None
    assert forecast.test_evaluation[cst.FRACTION_OUTSIDE_TOLERANCE] is None
    assert forecast.train_evaluation[cst.PREDICTION_BAND_WIDTH] == 250.0
    assert forecast.test_evaluation[cst.PREDICTION_BAND_WIDTH] == 87.5
    assert forecast.train_evaluation[cst.PREDICTION_BAND_COVERAGE] == 0.5
    assert forecast.test_evaluation[cst.PREDICTION_BAND_COVERAGE] == 0.5
    assert forecast.train_evaluation[cst.LOWER_BAND_COVERAGE] == 0.5
    assert forecast.test_evaluation[cst.LOWER_BAND_COVERAGE] == 0.0
    assert forecast.train_evaluation[cst.UPPER_BAND_COVERAGE] == 0.0
    assert forecast.test_evaluation[cst.UPPER_BAND_COVERAGE] == 0.5
    assert forecast.train_evaluation[cst.COVERAGE_VS_INTENDED_DIFF] == pytest.approx(-0.45)
    assert forecast.test_evaluation[cst.COVERAGE_VS_INTENDED_DIFF] == pytest.approx(-0.45)
    # Explicit test_start_date skips 2018-01-03, so the test set has one row.
    with pytest.warns(UserWarning):
        forecast = UnivariateForecast(
            df,
            train_end_date=datetime.datetime(2018, 1, 2),
            test_start_date=datetime.datetime(2018, 1, 4),
            relative_error_tolerance=0.05)
    assert forecast.forecast_horizon is None
    assert forecast.df_train.shape == (2, 6)
    assert forecast.df_test.shape == (1, 6)
    assert forecast.relative_error_tolerance == 0.05
    enum = EvaluationMetricEnum.Correlation
    assert forecast.train_evaluation[enum.get_metric_name()] == 1.0
    assert forecast.test_evaluation[enum.get_metric_name()] is None  # undefined for one point
    enum = EvaluationMetricEnum.MeanAbsoluteError
    assert forecast.train_evaluation[enum.get_metric_name()] == 1.0
    assert forecast.test_evaluation[enum.get_metric_name()] == 2.0
    enum = EvaluationMetricEnum.RootMeanSquaredError
    assert forecast.train_evaluation[enum.get_metric_name()] == math.sqrt(2)
    assert forecast.test_evaluation[enum.get_metric_name()] == 2.0
    enum = EvaluationMetricEnum.MedianAbsoluteError
    assert forecast.train_evaluation[enum.get_metric_name()] == 1.0
    assert forecast.test_evaluation[enum.get_metric_name()] == 2.0
    enum = EvaluationMetricEnum.MeanAbsolutePercentError
    assert forecast.train_evaluation[enum.get_metric_name()] == 50.0
    assert forecast.test_evaluation[enum.get_metric_name()] == 50.0
    assert forecast.train_evaluation[cst.R2_null_model_score] == -7.0
    assert forecast.test_evaluation[cst.R2_null_model_score] == 0.36
    assert forecast.train_evaluation[cst.FRACTION_OUTSIDE_TOLERANCE] == 0.5
    assert forecast.test_evaluation[cst.FRACTION_OUTSIDE_TOLERANCE] == 1.0
    assert forecast.train_evaluation[cst.PREDICTION_BAND_WIDTH] == 250.0
    assert forecast.test_evaluation[cst.PREDICTION_BAND_WIDTH] == 75.0
    assert forecast.train_evaluation[cst.PREDICTION_BAND_COVERAGE] == 0.5
    assert forecast.test_evaluation[cst.PREDICTION_BAND_COVERAGE] == 0.0
    assert forecast.train_evaluation[cst.LOWER_BAND_COVERAGE] == 0.5
    assert forecast.test_evaluation[cst.LOWER_BAND_COVERAGE] == 0.0
    assert forecast.train_evaluation[cst.UPPER_BAND_COVERAGE] == 0.0
    assert forecast.test_evaluation[cst.UPPER_BAND_COVERAGE] == 0.0
    assert forecast.train_evaluation[cst.COVERAGE_VS_INTENDED_DIFF] == pytest.approx(-0.45)
    assert forecast.test_evaluation[cst.COVERAGE_VS_INTENDED_DIFF] == pytest.approx(-0.95)
def test_subset_columns(df):
    """Tests UnivariateForecast when optional columns are omitted.

    Point-forecast metrics must match the full-column forecast; band- and
    null-model-based metrics must be None; the relative-error-tolerance
    metric still works.
    """
    forecast = UnivariateForecast(df[[cst.TIME_COL, cst.ACTUAL_COL, cst.PREDICTED_COL]],
                                  predicted_lower_col=None,
                                  predicted_upper_col=None,
                                  null_model_predicted_col=None,
                                  train_end_date=datetime.datetime(2018, 1, 2),
                                  relative_error_tolerance=0.7)
    forecast_full = UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 2))
    # Point metrics don't depend on the optional columns.
    for enum in EvaluationMetricEnum:
        assert forecast.train_evaluation[enum.get_metric_name()] == forecast_full.train_evaluation[enum.get_metric_name()]
        assert forecast.test_evaluation[enum.get_metric_name()] == forecast_full.test_evaluation[enum.get_metric_name()]
    # Metrics that need bands or the null model are undefined here.
    for metric in [cst.R2_null_model_score, cst.PREDICTION_BAND_WIDTH, cst.PREDICTION_BAND_COVERAGE, cst.LOWER_BAND_COVERAGE,
                   cst.UPPER_BAND_COVERAGE, cst.COVERAGE_VS_INTENDED_DIFF]:
        assert forecast.train_evaluation[metric] is None
        assert forecast.test_evaluation[metric] is None
    assert forecast.relative_error_tolerance == 0.7
    assert forecast.train_evaluation[cst.FRACTION_OUTSIDE_TOLERANCE] == 0.5
    assert forecast.test_evaluation[cst.FRACTION_OUTSIDE_TOLERANCE] == 0.0
def test_input_validation(df):
    """Tests constructor validation: coverage must be given and in (0, 1),
    `train_end_date` must appear in the time column, and referenced
    columns must exist in the data frame."""
    with pytest.raises(ValueError, match="`coverage` must be provided"):
        UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 2), coverage=None)
    with pytest.raises(ValueError, match="`coverage` must be between 0.0 and 1.0"):
        UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 2), coverage=80.0)
    with pytest.raises(ValueError, match="2018-01-05 is not found in time column"):
        UnivariateForecast(df, train_end_date="2018-01-05")
    with pytest.raises(ValueError, match="Column not found in data frame"):
        UnivariateForecast(df, actual_col="not_a_column")
def test_no_train_end_date(df):
    """With `train_end_date=None`, all data is treated as training data
    (same train evaluation as using the last timestamp) and there is no
    test evaluation."""
    forecast = UnivariateForecast(
        df,
        train_end_date=None)
    forecast2 = UnivariateForecast(
        df,
        train_end_date=datetime.datetime(2018, 1, 4))  # last timestamp in `df`
    assert_equal(forecast.train_evaluation, forecast2.train_evaluation)
    assert forecast.test_evaluation is None
def test_partial_test_data():
    """Tests evaluation when the test set has a NA actual value.

    The NA row should be counted in `test_na_count`, warned about, and
    omitted from error computation (results match dropping the row).
    """
    df = pd.DataFrame({
        cst.TIME_COL: ["2018-01-01", datetime.datetime(2018, 1, 2), "2018-01-03", "2018-01-04", "2018-01-05"],
        cst.ACTUAL_COL: [1, 2, 3, 2, np.nan],  # last actual is missing
        cst.PREDICTED_COL: [1, 4, 1, 2, 4],
        cst.PREDICTED_LOWER_COL: [1, 1, 1, 1, 2],
        cst.PREDICTED_UPPER_COL: [4, 5, 4, 4, 6],
        cst.NULL_PREDICTED_COL: [1.5, 1.5, 1.5, 1.5, 1.5]
    })
    with pytest.warns(UserWarning) as record:
        forecast = UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 2))
        # Same data without the NA row, for comparison.
        forecast2 = UnivariateForecast(df.iloc[:4, ], train_end_date=datetime.datetime(2018, 1, 2))
        assert forecast.test_na_count == 1
        assert "1 value(s) in y_true were NA or infinite and are omitted in error calc." in record[0].message.args[0:2]
        assert_equal(forecast.train_evaluation, forecast2.train_evaluation)
        assert_equal(forecast.test_evaluation, forecast2.test_evaluation)
def test_no_test_data():
    """When every actual in the test period is NA, train evaluation still
    runs but test evaluation is None."""
    df = pd.DataFrame({
        cst.TIME_COL: ["2018-01-01", datetime.datetime(2018, 1, 2), "2018-01-03", "2018-01-04"],
        cst.ACTUAL_COL: [1, 2, np.nan, np.nan],  # no observed values after the train split
        cst.PREDICTED_COL: [1, 4, 1, 2],
        cst.PREDICTED_LOWER_COL: [1, 1, 1, 1],
        cst.PREDICTED_UPPER_COL: [4, 5, 4, 4],
        cst.NULL_PREDICTED_COL: [1.5, 1.5, 1.5, 1.5]
    })
    forecast = UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 2))
    assert forecast.test_na_count == 2
    assert forecast.train_evaluation is not None
    assert forecast.test_evaluation is None
def test_custom_loss_function(df):
    """Checks that a user-supplied `r2_loss_function` is used when
    computing `R2_null_model_score`."""
    def custom_loss(y_pred, y_true):
        # Root of the total absolute deviation between prediction and truth.
        deviation = np.abs(np.array(y_pred) - np.array(y_true))
        return np.sqrt(np.sum(deviation))
    forecast = UnivariateForecast(
        df,
        train_end_date=datetime.datetime(2018, 1, 2),
        r2_loss_function=custom_loss)
    assert forecast.train_evaluation[cst.R2_null_model_score] == 1 - math.sqrt(2)
    assert forecast.test_evaluation[cst.R2_null_model_score] == 0
def test_plot(df):
    """Smoke test for `plot`: a figure is returned with default options
    and with a custom `vertical_line_color`."""
    forecast = UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 2))
    fig = forecast.plot()
    assert fig is not None
    forecast = UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 4))
    fig = forecast.plot(vertical_line_color="green")
    assert fig is not None
def test_get_grouping_evaluation(df2):
    """Tests `get_grouping_evaluation` with the three grouping modes:
    time feature, sliding window, and custom column."""
    forecast = UnivariateForecast(df2, train_end_date=datetime.datetime(2018, 1, 5))
    # MAPE, groupby_time_feature, train set
    metric = EvaluationMetricEnum.MeanAbsolutePercentError
    metric_name = metric.get_metric_name()
    grouped_df = forecast.get_grouping_evaluation(
        score_func=metric.get_metric_func(),
        score_func_name=metric_name,
        which="train",
        groupby_time_feature="dow")
    expected = pd.DataFrame({
        "dow": [1, 2, 3, 4, 5],  # day of week; the time feature is used as the column name
        f"train {metric_name}": [0.0, 100.0, 0.0, 50.0, 40.0]
    })
    assert_equal(grouped_df, expected)
    # MSE, groupby_sliding_window_size
    metric = EvaluationMetricEnum.MeanSquaredError
    metric_name = metric.get_metric_name()
    grouped_df = forecast.get_grouping_evaluation(
        score_func=metric.get_metric_func(),
        score_func_name=metric_name,
        which="train",
        groupby_sliding_window_size=2)
    expected = pd.DataFrame({
        f"{cst.TIME_COL}_downsample": [
            datetime.datetime(2018, 1, 1),
            datetime.datetime(2018, 1, 3),
            datetime.datetime(2018, 1, 5)],
        f"train {metric_name}": [0.0, 2.0, 4.0]
    })
    assert_equal(grouped_df, expected)
    # MAE, groupby_custom_column, test set
    forecast = UnivariateForecast(df2, train_end_date=datetime.datetime(2018, 1, 2))
    metric = EvaluationMetricEnum.MeanAbsoluteError
    custom_groups = pd.Series(["g1", "g2", "g1", "g3", "g2"], name="custom_groups")
    grouped_df = forecast.get_grouping_evaluation(
        score_func=metric.get_metric_func(),
        score_func_name=None,  # column name falls back to the generic "metric"
        which="test",
        groupby_custom_column=custom_groups)
    expected = pd.DataFrame({
        "custom_groups": ["g1", "g2", "g3"],
        "test metric": [1.0, 1.5, 2.0]
    })
    assert_equal(grouped_df, expected)
def test_plot_grouping_evaluation(df2):
    """Tests `plot_grouping_evaluation`: trace name, axis titles, figure
    title, and point counts for each grouping mode and custom labels."""
    forecast = UnivariateForecast(df2, train_end_date=datetime.datetime(2018, 1, 5))
    # MAPE, groupby_time_feature, train set
    metric = EvaluationMetricEnum.MeanAbsolutePercentError
    metric_name = metric.get_metric_name()
    fig = forecast.plot_grouping_evaluation(
        score_func=metric.get_metric_func(),
        score_func_name=metric_name,
        which="train",
        groupby_time_feature="dow")
    assert fig.data[0].name == f"train {metric_name}"
    assert fig.layout.xaxis.title.text == "dow"
    assert fig.layout.yaxis.title.text == f"train {metric_name}"
    assert fig.layout.title.text == f"train {metric_name} vs dow"
    assert fig.data[0].x.shape[0] == 5
    # MSE, groupby_sliding_window_size, train set
    metric = EvaluationMetricEnum.MeanSquaredError
    metric_name = metric.get_metric_name()
    fig = forecast.plot_grouping_evaluation(
        score_func=metric.get_metric_func(),
        score_func_name=metric_name,
        which="train",
        groupby_sliding_window_size=2)  # 5 training points -> groups of size (1, 2, 2)
    assert fig.data[0].name == f"train {metric_name}"
    assert fig.layout.xaxis.title.text == f"{cst.TIME_COL}_downsample"
    assert fig.layout.yaxis.title.text == f"train {metric_name}"
    assert fig.layout.title.text == f"train {metric_name} vs {cst.TIME_COL}_downsample"
    assert fig.data[0].x.shape[0] == 3
    # MAE, groupby_custom_column, test set
    forecast = UnivariateForecast(df2, train_end_date=datetime.datetime(2018, 1, 2))
    metric = EvaluationMetricEnum.MeanAbsoluteError
    metric_name = metric.get_metric_name()
    custom_groups = pd.Series(["g1", "g2", "g1", "g3", "g2"], name="custom_groups")
    fig = forecast.plot_grouping_evaluation(
        groupby_custom_column=custom_groups,
        score_func=metric.get_metric_func(),
        score_func_name=metric_name,
        which="test",
        title=None)
    assert fig.data[0].name == f"test {metric_name}"
    assert fig.layout.xaxis.title.text == "custom_groups"
    assert fig.layout.yaxis.title.text == f"test {metric_name}"
    assert fig.layout.title.text == f"test {metric_name} vs custom_groups"
    assert fig.data[0].x.shape[0] == 3
    # custom xlabel, ylabel, title
    fig = forecast.plot_grouping_evaluation(
        groupby_custom_column=custom_groups,
        score_func=metric.get_metric_func(),
        score_func_name=metric_name,
        which="test",
        xlabel="Custom labels",
        ylabel="Mean Absolute Error of y",
        title="Mean Absolute Error of y by Custom labels")
    assert fig.layout.xaxis.title.text == "Custom labels"
    assert fig.layout.yaxis.title.text == "Mean Absolute Error of y"
    assert fig.layout.title.text == "Mean Absolute Error of y by Custom labels"
def test_autocomplete_map_func_dict(df2):
    """Tests `autocomplete_map_func_dict`: enum names become row-wise
    callables, user callables pass through, unknown names raise."""
    map_func_dict = {
        "residual": ElementwiseEvaluationMetricEnum.Residual.name,
        "squared_error": ElementwiseEvaluationMetricEnum.SquaredError.name,
        "coverage": ElementwiseEvaluationMetricEnum.Coverage.name,
        "custom_metric": lambda row: (row[cst.ACTUAL_COL] - row[cst.PREDICTED_COL])**4
    }
    # NOTE(review): `rename` without `columns=`/`axis=1` maps index labels,
    # so the columns of `df_renamed` are unchanged — confirm intent.
    df_renamed = df2.rename({
        cst.TIME_COL: "custom_time_col",
        cst.ACTUAL_COL: "custom_actual_col",
        cst.PREDICTED_COL: "custom_predicted_col",
        cst.PREDICTED_LOWER_COL: "custom_predicted_lower_col",
        cst.PREDICTED_UPPER_COL: "custom_predicted_upper_col",
        cst.NULL_PREDICTED_COL: "custom_null_predicted_col",
    })
    forecast = UnivariateForecast(df_renamed, train_end_date=datetime.datetime(2018, 1, 5))
    map_func_dict = forecast.autocomplete_map_func_dict(map_func_dict)
    # Each autocompleted function, applied row-wise, matches the direct formula.
    actual = df2.apply(map_func_dict["residual"], axis=1)
    expected = (df2[cst.ACTUAL_COL] - df2[cst.PREDICTED_COL])
    assert_series_equal(actual, expected)
    actual = df2.apply(map_func_dict["squared_error"], axis=1)
    expected = (df2[cst.ACTUAL_COL] - df2[cst.PREDICTED_COL]).pow(2)
    assert_series_equal(actual, expected)
    actual = df2.apply(map_func_dict["coverage"], axis=1)
    expected = ((df2[cst.ACTUAL_COL] > df2[cst.PREDICTED_LOWER_COL]) & (df2[cst.ACTUAL_COL] < df2[cst.PREDICTED_UPPER_COL])).astype('float')
    assert_series_equal(actual, expected)
    actual = df2.apply(map_func_dict["custom_metric"], axis=1)
    expected = (df2[cst.ACTUAL_COL] - df2[cst.PREDICTED_COL]).pow(4)
    assert_series_equal(actual, expected)
    # None passes through unchanged
    assert forecast.autocomplete_map_func_dict(None) is None
    valid_names = ", ".join(ElementwiseEvaluationMetricEnum.__dict__["_member_names_"])
    with pytest.raises(ValueError, match=f"unknown_func is not a recognized elementwise "
                                         f"evaluation metric. Must be one of: {valid_names}"):
        map_func_dict = {"unknown_func": "unknown_func"}
        forecast.autocomplete_map_func_dict(map_func_dict)
def test_get_flexible_grouping_evaluation(df2):
    """Checks ``get_flexible_grouping_evaluation`` with enum-named, enum-callable
    and hand-written map functions (all three must agree), and with the three
    grouping modes: time feature, sliding window, and custom column."""
    forecast = UnivariateForecast(df2, train_end_date=datetime.datetime(2018, 1, 5))
    # Metric names from the enum; autocompleted internally.
    map_func_dict = {
        "residual": ElementwiseEvaluationMetricEnum.Residual.name,
        "squared_error": ElementwiseEvaluationMetricEnum.SquaredError.name,
        "coverage": ElementwiseEvaluationMetricEnum.Coverage.name
    }
    agg_kwargs = {
        "residual_mean": pd.NamedAgg(column="residual", aggfunc=np.nanmean),
        "residual_q05": pd.NamedAgg(column="residual", aggfunc=partial(np.nanquantile, q=0.05)),
        "residual_q95": pd.NamedAgg(column="residual", aggfunc=partial(np.nanquantile, q=0.95)),
        "MSE": pd.NamedAgg(column="squared_error", aggfunc=np.nanmean),
        "median_squared_error": pd.NamedAgg(column="squared_error", aggfunc=np.nanmedian),
        "coverage": pd.NamedAgg(column="coverage", aggfunc=np.nanmean),
    }
    # Group by day-of-week time feature.
    result = forecast.get_flexible_grouping_evaluation(
        which="train",
        groupby_time_feature="dow",
        groupby_sliding_window_size=None,
        groupby_custom_column=None,
        map_func_dict=map_func_dict,
        agg_kwargs=agg_kwargs,
        extend_col_names=False)
    expected = pd.DataFrame({
        "residual_mean": [0.0, -2.0, 0.0, 2.0, 2.0],
        "residual_q05": [0.0, -2.0, 0.0, 2.0, 2.0],
        "residual_q95": [0.0, -2.0, 0.0, 2.0, 2.0],
        "MSE": [0.0, 4.0, 0.0, 4.0, 4.0],
        "median_squared_error": [0.0, 4.0, 0.0, 4.0, 4.0],
        "coverage": [0.0, 1.0, 1.0, 0.0, 0.0],
    }, index=pd.Series([1, 2, 3, 4, 5], name="dow"))
    assert_frame_equal(result, expected)
    # Same metrics built from the enum's metric functions; result must match.
    map_func_dict = {
        "residual": lambda row: ElementwiseEvaluationMetricEnum.Residual.get_metric_func()(
            row[forecast.actual_col],
            row[forecast.predicted_col]),
        "squared_error": lambda row: ElementwiseEvaluationMetricEnum.SquaredError.get_metric_func()(
            row[forecast.actual_col],
            row[forecast.predicted_col]),
        "coverage": lambda row: ElementwiseEvaluationMetricEnum.Coverage.get_metric_func()(
            row[forecast.actual_col],
            row[forecast.predicted_lower_col],
            row[forecast.predicted_upper_col]),
    }
    result = forecast.get_flexible_grouping_evaluation(
        which="train",
        groupby_time_feature="dow",
        groupby_sliding_window_size=None,
        groupby_custom_column=None,
        map_func_dict=map_func_dict,
        agg_kwargs=agg_kwargs,
        extend_col_names=False)
    assert_frame_equal(result, expected)
    # Same metrics written out by hand; result must still match.
    map_func_dict = {
        "residual": lambda row: row[cst.ACTUAL_COL] - row[cst.PREDICTED_COL],
        "squared_error": lambda row: (row[cst.ACTUAL_COL] - row[cst.PREDICTED_COL])**2,
        "coverage": lambda row: 1.0 if row[cst.PREDICTED_LOWER_COL] < row[cst.ACTUAL_COL] < row[cst.PREDICTED_UPPER_COL] else 0.0
    }
    result = forecast.get_flexible_grouping_evaluation(
        which="train",
        groupby_time_feature="dow",
        groupby_sliding_window_size=None,
        groupby_custom_column=None,
        map_func_dict=map_func_dict,
        agg_kwargs=agg_kwargs,
        extend_col_names=False)
    assert_frame_equal(result, expected)
    # Group by a sliding window of 3 observations instead of a time feature.
    result = forecast.get_flexible_grouping_evaluation(
        which="train",
        groupby_time_feature=None,
        groupby_sliding_window_size=3,
        groupby_custom_column=None,
        map_func_dict=map_func_dict,
        agg_kwargs=agg_kwargs,
        extend_col_names=False)
    expected = pd.DataFrame({
        "residual_mean": [-1.0, 4/3],
        "residual_q05": [-1.9, 0.2],
        "residual_q95": [-0.1, 2.0],
        "MSE": [2.0, 2.0 + 2/3],
        "median_squared_error": [2.0, 4.0],
        "coverage": [0.5, 1/3],
    }, index=pd.DatetimeIndex(["2018-01-01", "2018-01-04"], name="ts_downsample"))
    assert_frame_equal(result, expected)
    # Group the test set by a caller-provided column (all rows in one group).
    custom_groups = pd.Series(["val1"], name="value_group").repeat(forecast.df_test.shape[0])
    result = forecast.get_flexible_grouping_evaluation(
        which="test",
        groupby_time_feature=None,
        groupby_sliding_window_size=None,
        groupby_custom_column=custom_groups,
        map_func_dict=map_func_dict,
        agg_kwargs=agg_kwargs)
    colindex = pd.Index(
        ["residual_mean", "residual_q05", "residual_q95",
         "MSE", "median_squared_error", "coverage"])
    expected = pd.DataFrame(
        [[0.5, -0.85, 1.85, 2.5, 2.5, 0.5]],
        columns=colindex,
        index=pd.Series(["val1"], name=custom_groups.name))
    assert_frame_equal(result, expected)
def test_plot_flexible_grouping_evaluation():
    """Checks ``plot_flexible_grouping_evaluation`` figure contents for the
    supported ``y_col_style_dict`` modes ("auto-fill", "auto", "plotly", and an
    explicit dict) across the three grouping modes."""
    df = gen_sliced_df(sample_size_dict={"a": 300, "b": 200, "c": 300, "d": 80, "e": 300})
    actual_col = "y"
    predicted_col = "y_hat"
    groupby_col = "x"
    groupby_col2 = "z"
    df = df[[actual_col, predicted_col, groupby_col, groupby_col2]]
    df[cst.TIME_COL] = pd.date_range(start="2020-01-01", periods=df.shape[0], freq="D")
    # 80/20 train/test split by date.
    end_index = math.floor(df.shape[0] * 0.8)
    forecast = UnivariateForecast(
        df,
        train_end_date=df[cst.TIME_COL][end_index],
        time_col=cst.TIME_COL,
        actual_col=actual_col,
        predicted_col=predicted_col,
        predicted_lower_col=None,
        predicted_upper_col=None,
        null_model_predicted_col=None)
    metric_col = "squared_err"
    map_func_dict = {metric_col: ElementwiseEvaluationMetricEnum.SquaredError.name}
    # Aggregate the metric to its 10th/90th quantiles and mean.
    agg_kwargs = {f"Q{quantile}": pd.NamedAgg(column=metric_col, aggfunc=partial(np.nanquantile, q=quantile)) for quantile in [0.1, 0.9]}
    agg_kwargs.update({"mean": pd.NamedAgg(column=metric_col, aggfunc=np.nanmean)})
    # "auto-fill": quantile band is shaded around the mean line.
    fig = forecast.plot_flexible_grouping_evaluation(
        which="train",
        groupby_time_feature="dom",
        groupby_sliding_window_size=None,
        groupby_custom_column=None,
        map_func_dict=map_func_dict,
        agg_kwargs=agg_kwargs,
        extend_col_names=False,
        y_col_style_dict="auto-fill",
        default_color="rgba(0, 145, 202, 1.0)",
        xlabel=None,
        ylabel=metric_col,
        title=None,
        showlegend=True)
    assert [fig.data[i].name for i in range(len(fig.data))] == ["Q0.1", "mean", "Q0.9"]
    assert fig.layout.xaxis.title.text == "dom"
    assert fig.layout.yaxis.title.text == metric_col
    assert fig.layout.title.text == f"{metric_col} vs dom"
    assert fig.data[0].x.shape[0] == 31  # one point per day of month
    assert fig.data[1].line["color"] == "rgba(0, 145, 202, 1.0)"
    assert fig.data[1].fill == "tonexty"  # fill marks the quantile band
    assert fig.layout.showlegend
    # "auto": same traces without the shaded band; sliding-window grouping.
    fig = forecast.plot_flexible_grouping_evaluation(
        which="train",
        groupby_time_feature=None,
        groupby_sliding_window_size=7,
        groupby_custom_column=None,
        map_func_dict=map_func_dict,
        agg_kwargs=agg_kwargs,
        extend_col_names=False,
        y_col_style_dict="auto",
        default_color="rgba(145, 0, 202, 1.0)",
        xlabel="ts",
        ylabel=None,
        title=None,
        showlegend=False)
    assert [fig.data[i].name for i in range(len(fig.data))] == ["Q0.1", "mean", "Q0.9"]
    assert fig.layout.xaxis.title.text == "ts"
    assert fig.layout.yaxis.title.text is None
    assert fig.layout.title.text is None
    assert fig.data[0].x[0] == datetime.datetime(2020, 1, 1, 0, 0)
    assert fig.data[1].line["color"] == "rgba(145, 0, 202, 1.0)"
    assert fig.data[1].fill is None
    assert not fig.layout.showlegend
    # "plotly": default plotly styling; custom-column grouping.
    fig = forecast.plot_flexible_grouping_evaluation(
        which="train",
        groupby_time_feature=None,
        groupby_sliding_window_size=None,
        groupby_custom_column=forecast.df_train["x"],
        map_func_dict=map_func_dict,
        agg_kwargs=agg_kwargs,
        extend_col_names=False,
        y_col_style_dict="plotly",
        default_color=None,
        xlabel=None,
        ylabel=metric_col,
        title="custom title",
        showlegend=True)
    assert [fig.data[i].name for i in range(len(fig.data))] == ["Q0.1", "Q0.9", "mean"]
    assert fig.layout.xaxis.title.text == "x"
    assert fig.layout.yaxis.title.text == metric_col
    assert fig.layout.title.text == "custom title"
    assert list(fig.data[0].x) == list("abcde")
    assert fig.data[0].line["color"] is None  # plotly picks colors itself
    assert fig.data[1].fill is None
    assert fig.layout.showlegend
    # Explicit y_col_style_dict with per-trace styles.
    # NOTE(review): the column is named "squared_error" but maps to
    # AbsolutePercentError — presumably intentional for the warning below.
    metric_col = "squared_error"
    map_func_dict = {
        metric_col: ElementwiseEvaluationMetricEnum.AbsolutePercentError.name
    }
    agg_kwargs = {
        "median": pd.NamedAgg(column=metric_col, aggfunc=np.nanmedian),
        "mean": pd.NamedAgg(column=metric_col, aggfunc=np.nanmean),
    }
    y_col_style_dict = {
        "median": {
            "mode": "lines+markers",
            "line": {
                "color": "rgba(202, 145, 0, 0.5)"
            }
        },
        "mean": {
            "mode": "lines+markers",
            "line": {
                "color": "rgba(0, 145, 202, 1.0)"
            }
        },
    }
    # Percent error on near-zero actuals triggers the warning being matched.
    with pytest.warns(UserWarning, match="true_val is less than 1e-8"):
        fig = forecast.plot_flexible_grouping_evaluation(
            which="test",
            groupby_time_feature="dow",
            groupby_sliding_window_size=None,
            groupby_custom_column=None,
            map_func_dict=map_func_dict,
            agg_kwargs=agg_kwargs,
            extend_col_names=False,
            y_col_style_dict=y_col_style_dict,
            xlabel="x value",
            ylabel="y value",
            title="error plot",
            showlegend=True)
    assert [fig.data[i].name for i in range(len(fig.data))] == ["median", "mean"]
    assert fig.layout.xaxis.title.text == "x value"
    assert fig.layout.yaxis.title.text == "y value"
    assert fig.layout.title.text == "error plot"
    assert len(fig.data[0].x) == 7  # one point per day of week
    assert fig.data[0].mode == "lines+markers"
    assert fig.data[1].mode == "lines+markers"
    assert fig.data[0].line["color"] == y_col_style_dict["median"]["line"]["color"]
    assert fig.data[1].line["color"] == y_col_style_dict["mean"]["line"]["color"]
    assert fig.data[1].fill is None
    assert fig.layout.showlegend
    # No map_func_dict: aggregate the raw actual/predicted columns directly.
    agg_kwargs = {
        "y_median": pd.NamedAgg(column="y", aggfunc=np.nanmedian),
        "y_hat_median": pd.NamedAgg(column="y_hat", aggfunc=np.nanmedian),
    }
    fig = forecast.plot_flexible_grouping_evaluation(
        which="train",
        groupby_time_feature="dow",
        groupby_sliding_window_size=None,
        groupby_custom_column=None,
        map_func_dict=None,
        agg_kwargs=agg_kwargs,
        extend_col_names=True,
        y_col_style_dict="plotly",
        xlabel=None,
        ylabel=forecast.ylabel,
        title="true vs actual by dow",
        showlegend=True)
    assert [fig.data[i].name for i in range(len(fig.data))] == ["y_median", "y_hat_median"]
    assert fig.layout.xaxis.title.text == "dow"
    assert fig.layout.yaxis.title.text == "y"
    assert fig.layout.title.text == "true vs actual by dow"
    assert len(fig.data[0].x) == 7
    assert fig.layout.showlegend
def test_make_univariate_time_series(df):
    """Checks that ``make_univariate_time_series`` yields a data frame equal to
    one loaded from the forecast's predicted values directly."""
    forecast = UnivariateForecast(df, train_end_date=datetime.datetime(2018, 1, 2))
    expected_ts = UnivariateTimeSeries()
    predicted_df = pd.DataFrame({
        cst.TIME_COL: df[cst.TIME_COL],
        cst.VALUE_COL: df[cst.PREDICTED_COL]
    })
    expected_ts.load_data(predicted_df, cst.TIME_COL, cst.VALUE_COL)
    assert forecast.make_univariate_time_series().df.equals(expected_ts.df)
def test_plot_components():
    """Checks ``plot_components`` for a Silverkite model: requested components are
    plotted, unknown component names warn and are skipped."""
    X = pd.DataFrame({
        cst.TIME_COL: pd.date_range("2018-01-01", periods=10, freq="D"),
        cst.VALUE_COL: np.arange(1, 11)
    })
    coverage = 0.95
    trained_model = Pipeline([("estimator", SilverkiteEstimator(coverage=coverage))])
    # Tiny dataset: fitting warns about insufficient sample size per slice.
    with pytest.warns(Warning) as record:
        trained_model.fit(X, X[cst.VALUE_COL])
    assert "No slice had sufficient sample size" in record[0].message.args[0]
    forecast = get_forecast(X, trained_model)
    # "DUMMY" is not a model component; it must warn and be dropped.
    with pytest.warns(Warning) as record:
        title = "Custom component plot"
        fig = forecast.plot_components(names=["trend", "YEARLY_SEASONALITY", "DUMMY"], title=title)
    expected_rows = 3
    assert len(fig.data) == expected_rows
    assert [fig.data[i].name for i in range(expected_rows)] == \
        [cst.VALUE_COL, "trend", "YEARLY_SEASONALITY"]
    assert fig.layout.xaxis.title["text"] == cst.TIME_COL
    assert fig.layout.xaxis2.title["text"] == cst.TIME_COL
    assert fig.layout.xaxis3.title["text"] == "Time of year"
    assert fig.layout.yaxis.title["text"] == cst.VALUE_COL
    assert fig.layout.yaxis2.title["text"] == "trend"
    assert fig.layout.yaxis3.title["text"] == "yearly"
    assert fig.layout.title["text"] == title
    assert f"The following components have not been specified in the model: " \
        f"{{'DUMMY'}}, plotting the rest." in record[0].message.args[0]
@pytest.mark.skipif("fbprophet" not in sys.modules,
                    reason="Module 'fbprophet' not installed, pytest for 'ProphetTemplate' skipped.")
def test_plot_components_prophet():
    """Smoke test: ``plot_components`` returns a figure for a Prophet model.

    Only runs when ``fbprophet`` is importable.
    """
    X = pd.DataFrame({
        cst.TIME_COL: pd.date_range("2018-01-01", periods=10, freq="D"),
        cst.VALUE_COL: np.arange(1, 11)
    })
    coverage = 0.95
    trained_model = Pipeline([("estimator", ProphetEstimator(coverage=coverage))])
    trained_model.fit(X, X[cst.VALUE_COL])
    forecast = get_forecast(X, trained_model)
    fig = forecast.plot_components()
    assert fig is not None
| true | true |
f7fd901458b45fdb785e1d676187053baa41ac7e | 254 | py | Python | Python3/1015-Smallest-Integer-Divisible-by-K/soln-1.py | wyaadarsh/LeetCode-Solutions | 3719f5cb059eefd66b83eb8ae990652f4b7fd124 | [
"MIT"
] | 5 | 2020-07-24T17:48:59.000Z | 2020-12-21T05:56:00.000Z | Python3/1015-Smallest-Integer-Divisible-by-K/soln-1.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | null | null | null | Python3/1015-Smallest-Integer-Divisible-by-K/soln-1.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | 2 | 2020-07-24T17:49:01.000Z | 2020-08-31T19:57:35.000Z | class Solution:
def smallestRepunitDivByK(self, K: int) -> int:
    """Return the length of the smallest repunit (1, 11, 111, ...) divisible
    by K, or -1 if no repunit is divisible by K."""
    # A repunit ends in 1, so it can never be divisible by 2 or 5.
    if K % 2 == 0 or K % 5 == 0:
        return -1
    # Track only the repunit's remainder mod K to keep numbers small.
    remainder, length = 1, 1
    while remainder % K:
        remainder = (remainder * 10 + 1) % K
        length += 1
    return length
| 25.4 | 51 | 0.413386 | class Solution:
def smallestRepunitDivByK(self, K: int) -> int:
    """Length of the smallest repunit divisible by K; -1 if none exists."""
    # Repunits only end in 1, so K sharing a factor with 10 never divides one.
    if K % 2 == 0 or K % 5 == 0:
        return -1
    digits = 1
    residue = 1 % K
    # By pigeonhole on remainders, a zero residue appears within K steps.
    while residue != 0:
        residue = (residue * 10 + 1) % K
        digits += 1
    return digits
| true | true |
f7fd90307a612d7bbc713c420af1fce102f45b32 | 2,004 | py | Python | cutplanner/planner.py | alanc10n/py-cutplanner | 66c90942c258f453df742cb7bcca43981bfd9af3 | [
"MIT"
] | null | null | null | cutplanner/planner.py | alanc10n/py-cutplanner | 66c90942c258f453df742cb7bcca43981bfd9af3 | [
"MIT"
] | 1 | 2015-02-27T02:26:22.000Z | 2015-02-27T02:26:22.000Z | cutplanner/planner.py | alanc10n/py-cutplanner | 66c90942c258f453df742cb7bcca43981bfd9af3 | [
"MIT"
] | null | null | null | """ Allows production of cutlists for a given set of required pieces, given
a set of available stock sizes.
"""
import collections
from .stock import Stock
# simple structure to keep track of a specific piece
# (id: index of the piece in the original request; length: required cut length)
Piece = collections.namedtuple('Piece', 'id, length')
class Planner(object):
    """ Object that can produce a cutlist (plan) for cutting stock. """

    def __init__(self, sizes, needed, loss=0.25):
        """
        :param sizes: iterable of available stock lengths
        :param needed: iterable of required piece lengths
        :param loss: material lost per cut (same units as the lengths)
        """
        self.stock = []
        self.stock_sizes = sorted(sizes)
        # Reversed so that pop() in make_cuts yields pieces in request order.
        self.pieces_needed = [Piece(i, s) for i, s in enumerate(needed)]
        self.pieces_needed.reverse()
        self.cut_loss = loss
        self.cur_stock = None
        # set the algorithm to use, hard code for now
        self.apply_algo = self.apply_next_fit

    @property
    def largest_stock(self):
        """ Returns the size of the largest available stock."""
        return self.stock_sizes[-1]

    def cut_piece(self, piece):
        """ Record the cut for the given piece """
        self.cur_stock.cut(piece, self.cut_loss)

    def finalize_stock(self):
        """ Takes current stock out of use, attempts to shrink """
        # Try progressively smaller sizes; stop at the first that doesn't fit.
        for smaller in self.stock_sizes[-2::-1]:
            if self.cur_stock.shrink(smaller) is None:
                break
        self.stock.append(self.cur_stock)

    def apply_next_fit(self, piece):
        """ Cut from current stock until unable, then move to new stock """
        if self.cur_stock.remaining_length < piece.length + self.cut_loss:
            # finalize current stock and get fresh stock
            self.finalize_stock()
            self.cur_stock = Stock(self.largest_stock)
        # Consistency fix: delegate to cut_piece() instead of duplicating
        # the self.cur_stock.cut(piece, self.cut_loss) call inline.
        self.cut_piece(piece)

    def make_cuts(self):
        """ Apply the cutting algorithm to generate a cut list."""
        self.cur_stock = Stock(self.largest_stock)
        while self.pieces_needed:
            piece = self.pieces_needed.pop()
            self.apply_algo(piece)
        self.finalize_stock()
| 32.322581 | 75 | 0.64022 |
import collections
from .stock import Stock
# Lightweight record for one required piece: (id, length).
Piece = collections.namedtuple('Piece', 'id, length')
class Planner(object):
    """Produces a cutlist (plan) for cutting required pieces from stock."""

    def __init__(self, sizes, needed, loss=0.25):
        """
        :param sizes: iterable of available stock lengths
        :param needed: iterable of required piece lengths
        :param loss: material lost per cut (same units as the lengths)
        """
        self.stock = []
        self.stock_sizes = sorted(sizes)
        # Reversed so pop() in make_cuts yields pieces in request order.
        self.pieces_needed = [Piece(i, s) for i, s in enumerate(needed)]
        self.pieces_needed.reverse()
        self.cut_loss = loss
        self.cur_stock = None
        # Cutting strategy is fixed to next-fit for now.
        self.apply_algo = self.apply_next_fit

    @property
    def largest_stock(self):
        """Size of the largest available stock."""
        return self.stock_sizes[-1]

    def cut_piece(self, piece):
        """Record a cut of ``piece`` from the current stock."""
        self.cur_stock.cut(piece, self.cut_loss)

    def finalize_stock(self):
        """Retire the current stock, first shrinking it as far as possible."""
        # Try progressively smaller sizes; stop at the first that doesn't fit.
        for smaller in self.stock_sizes[-2::-1]:
            if self.cur_stock.shrink(smaller) is None:
                break
        self.stock.append(self.cur_stock)

    def apply_next_fit(self, piece):
        """Cut from current stock if the piece (plus loss) fits; else start new stock."""
        if self.cur_stock.remaining_length < piece.length + self.cut_loss:
            self.finalize_stock()
            self.cur_stock = Stock(self.largest_stock)
        self.cur_stock.cut(piece, self.cut_loss)

    def make_cuts(self):
        """Apply the cutting algorithm to all needed pieces, filling self.stock."""
        self.cur_stock = Stock(self.largest_stock)
        while self.pieces_needed:
            piece = self.pieces_needed.pop()
            self.apply_algo(piece)
        self.finalize_stock()
| true | true |
f7fd9043d5068529ede3f38b459a8c96b8805de2 | 15,224 | py | Python | tests/unit/local/docker/test_lambda_image.py | renanmontebelo/aws-sam-cli | b5cfc46aa9726b5cd006df8ecc08d1b4eedeb9ea | [
"BSD-2-Clause",
"Apache-2.0"
] | 1 | 2021-11-21T09:21:59.000Z | 2021-11-21T09:21:59.000Z | tests/unit/local/docker/test_lambda_image.py | renanmontebelo/aws-sam-cli | b5cfc46aa9726b5cd006df8ecc08d1b4eedeb9ea | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | tests/unit/local/docker/test_lambda_image.py | renanmontebelo/aws-sam-cli | b5cfc46aa9726b5cd006df8ecc08d1b4eedeb9ea | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | import io
import tempfile
from unittest import TestCase
from unittest.mock import patch, Mock, mock_open, ANY
from docker.errors import ImageNotFound, BuildError, APIError
from samcli.commands.local.lib.exceptions import InvalidIntermediateImageError
from samcli.lib.utils.packagetype import ZIP, IMAGE
from samcli.local.docker.lambda_image import LambdaImage
from samcli.commands.local.cli_common.user_exceptions import ImageBuildException
from samcli import __version__ as version
class TestLambdaImage(TestCase):
    """Unit tests for samcli.local.docker.lambda_image.LambdaImage."""

    def setUp(self):
        # Real temp dir so layer-cache paths are valid on every platform.
        self.layer_cache_dir = tempfile.gettempdir()

    def test_initialization_without_defaults(self):
        """Constructor stores the arguments it is given verbatim."""
        lambda_image = LambdaImage("layer_downloader", False, False, docker_client="docker_client")

        self.assertEqual(lambda_image.layer_downloader, "layer_downloader")
        self.assertFalse(lambda_image.skip_pull_image)
        self.assertFalse(lambda_image.force_image_build)
        self.assertEqual(lambda_image.docker_client, "docker_client")

    @patch("samcli.local.docker.lambda_image.docker")
    def test_initialization_with_defaults(self, docker_patch):
        """Without an explicit client, one is created via docker.from_env()."""
        docker_client_mock = Mock()
        docker_patch.from_env.return_value = docker_client_mock

        lambda_image = LambdaImage("layer_downloader", False, False)

        self.assertEqual(lambda_image.layer_downloader, "layer_downloader")
        self.assertFalse(lambda_image.skip_pull_image)
        self.assertFalse(lambda_image.force_image_build)
        self.assertEqual(lambda_image.docker_client, docker_client_mock)

    def test_building_image_with_no_runtime_only_image(self):
        """IMAGE package type without a runtime returns a rapid-tagged name."""
        docker_client_mock = Mock()
        layer_downloader_mock = Mock()
        setattr(layer_downloader_mock, "layer_cache", self.layer_cache_dir)
        docker_client_mock.api.build.return_value = ["mock"]

        lambda_image = LambdaImage(layer_downloader_mock, False, False, docker_client=docker_client_mock)

        self.assertEqual(
            lambda_image.build(None, IMAGE, "mylambdaimage:v1", []),
            f"mylambdaimage:rapid-{version}",
        )

    @patch("samcli.local.docker.lambda_image.LambdaImage._build_image")
    @patch("samcli.local.docker.lambda_image.LambdaImage._generate_docker_image_version")
    def test_building_image_with_no_runtime_only_image_always_build(
        self, generate_docker_image_version_patch, build_image_patch
    ):
        """IMAGE package type rebuilds the rapid image, ignoring any layers."""
        docker_client_mock = Mock()
        layer_downloader_mock = Mock()
        setattr(layer_downloader_mock, "layer_cache", self.layer_cache_dir)
        docker_client_mock.api.build.return_value = ["mock"]
        generate_docker_image_version_patch.return_value = "image-version"
        docker_client_mock = Mock()
        docker_client_mock.images.get.return_value = Mock()

        lambda_image = LambdaImage(layer_downloader_mock, False, False, docker_client=docker_client_mock)

        self.assertEqual(
            lambda_image.build(None, IMAGE, "mylambdaimage:v1", ["mylayer"]),
            f"mylambdaimage:rapid-{version}",
        )
        # No layers are added, because runtime is not defined.
        build_image_patch.assert_called_once_with("mylambdaimage:v1", f"mylambdaimage:rapid-{version}", [], stream=ANY)
        # No Layers are added.
        layer_downloader_mock.assert_not_called()

    def test_building_image_with_non_accpeted_package_type(self):
        """Unknown or missing package types raise InvalidIntermediateImageError."""
        docker_client_mock = Mock()
        layer_downloader_mock = Mock()
        setattr(layer_downloader_mock, "layer_cache", self.layer_cache_dir)
        docker_client_mock.api.build.return_value = ["mock"]

        lambda_image = LambdaImage(layer_downloader_mock, False, False, docker_client=docker_client_mock)

        with self.assertRaises(InvalidIntermediateImageError):
            lambda_image.build("python3.6", "Non-accepted-packagetype", None, [])
        with self.assertRaises(InvalidIntermediateImageError):
            lambda_image.build("python3.6", None, None, [])

    def test_building_image_with_no_layers(self):
        """ZIP package type with no layers returns the emulation image's rapid tag."""
        docker_client_mock = Mock()
        layer_downloader_mock = Mock()
        setattr(layer_downloader_mock, "layer_cache", self.layer_cache_dir)
        docker_client_mock.api.build.return_value = ["mock"]

        lambda_image = LambdaImage(layer_downloader_mock, False, False, docker_client=docker_client_mock)

        self.assertEqual(
            lambda_image.build("python3.6", ZIP, None, []),
            f"amazon/aws-sam-cli-emulation-image-python3.6:rapid-{version}",
        )

    @patch("samcli.local.docker.lambda_image.LambdaImage._build_image")
    @patch("samcli.local.docker.lambda_image.LambdaImage._generate_docker_image_version")
    def test_not_building_image_that_already_exists(self, generate_docker_image_version_patch, build_image_patch):
        """If the versioned image exists locally, no build is triggered."""
        layer_downloader_mock = Mock()
        layer_mock = Mock()
        layer_mock.name = "layers1"
        layer_mock.is_defined_within_template = False
        layer_downloader_mock.download_all.return_value = [layer_mock]
        generate_docker_image_version_patch.return_value = "image-version"
        docker_client_mock = Mock()
        # images.get succeeding means the image is already present.
        docker_client_mock.images.get.return_value = Mock()

        lambda_image = LambdaImage(layer_downloader_mock, False, False, docker_client=docker_client_mock)
        actual_image_id = lambda_image.build("python3.6", ZIP, None, [layer_mock])

        self.assertEqual(actual_image_id, "samcli/lambda:image-version")

        layer_downloader_mock.download_all.assert_called_once_with([layer_mock], False)
        generate_docker_image_version_patch.assert_called_once_with([layer_mock], "python3.6")
        docker_client_mock.images.get.assert_called_once_with("samcli/lambda:image-version")
        build_image_patch.assert_not_called()

    @patch("samcli.local.docker.lambda_image.LambdaImage._build_image")
    @patch("samcli.local.docker.lambda_image.LambdaImage._generate_docker_image_version")
    def test_force_building_image_that_doesnt_already_exists(
        self, generate_docker_image_version_patch, build_image_patch
    ):
        """With force_image_build=True, a missing image is downloaded and built."""
        layer_downloader_mock = Mock()
        layer_downloader_mock.download_all.return_value = ["layers1"]
        generate_docker_image_version_patch.return_value = "image-version"
        docker_client_mock = Mock()
        # images.get raising ImageNotFound means the image must be built.
        docker_client_mock.images.get.side_effect = ImageNotFound("image not found")

        stream = io.StringIO()

        lambda_image = LambdaImage(layer_downloader_mock, False, True, docker_client=docker_client_mock)
        actual_image_id = lambda_image.build("python3.6", ZIP, None, ["layers1"], stream=stream)

        self.assertEqual(actual_image_id, "samcli/lambda:image-version")

        # download_all is called with force=True here.
        layer_downloader_mock.download_all.assert_called_once_with(["layers1"], True)
        generate_docker_image_version_patch.assert_called_once_with(["layers1"], "python3.6")
        docker_client_mock.images.get.assert_called_once_with("samcli/lambda:image-version")
        build_image_patch.assert_called_once_with(
            "amazon/aws-sam-cli-emulation-image-python3.6:latest",
            "samcli/lambda:image-version",
            ["layers1"],
            stream=stream,
        )

    @patch("samcli.local.docker.lambda_image.LambdaImage._build_image")
    @patch("samcli.local.docker.lambda_image.LambdaImage._generate_docker_image_version")
    def test_not_force_building_image_that_doesnt_already_exists(
        self, generate_docker_image_version_patch, build_image_patch
    ):
        """Without force_image_build, a missing image is still built (not forced download)."""
        layer_downloader_mock = Mock()
        layer_downloader_mock.download_all.return_value = ["layers1"]
        generate_docker_image_version_patch.return_value = "image-version"
        docker_client_mock = Mock()
        docker_client_mock.images.get.side_effect = ImageNotFound("image not found")

        stream = io.StringIO()

        lambda_image = LambdaImage(layer_downloader_mock, False, False, docker_client=docker_client_mock)
        actual_image_id = lambda_image.build("python3.6", ZIP, None, ["layers1"], stream=stream)

        self.assertEqual(actual_image_id, "samcli/lambda:image-version")

        # download_all is called with force=False here.
        layer_downloader_mock.download_all.assert_called_once_with(["layers1"], False)
        generate_docker_image_version_patch.assert_called_once_with(["layers1"], "python3.6")
        docker_client_mock.images.get.assert_called_once_with("samcli/lambda:image-version")
        build_image_patch.assert_called_once_with(
            "amazon/aws-sam-cli-emulation-image-python3.6:latest",
            "samcli/lambda:image-version",
            ["layers1"],
            stream=stream,
        )

    @patch("samcli.local.docker.lambda_image.hashlib")
    def test_generate_docker_image_version(self, hashlib_patch):
        """Image version is '<runtime>-<sha256 of the layer names>'."""
        haslib_sha256_mock = Mock()
        hashlib_patch.sha256.return_value = haslib_sha256_mock
        haslib_sha256_mock.hexdigest.return_value = "thisisahexdigestofshahash"

        layer_mock = Mock()
        layer_mock.name = "layer1"

        image_version = LambdaImage._generate_docker_image_version([layer_mock], "runtime")

        self.assertEqual(image_version, "runtime-thisisahexdigestofshahash")

        # The hash is computed over the encoded layer name.
        hashlib_patch.sha256.assert_called_once_with(b"layer1")

    @patch("samcli.local.docker.lambda_image.docker")
    def test_generate_dockerfile(self, docker_patch):
        """Dockerfile adds the RIE binary and each layer under /opt."""
        docker_client_mock = Mock()
        docker_patch.from_env.return_value = docker_client_mock

        expected_docker_file = (
            "FROM python\nADD aws-lambda-rie /var/rapid\nRUN chmod +x /var/rapid/aws-lambda-rie\nADD layer1 /opt\n"
        )

        layer_mock = Mock()
        layer_mock.name = "layer1"

        self.assertEqual(LambdaImage._generate_dockerfile("python", [layer_mock]), expected_docker_file)

    @patch("samcli.local.docker.lambda_image.create_tarball")
    @patch("samcli.local.docker.lambda_image.uuid")
    @patch("samcli.local.docker.lambda_image.Path")
    @patch("samcli.local.docker.lambda_image.LambdaImage._generate_dockerfile")
    def test_build_image(self, generate_dockerfile_patch, path_patch, uuid_patch, create_tarball_patch):
        """Happy path: Dockerfile written, tarball built, temp file removed."""
        uuid_patch.uuid4.return_value = "uuid"
        generate_dockerfile_patch.return_value = "Dockerfile content"

        docker_full_path_mock = Mock()
        # Temp Dockerfile exists, so it gets unlinked at the end.
        docker_full_path_mock.exists.return_value = True
        path_patch.return_value = docker_full_path_mock

        docker_client_mock = Mock()
        docker_client_mock.api.build.return_value = ["Done"]
        layer_downloader_mock = Mock()
        layer_downloader_mock.layer_cache = "cached layers"

        tarball_fileobj = Mock()
        create_tarball_patch.return_value.__enter__.return_value = tarball_fileobj

        layer_version1 = Mock()
        layer_version1.codeuri = "somevalue"
        layer_version1.name = "name"

        dockerfile_mock = Mock()
        m = mock_open(dockerfile_mock)
        with patch("samcli.local.docker.lambda_image.open", m):
            LambdaImage(layer_downloader_mock, True, False, docker_client=docker_client_mock)._build_image(
                "base_image", "docker_tag", [layer_version1]
            )

        handle = m()
        handle.write.assert_called_with("Dockerfile content")
        # Dockerfile name combines the fixed prefix with the mocked uuid.
        path_patch.assert_called_once_with("cached layers", "dockerfile_uuid")
        docker_client_mock.api.build.assert_called_once_with(
            fileobj=tarball_fileobj, rm=True, tag="docker_tag", pull=False, custom_context=True
        )
        docker_full_path_mock.unlink.assert_called_once()

    @patch("samcli.local.docker.lambda_image.create_tarball")
    @patch("samcli.local.docker.lambda_image.uuid")
    @patch("samcli.local.docker.lambda_image.Path")
    @patch("samcli.local.docker.lambda_image.LambdaImage._generate_dockerfile")
    def test_build_image_fails_with_BuildError(
        self, generate_dockerfile_patch, path_patch, uuid_patch, create_tarball_patch
    ):
        """docker BuildError is wrapped in ImageBuildException; no unlink when
        the temp Dockerfile does not exist."""
        uuid_patch.uuid4.return_value = "uuid"
        generate_dockerfile_patch.return_value = "Dockerfile content"

        docker_full_path_mock = Mock()
        docker_full_path_mock.exists.return_value = False
        path_patch.return_value = docker_full_path_mock

        docker_client_mock = Mock()
        docker_client_mock.api.build.side_effect = BuildError("buildError", "buildlog")
        layer_downloader_mock = Mock()
        layer_downloader_mock.layer_cache = "cached layers"

        tarball_fileobj = Mock()
        create_tarball_patch.return_value.__enter__.return_value = tarball_fileobj

        layer_version1 = Mock()
        layer_version1.codeuri = "somevalue"
        layer_version1.name = "name"

        dockerfile_mock = Mock()
        m = mock_open(dockerfile_mock)
        with patch("samcli.local.docker.lambda_image.open", m):
            with self.assertRaises(ImageBuildException):
                LambdaImage(layer_downloader_mock, True, False, docker_client=docker_client_mock)._build_image(
                    "base_image", "docker_tag", [layer_version1]
                )

        handle = m()
        handle.write.assert_called_with("Dockerfile content")
        path_patch.assert_called_once_with("cached layers", "dockerfile_uuid")
        docker_client_mock.api.build.assert_called_once_with(
            fileobj=tarball_fileobj, rm=True, tag="docker_tag", pull=False, custom_context=True
        )
        docker_full_path_mock.unlink.assert_not_called()

    @patch("samcli.local.docker.lambda_image.create_tarball")
    @patch("samcli.local.docker.lambda_image.uuid")
    @patch("samcli.local.docker.lambda_image.Path")
    @patch("samcli.local.docker.lambda_image.LambdaImage._generate_dockerfile")
    def test_build_image_fails_with_ApiError(
        self, generate_dockerfile_patch, path_patch, uuid_patch, create_tarball_patch
    ):
        """docker APIError is wrapped in ImageBuildException; temp Dockerfile
        is still cleaned up."""
        uuid_patch.uuid4.return_value = "uuid"
        generate_dockerfile_patch.return_value = "Dockerfile content"

        docker_full_path_mock = Mock()
        path_patch.return_value = docker_full_path_mock

        docker_client_mock = Mock()
        docker_client_mock.api.build.side_effect = APIError("apiError")
        layer_downloader_mock = Mock()
        layer_downloader_mock.layer_cache = "cached layers"

        tarball_fileobj = Mock()
        create_tarball_patch.return_value.__enter__.return_value = tarball_fileobj

        layer_version1 = Mock()
        layer_version1.codeuri = "somevalue"
        layer_version1.name = "name"

        dockerfile_mock = Mock()
        m = mock_open(dockerfile_mock)
        with patch("samcli.local.docker.lambda_image.open", m):
            with self.assertRaises(ImageBuildException):
                LambdaImage(layer_downloader_mock, True, False, docker_client=docker_client_mock)._build_image(
                    "base_image", "docker_tag", [layer_version1]
                )

        handle = m()
        handle.write.assert_called_with("Dockerfile content")
        path_patch.assert_called_once_with("cached layers", "dockerfile_uuid")
        docker_client_mock.api.build.assert_called_once_with(
            fileobj=tarball_fileobj, rm=True, tag="docker_tag", pull=False, custom_context=True
        )
        docker_full_path_mock.unlink.assert_called_once()
| 44.776471 | 119 | 0.721821 | import io
import tempfile
from unittest import TestCase
from unittest.mock import patch, Mock, mock_open, ANY
from docker.errors import ImageNotFound, BuildError, APIError
from samcli.commands.local.lib.exceptions import InvalidIntermediateImageError
from samcli.lib.utils.packagetype import ZIP, IMAGE
from samcli.local.docker.lambda_image import LambdaImage
from samcli.commands.local.cli_common.user_exceptions import ImageBuildException
from samcli import __version__ as version
class TestLambdaImage(TestCase):
def setUp(self):
self.layer_cache_dir = tempfile.gettempdir()
def test_initialization_without_defaults(self):
lambda_image = LambdaImage("layer_downloader", False, False, docker_client="docker_client")
self.assertEqual(lambda_image.layer_downloader, "layer_downloader")
self.assertFalse(lambda_image.skip_pull_image)
self.assertFalse(lambda_image.force_image_build)
self.assertEqual(lambda_image.docker_client, "docker_client")
@patch("samcli.local.docker.lambda_image.docker")
def test_initialization_with_defaults(self, docker_patch):
docker_client_mock = Mock()
docker_patch.from_env.return_value = docker_client_mock
lambda_image = LambdaImage("layer_downloader", False, False)
self.assertEqual(lambda_image.layer_downloader, "layer_downloader")
self.assertFalse(lambda_image.skip_pull_image)
self.assertFalse(lambda_image.force_image_build)
self.assertEqual(lambda_image.docker_client, docker_client_mock)
def test_building_image_with_no_runtime_only_image(self):
docker_client_mock = Mock()
layer_downloader_mock = Mock()
setattr(layer_downloader_mock, "layer_cache", self.layer_cache_dir)
docker_client_mock.api.build.return_value = ["mock"]
lambda_image = LambdaImage(layer_downloader_mock, False, False, docker_client=docker_client_mock)
self.assertEqual(
lambda_image.build(None, IMAGE, "mylambdaimage:v1", []),
f"mylambdaimage:rapid-{version}",
)
@patch("samcli.local.docker.lambda_image.LambdaImage._build_image")
@patch("samcli.local.docker.lambda_image.LambdaImage._generate_docker_image_version")
def test_building_image_with_no_runtime_only_image_always_build(
self, generate_docker_image_version_patch, build_image_patch
):
docker_client_mock = Mock()
layer_downloader_mock = Mock()
setattr(layer_downloader_mock, "layer_cache", self.layer_cache_dir)
docker_client_mock.api.build.return_value = ["mock"]
generate_docker_image_version_patch.return_value = "image-version"
docker_client_mock = Mock()
docker_client_mock.images.get.return_value = Mock()
lambda_image = LambdaImage(layer_downloader_mock, False, False, docker_client=docker_client_mock)
self.assertEqual(
lambda_image.build(None, IMAGE, "mylambdaimage:v1", ["mylayer"]),
f"mylambdaimage:rapid-{version}",
)
build_image_patch.assert_called_once_with("mylambdaimage:v1", f"mylambdaimage:rapid-{version}", [], stream=ANY)
layer_downloader_mock.assert_not_called()
# NOTE(review): "accpeted" in the method name is a typo for "accepted";
# left unchanged because unittest discovers tests by name.
def test_building_image_with_non_accpeted_package_type(self):
# Both an unrecognized package-type string and None must raise
# InvalidIntermediateImageError from build().
docker_client_mock = Mock()
layer_downloader_mock = Mock()
setattr(layer_downloader_mock, "layer_cache", self.layer_cache_dir)
docker_client_mock.api.build.return_value = ["mock"]
lambda_image = LambdaImage(layer_downloader_mock, False, False, docker_client=docker_client_mock)
with self.assertRaises(InvalidIntermediateImageError):
lambda_image.build("python3.6", "Non-accepted-packagetype", None, [])
with self.assertRaises(InvalidIntermediateImageError):
lambda_image.build("python3.6", None, None, [])
def test_building_image_with_no_layers(self):
# ZIP package type with no layers: build() should resolve to the stock
# AWS emulation image for the runtime, tagged rapid-<version>.
docker_client_mock = Mock()
layer_downloader_mock = Mock()
setattr(layer_downloader_mock, "layer_cache", self.layer_cache_dir)
docker_client_mock.api.build.return_value = ["mock"]
lambda_image = LambdaImage(layer_downloader_mock, False, False, docker_client=docker_client_mock)
self.assertEqual(
lambda_image.build("python3.6", ZIP, None, []),
f"amazon/aws-sam-cli-emulation-image-python3.6:rapid-{version}",
)
@patch("samcli.local.docker.lambda_image.LambdaImage._build_image")
@patch("samcli.local.docker.lambda_image.LambdaImage._generate_docker_image_version")
def test_not_building_image_that_already_exists(self, generate_docker_image_version_patch, build_image_patch):
# If docker already has the computed samcli/lambda:<version> image,
# build() must reuse it: layers are still downloaded (force=False) but
# _build_image is never called.
layer_downloader_mock = Mock()
layer_mock = Mock()
layer_mock.name = "layers1"
layer_mock.is_defined_within_template = False
layer_downloader_mock.download_all.return_value = [layer_mock]
generate_docker_image_version_patch.return_value = "image-version"
docker_client_mock = Mock()
docker_client_mock.images.get.return_value = Mock()
lambda_image = LambdaImage(layer_downloader_mock, False, False, docker_client=docker_client_mock)
actual_image_id = lambda_image.build("python3.6", ZIP, None, [layer_mock])
self.assertEqual(actual_image_id, "samcli/lambda:image-version")
layer_downloader_mock.download_all.assert_called_once_with([layer_mock], False)
generate_docker_image_version_patch.assert_called_once_with([layer_mock], "python3.6")
docker_client_mock.images.get.assert_called_once_with("samcli/lambda:image-version")
build_image_patch.assert_not_called()
@patch("samcli.local.docker.lambda_image.LambdaImage._build_image")
@patch("samcli.local.docker.lambda_image.LambdaImage._generate_docker_image_version")
def test_force_building_image_that_doesnt_already_exists(
self, generate_docker_image_version_patch, build_image_patch
):
# force_image_build=True and the image missing locally (ImageNotFound):
# layers are force-downloaded (download_all(..., True)) and _build_image
# runs against the runtime's emulation base image.
layer_downloader_mock = Mock()
layer_downloader_mock.download_all.return_value = ["layers1"]
generate_docker_image_version_patch.return_value = "image-version"
docker_client_mock = Mock()
docker_client_mock.images.get.side_effect = ImageNotFound("image not found")
stream = io.StringIO()
lambda_image = LambdaImage(layer_downloader_mock, False, True, docker_client=docker_client_mock)
actual_image_id = lambda_image.build("python3.6", ZIP, None, ["layers1"], stream=stream)
self.assertEqual(actual_image_id, "samcli/lambda:image-version")
layer_downloader_mock.download_all.assert_called_once_with(["layers1"], True)
generate_docker_image_version_patch.assert_called_once_with(["layers1"], "python3.6")
docker_client_mock.images.get.assert_called_once_with("samcli/lambda:image-version")
build_image_patch.assert_called_once_with(
"amazon/aws-sam-cli-emulation-image-python3.6:latest",
"samcli/lambda:image-version",
["layers1"],
stream=stream,
)
@patch("samcli.local.docker.lambda_image.LambdaImage._build_image")
@patch("samcli.local.docker.lambda_image.LambdaImage._generate_docker_image_version")
def test_not_force_building_image_that_doesnt_already_exists(
self, generate_docker_image_version_patch, build_image_patch
):
# Same missing-image scenario but with force_image_build=False: the image
# is still built (it doesn't exist), only the layer download is not forced
# (download_all(..., False)).
layer_downloader_mock = Mock()
layer_downloader_mock.download_all.return_value = ["layers1"]
generate_docker_image_version_patch.return_value = "image-version"
docker_client_mock = Mock()
docker_client_mock.images.get.side_effect = ImageNotFound("image not found")
stream = io.StringIO()
lambda_image = LambdaImage(layer_downloader_mock, False, False, docker_client=docker_client_mock)
actual_image_id = lambda_image.build("python3.6", ZIP, None, ["layers1"], stream=stream)
self.assertEqual(actual_image_id, "samcli/lambda:image-version")
layer_downloader_mock.download_all.assert_called_once_with(["layers1"], False)
generate_docker_image_version_patch.assert_called_once_with(["layers1"], "python3.6")
docker_client_mock.images.get.assert_called_once_with("samcli/lambda:image-version")
build_image_patch.assert_called_once_with(
"amazon/aws-sam-cli-emulation-image-python3.6:latest",
"samcli/lambda:image-version",
["layers1"],
stream=stream,
)
@patch("samcli.local.docker.lambda_image.hashlib")
def test_generate_docker_image_version(self, hashlib_patch):
# The image version must be "<runtime>-<sha256 hexdigest of the layer names>";
# verify sha256 is fed the layer name as bytes.
haslib_sha256_mock = Mock()
hashlib_patch.sha256.return_value = haslib_sha256_mock
haslib_sha256_mock.hexdigest.return_value = "thisisahexdigestofshahash"
layer_mock = Mock()
layer_mock.name = "layer1"
image_version = LambdaImage._generate_docker_image_version([layer_mock], "runtime")
self.assertEqual(image_version, "runtime-thisisahexdigestofshahash")
hashlib_patch.sha256.assert_called_once_with(b"layer1")
@patch("samcli.local.docker.lambda_image.docker")
def test_generate_dockerfile(self, docker_patch):
# Generated Dockerfile: FROM the base image, install the RIE binary into
# /var/rapid, then ADD each layer into /opt.
docker_client_mock = Mock()
docker_patch.from_env.return_value = docker_client_mock
expected_docker_file = (
"FROM python\nADD aws-lambda-rie /var/rapid\nRUN chmod +x /var/rapid/aws-lambda-rie\nADD layer1 /opt\n"
)
layer_mock = Mock()
layer_mock.name = "layer1"
self.assertEqual(LambdaImage._generate_dockerfile("python", [layer_mock]), expected_docker_file)
@patch("samcli.local.docker.lambda_image.create_tarball")
@patch("samcli.local.docker.lambda_image.uuid")
@patch("samcli.local.docker.lambda_image.Path")
@patch("samcli.local.docker.lambda_image.LambdaImage._generate_dockerfile")
def test_build_image(self, generate_dockerfile_patch, path_patch, uuid_patch, create_tarball_patch):
# Happy path of _build_image: the generated Dockerfile is written to the
# layer cache (named "dockerfile_<uuid>" — presumably built from
# uuid.uuid4(); TODO confirm against the implementation), the build runs
# from the tarball context, and the temp Dockerfile is unlinked afterwards.
uuid_patch.uuid4.return_value = "uuid"
generate_dockerfile_patch.return_value = "Dockerfile content"
docker_full_path_mock = Mock()
docker_full_path_mock.exists.return_value = True
path_patch.return_value = docker_full_path_mock
docker_client_mock = Mock()
docker_client_mock.api.build.return_value = ["Done"]
layer_downloader_mock = Mock()
layer_downloader_mock.layer_cache = "cached layers"
tarball_fileobj = Mock()
create_tarball_patch.return_value.__enter__.return_value = tarball_fileobj
layer_version1 = Mock()
layer_version1.codeuri = "somevalue"
layer_version1.name = "name"
dockerfile_mock = Mock()
# mock_open lets us assert on the Dockerfile write without touching disk.
m = mock_open(dockerfile_mock)
with patch("samcli.local.docker.lambda_image.open", m):
LambdaImage(layer_downloader_mock, True, False, docker_client=docker_client_mock)._build_image(
"base_image", "docker_tag", [layer_version1]
)
handle = m()
handle.write.assert_called_with("Dockerfile content")
path_patch.assert_called_once_with("cached layers", "dockerfile_uuid")
docker_client_mock.api.build.assert_called_once_with(
fileobj=tarball_fileobj, rm=True, tag="docker_tag", pull=False, custom_context=True
)
docker_full_path_mock.unlink.assert_called_once()
@patch("samcli.local.docker.lambda_image.create_tarball")
@patch("samcli.local.docker.lambda_image.uuid")
@patch("samcli.local.docker.lambda_image.Path")
@patch("samcli.local.docker.lambda_image.LambdaImage._generate_dockerfile")
def test_build_image_fails_with_BuildError(
self, generate_dockerfile_patch, path_patch, uuid_patch, create_tarball_patch
):
# docker-py BuildError must be surfaced as ImageBuildException; since the
# Dockerfile path reports exists()==False here, unlink must NOT be called.
uuid_patch.uuid4.return_value = "uuid"
generate_dockerfile_patch.return_value = "Dockerfile content"
docker_full_path_mock = Mock()
docker_full_path_mock.exists.return_value = False
path_patch.return_value = docker_full_path_mock
docker_client_mock = Mock()
docker_client_mock.api.build.side_effect = BuildError("buildError", "buildlog")
layer_downloader_mock = Mock()
layer_downloader_mock.layer_cache = "cached layers"
tarball_fileobj = Mock()
create_tarball_patch.return_value.__enter__.return_value = tarball_fileobj
layer_version1 = Mock()
layer_version1.codeuri = "somevalue"
layer_version1.name = "name"
dockerfile_mock = Mock()
m = mock_open(dockerfile_mock)
with patch("samcli.local.docker.lambda_image.open", m):
with self.assertRaises(ImageBuildException):
LambdaImage(layer_downloader_mock, True, False, docker_client=docker_client_mock)._build_image(
"base_image", "docker_tag", [layer_version1]
)
handle = m()
handle.write.assert_called_with("Dockerfile content")
path_patch.assert_called_once_with("cached layers", "dockerfile_uuid")
docker_client_mock.api.build.assert_called_once_with(
fileobj=tarball_fileobj, rm=True, tag="docker_tag", pull=False, custom_context=True
)
docker_full_path_mock.unlink.assert_not_called()
@patch("samcli.local.docker.lambda_image.create_tarball")
@patch("samcli.local.docker.lambda_image.uuid")
@patch("samcli.local.docker.lambda_image.Path")
@patch("samcli.local.docker.lambda_image.LambdaImage._generate_dockerfile")
def test_build_image_fails_with_ApiError(
self, generate_dockerfile_patch, path_patch, uuid_patch, create_tarball_patch
):
# docker-py APIError must also map to ImageBuildException; unlike the
# BuildError case above exists() is not forced to False, so the temp
# Dockerfile cleanup (unlink) is still expected to run.
uuid_patch.uuid4.return_value = "uuid"
generate_dockerfile_patch.return_value = "Dockerfile content"
docker_full_path_mock = Mock()
path_patch.return_value = docker_full_path_mock
docker_client_mock = Mock()
docker_client_mock.api.build.side_effect = APIError("apiError")
layer_downloader_mock = Mock()
layer_downloader_mock.layer_cache = "cached layers"
tarball_fileobj = Mock()
create_tarball_patch.return_value.__enter__.return_value = tarball_fileobj
layer_version1 = Mock()
layer_version1.codeuri = "somevalue"
layer_version1.name = "name"
dockerfile_mock = Mock()
m = mock_open(dockerfile_mock)
with patch("samcli.local.docker.lambda_image.open", m):
with self.assertRaises(ImageBuildException):
LambdaImage(layer_downloader_mock, True, False, docker_client=docker_client_mock)._build_image(
"base_image", "docker_tag", [layer_version1]
)
handle = m()
handle.write.assert_called_with("Dockerfile content")
path_patch.assert_called_once_with("cached layers", "dockerfile_uuid")
docker_client_mock.api.build.assert_called_once_with(
fileobj=tarball_fileobj, rm=True, tag="docker_tag", pull=False, custom_context=True
)
docker_full_path_mock.unlink.assert_called_once()
| true | true |
f7fd90cf5e1c4635af96e61f23b0339f2737b2ca | 2,209 | py | Python | source/ogame_constant.py | Stegoo/ogame-caller | 29efcb36a503cae17110a52d3a4079a0a7103c80 | [
"MIT"
] | 1 | 2016-06-11T08:09:55.000Z | 2016-06-11T08:09:55.000Z | source/ogame_constant.py | Stegoo/ogame-caller | 29efcb36a503cae17110a52d3a4079a0a7103c80 | [
"MIT"
] | null | null | null | source/ogame_constant.py | Stegoo/ogame-caller | 29efcb36a503cae17110a52d3a4079a0a7103c80 | [
"MIT"
] | null | null | null | Buildings = {'MetalMine': 1,
'CrystalMine': 2,
'DeuteriumSynthesizer': 3,
'SolarPlant': 4,
'FusionReactor': 12,
'MetalStorage': 22,
'CrystalStorage': 23,
'DeuteriumTank': 24,
'ShieldedMetalDen': 25,
'UndergroundCrystalDen': 26,
'SeabedDeuteriumDen': 27}
Defense = {'RocketLauncher': 401,
'LightLaser': 402,
'HeavyLaser': 403,
'GaussCannon': 404,
'IonCannon': 405,
'PlasmaTurret': 406,
'SmallShieldDome': 407,
'LargeShieldDome': 408,
'AntiBallisticMissiles': 502,
'InterplanetaryMissiles': 503}
Ships = {'SmallCargo': 202,
'LargeCargo': 203,
'LightFighter': 204,
'HeavyFighter': 205,
'Cruiser': 206,
'Battleship': 207,
'ColonyShip': 208,
'Recycler': 209,
'EspionageProbe': 210,
'Bomber': 211,
'SolarSatellite': 212,
'Destroyer': 213,
'Deathstar': 214,
'Battlecruiser': 215}
Research = {'EspionageTechnology': 106,
'ComputerTechnology': 108,
'WeaponsTechnology': 109,
'ShieldingTechnology': 110,
'ArmourTechnology': 111,
'EnergyTechnology': 113,
'HyperspaceTechnology': 114,
'CombustionDrive': 115,
'ImpulseDrive': 117,
'HyperspaceDrive': 118,
'LaserTechnology': 120,
'IonTechnology': 121,
'PlasmaTechnology': 122,
'IntergalacticResearchNetwork': 123,
'Astrophysics': 124,
'GravitonTechnology': 199}
Speed = {'10%': 1,
'20%': 2,
'30%': 3,
'40%': 4,
'50%': 5,
'60%': 6,
'70%': 7,
'80%': 8,
'90%': 9,
'100%': 10}
Missions = {'Attack': 1,
'GroupedAttack': 2,
'Transport': 3,
'Park': 4,
'ParkInThatAlly': 5,
'Spy': 6,
'Colonize': 7,
'RecycleDebrisField': 8,
'Destroy': 9,
'Expedition': 15}
| 26.939024 | 48 | 0.463558 | Buildings = {'MetalMine': 1,
'CrystalMine': 2,
'DeuteriumSynthesizer': 3,
'SolarPlant': 4,
'FusionReactor': 12,
'MetalStorage': 22,
'CrystalStorage': 23,
'DeuteriumTank': 24,
'ShieldedMetalDen': 25,
'UndergroundCrystalDen': 26,
'SeabedDeuteriumDen': 27}
Defense = {'RocketLauncher': 401,
'LightLaser': 402,
'HeavyLaser': 403,
'GaussCannon': 404,
'IonCannon': 405,
'PlasmaTurret': 406,
'SmallShieldDome': 407,
'LargeShieldDome': 408,
'AntiBallisticMissiles': 502,
'InterplanetaryMissiles': 503}
Ships = {'SmallCargo': 202,
'LargeCargo': 203,
'LightFighter': 204,
'HeavyFighter': 205,
'Cruiser': 206,
'Battleship': 207,
'ColonyShip': 208,
'Recycler': 209,
'EspionageProbe': 210,
'Bomber': 211,
'SolarSatellite': 212,
'Destroyer': 213,
'Deathstar': 214,
'Battlecruiser': 215}
Research = {'EspionageTechnology': 106,
'ComputerTechnology': 108,
'WeaponsTechnology': 109,
'ShieldingTechnology': 110,
'ArmourTechnology': 111,
'EnergyTechnology': 113,
'HyperspaceTechnology': 114,
'CombustionDrive': 115,
'ImpulseDrive': 117,
'HyperspaceDrive': 118,
'LaserTechnology': 120,
'IonTechnology': 121,
'PlasmaTechnology': 122,
'IntergalacticResearchNetwork': 123,
'Astrophysics': 124,
'GravitonTechnology': 199}
Speed = {'10%': 1,
'20%': 2,
'30%': 3,
'40%': 4,
'50%': 5,
'60%': 6,
'70%': 7,
'80%': 8,
'90%': 9,
'100%': 10}
Missions = {'Attack': 1,
'GroupedAttack': 2,
'Transport': 3,
'Park': 4,
'ParkInThatAlly': 5,
'Spy': 6,
'Colonize': 7,
'RecycleDebrisField': 8,
'Destroy': 9,
'Expedition': 15}
| true | true |
f7fd91e957d2685596b23872ecc78b9282f85139 | 106 | py | Python | tests/test_zip2np.py | borjaeg/zip2np | e55f0e13b8807c086946c0411dbefae0a022f325 | [
"MIT"
] | null | null | null | tests/test_zip2np.py | borjaeg/zip2np | e55f0e13b8807c086946c0411dbefae0a022f325 | [
"MIT"
] | null | null | null | tests/test_zip2np.py | borjaeg/zip2np | e55f0e13b8807c086946c0411dbefae0a022f325 | [
"MIT"
] | null | null | null | from zip2np import zip2np
# Expects load_datasets to signal a bad (negative) target size by returning -1.
# NOTE(review): the trailing "| 26.5 | ..." residue on the assert line is
# dataset-dump artifact fused into this row, preserved verbatim.
def test_positive_size():
assert zip2np.load_datasets(".", (64, -64)) == -1 | 26.5 | 53 | 0.688679 | from zip2np import zip2np
def test_positive_size():
assert zip2np.load_datasets(".", (64, -64)) == -1 | true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.