hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acdf3a3845b48019aa8e8944020953f5d013f282 | 2,571 | py | Python | matorage/nas.py | jinserk/matorage | 8789f13c034e8aa6ba18225f1664025d4521429e | [
"Apache-2.0"
] | null | null | null | matorage/nas.py | jinserk/matorage | 8789f13c034e8aa6ba18225f1664025d4521429e | [
"Apache-2.0"
] | null | null | null | matorage/nas.py | jinserk/matorage | 8789f13c034e8aa6ba18225f1664025d4521429e | [
"Apache-2.0"
] | null | null | null | # Copyright 2020-present Tae Hwan Jung
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
class Obj(object):
def __init__(self, object_name):
self.object_name = object_name
class NAS(object):
def __init__(self, path):
self.path = path
def bucket_exists(self, bucket_name):
return os.path.exists(os.path.join(self.path, bucket_name))
def fget_object(self, bucket_name, object_name, file_path):
pass
def fput_object(self, bucket_name, object_name, file_path, part_size=None):
_filename = os.path.join(self.path, bucket_name, object_name)
if not os.path.exists(os.path.dirname(_filename)):
os.makedirs(os.path.dirname(_filename))
shutil.copyfile(src=file_path, dst=_filename)
def get_object(self, bucket_name, object_name):
_filename = os.path.join(self.path, bucket_name, object_name)
return open(_filename, "rb")
def put_object(self, bucket_name, object_name, data, length, part_size=None):
_filename = os.path.join(self.path, bucket_name, object_name)
if not os.path.exists(os.path.dirname(_filename)):
os.makedirs(os.path.dirname(_filename))
data.seek(0)
with open(_filename, "wb") as f:
shutil.copyfileobj(data, f, length=length)
def list_objects(self, bucket_name, prefix="", recursive=False):
_foldername = os.path.join(self.path, bucket_name)
if not recursive:
objects = os.listdir(_foldername)
else:
objects = [
os.path.join(dp, f) for dp, dn, fn in os.walk(_foldername) for f in fn
]
return [Obj(o) for o in objects if o.startswith(prefix)]
def make_bucket(self, bucket_name, location):
os.makedirs(os.path.join(self.path, bucket_name))
def remove_bucket(self, bucket_name):
shutil.rmtree(os.path.join(self.path, bucket_name))
def remove_object(self, bucket_name, object_name):
os.remove(os.path.join(self.path, bucket_name, object_name))
| 36.728571 | 86 | 0.683003 |
acdf3a55a865814cf9f73265d654c485bb61c07d | 2,846 | py | Python | luna.py | Aripiyok/LunaChatBot | ac82f6536387f243318c4976c41de3766b068812 | [
"MIT"
] | 1 | 2021-09-09T19:05:13.000Z | 2021-09-09T19:05:13.000Z | luna.py | Aripiyok/LunaChatBot | ac82f6536387f243318c4976c41de3766b068812 | [
"MIT"
] | null | null | null | luna.py | Aripiyok/LunaChatBot | ac82f6536387f243318c4976c41de3766b068812 | [
"MIT"
] | null | null | null | import re
import os
from asyncio import gather, get_event_loop, sleep
from aiohttp import ClientSession
from pyrogram import Client, filters, idle
from Python_ARQ import ARQ
is_config = os.path.exists("config.py")
if is_config:
from config import *
else:
from sample_config import *
luna = Client(
":memory:",
bot_token=bot_token,
api_id=6,
api_hash="eb06d4abfb49dc3eeb1aeb98ae0f581e",
)
bot_id = int(bot_token.split(":")[0])
arq = None
async def lunaQuery(query: str, user_id: int):
query = (
query
if LANGUAGE == "en"
else (await arq.translate(query, "en")).result.translatedText
)
resp = (await arq.luna(query, user_id)).result
return (
resp
if LANGUAGE == "en"
else (
await arq.translate(resp, LANGUAGE)
).result.translatedText
)
async def type_and_send(message):
chat_id = message.chat.id
user_id = message.from_user.id if message.from_user else 0
query = message.text.strip()
await message._client.send_chat_action(chat_id, "typing")
response, _ = await gather(lunaQuery(query, user_id), sleep(2))
await message.reply_text(response)
await message._client.send_chat_action(chat_id, "cancel")
@luna.on_message(filters.command("repo") & ~filters.edited)
async def repo(_, message):
await message.reply_text(
"[GitHub](https://github.com/thehamkercat/LunaChatBot)"
+ " | [Group](t.me/PatheticProgrammers)",
disable_web_page_preview=True,
)
@luna.on_message(filters.command("help") & ~filters.edited)
async def start(_, message):
await luna.send_chat_action(message.chat.id, "typing")
await sleep(2)
await message.reply_text("/repo - Get Repo Link")
@luna.on_message(
~filters.private
& filters.text
& ~filters.command("help")
& ~filters.edited,
group=69,
)
async def chat(_, message):
if message.reply_to_message:
if not message.reply_to_message.from_user:
return
from_user_id = message.reply_to_message.from_user.id
if from_user_id != bot_id:
return
else:
match = re.search(
"[.|\n]{0,}boy[.|\n]{0,}",
message.text.strip(),
flags=re.IGNORECASE,
)
if not match:
return
await type_and_send(message)
@luna.on_message(
filters.private & ~filters.command("help") & ~filters.edited
)
async def chatpm(_, message):
if not message.text:
return
await type_and_send(message)
async def main():
global arq
session = ClientSession()
arq = ARQ(ARQ_API_BASE_URL, ARQ_API_KEY, session)
await luna.start()
print(
"""
-----------------
| Luna Started! |
-----------------
"""
)
await idle()
loop = get_event_loop()
loop.run_until_complete(main())
| 23.520661 | 69 | 0.637737 |
acdf3cc31da82665db4908cf4e115effb98db4f9 | 1,685 | py | Python | ProteomicsUtils/LoggerConfig.py | ormsbya/ProteomicsUtils | c06446529e68c25874a980383dea572e21d4bf4e | [
"MIT"
] | null | null | null | ProteomicsUtils/LoggerConfig.py | ormsbya/ProteomicsUtils | c06446529e68c25874a980383dea572e21d4bf4e | [
"MIT"
] | 8 | 2018-07-13T00:43:48.000Z | 2018-12-30T03:43:08.000Z | ProteomicsUtils/LoggerConfig.py | ormsbya/ProteomicsUtils | c06446529e68c25874a980383dea572e21d4bf4e | [
"MIT"
] | 1 | 2018-09-10T06:04:30.000Z | 2018-09-10T06:04:30.000Z | import logging
import sys, os
def logger_config(logger_name, logPath=False, file_log=True, print_log=True):
"""
Takes logging.info and above messages, printing them to console and saving to a text file
Parameters:
logger_name: string
name of the logger to be instantiated e.g. analysis
logPath: string or False
file path to save the Log file to, defaults to false in which case Log file is saved to current working directory
file_log: bool
default True, instantiates the creation of the logger file. If false, does not save log to file.
print_log: bool
default True, instantiates the messages to be printed to console. If false, does not print.
"""
#to set output path as current working directory if LogPath not given
if not logPath:
logPath = os.getcwd()
else:
pass
#set file name, create formatter for log output
fileName = 'Log file'
logFormatter = logging.Formatter("%(asctime)s %(name)s: [%(levelname)-5.5s] %(message)s")
currentLogger = logging.getLogger(logger_name)
currentLogger.setLevel(logging.DEBUG)
#check if current logger already has handlers attached
if len(currentLogger.handlers):
return currentLogger
else:
if file_log:
fileHandler = logging.FileHandler("{0}/{1}.log".format(logPath, fileName))
fileHandler.setFormatter(logFormatter)
currentLogger.addHandler(fileHandler)
if print_log:
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
currentLogger.addHandler(consoleHandler)
return currentLogger
| 38.295455 | 121 | 0.689021 |
acdf3d38ae2140f6c9c3b02df83044a5f55e1572 | 900 | py | Python | dynamic_programming/fast_fibonacci.py | TeddyFirman/Algorithm_Python | edbd50a97a62c2beb2a187e4c411c677aa43115e | [
"MIT"
] | null | null | null | dynamic_programming/fast_fibonacci.py | TeddyFirman/Algorithm_Python | edbd50a97a62c2beb2a187e4c411c677aa43115e | [
"MIT"
] | null | null | null | dynamic_programming/fast_fibonacci.py | TeddyFirman/Algorithm_Python | edbd50a97a62c2beb2a187e4c411c677aa43115e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
This program calculates the nth Fibonacci number in O(log(n)).
It's possible to calculate F(1_000_000) in less than a second.
"""
from __future__ import annotations
import sys
def fibonacci(n: int) -> int:
"""
return F(n)
>>> [fibonacci(i) for i in range(13)]
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144]
"""
if n < 0:
raise ValueError("Negative arguments are not supported")
return _fib(n)[0]
# returns (F(n), F(n-1))
def _fib(n: int) -> tuple[int, int]:
if n == 0: # (F(0), F(1))
return (0, 1)
# F(2n) = F(n)[2F(n+1) − F(n)]
# F(2n+1) = F(n+1)^2+F(n)^2
a, b = _fib(n // 2)
c = a * (b * 2 - a)
d = a * a + b * b
return (d, c + d) if n % 2 else (c, d)
if __name__ == "__main__":
n = int(sys.argv[1])
print(f"fibonacci({n}) is {fibonacci(n)}")
| 23.076923 | 65 | 0.504444 |
acdf3d4c6d4aab320a23286d9d33d4d781f6fd96 | 658 | py | Python | tests/fraction_tests/test_bool.py | davideguidobene/cfractions | 2fc33d3ddf972e07787bdc99443868137999e114 | [
"MIT"
] | 3 | 2022-01-18T21:17:17.000Z | 2022-01-23T21:49:52.000Z | tests/fraction_tests/test_bool.py | davideguidobene/cfractions | 2fc33d3ddf972e07787bdc99443868137999e114 | [
"MIT"
] | 3 | 2021-06-28T13:30:58.000Z | 2022-01-16T19:05:00.000Z | tests/fraction_tests/test_bool.py | davideguidobene/cfractions | 2fc33d3ddf972e07787bdc99443868137999e114 | [
"MIT"
] | 1 | 2021-10-22T02:12:06.000Z | 2021-10-22T02:12:06.000Z | import sys
from hypothesis import given
from cfractions import Fraction
from tests.utils import (equivalence,
skip_reference_counter_test)
from . import strategies
@given(strategies.fractions)
def test_properties(fraction: Fraction) -> None:
assert equivalence(bool(fraction), bool(fraction.numerator))
@skip_reference_counter_test
@given(strategies.fractions)
def test_reference_counter(fraction: Fraction) -> None:
fraction_refcount_before = sys.getrefcount(fraction)
result = bool(fraction)
fraction_refcount_after = sys.getrefcount(fraction)
assert fraction_refcount_after == fraction_refcount_before
| 26.32 | 64 | 0.775076 |
acdf3db4229b6c0f858a1b961ac51ec78e309e93 | 673 | py | Python | piptools/_compat/__init__.py | davidclin/pip-tools | 0eeb7de34aa521579515ff70afaf6369c05ad10a | [
"BSD-3-Clause"
] | null | null | null | piptools/_compat/__init__.py | davidclin/pip-tools | 0eeb7de34aa521579515ff70afaf6369c05ad10a | [
"BSD-3-Clause"
] | null | null | null | piptools/_compat/__init__.py | davidclin/pip-tools | 0eeb7de34aa521579515ff70afaf6369c05ad10a | [
"BSD-3-Clause"
] | 1 | 2019-06-27T07:27:27.000Z | 2019-06-27T07:27:27.000Z | # coding: utf-8
# flake8: noqa
from __future__ import absolute_import, division, print_function, unicode_literals
import six
from .pip_compat import (
DEV_PKGS,
FAVORITE_HASH,
Command,
FormatControl,
InstallRequirement,
Link,
PackageFinder,
PyPI,
RequirementSet,
Wheel,
cmdoptions,
get_installed_distributions,
install_req_from_editable,
install_req_from_line,
is_dir_url,
is_file_url,
is_vcs_url,
parse_requirements,
path_to_url,
stdlib_pkgs,
url_to_path,
user_cache_dir,
)
if six.PY2:
from .tempfile import TemporaryDirectory
else:
from tempfile import TemporaryDirectory
| 18.694444 | 82 | 0.719168 |
acdf3e2989691151f049d95dd8f394f2e0bebe7c | 3,201 | py | Python | Functions/Contact_Force_Fun.py | ShihaoWang/Contact-Transition-Tree | cf53aaea8a3a61d3eb92b96a6ca16a3b0c791afc | [
"MIT"
] | 2 | 2019-02-19T19:05:15.000Z | 2019-05-05T20:07:16.000Z | Functions/Contact_Force_Fun.py | ShihaoWang/Contact-Transition-Tree | cf53aaea8a3a61d3eb92b96a6ca16a3b0c791afc | [
"MIT"
] | null | null | null | Functions/Contact_Force_Fun.py | ShihaoWang/Contact-Transition-Tree | cf53aaea8a3a61d3eb92b96a6ca16a3b0c791afc | [
"MIT"
] | null | null | null | import sys, os
from random import randint
sys.path.insert(0, '/home/shihao/trajOptLib')
from trajOptLib.io import getOnOffArgs
from trajOptLib import trajOptCollocProblem
from trajOptLib.snoptWrapper import directSolve
from trajOptLib.libsnopt import snoptConfig, probFun, solver
import functools
import numpy as np
import math
from OwnLib import *
from Terrain_Fun import *
# This function is used to calculate the contact force related function
mu = 0.35
def Contact_Force_Constraint(contact_force, contact_link_list, contact_link_status, terr_model, y_val, y_type):
# This function is used to take care of the contact force constraint given the contact force, contact link list and contact link status
# The order of contact force is arranged according to the contact link list
contact_force_index = 0
for i in range(0, len(contact_link_list)):
# This is the main force categories
contact_link_i = contact_link_list[i]
contact_link_i_status = contact_link_status[contact_link_i]
for j in range(0, len(contact_link_i_status)):
contact_link_i_point_j_status = contact_link_i_status[j]
if contact_link_i_point_j_status == 1:
# This means that the constraint force is active so should be feasible
contact_force_of_link_i_at_point_j = Contact_Force_Element_from_Index(contact_force, contact_force_index)
# Here this force is a 3 by 1 list
_, _, Contact_Link_i_Point_j_Normal = Robot_Link_2_All_Terr_Dist(contact_force_of_link_i_at_point_j, terr_model)
# Positivity constraint
contact_force_of_link_i_at_point_j_normal = Dot_Product(contact_force_of_link_i_at_point_j, Contact_Link_i_Point_j_Normal)
y_val.append(contact_force_of_link_i_at_point_j_normal)
y_type.append(1)
# Friction cone constraint
contact_force_of_link_i_at_point_j_SoS = Dot_Product(contact_force_of_link_i_at_point_j, contact_force_of_link_i_at_point_j)
contact_force_of_link_i_at_point_j_tang_sq = contact_force_of_link_i_at_point_j_SoS - contact_force_of_link_i_at_point_j_normal * contact_force_of_link_i_at_point_j_normal
contact_force_of_link_i_at_point_j_friction_constraint = mu * mu * contact_force_of_link_i_at_point_j_normal * contact_force_of_link_i_at_point_j_normal - contact_force_of_link_i_at_point_j_tang_sq
y_val.append(contact_force_of_link_i_at_point_j_friction_constraint)
y_type.append(1)
else:
# This means that the constraint force is notactive so should be zero
contact_force_of_link_i_at_point_j = Contact_Force_Element_from_Index(contact_force, contact_force_index)
List_Obj_Update(contact_force_of_link_i_at_point_j, 0, y_val, y_type)
contact_force_index = contact_force_index + 1
def Contact_Force_Element_from_Index(contact_force, index_i):
# This function gets the contact force element from certain given index_i
start_index = 3 * index_i
end_index = start_index + 3
return contact_force[start_index:end_index]
| 56.157895 | 213 | 0.757576 |
acdf3e388562fb0f1a84729ffb58eabcef8aa5c6 | 399 | py | Python | yamlintro/readyaml03.py | gjesionowski/mycode | 36aecc1343c72d88e99511adacb9980061c600d8 | [
"MIT"
] | null | null | null | yamlintro/readyaml03.py | gjesionowski/mycode | 36aecc1343c72d88e99511adacb9980061c600d8 | [
"MIT"
] | null | null | null | yamlintro/readyaml03.py | gjesionowski/mycode | 36aecc1343c72d88e99511adacb9980061c600d8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# YAML is NOT part of the standard library
# python3 -m pip install pyyaml
import yaml
def main():
## Open a blob of YAML data
yammyfile = open("/home/student/mycode/yamlintro/myYAML.yml", "r")
## convert YAML into Python data structures (lists and dictionaries)
pyyammy = yaml.load(yammyfile)
# display our new Python data
print(pyyammy)
main()
| 21 | 72 | 0.691729 |
acdf3e55cd2c8792fb197a984b6a2c5d5f92f167 | 1,302 | py | Python | scraper/scraper3.py | SebChw/IsMusicANaturalLanguage | 9cb245f9bea6c0f93863920fceeea867efa73ded | [
"MIT"
] | null | null | null | scraper/scraper3.py | SebChw/IsMusicANaturalLanguage | 9cb245f9bea6c0f93863920fceeea867efa73ded | [
"MIT"
] | null | null | null | scraper/scraper3.py | SebChw/IsMusicANaturalLanguage | 9cb245f9bea6c0f93863920fceeea867efa73ded | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
import requests
import os
""" This script download all songs with given genre from midiworld.com
"""
genre_name = input(
"type in genre name (lowercase, no space, no special characters): ")
# Just in case someone don't respect the rules.
genre_name = genre_name.lower()
genre_name = genre_name.strip()
genre_name = "".join(genre_name.split(" "))
folder = os.path.join("genresDataset", genre_name, "midiworld")
if not os.path.isdir(folder):
os.mkdir(folder)
#Here I was lazy, the biggest genre on that page has 38 pages so I've done it that way.
#If there is no page we will not get any answer, and just run the loop withouth doing anything.
for i in range(1, 38):
URL = f"https://www.midiworld.com/search/{i}/?q={genre_name}"
page = requests.get(URL)
soup = BeautifulSoup(page.content, "html.parser")
results = soup.find_all("li")
for r in results:
link = r.find("a")
if link:
if "download" in link:
link = link['href']
song_title = r.text.split("-")[0].strip()
print(f"Downloading: {song_title}")
song = requests.get(link)
with open(os.path.join(folder, song_title + ".mid"), "wb") as f:
f.write(song.content)
| 35.189189 | 95 | 0.635177 |
acdf3f69ab882cc00260ecedda574da08d6a70e5 | 1,301 | py | Python | adyen_notification_proxy/management.py | onefinestay/adyen-notification-proxy | e8131783ff04200014d5dd5b8b0d2391abe6661c | [
"Apache-2.0"
] | 2 | 2017-06-23T12:04:12.000Z | 2021-02-11T20:50:17.000Z | adyen_notification_proxy/management.py | onefinestay/adyen-notification-proxy | e8131783ff04200014d5dd5b8b0d2391abe6661c | [
"Apache-2.0"
] | 1 | 2015-10-21T12:40:39.000Z | 2015-10-26T16:24:31.000Z | adyen_notification_proxy/management.py | onefinestay/adyen-notification-proxy | e8131783ff04200014d5dd5b8b0d2391abe6661c | [
"Apache-2.0"
] | 2 | 2015-12-13T14:32:11.000Z | 2017-02-16T10:09:57.000Z | import sys
import uuid
from flask import request
from adyen_notification_proxy import app
from adyen_notification_proxy.models import db, Endpoint
NA = "n/a"
@app.route('/')
def help():
return """
Adyen proxy
===========
Available endpoints:
/ (GET) display this help
/list/ (GET) list all registered endpoints
/register/ (POST callback url): register endpoint. Returns a uuid to
be prefixed to your merchant reference for routing.
Registering an existing url returns the existing reference
"""
@app.route('/list/')
def list():
endpoints = db.session.query(Endpoint).all()
return '\n'.join(str(e) for e in endpoints) + '\n'
@app.route('/register/', methods=['POST'])
def register():
url = request.data
if url:
existing = db.session.query(Endpoint).filter_by(url=url).first()
if existing:
return existing.ref
ref = str(uuid.uuid4())
endpoint = Endpoint(ref=ref, url=url)
db.session.add(endpoint)
db.session.commit()
return endpoint.ref
return "Error parsing input", 400
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == '--setup':
db.create_all()
app.run(host="0.0.0.0", port=5000)
| 23.654545 | 78 | 0.614143 |
acdf3f84ef52d603ccee77652f245822b71d602e | 790 | py | Python | qatrack/urls.py | jrkerns/qatrackplus | 95562621e59dd56667d1e2b1753943264b182e0c | [
"MIT"
] | 5 | 2016-08-22T04:58:58.000Z | 2021-03-27T07:34:06.000Z | qatrack/urls.py | jrkerns/qatrackplus | 95562621e59dd56667d1e2b1753943264b182e0c | [
"MIT"
] | null | null | null | qatrack/urls.py | jrkerns/qatrackplus | 95562621e59dd56667d1e2b1753943264b182e0c | [
"MIT"
] | 3 | 2017-04-27T13:21:26.000Z | 2018-04-16T03:45:10.000Z | from django.conf import settings
from django.conf.urls.defaults import patterns, include, url
from django.views.generic.base import TemplateView
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', TemplateView.as_view(template_name="homepage.html"), name="home"),
url(r'^accounts/', include('qatrack.accounts.urls')),
url(r'^qa/', include('qatrack.qa.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('genericdropdown.urls')),
)
if settings.DEBUG:
urlpatterns += patterns('',
(r'^%s(?P<path>.*)$' % settings.MEDIA_URL[1:],
'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
)
| 28.214286 | 81 | 0.66962 |
acdf3fe28d9a67e5aa22e11f985b48d8c4dd06db | 9,383 | py | Python | doc/src/conf.py | tasts-robots/aiorate | ac71a3c3f7a37eab2bf32667e1b5c705c9ded0c8 | [
"Apache-2.0"
] | null | null | null | doc/src/conf.py | tasts-robots/aiorate | ac71a3c3f7a37eab2bf32667e1b5c705c9ded0c8 | [
"Apache-2.0"
] | null | null | null | doc/src/conf.py | tasts-robots/aiorate | ac71a3c3f7a37eab2bf32667e1b5c705c9ded0c8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2022 Stéphane Caron
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
from os.path import dirname, join, abspath
sys.path.insert(0, abspath("../.."))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.coverage",
"sphinx-mathjax-offline",
"sphinx.ext.napoleon", # before sphinx_autodoc_typehints
"sphinx_autodoc_typehints"
]
# List of modules to be mocked up
autodoc_mock_imports = ["ecos", "gurobipy", "mosek", "osqp", "qpoases"]
# Add any paths that contain templates here, relative to this directory.
templates_path = []
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "aiorate"
copyright = "2022 Stéphane Caron"
author = "Stéphane Caron"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = None # read from __init__.py
# The full version, including alpha/beta/rc tags.
release = None # read from __init__.py
# Read version info directly from the module's __init__.py
init_path = join(
dirname(dirname(dirname(str(abspath(__file__))))), "aiorate"
)
with open(f"{init_path}/__init__.py", "r") as fh:
for line in fh:
match = re.match('__version__ = "((\\d.\\d).\\d)".*', line)
if match is not None:
release = match.group(1)
version = match.group(2)
break
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["build", "Thumbs.db", ".DS_Store"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for a
# list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or
# 32x32 pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# These paths are either relative to html_static_path or fully qualified paths
# (eg. https://...)
html_css_files = []
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "aiorate" + "doc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
"papersize": "a4paper",
# The font size ('10pt', '11pt' or '12pt').
#
"pointsize": "12pt",
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
project + ".tex",
project + " Documentation",
author,
"howto",
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
| 29.139752 | 79 | 0.700735 |
acdf4013d327b250dcacf409bcfb2a4d79d62620 | 32,547 | py | Python | efficientdet/keras/train_lib.py | ahsha-lang/automl | 8ee389c39d0101f407f57781e174610a8cf22b65 | [
"Apache-2.0"
] | null | null | null | efficientdet/keras/train_lib.py | ahsha-lang/automl | 8ee389c39d0101f407f57781e174610a8cf22b65 | [
"Apache-2.0"
] | null | null | null | efficientdet/keras/train_lib.py | ahsha-lang/automl | 8ee389c39d0101f407f57781e174610a8cf22b65 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training related libraries."""
import math
import os
import re
from absl import logging
import neural_structured_learning as nsl
import numpy as np
import tensorflow as tf
import inference
import iou_utils
import utils
from keras import anchors
from keras import efficientdet_keras
from keras import postprocess
from tensorflow_model_optimization.python.core.sparsity.keras import pruning_wrapper
def _collect_prunable_layers(model):
  """Recursively collect the prunable layers in the model.

  Walks the model's direct sub-layers and descends into nested models/layers,
  accumulating every `PruneLowMagnitude` wrapper encountered.
  """
  found = []
  children = model._flatten_layers(recursive=False, include_self=False)  # pylint: disable=protected-access
  for child in children:
    if isinstance(child, pruning_wrapper.PruneLowMagnitude):
      found.append(child)
    elif isinstance(child, (tf.keras.Model, tf.keras.layers.Layer)):
      # A keras model may have other models as layers; recurse into them.
      found.extend(_collect_prunable_layers(child))
  return found
class UpdatePruningStep(tf.keras.callbacks.Callback):
  """Keras callback which updates pruning wrappers with the optimizer step.

  This callback must be used when training a model which needs to be pruned. Not
  doing so will throw an error.

  Example:

  ```python
  model.fit(x, y,
      callbacks=[UpdatePruningStep()])
  ```
  """

  def __init__(self):
    super(UpdatePruningStep, self).__init__()
    # Filled in at train begin; list of PruneLowMagnitude wrappers.
    self.prunable_layers = []

  def on_train_begin(self, logs=None):
    # Collect all the prunable layers in the model.
    self.prunable_layers = _collect_prunable_layers(self.model)
    # Start the pruning step counter from the optimizer's current iteration
    # so resumed training continues the pruning schedule correctly.
    self.step = tf.keras.backend.get_value(self.model.optimizer.iterations)

  def on_train_batch_begin(self, batch, logs=None):
    # Push the current step into every built pruning wrapper in one batched
    # assignment, then advance the local counter.
    tuples = []
    for layer in self.prunable_layers:
      if layer.built:
        tuples.append((layer.pruning_step, self.step))
    tf.keras.backend.batch_set_value(tuples)
    self.step = self.step + 1

  def on_epoch_end(self, batch, logs=None):
    # At the end of every epoch, remask the weights. This ensures that when
    # the model is saved after completion, the weights represent mask*weights.
    weight_mask_ops = []
    for layer in self.prunable_layers:
      if layer.built and isinstance(layer, pruning_wrapper.PruneLowMagnitude):
        if tf.executing_eagerly():
          layer.pruning_obj.weight_mask_op()
        else:
          # In graph mode, collect the ops and run them together below.
          weight_mask_ops.append(layer.pruning_obj.weight_mask_op())
    tf.keras.backend.batch_get_value(weight_mask_ops)
class PruningSummaries(tf.keras.callbacks.TensorBoard):
  """A Keras callback for adding pruning summaries to tensorboard.

  Logs the sparsity(%) and threshold at a given iteration step.
  """

  def __init__(self, log_dir, update_freq='epoch', **kwargs):
    if not isinstance(log_dir, str) or not log_dir:
      raise ValueError(
          '`log_dir` must be a non-empty string. You passed `log_dir`='
          '{input}.'.format(input=log_dir))
    super().__init__(log_dir=log_dir, update_freq=update_freq, **kwargs)
    # Pruning metrics go to their own sub-directory so they show up as a
    # separate run in TensorBoard.
    log_dir = self.log_dir + '/metrics'
    self._file_writer = tf.summary.create_file_writer(log_dir)

  def _log_pruning_metrics(self, logs, step):
    # Write every (name, value) pair in `logs` as a scalar summary at `step`.
    with self._file_writer.as_default():
      for name, value in logs.items():
        tf.summary.scalar(name, value, step=step)
    self._file_writer.flush()

  def on_epoch_begin(self, epoch, logs=None):
    if logs is not None:
      super().on_epoch_begin(epoch, logs)
    pruning_logs = {}
    params = []
    prunable_layers = _collect_prunable_layers(self.model)
    # Build an interleaved [mask, threshold, mask, threshold, ...] list with
    # the optimizer iteration appended last, so all values are fetched in a
    # single batch_get_value call.
    for layer in prunable_layers:
      for _, mask, threshold in layer.pruning_vars:
        params.append(mask)
        params.append(threshold)
    params.append(self.model.optimizer.iterations)
    values = tf.keras.backend.batch_get_value(params)
    iteration = values[-1]
    # Drop the iteration entry so params/values are mask/threshold pairs again.
    del values[-1]
    del params[-1]
    param_value_pairs = list(zip(params, values))
    # Even indices are masks; sparsity is the fraction of zeroed weights.
    for mask, mask_value in param_value_pairs[::2]:
      pruning_logs.update({mask.name + '/sparsity': 1 - np.mean(mask_value)})
    # Odd indices are the corresponding pruning thresholds.
    for threshold, threshold_value in param_value_pairs[1::2]:
      pruning_logs.update({threshold.name + '/threshold': threshold_value})
    self._log_pruning_metrics(pruning_logs, iteration)
def update_learning_rate_schedule_parameters(params):
  """Updates params that are related to the learning rate schedule.

  Mutates `params` in place, deriving the adjusted learning rate from the
  global batch size and converting the epoch-based schedule settings into
  absolute step counts.
  """
  # Learning rate is proportional to the global (all-shard) batch size.
  global_batch = params['batch_size'] * params['num_shards']
  params['adjusted_learning_rate'] = params['learning_rate'] * global_batch / 64
  spe = params['steps_per_epoch']
  # Convert every epoch-denominated knob into a step count.
  for step_key, epoch_key in (('lr_warmup_step', 'lr_warmup_epoch'),
                              ('first_lr_drop_step', 'first_lr_drop_epoch'),
                              ('second_lr_drop_step', 'second_lr_drop_epoch'),
                              ('total_steps', 'num_epochs')):
    params[step_key] = int(params[epoch_key] * spe)
class StepwiseLrSchedule(tf.optimizers.schedules.LearningRateSchedule):
  """Stepwise learning rate schedule."""

  def __init__(self, adjusted_lr: float, lr_warmup_init: float,
               lr_warmup_step: int, first_lr_drop_step: int,
               second_lr_drop_step: int):
    """Build a StepwiseLrSchedule.

    Args:
      adjusted_lr: `float`, The initial learning rate.
      lr_warmup_init: `float`, The warm up learning rate.
      lr_warmup_step: `int`, The warm up step.
      first_lr_drop_step: `int`, First lr decay step.
      second_lr_drop_step: `int`, Second lr decay step.
    """
    super().__init__()
    logging.info('LR schedule method: stepwise')
    self.adjusted_lr = adjusted_lr
    self.lr_warmup_init = lr_warmup_init
    self.lr_warmup_step = lr_warmup_step
    self.first_lr_drop_step = first_lr_drop_step
    self.second_lr_drop_step = second_lr_drop_step

  def __call__(self, step):
    """Returns the learning rate tensor for the given global `step`."""
    # Linear ramp from lr_warmup_init to adjusted_lr over the warmup steps.
    linear_warmup = (
        self.lr_warmup_init +
        (tf.cast(step, dtype=tf.float32) / self.lr_warmup_step *
         (self.adjusted_lr - self.lr_warmup_init)))
    learning_rate = tf.where(step < self.lr_warmup_step, linear_warmup,
                             self.adjusted_lr)
    # After warmup, drop to 10% and then 1% of adjusted_lr at the two
    # configured decay steps.
    lr_schedule = [[1.0, self.lr_warmup_step], [0.1, self.first_lr_drop_step],
                   [0.01, self.second_lr_drop_step]]
    for mult, start_global_step in lr_schedule:
      learning_rate = tf.where(step < start_global_step, learning_rate,
                               self.adjusted_lr * mult)
    return learning_rate
class CosineLrSchedule(tf.optimizers.schedules.LearningRateSchedule):
  """Cosine learning rate schedule."""

  def __init__(self, adjusted_lr: float, lr_warmup_init: float,
               lr_warmup_step: int, total_steps: int):
    """Build a CosineLrSchedule.

    Args:
      adjusted_lr: `float`, The initial learning rate.
      lr_warmup_init: `float`, The warm up learning rate.
      lr_warmup_step: `int`, The warm up step.
      total_steps: `int`, Total train steps.
    """
    super().__init__()
    logging.info('LR schedule method: cosine')
    self.adjusted_lr = adjusted_lr
    self.lr_warmup_init = lr_warmup_init
    self.lr_warmup_step = lr_warmup_step
    # Cosine decay runs over the post-warmup remainder of training.
    self.decay_steps = tf.cast(total_steps - lr_warmup_step, tf.float32)

  def __call__(self, step):
    """Returns the learning rate tensor for the given global `step`."""
    # Linear ramp from lr_warmup_init to adjusted_lr over the warmup steps.
    linear_warmup = (
        self.lr_warmup_init +
        (tf.cast(step, dtype=tf.float32) / self.lr_warmup_step *
         (self.adjusted_lr - self.lr_warmup_init)))
    # Half-cosine from adjusted_lr down toward zero over decay_steps.
    cosine_lr = 0.5 * self.adjusted_lr * (
        1 + tf.cos(math.pi * tf.cast(step, tf.float32) / self.decay_steps))
    return tf.where(step < self.lr_warmup_step, linear_warmup, cosine_lr)
class PolynomialLrSchedule(tf.optimizers.schedules.LearningRateSchedule):
  """Polynomial learning rate schedule."""

  def __init__(self, adjusted_lr: float, lr_warmup_init: float,
               lr_warmup_step: int, power: float, total_steps: int):
    """Build a PolynomialLrSchedule.

    Args:
      adjusted_lr: `float`, The initial learning rate.
      lr_warmup_init: `float`, The warm up learning rate.
      lr_warmup_step: `int`, The warm up step.
      power: `float`, power.
      total_steps: `int`, Total train steps.
    """
    super().__init__()
    logging.info('LR schedule method: polynomial')
    self.adjusted_lr = adjusted_lr
    self.lr_warmup_init = lr_warmup_init
    self.lr_warmup_step = lr_warmup_step
    self.power = power
    self.total_steps = total_steps

  def __call__(self, step):
    """Returns the learning rate tensor for the given global `step`."""
    # Linear ramp from lr_warmup_init to adjusted_lr over the warmup steps.
    linear_warmup = (
        self.lr_warmup_init +
        (tf.cast(step, dtype=tf.float32) / self.lr_warmup_step *
         (self.adjusted_lr - self.lr_warmup_init)))
    # Polynomial decay: adjusted_lr * (1 - step/total_steps) ** power.
    polynomial_lr = self.adjusted_lr * tf.pow(
        1 - (tf.cast(step, dtype=tf.float32) / self.total_steps), self.power)
    return tf.where(step < self.lr_warmup_step, linear_warmup, polynomial_lr)
def learning_rate_schedule(params):
  """Learning rate schedule based on global step.

  Derives step-based schedule parameters from `params` (mutating it), then
  builds the schedule named by params['lr_decay_method'].

  Raises:
    ValueError: if params['lr_decay_method'] is not one of
      'stepwise', 'cosine' or 'polynomial'.
  """
  update_learning_rate_schedule_parameters(params)
  builders = {
      'stepwise':
          lambda: StepwiseLrSchedule(params['adjusted_learning_rate'],
                                     params['lr_warmup_init'],
                                     params['lr_warmup_step'],
                                     params['first_lr_drop_step'],
                                     params['second_lr_drop_step']),
      'cosine':
          lambda: CosineLrSchedule(params['adjusted_learning_rate'],
                                   params['lr_warmup_init'],
                                   params['lr_warmup_step'],
                                   params['total_steps']),
      'polynomial':
          lambda: PolynomialLrSchedule(params['adjusted_learning_rate'],
                                       params['lr_warmup_init'],
                                       params['lr_warmup_step'],
                                       params['poly_lr_power'],
                                       params['total_steps']),
  }
  lr_decay_method = params['lr_decay_method']
  if lr_decay_method not in builders:
    raise ValueError('unknown lr_decay_method: {}'.format(lr_decay_method))
  return builders[lr_decay_method]()
def get_optimizer(params):
  """Get optimizer.

  Args:
    params: a dict of training params; reads 'optimizer', 'momentum',
      'moving_average_decay', 'mixed_precision' and 'loss_scale', plus the
      learning-rate-schedule keys consumed by learning_rate_schedule().

  Returns:
    A keras optimizer, possibly wrapped with MovingAverage and/or a
    LossScaleOptimizer.

  Raises:
    ValueError: if params['optimizer'] is neither 'sgd' nor 'adam'.
  """
  learning_rate = learning_rate_schedule(params)
  momentum = params['momentum']
  if params['optimizer'].lower() == 'sgd':
    logging.info('Use SGD optimizer')
    optimizer = tf.keras.optimizers.SGD(learning_rate, momentum=momentum)
  elif params['optimizer'].lower() == 'adam':
    logging.info('Use Adam optimizer')
    # For Adam, `momentum` is reused as the beta_1 decay rate.
    optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=momentum)
  else:
    raise ValueError('optimizers should be adam or sgd')
  moving_average_decay = params['moving_average_decay']
  if moving_average_decay:
    # TODO(tanmingxing): potentially add dynamic_decay for new tfa release.
    from tensorflow_addons import optimizers as tfa_optimizers  # pylint: disable=g-import-not-at-top
    optimizer = tfa_optimizers.MovingAverage(
        optimizer, average_decay=moving_average_decay, dynamic_decay=True)
  if params['mixed_precision']:
    # NOTE(review): tf.keras.mixed_precision.experimental was deprecated in
    # TF 2.4 — confirm the TF version pinned by this project still ships it.
    optimizer = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
        optimizer,
        loss_scale=tf.mixed_precision.experimental.DynamicLossScale(
            params['loss_scale']))
  return optimizer
class COCOCallback(tf.keras.callbacks.Callback):
  """Runs COCO evaluation every `update_freq` epochs and logs its metrics."""

  def __init__(self, test_dataset, update_freq=None):
    super().__init__()
    self.test_dataset = test_dataset
    # Evaluate every `update_freq` epochs; None disables evaluation.
    self.update_freq = update_freq

  def set_model(self, model: tf.keras.Model):
    # Deferred imports: only needed when this callback is actually attached.
    import coco_metric
    from keras import label_util
    self.model = model
    config = model.config
    self.config = config
    label_map = label_util.get_label_map(config.label_map)
    log_dir = os.path.join(config.model_dir, 'coco')
    self.file_writer = tf.summary.create_file_writer(log_dir)
    self.evaluator = coco_metric.EvaluationMetric(
        filename=config.val_json_file, label_map=label_map)

  @tf.function
  def _get_detections(self, images, labels):
    # Forward pass + NMS post-processing to produce final detections.
    cls_outputs, box_outputs = self.model(images, training=False)
    detections = postprocess.generate_detections(self.config,
                                                 cls_outputs,
                                                 box_outputs,
                                                 labels['image_scales'],
                                                 labels['source_ids'])
    return postprocess.transform_detections(detections)

  def on_epoch_end(self, epoch, logs=None):
    # Keras epochs are 0-based; evaluate on 1-based epoch boundaries.
    epoch += 1
    if self.update_freq and epoch % self.update_freq == 0:
      self.evaluator.reset_states()
      strategy = tf.distribute.get_strategy()
      count = self.config.eval_samples // self.config.batch_size
      dataset = self.test_dataset.take(count)
      dataset = strategy.experimental_distribute_dataset(dataset)
      for (images, labels) in dataset:
        detections = strategy.run(self._get_detections, (images, labels))
        # update_state is a numpy-side accumulator, so bridge via numpy_function.
        tf.numpy_function(self.evaluator.update_state,
                          [labels['groundtruth_data'], detections],
                          [])
      metrics = self.evaluator.result()
      with self.file_writer.as_default(), tf.summary.record_if(True):
        for i, name in enumerate(self.evaluator.metric_names):
          tf.summary.scalar(name, metrics[i],step=epoch)
class DisplayCallback(tf.keras.callbacks.Callback):
  """Display inference result callback.

  Periodically runs the model on one fixed sample image and writes the
  visualized detections to a TensorBoard image summary.
  """

  def __init__(self, sample_image, output_dir, update_freq=None):
    """Init DisplayCallback.

    Args:
      sample_image: path of a jpeg image used for visualization.
      output_dir: output directory (stored; summaries are written to
        `<model_dir>/test_images` set in `set_model`).
      update_freq: draw every `update_freq` train batches; None disables.
    """
    super().__init__()
    image_file = tf.io.read_file(sample_image)
    # Decode once up front and add a batch dimension of 1.
    self.sample_image = tf.expand_dims(
        tf.image.decode_jpeg(image_file, channels=3), axis=0)
    self.update_freq = update_freq
    self.output_dir = output_dir

  def set_model(self, model: tf.keras.Model):
    self.model = model
    config = model.config
    log_dir = os.path.join(config.model_dir, 'test_images')
    self.file_writer = tf.summary.create_file_writer(log_dir)
    self.min_score_thresh = config.nms_configs['score_thresh'] or 0.4
    self.max_boxes_to_draw = config.nms_configs['max_output_size'] or 100

  def on_train_batch_end(self, batch, logs=None):
    if self.update_freq and batch % self.update_freq == 0:
      self._draw_inference(batch)

  def _draw_inference(self, step):
    # Temporarily swap to the inference model class so __call__ runs full
    # pre/post-processing; restored to the training class at the end.
    self.model.__class__ = efficientdet_keras.EfficientDetModel
    results = self.model(self.sample_image, training=False)
    boxes, scores, classes, valid_len = tf.nest.map_structure(np.array, results)
    length = valid_len[0]
    image = inference.visualize_image(
        self.sample_image[0],
        boxes[0][:length],
        # Bug fix: the `np.int` alias was deprecated in NumPy 1.20 and removed
        # in 1.24; the builtin `int` is the documented replacement.
        classes[0].astype(int)[:length],
        scores[0][:length],
        label_map=self.model.config.label_map,
        min_score_thresh=self.min_score_thresh,
        max_boxes_to_draw=self.max_boxes_to_draw)
    with self.file_writer.as_default():
      tf.summary.image('Test image', tf.expand_dims(image, axis=0), step=step)
    self.model.__class__ = efficientdet_keras.EfficientDetNet
def get_callbacks(params, val_dataset):
  """Get callbacks for given params.

  Args:
    params: a dict of training params.
    val_dataset: validation dataset, used by the optional COCO eval callback.

  Returns:
    A list of keras callbacks: checkpointing, TensorBoard (or pruning
    summaries when pruning), plus optional image-display and COCO-eval
    callbacks.
  """
  if params.get('moving_average_decay', None):
    # EMA training: checkpoint the averaged weights instead of the raw ones.
    from tensorflow_addons.callbacks import AverageModelCheckpoint  # pylint: disable=g-import-not-at-top
    avg_callback = AverageModelCheckpoint(
        filepath=os.path.join(params['model_dir'], 'ckpt'),
        verbose=1,
        save_weights_only=True,
        update_weights=True)
    callbacks = [avg_callback]
  else:
    ckpt_callback = tf.keras.callbacks.ModelCheckpoint(
        os.path.join(params['model_dir'], 'ckpt'),
        verbose=1,
        save_weights_only=True)
    callbacks = [ckpt_callback]
  if params['model_optimizations'] and 'prune' in params['model_optimizations']:
    # Pruning needs both the step updater and its own TensorBoard subclass.
    prune_callback = UpdatePruningStep()
    prune_summaries = PruningSummaries(
        log_dir=params['model_dir'],
        update_freq=params['iterations_per_loop'],
        profile_batch=2 if params['profile'] else 0)
    callbacks += [prune_callback, prune_summaries]
  else:
    tb_callback = tf.keras.callbacks.TensorBoard(
        log_dir=params['model_dir'],
        update_freq=params['iterations_per_loop'],
        profile_batch=2 if params['profile'] else 0)
    callbacks.append(tb_callback)
  if params.get('sample_image', None):
    display_callback = DisplayCallback(
        params.get('sample_image', None), params['model_dir'],
        params['img_summary_steps'])
    callbacks.append(display_callback)
  if params.get('map_freq', None):
    coco_callback = COCOCallback(val_dataset, params['map_freq'])
    callbacks.append(coco_callback)
  return callbacks
class AdversarialLoss(tf.keras.losses.Loss):
  """Adversarial keras loss wrapper.

  Wraps neural_structured_learning's adversarial regularization so it can be
  attached as a keras loss. `build` must be called with the model, the base
  loss function and the active gradient tape before `call` is invoked.
  """

  # TODO(fsx950223): WIP
  def __init__(self, adv_config, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self.adv_config = adv_config
    self.model = None
    self.loss_fn = None
    self.tape = None
    self.built = False

  def build(self, model, loss_fn, tape):
    """Binds the model, base loss function, and gradient tape used by `call`."""
    self.model = model
    self.loss_fn = loss_fn
    self.tape = tape
    self.built = True

  def call(self, features, y, y_pred, labeled_loss):
    """Returns the weighted adversarial regularization loss.

    Args:
      features: input features.
      y: ground truth labels.
      y_pred: model predictions for `features`.
      labeled_loss: the precomputed supervised loss on (y, y_pred).

    Returns:
      adv_config.multiplier times the nsl adversarial loss.
    """
    # Bug fix: forward the `labeled_loss` argument. The previous code
    # referenced the non-existent attribute `self.labeled_loss`, which would
    # raise AttributeError the first time this loss was called.
    return self.adv_config.multiplier * nsl.keras.adversarial_loss(
        features,
        y,
        self.model,
        self.loss_fn,
        predictions=y_pred,
        labeled_loss=labeled_loss,
        gradient_tape=self.tape)
class FocalLoss(tf.keras.losses.Loss):
  """Compute the focal loss between `logits` and the golden `target` values.

  Focal loss = -(1-pt)^gamma * log(pt)

  where pt is the probability of being classified to the true class.
  """

  def __init__(self, alpha, gamma, label_smoothing=0.0, **kwargs):
    """Initialize focal loss.

    Args:
      alpha: A float32 scalar multiplying alpha to the loss from positive
        examples and (1-alpha) to the loss from negative examples.
      gamma: A float32 scalar modulating loss from hard and easy examples.
      label_smoothing: Float in [0, 1]. If > `0` then smooth the labels.
      **kwargs: other params.
    """
    super().__init__(**kwargs)
    self.alpha = alpha
    self.gamma = gamma
    self.label_smoothing = label_smoothing

  @tf.autograph.experimental.do_not_convert
  def call(self, y, y_pred):
    """Compute focal loss for y and y_pred.

    Args:
      y: A tuple of (normalizer, y_true), where y_true is the target class.
      y_pred: A float32 tensor [batch, height_in, width_in, num_predictions].

    Returns:
      the focal loss.
    """
    normalizer, y_true = y
    alpha = tf.convert_to_tensor(self.alpha, dtype=y_pred.dtype)
    gamma = tf.convert_to_tensor(self.gamma, dtype=y_pred.dtype)
    # compute focal loss multipliers before label smoothing, such that it will
    # not blow up the loss.
    pred_prob = tf.sigmoid(y_pred)
    # p_t is the probability assigned to the true class per-element.
    p_t = (y_true * pred_prob) + ((1 - y_true) * (1 - pred_prob))
    alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)
    modulating_factor = (1.0 - p_t)**gamma
    # apply label smoothing for cross_entropy for each entry.
    y_true = y_true * (1.0 - self.label_smoothing) + 0.5 * self.label_smoothing
    ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=y_pred)
    # compute the final loss and return
    return alpha_factor * modulating_factor * ce / normalizer
class BoxLoss(tf.keras.losses.Loss):
  """L2 box regression loss."""

  def __init__(self, delta=0.1, **kwargs):
    """Initialize box loss.

    Args:
      delta: `float`, the point where the huber loss function changes from a
        quadratic to linear. It is typically around the mean value of regression
        target. For instances, the regression targets of 512x512 input with 6
        anchors on P3-P7 pyramid is about [0.1, 0.1, 0.2, 0.2].
      **kwargs: other params.
    """
    super().__init__(**kwargs)
    # Reduction.NONE: per-element losses are masked and normalized manually.
    self.huber = tf.keras.losses.Huber(
        delta, reduction=tf.keras.losses.Reduction.NONE)

  @tf.autograph.experimental.do_not_convert
  def call(self, y_true, box_outputs):
    """Returns the masked, normalized Huber loss over box regression targets.

    Args:
      y_true: tuple of (num_positives, box_targets).
      box_outputs: predicted box regression values matching box_targets' shape.
    """
    num_positives, box_targets = y_true
    # 4 coordinates per positive anchor.
    normalizer = num_positives * 4.0
    # Zero-valued targets mark unassigned entries; exclude them from the loss.
    mask = tf.cast(box_targets != 0.0, box_outputs.dtype)
    box_targets = tf.expand_dims(box_targets, axis=-1)
    box_outputs = tf.expand_dims(box_outputs, axis=-1)
    # TODO(fsx950223): remove cast when huber loss dtype is fixed.
    box_loss = tf.cast(self.huber(box_targets, box_outputs),
                       box_outputs.dtype) * mask
    box_loss = tf.reduce_sum(box_loss) / normalizer
    return box_loss
class BoxIouLoss(tf.keras.losses.Loss):
  """Box iou loss."""

  def __init__(self, iou_loss_type, min_level, max_level, num_scales,
               aspect_ratios, anchor_scale, image_size, **kwargs):
    """Builds the anchor set once so decoding can reuse it on every call."""
    super().__init__(**kwargs)
    self.iou_loss_type = iou_loss_type
    self.input_anchors = anchors.Anchors(min_level, max_level, num_scales,
                                         aspect_ratios, anchor_scale,
                                         image_size)

  @tf.autograph.experimental.do_not_convert
  def call(self, y_true, box_outputs):
    """Returns the normalized IoU-family loss between decoded boxes.

    Args:
      y_true: tuple of (num_positives, box_targets) with targets flattened to
        [-1, 4] across all levels.
      box_outputs: box regression outputs flattened to [-1, 4].
    """
    # Tile the per-image anchors to cover the whole flattened batch
    # (assumes box_outputs stacks whole images; confirm against caller).
    anchor_boxes = tf.tile(
        self.input_anchors.boxes,
        [box_outputs.shape[0] // self.input_anchors.boxes.shape[0], 1])
    num_positives, box_targets = y_true
    normalizer = num_positives * 4.0
    # Zero-valued targets mark unassigned entries; mask them out after decode.
    mask = tf.cast(box_targets != 0.0, box_outputs.dtype)
    box_outputs = anchors.decode_box_outputs(box_outputs, anchor_boxes) * mask
    box_targets = anchors.decode_box_outputs(box_targets, anchor_boxes) * mask
    box_iou_loss = iou_utils.iou_loss(box_outputs, box_targets,
                                      self.iou_loss_type)
    box_iou_loss = tf.reduce_sum(box_iou_loss) / normalizer
    return box_iou_loss
class EfficientDetNetTrain(efficientdet_keras.EfficientDetNet):
  """A customized trainer for EfficientDet.

  see https://www.tensorflow.org/guide/keras/customizing_what_happens_in_fit
  """

  def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    # Writer for the optional input-image summaries emitted in train_step.
    log_dir = os.path.join(self.config.model_dir, 'train_images')
    self.summary_writer = tf.summary.create_file_writer(log_dir)

  def _freeze_vars(self):
    """Returns trainable variables minus those matching var_freeze_expr."""
    if self.config.var_freeze_expr:
      return [
          v for v in self.trainable_variables
          if not re.match(self.config.var_freeze_expr, v.name)
      ]
    return self.trainable_variables

  def _reg_l2_loss(self, weight_decay, regex=r'.*(kernel|weight):0$'):
    """Return regularization l2 loss."""
    # Only kernel/weight variables are regularized; biases/BN params are not.
    var_match = re.compile(regex)
    return weight_decay * tf.add_n([
        tf.nn.l2_loss(v) for v in self._freeze_vars() if var_match.match(v.name)
    ])

  def _detection_loss(self, cls_outputs, box_outputs, labels, loss_vals):
    """Computes total detection loss.

    Computes total detection loss including box and class loss from all levels.

    Args:
      cls_outputs: an OrderDict with keys representing levels and values
        representing logits in [batch_size, height, width, num_anchors].
      box_outputs: an OrderDict with keys representing levels and values
        representing box regression targets in [batch_size, height, width,
        num_anchors * 4].
      labels: the dictionary that returned from dataloader that includes
        groundtruth targets.
      loss_vals: A dict of loss values.

    Returns:
      total_loss: an integer tensor representing total loss reducing from
        class and box losses from all levels.
      cls_loss: an integer tensor representing total class loss.
      box_loss: an integer tensor representing total box regression loss.
      box_iou_loss: an integer tensor representing total box iou loss.
    """
    # Sum all positives in a batch for normalization and avoid zero
    # num_positives_sum, which would lead to inf loss during training
    precision = utils.get_precision(self.config.strategy,
                                    self.config.mixed_precision)
    dtype = precision.split('_')[-1]
    num_positives_sum = tf.reduce_sum(labels['mean_num_positives']) + 1.0
    positives_momentum = self.config.positives_momentum or 0
    if positives_momentum > 0:
      # normalize the num_positive_examples for training stability.
      moving_normalizer_var = tf.Variable(
          0.0,
          name='moving_normalizer',
          dtype=dtype,
          synchronization=tf.VariableSynchronization.ON_READ,
          trainable=False,
          aggregation=tf.VariableAggregation.MEAN)
      num_positives_sum = tf.keras.backend.moving_average_update(
          moving_normalizer_var,
          num_positives_sum,
          momentum=self.config.positives_momentum)
    elif positives_momentum < 0:
      # Negative momentum: average across replicas instead of over time.
      num_positives_sum = utils.cross_replica_mean(num_positives_sum)
    num_positives_sum = tf.cast(num_positives_sum, dtype)
    levels = range(len(cls_outputs))
    cls_losses = []
    box_losses = []
    for level in levels:
      # Onehot encoding for classification labels.
      cls_targets_at_level = tf.one_hot(
          labels['cls_targets_%d' % (level + self.config.min_level)],
          self.config.num_classes,
          dtype=dtype)
      # Fold the anchor and class dimensions together to match the head output.
      if self.config.data_format == 'channels_first':
        bs, _, width, height, _ = cls_targets_at_level.get_shape().as_list()
        cls_targets_at_level = tf.reshape(cls_targets_at_level,
                                          [bs, -1, width, height])
      else:
        bs, width, height, _, _ = cls_targets_at_level.get_shape().as_list()
        cls_targets_at_level = tf.reshape(cls_targets_at_level,
                                          [bs, width, height, -1])
      class_loss_layer = self.loss.get(FocalLoss.__name__, None)
      if class_loss_layer:
        cls_loss = class_loss_layer([num_positives_sum, cls_targets_at_level],
                                    cls_outputs[level])
        if self.config.data_format == 'channels_first':
          cls_loss = tf.reshape(
              cls_loss, [bs, -1, width, height, self.config.num_classes])
        else:
          cls_loss = tf.reshape(
              cls_loss, [bs, width, height, -1, self.config.num_classes])
        # Mask out entries labeled -2 (ignored anchors) from the class loss.
        cls_loss *= tf.cast(
            tf.expand_dims(
                tf.not_equal(
                    labels['cls_targets_%d' % (level + self.config.min_level)],
                    -2), -1), dtype)
        cls_loss_sum = tf.reduce_sum(cls_loss)
        cls_losses.append(tf.cast(cls_loss_sum, dtype))
      if self.config.box_loss_weight and self.loss.get(BoxLoss.__name__, None):
        box_targets_at_level = (
            labels['box_targets_%d' % (level + self.config.min_level)])
        box_loss_layer = self.loss[BoxLoss.__name__]
        box_losses.append(
            box_loss_layer([num_positives_sum, box_targets_at_level],
                           box_outputs[level]))
    if self.config.iou_loss_type:
      # Flatten outputs/targets across levels to [-1, 4] for the IoU loss.
      box_outputs = tf.concat([tf.reshape(v, [-1, 4]) for v in box_outputs],
                              axis=0)
      box_targets = tf.concat([
          tf.reshape(labels['box_targets_%d' %
                            (level + self.config.min_level)], [-1, 4])
          for level in levels
      ],
                              axis=0)
      box_iou_loss_layer = self.loss[BoxIouLoss.__name__]
      box_iou_loss = box_iou_loss_layer([num_positives_sum, box_targets],
                                        box_outputs)
      loss_vals['box_iou_loss'] = box_iou_loss
    else:
      box_iou_loss = 0
    cls_loss = tf.add_n(cls_losses) if cls_losses else 0
    box_loss = tf.add_n(box_losses) if box_losses else 0
    total_loss = (
        cls_loss + self.config.box_loss_weight * box_loss +
        self.config.iou_loss_weight * box_iou_loss)
    loss_vals['det_loss'] = total_loss
    loss_vals['cls_loss'] = cls_loss
    loss_vals['box_loss'] = box_loss
    return total_loss

  def train_step(self, data):
    """Train step.

    Args:
      data: Tuple of (images, labels). Image tensor with shape [batch_size,
        height, width, 3]. The height and width are fixed and equal.Input labels
        in a dictionary. The labels include class targets and box targets which
        are dense label maps. The labels are generated from get_input_fn
        function in data/dataloader.py.

    Returns:
      A dict record loss info.
    """
    images, labels = data
    if self.config.img_summary_steps:
      with self.summary_writer.as_default():
        tf.summary.image('input_image', images)
    with tf.GradientTape() as tape:
      # Which outputs exist depends on the configured heads.
      if len(self.config.heads) == 2:
        cls_outputs, box_outputs, seg_outputs = self(images, training=True)
      elif 'object_detection' in self.config.heads:
        cls_outputs, box_outputs = self(images, training=True)
      elif 'segmentation' in self.config.heads:
        seg_outputs, = self(images, training=True)
      total_loss = 0
      loss_vals = {}
      if 'object_detection' in self.config.heads:
        det_loss = self._detection_loss(cls_outputs, box_outputs, labels,
                                        loss_vals)
        total_loss += det_loss
      if 'segmentation' in self.config.heads:
        seg_loss_layer = (
            self.loss[tf.keras.losses.SparseCategoricalCrossentropy.__name__])
        seg_loss = seg_loss_layer(labels['image_masks'], seg_outputs)
        total_loss += seg_loss
        loss_vals['seg_loss'] = seg_loss
      reg_l2_loss = self._reg_l2_loss(self.config.weight_decay)
      loss_vals['reg_l2_loss'] = reg_l2_loss
      total_loss += tf.cast(reg_l2_loss, images.dtype)
      if isinstance(self.optimizer,
                    tf.keras.mixed_precision.experimental.LossScaleOptimizer):
        # Scale the loss so fp16 gradients do not underflow.
        scaled_loss = self.optimizer.get_scaled_loss(total_loss)
      else:
        scaled_loss = total_loss
    loss_vals['loss'] = total_loss
    loss_vals['learning_rate'] = self.optimizer.learning_rate(
        self.optimizer.iterations)
    trainable_vars = self._freeze_vars()
    scaled_gradients = tape.gradient(scaled_loss, trainable_vars)
    if isinstance(self.optimizer,
                  tf.keras.mixed_precision.experimental.LossScaleOptimizer):
      gradients = self.optimizer.get_unscaled_gradients(scaled_gradients)
    else:
      gradients = scaled_gradients
    if self.config.clip_gradients_norm > 0:
      clip_norm = abs(self.config.clip_gradients_norm)
      # NOTE(review): gradients are clipped per-tensor and then again by global
      # norm with the same threshold — confirm the double clip is intentional.
      gradients = [
          tf.clip_by_norm(g, clip_norm) if g is not None else None
          for g in gradients
      ]
      gradients, _ = tf.clip_by_global_norm(gradients, clip_norm)
      loss_vals['gradient_norm'] = tf.linalg.global_norm(gradients)
    self.optimizer.apply_gradients(zip(gradients, trainable_vars))
    return loss_vals

  def test_step(self, data):
    """Test step.

    Args:
      data: Tuple of (images, labels). Image tensor with shape [batch_size,
        height, width, 3]. The height and width are fixed and equal.Input labels
        in a dictionary. The labels include class targets and box targets which
        are dense label maps. The labels are generated from get_input_fn
        function in data/dataloader.py.

    Returns:
      A dict record loss info.
    """
    images, labels = data
    # Same head dispatch as train_step, but without a gradient tape.
    if len(self.config.heads) == 2:
      cls_outputs, box_outputs, seg_outputs = self(images, training=False)
    elif 'object_detection' in self.config.heads:
      cls_outputs, box_outputs = self(images, training=False)
    elif 'segmentation' in self.config.heads:
      seg_outputs, = self(images, training=False)
    total_loss = 0
    loss_vals = {}
    if 'object_detection' in self.config.heads:
      det_loss = self._detection_loss(cls_outputs, box_outputs, labels,
                                      loss_vals)
      total_loss += det_loss
    if 'segmentation' in self.config.heads:
      seg_loss_layer = (
          self.loss[tf.keras.losses.SparseCategoricalCrossentropy.__name__])
      seg_loss = seg_loss_layer(labels['image_masks'], seg_outputs)
      total_loss += seg_loss
      loss_vals['seg_loss'] = seg_loss
    reg_l2_loss = self._reg_l2_loss(self.config.weight_decay)
    loss_vals['reg_l2_loss'] = reg_l2_loss
    loss_vals['loss'] = total_loss + tf.cast(reg_l2_loss, images.dtype)
    return loss_vals
| 39.594891 | 110 | 0.677113 |
acdf40a0bf3e7f4c76f1ec7bf23ce5b3675c9b4d | 15,150 | py | Python | tests/garage/tf/models/test_gaussian_gru_model.py | lywong92/garage | 96cb8887fcae90531a645d540653010e7fe10fcc | [
"MIT"
] | 1 | 2020-01-05T14:57:43.000Z | 2020-01-05T14:57:43.000Z | tests/garage/tf/models/test_gaussian_gru_model.py | lywong92/garage | 96cb8887fcae90531a645d540653010e7fe10fcc | [
"MIT"
] | null | null | null | tests/garage/tf/models/test_gaussian_gru_model.py | lywong92/garage | 96cb8887fcae90531a645d540653010e7fe10fcc | [
"MIT"
] | null | null | null | import pickle
from unittest import mock
from nose2.tools.params import params
import numpy as np
import tensorflow as tf
from garage.tf.models import GaussianGRUModel
from tests.fixtures import TfGraphTestCase
from tests.helpers import recurrent_step_gru
class TestGaussianGRUModel(TfGraphTestCase):
def setUp(self):
super().setUp()
self.batch_size = 1
self.time_step = 2
self.feature_shape = 2
self.default_initializer = tf.constant_initializer(0.1)
self.obs_inputs = np.full(
(self.batch_size, self.time_step, self.feature_shape), 1.)
self.obs_input = np.full((self.batch_size, self.feature_shape), 1.)
self.input_var = tf.placeholder(
tf.float32, shape=(None, None, self.feature_shape), name='input')
self.step_input_var = tf.placeholder(
tf.float32, shape=(None, self.feature_shape), name='step_input')
# yapf: disable
@params(
(1, 1),
(2, 2),
(3, 3))
# yapf: enable
@mock.patch('tensorflow.random.normal')
def test_std_share_network_output_values(self, output_dim, hidden_dim,
mock_normal):
mock_normal.return_value = 0.5
model = GaussianGRUModel(
output_dim=output_dim,
hidden_dim=hidden_dim,
std_share_network=True,
hidden_nonlinearity=None,
recurrent_nonlinearity=None,
hidden_w_init=self.default_initializer,
recurrent_w_init=self.default_initializer,
output_w_init=self.default_initializer)
step_hidden_var = tf.placeholder(
shape=(self.batch_size, hidden_dim),
name='step_hidden',
dtype=tf.float32)
(action_var, mean_var, step_mean_var, log_std_var, step_log_std_var,
step_hidden, hidden_init_var, dist) = model.build(
self.input_var, self.step_input_var, step_hidden_var)
hidden1 = hidden2 = np.full((self.batch_size, hidden_dim),
hidden_init_var.eval())
mean, log_std = self.sess.run(
[mean_var, log_std_var],
feed_dict={self.input_var: self.obs_inputs})
for i in range(self.time_step):
action, mean1, log_std1, hidden1 = self.sess.run(
[action_var, step_mean_var, step_log_std_var, step_hidden],
feed_dict={
self.step_input_var: self.obs_input,
step_hidden_var: hidden1
})
hidden2 = recurrent_step_gru(
input_val=self.obs_input,
num_units=hidden_dim,
step_hidden=hidden2,
w_x_init=0.1,
w_h_init=0.1,
b_init=0.,
nonlinearity=None,
gate_nonlinearity=None)
output_nonlinearity = np.full(
(np.prod(hidden2.shape[1:]), output_dim), 0.1)
output2 = np.matmul(hidden2, output_nonlinearity)
assert np.allclose(mean1, output2)
assert np.allclose(log_std1, output2)
assert np.allclose(hidden1, hidden2)
expected_action = 0.5 * np.exp(log_std1) + mean1
assert np.allclose(action, expected_action)
# yapf: disable
@params(
(1, 1),
(2, 2),
(3, 3))
# yapf: enable
def test_std_share_network_shapes(self, output_dim, hidden_dim):
model = GaussianGRUModel(
output_dim=output_dim,
hidden_dim=hidden_dim,
std_share_network=True,
hidden_nonlinearity=None,
recurrent_nonlinearity=None,
hidden_w_init=self.default_initializer,
recurrent_w_init=self.default_initializer,
output_w_init=self.default_initializer)
step_hidden_var = tf.placeholder(
shape=(self.batch_size, hidden_dim),
name='step_hidden',
dtype=tf.float32)
(action_var, mean_var, step_mean_var, log_std_var, step_log_std_var,
step_hidden, hidden_init_var, dist) = model.build(
self.input_var, self.step_input_var, step_hidden_var)
# output layer is a tf.keras.layers.Dense object,
# which cannot be access by tf.variable_scope.
# A workaround is to access in tf.global_variables()
for var in tf.global_variables():
if 'output_layer/kernel' in var.name:
std_share_output_weights = var
if 'output_layer/bias' in var.name:
std_share_output_bias = var
assert std_share_output_weights.shape[1] == output_dim * 2
assert std_share_output_bias.shape == output_dim * 2
    @params((1, 1, 1), (1, 1, 2), (1, 2, 1), (1, 2, 2), (3, 3, 1), (3, 3, 2))
    @mock.patch('tensorflow.random.normal')
    def test_without_std_share_network_output_values(
            self, output_dim, hidden_dim, init_std, mock_normal):
        """Per-step outputs match a NumPy reference GRU with separate std.

        Unrolls the model step-by-step alongside ``recurrent_step_gru``
        (presumably a NumPy reference implementation — confirm in the test
        helpers) using constant 0.1 initializers so both sides agree.
        """
        # Mocked noise makes sampling deterministic: action = mean + 0.5*std.
        mock_normal.return_value = 0.5
        model = GaussianGRUModel(
            output_dim=output_dim,
            hidden_dim=hidden_dim,
            std_share_network=False,
            hidden_nonlinearity=None,
            recurrent_nonlinearity=None,
            hidden_w_init=self.default_initializer,
            recurrent_w_init=self.default_initializer,
            output_w_init=self.default_initializer,
            init_std=init_std)
        step_hidden_var = tf.placeholder(
            shape=(self.batch_size, hidden_dim),
            name='step_hidden',
            dtype=tf.float32)
        (action_var, mean_var, step_mean_var, log_std_var, step_log_std_var,
         step_hidden, hidden_init_var, dist) = model.build(
             self.input_var, self.step_input_var, step_hidden_var)
        # hidden1 tracks the model's recurrent state, hidden2 the reference;
        # both start from the model's initial hidden value.
        hidden1 = hidden2 = np.full((self.batch_size, hidden_dim),
                                    hidden_init_var.eval())
        mean, log_std = self.sess.run(
            [mean_var, log_std_var],
            feed_dict={self.input_var: self.obs_inputs})
        for i in range(self.time_step):
            action, mean1, log_std1, hidden1 = self.sess.run(
                [action_var, step_mean_var, step_log_std_var, step_hidden],
                feed_dict={
                    self.step_input_var: self.obs_input,
                    step_hidden_var: hidden1
                })
            # Reference step with the same constant weights (0.1) and no
            # nonlinearities, mirroring the initializers above.
            hidden2 = recurrent_step_gru(
                input_val=self.obs_input,
                num_units=hidden_dim,
                step_hidden=hidden2,
                w_x_init=0.1,
                w_h_init=0.1,
                b_init=0.,
                nonlinearity=None,
                gate_nonlinearity=None)
            output_nonlinearity = np.full(
                (np.prod(hidden2.shape[1:]), output_dim), 0.1)
            output2 = np.matmul(hidden2, output_nonlinearity)
            assert np.allclose(mean1, output2)
            # With a separate std parameter the log-std never changes from
            # its initial value log(init_std).
            expected_log_std = np.full((self.batch_size, output_dim),
                                       np.log(init_std))
            assert np.allclose(log_std1, expected_log_std)
            assert np.allclose(hidden1, hidden2)
            # Matches the mocked tf.random.normal return value of 0.5.
            expected_action = 0.5 * np.exp(log_std1) + mean1
            assert np.allclose(action, expected_action)
# yapf: disable
@params(
(1, 1),
(2, 2),
(3, 3))
# yapf: enable
def test_without_std_share_network_shapes(self, output_dim, hidden_dim):
model = GaussianGRUModel(
output_dim=output_dim,
hidden_dim=hidden_dim,
std_share_network=False,
hidden_nonlinearity=None,
recurrent_nonlinearity=None,
hidden_w_init=self.default_initializer,
recurrent_w_init=self.default_initializer,
output_w_init=self.default_initializer)
step_hidden_var = tf.placeholder(
shape=(self.batch_size, hidden_dim),
name='step_hidden',
dtype=tf.float32)
(action_var, mean_var, step_mean_var, log_std_var, step_log_std_var,
step_hidden, hidden_init_var, dist) = model.build(
self.input_var, self.step_input_var, step_hidden_var)
# output layer is a tf.keras.layers.Dense object,
# which cannot be access by tf.variable_scope.
# A workaround is to access in tf.global_variables()
for var in tf.global_variables():
if 'output_layer/kernel' in var.name:
std_share_output_weights = var
if 'output_layer/bias' in var.name:
std_share_output_bias = var
if 'log_std_param/parameter' in var.name:
log_std_param = var
assert std_share_output_weights.shape[1] == output_dim
assert std_share_output_bias.shape == output_dim
assert log_std_param.shape == output_dim
    # yapf: disable
    @params(
        (1, 1),
        (2, 2),
        (3, 3))
    # yapf: enable
    @mock.patch('tensorflow.random.normal')
    def test_std_share_network_is_pickleable(self, output_dim, hidden_dim,
                                             mock_normal):
        """Pickling round-trips the shared-network model's parameters.

        Builds the model, perturbs a trained parameter, records outputs,
        then unpickles into a fresh graph and checks the outputs match.
        """
        # Mocked noise keeps sampling deterministic across both sessions.
        mock_normal.return_value = 0.5
        model = GaussianGRUModel(
            output_dim=output_dim,
            hidden_dim=hidden_dim,
            std_share_network=True,
            hidden_nonlinearity=None,
            recurrent_nonlinearity=None,
            hidden_w_init=self.default_initializer,
            recurrent_w_init=self.default_initializer,
            output_w_init=self.default_initializer)
        step_hidden_var = tf.placeholder(
            shape=(self.batch_size, hidden_dim),
            name='step_hidden',
            dtype=tf.float32)
        (_, mean_var, step_mean_var, log_std_var,
         step_log_std_var, step_hidden, _, _) = model.build(
             self.input_var, self.step_input_var, step_hidden_var)
        # output layer is a tf.keras.layers.Dense object,
        # which cannot be access by tf.variable_scope.
        # A workaround is to access in tf.global_variables()
        # Overwrite the bias so the test proves pickling preserves learned
        # values, not just the initializers.
        for var in tf.global_variables():
            if 'output_layer/bias' in var.name:
                var.load(tf.ones_like(var).eval())
        hidden = np.zeros((self.batch_size, hidden_dim))
        outputs1 = self.sess.run([mean_var, log_std_var],
                                 feed_dict={self.input_var: self.obs_inputs})
        output1 = self.sess.run([step_mean_var, step_log_std_var, step_hidden],
                                feed_dict={
                                    self.step_input_var: self.obs_input,
                                    step_hidden_var: hidden
                                })  # noqa: E126
        h = pickle.dumps(model)
        # Rebuild the unpickled model in a brand-new graph/session and check
        # it reproduces the recorded outputs exactly.
        with tf.Session(graph=tf.Graph()) as sess:
            model_pickled = pickle.loads(h)
            input_var = tf.placeholder(
                tf.float32,
                shape=(None, None, self.feature_shape),
                name='input')
            step_input_var = tf.placeholder(
                tf.float32,
                shape=(None, self.feature_shape),
                name='step_input')
            step_hidden_var = tf.placeholder(
                shape=(self.batch_size, hidden_dim),
                name='initial_hidden',
                dtype=tf.float32)
            (_, mean_var2, step_mean_var2, log_std_var2,
             step_log_std_var2, step_hidden2, _, _) = model_pickled.build(
                 input_var, step_input_var, step_hidden_var)
            outputs2 = sess.run([mean_var2, log_std_var2],
                                feed_dict={input_var: self.obs_inputs})
            output2 = sess.run(
                [step_mean_var2, step_log_std_var2, step_hidden2],
                feed_dict={
                    step_input_var: self.obs_input,
                    step_hidden_var: hidden
                })
            assert np.array_equal(outputs1, outputs2)
            assert np.array_equal(output1, output2)
    # yapf: disable
    @params(
        (1, 1),
        (2, 2),
        (3, 3))
    # yapf: enable
    @mock.patch('tensorflow.random.normal')
    def test_without_std_share_network_is_pickleable(self, output_dim,
                                                     hidden_dim, mock_normal):
        """Pickling round-trips the model when std has separate parameters.

        Same scheme as the shared-network variant: perturb a parameter,
        record outputs, unpickle into a fresh graph, and compare.
        """
        # Mocked noise keeps sampling deterministic across both sessions.
        mock_normal.return_value = 0.5
        model = GaussianGRUModel(
            output_dim=output_dim,
            hidden_dim=hidden_dim,
            std_share_network=False,
            hidden_nonlinearity=None,
            recurrent_nonlinearity=None,
            hidden_w_init=self.default_initializer,
            recurrent_w_init=self.default_initializer,
            output_w_init=self.default_initializer)
        step_hidden_var = tf.placeholder(
            shape=(self.batch_size, hidden_dim),
            name='step_hidden',
            dtype=tf.float32)
        (_, mean_var, step_mean_var, log_std_var,
         step_log_std_var, step_hidden, _, _) = model.build(
             self.input_var, self.step_input_var, step_hidden_var)
        # output layer is a tf.keras.layers.Dense object,
        # which cannot be access by tf.variable_scope.
        # A workaround is to access in tf.global_variables()
        # Overwrite the bias so the test proves pickling preserves learned
        # values, not just the initializers.
        for var in tf.global_variables():
            if 'output_layer/bias' in var.name:
                var.load(tf.ones_like(var).eval())
        hidden = np.zeros((self.batch_size, hidden_dim))
        outputs1 = self.sess.run([mean_var, log_std_var],
                                 feed_dict={self.input_var: self.obs_inputs})
        output1 = self.sess.run([step_mean_var, step_log_std_var, step_hidden],
                                feed_dict={
                                    self.step_input_var: self.obs_input,
                                    step_hidden_var: hidden
                                })  # noqa: E126
        h = pickle.dumps(model)
        # Rebuild the unpickled model in a brand-new graph/session and check
        # it reproduces the recorded outputs exactly.
        with tf.Session(graph=tf.Graph()) as sess:
            model_pickled = pickle.loads(h)
            input_var = tf.placeholder(
                tf.float32,
                shape=(None, None, self.feature_shape),
                name='input')
            step_input_var = tf.placeholder(
                tf.float32,
                shape=(None, self.feature_shape),
                name='step_input')
            step_hidden_var = tf.placeholder(
                shape=(self.batch_size, hidden_dim),
                name='initial_hidden',
                dtype=tf.float32)
            (_, mean_var2, step_mean_var2, log_std_var2,
             step_log_std_var2, step_hidden2, _, _) = model_pickled.build(
                 input_var, step_input_var, step_hidden_var)
            outputs2 = sess.run([mean_var2, log_std_var2],
                                feed_dict={input_var: self.obs_inputs})
            output2 = sess.run(
                [step_mean_var2, step_log_std_var2, step_hidden2],
                feed_dict={
                    step_input_var: self.obs_input,
                    step_hidden_var: hidden
                })
            assert np.array_equal(outputs1, outputs2)
            assert np.array_equal(output1, output2)
| 40.292553 | 79 | 0.580066 |
acdf40cc32d596fbf5858d5ed2da2efd7b8b8987 | 7,539 | py | Python | data/__init__.py | jaebradley/flix | adc02c2f08d01e1acd6f18065be70a8c87e71e55 | [
"MIT"
] | null | null | null | data/__init__.py | jaebradley/flix | adc02c2f08d01e1acd6f18065be70a8c87e71e55 | [
"MIT"
] | 4 | 2017-06-13T12:40:03.000Z | 2021-06-01T21:54:58.000Z | data/__init__.py | jaebradley/flix | adc02c2f08d01e1acd6f18065be70a8c87e71e55 | [
"MIT"
] | null | null | null | from enum import Enum
class ProximityGroup(Enum):
    """Named distance buckets for theater searches.

    The values (e.g. ``LT_MILES_1``) look like wire-format "less than N
    miles" tokens -- confirm against the consuming client code.
    """
    WITHIN_A_MILE = "LT_MILES_1"
    WITHIN_THREE_MILES = "LT_MILES_3"
    WITHIN_FIVE_MILES = "LT_MILES_5"
    WITHIN_TEN_MILES = "LT_MILES_10"
class Address:
    """A postal address plus coordinates and distance from the searcher.

    Equality and hashing are value-based over all attributes.
    """

    def __init__(self, street, city, state, zip, longitude, latitude, distance_from_current_location):
        self.street = street
        self.city = city
        self.state = state
        self.zip = zip
        self.longitude = longitude
        self.latitude = latitude
        self.distance_from_current_location = distance_from_current_location

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return vars(self) == vars(other)

    def __ne__(self, other):
        result = self.__eq__(other)
        return NotImplemented if result is NotImplemented else not result

    def __hash__(self):
        # Sort by attribute name so the hash is order-independent.
        return hash(tuple(sorted(vars(self).items())))
class Performance:
    """A single showtime: its start time and booking code.

    Equality and hashing are value-based over all attributes.
    """

    def __init__(self, start_time, code):
        self.start_time = start_time
        self.code = code

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return vars(self) == vars(other)

    def __ne__(self, other):
        result = self.__eq__(other)
        return NotImplemented if result is NotImplemented else not result

    def __hash__(self):
        # Sort by attribute name so the hash is order-independent.
        return hash(tuple(sorted(vars(self).items())))
class PresentationCategory(Enum):
    """Screening formats a movie can be presented in."""
    STANDARD = "STANDARD"
    THREE_D = "THREE_D"
    IMAX = "IMAX"
    FOUR_K = "FOUR_K"
    THREE_D_4K = "THREE_D_4K"
    IMAX_3D = "IMAX_3D"
    IMAX_3D_4K = "IMAX_3D_4K"
    IMAX_4K = "IMAX_4K"

    @staticmethod
    def identify(value):
        """Return the member whose value equals *value*.

        Raises LookupError when no member matches.
        """
        match = next(
            (member for member in PresentationCategory if member.value == value),
            None)
        if match is not None:
            return match
        raise LookupError("Could not identify value: {value}".format(value=value))
class Presentation:
    """A presentation format together with its scheduled performances.

    Equality and hashing are value-based over all attributes; hashing
    requires ``performances`` itself to be hashable.
    """

    def __init__(self, category, performances):
        self.category = category
        self.performances = performances

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return vars(self) == vars(other)

    def __ne__(self, other):
        result = self.__eq__(other)
        return NotImplemented if result is NotImplemented else not result

    def __hash__(self):
        # Sort by attribute name so the hash is order-independent.
        return hash(tuple(sorted(vars(self).items())))
class MovieSchedule:
    """All presentations of one movie, keyed by its id.

    Equality and hashing are value-based over all attributes; hashing
    requires ``presentations`` itself to be hashable.
    """

    def __init__(self, movie_id, presentations):
        self.movie_id = movie_id
        self.presentations = presentations

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return vars(self) == vars(other)

    def __ne__(self, other):
        result = self.__eq__(other)
        return NotImplemented if result is NotImplemented else not result

    def __hash__(self):
        # Sort by attribute name so the hash is order-independent.
        return hash(tuple(sorted(vars(self).items())))
class Theater:
    """A movie theater: identity, contact info, and its movie schedules.

    Equality and hashing are value-based over all attributes; hashing
    requires every attribute (e.g. ``movie_schedules``) to be hashable.
    """

    def __init__(self, fid, name, has_fees, has_tickets, ticket_source,
                 screen_count, map_uri, phone_number, address, seating,
                 movie_schedules):
        self.fid = fid
        self.name = name
        self.has_fees = has_fees
        self.has_tickets = has_tickets
        self.ticket_source = ticket_source
        self.screen_count = screen_count
        self.map_uri = map_uri
        self.phone_number = phone_number
        self.address = address
        self.seating = seating
        self.movie_schedules = movie_schedules

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return vars(self) == vars(other)

    def __ne__(self, other):
        result = self.__eq__(other)
        return NotImplemented if result is NotImplemented else not result

    def __hash__(self):
        # Sort by attribute name so the hash is order-independent.
        return hash(tuple(sorted(vars(self).items())))
class Actor:
    """An actor reference: id, display name, and profile URL.

    Equality and hashing are value-based over all attributes.
    """

    def __init__(self, fid, name, url):
        self.fid = fid
        self.name = name
        self.url = url

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return vars(self) == vars(other)

    def __ne__(self, other):
        result = self.__eq__(other)
        return NotImplemented if result is NotImplemented else not result

    def __hash__(self):
        # Sort by attribute name so the hash is order-independent.
        return hash(tuple(sorted(vars(self).items())))
class FlixsterMovieDetails:
    """Flixster community metrics for a movie.

    Equality and hashing are value-based over all attributes.
    """

    def __init__(self, average_rating, not_interested_count, likability_score,
                 scores_count, want_to_see_count, popcorn_score):
        self.average_rating = average_rating
        self.not_interested_count = not_interested_count
        self.likability_score = likability_score
        self.scores_count = scores_count
        self.want_to_see_count = want_to_see_count
        self.popcorn_score = popcorn_score

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return vars(self) == vars(other)

    def __ne__(self, other):
        result = self.__eq__(other)
        return NotImplemented if result is NotImplemented else not result

    def __hash__(self):
        # Sort by attribute name so the hash is order-independent.
        return hash(tuple(sorted(vars(self).items())))
class RottenTomatoesMovieDetails:
    """Rotten Tomatoes score, certification flag, and critics consensus.

    Equality and hashing are value-based over all attributes.
    """

    def __init__(self, rating, is_certified_fresh, consensus):
        self.rating = rating
        self.is_certified_fresh = is_certified_fresh
        self.consensus = consensus

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return vars(self) == vars(other)

    def __ne__(self, other):
        result = self.__eq__(other)
        return NotImplemented if result is NotImplemented else not result

    def __hash__(self):
        # Sort by attribute name so the hash is order-independent.
        return hash(tuple(sorted(vars(self).items())))
class Movie:
    """A movie: core metadata, cast, and third-party rating details.

    Equality and hashing are value-based over all attributes; hashing
    requires every attribute (e.g. ``actors``) to be hashable.
    """

    def __init__(self, fid, release_date, title, mpaa_rating, run_time,
                 is_live, is_opening, trailer_url, actors,
                 flixster_movie_details, rotten_tomatoes_movie_details):
        self.fid = fid
        self.release_date = release_date
        self.title = title
        self.mpaa_rating = mpaa_rating
        self.run_time = run_time
        self.is_live = is_live
        self.is_opening = is_opening
        self.trailer_url = trailer_url
        self.actors = actors
        self.flixster_movie_details = flixster_movie_details
        self.rotten_tomatoes_movie_details = rotten_tomatoes_movie_details

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return vars(self) == vars(other)

    def __ne__(self, other):
        result = self.__eq__(other)
        return NotImplemented if result is NotImplemented else not result

    def __hash__(self):
        # Sort by attribute name so the hash is order-independent.
        return hash(tuple(sorted(vars(self).items())))
class MoviePresentations:
    """Presentations for a date: theaters plus a movie-to-presentation map.

    Equality and hashing are value-based over all attributes; hashing
    requires every attribute to be hashable (a dict mapping will not be).
    """

    def __init__(self, date, theaters, movie_presentations_mapping):
        self.date = date
        self.theaters = theaters
        self.movie_presentations_mapping = movie_presentations_mapping

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return vars(self) == vars(other)

    def __ne__(self, other):
        result = self.__eq__(other)
        return NotImplemented if result is NotImplemented else not result

    def __hash__(self):
        # Sort by attribute name so the hash is order-independent.
        return hash(tuple(sorted(vars(self).items())))
| 30.277108 | 169 | 0.658045 |
acdf40ea1b463c40907c97a5e8a0e068eb518b3c | 59,104 | py | Python | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2018_11_01/operations/_blob_containers_operations.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | [
"MIT"
] | 2 | 2019-08-23T21:14:00.000Z | 2021-09-07T18:32:34.000Z | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2018_11_01/operations/_blob_containers_operations.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | [
"MIT"
] | 4 | 2019-04-17T17:57:49.000Z | 2020-04-24T21:11:22.000Z | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2018_11_01/operations/_blob_containers_operations.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | [
"MIT"
] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class BlobContainersOperations(object):
"""BlobContainersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storage.v2018_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client used to build and send the HTTP requests.
        self._client = client
        # Serializer/deserializer pair: builds request bodies/URLs and
        # parses responses into the generated model classes.
        self._serialize = serializer
        self._deserialize = deserializer
        # Client configuration; supplies e.g. subscription_id for URLs.
        self._config = config
    def list(
        self,
        resource_group_name,  # type: str
        account_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.ListContainerItems"
        """Lists all containers and does not support a prefix like data plane. Also SRP today does not
        return continuation token.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and lower-
         case letters only.
        :type account_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ListContainerItems, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2018_11_01.models.ListContainerItems
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ListContainerItems"]
        # Status codes mapped to dedicated azure-core exception types by
        # map_error; anything else non-200 becomes HttpResponseError below.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-11-01"
        accept = "application/json"

        # Construct URL (the serializer also validates path-arg constraints).
        url = self.list.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        # Send the request synchronously through the client's policy pipeline.
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ListContainerItems', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers'}  # type: ignore
    def create(
        self,
        resource_group_name,  # type: str
        account_name,  # type: str
        container_name,  # type: str
        blob_container,  # type: "_models.BlobContainer"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.BlobContainer"
        """Creates a new container under the specified account as described by request body. The container
        resource includes metadata and properties for that container. It does not include a list of the
        blobs contained by the container.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and lower-
         case letters only.
        :type account_name: str
        :param container_name: The name of the blob container within the specified storage account.
         Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
         letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
         by a letter or number.
        :type container_name: str
        :param blob_container: Properties of the blob container to create.
        :type blob_container: ~azure.mgmt.storage.v2018_11_01.models.BlobContainer
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BlobContainer, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2018_11_01.models.BlobContainer
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BlobContainer"]
        # Status codes mapped to dedicated azure-core exception types.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-11-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL (the serializer also validates path-arg constraints).
        url = self.create.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(blob_container, 'BlobContainer')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Both 200 and 201 carry a BlobContainer payload in the body.
        if response.status_code == 200:
            deserialized = self._deserialize('BlobContainer', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('BlobContainer', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}'}  # type: ignore
    def update(
        self,
        resource_group_name,  # type: str
        account_name,  # type: str
        container_name,  # type: str
        blob_container,  # type: "_models.BlobContainer"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.BlobContainer"
        """Updates container properties as specified in request body. Properties not mentioned in the
        request will be unchanged. Update fails if the specified container doesn't already exist.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and lower-
         case letters only.
        :type account_name: str
        :param container_name: The name of the blob container within the specified storage account.
         Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
         letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
         by a letter or number.
        :type container_name: str
        :param blob_container: Properties to update for the blob container.
        :type blob_container: ~azure.mgmt.storage.v2018_11_01.models.BlobContainer
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BlobContainer, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2018_11_01.models.BlobContainer
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BlobContainer"]
        # Status codes mapped to dedicated azure-core exception types.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-11-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL (the serializer also validates path-arg constraints).
        url = self.update.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(blob_container, 'BlobContainer')
        body_content_kwargs['content'] = body_content
        # PATCH: only the properties present in the body are changed.
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('BlobContainer', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}'}  # type: ignore
    def get(
        self,
        resource_group_name,  # type: str
        account_name,  # type: str
        container_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.BlobContainer"
        """Gets properties of a specified container.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and lower-
         case letters only.
        :type account_name: str
        :param container_name: The name of the blob container within the specified storage account.
         Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
         letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
         by a letter or number.
        :type container_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BlobContainer, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2018_11_01.models.BlobContainer
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.BlobContainer"]
        # Status codes mapped to dedicated azure-core exception types.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-11-01"
        accept = "application/json"

        # Construct URL (the serializer also validates path-arg constraints).
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('BlobContainer', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}'}  # type: ignore
    def delete(
        self,
        resource_group_name,  # type: str
        account_name,  # type: str
        container_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Deletes specified container under its account.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and lower-
         case letters only.
        :type account_name: str
        :param container_name: The name of the blob container within the specified storage account.
         Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
         letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
         by a letter or number.
        :type container_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Status codes mapped to dedicated azure-core exception types.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-11-01"

        # Construct URL (the serializer also validates path-arg constraints).
        url = self.delete.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Both 200 and 204 are accepted as success; there is no response
        # body to deserialize, so the method returns None.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}'}  # type: ignore
    def set_legal_hold(
        self,
        resource_group_name, # type: str
        account_name, # type: str
        container_name, # type: str
        legal_hold, # type: "_models.LegalHold"
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.LegalHold"
        """Sets legal hold tags. Setting the same tag results in an idempotent operation. SetLegalHold
        follows an append pattern and does not clear out the existing tags that are not specified in
        the request.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and lower-
         case letters only.
        :type account_name: str
        :param container_name: The name of the blob container within the specified storage account.
         Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
         letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
         by a letter or number.
        :type container_name: str
        :param legal_hold: The LegalHold property that will be set to a blob container.
        :type legal_hold: ~azure.mgmt.storage.v2018_11_01.models.LegalHold
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: LegalHold, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2018_11_01.models.LegalHold
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.LegalHold"]
        # Default status-code -> typed-exception mapping; callers may extend or
        # override it via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-11-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL from the operation's metadata template.
        url = self.set_legal_hold.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the LegalHold body and POST; the service echoes back the
        # resulting tag set, which is deserialized below.
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(legal_hold, 'LegalHold')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('LegalHold', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    set_legal_hold.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/setLegalHold'} # type: ignore
    def clear_legal_hold(
        self,
        resource_group_name, # type: str
        account_name, # type: str
        container_name, # type: str
        legal_hold, # type: "_models.LegalHold"
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.LegalHold"
        """Clears legal hold tags. Clearing the same or non-existent tag results in an idempotent
        operation. ClearLegalHold clears out only the specified tags in the request.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and lower-
         case letters only.
        :type account_name: str
        :param container_name: The name of the blob container within the specified storage account.
         Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
         letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
         by a letter or number.
        :type container_name: str
        :param legal_hold: The LegalHold property that will be clear from a blob container.
        :type legal_hold: ~azure.mgmt.storage.v2018_11_01.models.LegalHold
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: LegalHold, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2018_11_01.models.LegalHold
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.LegalHold"]
        # Default status-code -> typed-exception mapping; callers may extend or
        # override it via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-11-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL from the operation's metadata template.
        url = self.clear_legal_hold.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the LegalHold body and POST; only the tags named in the
        # request are removed server-side.
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(legal_hold, 'LegalHold')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('LegalHold', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    clear_legal_hold.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/clearLegalHold'} # type: ignore
    def create_or_update_immutability_policy(
        self,
        resource_group_name, # type: str
        account_name, # type: str
        container_name, # type: str
        if_match=None, # type: Optional[str]
        parameters=None, # type: Optional["_models.ImmutabilityPolicy"]
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.ImmutabilityPolicy"
        """Creates or updates an unlocked immutability policy. ETag in If-Match is honored if given but
        not required for this operation.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and lower-
         case letters only.
        :type account_name: str
        :param container_name: The name of the blob container within the specified storage account.
         Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
         letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
         by a letter or number.
        :type container_name: str
        :param if_match: The entity state (ETag) version of the immutability policy to update. A value
         of "*" can be used to apply the operation only if the immutability policy already exists. If
         omitted, this operation will always be applied.
        :type if_match: str
        :param parameters: The ImmutabilityPolicy Properties that will be created or updated to a blob
         container.
        :type parameters: ~azure.mgmt.storage.v2018_11_01.models.ImmutabilityPolicy
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ImmutabilityPolicy, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2018_11_01.models.ImmutabilityPolicy
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ImmutabilityPolicy"]
        # Default status-code -> typed-exception mapping; callers may extend or
        # override it via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # The service exposes a single, fixed-name policy per container.
        immutability_policy_name = "default"
        api_version = "2018-11-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL from the operation's metadata template.
        url = self.create_or_update_immutability_policy.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
            'immutabilityPolicyName': self._serialize.url("immutability_policy_name", immutability_policy_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers; If-Match is only sent when the caller supplied it.
        header_parameters = {} # type: Dict[str, Any]
        if if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # The body is optional: a PUT without a body is accepted by the service.
        body_content_kwargs = {} # type: Dict[str, Any]
        if parameters is not None:
            body_content = self._serialize.body(parameters, 'ImmutabilityPolicy')
        else:
            body_content = None
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Surface the policy's ETag so callers can use it for later
        # conditional (If-Match) operations.
        response_headers = {}
        response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
        deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, response_headers)

        return deserialized
    create_or_update_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}'} # type: ignore
    def get_immutability_policy(
        self,
        resource_group_name, # type: str
        account_name, # type: str
        container_name, # type: str
        if_match=None, # type: Optional[str]
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.ImmutabilityPolicy"
        """Gets the existing immutability policy along with the corresponding ETag in response headers and
        body.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and lower-
         case letters only.
        :type account_name: str
        :param container_name: The name of the blob container within the specified storage account.
         Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
         letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
         by a letter or number.
        :type container_name: str
        :param if_match: The entity state (ETag) version of the immutability policy to update. A value
         of "*" can be used to apply the operation only if the immutability policy already exists. If
         omitted, this operation will always be applied.
        :type if_match: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ImmutabilityPolicy, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2018_11_01.models.ImmutabilityPolicy
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ImmutabilityPolicy"]
        # Default status-code -> typed-exception mapping; callers may extend or
        # override it via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # The service exposes a single, fixed-name policy per container.
        immutability_policy_name = "default"
        api_version = "2018-11-01"
        accept = "application/json"

        # Construct URL from the operation's metadata template.
        url = self.get_immutability_policy.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
            'immutabilityPolicyName': self._serialize.url("immutability_policy_name", immutability_policy_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers; If-Match is only sent when the caller supplied it.
        header_parameters = {} # type: Dict[str, Any]
        if if_match is not None:
            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Surface the policy's ETag so callers can use it for later
        # conditional (If-Match) operations.
        response_headers = {}
        response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
        deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, response_headers)

        return deserialized
    get_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}'} # type: ignore
    def delete_immutability_policy(
        self,
        resource_group_name, # type: str
        account_name, # type: str
        container_name, # type: str
        if_match, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.ImmutabilityPolicy"
        """Aborts an unlocked immutability policy. The response of delete has
        immutabilityPeriodSinceCreationInDays set to 0. ETag in If-Match is required for this
        operation. Deleting a locked immutability policy is not allowed, only way is to delete the
        container after deleting all blobs inside the container.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and lower-
         case letters only.
        :type account_name: str
        :param container_name: The name of the blob container within the specified storage account.
         Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
         letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
         by a letter or number.
        :type container_name: str
        :param if_match: The entity state (ETag) version of the immutability policy to update.
         Required for this operation; a value of "*" applies the operation only if the immutability
         policy already exists.
        :type if_match: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ImmutabilityPolicy, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2018_11_01.models.ImmutabilityPolicy
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ImmutabilityPolicy"]
        # Default status-code -> typed-exception mapping; callers may extend or
        # override it via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # The service exposes a single, fixed-name policy per container.
        immutability_policy_name = "default"
        api_version = "2018-11-01"
        accept = "application/json"

        # Construct URL from the operation's metadata template.
        url = self.delete_immutability_policy.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
            'immutabilityPolicyName': self._serialize.url("immutability_policy_name", immutability_policy_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers; If-Match is mandatory for delete.
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Surface the policy's ETag so callers can use it for later
        # conditional (If-Match) operations.
        response_headers = {}
        response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
        deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, response_headers)

        return deserialized
    delete_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/{immutabilityPolicyName}'} # type: ignore
    def lock_immutability_policy(
        self,
        resource_group_name, # type: str
        account_name, # type: str
        container_name, # type: str
        if_match, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.ImmutabilityPolicy"
        """Sets the ImmutabilityPolicy to Locked state. The only action allowed on a Locked policy is
        ExtendImmutabilityPolicy action. ETag in If-Match is required for this operation.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and lower-
         case letters only.
        :type account_name: str
        :param container_name: The name of the blob container within the specified storage account.
         Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
         letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
         by a letter or number.
        :type container_name: str
        :param if_match: The entity state (ETag) version of the immutability policy to update.
         Required for this operation; a value of "*" applies the operation only if the immutability
         policy already exists.
        :type if_match: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ImmutabilityPolicy, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2018_11_01.models.ImmutabilityPolicy
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ImmutabilityPolicy"]
        # Default status-code -> typed-exception mapping; callers may extend or
        # override it via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-11-01"
        accept = "application/json"

        # Construct URL from the operation's metadata template (the policy name
        # 'default' is baked into the template for this action).
        url = self.lock_immutability_policy.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers; If-Match is mandatory for lock.
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Surface the policy's ETag so callers can use it for later
        # conditional (If-Match) operations.
        response_headers = {}
        response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
        deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, response_headers)

        return deserialized
    lock_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/default/lock'} # type: ignore
    def extend_immutability_policy(
        self,
        resource_group_name, # type: str
        account_name, # type: str
        container_name, # type: str
        if_match, # type: str
        parameters=None, # type: Optional["_models.ImmutabilityPolicy"]
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.ImmutabilityPolicy"
        """Extends the immutabilityPeriodSinceCreationInDays of a locked immutabilityPolicy. The only
        action allowed on a Locked policy will be this action. ETag in If-Match is required for this
        operation.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and lower-
         case letters only.
        :type account_name: str
        :param container_name: The name of the blob container within the specified storage account.
         Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
         letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
         by a letter or number.
        :type container_name: str
        :param if_match: The entity state (ETag) version of the immutability policy to update.
         Required for this operation; a value of "*" applies the operation only if the immutability
         policy already exists.
        :type if_match: str
        :param parameters: The ImmutabilityPolicy Properties that will be extended for a blob
         container.
        :type parameters: ~azure.mgmt.storage.v2018_11_01.models.ImmutabilityPolicy
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ImmutabilityPolicy, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2018_11_01.models.ImmutabilityPolicy
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ImmutabilityPolicy"]
        # Default status-code -> typed-exception mapping; callers may extend or
        # override it via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-11-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL from the operation's metadata template (the policy name
        # 'default' is baked into the template for this action).
        url = self.extend_immutability_policy.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers; If-Match is mandatory for extend.
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # The body is optional: a POST without a body is accepted by the service.
        body_content_kwargs = {} # type: Dict[str, Any]
        if parameters is not None:
            body_content = self._serialize.body(parameters, 'ImmutabilityPolicy')
        else:
            body_content = None
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Surface the policy's ETag so callers can use it for later
        # conditional (If-Match) operations.
        response_headers = {}
        response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
        deserialized = self._deserialize('ImmutabilityPolicy', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, response_headers)

        return deserialized
    extend_immutability_policy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/immutabilityPolicies/default/extend'} # type: ignore
    def lease(
        self,
        resource_group_name, # type: str
        account_name, # type: str
        container_name, # type: str
        parameters=None, # type: Optional["_models.LeaseContainerRequest"]
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.LeaseContainerResponse"
        """The Lease Container operation establishes and manages a lock on a container for delete
        operations. The lock duration can be 15 to 60 seconds, or can be infinite.

        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and lower-
         case letters only.
        :type account_name: str
        :param container_name: The name of the blob container within the specified storage account.
         Blob container names must be between 3 and 63 characters in length and use numbers, lower-case
         letters and dash (-) only. Every dash (-) character must be immediately preceded and followed
         by a letter or number.
        :type container_name: str
        :param parameters: Lease Container request body.
        :type parameters: ~azure.mgmt.storage.v2018_11_01.models.LeaseContainerRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: LeaseContainerResponse, or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2018_11_01.models.LeaseContainerResponse
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.LeaseContainerResponse"]
        # Default status-code -> typed-exception mapping; callers may extend or
        # override it via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-11-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL from the operation's metadata template.
        url = self.lease.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
            'containerName': self._serialize.url("container_name", container_name, 'str', max_length=63, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # The body is optional: a POST without a body is accepted by the service.
        body_content_kwargs = {} # type: Dict[str, Any]
        if parameters is not None:
            body_content = self._serialize.body(parameters, 'LeaseContainerRequest')
        else:
            body_content = None
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('LeaseContainerResponse', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    # NOTE(review): 'resourcegroups' is lower-cased here, unlike the sibling
    # operations' templates — this comes from the generated spec; confirm against
    # the service definition before normalizing the casing.
    lease.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/blobServices/default/containers/{containerName}/lease'} # type: ignore
| 55.444653 | 297 | 0.682864 |
acdf422678479207e7573abd814a5b09ac3e8a35 | 1,056 | py | Python | pp/components/pcm/cd.py | smartalecH/gdsfactory | 66dfbf740704f1a6155f4812a1d9483ccf5c116c | [
"MIT"
] | 16 | 2020-02-03T07:05:31.000Z | 2021-12-29T18:40:09.000Z | pp/components/pcm/cd.py | smartalecH/gdsfactory | 66dfbf740704f1a6155f4812a1d9483ccf5c116c | [
"MIT"
] | 2 | 2020-01-31T20:01:40.000Z | 2020-09-26T17:50:55.000Z | pp/components/pcm/cd.py | smartalecH/gdsfactory | 66dfbf740704f1a6155f4812a1d9483ccf5c116c | [
"MIT"
] | 7 | 2020-02-09T23:16:18.000Z | 2020-10-30T03:12:04.000Z | """ test critical dimension for width and space
"""
import pp
from pp.layers import LAYER
def square_middle(side=0.5, layer=LAYER.WG):
    """Return a component holding one square of width ``side`` centered at the origin."""
    half = side / 2
    corners = [(-half, -half), (half, -half), (half, half), (-half, half)]
    c = pp.Component()
    c.add_polygon(corners, layer=layer)
    return c
def rectangle(width, height, layer=LAYER.WG):
    """Return a component holding a width x height rectangle centered at the origin."""
    half_w = width / 2
    half_h = height / 2
    outline = [
        (-half_w, -half_h),
        (half_w, -half_h),
        (half_w, half_h),
        (-half_w, half_h),
    ]
    c = pp.Component()
    c.add_polygon(outline, layer=layer)
    return c
def triangle_middle_up(side=0.5, layer=LAYER.WG):
    """Return a component with an upward-pointing triangle inscribed in a side x side box."""
    h = side / 2
    c = pp.Component()
    # Base along the bottom edge, apex centered on the top edge.
    c.add_polygon([(-h, -h), (h, -h), (0, h)], layer=layer)
    return c
def triangle_middle_down(side=0.5, layer=LAYER.WG):
    """Return a component with a downward-pointing triangle inscribed in a side x side box."""
    h = side / 2
    c = pp.Component()
    # Base along the top edge, apex centered on the bottom edge.
    c.add_polygon([(-h, h), (h, h), (0, -h)], layer=layer)
    return c
# Maps a one-letter code to the factory building the center marker:
# S = square, U = upward triangle, D = downward triangle.
CENTER_SHAPES_MAP = {
    "S": square_middle,
    "U": triangle_middle_up,
    "D": triangle_middle_down,
}
if __name__ == "__main__":
    # Quick visual check: build the default square marker and open it in the viewer.
    c = square_middle()
    pp.show(c)
| 22 | 76 | 0.613636 |
acdf42a59836e5bdc7992b8be8237e2e4660bd5b | 456 | py | Python | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/mrp/models/ir_attachment.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | [
"Apache-2.0"
] | 1 | 2019-12-19T01:53:13.000Z | 2019-12-19T01:53:13.000Z | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/mrp/models/ir_attachment.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | [
"Apache-2.0"
] | null | null | null | apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/mrp/models/ir_attachment.py | gtfarng/Odoo_migrade | 9cc28fae4c379e407645248a29d22139925eafe7 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
class IrAttachment(models.Model):
    """Extend ir.attachment with a user-selectable priority.

    Attachments are ordered by descending priority, newest first within
    the same priority (see _order).
    """
    _inherit = 'ir.attachment'
    _order = "priority desc, id desc"

    # Stored as the selection key ('0'..'3'); drives the default list ordering.
    priority = fields.Selection([
        ('0', 'Normal'),
        ('1', 'Low'),
        ('2', 'High'),
        ('3', 'Very High')], string="Priority",
        # Fixed copy-paste in the tooltip: this model lists attachments, not tasks.
        help="Gives the sequence order when displaying a list of attachments.")
acdf43b8e5bf66deaf7c7d37a94a44d17252c9fc | 712 | py | Python | services/web/apps/inv/modelmapping/views.py | xUndero/noc | 9fb34627721149fcf7064860bd63887e38849131 | [
"BSD-3-Clause"
] | 1 | 2019-09-20T09:36:48.000Z | 2019-09-20T09:36:48.000Z | services/web/apps/inv/modelmapping/views.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | services/web/apps/inv/modelmapping/views.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# inv.modelmapping application
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.lib.app.extdocapplication import ExtDocApplication
from noc.inv.models.modelmapping import ModelMapping
from noc.core.translation import ugettext as _
class ModelMappingApplication(ExtDocApplication):
    """
    ModelMapping application: standard ExtDocApplication CRUD UI over the
    inv.ModelMapping document, exposed under Setup > Model Mapping.
    """
    title = _("Model Mapping")
    menu = [_("Setup"), _("Model Mapping")]
    model = ModelMapping
| 30.956522 | 71 | 0.507022 |
acdf44d5f8c3cc4b6ecd21fd0721e5b2d4b29946 | 191 | py | Python | docs/conf.py | moshez/txpursuedpybar | 7e56e0fbe280ff45340ced48f5547443234b9639 | [
"Artistic-2.0"
] | 2 | 2020-04-05T02:40:38.000Z | 2020-04-05T07:48:26.000Z | docs/conf.py | moshez/txpursuedpybear | 7e56e0fbe280ff45340ced48f5547443234b9639 | [
"Artistic-2.0"
] | 7 | 2020-04-18T17:38:29.000Z | 2020-06-14T03:29:59.000Z | docs/conf.py | moshez/txpursuedpybear | 7e56e0fbe280ff45340ced48f5547443234b9639 | [
"Artistic-2.0"
] | null | null | null | extensions = ['sphinx.ext.autodoc']
# Sphinx project configuration (read by sphinx-build at documentation build time).
master_doc = 'index'
html_theme = "sphinx_rtd_theme"
project = 'Twisted Extension PursuedPyBear'
copyright = '2020, Contributors'
author = 'Contributors'
| 23.875 | 43 | 0.764398 |
acdf452564b9873f20f17928d77594b624430731 | 23,269 | py | Python | tests/test_util.py | OrquestraDigital/aboutcode-toolkit | d9ff859735a72635563fb5a9e265ecd7023d401a | [
"Apache-2.0"
] | 1 | 2021-08-31T10:58:29.000Z | 2021-08-31T10:58:29.000Z | tests/test_util.py | sthagen/aboutcode-toolkit | cd74f15bcc223c7e1b7424f169481af8e55e0f38 | [
"Apache-2.0"
] | null | null | null | tests/test_util.py | sthagen/aboutcode-toolkit | cd74f15bcc223c7e1b7424f169481af8e55e0f38 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf8 -*-
# ============================================================================
# Copyright (c) nexB Inc. http://www.nexb.com/ - All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import string
import unittest
import saneyaml
from testing_utils import extract_test_loc
from testing_utils import get_test_loc
from testing_utils import get_temp_dir
from testing_utils import on_posix
from testing_utils import on_windows
from attributecode import CRITICAL
from attributecode import Error
from attributecode import model
from attributecode import util
class TestResourcePaths(unittest.TestCase):
    """Tests for the path/name helpers in attributecode.util:
    resource_name, to_posix, to_native, invalid_chars, check_file_names,
    is_about_file and get_relative_path."""

    def test_resource_name(self):
        expected = 'first'
        result = util.resource_name('some/things/first')
        assert expected == result

    def test_resource_name_with_extension(self):
        expected = 'first.ABOUT'
        result = util.resource_name('/some/things/first.ABOUT')
        assert expected == result

    def test_resource_name_for_dir(self):
        expected = 'first'
        result = util.resource_name('some/things/first/')
        assert expected == result

    def test_resource_name_windows(self):
        expected = r'first.'
        result = util.resource_name(r'c:\some\things\first.')
        assert expected == result

    def test_resource_name_mixed_windows_posix(self):
        expected = r'first'
        result = util.resource_name(r'c:\some/things\first')
        assert expected == result

    def test_resource_name_double_slash(self):
        expected = 'first'
        result = util.resource_name(r'some\thi ngs//first')
        assert expected == result

    def test_resource_name_punctuation(self):
        expected = '_$asafg:'
        result = util.resource_name('%6571351()2/75612$/_$asafg:')
        assert expected == result

    def test_resource_name_simple_slash(self):
        expected = ''
        result = util.resource_name('/')
        assert expected == result

    def test_resource_name_spaces(self):
        expected = ''
        result = util.resource_name('/ / ')
        assert expected == result

    def test_resource_name_does_not_recurse_infinitely(self):
        expected = ''
        result = util.resource_name(' / ')
        assert expected == result

    def test_to_posix_from_win(self):
        test = r'c:\this\that'
        expected = 'c:/this/that'
        result = util.to_posix(test)
        assert expected == result

    def test_to_posix_from_posix(self):
        test = r'/this/that'
        expected = '/this/that'
        result = util.to_posix(test)
        assert expected == result

    def test_to_posix_from_mixed(self):
        test = r'/this/that\this'
        expected = '/this/that/this'
        result = util.to_posix(test)
        assert expected == result

    # to_native results depend on the host OS, hence the on_posix/on_windows
    # branches below.
    def test_to_native_from_win(self):
        test = r'c:\this\that'
        if on_posix:
            expected = 'c:/this/that'
        else:
            expected = test
        result = util.to_native(test)
        assert expected == result

    def test_to_native_from_posix(self):
        test = r'/this/that'
        if on_windows:
            expected = r'\this\that'
        else:
            expected = test
        result = util.to_native(test)
        assert expected == result

    def test_to_native_from_mixed(self):
        test = r'/this/that\this'
        if on_windows:
            expected = r'\this\that\this'
        else:
            expected = r'/this/that/this'
        result = util.to_native(test)
        assert expected == result

    def test_invalid_chars_with_valid_chars(self):
        name = string.digits + string.ascii_letters + '_-.+'
        result = util.invalid_chars(name)
        expected = []
        assert expected == result

    def test_space_is_valid_chars(self):
        result = util.invalid_chars(' ')
        expected = []
        assert expected == result

    def test_invalid_chars_with_invalid_in_name_and_dir(self):
        result = util.invalid_chars('_$as/afg:')
        expected = [':']
        assert expected == result

    def test_invalid_chars_in_file_name(self):
        name = '%657!1351()275612$_$asafg:~|[]{}+-.'
        result = util.invalid_chars(name)
        expected = ['%', '!', '$', '$', ':']
        assert expected == result

    def test_invalid_chars_with_space_is_valid(self):
        result = util.invalid_chars('_ Hello')
        expected = []
        assert expected == result

    def test_check_file_names_with_dupes_return_errors(self):
        paths = ['some/path', 'some/PAth']
        result = util.check_file_names(paths)
        expected = [
            Error(
                CRITICAL,
                "Duplicate files: 'some/PAth' and 'some/path' have the same case-insensitive file name")
        ]
        assert expected == result

    def test_check_file_names_without_dupes_return_no_error(self):
        paths = ['some/path',
                 'some/otherpath']
        result = util.check_file_names(paths)
        expected = []
        assert expected == result

    def test_check_file_names_with_no_invalid_char_return_no_error(self):
        paths = [
            'locations/file',
            'locations/file1',
            'locations/file2',
            'locations/dir1/file2',
            'locations/dir1/dir2/file1',
            'locations/dir2/file1']
        expected = []
        result = util.check_file_names(paths)
        assert expected == result

    def test_check_file_names_with_invalid_chars_return_errors(self):
        paths = [
            'locations/file',
            'locations/file with space',
            'locations/dir1/dir2/file1',
            'locations/dir2/file1',
            'Accessibilité/ périmètre'
        ]
        import sys
        # The error message is bytes on python2 and text on python3.
        if sys.version_info[0] < 3:  # python2
            expected = [Error(CRITICAL, b"Invalid characters '\xe9\xe8' in file name at: 'Accessibilit\xe9/ p\xe9rim\xe8tre'")]
        else:
            expected = [Error(CRITICAL, "Invalid characters 'éè' in file name at: 'Accessibilité/ périmètre'")]
        result = util.check_file_names(paths)
        assert expected[0].message == result[0].message
        assert expected == result

    def test_is_about_file(self):
        assert util.is_about_file('test.About')
        assert util.is_about_file('test2.aboUT')
        assert not util.is_about_file('no_about_ext.something')
        assert not util.is_about_file('about')
        assert not util.is_about_file('about.txt')

    def test_is_about_file_is_false_if_only_bare_extension(self):
        assert not util.is_about_file('.ABOUT')

    def test_get_relative_path(self):
        # (base location, full location, expected relative path)
        test = [('/some/path', '/some/path/file', 'file'),
                ('path', '/path/file', 'file'),
                ('/path', '/path/file', 'file'),
                ('/path/', '/path/file/', 'file'),
                ('/path/', 'path/', 'path'),
                ('/p1/p2/p3', '/p1/p2//p3/file', 'file'),
                (r'c:\some/path', 'c:/some/path/file', 'file'),
                (r'c:\\some\\path\\', 'c:/some/path/file', 'file'),
                ]
        for base_loc, full_loc, expected in test:
            result = util.get_relative_path(base_loc, full_loc)
            assert expected == result

    def test_get_relative_path_with_same_path_twice(self):
        test = [('/some/path/file', 'path/file'),
                ('/path/file', 'path/file'),
                ('/path/file/', 'path/file'),
                ('path/', 'path'),
                ('/p1/p2//p3/file', 'p3/file'),
                ('c:/some/path/file', 'path/file'),
                (r'c:\\some\\path\\file', 'path/file'),
                ]
        for loc, expected in test:
            result = util.get_relative_path(loc, loc)
            assert expected == result
class TestGetLocations(unittest.TestCase):
    """Tests for util.get_locations / util.get_about_locations directory walking."""

    def test_get_locations(self):
        test_dir = get_test_loc('test_util/about_locations')
        expected = sorted([
            'file with_spaces.ABOUT',
            'file1',
            'file2',
            'dir1/file2',
            'dir1/file2.aBout',
            'dir1/dir2/file1.about',
            'dir2/file1'])
        result = sorted(util.get_locations(test_dir))
        # Strip the absolute test-fixture prefix to compare relative paths.
        result = [l.partition('/about_locations/')[-1] for l in result]
        assert expected == result

    def test_get_about_locations(self):
        test_dir = get_test_loc('test_util/about_locations')
        expected = sorted([
            'file with_spaces.ABOUT',
            'dir1/file2.aBout',
            'dir1/dir2/file1.about',
        ])
        result = sorted(util.get_about_locations(test_dir))
        result = [l.partition('/about_locations/')[-1] for l in result]
        assert expected == result

    def test_get_locations_can_yield_a_single_file(self):
        test_file = get_test_loc('test_util/about_locations/file with_spaces.ABOUT')
        result = list(util.get_locations(test_file))
        assert 1 == len(result)

    def test_get_about_locations_for_about(self):
        location = get_test_loc('test_util/get_about_locations')
        result = list(util.get_about_locations(location))
        expected = 'get_about_locations/about.ABOUT'
        assert result[0].endswith(expected)

    # FIXME: these are not very long/deep paths
    def test_get_locations_with_very_long_path(self):
        longpath = (
            'longpath'
            '/longpath1/longpath1/longpath1/longpath1/longpath1/longpath1/longpath1'
            '/longpath1/longpath1/longpath1/longpath1/longpath1/longpath1/longpath1'
            '/longpath1/longpath1/longpath1/longpath1/longpath1/longpath1/longpath1'
            '/longpath1/longpath1/longpath1/longpath1/longpath1/longpath1/longpath1'
        )
        test_loc = extract_test_loc('test_util/longpath.zip')
        result = list(util.get_locations(test_loc))
        assert any(longpath in r for r in result)
class TestCsv(unittest.TestCase):
    """Tests for util.load_csv and util.format_about_dict_for_csv_output."""

    def test_load_csv_without_mapping(self):
        test_file = get_test_loc('test_util/csv/about.csv')
        expected = [dict([
            ('about_file', 'about.ABOUT'),
            ('about_resource', '.'),
            ('name', 'ABOUT tool'),
            ('version', '0.8.1')])
        ]
        result = util.load_csv(test_file)
        assert expected == result

    # NOTE(review): identical fixture and expectations as
    # test_load_csv_without_mapping above — possibly a redundant test.
    def test_load_csv_load_rows(self):
        test_file = get_test_loc('test_util/csv/about.csv')
        expected = [dict([
            ('about_file', 'about.ABOUT'),
            ('about_resource', '.'),
            ('name', 'ABOUT tool'),
            ('version', '0.8.1')])
        ]
        result = util.load_csv(test_file)
        assert expected == result

    def test_load_csv_does_convert_column_names_to_lowercase(self):
        test_file = get_test_loc('test_util/csv/about_key_with_upper_case.csv')
        expected = [dict(
            [('about_file', 'about.ABOUT'),
             ('about_resource', '.'),
             ('name', 'ABOUT tool'),
             ('version', '0.8.1')])
        ]
        result = util.load_csv(test_file)
        assert expected == result

    def test_format_about_dict_for_csv_output(self):
        about = [dict([
            (u'about_file_path', u'/input/about1.ABOUT'),
            (u'about_resource', [u'test.c']),
            (u'name', u'AboutCode-toolkit'),
            (u'license_expression', u'mit AND bsd-new'),
            (u'license_key', [u'mit', u'bsd-new'])])]
        # List values are flattened: single item unwrapped, multiple items
        # newline-joined.
        expected = [dict([
            (u'about_file_path', u'/input/about1.ABOUT'),
            (u'about_resource', u'test.c'),
            (u'name', u'AboutCode-toolkit'),
            (u'license_expression', u'mit AND bsd-new'),
            (u'license_key', u'mit\nbsd-new')])]
        output = util.format_about_dict_for_csv_output(about)
        assert output == expected

    def test_load_csv_microsoft_utf_8(self):
        # Exercises a file saved with a Microsoft UTF-8 BOM.
        test_file = get_test_loc('test_util/csv/test_ms_utf8.csv')
        expected = [dict([(u'about_resource', u'/myFile'), (u'name', u'myName')])]
        result = util.load_csv(test_file)
        assert expected == result

    def test_load_csv_utf_8(self):
        test_file = get_test_loc('test_util/csv/test_utf8.csv')
        expected = [dict([(u'about_resource', u'/myFile'), (u'name', u'\u540d')])]
        result = util.load_csv(test_file)
        assert expected == result
class TestJson(unittest.TestCase):
    """Tests for util.load_json and util.format_about_dict_for_json_output,
    including inputs exported by AboutCode Manager and ScanCode."""

    def test_load_json(self):
        test_file = get_test_loc('test_util/json/expected.json')
        expected = [dict([
            ('about_file_path', '/load/this.ABOUT'),
            ('about_resource', '.'),
            ('name', 'AboutCode'),
            ('version', '0.11.0')])
        ]
        result = util.load_json(test_file)
        assert expected == result

    def test_load_json2(self):
        test_file = get_test_loc('test_util/json/expected_need_mapping.json')
        expected = [dict(dict([
            ('about_file', '/load/this.ABOUT'),
            ('about_resource', '.'),
            ('version', '0.11.0'),
            ('name', 'AboutCode'),
        ])
        )]
        result = util.load_json(test_file)
        assert expected == result

    def test_load_non_list_json(self):
        test_file = get_test_loc('test_util/json/not_a_list_need_mapping.json')
        # FIXME: why this dict nesting??
        expected = [dict(dict([
            ('about_resource', '.'),
            ('name', 'AboutCode'),
            ('path', '/load/this.ABOUT'),
            ('version', '0.11.0'),
        ])
        )]
        result = util.load_json(test_file)
        assert expected == result

    def test_load_non_list_json2(self):
        test_file = get_test_loc('test_util/json/not_a_list.json')
        expected = [dict([
            ('about_file_path', '/load/this.ABOUT'),
            ('version', '0.11.0'),
            ('about_resource', '.'),
            ('name', 'AboutCode'),
        ])
        ]
        result = util.load_json(test_file)
        assert expected == result

    def test_load_json_from_abc_mgr(self):
        test_file = get_test_loc('test_util/json/aboutcode_manager_exported.json')
        expected = [dict(dict([
            ('license_expression', 'apache-2.0'),
            ('copyright', 'Copyright (c) 2017 nexB Inc.'),
            ('licenses', [{'key':'apache-2.0'}]),
            ('copyrights', [{'statements':['Copyright (c) 2017 nexB Inc.']}]),
            ('path', 'ScanCode'),
            ('review_status', 'Analyzed'),
            ('name', 'ScanCode'),
            ('version', '2.2.1'),
            ('owner', 'nexB Inc.'),
            ('code_type', 'Source'),
            ('is_modified', False),
            ('is_deployed', False),
            ('feature', ''),
            ('purpose', ''),
            ('homepage_url', None),
            ('download_url', None),
            ('license_url', None),
            ('notice_url', None),
            ('programming_language', 'Python'),
            ('notes', ''),
            ('fileId', 8458),
        ]))]
        result = util.load_json(test_file)
        assert expected == result

    def test_load_json_from_scancode(self):
        test_file = get_test_loc('test_util/json/scancode_info.json')
        expected = [dict(dict([
            ('type', 'file'),
            ('name', 'Api.java'),
            ('path', 'Api.java'),
            ('base_name', 'Api'),
            ('extension', '.java'),
            ('size', 5074),
            ('date', '2017-07-15'),
            ('sha1', 'c3a48ec7e684a35417241dd59507ec61702c508c'),
            ('md5', '326fb262bbb9c2ce32179f0450e24601'),
            ('mime_type', 'text/plain'),
            ('file_type', 'ASCII text'),
            ('programming_language', 'Java'),
            ('is_binary', False),
            ('is_text', True),
            ('is_archive', False),
            ('is_media', False),
            ('is_source', True),
            ('is_script', False),
            ('files_count', 0),
            ('dirs_count', 0),
            ('size_count', 0),
            ('scan_errors', []),
        ]))]
        result = util.load_json(test_file)
        assert expected == result

    def test_format_about_dict_for_json_output(self):
        about = [dict([
            (u'about_file_path', u'/input/about1.ABOUT'),
            (u'about_resource', dict([(u'test.c', None)])),
            (u'name', u'AboutCode-toolkit'),
            (u'license_key', [u'mit', u'bsd-new'])])]
        # license_key entries become a list of {'key': ...} dicts under 'licenses'.
        expected = [dict([
            (u'about_file_path', u'/input/about1.ABOUT'),
            (u'about_resource', u'test.c'),
            (u'name', u'AboutCode-toolkit'),
            (u'licenses', [
                dict([(u'key', u'mit')]),
                dict([(u'key', u'bsd-new')])])])]
        output = util.format_about_dict_for_json_output(about)
        assert output == expected
class TestMiscUtils(unittest.TestCase):
    """Tests for YAML duplicate-key handling, util.ungroup_licenses,
    util.unique and the license/file copying helpers."""

    # NOTE(review): the dump this was recovered from lost indentation, so the
    # internal layout of the YAML string literals below is reconstructed —
    # confirm against the original fixtures.
    def test_load_yaml_about_file_with_no_dupe(self):
        test = '''
name: test
license_expression: mit
notes: dup key here
'''
        saneyaml.load(test, allow_duplicate_keys=False)

    def test_load_yaml_about_file_raise_exception_on__duplicate(self):
        test = '''
name: test
notes: some notes
notes: dup key here
notes: dup key here
license_expression: mit
notes: dup key here
'''
        try:
            saneyaml.load(test, allow_duplicate_keys=False)
            self.fail('Exception not raised')
        except saneyaml.UnsupportedYamlFeatureError as e :
            assert 'Duplicate key in YAML source: notes' == str(e)

    def test_load_yaml_about_file_raise_exception_on_invalid_yaml_ignore_non_key_line(self):
        test = '''
name: test
- notes: some notes
- notes: dup key here
# some
notes: dup key here
license_expression: mit
notes dup key here
'''
        try:
            saneyaml.load(test, allow_duplicate_keys=False)
            self.fail('Exception not raised')
        except Exception:
            pass

    def test_load_yaml_about_file_with_multiline(self):
        test = '''
name: test
owner: test
notes: |
    String block here
license_expression: mit
owner: test1
notes: continuation
    line
description: sample
'''
        try:
            saneyaml.load(test, allow_duplicate_keys=False)
            self.fail('Exception not raised')
        except saneyaml.UnsupportedYamlFeatureError as e :
            # The exception is raised only for the first duplicate key (owner),
            # even though notes is also duplicated.
            assert 'Duplicate key in YAML source: owner' == str(e)

    def test_ungroup_licenses(self):
        about = [
            dict([
                (u'key', u'mit'),
                (u'name', u'MIT License'),
                (u'file', u'mit.LICENSE'),
                (u'url', u'https://enterprise.dejacode.com/urn/?urn=urn:dje:license:mit')]),
            dict([
                (u'key', u'bsd-new'),
                (u'name', u'BSD-3-Clause'),
                (u'file', u'bsd-new.LICENSE'),
                (u'url', u'https://enterprise.dejacode.com/urn/?urn=urn:dje:license:bsd-new')])
        ]
        expected_lic_key = [u'mit', u'bsd-new']
        expected_lic_name = [u'MIT License', u'BSD-3-Clause']
        expected_lic_file = [u'mit.LICENSE', u'bsd-new.LICENSE']
        expected_lic_url = [
            u'https://enterprise.dejacode.com/urn/?urn=urn:dje:license:mit',
            u'https://enterprise.dejacode.com/urn/?urn=urn:dje:license:bsd-new']
        lic_key, lic_name, lic_file, lic_url = util.ungroup_licenses(about)
        assert expected_lic_key == lic_key
        assert expected_lic_name == lic_name
        assert expected_lic_file == lic_file
        assert expected_lic_url == lic_url

    def test_unique_does_deduplicate_and_keep_ordering(self):
        items = ['a', 'b', 'd', 'b', 'c', 'a']
        expected = ['a', 'b', 'd', 'c']
        results = util.unique(items)
        assert expected == results

    def test_unique_can_handle_About_object(self):
        base_dir = 'some_dir'
        test = {
            'about_resource': '.',
            'author': '',
            'copyright': 'Copyright (c) 2013-2014 nexB Inc.',
            'custom1': 'some custom',
            'custom_empty': '',
            'description': 'AboutCode is a tool\nfor files.',
            'license': 'apache-2.0',
            'name': 'AboutCode',
            'owner': 'nexB Inc.'
        }
        a = model.About()
        a.load_dict(test, base_dir)
        c = model.About()
        c.load_dict(test, base_dir)
        b = model.About()
        # b differs from a/c only by about_resource, so it is kept; c is a dupe.
        test.update(dict(about_resource='asdasdasd'))
        b.load_dict(test, base_dir)
        abouts = [a, b]
        results = util.unique(abouts)
        assert [a] == results

    def test_copy_license_notice_files(self):
        base_dir = get_temp_dir()
        reference_dir = get_test_loc('test_util/licenses')
        fields = [(u'license_expression', u'mit or public-domain'),
                  (u'about_resource', u'.'),
                  (u'name', u'test'),
                  (u'license_key', [u'mit', u'public-domain']),
                  (u'license_file', [u'mit.LICENSE, mit2.LICENSE', u'public-domain.LICENSE'])]
        util.copy_license_notice_files(fields, base_dir, reference_dir, '')
        licenses = ['mit.LICENSE', 'mit2.LICENSE', 'public-domain.LICENSE']
        from os import listdir
        copied_files = listdir(base_dir)
        assert len(licenses) == len(copied_files)
        for license in licenses:
            assert license in copied_files

    def test_copy_file(self):
        des = get_temp_dir()
        test_file = get_test_loc('test_util/licenses/mit.LICENSE')
        licenses = ['mit.LICENSE']
        err = util.copy_file(test_file, des)
        from os import listdir
        copied_files = listdir(des)
        assert len(licenses) == len(copied_files)
        assert err == ''
        for license in licenses:
            assert license in copied_files

    def test_copy_file_with_dir(self):
        des = get_temp_dir()
        test_dir = get_test_loc('test_util/licenses/')
        licenses = ['mit.LICENSE', 'mit2.LICENSE', 'public-domain.LICENSE']
        err = util.copy_file(test_dir, des)
        assert err == ''
        import os
        files_list = []
        dir_list = []
        # Get the directories and files in the 'des' recursively
        for root, dir, files in os.walk(des):
            for d in dir:
                dir_list.append(d)
            for f in files:
                files_list.append(f)
        # assert dir_list == [u'licenses']
        assert len(licenses) == len(files_list)
        for license in licenses:
            assert license in files_list
acdf45c497e32ba6d7cbfc74b7c3887204a41788 | 20,644 | py | Python | py/vtdb/grpc_vtgate_client.py | guokeno0/vitess | 2228de2d8f0c5032c5a53277abf5515ae9fc38a1 | [
"BSD-3-Clause"
] | null | null | null | py/vtdb/grpc_vtgate_client.py | guokeno0/vitess | 2228de2d8f0c5032c5a53277abf5515ae9fc38a1 | [
"BSD-3-Clause"
] | null | null | null | py/vtdb/grpc_vtgate_client.py | guokeno0/vitess | 2228de2d8f0c5032c5a53277abf5515ae9fc38a1 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2013 Google Inc. All Rights Reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""A simple, direct connection to the vtgate proxy server, using gRPC.
"""
import datetime
import logging
import re
from urlparse import urlparse
from grpc.beta import implementations
from grpc.beta import interfaces
from grpc.framework.interfaces.face import face
from vtproto import query_pb2
from vtproto import topodata_pb2
from vtproto import vtgate_pb2
from vtproto import vtgateservice_pb2
from vtdb import dbapi
from vtdb import dbexceptions
from vtdb import field_types
from vtdb import field_types_proto3
from vtdb import keyrange_constants
from vtdb import keyspace
from vtdb import times
from vtdb import vtdb_logger
from vtdb import vtgate_client
from vtdb import vtgate_cursor
from vtdb import vtgate_utils
# 2**63, the first value that does not fit in a signed 64-bit integer.
# Presumably the cutoff used when converting integer bind variables —
# confirm at the use site (not visible in this chunk).
INT_UPPERBOUND_PLUS_ONE = 1<<63
# Extracts the numeric errno from MySQL-style error text, e.g. '(errno 1062)'.
_errno_pattern = re.compile(r'\(errno (\d+)\)', re.IGNORECASE)
# Matches tablet throttler messages so they can be classified separately.
_throttler_err_pattern = re.compile(
    r'exceeded (.*) quota, rate limiting', re.IGNORECASE)
class GRPCVTGateConnection(vtgate_client.VTGateClient):
"""A simple, direct connection to the vtgate query service.
"""
  def __init__(self, addr, timeout):
    """Create an unconnected client; call dial() to connect.

    addr: 'host:port' address of the vtgate server.
    timeout: per-RPC timeout, passed to every stub call.
    """
    super(GRPCVTGateConnection, self).__init__(addr, timeout)
    self.stub = None  # created by dial(), cleared by close()
    self.logger_object = vtdb_logger.get_logger()
  def dial(self):
    """(Re)connect to vtgate at self.addr over an insecure gRPC channel."""
    if self.stub:
      # NOTE(review): the beta stub API may not expose close(); confirm this
      # call is valid for stubs created by beta_create_Vitess_stub (close()
      # below merely drops the reference instead).
      self.stub.close()
    # self.addr is bare host:port; prepend a scheme so urlparse can split it.
    p = urlparse('http://' + self.addr)
    channel = implementations.insecure_channel(p.hostname, p.port)
    self.stub = vtgateservice_pb2.beta_create_Vitess_stub(channel)
  def close(self):
    """close closes the server connection and frees up associated resources.

    The stub object is managed by the gRPC library, removing references
    to it will just close the channel.
    """
    # Roll back any transaction left open so server-side state is released.
    if self.session and self.session.in_transaction:
      self.rollback()
    self.stub = None
def is_closed(self):
return self.stub is None
def cursor(self, *pargs, **kwargs):
cursorclass = kwargs.pop('cursorclass', None) or vtgate_cursor.VTGateCursor
return cursorclass(self, *pargs, **kwargs)
  def begin(self, effective_caller_id=None):
    """Start a transaction; stores the returned session on the connection.

    Raises a dbexceptions error converted from any gRPC/Vitess failure.
    """
    try:
      request = vtgate_pb2.BeginRequest()
      _add_caller_id(request, effective_caller_id)
      response = self.stub.Begin(request, self.timeout)
      # we're saving effective_caller_id to re-use it for commit and rollback.
      self.effective_caller_id = effective_caller_id
      self.session = response.session
    except (face.AbortionError, vtgate_utils.VitessError) as e:
      raise _convert_exception(e)
  def commit(self):
    """Commit the current transaction.

    The session and the saved caller id are always cleared — even when the
    RPC fails — leaving the connection outside any transaction.
    """
    try:
      request = vtgate_pb2.CommitRequest()
      _add_caller_id(request, self.effective_caller_id)
      self._add_session(request)
      self.stub.Commit(request, self.timeout)
    except (face.AbortionError, vtgate_utils.VitessError) as e:
      raise _convert_exception(e)
    finally:
      self.session = None
      self.effective_caller_id = None
  def rollback(self):
    """Roll back the current transaction.

    Mirrors commit(): session and saved caller id are always cleared,
    even when the RPC fails.
    """
    try:
      request = vtgate_pb2.RollbackRequest()
      _add_caller_id(request, self.effective_caller_id)
      self._add_session(request)
      self.stub.Rollback(request, self.timeout)
    except (face.AbortionError, vtgate_utils.VitessError) as e:
      raise _convert_exception(e)
    finally:
      self.session = None
      self.effective_caller_id = None
  @vtgate_utils.exponential_backoff_retry((dbexceptions.TransientError))
  def _execute(
      self, sql, bind_variables, tablet_type, keyspace_name=None,
      shards=None, keyspace_ids=None, keyranges=None,
      entity_keyspace_id_map=None, entity_column_name=None,
      not_in_transaction=False, effective_caller_id=None, **kwargs):
    """Execute one query, dispatching on the routing argument provided.

    Exactly one of shards / keyspace_ids / keyranges /
    entity_keyspace_id_map is expected; checked in that order, falling back
    to the unrouted (v3) Execute RPC when none is set.  Returns the row set
    built by _get_rowset_from_query_result; retried on TransientError by
    the decorator.
    """
    # FIXME(alainjobart): keyspace should be in routing_kwargs,
    # as it's not used for v3.
    # FIXME(alainjobart): the v3 part doesn't take the python-style queries
    # for bind variables (the %(xxx)s), but our style (the :xxx).
    # this is not consistent with the rest.
    try:
      # routing_kwargs is only used to enrich the converted exception below.
      routing_kwargs = {}
      exec_method = None
      if shards is not None:
        # Route by explicit shard names.
        routing_kwargs['shards'] = shards
        exec_method = 'ExecuteShards'
        sql, bind_variables = dbapi.prepare_query_bind_vars(sql, bind_variables)
        request = vtgate_pb2.ExecuteShardsRequest(
            query=query_pb2.BoundQuery(sql=sql),
            tablet_type=topodata_pb2.TabletType.Value(tablet_type.upper()),
            keyspace=keyspace_name,
            not_in_transaction=not_in_transaction,
        )
        _add_caller_id(request, effective_caller_id)
        self._add_session(request)
        request.shards.extend(shards)
        _convert_bind_vars(bind_variables, request.query.bind_variables)
        response = self.stub.ExecuteShards(request, self.timeout)
      elif keyspace_ids is not None:
        # Route by keyspace id values.
        routing_kwargs['keyspace_ids'] = keyspace_ids
        exec_method = 'ExecuteKeyspaceIds'
        sql, bind_variables = dbapi.prepare_query_bind_vars(sql, bind_variables)
        request = vtgate_pb2.ExecuteKeyspaceIdsRequest(
            query=query_pb2.BoundQuery(sql=sql),
            tablet_type=topodata_pb2.TabletType.Value(tablet_type.upper()),
            keyspace=keyspace_name,
            not_in_transaction=not_in_transaction,
        )
        _add_caller_id(request, effective_caller_id)
        self._add_session(request)
        request.keyspace_ids.extend(keyspace_ids)
        _convert_bind_vars(bind_variables, request.query.bind_variables)
        response = self.stub.ExecuteKeyspaceIds(request, self.timeout)
      elif keyranges is not None:
        # Route by key ranges.
        routing_kwargs['keyranges'] = keyranges
        exec_method = 'ExecuteKeyRanges'
        sql, bind_variables = dbapi.prepare_query_bind_vars(sql, bind_variables)
        request = vtgate_pb2.ExecuteKeyRangesRequest(
            query=query_pb2.BoundQuery(sql=sql),
            tablet_type=topodata_pb2.TabletType.Value(tablet_type.upper()),
            keyspace=keyspace_name,
            not_in_transaction=not_in_transaction,
        )
        _add_caller_id(request, effective_caller_id)
        self._add_session(request)
        _add_key_ranges(request, keyranges)
        _convert_bind_vars(bind_variables, request.query.bind_variables)
        response = self.stub.ExecuteKeyRanges(request, self.timeout)
      elif entity_keyspace_id_map is not None:
        # Route by an entity column whose values map to keyspace ids.
        routing_kwargs['entity_keyspace_id_map'] = entity_keyspace_id_map
        routing_kwargs['entity_column_name'] = entity_column_name
        exec_method = 'ExecuteEntityIds'
        sql, bind_variables = dbapi.prepare_query_bind_vars(sql, bind_variables)
        request = vtgate_pb2.ExecuteEntityIdsRequest(
            query=query_pb2.BoundQuery(sql=sql),
            tablet_type=topodata_pb2.TabletType.Value(tablet_type.upper()),
            keyspace=keyspace_name,
            entity_column_name=entity_column_name,
            not_in_transaction=not_in_transaction,
        )
        _add_caller_id(request, effective_caller_id)
        self._add_session(request)
        _convert_bind_vars(bind_variables, request.query.bind_variables)
        _convert_entity_ids(entity_keyspace_id_map, request.entity_keyspace_ids)
        response = self.stub.ExecuteEntityIds(request, self.timeout)
      else:
        # No routing info: let vtgate (v3) plan the query itself.
        exec_method = 'Execute'
        request = vtgate_pb2.ExecuteRequest(
            query=query_pb2.BoundQuery(sql=sql),
            tablet_type=topodata_pb2.TabletType.Value(tablet_type.upper()),
            not_in_transaction=not_in_transaction,
        )
        _add_caller_id(request, effective_caller_id)
        self._add_session(request)
        _convert_bind_vars(bind_variables, request.query.bind_variables)
        response = self.stub.Execute(request, self.timeout)
      self.session = response.session
      _extract_rpc_error(exec_method, response.error)
      return _get_rowset_from_query_result(response.result)
    except (face.AbortionError, vtgate_utils.VitessError) as e:
      # Bind variables may contain sensitive data; log them via the
      # dedicated private-data logger only.
      self.logger_object.log_private_data(bind_variables)
      raise _convert_exception(
          e, sql, keyspace=keyspace_name, tablet_type=tablet_type,
          **routing_kwargs)
  @vtgate_utils.exponential_backoff_retry((dbexceptions.TransientError))
  def _execute_batch(
      self, sql_list, bind_variables_list, keyspace_list, keyspace_ids_list,
      shards_list, tablet_type, as_transaction, effective_caller_id=None,
      **kwargs):
    """Execute a batch of queries in a single RPC.

    The first entry of keyspace_ids_list decides the routing style for the
    WHOLE batch: keyspace-id routing when truthy, shard routing otherwise
    (mixed batches are not supported; also raises IndexError on an empty
    batch).  Returns one row set per query, in order.
    """
    try:
      if keyspace_ids_list[0]:
        exec_method = 'ExecuteBatchKeyspaceIds'
        request = vtgate_pb2.ExecuteBatchKeyspaceIdsRequest(
            tablet_type=topodata_pb2.TabletType.Value(tablet_type.upper()),
            as_transaction=as_transaction,
        )
        _add_caller_id(request, effective_caller_id)
        self._add_session(request)
        for sql, bind_variables, keyspace_name, keyspace_ids in zip(
            sql_list, bind_variables_list, keyspace_list, keyspace_ids_list):
          sql, bind_variables = dbapi.prepare_query_bind_vars(sql,
                                                              bind_variables)
          query = request.queries.add()
          query.query.sql = sql
          query.keyspace = keyspace_name
          query.keyspace_ids.extend(keyspace_ids)
          _convert_bind_vars(bind_variables, query.query.bind_variables)
        response = self.stub.ExecuteBatchKeyspaceIds(request, self.timeout)
      else:
        exec_method = 'ExecuteBatchShards'
        request = vtgate_pb2.ExecuteBatchShardsRequest(
            tablet_type=topodata_pb2.TabletType.Value(tablet_type.upper()),
            as_transaction=as_transaction,
        )
        _add_caller_id(request, effective_caller_id)
        self._add_session(request)
        for sql, bind_variables, keyspace_name, shards in zip(
            sql_list, bind_variables_list, keyspace_list, shards_list):
          sql, bind_variables = dbapi.prepare_query_bind_vars(sql,
                                                              bind_variables)
          query = request.queries.add()
          query.query.sql = sql
          query.keyspace = keyspace_name
          query.shards.extend(shards)
          _convert_bind_vars(bind_variables, query.query.bind_variables)
        response = self.stub.ExecuteBatchShards(request, self.timeout)
      self.session = response.session
      _extract_rpc_error(exec_method, response.error)
      rowsets = []
      for result in response.results:
        rowset = _get_rowset_from_query_result(result)
        rowsets.append(rowset)
      return rowsets
    except (face.AbortionError, vtgate_utils.VitessError) as e:
      # Bind variables may contain sensitive data; use the private-data logger.
      self.logger_object.log_private_data(bind_variables_list)
      raise _convert_exception(
          e, sql_list, exec_method, keyspace='', tablet_type=tablet_type)
@vtgate_utils.exponential_backoff_retry((dbexceptions.TransientError))
def _stream_execute(
    self, sql, bind_variables, tablet_type, keyspace_name=None,
    shards=None, keyspace_ids=None, keyranges=None,
    not_in_transaction=False, effective_caller_id=None,
    **kwargs):
  """Stream a query's results from vtgate.

  Routing is picked from the first non-None of shards, keyspace_ids,
  keyranges; otherwise the unsharded StreamExecute is used.  Returns a
  (row generator, fields) pair, where fields is a list of (name, type)
  tuples taken from the first streamed response.
  """
  try:
    sql, bind_variables = dbapi.prepare_query_bind_vars(sql, bind_variables)
    if shards is not None:
      request = vtgate_pb2.StreamExecuteShardsRequest(
          query=query_pb2.BoundQuery(sql=sql),
          tablet_type=topodata_pb2.TabletType.Value(tablet_type.upper()),
          keyspace=keyspace_name,
      )
      _add_caller_id(request, effective_caller_id)
      request.shards.extend(shards)
      _convert_bind_vars(bind_variables, request.query.bind_variables)
      it = self.stub.StreamExecuteShards(request, self.timeout)
    elif keyspace_ids is not None:
      request = vtgate_pb2.StreamExecuteKeyspaceIdsRequest(
          query=query_pb2.BoundQuery(sql=sql),
          tablet_type=topodata_pb2.TabletType.Value(tablet_type.upper()),
          keyspace=keyspace_name,
      )
      _add_caller_id(request, effective_caller_id)
      request.keyspace_ids.extend(keyspace_ids)
      _convert_bind_vars(bind_variables, request.query.bind_variables)
      it = self.stub.StreamExecuteKeyspaceIds(request, self.timeout)
    elif keyranges is not None:
      request = vtgate_pb2.StreamExecuteKeyRangesRequest(
          query=query_pb2.BoundQuery(sql=sql),
          tablet_type=topodata_pb2.TabletType.Value(tablet_type.upper()),
          keyspace=keyspace_name,
      )
      _add_caller_id(request, effective_caller_id)
      _add_key_ranges(request, keyranges)
      _convert_bind_vars(bind_variables, request.query.bind_variables)
      it = self.stub.StreamExecuteKeyRanges(request, self.timeout)
    else:
      request = vtgate_pb2.StreamExecuteRequest(
          query=query_pb2.BoundQuery(sql=sql),
          tablet_type=topodata_pb2.TabletType.Value(tablet_type.upper()),
      )
      _add_caller_id(request, effective_caller_id)
      _convert_bind_vars(bind_variables, request.query.bind_variables)
      it = self.stub.StreamExecute(request, self.timeout)
    # Pull the first response eagerly: it carries the field definitions.
    first_response = it.next()
  except (face.AbortionError, vtgate_utils.VitessError) as e:
    self.logger_object.log_private_data(bind_variables)
    raise _convert_exception(
        e, sql, keyspace_ids, keyranges,
        keyspace=keyspace_name, tablet_type=tablet_type)
  fields = []
  conversions = []
  for field in first_response.result.fields:
    fields.append((field.name, field.type))
    conversions.append(field_types_proto3.conversions.get(field.type))

  def row_generator():
    # Lazily decode rows from the remaining streamed responses.
    try:
      for response in it:
        for row in response.result.rows:
          yield tuple(_make_row(row, conversions))
    except Exception:
      logging.exception('gRPC low-level error')
      raise

  return row_generator(), fields
def get_srv_keyspace(self, name):
  """Fetch the serving topology for keyspace `name`.

  Returns a keyspace.Keyspace built from the proto3 SrvKeyspace converted
  to the legacy representation.  Raises a converted dbexception on error.
  """
  try:
    request = vtgate_pb2.GetSrvKeyspaceRequest(
        keyspace=name,
    )
    response = self.stub.GetSrvKeyspace(request, self.timeout)
    return keyspace.Keyspace(
        name,
        keyrange_constants.srv_keyspace_proto3_to_old(response.srv_keyspace))
  except (face.AbortionError, vtgate_utils.VitessError) as e:
    raise _convert_exception(e, keyspace=name)
def _add_session(self, request):
if self.session:
request.session.CopyFrom(self.session)
def _add_caller_id(request, caller_id):
if caller_id:
if caller_id.principal:
request.caller_id.principal = caller_id.principal
if caller_id.component:
request.caller_id.component = caller_id.component
if caller_id.subcomponent:
request.caller_id.subcomponent = caller_id.subcomponent
def _add_key_ranges(request, keyranges):
for kr in keyranges:
encoded_kr = request.key_ranges.add()
encoded_kr.start = kr.Start
encoded_kr.end = kr.End
def _extract_rpc_error(exec_method, error):
if error.code:
raise vtgate_utils.VitessError(exec_method, {
'Code': error.code,
'Message': error.message,
})
def _get_rowset_from_query_result(query_result):
"""Builds a python rowset from proto3 response.
Args:
query_result: proto3 query result.
Returns:
Array of rows
Number of modified rows
Last insert ID
Fields array of (name, type) tuples.
"""
if not query_result:
return [], 0, 0, []
fields = []
conversions = []
results = []
for field in query_result.fields:
fields.append((field.name, field.type))
conversions.append(field_types_proto3.conversions.get(field.type))
for row in query_result.rows:
results.append(tuple(_make_row(row, conversions)))
rowcount = query_result.rows_affected
lastrowid = query_result.insert_id
return results, rowcount, lastrowid, fields
def _convert_exception(exc, *args, **kwargs):
  """This parses the protocol exceptions to the api interface exceptions.

  This also logs the exception and increments the appropriate error counters.

  Args:
    exc: raw protocol exception.
    *args: additional args from the raising site.
    **kwargs: additional keyword args from the raising site.

  Returns:
    Api interface exceptions - dbexceptions with new args.
  """
  kwargs_as_str = vtgate_utils.convert_exception_kwargs(kwargs)
  # Fold the caller's context into the exception args before conversion.
  exc.args += args
  if kwargs_as_str:
    exc.args += kwargs_as_str,
  new_args = (type(exc).__name__,) + exc.args
  if isinstance(exc, vtgate_utils.VitessError):
    new_exc = exc.convert_to_dbexception(new_args)
  elif isinstance(exc, face.ExpirationError):
    # face.ExpirationError is returned by the gRPC library when
    # a request times out. Note it is a subclass of face.AbortionError
    # so we have to test for it before.
    new_exc = dbexceptions.TimeoutError(new_args)
  elif isinstance(exc, face.AbortionError):
    # face.AbortionError is the toplevel error returned by gRPC for any
    # RPC that finishes earlier than expected.
    msg = exc.details
    if exc.code == interfaces.StatusCode.UNAVAILABLE:
      # UNAVAILABLE is either throttling or a transient outage; the
      # message text distinguishes the two.
      if _throttler_err_pattern.search(msg):
        return dbexceptions.ThrottledError(new_args)
      else:
        return dbexceptions.TransientError(new_args)
    elif exc.code == interfaces.StatusCode.ALREADY_EXISTS:
      new_exc = _prune_integrity_error(msg, new_args)
    else:
      # Unhandled RPC application error
      new_exc = dbexceptions.DatabaseError(new_args + (msg,))
  else:
    # Unknown exception type: pass it through (with augmented args).
    new_exc = exc
  vtgate_utils.log_exception(
      new_exc,
      keyspace=kwargs.get('keyspace'), tablet_type=kwargs.get('tablet_type'))
  return new_exc
def _convert_bind_vars(bind_variables, request_bind_variables):
  """Convert binding variables to ProtoBuffer."""
  # Top-level bind vars may be lists (IN clauses), hence allow_lists=True.
  for name, value in bind_variables.iteritems():
    _convert_value(value, request_bind_variables[name], allow_lists=True)
def _convert_value(value, proto_value, allow_lists=False):
  """Convert a variable from python type to proto type+value."""
  if isinstance(value, int):
    proto_value.type = query_pb2.INT64
    proto_value.value = str(value)
  elif isinstance(value, long):
    # Python 2 `long`: values beyond the signed 64-bit bound go out as
    # UINT64, smaller ones as INT64.
    if value < INT_UPPERBOUND_PLUS_ONE:
      proto_value.type = query_pb2.INT64
    else:
      proto_value.type = query_pb2.UINT64
    proto_value.value = str(value)
  elif isinstance(value, float):
    proto_value.type = query_pb2.FLOAT64
    proto_value.value = str(value)
  elif hasattr(value, '__sql_literal__'):
    # Objects that know their own SQL representation.
    proto_value.type = query_pb2.VARBINARY
    proto_value.value = str(value.__sql_literal__())
  elif isinstance(value, datetime.datetime):
    proto_value.type = query_pb2.VARBINARY
    proto_value.value = times.DateTimeToString(value)
  elif isinstance(value, datetime.date):
    proto_value.type = query_pb2.VARBINARY
    proto_value.value = times.DateToString(value)
  elif isinstance(value, str):
    proto_value.type = query_pb2.VARBINARY
    proto_value.value = value
  elif isinstance(value, field_types.NoneType):
    proto_value.type = query_pb2.NULL_TYPE
  elif allow_lists and isinstance(value, (set, tuple, list)):
    # this only works for bind variables, not for entities.
    proto_value.type = query_pb2.TUPLE
    for v in list(value):
      proto_v = proto_value.values.add()
      # Nested lists are not allowed (allow_lists defaults to False here).
      _convert_value(v, proto_v)
  else:
    # Fallback: stringify anything unrecognized as VARBINARY.
    proto_value.type = query_pb2.VARBINARY
    proto_value.value = str(value)
def _convert_entity_ids(entity_keyspace_ids, request_eki):
  """Convert external entity id map to ProtoBuffer.

  Args:
    entity_keyspace_ids: map of entity_keyspace_id.
    request_eki: destination proto3 list.

  Returns:
    list of entity_keyspace_id as ProtoBuf.
  """
  for external_id, kid in entity_keyspace_ids.iteritems():
    entry = request_eki.add()
    entry.keyspace_id = kid
    # Entity ids must be scalar, so lists are rejected here.
    _convert_value(external_id, entry, allow_lists=False)
def _prune_integrity_error(msg, exc_args):
  """Prunes an integrity error message and returns an IntegrityError."""
  # Cut the message off where the errno-suffixed tail begins.
  parts = _errno_pattern.split(msg)
  pruned_msg = msg[:msg.find(parts[2])]
  return dbexceptions.IntegrityError((pruned_msg,) + tuple(exc_args[1:]))
def _make_row(row, conversions):
"""Builds a python native row from proto3 row.
Args:
row: proto3 row
conversions: conversion function array
Returns:
an array of converted rows.
"""
converted_row = []
offset = 0
for i, l in enumerate(row.lengths):
if l == -1:
converted_row.append(None)
elif conversions[i]:
converted_row.append(conversions[i](row.values[offset:offset+l]))
offset += l
else:
converted_row.append(row.values[offset:offset+l])
offset += l
return converted_row
vtgate_client.register_conn_class('grpc', GRPCVTGateConnection)
| 35.840278 | 80 | 0.70994 |
acdf47305fc56eeb7e6ba24a2ef7a07ff700cbf8 | 180 | py | Python | sudo/__init__.py | PhiSigmaUnited/dans-dandy-cogs | f2112063fea03464bff4b1843f914b30ccbe5d11 | [
"MIT"
] | null | null | null | sudo/__init__.py | PhiSigmaUnited/dans-dandy-cogs | f2112063fea03464bff4b1843f914b30ccbe5d11 | [
"MIT"
] | null | null | null | sudo/__init__.py | PhiSigmaUnited/dans-dandy-cogs | f2112063fea03464bff4b1843f914b30ccbe5d11 | [
"MIT"
] | null | null | null | from .sudo import sudo
__red_end_user_data_statement__ = (
"This cog does not persistently store data or metadata about users."
)
def setup(bot):
bot.add_cog(Sudo(bot))
| 18 | 72 | 0.733333 |
acdf4838abd2d645f94785474900723a4c0014c9 | 2,449 | py | Python | sources/fr/chireads.py | Kukkerem/lightnovel-crawler | 59009a674ecf70888df6a15c3c0e3cce239f1161 | [
"Apache-2.0"
] | null | null | null | sources/fr/chireads.py | Kukkerem/lightnovel-crawler | 59009a674ecf70888df6a15c3c0e3cce239f1161 | [
"Apache-2.0"
] | null | null | null | sources/fr/chireads.py | Kukkerem/lightnovel-crawler | 59009a674ecf70888df6a15c3c0e3cce239f1161 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import logging
import requests
from lncrawl.core.crawler import Crawler
logger = logging.getLogger(__name__)
class Chireads(Crawler):
    """lightnovel-crawler source for chireads.com (French translations)."""

    base_url = ["https://chireads.com/"]
    has_manga = False
    machine_translation = False

    def search_novel(self, query):
        """Search the site and return a list of {title, url} dicts."""
        query = query.lower().replace(" ", "+")
        # NOTE: Using self.get_soup() here throws an error (reason unknown),
        # so fall back to a direct requests.get() + make_soup().
        response = requests.get("https://chireads.com/search?x=0&y=0&name=" + query)
        soup = self.make_soup(response)
        result = []
        content = soup.find("div", {"id": "content"})
        for novel in content.find_all("li"):
            content = novel.find("a")
            result.append({
                "title": content.get("title"),
                "url": self.absolute_url(content.get("href")),
            })
        return result

    def read_novel_info(self):
        """Scrape cover, title, author, volumes and chapters for the novel."""
        soup = self.get_soup(self.novel_url)
        # "conteiner" is the site's own (misspelled) CSS class name.
        content = soup.find("div", {"class": "conteiner"}).find_all(
            "div", {"class": "wid"}
        )
        metadata = content[0]
        self.novel_cover = self.absolute_url(metadata.find("img").get("src"))
        self.novel_title = metadata.find("h3", {
            "class": "inform-title"
        }).text.split("|")[0].strip()
        # Author line is separated from the rest by a run of NBSP characters.
        self.novel_author = metadata.find("h6", {
            "class": "font-color-black3"
        }).text.split(
            "\xa0\xa0\xa0\xa0\xa0\xa0\xa0\xa0\xa0\xa0"
        )[0].replace("Auteur : ", "")
        body = content[1]
        # Each "chapitre" div is a volume ("tome") holding its chapter links.
        tomes = body.find_all("div", {"class": "chapitre"})
        for vol_id, tome in enumerate(tomes, 1):
            self.volumes.append({
                "id": vol_id,
                "title": tome.find("div", {"class": "title"}).text,
            })
            for chapter in tome.find_all("a"):
                chap_id = len(self.chapters) + 1
                self.chapters.append({
                    "id": chap_id,
                    "volume": vol_id,
                    "url": self.absolute_url(chapter.get("href")),
                    "title": chapter.text.replace("\xa0", " "),
                })

    def download_chapter_body(self, chapter):
        """Fetch one chapter page and return its cleaned article HTML."""
        soup = self.get_soup(chapter["url"])
        content = soup.find("div", {
            "id": "content",
            "class": "font-color-black3 article-font",
        })
        return self.cleaner.extract_contents(content)
acdf497cc0e78ac112876b0287c5871642631b1c | 4,977 | py | Python | app/user/tests/test_user_api.py | aishwaryaprabhat/Django-REST-API | 55c41af3d647b404441e1962fcbfce57b6a45fa9 | [
"MIT"
] | null | null | null | app/user/tests/test_user_api.py | aishwaryaprabhat/Django-REST-API | 55c41af3d647b404441e1962fcbfce57b6a45fa9 | [
"MIT"
] | null | null | null | app/user/tests/test_user_api.py | aishwaryaprabhat/Django-REST-API | 55c41af3d647b404441e1962fcbfce57b6a45fa9 | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
# Resolve the user app's named routes once at import time.
CREATE_USER_URL = reverse('user:create')
TOKEN_URL = reverse('user:token')
ME_URL = reverse('user:me')
def create_user(**params):
    """Create and return a user through the project's active user model."""
    user_model = get_user_model()
    return user_model.objects.create_user(**params)
class PublicUserAPITests(TestCase):
    """Test the users API (public)"""

    def setUp(self):
        # Unauthenticated client: these tests exercise the public endpoints.
        self.client = APIClient()

    def test_create_valid_user_success(self):
        """Test creating user with valid payload is successful"""
        payload = {
            'email': "aish@gmail.com",
            'password': "testpassword",
            'name': "Aish"
        }
        res = self.client.post(CREATE_USER_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        user = get_user_model().objects.get(**res.data)
        self.assertTrue(user.check_password(payload['password']))
        # The password must never be echoed back in the response body.
        self.assertNotIn('password', res.data)

    def test_user_exists(self):
        """Test creatinga user that already exists fails"""
        payload = {
            'email': "aish@gmail.com",
            'password': "testpassword",
            'name': "Aish"
        }
        create_user(**payload)
        res = self.client.post(CREATE_USER_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_password_too_short(self):
        """Test that the password must be more than 5 chars"""
        payload = {
            'email': "aish@gmail.com",
            'password': "tes",
            'name': "Aish"
        }
        res = self.client.post(CREATE_USER_URL, payload)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
        # A rejected payload must not leave a partially-created user behind.
        user_exists = get_user_model().objects.filter(
            email=payload['email']
        ).exists()
        self.assertFalse(user_exists)

    def test_create_token_for_user(self):
        """Test that a token is created for the user"""
        payload = {'email': 'test@londonappdev.com', 'password': 'testpass'}
        create_user(**payload)
        res = self.client.post(TOKEN_URL, payload)
        self.assertIn('token', res.data)
        self.assertEqual(res.status_code, status.HTTP_200_OK)

    def test_create_token_invalid_credentials(self):
        """Test that token is not created if invalid credentials are given"""
        create_user(email='test@londonappdev.com', password='testpass')
        payload = {'email': 'test@londonappdev.com', 'password': 'wrong'}
        res = self.client.post(TOKEN_URL, payload)
        self.assertNotIn('token', res.data)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_token_no_user(self):
        """Test that token is not created if user doens't exist"""
        payload = {'email': 'test@londonappdev.com', 'password': 'testpass'}
        res = self.client.post(TOKEN_URL, payload)
        self.assertNotIn('token', res.data)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_token_missing_field(self):
        """Test that email and password are required"""
        res = self.client.post(TOKEN_URL, {'email': 'one', 'password': ''})
        self.assertNotIn('token', res.data)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_retrieve_user_unauthorized(self):
        """Test that authentication required for users"""
        res = self.client.get(ME_URL)
        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateUserApiTests(TestCase):
    """Test API requests that require authentication"""

    def setUp(self):
        # Create a user and authenticate the test client as that user.
        self.user = create_user(
            email='test@londonappdev.com',
            password='testpass',
            name='fname',
        )
        self.client = APIClient()
        self.client.force_authenticate(user=self.user)

    def test_retrieve_profile_success(self):
        """Test retrieving profile for logged in user"""
        res = self.client.get(ME_URL)
        self.assertEqual(res.status_code, status.HTTP_200_OK)
        self.assertEqual(res.data, {
            'name': self.user.name,
            'email': self.user.email,
        })

    def test_post_me_not_allowed(self):
        """Test that POST is not allowed on the me URL"""
        res = self.client.post(ME_URL, {})
        self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)

    def test_update_user_profile(self):
        """Test updating the user profile for authenticated user"""
        payload = {'name': 'new name', 'password': 'newpassword123'}
        res = self.client.patch(ME_URL, payload)
        # Re-read the row to see the persisted changes.
        self.user.refresh_from_db()
        self.assertEqual(self.user.name, payload['name'])
        self.assertTrue(self.user.check_password(payload['password']))
        self.assertEqual(res.status_code, status.HTTP_200_OK)
acdf49877a42c60c34698afb0cda08bd954f088d | 12,160 | py | Python | watson/utils.py | VukGr/Watson | 87e5e311e1428a12ccf05f33449f02d509c4e031 | [
"MIT"
] | null | null | null | watson/utils.py | VukGr/Watson | 87e5e311e1428a12ccf05f33449f02d509c4e031 | [
"MIT"
] | null | null | null | watson/utils.py | VukGr/Watson | 87e5e311e1428a12ccf05f33449f02d509c4e031 | [
"MIT"
] | null | null | null | import collections as co
import csv
import datetime
import itertools
import json
import operator
import os
import shutil
import tempfile
from io import StringIO
import click
import arrow
import watson as _watson
from .fullmoon import get_last_full_moon
from click.exceptions import UsageError
def create_watson():
    """Build a Watson instance, honoring the WATSON_DIR env override."""
    config_dir = os.environ.get('WATSON_DIR')
    return _watson.Watson(config_dir=config_dir)
def confirm_project(project, watson_projects):
    """
    Ask user to confirm creation of a new project

    'project' must be a string
    'watson_projects' must be an iterable.
    Returns True on accept and raises click.exceptions.Abort on reject
    """
    if project in watson_projects:
        return True
    prompt = ("Project '%s' does not exist yet. Create it?"
              % style('project', project))
    click.confirm(prompt, abort=True)
    return True
def confirm_tags(tags, watson_tags):
    """
    Ask user to confirm creation of new tags (each separately)

    Both 'tags' and 'watson_tags' must be iterables.
    Returns True if all accepted and raises click.exceptions.Abort on reject
    """
    unknown = (tag for tag in tags if tag not in watson_tags)
    for tag in unknown:
        click.confirm(
            "Tag '%s' does not exist yet. Create it?" % style('tag', tag),
            abort=True)
    return True
def style(name, element):
    """Return `element` rendered with the terminal style registered for `name`.

    Entries in `formats` are either click.style keyword dicts or callables
    that compute the styled string themselves (tag lists, short frame ids).
    Unknown names fall back to unstyled output.
    """
    def _style_tags(tags):
        # Render a tag list as "[tag1, tag2]", styling each tag individually.
        if not tags:
            return ''
        return '[{}]'.format(', '.join(
            style('tag', tag) for tag in tags
        ))

    def _style_short_id(id):
        # Frames are usually referenced by their 7-character short id.
        return style('id', id[:7])

    formats = {
        'project': {'fg': 'magenta'},
        'tags': _style_tags,
        'tag': {'fg': 'blue'},
        'time': {'fg': 'green'},
        'error': {'fg': 'red'},
        'date': {'fg': 'cyan'},
        'short_id': _style_short_id,
        'id': {'fg': 'white'}
    }

    fmt = formats.get(name, {})

    if isinstance(fmt, dict):
        return click.style(element, **fmt)
    else:
        # The fmt might be a function if we need to do some computation
        return fmt(element)
def format_timedelta(delta):
    """
    Return a string roughly representing a timedelta.

    Examples: '30s', '01m 30s', '2h 03m 04s'; negative deltas get a
    leading '-'.
    """
    total_seconds = int(delta.total_seconds())
    sign = '-' if total_seconds < 0 else ''
    magnitude = abs(total_seconds)
    remaining = magnitude
    parts = []
    if magnitude >= 3600:
        hours, remaining = divmod(remaining, 3600)
        parts.append('{}h'.format(hours))
    if magnitude >= 60:
        minutes, remaining = divmod(remaining, 60)
        parts.append('{:02}m'.format(minutes))
    parts.append('{:02}s'.format(remaining))
    return sign + ' '.join(parts)
def sorted_groupby(iterator, key, reverse=False):
    """
    Similar to `itertools.groupby`, but sorts the iterator with the same
    key first (groupby only merges *adjacent* equal keys).
    """
    ordered = sorted(iterator, key=key, reverse=reverse)
    return itertools.groupby(ordered, key)
def options(opt_list):
    """
    Wrapper for the `value_proc` field in `click.prompt`, which validates
    that the user response is part of the list of accepted responses.
    """
    def value_proc(user_input):
        # Reject anything outside the accepted responses.
        if user_input not in opt_list:
            raise UsageError("Response should be one of [{}]".format(
                ','.join(str(x) for x in opt_list)))
        return user_input
    return value_proc
def get_frame_from_argument(watson, arg):
    """
    Get a frame from a command line argument which can either be a
    position index (-1) or a frame id.
    """
    # first we try to see if we are referring to a frame by
    # its position (for example -2). We only take negative indexes
    # as a positive index might also be an existing id
    try:
        index = int(arg)
        if index < 0:
            return watson.frames[index]
    except IndexError:
        raise click.ClickException(
            style('error', "No frame found for index {}.".format(arg))
        )
    except (ValueError, TypeError):
        # Not an integer at all: fall through to the id lookup below.
        pass
    # if we didn't find a frame by position, we try by id
    try:
        return watson.frames[arg]
    except KeyError:
        raise click.ClickException("{} {}.".format(
            style('error', "No frame found with id"),
            style('short_id', arg))
        )
def get_start_time_for_period(period):
    """Return the beginning of the reporting `period` as an Arrow instant.

    Supported periods: 'day', 'week' (Monday-based, see
    apply_weekday_offset for other anchors), 'month', 'luna' (last full
    moon), 'year' and 'all'.  Raises ValueError for anything else.
    """
    # Using now() from datetime instead of arrow for mocking compatibility.
    now = arrow.Arrow.fromdatetime(datetime.datetime.now())
    date = now.date()

    day = date.day
    month = date.month
    year = date.year

    weekday = now.weekday()

    if period == 'day':
        start_time = arrow.Arrow(year, month, day)
    elif period == 'week':
        start_time = arrow.Arrow.fromdate(now.shift(days=-weekday).date())
    elif period == 'month':
        start_time = arrow.Arrow(year, month, 1)
    elif period == 'luna':
        start_time = get_last_full_moon(now)
    elif period == 'year':
        start_time = arrow.Arrow(year, 1, 1)
    elif period == 'all':
        # approximately timestamp `0`
        start_time = arrow.Arrow(1970, 1, 1)
    else:
        raise ValueError('Unsupported period value: {}'.format(period))

    return start_time
def apply_weekday_offset(start_time, week_start):
    """
    Apply the offset required to move the start date `start_time` of a week
    starting on Monday to that of a week starting on `week_start`.

    Unknown `week_start` names leave `start_time` unchanged.
    """
    day_names = ["monday", "tuesday", "wednesday", "thursday", "friday",
                 "saturday", "sunday"]
    weekday_index = {name: i for i, name in enumerate(day_names)}

    target = week_start.lower()
    if target not in weekday_index:
        return start_time
    today = datetime.datetime.now().weekday()
    # Shift back a full week when the requested start would land in the future.
    offset = weekday_index[target] - 7 * (weekday_index[target] > today)
    return start_time.shift(days=offset)
def make_json_writer(func, *args, **kwargs):
    """
    Return a function that receives a file-like object and writes the return
    value of func(*args, **kwargs) as JSON to it.
    """
    def writer(f):
        payload = func(*args, **kwargs)
        f.write(json.dumps(payload, indent=1, ensure_ascii=False))
    return writer
def safe_save(path, content, ext='.bak'):
    """
    Save given content to file at given path safely.

    `content` may either be a (unicode) string to write to the file, or a
    function taking one argument, a file object opened for writing. The
    function may write (unicode) strings to the file object (but doesn't need
    to close it).

    The file to write to is created at a temporary location first. If there is
    an error creating or writing to the temp file or calling `content`, the
    destination file is left untouched. Otherwise, if all is well, an existing
    destination file is backed up to `path` + `ext` (defaults to '.bak') and
    the temporary file moved into its place.
    """
    tmpfp = tempfile.NamedTemporaryFile(mode='w+', delete=False)
    try:
        with tmpfp:
            if isinstance(content, str):
                tmpfp.write(content)
            else:
                content(tmpfp)
    except Exception:
        # Writing failed: discard the temp file and keep the destination.
        try:
            os.unlink(tmpfp.name)
        except (IOError, OSError):
            pass
        raise
    else:
        if os.path.exists(path):
            # Back up the current destination before replacing it.
            try:
                os.unlink(path + ext)
            except OSError:
                pass
            shutil.move(path, path + ext)
        shutil.move(tmpfp.name, path)
def deduplicate(sequence):
    """
    Return a list with all items of the input sequence but duplicates removed.

    First occurrences win; the input sequence is left unaltered.  Works for
    unhashable elements too (membership test on the result list).
    """
    unique = []
    for element in sequence:
        if element not in unique:
            unique.append(element)
    return unique
def parse_tags(values_list):
    """
    Return a list of tags parsed from the input values list.

    A tag starts at a word beginning with '+' and extends over the
    following words up to (but excluding) the next '+' word; tags are
    stripped and empty ones dropped.
    """
    tags = []
    for index, word in enumerate(values_list):
        if not word.startswith('+'):
            continue
        pieces = [word[1:]]
        for following in values_list[index + 1:]:
            if following.startswith('+'):
                break
            pieces.append(following)
        tag = ' '.join(pieces).strip()
        if tag:
            tags.append(tag)
    return tags
def frames_to_json(frames):
    """
    Transform a sequence of frames into a JSON-formatted string.

    Each frame object has an equivalent pair name/value in the JSON string,
    except for 'updated_at', which is not included.

    .. seealso:: :class:`Frame`
    """
    entries = []
    for frame in frames:
        entries.append(co.OrderedDict([
            ('id', frame.id),
            ('start', frame.start.isoformat()),
            ('stop', frame.stop.isoformat()),
            ('project', frame.project),
            ('tags', frame.tags),
        ]))
    return json.dumps(entries, indent=4, sort_keys=True)
def frames_to_csv(frames):
    """
    Transform a sequence of frames into a CSV-formatted string.

    Each frame object has an equivalent pair name/value in the CSV string,
    except for 'updated_at', which is not included.

    .. seealso:: :class:`Frame`
    """
    entries = []
    for frame in frames:
        entries.append(co.OrderedDict([
            ('id', frame.id[:7]),
            ('start', frame.start.format('YYYY-MM-DD HH:mm:ss')),
            ('stop', frame.stop.format('YYYY-MM-DD HH:mm:ss')),
            ('project', frame.project),
            ('tags', ', '.join(frame.tags)),
        ]))
    return build_csv(entries)
def build_csv(entries):
    """
    Creates a CSV string from a list of dict objects.

    The dictionary keys of the first item in the list are used as the header
    row for the built CSV. All item's keys are supposed to be identical.
    Returns '' for an empty list.
    """
    if not entries:
        return ''
    fieldnames = entries[0].keys()
    buffer = StringIO()
    try:
        writer = csv.DictWriter(buffer, fieldnames, lineterminator=os.linesep)
        writer.writeheader()
        writer.writerows(entries)
        return buffer.getvalue()
    finally:
        buffer.close()
def flatten_report_for_csv(report):
    """
    Flattens the data structure returned by `watson.report()` for a csv export.

    Dates are formatted in a way that Excel (default csv module dialect) can
    handle them (i.e. YYYY-MM-DD HH:mm:ss).

    Each output row carries either a project total (empty `tag`) or the
    partial time of one tag within a project.  The sum of the empty-tag
    rows equals the report total.
    """
    datetime_from = report['timespan']['from'].format('YYYY-MM-DD HH:mm:ss')
    datetime_to = report['timespan']['to'].format('YYYY-MM-DD HH:mm:ss')

    rows = []

    def add_row(project_name, tag_name, time_spent):
        rows.append({
            'from': datetime_from,
            'to': datetime_to,
            'project': project_name,
            'tag': tag_name,
            'time': time_spent,
        })

    for project in report['projects']:
        add_row(project['name'], '', project['time'])
        for tag in project['tags']:
            add_row(project['name'], tag['name'], tag['time'])
    return rows
def json_arrow_encoder(obj):
    """
    Encodes Arrow objects for JSON output.

    This function can be used with
    `json.dumps(..., default=json_arrow_encoder)`, for example.

    If the object is not an Arrow type, a TypeError is raised

    :param obj: Object to encode
    :return: JSON representation of Arrow object as defined by Arrow
    """
    if not isinstance(obj, arrow.Arrow):
        raise TypeError("Object {} is not JSON serializable".format(obj))
    return obj.for_json()
def get_granularity(time):
    """Pick a display granularity based on how long ago `time` was.

    Returns 'minute' within the first hour, ['hour', 'minute'] afterwards.
    """
    # NOTE(review): `.seconds` ignores the `days` component of the delta, so
    # a `time` more than a day old is judged only by the sub-day remainder —
    # confirm this wrap-around is intended.
    delta_hours, _ = divmod((arrow.now() - time).seconds, 3600)
    if delta_hours == 0:
        return 'minute'
    else:
        return ['hour', 'minute']
| 29.658537 | 79 | 0.609375 |
acdf49b225786a392fc87ebbf69f33b738b1ad18 | 55,078 | py | Python | library/azure_rm_virtualmachinescaleset.py | joaocc/azure_preview_modules | 2413dafa6f979a2070843b073830901cc1b1d868 | [
"MIT"
] | 46 | 2018-01-24T08:39:15.000Z | 2021-08-20T04:41:16.000Z | library/azure_rm_virtualmachinescaleset.py | joaocc/azure_preview_modules | 2413dafa6f979a2070843b073830901cc1b1d868 | [
"MIT"
] | 226 | 2017-12-12T21:46:31.000Z | 2022-02-18T05:17:03.000Z | library/azure_rm_virtualmachinescaleset.py | joaocc/azure_preview_modules | 2413dafa6f979a2070843b073830901cc1b1d868 | [
"MIT"
] | 60 | 2018-01-25T10:03:59.000Z | 2022-03-08T10:19:54.000Z | #!/usr/bin/python
#
# Copyright (c) 2016 Sertac Ozercan, <seozerca@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_virtualmachinescaleset
version_added: "2.4"
short_description: Manage Azure virtual machine scale sets
description:
- Create and update a virtual machine scale set.
- Note that this module was called M(azure_rm_virtualmachine_scaleset) before Ansible 2.8. The usage did not change.
options:
resource_group:
description:
- Name of the resource group containing the virtual machine scale set.
required: true
name:
description:
- Name of the virtual machine.
required: true
state:
description:
- Assert the state of the virtual machine scale set.
- State C(present) will check that the machine exists with the requested configuration. If the configuration
of the existing machine does not match, the machine will be updated.
- State C(absent) will remove the virtual machine scale set.
default: present
choices:
- absent
- present
location:
description:
- Valid Azure location. Defaults to location of the resource group.
short_hostname:
description:
- Short host name.
vm_size:
description:
- A valid Azure VM size value. For example, C(Standard_D4).
- The list of choices varies depending on the subscription and location. Check your subscription for available choices.
capacity:
description:
- Capacity of VMSS.
default: 1
tier:
description:
- SKU Tier.
choices:
- Basic
- Standard
upgrade_policy:
description:
- Upgrade policy.
- Required when creating the Azure virtual machine scale sets.
choices:
- Manual
- Automatic
admin_username:
description:
- Admin username used to access the host after it is created. Required when creating a VM.
admin_password:
description:
- Password for the admin username.
- Not required if the os_type is Linux and SSH password authentication is disabled by setting I(ssh_password_enabled=false).
ssh_password_enabled:
description:
- When the os_type is Linux, setting I(ssh_password_enabled=false) will disable SSH password authentication and require use of SSH keys.
type: bool
default: true
ssh_public_keys:
description:
- For I(os_type=Linux) provide a list of SSH keys.
- Each item in the list should be a dictionary where the dictionary contains two keys, C(path) and C(key_data).
- Set the C(path) to the default location of the authorized_keys files.
- On an Enterprise Linux host, for example, the I(path=/home/<admin username>/.ssh/authorized_keys).
Set C(key_data) to the actual value of the public key.
image:
description:
- Specifies the image used to build the VM.
- If a string, the image is sourced from a custom image based on the name.
- If a dict with the keys I(publisher), I(offer), I(sku), and I(version), the image is sourced from a Marketplace image.
Note that set I(version=latest) to get the most recent version of a given image.
- If a dict with the keys I(name) and I(resource_group), the image is sourced from a custom image based on the I(name) and I(resource_group) set.
Note that the key I(resource_group) is optional and if omitted, all images in the subscription will be searched for by I(name).
- Custom image support was added in Ansible 2.5.
required: true
os_disk_caching:
description:
- Type of OS disk caching.
choices:
- ReadOnly
- ReadWrite
default: ReadOnly
aliases:
- disk_caching
os_type:
description:
- Base type of operating system.
choices:
- Windows
- Linux
default: Linux
managed_disk_type:
description:
- Managed disk type.
choices:
- Standard_LRS
- Premium_LRS
data_disks:
description:
- Describes list of data disks.
version_added: "2.4"
suboptions:
lun:
description:
- The logical unit number for data disk.
default: 0
version_added: "2.4"
disk_size_gb:
description:
- The initial disk size in GB for blank data disks.
version_added: "2.4"
managed_disk_type:
description:
- Managed data disk type.
choices:
- Standard_LRS
- Premium_LRS
version_added: "2.4"
caching:
description:
- Type of data disk caching.
choices:
- ReadOnly
- ReadWrite
default: ReadOnly
version_added: "2.4"
virtual_network_resource_group:
description:
- When creating a virtual machine, if a specific virtual network from another resource group should be
used.
- Use this parameter to specify the resource group to use.
version_added: "2.5"
virtual_network_name:
description:
- Virtual Network name.
aliases:
- virtual_network
subnet_name:
description:
- Subnet name.
aliases:
- subnet
load_balancer:
description:
- Load balancer name.
version_added: "2.5"
application_gateway:
description:
- Application gateway name.
version_added: "2.8"
remove_on_absent:
description:
- When removing a VM using I(state=absent), also remove associated resources.
- It can be C(all) or a list with any of the following ['network_interfaces', 'virtual_storage', 'public_ips'].
- Any other input will be ignored.
default: ['all']
enable_accelerated_networking:
description:
- Indicates whether user wants to allow accelerated networking for virtual machines in scaleset being created.
version_added: "2.7"
type: bool
security_group:
description:
- Existing security group with which to associate the subnet.
- It can be the security group name which is in the same resource group.
- It can be the resource ID.
- It can be a dict which contains I(name) and I(resource_group) of the security group.
version_added: "2.7"
aliases:
- security_group_name
overprovision:
description:
- Specifies whether the Virtual Machine Scale Set should be overprovisioned.
type: bool
default: True
version_added: "2.8"
single_placement_group:
description:
- When true this limits the scale set to a single placement group, of max size 100 virtual machines.
type: bool
default: True
version_added: "2.9"
plan:
description:
- Third-party billing plan for the VM.
version_added: "2.10"
type: dict
suboptions:
name:
description:
- Billing plan name.
required: true
product:
description:
- Product name.
required: true
publisher:
description:
- Publisher offering the plan.
required: true
promotion_code:
description:
- Optional promotion code.
zones:
description:
- A list of Availability Zones for your virtual machine scale set.
type: list
version_added: "2.8"
custom_data:
description:
- Data which is made available to the virtual machine and used by e.g., C(cloud-init).
- Many images in the marketplace are not cloud-init ready. Thus, data sent to I(custom_data) would be ignored.
- If the image you are attempting to use is not listed in
U(https://docs.microsoft.com/en-us/azure/virtual-machines/linux/using-cloud-init#cloud-init-overview),
follow these steps U(https://docs.microsoft.com/en-us/azure/virtual-machines/linux/cloudinit-prepare-custom-image).
version_added: "2.8"
scale_in_policy:
description:
- define the order in which vmss instances are scaled-in
choices:
- Default
- NewestVM
- OldestVM
version_added: "2.10"
terminate_event_timeout_minutes:
description:
- timeout time for termination notification event
- in range between 5 and 15
version_added: "2.10"
extends_documentation_fragment:
- azure
- azure_tags
author:
- Sertac Ozercan (@sozercan)
'''
EXAMPLES = '''
- name: Create VMSS
azure_rm_virtualmachinescaleset:
resource_group: myResourceGroup
name: testvmss
vm_size: Standard_DS1_v2
capacity: 2
virtual_network_name: testvnet
upgrade_policy: Manual
subnet_name: testsubnet
terminate_event_timeout_minutes: 10
scale_in_policy: NewestVM
admin_username: adminUser
ssh_password_enabled: false
ssh_public_keys:
- path: /home/adminUser/.ssh/authorized_keys
        key_data: < insert your ssh public key here... >
managed_disk_type: Standard_LRS
image:
offer: CoreOS
publisher: CoreOS
sku: Stable
version: latest
data_disks:
- lun: 0
disk_size_gb: 64
caching: ReadWrite
managed_disk_type: Standard_LRS
- name: Create VMSS with an image that requires plan information
azure_rm_virtualmachinescaleset:
resource_group: myResourceGroup
name: testvmss
vm_size: Standard_DS1_v2
capacity: 3
virtual_network_name: testvnet
upgrade_policy: Manual
subnet_name: testsubnet
admin_username: adminUser
ssh_password_enabled: false
ssh_public_keys:
- path: /home/adminUser/.ssh/authorized_keys
        key_data: < insert your ssh public key here... >
managed_disk_type: Standard_LRS
image:
offer: cis-ubuntu-linux-1804-l1
publisher: center-for-internet-security-inc
sku: Stable
version: latest
plan:
name: cis-ubuntu-linux-1804-l1
product: cis-ubuntu-linux-1804-l1
publisher: center-for-internet-security-inc
data_disks:
- lun: 0
disk_size_gb: 64
caching: ReadWrite
managed_disk_type: Standard_LRS
- name: Create a VMSS with a custom image
azure_rm_virtualmachinescaleset:
resource_group: myResourceGroup
name: testvmss
vm_size: Standard_DS1_v2
capacity: 2
virtual_network_name: testvnet
upgrade_policy: Manual
subnet_name: testsubnet
admin_username: adminUser
admin_password: password01
managed_disk_type: Standard_LRS
image: customimage001
- name: Create a VMSS with over 100 instances
azure_rm_virtualmachinescaleset:
resource_group: myResourceGroup
name: testvmss
vm_size: Standard_DS1_v2
capacity: 120
single_placement_group: False
virtual_network_name: testvnet
upgrade_policy: Manual
subnet_name: testsubnet
admin_username: adminUser
admin_password: password01
managed_disk_type: Standard_LRS
image: customimage001
- name: Create a VMSS with a custom image from a particular resource group
azure_rm_virtualmachinescaleset:
resource_group: myResourceGroup
name: testvmss
vm_size: Standard_DS1_v2
capacity: 2
virtual_network_name: testvnet
upgrade_policy: Manual
subnet_name: testsubnet
admin_username: adminUser
admin_password: password01
managed_disk_type: Standard_LRS
image:
name: customimage001
resource_group: myResourceGroup
'''
RETURN = '''
azure_vmss:
description:
- Facts about the current state of the object.
- Note that facts are not part of the registered output but available directly.
returned: always
type: dict
sample: {
"properties": {
"overprovision": true,
"scaleInPolicy": {
"rules": [
"NewestVM"
]
},
"singlePlacementGroup": true,
"upgradePolicy": {
"mode": "Manual"
},
"virtualMachineProfile": {
"networkProfile": {
"networkInterfaceConfigurations": [
{
"name": "testvmss",
"properties": {
"dnsSettings": {
"dnsServers": []
},
"enableAcceleratedNetworking": false,
"ipConfigurations": [
{
"name": "default",
"properties": {
"privateIPAddressVersion": "IPv4",
"subnet": {
"id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/virtualNetworks/testvnet/subnets/testsubnet"
}
}
}
],
"primary": true
}
}
]
},
"osProfile": {
"adminUsername": "testuser",
"computerNamePrefix": "testvmss",
"linuxConfiguration": {
"disablePasswordAuthentication": true,
"ssh": {
"publicKeys": [
{
"keyData": "",
"path": "/home/testuser/.ssh/authorized_keys"
}
]
}
},
"secrets": []
},
"scheduledEventsProfile": {
"terminateNotificationProfile": {
"enable": true,
"notBeforeTimeout": "PT10M"
}
},
"storageProfile": {
"dataDisks": [
{
"caching": "ReadWrite",
"createOption": "empty",
"diskSizeGB": 64,
"lun": 0,
"managedDisk": {
"storageAccountType": "Standard_LRS"
}
}
],
"imageReference": {
"offer": "CoreOS",
"publisher": "CoreOS",
"sku": "Stable",
"version": "899.17.0"
},
"osDisk": {
"caching": "ReadWrite",
"createOption": "fromImage",
"managedDisk": {
"storageAccountType": "Standard_LRS"
}
}
}
}
},
"sku": {
"capacity": 2,
"name": "Standard_DS1_v2",
"tier": "Standard"
},
"tags": null,
"type": "Microsoft.Compute/virtualMachineScaleSets"
}
''' # NOQA
import base64
try:
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import parse_resource_id
except ImportError:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, azure_id_to_dict, format_resource_id
from ansible.module_utils.basic import to_native, to_bytes
AZURE_OBJECT_CLASS = 'VirtualMachineScaleSet'
AZURE_ENUM_MODULES = ['azure.mgmt.compute.models']
class AzureRMVirtualMachineScaleSet(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(type='str', required=True),
name=dict(type='str', required=True),
state=dict(choices=['present', 'absent'], default='present', type='str'),
location=dict(type='str'),
short_hostname=dict(type='str'),
vm_size=dict(type='str'),
tier=dict(type='str', choices=['Basic', 'Standard']),
capacity=dict(type='int', default=1),
upgrade_policy=dict(type='str', choices=['Automatic', 'Manual']),
admin_username=dict(type='str'),
admin_password=dict(type='str', no_log=True),
ssh_password_enabled=dict(type='bool', default=True),
ssh_public_keys=dict(type='list'),
image=dict(type='raw'),
os_disk_caching=dict(type='str', aliases=['disk_caching'], choices=['ReadOnly', 'ReadWrite'],
default='ReadOnly'),
os_type=dict(type='str', choices=['Linux', 'Windows'], default='Linux'),
managed_disk_type=dict(type='str', choices=['Standard_LRS', 'Premium_LRS']),
data_disks=dict(type='list'),
subnet_name=dict(type='str', aliases=['subnet']),
load_balancer=dict(type='str'),
application_gateway=dict(type='str'),
virtual_network_resource_group=dict(type='str'),
virtual_network_name=dict(type='str', aliases=['virtual_network']),
remove_on_absent=dict(type='list', default=['all']),
enable_accelerated_networking=dict(type='bool'),
security_group=dict(type='raw', aliases=['security_group_name']),
overprovision=dict(type='bool', default=True),
single_placement_group=dict(type='bool', default=True),
zones=dict(type='list'),
custom_data=dict(type='str'),
plan=dict(type='dict', options=dict(publisher=dict(type='str', required=True),
product=dict(type='str', required=True), name=dict(type='str', required=True),
promotion_code=dict(type='str'))),
scale_in_policy=dict(type='str', choices=['Default', 'OldestVM', 'NewestVM']),
terminate_event_timeout_minutes=dict(type='int')
)
self.resource_group = None
self.name = None
self.state = None
self.location = None
self.short_hostname = None
self.vm_size = None
self.capacity = None
self.tier = None
self.upgrade_policy = None
self.admin_username = None
self.admin_password = None
self.ssh_password_enabled = None
self.ssh_public_keys = None
self.image = None
self.os_disk_caching = None
self.managed_disk_type = None
self.data_disks = None
self.os_type = None
self.subnet_name = None
self.virtual_network_resource_group = None
self.virtual_network_name = None
self.tags = None
self.differences = None
self.load_balancer = None
self.application_gateway = None
self.enable_accelerated_networking = None
self.security_group = None
self.overprovision = None
self.single_placement_group = None
self.zones = None
self.custom_data = None
self.plan = None
self.scale_in_policy = None
self.terminate_event_timeout_minutes = None
mutually_exclusive = [('load_balancer', 'application_gateway')]
self.results = dict(
changed=False,
actions=[],
ansible_facts=dict(azure_vmss=None)
)
super(AzureRMVirtualMachineScaleSet, self).__init__(
derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
mutually_exclusive=mutually_exclusive)
    def exec_module(self, **kwargs):
        """Create, update or delete the virtual machine scale set.

        Copies the module parameters onto ``self``, diffs the requested
        configuration against any existing VMSS, and then performs the
        create / update / delete that is required (no-op in check mode).

        :param kwargs: module parameters supplied by Ansible.
        :return: results dict with ``changed`` and
            ``ansible_facts.azure_vmss`` keys.
        """
        # Copy every declared module parameter (plus 'tags') onto the instance.
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            setattr(self, key, kwargs[key])
        if self.module._name == 'azure_rm_virtualmachine_scaleset':
            self.module.deprecate("The 'azure_rm_virtualmachine_scaleset' module has been renamed to 'azure_rm_virtualmachinescaleset'", version='2.12')
        # make sure options are lower case
        self.remove_on_absent = set([resource.lower() for resource in self.remove_on_absent])
        # convert elements to ints
        self.zones = [int(i) for i in self.zones] if self.zones else None
        # default virtual_network_resource_group to resource_group
        if not self.virtual_network_resource_group:
            self.virtual_network_resource_group = self.resource_group
        changed = False
        results = dict()
        vmss = None
        disable_ssh_password = None
        subnet = None
        image_reference = None
        load_balancer_backend_address_pools = None
        load_balancer_inbound_nat_pools = None
        load_balancer = None
        application_gateway = None
        application_gateway_backend_address_pools = None
        # Flipped to False below when the VMSS has multiple NIC configs or
        # backend pools, which the update path does not support.
        support_lb_change = True
        resource_group = self.get_resource_group(self.resource_group)
        if not self.location:
            # Set default location
            self.location = resource_group.location
        if self.custom_data:
            # Azure expects custom_data base64-encoded.
            self.custom_data = to_native(base64.b64encode(to_bytes(self.custom_data)))
        if self.state == 'present':
            # Verify parameters and resolve any defaults
            if self.vm_size and not self.vm_size_is_valid():
                self.fail("Parameter error: vm_size {0} is not valid for your subscription and location.".format(
                    self.vm_size
                ))
            # if self.virtual_network_name:
            #     virtual_network = self.get_virtual_network(self.virtual_network_name)
            if self.ssh_public_keys:
                msg = "Parameter error: expecting ssh_public_keys to be a list of type dict where " \
                    "each dict contains keys: path, key_data."
                for key in self.ssh_public_keys:
                    if not isinstance(key, dict):
                        self.fail(msg)
                    if not key.get('path') or not key.get('key_data'):
                        self.fail(msg)
            # Resolve the 'image' parameter into an ImageReference: either a
            # marketplace dict, a custom image by name, or a raw resource id.
            if self.image and isinstance(self.image, dict):
                if all(key in self.image for key in ('publisher', 'offer', 'sku', 'version')):
                    marketplace_image = self.get_marketplace_image_version()
                    if self.image['version'] == 'latest':
                        self.image['version'] = marketplace_image.name
                        self.log("Using image version {0}".format(self.image['version']))
                    image_reference = self.compute_models.ImageReference(
                        publisher=self.image['publisher'],
                        offer=self.image['offer'],
                        sku=self.image['sku'],
                        version=self.image['version']
                    )
                elif self.image.get('name'):
                    custom_image = True
                    image_reference = self.get_custom_image_reference(
                        self.image.get('name'),
                        self.image.get('resource_group'))
                elif self.image.get('id'):
                    try:
                        image_reference = self.compute_models.ImageReference(id=self.image['id'])
                    except Exception as exc:
                        self.fail("id Error: Cannot get image from the reference id - {0}".format(self.image['id']))
                else:
                    self.fail("parameter error: expecting image to contain [publisher, offer, sku, version], [name, resource_group] or [id]")
            elif self.image and isinstance(self.image, str):
                custom_image = True
                image_reference = self.get_custom_image_reference(self.image)
            elif self.image:
                self.fail("parameter error: expecting image to be a string or dict not {0}".format(type(self.image).__name__))
            disable_ssh_password = not self.ssh_password_enabled
            # Resolve load balancer / application gateway backend pools so
            # they can be wired into the NIC configuration below.
            if self.load_balancer:
                load_balancer = self.get_load_balancer(self.load_balancer)
                load_balancer_backend_address_pools = ([self.compute_models.SubResource(id=resource.id)
                                                        for resource in load_balancer.backend_address_pools]
                                                       if load_balancer.backend_address_pools else None)
                load_balancer_inbound_nat_pools = ([self.compute_models.SubResource(id=resource.id)
                                                    for resource in load_balancer.inbound_nat_pools]
                                                   if load_balancer.inbound_nat_pools else None)
            if self.application_gateway:
                application_gateway = self.get_application_gateway(self.application_gateway)
                application_gateway_backend_address_pools = ([self.compute_models.SubResource(id=resource.id)
                                                              for resource in application_gateway.backend_address_pools]
                                                             if application_gateway.backend_address_pools else None)
        try:
            self.log("Fetching virtual machine scale set {0}".format(self.name))
            vmss = self.compute_client.virtual_machine_scale_sets.get(self.resource_group, self.name)
            self.check_provisioning_state(vmss, self.state)
            vmss_dict = self.serialize_vmss(vmss)
            if self.state == 'present':
                # The VMSS exists: diff each requested property against the
                # serialized current state and record what changed.
                differences = []
                results = vmss_dict
                if self.os_disk_caching and \
                   self.os_disk_caching != vmss_dict['properties']['virtualMachineProfile']['storageProfile']['osDisk']['caching']:
                    self.log('CHANGED: virtual machine scale set {0} - OS disk caching'.format(self.name))
                    differences.append('OS Disk caching')
                    changed = True
                    vmss_dict['properties']['virtualMachineProfile']['storageProfile']['osDisk']['caching'] = self.os_disk_caching
                if self.capacity and \
                   self.capacity != vmss_dict['sku']['capacity']:
                    self.log('CHANGED: virtual machine scale set {0} - Capacity'.format(self.name))
                    differences.append('Capacity')
                    changed = True
                    vmss_dict['sku']['capacity'] = self.capacity
                if self.data_disks and \
                   len(self.data_disks) != len(vmss_dict['properties']['virtualMachineProfile']['storageProfile'].get('dataDisks', [])):
                    self.log('CHANGED: virtual machine scale set {0} - Data Disks'.format(self.name))
                    differences.append('Data Disks')
                    changed = True
                if self.upgrade_policy and \
                   self.upgrade_policy != vmss_dict['properties']['upgradePolicy']['mode']:
                    self.log('CHANGED: virtual machine scale set {0} - Upgrade Policy'.format(self.name))
                    differences.append('Upgrade Policy')
                    changed = True
                    vmss_dict['properties']['upgradePolicy']['mode'] = self.upgrade_policy
                if image_reference and \
                   image_reference.as_dict() != vmss_dict['properties']['virtualMachineProfile']['storageProfile']['imageReference']:
                    self.log('CHANGED: virtual machine scale set {0} - Image'.format(self.name))
                    differences.append('Image')
                    changed = True
                    vmss_dict['properties']['virtualMachineProfile']['storageProfile']['imageReference'] = image_reference.as_dict()
                update_tags, vmss_dict['tags'] = self.update_tags(vmss_dict.get('tags', dict()))
                if update_tags:
                    differences.append('Tags')
                    changed = True
                if bool(self.overprovision) != bool(vmss_dict['properties']['overprovision']):
                    differences.append('overprovision')
                    changed = True
                if bool(self.single_placement_group) != bool(vmss_dict['properties']['singlePlacementGroup']):
                    differences.append('single_placement_group')
                    changed = True
                vmss_dict['zones'] = [int(i) for i in vmss_dict['zones']] if 'zones' in vmss_dict and vmss_dict['zones'] else None
                if self.zones != vmss_dict['zones']:
                    self.log("CHANGED: virtual machine scale sets {0} zones".format(self.name))
                    differences.append('Zones')
                    changed = True
                    vmss_dict['zones'] = self.zones
                if self.terminate_event_timeout_minutes:
                    timeout = self.terminate_event_timeout_minutes
                    if timeout < 5 or timeout > 15:
                        self.fail("terminate_event_timeout_minutes should >= 5 and <= 15")
                    # Azure expects the timeout as an ISO 8601 duration, e.g. "PT10M".
                    iso_8601_format = "PT" + str(timeout) + "M"
                    old = vmss_dict['properties']['virtualMachineProfile'].get('scheduledEventsProfile', {}).\
                        get('terminateNotificationProfile', {}).get('notBeforeTimeout', "")
                    if old != iso_8601_format:
                        differences.append('terminateNotification')
                        changed = True
                        vmss_dict['properties']['virtualMachineProfile'].setdefault('scheduledEventsProfile', {})['terminateNotificationProfile'] = {
                            'notBeforeTimeout': iso_8601_format,
                            "enable": 'true'
                        }
                if self.scale_in_policy and self.scale_in_policy != vmss_dict['properties'].get('scaleInPolicy', {}).get('rules', [""])[0]:
                    self.log("CHANGED: virtual machine sale sets {0} scale in policy".format(self.name))
                    differences.append('scaleInPolicy')
                    changed = True
                    vmss_dict['properties'].setdefault('scaleInPolicy', {})['rules'] = [self.scale_in_policy]
                # Compare the load balancer / app gateway wired into the first
                # NIC's first IP configuration with the requested one.
                nicConfigs = vmss_dict['properties']['virtualMachineProfile']['networkProfile']['networkInterfaceConfigurations']
                backend_address_pool = nicConfigs[0]['properties']['ipConfigurations'][0]['properties'].get('loadBalancerBackendAddressPools', [])
                backend_address_pool += nicConfigs[0]['properties']['ipConfigurations'][0]['properties'].get('applicationGatewayBackendAddressPools', [])
                lb_or_ag_id = None
                if (len(nicConfigs) != 1 or len(backend_address_pool) != 1):
                    support_lb_change = False  # Currently not support for the vmss contains more than one loadbalancer
                    self.module.warn('Updating more than one load balancer on VMSS is currently not supported')
                else:
                    if load_balancer:
                        lb_or_ag_id = "{0}/".format(load_balancer.id)
                    elif application_gateway:
                        lb_or_ag_id = "{0}/".format(application_gateway.id)
                    backend_address_pool_id = backend_address_pool[0].get('id')
                    if lb_or_ag_id is not None and (bool(lb_or_ag_id) != bool(backend_address_pool_id) or not backend_address_pool_id.startswith(lb_or_ag_id)):
                        differences.append('load_balancer')
                        changed = True
                if self.custom_data:
                    if self.custom_data != vmss_dict['properties']['virtualMachineProfile']['osProfile'].get('customData'):
                        differences.append('custom_data')
                        changed = True
                        vmss_dict['properties']['virtualMachineProfile']['osProfile']['customData'] = self.custom_data
                self.differences = differences
            elif self.state == 'absent':
                self.log("CHANGED: virtual machine scale set {0} exists and requested state is 'absent'".format(self.name))
                results = dict()
                changed = True
        except CloudError:
            # CloudError here means the VMSS does not exist yet.
            self.log('Virtual machine scale set {0} does not exist'.format(self.name))
            if self.state == 'present':
                self.log("CHANGED: virtual machine scale set {0} does not exist but state is 'present'.".format(self.name))
                changed = True
        self.results['changed'] = changed
        self.results['ansible_facts']['azure_vmss'] = results
        # In check mode report the diff without touching Azure.
        if self.check_mode:
            return self.results
        if changed:
            if self.state == 'present':
                if not vmss:
                    # Create the VMSS
                    if self.vm_size is None:
                        self.fail("vm size must be set")
                    self.log("Create virtual machine scale set {0}".format(self.name))
                    self.results['actions'].append('Created VMSS {0}'.format(self.name))
                    if self.os_type == 'Linux':
                        if disable_ssh_password and not self.ssh_public_keys:
                            self.fail("Parameter error: ssh_public_keys required when disabling SSH password.")
                    if not self.virtual_network_name:
                        self.fail("virtual network name is required")
                    if self.subnet_name:
                        subnet = self.get_subnet(self.virtual_network_name, self.subnet_name)
                    if not self.short_hostname:
                        self.short_hostname = self.name
                    if not image_reference:
                        self.fail("Parameter error: an image is required when creating a virtual machine.")
                    managed_disk = self.compute_models.VirtualMachineScaleSetManagedDiskParameters(storage_account_type=self.managed_disk_type)
                    if self.security_group:
                        nsg = self.parse_nsg()
                        if nsg:
                            self.security_group = self.network_models.NetworkSecurityGroup(id=nsg.get('id'))
                    plan = None
                    if self.plan:
                        plan = self.compute_models.Plan(name=self.plan.get('name'), product=self.plan.get('product'),
                                                        publisher=self.plan.get('publisher'),
                                                        promotion_code=self.plan.get('promotion_code'))
                    os_profile = None
                    if self.admin_username or self.custom_data or self.ssh_public_keys:
                        os_profile = self.compute_models.VirtualMachineScaleSetOSProfile(
                            admin_username=self.admin_username,
                            computer_name_prefix=self.short_hostname,
                            custom_data=self.custom_data
                        )
                    # Assemble the full VMSS model from the resolved pieces.
                    vmss_resource = self.compute_models.VirtualMachineScaleSet(
                        location=self.location,
                        overprovision=self.overprovision,
                        single_placement_group=self.single_placement_group,
                        tags=self.tags,
                        upgrade_policy=self.compute_models.UpgradePolicy(
                            mode=self.upgrade_policy
                        ),
                        sku=self.compute_models.Sku(
                            name=self.vm_size,
                            capacity=self.capacity,
                            tier=self.tier,
                        ),
                        plan=plan,
                        virtual_machine_profile=self.compute_models.VirtualMachineScaleSetVMProfile(
                            os_profile=os_profile,
                            storage_profile=self.compute_models.VirtualMachineScaleSetStorageProfile(
                                os_disk=self.compute_models.VirtualMachineScaleSetOSDisk(
                                    managed_disk=managed_disk,
                                    create_option=self.compute_models.DiskCreateOptionTypes.from_image,
                                    caching=self.os_disk_caching,
                                ),
                                image_reference=image_reference,
                            ),
                            network_profile=self.compute_models.VirtualMachineScaleSetNetworkProfile(
                                network_interface_configurations=[
                                    self.compute_models.VirtualMachineScaleSetNetworkConfiguration(
                                        name=self.name,
                                        primary=True,
                                        ip_configurations=[
                                            self.compute_models.VirtualMachineScaleSetIPConfiguration(
                                                name='default',
                                                subnet=self.compute_models.ApiEntityReference(
                                                    id=subnet.id
                                                ),
                                                primary=True,
                                                load_balancer_backend_address_pools=load_balancer_backend_address_pools,
                                                load_balancer_inbound_nat_pools=load_balancer_inbound_nat_pools,
                                                application_gateway_backend_address_pools=application_gateway_backend_address_pools
                                            )
                                        ],
                                        enable_accelerated_networking=self.enable_accelerated_networking,
                                        network_security_group=self.security_group
                                    )
                                ]
                            )
                        ),
                        zones=self.zones
                    )
                    if self.scale_in_policy:
                        vmss_resource.scale_in_policy = self.gen_scale_in_policy()
                    if self.terminate_event_timeout_minutes:
                        vmss_resource.virtual_machine_profile.scheduled_events_profile = self.gen_scheduled_event_profile()
                    if self.admin_password:
                        vmss_resource.virtual_machine_profile.os_profile.admin_password = self.admin_password
                    if self.os_type == 'Linux' and os_profile:
                        vmss_resource.virtual_machine_profile.os_profile.linux_configuration = self.compute_models.LinuxConfiguration(
                            disable_password_authentication=disable_ssh_password
                        )
                    if self.ssh_public_keys:
                        ssh_config = self.compute_models.SshConfiguration()
                        ssh_config.public_keys = \
                            [self.compute_models.SshPublicKey(path=key['path'], key_data=key['key_data']) for key in self.ssh_public_keys]
                        vmss_resource.virtual_machine_profile.os_profile.linux_configuration.ssh = ssh_config
                    if self.data_disks:
                        data_disks = []
                        for data_disk in self.data_disks:
                            data_disk_managed_disk = self.compute_models.VirtualMachineScaleSetManagedDiskParameters(
                                storage_account_type=data_disk.get('managed_disk_type', None)
                            )
                            data_disk['caching'] = data_disk.get(
                                'caching',
                                self.compute_models.CachingTypes.read_only
                            )
                            data_disks.append(self.compute_models.VirtualMachineScaleSetDataDisk(
                                lun=data_disk.get('lun', None),
                                caching=data_disk.get('caching', None),
                                create_option=self.compute_models.DiskCreateOptionTypes.empty,
                                disk_size_gb=data_disk.get('disk_size_gb', None),
                                managed_disk=data_disk_managed_disk,
                            ))
                        vmss_resource.virtual_machine_profile.storage_profile.data_disks = data_disks
                    if self.plan:
                        # Marketplace images require the license terms to be
                        # accepted before the VMSS can be created.
                        try:
                            plan_name = self.plan.get('name')
                            plan_product = self.plan.get('product')
                            plan_publisher = self.plan.get('publisher')
                            term = self.marketplace_client.marketplace_agreements.get(
                                publisher_id=plan_publisher, offer_id=plan_product, plan_id=plan_name)
                            term.accepted = True
                            self.marketplace_client.marketplace_agreements.create(
                                publisher_id=plan_publisher, offer_id=plan_product, plan_id=plan_name, parameters=term)
                        except Exception as exc:
                            self.fail(("Error accepting terms for virtual machine {0} with plan {1}. " +
                                       "Only service admin/account admin users can purchase images " +
                                       "from the marketplace. - {2}").format(self.name, self.plan, str(exc)))
                    self.log("Create virtual machine with parameters:")
                    self.create_or_update_vmss(vmss_resource)
                elif self.differences and len(self.differences) > 0:
                    # Update path: re-fetch the live model and patch only the
                    # properties we diffed above.
                    self.log("Update virtual machine scale set {0}".format(self.name))
                    self.results['actions'].append('Updated VMSS {0}'.format(self.name))
                    vmss_resource = self.get_vmss()
                    vmss_resource.virtual_machine_profile.storage_profile.os_disk.caching = self.os_disk_caching
                    vmss_resource.sku.capacity = self.capacity
                    vmss_resource.overprovision = self.overprovision
                    vmss_resource.single_placement_group = self.single_placement_group
                    if support_lb_change:
                        if self.load_balancer:
                            vmss_resource.virtual_machine_profile.network_profile.network_interface_configurations[0] \
                                .ip_configurations[0].load_balancer_backend_address_pools = load_balancer_backend_address_pools
                            vmss_resource.virtual_machine_profile.network_profile.network_interface_configurations[0] \
                                .ip_configurations[0].load_balancer_inbound_nat_pools = load_balancer_inbound_nat_pools
                            vmss_resource.virtual_machine_profile.network_profile.network_interface_configurations[0] \
                                .ip_configurations[0].application_gateway_backend_address_pools = None
                        elif self.application_gateway:
                            vmss_resource.virtual_machine_profile.network_profile.network_interface_configurations[0] \
                                .ip_configurations[0].application_gateway_backend_address_pools = application_gateway_backend_address_pools
                            vmss_resource.virtual_machine_profile.network_profile.network_interface_configurations[0] \
                                .ip_configurations[0].load_balancer_backend_address_pools = None
                            vmss_resource.virtual_machine_profile.network_profile.network_interface_configurations[0] \
                                .ip_configurations[0].load_balancer_inbound_nat_pools = None
                    if self.data_disks is not None:
                        data_disks = []
                        for data_disk in self.data_disks:
                            data_disks.append(self.compute_models.VirtualMachineScaleSetDataDisk(
                                lun=data_disk['lun'],
                                caching=data_disk['caching'],
                                create_option=self.compute_models.DiskCreateOptionTypes.empty,
                                disk_size_gb=data_disk['disk_size_gb'],
                                managed_disk=self.compute_models.VirtualMachineScaleSetManagedDiskParameters(
                                    storage_account_type=data_disk.get('managed_disk_type', None)
                                ),
                            ))
                        vmss_resource.virtual_machine_profile.storage_profile.data_disks = data_disks
                    if self.scale_in_policy:
                        vmss_resource.scale_in_policy = self.gen_scale_in_policy()
                    if self.terminate_event_timeout_minutes:
                        vmss_resource.virtual_machine_profile.scheduled_events_profile = self.gen_scheduled_event_profile()
                    if image_reference is not None:
                        vmss_resource.virtual_machine_profile.storage_profile.image_reference = image_reference
                    self.log("Update virtual machine with parameters:")
                    self.create_or_update_vmss(vmss_resource)
                self.results['ansible_facts']['azure_vmss'] = self.serialize_vmss(self.get_vmss())
            elif self.state == 'absent':
                # delete the VM
                self.log("Delete virtual machine scale set {0}".format(self.name))
                self.results['ansible_facts']['azure_vmss'] = None
                self.delete_vmss(vmss)
        # until we sort out how we want to do this globally
        del self.results['actions']
        return self.results
def get_vmss(self):
    '''Fetch the scale set named ``self.name`` from ``self.resource_group``.

    :return: VirtualMachineScaleSet object; fails the module on error.
    '''
    try:
        return self.compute_client.virtual_machine_scale_sets.get(self.resource_group, self.name)
    except CloudError as exc:
        self.fail("Error getting virtual machine scale set {0} - {1}".format(self.name, str(exc)))
def get_virtual_network(self, name):
    # Look up a virtual network by name. Note the vnet may live in a
    # different resource group than the scale set itself
    # (self.virtual_network_resource_group).
    try:
        vnet = self.network_client.virtual_networks.get(self.virtual_network_resource_group, name)
        return vnet
    except CloudError as exc:
        self.fail("Error fetching virtual network {0} - {1}".format(name, str(exc)))
def get_subnet(self, vnet_name, subnet_name):
    # Resolve one subnet inside the configured virtual-network resource group.
    self.log("Fetching subnet {0} in virtual network {1}".format(subnet_name, vnet_name))
    try:
        result = self.network_client.subnets.get(
            self.virtual_network_resource_group, vnet_name, subnet_name)
    except CloudError as exc:
        self.fail("Error: fetching subnet {0} in virtual network {1} - {2}".format(
            subnet_name,
            vnet_name,
            str(exc)))
    return result
def get_load_balancer(self, id):
    # NOTE(review): parameter name shadows the builtin id(); kept as-is for
    # interface compatibility with existing callers.
    id_dict = parse_resource_id(id)
    try:
        # Fall back to the module's resource group when the ID omits one.
        return self.network_client.load_balancers.get(id_dict.get('resource_group', self.resource_group), id_dict.get('name'))
    except CloudError as exc:
        self.fail("Error fetching load balancer {0} - {1}".format(id, str(exc)))
def get_application_gateway(self, id):
    # NOTE(review): parameter name shadows the builtin id(); kept as-is for
    # interface compatibility with existing callers.
    id_dict = parse_resource_id(id)
    try:
        # Fall back to the module's resource group when the ID omits one.
        return self.network_client.application_gateways.get(id_dict.get('resource_group', self.resource_group), id_dict.get('name'))
    except CloudError as exc:
        self.fail("Error fetching application_gateway {0} - {1}".format(id, str(exc)))
def serialize_vmss(self, vmss):
    '''
    Convert a VirtualMachineScaleSet object to dict.

    :param vm: VirtualMachineScaleSet object
    :return: dict
    '''
    # Base serialization through the shared Azure helper...
    result = self.serialize_obj(vmss, AZURE_OBJECT_CLASS, enum_modules=AZURE_ENUM_MODULES)
    # ...then pin the identity fields explicitly so they are always present.
    result['id'] = vmss.id
    result['name'] = vmss.name
    result['type'] = vmss.type
    result['location'] = vmss.location
    result['tags'] = vmss.tags
    return result
def delete_vmss(self, vmss):
    # Record the action, then issue the long-running delete and block on it.
    self.log("Deleting virtual machine scale set {0}".format(self.name))
    self.results['actions'].append("Deleted virtual machine scale set {0}".format(self.name))
    try:
        self.get_poller_result(
            self.compute_client.virtual_machine_scale_sets.delete(self.resource_group, self.name))
    except CloudError as exc:
        self.fail("Error deleting virtual machine scale set {0} - {1}".format(self.name, str(exc)))
    return True
def get_marketplace_image_version(self):
    """Resolve self.image (publisher/offer/sku/version) to a platform image.

    'latest' selects the final entry of the version list returned by the
    service (assumed sorted ascending - TODO confirm; same behaviour as the
    previous code). Fails the module when the image or the requested version
    cannot be found.
    """
    try:
        versions = self.compute_client.virtual_machine_images.list(self.location,
                                                                   self.image['publisher'],
                                                                   self.image['offer'],
                                                                   self.image['sku'])
    except CloudError as exc:
        self.fail("Error fetching image {0} {1} {2} - {3}".format(self.image['publisher'],
                                                                  self.image['offer'],
                                                                  self.image['sku'],
                                                                  str(exc)))
    # Truthiness covers both None and the empty list; the previous
    # `versions and len(versions) > 0` was redundant.
    if versions:
        if self.image['version'] == 'latest':
            return versions[-1]
        for version in versions:
            if version.name == self.image['version']:
                return version
    self.fail("Error could not find image {0} {1} {2} {3}".format(self.image['publisher'],
                                                                  self.image['offer'],
                                                                  self.image['sku'],
                                                                  self.image['version']))
def get_custom_image_reference(self, name, resource_group=None):
    # Search custom images either in one resource group or subscription-wide.
    try:
        if resource_group:
            vm_images = self.compute_client.images.list_by_resource_group(resource_group)
        else:
            vm_images = self.compute_client.images.list()
    except Exception as exc:
        self.fail("Error fetching custom images from subscription - {0}".format(str(exc)))
    # First image whose name matches wins.
    for vm_image in vm_images:
        if vm_image.name == name:
            self.log("Using custom image id {0}".format(vm_image.id))
            return self.compute_models.ImageReference(id=vm_image.id)
    self.fail("Error could not find image with name {0}".format(name))
def create_or_update_vmss(self, params):
    # Issue the (long-running) create/update and block until it completes.
    try:
        poller = self.compute_client.virtual_machine_scale_sets.create_or_update(self.resource_group, self.name, params)
        self.get_poller_result(poller)
    except CloudError as exc:
        self.fail("Error creating or updating virtual machine {0} - {1}".format(self.name, str(exc)))
def vm_size_is_valid(self):
    '''
    Validate self.vm_size against the list of virtual machine sizes
    available for the account and location.

    :return: boolean
    '''
    try:
        sizes = self.compute_client.virtual_machine_sizes.list(self.location)
    except CloudError as exc:
        self.fail("Error retrieving available machine sizes - {0}".format(str(exc)))
    # any() short-circuits on the first match instead of an explicit loop.
    return any(size.name == self.vm_size for size in sizes)
def parse_nsg(self):
    # Accept either a plain name or a dict with name/resource_group keys.
    nsg = self.security_group
    resource_group = self.resource_group
    if isinstance(self.security_group, dict):
        nsg = self.security_group.get('name')
        resource_group = self.security_group.get('resource_group', self.resource_group)
    # Expand to a fully-qualified network-security-group resource ID.
    id = format_resource_id(val=nsg,
                            subscription_id=self.subscription_id,
                            namespace='Microsoft.Network',
                            types='networkSecurityGroups',
                            resource_group=resource_group)
    name = azure_id_to_dict(id).get('name')
    return dict(id=id, name=name)
def gen_scheduled_event_profile(self):
    # No timeout configured -> no scheduled-events profile at all.
    if self.terminate_event_timeout_minutes is None:
        return None
    # ISO-8601 duration: e.g. 10 minutes -> "PT10M".
    termination_profile = self.compute_models.TerminateNotificationProfile()
    termination_profile.not_before_timeout = "PT" + str(self.terminate_event_timeout_minutes) + "M"
    termination_profile.enable = True
    profile = self.compute_models.ScheduledEventsProfile()
    profile.terminate_notification_profile = termination_profile
    return profile
def gen_scale_in_policy(self):
    # Wrap the single configured rule in a ScaleInPolicy, if any.
    if self.scale_in_policy is None:
        return None
    return self.compute_models.ScaleInPolicy(rules=[self.scale_in_policy])
def main():
    # Instantiating the module class runs the whole Ansible module lifecycle.
    AzureRMVirtualMachineScaleSet()


if __name__ == '__main__':
    main()
| 45.294408 | 209 | 0.55603 |
acdf49fa539ea08a1311c9adcd7a4d6691c5f0dd | 7,790 | py | Python | test/functional/combine_logs.py | adymoloca/flocoin | d9244577577dede975c852f6fcfe1afba4d71a57 | [
"MIT"
] | null | null | null | test/functional/combine_logs.py | adymoloca/flocoin | d9244577577dede975c852f6fcfe1afba4d71a57 | [
"MIT"
] | null | null | null | test/functional/combine_logs.py | adymoloca/flocoin | d9244577577dede975c852f6fcfe1afba4d71a57 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Combine logs from multiple flocoin nodes as well as the test_framework log.
This streams the combined log output to stdout. Use combine_logs.py > outputfile
to write to an outputfile.
If no argument is provided, the most recent test directory will be used."""
import argparse
from collections import defaultdict, namedtuple
import heapq
import itertools
import os
import pathlib
import re
import sys
import tempfile
# N.B.: don't import any local modules here - this script must remain executable
# without the parent module installed.
# Should match same symbol in `test_framework.test_framework`.
TMPDIR_PREFIX = "flocoin_func_test_"

# Matches on the date format at the start of the log event
TIMESTAMP_PATTERN = re.compile(r"^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{6})?Z")

# (timestamp string, source label such as "test"/"node0", full event text)
LogEvent = namedtuple('LogEvent', ['timestamp', 'source', 'event'])
def main():
    """Main function. Parses args, reads the log files and renders them as text or html."""
    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        'testdir', nargs='?', default='',
        help=('temporary test directory to combine logs from. '
              'Defaults to the most recent'))
    parser.add_argument('-c', '--color', dest='color', action='store_true', help='outputs the combined log with events colored by source (requires posix terminal colors. Use less -r for viewing)')
    parser.add_argument('--html', dest='html', action='store_true', help='outputs the combined log as html. Requires jinja2. pip install jinja2')
    args = parser.parse_args()

    # --color and --html are mutually exclusive rendering modes.
    if args.html and args.color:
        print("Only one out of --color or --html should be specified")
        sys.exit(1)

    # Fall back to the newest tmpdir created by the test framework.
    testdir = args.testdir or find_latest_test_dir()

    if not testdir:
        print("No test directories found")
        sys.exit(1)

    if not args.testdir:
        print("Opening latest test directory: {}".format(testdir), file=sys.stderr)

    # Unknown sources render uncolored thanks to the defaultdict default.
    colors = defaultdict(lambda: '')
    if args.color:
        colors["test"] = "\033[0;36m"  # CYAN
        colors["node0"] = "\033[0;34m"  # BLUE
        colors["node1"] = "\033[0;32m"  # GREEN
        colors["node2"] = "\033[0;31m"  # RED
        colors["node3"] = "\033[0;33m"  # YELLOW
        colors["reset"] = "\033[0m"  # Reset font color

    log_events = read_logs(testdir)

    if args.html:
        print_logs_html(log_events)
    else:
        print_logs_plain(log_events, colors)
        print_node_warnings(testdir, colors)
def read_logs(tmp_dir):
    """Reads log files.

    Delegates to generator function get_log_events() to provide individual log events
    for each of the input log files."""

    # Find out what the folder is called that holds the debug.log file
    glob = pathlib.Path(tmp_dir).glob('node0/**/debug.log')
    path = next(glob, None)
    if path:
        assert next(glob, None) is None  # more than one debug.log, should never happen
        chain = re.search(r'node0/(.+?)/debug\.log$', path.as_posix()).group(1)  # extract the chain name
    else:
        chain = 'regtest'  # fallback to regtest (should only happen when none exists)

    files = [("test", "%s/test_framework.log" % tmp_dir)]
    # Collect node<i> debug logs until the first missing index.
    for i in itertools.count():
        logfile = "{}/node{}/{}/debug.log".format(tmp_dir, i, chain)
        if not os.path.isfile(logfile):
            break
        files.append(("node%d" % i, logfile))

    # Merge the per-file event streams into one timestamp-ordered stream.
    return heapq.merge(*[get_log_events(source, f) for source, f in files])
def print_node_warnings(tmp_dir, colors):
    """Print nodes' errors and warnings"""

    warnings = []
    for stream in ['stdout', 'stderr']:
        for i in itertools.count():
            folder = "{}/node{}/{}".format(tmp_dir, i, stream)
            if not os.path.isdir(folder):
                break
            # Each capture dir may contain several dump files; keep the
            # non-empty ones along with a "node<i> <stream>" label.
            for (_, _, fns) in os.walk(folder):
                for fn in fns:
                    warning = pathlib.Path('{}/{}'.format(folder, fn)).read_text().strip()
                    if warning:
                        warnings.append(("node{} {}".format(i, stream), warning))

    print()
    for w in warnings:
        print("{} {} {} {}".format(colors[w[0].split()[0]], w[0], w[1], colors["reset"]))
def find_latest_test_dir():
    """Return the most recently modified test tmpdir, or None if there is none."""
    tmpdir = tempfile.gettempdir()
    candidates = []
    for basename in os.listdir(tmpdir):
        fullpath = os.path.join(tmpdir, basename)
        # A valid candidate is a readable directory carrying the framework's
        # well-known prefix.
        if (basename.startswith(TMPDIR_PREFIX)
                and os.path.isdir(fullpath)
                and os.access(fullpath, os.R_OK)):
            candidates.append(fullpath)
    if not candidates:
        return None
    return max(candidates, key=os.path.getmtime)
def get_log_events(source, logfile):
    """Yield LogEvent tuples parsed from one log file.

    A new event starts at every line matching TIMESTAMP_PATTERN; lines
    without a timestamp are folded into the preceding event."""
    try:
        with open(logfile, 'r', encoding='utf-8') as infile:
            timestamp = ''
            event = ''
            for line in infile:
                # skip blank lines
                if line == '\n':
                    continue
                match = TIMESTAMP_PATTERN.match(line)
                if match is None:
                    # Continuation line: prefix with a space so it stays
                    # aligned with the event body.
                    event += " " + line
                    continue
                # A timestamp marks the start of a new event; flush the old one.
                if event:
                    yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
                timestamp = match.group()
                if match.group(1) is None:
                    # Timestamp lacks microseconds - pad with zeroes so all
                    # events sort consistently.
                    padded = timestamp.replace("Z", ".000000Z")
                    line = line.replace(timestamp, padded)
                    timestamp = padded
                event = line
            # Flush the trailing event. (Emitted even when empty, matching
            # the original behaviour.)
            yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
    except FileNotFoundError:
        print("File %s could not be opened. Continuing without it." % logfile, file=sys.stderr)
def print_logs_plain(log_events, colors):
    """Write the merged event stream to stdout, colorized per source."""
    for event in log_events:
        color = colors[event.source.rstrip()]
        reset = colors["reset"]
        lines = event.event.splitlines()
        # First line carries the padded source label.
        print("{0} {1: <5} {2} {3}".format(color, event.source, lines[0], reset))
        # Remaining lines are continuations of the same event.
        for continuation in lines[1:]:
            print("{0}{1}{2}".format(color, continuation, reset))
def print_logs_html(log_events):
    """Renders the iterator of log events into html."""
    try:
        import jinja2  # third-party; only required for --html output
    except ImportError:
        print("jinja2 not found. Try `pip install jinja2`")
        sys.exit(1)

    # Feed the events into the bundled template as plain dicts.
    print(jinja2.Environment(loader=jinja2.FileSystemLoader('./'))
          .get_template('combined_log_template.html')
          .render(title="Combined Logs from testcase", log_events=[event._asdict() for event in log_events]))
# Script entry point.
if __name__ == '__main__':
    main()
| 38.564356 | 196 | 0.617843 |
acdf4a35955126f45ac1b3eef5905a9ffe763842 | 897 | py | Python | Kafka/Producer/src/isbn_generator.py | Sabokou/BigData | 7901451cf3fa748c541ef93cf1578495335165cf | [
"Apache-2.0"
] | 2 | 2021-12-09T15:42:36.000Z | 2022-02-03T21:15:44.000Z | Kafka/Producer/src/isbn_generator.py | Sabokou/BigData | 7901451cf3fa748c541ef93cf1578495335165cf | [
"Apache-2.0"
] | null | null | null | Kafka/Producer/src/isbn_generator.py | Sabokou/BigData | 7901451cf3fa748c541ef93cf1578495335165cf | [
"Apache-2.0"
] | 1 | 2021-09-02T21:08:00.000Z | 2021-09-02T21:08:00.000Z | # pip install isbnlib
# pip install Random-Word
import isbnlib
from random_word import RandomWords
class ISBN_generator():
    """Generate pseudo-random ISBNs by looking up a random common word."""

    def __init__(self):
        self.word_gen = RandomWords()

    def random_word(self):
        # Get a random word that is common: found in at least 10 dictionaries.
        return self.word_gen.get_random_word(hasDictionaryDef="true", minDictionaryCount=10)

    def random_isbn(self):
        """Return an ISBN derived from a random word.

        Retries iteratively until both the word lookup and the ISBN lookup
        return a non-empty result. (The previous recursive version could hit
        Python's recursion limit after repeated lookup failures.)
        """
        while True:
            word = self.random_word()
            # Empty string or None means the word service failed - retry.
            if not word:
                continue
            isbn = isbnlib.isbn_from_words(word)
            # Retry as well when no ISBN was found for the word.
            if isbn:
                return isbn
acdf4c727c612f6c3510070ee275ae4236cb7d5c | 346 | py | Python | CODE/frequency.py | saicharan637/Geo-Mapping-of-Entities-Using-BERT | cb7d938e93399bf31ccc28f233a583b4ac6f3172 | [
"Apache-2.0"
] | null | null | null | CODE/frequency.py | saicharan637/Geo-Mapping-of-Entities-Using-BERT | cb7d938e93399bf31ccc28f233a583b4ac6f3172 | [
"Apache-2.0"
] | null | null | null | CODE/frequency.py | saicharan637/Geo-Mapping-of-Entities-Using-BERT | cb7d938e93399bf31ccc28f233a583b4ac6f3172 | [
"Apache-2.0"
] | 1 | 2021-04-04T08:27:32.000Z | 2021-04-04T08:27:32.000Z | import nltk
import sys
import os
# Work relative to the project folder that holds the dataset.
os.chdir('C:/Users/Sai Charan/Desktop/Studies/522-Advance_Data_Mining/PROJECT')

# Read the corpus as one long string, treating newlines as word separators.
with open("wiki_word_dataset.txt", "r") as myfile:
    data = myfile.read().replace('\n', ' ')
data = data.split(' ')

# Count word frequencies with NLTK.
fdist1 = nltk.FreqDist(data)

# Write the sorted (word, count) pairs to frequency.txt. An explicit file
# handle replaces the previous reassignment of sys.stdout, which left the
# interpreter's stdout closed afterwards.
with open("frequency.txt", "w") as out:
    print(fdist1.most_common(), file=out)
| 23.066667 | 79 | 0.716763 |
acdf4cf5d929afc69b76ac26ef60a7a7475dc65d | 1,162 | py | Python | 14_Tran_An_Thien/Bai_1.10.py | lpython2006e/exercies | 84343eae57d86708a7984aa02f77183a4688a508 | [
"MIT"
] | null | null | null | 14_Tran_An_Thien/Bai_1.10.py | lpython2006e/exercies | 84343eae57d86708a7984aa02f77183a4688a508 | [
"MIT"
] | null | null | null | 14_Tran_An_Thien/Bai_1.10.py | lpython2006e/exercies | 84343eae57d86708a7984aa02f77183a4688a508 | [
"MIT"
] | 8 | 2020-07-10T14:13:54.000Z | 2020-08-03T08:17:50.000Z | import math
"""
# Make a program in Python, that solve quadratic equations
# A.x2 + B.x1 + C = 0
# Where A, B, C is real numbers (could be negative), find X.
# Giải phương trình bậc 2: ax2 + bx + c = 0
# @param a: hệ số bậc 2
# @param b: hệ số bậc 1
# @param c: số hạng tự do
"""
def giaiPTBac2(a, b, c):
    """Solve the quadratic equation a*x^2 + b*x + c = 0 and print the roots.

    a -- quadratic coefficient, b -- linear coefficient, c -- constant term.
    Messages are printed in Vietnamese, exactly as in the original script.
    """
    # Degenerate case: the equation is not actually quadratic.
    if a == 0:
        if b == 0:
            print("Phương trình vô nghiệm!")
        else:
            print("Phương trình có một nghiệm: x = ", -c / b)
        return
    # The discriminant decides how many real roots exist.
    delta = b * b - 4 * a * c
    if delta < 0:
        print("Phương trình vô nghiệm!")
    elif delta == 0:
        x1 = -b / (2 * a)
        print("Phương trình có nghiệm kép: x1 = x2 = ", x1)
    else:
        root = math.sqrt(delta)
        x1 = float((-b + root) / (2 * a))
        x2 = float((-b - root) / (2 * a))
        print("Phương trình có 2 nghiệm là: x1 = ", x1, " và x2 = ", x2)
# Read the three coefficients from the user as floats
# (originally: "Nhập các hệ số").
a = float(input("Nhập hệ số bậc 2, a = "));
b = float(input("Nhập hệ số bậc 1, b = "));
c = float(input("Nhập hằng số tự do, c = "));
# Call the quadratic solver (originally: "Gọi hàm giải phương trình bậc 2").
giaiPTBac2(a, b, c)
| 27.023256 | 73 | 0.513769 |
acdf4d1fd05c1abd3eb379a9aa610ed51975be9b | 311 | py | Python | review/admin.py | LudwigOtto/ECommerce | 07fe32954604f9f9785e5e1b4c51c405d1d08323 | [
"MIT"
] | 1 | 2020-03-27T18:09:46.000Z | 2020-03-27T18:09:46.000Z | review/admin.py | LudwigOtto/ECommerce | 07fe32954604f9f9785e5e1b4c51c405d1d08323 | [
"MIT"
] | null | null | null | review/admin.py | LudwigOtto/ECommerce | 07fe32954604f9f9785e5e1b4c51c405d1d08323 | [
"MIT"
] | null | null | null | from django.contrib import admin
# Register your models here.
from .models import Review
class ReviewAdmin(admin.ModelAdmin):
    # Columns shown on the Review change-list page.
    list_display = ['item_id', 'customer_id', 'seller',
                    'ratings', 'detailed_review' ]
    # Editable inline on the list page; both fields also appear in
    # list_display, as Django requires.
    list_editable = ['ratings', 'detailed_review' ]


admin.site.register(Review, ReviewAdmin)
acdf4d9228b572ac3d6628b9c1b86cf19fd6f790 | 10,812 | py | Python | zipline/finance/slippage.py | jimgoo/zipline-fork | 7e898ae36d0cadafe443491e4f3670d587e9716c | [
"Apache-2.0"
] | 1 | 2019-03-29T01:46:35.000Z | 2019-03-29T01:46:35.000Z | zipline/finance/slippage.py | jimgoo/zipline-fork | 7e898ae36d0cadafe443491e4f3670d587e9716c | [
"Apache-2.0"
] | 1 | 2021-08-09T20:43:08.000Z | 2021-08-09T20:43:08.000Z | zipline/finance/slippage.py | jimgoo/zipline-fork | 7e898ae36d0cadafe443491e4f3670d587e9716c | [
"Apache-2.0"
] | 3 | 2017-08-31T12:34:13.000Z | 2021-09-29T22:28:48.000Z | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import abc
import math
from copy import copy
from functools import partial
from six import with_metaclass
from zipline.protocol import DATASOURCE_TYPE
from zipline.utils.serialization_utils import (
VERSION_LABEL
)
# Bit flags combined into a mask that classifies an order in
# check_order_triggers() (e.g. BUY | STOP | LIMIT).
SELL = 1 << 0
BUY = 1 << 1
STOP = 1 << 2
LIMIT = 1 << 3
def get_event_price(order, event):
    """Return the price at which *order* would transact against *event*.

    When the event carries a bid/ask quote, buys fill at the ask and sells
    at the bid. A zero-amount order - and, after this fix, an event without
    quote data - falls back to the plain trade price.
    """
    if hasattr(event, 'bid') and hasattr(event, 'ask'):
        if order.amount > 0:
            # Buy at the ask.
            return event.ask
        if order.amount < 0:
            # Sell at the bid.
            return event.bid
        return event.price
    # Previously this path fell off the end and returned None implicitly,
    # which would poison any price arithmetic in callers; use the trade
    # price instead.
    return event.price
def check_order_triggers(order, event, take_market=False):
    """
    Given an order and a trade event, return a tuple of
    (stop_reached, limit_reached).
    For market orders, will return (False, False).
    For stop orders, limit_reached will always be False.
    For limit orders, stop_reached will always be False.
    For stop limit orders a Boolean is returned to flag
    that the stop has been reached.

    Orders that have been triggered already (price targets reached),
    the order's current values are returned.
    """
    # Already-triggered orders report their stored flags unchanged.
    if order.triggered:
        return (order.stop_reached, order.limit_reached, False)

    stop_reached = False
    limit_reached = False
    sl_stop_reached = False

    order_type = 0

    # Price to test against: bid/ask when taking the market, else last trade.
    if take_market:
        event_price = get_event_price(order, event)
    else:
        event_price = event.price

    # Encode the order as a bitmask of BUY/SELL and STOP/LIMIT flags.
    if order.amount > 0:
        order_type |= BUY
    else:
        order_type |= SELL

    if order.stop is not None:
        order_type |= STOP

    if order.limit is not None:
        order_type |= LIMIT

    # NOTE(review): indentation was lost in this copy; nesting the limit
    # check under the stop check for stop-limit orders matches upstream
    # zipline - confirm against the original file.
    if order_type == BUY | STOP | LIMIT:
        if event_price >= order.stop:
            # Stop leg of a buy stop-limit has fired...
            sl_stop_reached = True
            if event_price <= order.limit:
                # ...and the limit leg is satisfiable at this price too.
                limit_reached = True
    elif order_type == SELL | STOP | LIMIT:
        if event_price <= order.stop:
            sl_stop_reached = True
            if event_price >= order.limit:
                limit_reached = True
    elif order_type == BUY | STOP:
        if event_price >= order.stop:
            stop_reached = True
    elif order_type == SELL | STOP:
        if event_price <= order.stop:
            stop_reached = True
    elif order_type == BUY | LIMIT:
        if event_price <= order.limit:
            limit_reached = True
    elif order_type == SELL | LIMIT:
        # This is a SELL LIMIT order
        if event_price >= order.limit:
            limit_reached = True

    # print (stop_reached, limit_reached, sl_stop_reached)
    return (stop_reached, limit_reached, sl_stop_reached)
def transact_stub(slippage, commission, event, open_orders):
    """Yield (order, transaction) pairs with commission folded into price.

    This is intended to be wrapped in a partial, so that the slippage and
    commission models can be enclosed (see transact_partial).
    """
    for order, txn in slippage(event, open_orders):
        if txn and txn.amount != 0:
            # Commission worsens the fill price in the trade's direction.
            sign = math.copysign(1, txn.amount)
            per_share, total_commission = commission.calculate(txn)
            txn.price += per_share * sign
            txn.commission = total_commission
        yield order, txn
def transact_partial(slippage, commission):
    # Curry transact_stub with the configured slippage/commission models,
    # leaving a callable of (event, open_orders).
    return partial(transact_stub, slippage, commission)
class Transaction(object):
    """A single fill: *amount* shares of *sid* at *price* on datetime *dt*."""

    def __init__(self, sid, amount, dt, price, order_id, commission=None):
        self.sid = sid
        self.amount = amount
        self.dt = dt
        self.price = price
        self.order_id = order_id
        self.commission = commission
        self.type = DATASOURCE_TYPE.TRANSACTION

    def __getitem__(self, name):
        # Dict-style access mirrors attribute access.
        return self.__dict__[name]

    def to_dict(self):
        # Shallow-copy the attributes, dropping the datasource-type marker.
        as_dict = copy(self.__dict__)
        del as_dict['type']
        return as_dict

    def __getstate__(self):
        # Tag pickled state with a schema version for forward compatibility.
        state_dict = copy(self.__dict__)
        state_dict[VERSION_LABEL] = 1
        return state_dict

    def __setstate__(self, state):
        # Reject states older than the oldest supported schema (version 1).
        version = state.pop(VERSION_LABEL)
        if version < 1:
            raise BaseException("Transaction saved state is too old.")
        self.__dict__.update(state)
def create_transaction(event, order, price, amount):
    """Build a Transaction filling *order* at *price* for *amount* shares.

    Raises ValueError (narrowed from the previous bare Exception; still
    caught by any caller handling Exception) when the whole-share magnitude
    of *amount* is zero.
    """
    # floor the amount to protect against non-whole number orders
    # TODO: Investigate whether we can add a robust check in blotter
    # and/or tradesimulation, as well.
    amount_magnitude = int(abs(amount))

    if amount_magnitude < 1:
        raise ValueError("Transaction magnitude must be at least 1.")

    transaction = Transaction(
        sid=event.sid,
        amount=int(amount),
        dt=event.dt,
        price=price,
        order_id=order.id
    )

    return transaction
class LiquidityExceeded(Exception):
    # Raised by process_order() when the bar's volume budget is exhausted;
    # SlippageModel.simulate() catches it to stop filling for this bar.
    pass
class SlippageModel(with_metaclass(abc.ABCMeta)):
    """Abstract base for slippage models.

    Subclasses implement process_order(); simulate() drives it across the
    open orders for one bar and tracks the volume filled so far.
    """

    @property
    def volume_for_bar(self):
        # Shares transacted against the current bar across all orders.
        return self._volume_for_bar

    @abc.abstractmethod
    def process_order(self, event, order):
        """Return a Transaction for *order* against *event*, or None.

        Declared with abstractmethod rather than the previous (deprecated,
        and incorrect for a plain method) abstractproperty.
        """

    def simulate(self, event, current_orders):
        self._volume_for_bar = 0

        for order in current_orders:
            # Fully-filled orders need no further work.
            if order.open_amount == 0:
                continue

            # Let the order update its stop/limit trigger state first.
            order.check_triggers(event)
            if not order.triggered:
                continue

            try:
                txn = self.process_order(event, order)
            except LiquidityExceeded:
                # No more volume available in this bar - stop filling.
                break

            if txn:
                self._volume_for_bar += abs(txn.amount)
                yield order, txn

    def __call__(self, event, current_orders, **kwargs):
        return self.simulate(event, current_orders, **kwargs)
class VolumeShareSlippage(SlippageModel):
    """Caps fills at a fraction of bar volume and applies a price impact
    quadratic in the consumed volume share."""

    def __init__(self,
                 volume_limit=.25,
                 price_impact=0.1,
                 take_market=False):
        # Maximum fraction of one bar's volume this model will transact.
        self.volume_limit = volume_limit
        # Scale of the (volume_share ** 2) price-impact term.
        self.price_impact = price_impact
        # When True, price off the bid/ask instead of the trade price.
        self.take_market = take_market

    def __repr__(self):
        # NOTE(review): indentation inside this template string was lost in
        # this copy; the layout below matches upstream zipline - confirm.
        return """
{class_name}(
    volume_limit={volume_limit},
    price_impact={price_impact})
""".strip().format(class_name=self.__class__.__name__,
                   volume_limit=self.volume_limit,
                   price_impact=self.price_impact)

    def process_order(self, event, order):
        max_volume = self.volume_limit * event.volume

        # price impact accounts for the total volume of transactions
        # created against the current minute bar
        remaining_volume = max_volume - self.volume_for_bar
        if remaining_volume < 1:
            # we can't fill any more transactions
            raise LiquidityExceeded()

        # the current order amount will be the min of the
        # volume available in the bar or the open amount.
        cur_volume = int(min(remaining_volume, abs(order.open_amount)))

        if cur_volume < 1:
            return

        # tally the current amount into our total amount ordered.
        # total amount will be used to calculate price impact
        total_volume = self.volume_for_bar + cur_volume

        volume_share = min(total_volume / event.volume,
                           self.volume_limit)

        if self.take_market:
            event_price = get_event_price(order, event)
        else:
            event_price = event.price

        # Quadratic impact term, signed with the order's direction.
        simulated_impact = volume_share ** 2 \
            * math.copysign(self.price_impact, order.direction) \
            * event_price

        impacted_price = event_price + simulated_impact

        if order.limit:
            # this is tricky! if an order with a limit price has reached
            # the limit price, we will try to fill the order. do not fill
            # these shares if the impacted price is worse than the limit
            # price. return early to avoid creating the transaction.

            # buy order is worse if the impacted price is greater than
            # the limit price. sell order is worse if the impacted price
            # is less than the limit price
            if (order.direction > 0 and impacted_price > order.limit) or \
                    (order.direction < 0 and impacted_price < order.limit):
                return

        return create_transaction(
            event,
            order,
            impacted_price,
            math.copysign(cur_volume, order.direction)
        )

    def __getstate__(self):
        # Tag pickled state with a schema version.
        state_dict = copy(self.__dict__)

        STATE_VERSION = 1
        state_dict[VERSION_LABEL] = STATE_VERSION

        return state_dict

    def __setstate__(self, state):
        OLDEST_SUPPORTED_STATE = 1
        version = state.pop(VERSION_LABEL)

        if version < OLDEST_SUPPORTED_STATE:
            raise BaseException("VolumeShareSlippage saved state is too old.")

        self.__dict__.update(state)
class FixedSlippage(SlippageModel):
    """Slippage model applying a constant spread around the event price.

    Half the spread is added on buys and subtracted on sells, per share.
    """

    def __init__(self, spread=0.0, take_market=False):
        """
        Use the fixed slippage model, which will just add/subtract
        a specified spread spread/2 will be added on buys and subtracted
        on sells per share
        """
        self.spread = spread
        self.take_market = take_market

    def process_order(self, event, order):
        # Reference price: bid/ask when taking the market, else last trade.
        if self.take_market:
            base_price = get_event_price(order, event)
        else:
            base_price = event.price
        fill_price = base_price + (self.spread / 2.0 * order.direction)
        return create_transaction(event, order, fill_price, order.amount)

    def __getstate__(self):
        # Tag pickled state with a schema version.
        state_dict = copy(self.__dict__)
        state_dict[VERSION_LABEL] = 1
        return state_dict

    def __setstate__(self, state):
        # Reject states older than the oldest supported schema (version 1).
        version = state.pop(VERSION_LABEL)
        if version < 1:
            raise BaseException("FixedSlippage saved state is too old.")
        self.__dict__.update(state)
| 28.909091 | 85 | 0.631243 |
acdf4e38c4adbf35251112811a1e0c2f76adca99 | 4,483 | py | Python | kmip/core/errors.py | vbnmmnbv/PyKMIP | 4617ae528006178c466fe3945a477f568b596940 | [
"Apache-2.0"
] | 12 | 2016-09-14T21:59:10.000Z | 2020-03-11T07:37:25.000Z | kmip/core/errors.py | vbnmmnbv/PyKMIP | 4617ae528006178c466fe3945a477f568b596940 | [
"Apache-2.0"
] | 1 | 2021-06-25T15:43:48.000Z | 2021-06-25T15:43:48.000Z | kmip/core/errors.py | vbnmmnbv/PyKMIP | 4617ae528006178c466fe3945a477f568b596940 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class ErrorStrings:
    # Shared message templates used by the error classes in this module.
    BAD_EXP_RECV = "Bad {0} {1}: expected {2}, received {3}"
    BAD_ENCODING = "Bad {0} {1}: encoding mismatch"
class BaseError(Exception):
    """Base class for exceptions defined in this module.

    Copies every entry of *args* (typically ``locals()`` from the caller's
    __init__) onto the instance as attributes, skipping the ``self``
    reference.
    """

    def __init__(self, args):
        for key, value in args.items():
            # The original compared with ``is not 'self'`` - an identity
            # check against a string literal that only works thanks to
            # CPython's interning; equality is the correct comparison.
            if key != 'self':
                setattr(self, key, value)
class KMIPServerError(BaseError):
    """Base Exception for KMIP server errors."""
    def __init__(self, args):
        # Delegate to BaseError, which copies the arg dict onto attributes.
        super(KMIPServerError, self).__init__(args)
class KMIPServerZombieError(KMIPServerError):
    """KMIP server error for hung and persistent live KMIP servers."""

    def __init__(self, pid):
        # BaseError turns the dict entry into a self.message attribute.
        msg = 'KMIP server alive after termination: PID {0}'.format(pid)
        super(KMIPServerZombieError, self).__init__({'message': msg})

    def __str__(self):
        return self.message
class KMIPServerSuicideError(KMIPServerError):
    """KMIP server error for prematurely dead KMIP servers."""

    def __init__(self, pid):
        # BaseError turns the dict entry into a self.message attribute.
        msg = 'KMIP server dead prematurely: PID {0}'.format(pid)
        super(KMIPServerSuicideError, self).__init__({'message': msg})

    def __str__(self):
        return self.message
class InitError(BaseError):
    """Exception thrown for bad initializations."""
    def __init__(self, cls, exp, recv):
        # BaseError copies cls/exp/recv from locals() onto the instance.
        super(InitError, self).__init__(locals())

    def __str__(self):
        msg = "Tried to initialize {0} instance with bad type: "
        msg += "expected {1}, received {2}"
        return msg.format(self.cls, self.exp, self.recv)
class WriteValueError(BaseError):
    # Raised when a value being written fails validation.
    def __init__(self, cls, attr, value):
        # BaseError copies cls/attr/value from locals() onto the instance.
        super(WriteValueError, self).__init__(locals())

    def __str__(self):
        msg = "Tried to write {0}.{1} with invalid value: {2}"
        return msg.format(self.cls, self.attr, self.value)
class WriteTypeError(BaseError):
    # Raised when a value of the wrong type is written to an attribute.
    def __init__(self, cls, attr, value):
        # BaseError copies cls/attr/value from locals() onto the instance.
        super(WriteTypeError, self).__init__(locals())

    def __str__(self):
        msg = "Tried to write {0}.{1} with invalid type: {2}"
        return msg.format(self.cls, self.attr, self.value)
class WriteOverflowError(BaseError):
    # Raised when more bytes are written to an attribute than expected.
    def __init__(self, cls, attr, exp, recv):
        # BaseError copies cls/attr/exp/recv from locals() onto the instance.
        super(WriteOverflowError, self).__init__(locals())

    def __str__(self):
        msg = "Tried to write {0}.{1} with too many bytes: "
        msg += "expected {2}, received {3}"
        return msg.format(self.cls, self.attr, self.exp, self.recv)
class ReadValueError(BaseError):
    # Raised when a value read from a stream differs from what was expected.
    def __init__(self, cls, attr, exp, recv):
        # BaseError copies cls/attr/exp/recv from locals() onto the instance.
        super(ReadValueError, self).__init__(locals())

    def __str__(self):
        msg = "Tried to read {0}.{1}: expected {2}, received {3}"
        return msg.format(self.cls, self.attr, self.exp, self.recv)
class InvalidLengthError(ValueError):
    """Raised when a decoded length field does not match expectations."""

    def __init__(self, cls, exp, recv):
        message = "Invalid length read for {0}: expected {1}, received {2}".format(cls, exp, recv)
        super(InvalidLengthError, self).__init__(message)
class StreamNotEmptyError(BaseError):
    # Raised when decoding finishes but unread bytes remain in the stream.
    def __init__(self, cls, extra):
        # BaseError copies cls/extra from locals() onto the instance.
        super(StreamNotEmptyError, self).__init__(locals())

    def __str__(self):
        msg = "Invalid length used to read {0}, bytes remaining: {1}"
        return msg.format(self.cls, self.extra)
class StateTypeError(TypeError):
    """Raised when an object is initialized with a value of the wrong type."""

    def __init__(self, cls, exp, recv):
        template = ("Tried to initialize {0} instance with bad type: "
                    "expected {1}, received {2}")
        super(StateTypeError, self).__init__(template.format(cls, exp, recv))
class StateOverflowError(ValueError):
    """Raised when more bytes are written to an attribute than it can hold."""

    def __init__(self, cls, attr, exp, recv):
        template = ("Tried to write {0}.{1} with too many bytes: "
                    "expected {2}, received {3}")
        super(StateOverflowError, self).__init__(
            template.format(cls, attr, exp, recv))
| 34.221374 | 76 | 0.660941 |
acdf4e5395c6fd73f4b792841c081d0c19fada5c | 631 | py | Python | osspeak/recognition/actions/library/screengrid/rectangle.py | OSSpeak/OSSpeak | 327c38a37684165f87bf8d76ab2ca135b43b8ab7 | [
"MIT"
] | 1 | 2020-03-17T10:24:41.000Z | 2020-03-17T10:24:41.000Z | osspeak/recognition/actions/library/screengrid/rectangle.py | OSSpeak/OSSpeak | 327c38a37684165f87bf8d76ab2ca135b43b8ab7 | [
"MIT"
] | 12 | 2016-09-28T05:16:00.000Z | 2020-11-27T22:32:40.000Z | screengrid/rectangle.py | evfredericksen/screengrid | bba77c19c8785afe969f2eeb4e0c3abb7d4ca592 | [
"MIT"
] | null | null | null | import ctypes
import win32gui, win32con
TEXT_FORMAT = win32con.DT_CENTER | win32con.DT_NOCLIP | win32con.DT_SINGLELINE | win32con.DT_VCENTER
class Rectangle:
    """Axis-aligned screen rectangle with an optional centered caption."""

    def __init__(self, x: int, y: int, width: int, height: int, text=None):
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        # Caption drawn centered inside the rectangle (may be None).
        self.text = text

    def draw(self, device_context_handle):
        # Bottom-right corner of the rectangle in screen coordinates.
        x2, y2 = self.x + self.width, self.y + self.height
        # -1 length => text is NUL-terminated; TEXT_FORMAT centers the text
        # horizontally and vertically within the bounding rect.
        win32gui.DrawText(
            device_context_handle,
            self.text,
            -1,
            (self.x, self.y, x2, y2),
            TEXT_FORMAT
        )
acdf4fde2f70b2d191048a38146481436df1fe0a | 750 | py | Python | atomizer/project/urls.py | mfundontini/Python-Automation-Scripts | 560c3b74d23b99d1b8a398d535616154ab524121 | [
"BSD-3-Clause"
] | null | null | null | atomizer/project/urls.py | mfundontini/Python-Automation-Scripts | 560c3b74d23b99d1b8a398d535616154ab524121 | [
"BSD-3-Clause"
] | 5 | 2021-03-19T03:18:02.000Z | 2021-09-22T19:05:34.000Z | atomizer/project/urls.py | mfundontini/Python-Automation-Scripts | 560c3b74d23b99d1b8a398d535616154ab524121 | [
"BSD-3-Clause"
] | null | null | null | """atomizer URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
# Only the Django admin site is routed; app-specific URLconfs can be added
# here later via include() (see the module docstring above).
urlpatterns = [
    path('admin/', admin.site.urls),
]
| 34.090909 | 77 | 0.709333 |
acdf5080e300ead2d8cc468f50cd43cdb2bea881 | 3,225 | py | Python | python/locally_level_gaussian.py | gcgibson/ssvgd | 8f47dca7588a3ccbc13069860f342efcd5bbf644 | [
"MIT"
] | 1 | 2018-02-06T20:18:28.000Z | 2018-02-06T20:18:28.000Z | python/locally_level_gaussian.py | gcgibson/ssvgd | 8f47dca7588a3ccbc13069860f342efcd5bbf644 | [
"MIT"
] | null | null | null | python/locally_level_gaussian.py | gcgibson/ssvgd | 8f47dca7588a3ccbc13069860f342efcd5bbf644 | [
"MIT"
] | null | null | null | from autograd import numpy as np
from autograd import grad, jacobian
import numpy.matlib as nm
from svgd import SVGD
import sys
#from mpltools import style
#from mpltools import layout
#style.use('ggplot')
import matplotlib.pyplot as plt
# Reference formula kept from the author: the Gaussian observation
# log-density, -(1/(2*sigma^2))*(theta_i - y_t)^2 + log(1/sqrt(2*pi*sigma^2)).
#-(1.0/(2*observation_variance))*(theta_i - time_series[t])**2 + np.log(1.0/np.sqrt(np.pi*2*observation_variance))
observation_variance = .1  # variance of the Gaussian observation noise
transition_variance = 10  # variance of the Gaussian transition kernel
seasonality = 4  # period used to build the rotation matrix G below
# Harmonic (seasonal) rotation matrix. NOTE(review): G is not referenced
# anywhere else in this file -- presumably kept for a seasonal model variant.
G = np.matrix([[np.cos(2*np.pi/seasonality),np.sin(2*np.pi/seasonality)],[-np.sin(2*np.pi/seasonality),np.cos(2*np.pi/seasonality)]])
class StateSpaceModel:
    """Locally-level Gaussian state-space model evaluated per particle.

    Densities are written with autograd's numpy so ``dlnprob`` can
    differentiate them with respect to a particle value.
    """
    def lnprob_theta_i(self, theta_i, theta_t_minus_1, time_series,t):
        # Gaussian observation log-likelihood log N(y_t | theta_i, observation_variance).
        # (A previous comment said "poisson", but the expression is Gaussian.)
        lnprob_theta_i = np.log(1.0/(np.sqrt(2*np.pi*observation_variance)))+ -.5*(1.0/observation_variance)*((time_series[t] - theta_i )**2)
        # Transition term: mixture of Gaussian kernels centred on each
        # particle from the previous time step (not divided by the particle
        # count, so it is an unnormalised mixture).
        transition_sum = 0
        for theta_t_minus_1_i in theta_t_minus_1:
            transition_sum += 1.0/(np.sqrt(2*np.pi*transition_variance))*np.exp(-.5*(1.0/transition_variance)*((theta_i - theta_t_minus_1_i )**2))
        return (lnprob_theta_i+np.log(transition_sum))
    def dlnprob(self, theta_i,theta_t_minus_1,time_series, t):
        # Gradient of the per-particle log posterior via autograd.
        return (grad(self.lnprob_theta_i)(theta_i, theta_t_minus_1, time_series,t))
    def grad_overall(self, theta,theta_t_minus_1,time_series, t, iter_):
        # Stack per-particle gradients into one array. ``iter_`` is accepted
        # for the SVGD callback signature but unused here -- TODO confirm.
        return_matrix = []
        for theta_i in theta:
            return_matrix.append(self.dlnprob(theta_i,theta_t_minus_1 ,time_series,t))
        return np.array(return_matrix)
if __name__ == '__main__':
    # Run an SVGD-based particle filter over a time series supplied as
    # command-line arguments and print every particle value from every time
    # step, comma separated (the flat format is parsed by the caller).
    filtered_means = []
    filtered_covs = []
    total_thetas = []
    n_iter = 1000
    # Arguments may carry a trailing comma (e.g. pasted from an R vector),
    # which is stripped before conversion. Parsing stops at the first
    # argument that is not a number. (The original used a bare ``except``,
    # which also swallowed KeyboardInterrupt and masked real errors.)
    time_series = []
    i = 1
    while i < len(sys.argv):
        try:
            time_series.append(float(sys.argv[i].replace(",", "")))
            i += 1
        except ValueError:
            break
    model = StateSpaceModel()
    num_particles = 10
    # Initial particles drawn from N(-10, 1).
    x0 = np.random.normal(-10, 1, [num_particles, 1]).astype(float)
    # p(x_0 | y_0): the first update uses the initial particles as both the
    # current and the previous particle set.
    theta = SVGD().update(x0, 0, x0, time_series, model.grad_overall, n_iter=n_iter, stepsize=0.01)
    total_thetas.append(theta)
    filtered_means.append(np.mean(theta, axis=0)[0])
    filtered_covs.append(np.var(theta, axis=0)[0])
    for t in range(1, len(time_series)):
        theta = SVGD().update(theta, t, theta, time_series, model.grad_overall, n_iter=n_iter, stepsize=0.01)
        total_thetas.append(theta)
        # Filtered means/variances are computed for inspection; only the raw
        # particles are printed below.
        filtered_means.append(np.mean(theta, axis=0)[0])
        filtered_covs.append(np.var(theta, axis=0)[0])
    total_thetas = np.array(total_thetas)
    myList = ','.join(map(str, total_thetas.flatten()))
    print(myList)
| 34.677419 | 161 | 0.667907 |
acdf557afd7efd087273250fbc6b5a3584036aa8 | 6,543 | py | Python | futu/common/pb/KeepAlive_pb2.py | liteself/py-futu-api | a78f5b46f56d30fb82a42951823afea4b5ed1307 | [
"Apache-2.0"
] | 2 | 2020-03-18T09:54:03.000Z | 2020-05-15T08:13:33.000Z | futu/common/pb/KeepAlive_pb2.py | liteself/py-futu-api | a78f5b46f56d30fb82a42951823afea4b5ed1307 | [
"Apache-2.0"
] | 1 | 2020-04-21T02:42:32.000Z | 2020-04-21T02:42:32.000Z | futu/common/pb/KeepAlive_pb2.py | liteself/py-futu-api | a78f5b46f56d30fb82a42951823afea4b5ed1307 | [
"Apache-2.0"
] | 1 | 2021-02-17T17:46:36.000Z | 2021-02-17T17:46:36.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: KeepAlive.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import Common_pb2 as Common__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='KeepAlive.proto',
package='KeepAlive',
syntax='proto2',
serialized_pb=_b('\n\x0fKeepAlive.proto\x12\tKeepAlive\x1a\x0c\x43ommon.proto\"\x13\n\x03\x43\x32S\x12\x0c\n\x04time\x18\x01 \x02(\x03\"\x13\n\x03S2C\x12\x0c\n\x04time\x18\x01 \x02(\x03\"&\n\x07Request\x12\x1b\n\x03\x63\x32s\x18\x01 \x02(\x0b\x32\x0e.KeepAlive.C2S\"_\n\x08Response\x12\x15\n\x07retType\x18\x01 \x02(\x05:\x04-400\x12\x0e\n\x06retMsg\x18\x02 \x01(\t\x12\x0f\n\x07\x65rrCode\x18\x03 \x01(\x05\x12\x1b\n\x03s2c\x18\x04 \x01(\x0b\x32\x0e.KeepAlive.S2CB\x15\n\x13\x63om.futu.openapi.pb')
,
dependencies=[Common__pb2.DESCRIPTOR,])
_C2S = _descriptor.Descriptor(
name='C2S',
full_name='KeepAlive.C2S',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='time', full_name='KeepAlive.C2S.time', index=0,
number=1, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=44,
serialized_end=63,
)
_S2C = _descriptor.Descriptor(
name='S2C',
full_name='KeepAlive.S2C',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='time', full_name='KeepAlive.S2C.time', index=0,
number=1, type=3, cpp_type=2, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=65,
serialized_end=84,
)
_REQUEST = _descriptor.Descriptor(
name='Request',
full_name='KeepAlive.Request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='c2s', full_name='KeepAlive.Request.c2s', index=0,
number=1, type=11, cpp_type=10, label=2,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=86,
serialized_end=124,
)
_RESPONSE = _descriptor.Descriptor(
name='Response',
full_name='KeepAlive.Response',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='retType', full_name='KeepAlive.Response.retType', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=True, default_value=-400,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='retMsg', full_name='KeepAlive.Response.retMsg', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='errCode', full_name='KeepAlive.Response.errCode', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='s2c', full_name='KeepAlive.Response.s2c', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=126,
serialized_end=221,
)
_REQUEST.fields_by_name['c2s'].message_type = _C2S
_RESPONSE.fields_by_name['s2c'].message_type = _S2C
DESCRIPTOR.message_types_by_name['C2S'] = _C2S
DESCRIPTOR.message_types_by_name['S2C'] = _S2C
DESCRIPTOR.message_types_by_name['Request'] = _REQUEST
DESCRIPTOR.message_types_by_name['Response'] = _RESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
C2S = _reflection.GeneratedProtocolMessageType('C2S', (_message.Message,), dict(
DESCRIPTOR = _C2S,
__module__ = 'KeepAlive_pb2'
# @@protoc_insertion_point(class_scope:KeepAlive.C2S)
))
_sym_db.RegisterMessage(C2S)
S2C = _reflection.GeneratedProtocolMessageType('S2C', (_message.Message,), dict(
DESCRIPTOR = _S2C,
__module__ = 'KeepAlive_pb2'
# @@protoc_insertion_point(class_scope:KeepAlive.S2C)
))
_sym_db.RegisterMessage(S2C)
Request = _reflection.GeneratedProtocolMessageType('Request', (_message.Message,), dict(
DESCRIPTOR = _REQUEST,
__module__ = 'KeepAlive_pb2'
# @@protoc_insertion_point(class_scope:KeepAlive.Request)
))
_sym_db.RegisterMessage(Request)
Response = _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), dict(
DESCRIPTOR = _RESPONSE,
__module__ = 'KeepAlive_pb2'
# @@protoc_insertion_point(class_scope:KeepAlive.Response)
))
_sym_db.RegisterMessage(Response)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\023com.futu.openapi.pb'))
# @@protoc_insertion_point(module_scope)
| 30.574766 | 501 | 0.730399 |
acdf55b87e628faa9329ce9dcf4e7d9dc249b01a | 77 | py | Python | nbdev_template/__init__.py | Ai-Adventures/aiadv | 9a6a66c6fccdfbad24737835f4deb5dbaf888429 | [
"Apache-2.0"
] | 1 | 2020-09-27T02:22:01.000Z | 2020-09-27T02:22:01.000Z | nbdev_template/__init__.py | Ai-Adventures/aiadv | 9a6a66c6fccdfbad24737835f4deb5dbaf888429 | [
"Apache-2.0"
] | 1 | 2022-02-26T08:36:56.000Z | 2022-02-26T08:36:56.000Z | nbdev_template/__init__.py | Ai-Adventures/aiadv | 9a6a66c6fccdfbad24737835f4deb5dbaf888429 | [
"Apache-2.0"
] | null | null | null | __version__ = "0.0.2"
__all__ = ['URLs', 'untar_data']
from .core import * | 12.833333 | 32 | 0.636364 |
acdf5654522db06d48fb4799917923af7daa7759 | 1,400 | py | Python | Kits/mecco_Zen_10/lxserv/zen_doubleClick.py | 9bstudios/mecco_Zen | 6db932b00ac8d5ae2bc49c73de766cdc2f090ba9 | [
"MIT"
] | 6 | 2018-06-04T11:45:44.000Z | 2020-11-21T12:50:15.000Z | Kits/mecco_Zen_10/lxserv/zen_doubleClick.py | 9bstudios/mecco_Zen | 6db932b00ac8d5ae2bc49c73de766cdc2f090ba9 | [
"MIT"
] | null | null | null | Kits/mecco_Zen_10/lxserv/zen_doubleClick.py | 9bstudios/mecco_Zen | 6db932b00ac8d5ae2bc49c73de766cdc2f090ba9 | [
"MIT"
] | null | null | null | # python
import lx, lxu, modo
NAME_CMD = "zen.doubleClick"
def selectedIsMesh():
try:
return modo.Scene().selected[-1].type == 'mesh'
except IndexError:
return False
def setItemMode():
lx.eval('select.typeFrom item;vertex;polygon;edge;pivot;center;ptag true')
def get_mode():
sel_svc = lx.service.Selection()
selitype = sel_svc.CurrentType (None)
seltype = sel_svc.LookupName (selitype)
return seltype
class CMD_Zen_doubleClick(lxu.command.BasicCommand):
    """MODO command bound to double-click that cycles selection behavior.

    Behavior by current selection mode:
    - item (or no mode / 'link'): drop into polygon mode for meshes,
      otherwise select the item hierarchy.
    - polygon / vertex: grow the selection (select.connect) if anything is
      selected, otherwise return to item mode.
    - edge: select the edge loop if edges are selected, otherwise return to
      item mode.
    """
    def basic_Execute(self, msg, flags):
        mode = get_mode()
        if mode in ('item', None, 'link'):
            if selectedIsMesh():
                lx.eval("select.typeFrom polygon;vertex;edge;item;pivot;center;ptag true")
            else:
                lx.eval("select.itemHierarchy")
        elif mode == 'polygon':
            if lx.eval("query layerservice polys ? selected"):
                lx.eval("select.connect")
            else:
                setItemMode()
        elif mode == 'edge':
            if lx.eval("query layerservice edges ? selected"):
                lx.eval("select.loop")
            else:
                setItemMode()
        elif mode == 'vertex':
            if lx.eval("query layerservice verts ? selected"):
                lx.eval("select.connect")
            else:
                setItemMode()
# Register the command with MODO under NAME_CMD.
lx.bless(CMD_Zen_doubleClick, NAME_CMD)
| 26.415094 | 90 | 0.575714 |
acdf58eed43ab8b89dcbf5d782e00b669adb76a3 | 9,850 | py | Python | newtonnet/data/neighbors.py | THGLab/NewtonNet | fcf2af848a1c998bd08096dcefb58a5610eda03c | [
"MIT"
] | null | null | null | newtonnet/data/neighbors.py | THGLab/NewtonNet | fcf2af848a1c998bd08096dcefb58a5610eda03c | [
"MIT"
] | null | null | null | newtonnet/data/neighbors.py | THGLab/NewtonNet | fcf2af848a1c998bd08096dcefb58a5610eda03c | [
"MIT"
] | null | null | null | """
A compilation of modules that help to find closest neighbours of each atom in a molecule.
Each Molecule is represented as a dictionary with following keys:
- atoms: atomic positions with shape (n_atom, 3)
- z: atomic numbers with shape (n_atoms, 1)
- cell: unit cell with shape (3,3)
- atom_prop: atomic property with shape (n_atoms, n_atomic_prop)
- mol_prop: molecular property with shape (1, n_mol_prop)
"""
import numpy as np
from ase import Atoms
from ase.neighborlist import neighbor_list
from combust.utils import padaxis
class ExtensiveEnvironment(object):
    """
    Provide atomic environment of an array of atoms and their atomic numbers.
    No cutoff, No periodic boundary condition

    Parameters
    ----------
    max_n_neighbors: int, optional (default: None)
        maximum number of neighbors to pad arrays if they have less elements
        if None, it will be ignored (e.g., in case all atoms have same length)
    """
    def __init__(self, max_n_neighbors=None):
        # Store None as 0 so the padding comparison in get_environment is
        # always well defined.
        if max_n_neighbors is None:
            max_n_neighbors = 0
        self.max_n_neighbors = max_n_neighbors
    def _check_shapes(self, Rshape, Zshape):
        # Validate that positions (D, A, 3) and atomic numbers (D, A) agree.
        if Rshape[0] != Zshape[0]:
            msg = "@ExtensiveEnvironment: atoms and atomic_numbers must have same dimension 0 (n_data)."
            raise ValueError(msg)
        if Rshape[2] != 3:
            msg = "@ExtensiveEnvironment: atoms must have 3 coordinates at dimension 2."
            raise ValueError(msg)
        if Rshape[1] != Zshape[1]:
            msg = "@ExtensiveEnvironment: atoms and atomic_numbers must have same dimension 1 (n_atoms)."
            raise ValueError(msg)
    def get_environment(self, positions, atomic_numbers):
        """
        This function finds atomic environments extensively for each atom in the MD snapshot.

        Parameters
        ----------
        positions: ndarray
            A 3D array of atomic positions in XYZ coordinates with shape (D, A, 3), where
            D is number of snapshots of data and A is number of atoms per data point
        atomic_numbers: ndarray
            A 2D array of atomic numbers with shape (D, A)

        Returns
        -------
        ndarray: 3D array of neighbors with shape (D, A, A-1)
        ndarray: 3D array of neighbor mask with shape (D, A, A-1)
        ndarray: 2D array of atomic mask for atomic energies (D, A)
        None, None: placeholders so the return signature matches
            PeriodicEnvironment.get_environment (distances / distance
            vectors are not computed here) -- presumably; TODO confirm.
        """
        n_data = positions.shape[0] # D
        n_atoms = positions.shape[1] # A
        self._check_shapes(positions.shape, atomic_numbers.shape)
        # 2d array of all indices for all atoms in a single data point
        N = np.tile(np.arange(n_atoms), (n_atoms, 1)) # (A, A)
        # remove the diagonal self indices
        neighbors = N[~np.eye(n_atoms, dtype=bool)].reshape(n_atoms,
                                                            -1) # (A, A-1)
        neighbors = np.repeat(neighbors[np.newaxis, ...], n_data, axis=0) # (D, A, A-1)
        # mask based on zero atomic_numbers (Z == 0 marks padding atoms)
        mask = np.ones_like(atomic_numbers) #(D, A)
        mask[np.where(atomic_numbers == 0)] = 0
        # number of real (non-padding) atoms per data point
        max_atoms = np.sum(mask, axis=1)
        # a neighbor index is valid only if it points at a real atom of that
        # data point, and only for rows that are themselves real atoms
        neighbor_mask = (neighbors < np.tile(max_atoms.reshape(-1,1), n_atoms-1)[:,None,:]).astype('int')
        neighbor_mask *= mask[:,:,None] # (D,A,A-1)
        neighbors *= neighbor_mask # (D,A,A-1)
        # atomic numbers
        # atomic numbers are already in correct shape
        # NOTE(review): neighbor_mask is not padded alongside neighbors in
        # this branch, so the returned arrays differ in the last dimension
        # when padding is applied -- confirm whether callers rely on this.
        if n_atoms < self.max_n_neighbors:
            neighbors = padaxis(neighbors,
                                self.max_n_neighbors,
                                axis=-1,
                                pad_value=-1) # (D, A, N)
            atomic_numbers = padaxis(atomic_numbers,
                                     self.max_n_neighbors,
                                     axis=-1,
                                     pad_value=0) # (D, A, N)
        return neighbors, neighbor_mask, mask, None, None
class PeriodicEnvironment(object):
    """
    Provide atomic environment of an array of atoms and their atomic numbers
    with cutoff and periodic boundary condition.

    Parameters
    ----------
    max_n_neighbors: int, optional (default: None)
        maximum number of neighbors to pad arrays if they have less elements
        if None, it will be ignored (e.g., in case all atoms have same length)
    cutoff: float
        the cutoff value to be used for finding neighbors
    """
    def __init__(self, max_n_neighbors=None, cutoff=7.0):
        # Store None as 0 so the max() with the observed neighbor count in
        # get_environment is always well defined.
        if max_n_neighbors is None:
            max_n_neighbors = 0
        self.max_n_neighbors = max_n_neighbors
        self.cutoff = cutoff
    def _check_shapes(self, Rshape, Zshape):
        # Validate that positions (D, A, 3) and atomic numbers (D, A) agree.
        if Rshape[0] != Zshape[0]:
            msg = "@PeriodicEnvironment: atoms and atomic_numbers must have same dimension 0 (n_data)."
            raise ValueError(msg)
        if Rshape[2] != 3:
            msg = "@PeriodicEnvironment: atoms must have 3 coordinates at dimension 2."
            raise ValueError(msg)
        if Rshape[1] != Zshape[1]:
            msg = "@PeriodicEnvironment: atoms and atomic_numbers must have same dimension 1 (n_atoms)."
            raise ValueError(msg)
    def get_environment(self, positions, atomic_numbers, lattice):
        """
        This function finds atomic environments extensively for each atom in the MD snapshot using the ASE package.

        Parameters
        ----------
        positions: ndarray
            A 3D array of atomic positions in XYZ coordinates with shape (D, A, 3), where
            D is number of snapshots of data and A is number of atoms per data point
        atomic_numbers: ndarray
            A 2D array of atomic numbers with shape (D, A)
        lattice: ndarray
            A 2D array with shape (D, 9), where the second axis can be reshaped into (3x3) that
            represents the 3 lattice vectors

        Returns
        -------
        ndarray: 3D array of neighbors with shape (D, A, N), where N is either the maximum number of neighbors in
            current batch of data, or the predefined max_n_neighbors
        ndarray: 3D array of neighbor mask with shape (D, A, N)
        ndarray: 2D array of atomic mask for atomic energies (D, A)
        ndarray: 3D array of neighbor distances with shape (D, A, N)
        ndarray: 4D array of neighbor atom arrays with shape (D, A, N, 3)
        """
        n_data = positions.shape[0] # D
        n_atoms = positions.shape[1] # A
        lattice = lattice.reshape((-1, 3, 3))
        self._check_shapes(positions.shape, atomic_numbers.shape)
        # Ragged per-atom neighbor data collected from ASE before being
        # packed into fixed-size, zero-padded arrays below.
        staggered_neighbors = [[[] for _ in range(n_atoms)] for _ in range(n_data)]
        staggered_distances = [[[] for _ in range(n_atoms)] for _ in range(n_data)]
        staggered_distance_vectors = [[[] for _ in range(n_atoms)] for _ in range(n_data)]
        neighbor_count = np.zeros((n_data, n_atoms), dtype=int)
        # mask based on zero atomic_numbers (Z == 0 marks padding atoms)
        mask = np.ones_like(atomic_numbers) #(D, A)
        mask[np.where(atomic_numbers == 0)] = 0
        for idx_data in range(n_data):
            # Build the ASE molecule from real (non-padding) atoms only.
            molecule_Rs = positions[idx_data, mask[idx_data] == 1]
            molecule_Zs = atomic_numbers[idx_data, mask[idx_data] == 1]
            molecule_lattice = lattice[idx_data]
            ase_molecule = Atoms(molecule_Zs, positions=molecule_Rs, cell=molecule_lattice, pbc=True)
            # "ijdD": center index, neighbor index, distance, distance vector.
            for i, j, dist, vec in zip(*neighbor_list("ijdD", ase_molecule, self.cutoff)):
                staggered_neighbors[idx_data][i].append(j)
                staggered_distances[idx_data][i].append(dist)
                staggered_distance_vectors[idx_data][i].append(vec)
                neighbor_count[idx_data][i] += 1
        # BUGFIX: this previously called np.max(a, b), whose second
        # positional argument is the *axis*, not a second value to compare.
        # The intent is the larger of the observed neighbor count and the
        # configured minimum width.
        max_N = max(int(neighbor_count.max()), self.max_n_neighbors)
        neighbors = np.zeros((n_data, n_atoms, max_N))
        distances = np.zeros((n_data, n_atoms, max_N))
        distance_vectors = np.zeros((n_data, n_atoms, max_N, 3))
        neighbor_mask = np.zeros((n_data, n_atoms, max_N))
        for i in range(n_data):
            for j in range(n_atoms):
                if neighbor_count[i, j] > 0:
                    neighbors[i, j, :neighbor_count[i, j]] = staggered_neighbors[i][j]
                    distances[i, j, :neighbor_count[i, j]] = staggered_distances[i][j]
                    distance_vectors[i, j, :neighbor_count[i, j]] = staggered_distance_vectors[i][j]
                    neighbor_mask[i, j, :neighbor_count[i, j]] = 1
        return neighbors, neighbor_mask, mask, distances, distance_vectors
| 42.094017 | 121 | 0.589137 |
acdf59d619a1d7866c3abe041370e15154cbfb75 | 1,094 | py | Python | Problem_9_hard.py | alucardthefish/DailyCodingProblem | c2c3e1e3d30765e6e770bdda19795cf6d0a94f06 | [
"MIT"
] | null | null | null | Problem_9_hard.py | alucardthefish/DailyCodingProblem | c2c3e1e3d30765e6e770bdda19795cf6d0a94f06 | [
"MIT"
] | null | null | null | Problem_9_hard.py | alucardthefish/DailyCodingProblem | c2c3e1e3d30765e6e770bdda19795cf6d0a94f06 | [
"MIT"
] | null | null | null | # This problem was asked by Airbnb.
#
# Given a list of integers, write a function that returns the largest sum of non-adjacent numbers.
# Numbers can be 0 or negative.
# For example, [2, 4, 6, 2, 5] should return 13, since we pick 2, 6, and 5. [5, 1, 1, 5] should return 10,
# since we pick 5 and 5.
# Follow-up: Can you do this in O(N) time and constant space?
# Input examples with their expected outputs (best sum over any set of
# pairwise non-adjacent elements).
input_1 = [2, 4, 6, 2, 5] # return 13
input_2 = [5, 1, 1, 5] # return 10
input_3 = [2, 14, 6, 2, 15] # return 29
input_4 = [2, 5, 11, 8, 3] # return 16
input_5 = [90, 15, 10, 30, 100] # return 200
input_6 = [29, 51, 8, 10, 43, 28] # return 94
def largest_sum_adj(arr):
    """Return the largest sum of pairwise non-adjacent elements of ``arr``.

    Picking no elements at all is allowed, so all-negative input yields 0
    (the problem statement allows 0 and negative numbers). Runs in O(n)
    time and O(1) extra space, and does not mutate the input list.

    BUGFIX: the previous in-place DP seeded only arr[0] + arr[2] and never
    considered arr[1] alone, so e.g. [1, 100, 1] returned 2 instead of 100;
    it also clobbered the caller's list.

    Lists of length <= 2 are rejected with a message and return 0, matching
    the original guard.
    """
    result = 0
    if len(arr) > 2:
        # incl: best sum of a selection that includes the previous element.
        # excl: best sum of a selection that excludes the previous element.
        incl, excl = 0, 0
        for value in arr:
            incl, excl = excl + value, max(incl, excl)
        result = max(incl, excl)
    else:
        print("The length of input list must be greater than 2")
    return result
| 30.388889 | 106 | 0.627057 |
acdf5a47b647bfba8d1327fe7d6886aa9aedb504 | 1,192 | py | Python | main.py | priyabrs/music_time_machine | 3ab7145510d5630ae5c00f5d4d99e165344f110b | [
"MIT"
] | null | null | null | main.py | priyabrs/music_time_machine | 3ab7145510d5630ae5c00f5d4d99e165344f110b | [
"MIT"
] | null | null | null | main.py | priyabrs/music_time_machine | 3ab7145510d5630ae5c00f5d4d99e165344f110b | [
"MIT"
] | null | null | null | from billboard_scrapper import ( get_billboard_url_for_date, get_url_response_text, get_top_100_dict )
from spotifyauth import SpotifyAuth
from datetime import date
from config import config
class DateException(Exception):
    """Raised when the user-supplied date string cannot be parsed."""
    pass
def get_user_date() -> date:
    """Prompt for a YYYY-MM-DD date and return it as a datetime.date.

    An empty input is returned unchanged (falsy), preserving the original
    behavior; callers should check for it. Raises DateException (chained to
    the underlying ValueError) for malformed input. Unlike the original,
    input with extra '-'-separated components (e.g. "2020-1-2-3") is now
    rejected instead of silently truncated.
    """
    user_date = input('what year you would like to travel to? Please enter the date(YYYY-MM-DD): ')
    if user_date:
        try:
            # Exactly three components required; int() and date() both
            # raise ValueError on bad values, as does a wrong-sized unpack.
            year, month, day = (int(value) for value in user_date.split('-'))
            user_date = date(year, month, day)
        except ValueError as ex:
            raise DateException('Entered date format is wrong!!!. Please enter date in correct format(YYYY-MM-DD)') from ex
    return user_date
def main() -> None:
    """Build a Spotify playlist from the Billboard Hot 100 of a user-chosen date."""
    spotify = SpotifyAuth()
    travel_date = get_user_date()
    chart_page = get_url_response_text(get_billboard_url_for_date(travel_date))
    chart = get_top_100_dict(chart_page, name=config.billboard_tag_name, class_=config.billboard_class)
    uris = spotify.get_song_uris(chart.values(), travel_date.year)
    spotify.setup_playlist(uris, str(travel_date))


if __name__ == '__main__':
    main()
acdf5c2f76ff70b3081f00ed1d5674c83a6d2eee | 4,724 | py | Python | tests/test_TimeUtils.py | djw8605/gracc-reporting | f6e674d22bce8c150792f3badbb89ad15aabf595 | [
"Apache-2.0"
] | null | null | null | tests/test_TimeUtils.py | djw8605/gracc-reporting | f6e674d22bce8c150792f3badbb89ad15aabf595 | [
"Apache-2.0"
] | null | null | null | tests/test_TimeUtils.py | djw8605/gracc-reporting | f6e674d22bce8c150792f3badbb89ad15aabf595 | [
"Apache-2.0"
] | null | null | null | """Unit tests for TimeUtils"""
import unittest
from datetime import datetime, date
from dateutil import tz
import gracc_reporting.TimeUtils as TimeUtils
class TestParseDatetime(unittest.TestCase):
    """Tests for TimeUtils.parse_datetime"""
    # Shared fixtures: the same wall-clock instant as a date, a naive
    # datetime, and a UTC-aware datetime. NOTE(review): several expected
    # values below are computed with tz.tzlocal(), so these tests depend on
    # the machine's local timezone matching at run time.
    fail_string = "this should not parse correctly"
    date_local = date(2018, 3, 27)
    datetime_local = datetime(2018, 3, 27, 16, 00, 00)
    datetime_utc = datetime(2018, 3, 27, 16, 00, 00).replace(tzinfo=tz.tzutc())
    def test_none(self):
        """If we pass in None, we should get None back"""
        self.assertIsNone(TimeUtils.parse_datetime(None))
    def test_utc_time_control(self):
        """This should just return itself"""
        answer = self.datetime_utc
        self.assertEqual(TimeUtils.parse_datetime(self.datetime_utc, utc=True),
                         answer)
    def test_local_time_control(self):
        """Take our local time, transform it to UTC"""
        answer = self.datetime_local.replace(tzinfo=tz.tzlocal()).astimezone(
            tz.tzutc())
        self.assertEqual(TimeUtils.parse_datetime(self.datetime_local), answer)
    def test_date_parse(self):
        """Parse a datetime.date object into a datetime.datetime object"""
        # A bare date should be treated as local midnight, then converted.
        answer = datetime(2018, 3, 27, 00, 00, 00).replace(
            tzinfo=tz.tzlocal()).astimezone(tz.tzutc())
        self.assertEqual(TimeUtils.parse_datetime(self.date_local), answer)
    def test_valid_datestring_parse(self):
        """If we pass in a date in a standard form, it should get parsed"""
        answer = self.datetime_utc
        # Several common textual formats for the same instant.
        in_dates = ("2018 Mar 27 16:00:00 UTC",
                    "2018-03-27 16:00:00 UTC",
                    "Tue Mar 27 16:00:00 UTC 2018")
        for in_date in in_dates:
            self.assertEqual(TimeUtils.parse_datetime(
                in_date, utc=True), answer)
    def test_fail_parse(self):
        """Invalid time string should fail to parse"""
        self.assertRaises(Exception, TimeUtils.parse_datetime, self.fail_string)
class TestEpochToDatetime(unittest.TestCase):
    """Test TimeUtils.epoch_to_datetime"""
    # NOTE(review): datetime_time is the *local-time* rendering of
    # epoch_time on the machine these constants were created on;
    # test_the_test verifies the pair is consistent at run time.
    epoch_time = 1522253329
    datetime_time = datetime(2018, 3, 28, 11, 8, 49)
    def test_the_test(self):
        """Make sure our test constants are equivalent"""
        self.assertEqual(
            datetime.fromtimestamp(self.epoch_time), self.datetime_time)
    def test_return_none(self):
        """If we pass in None, we should get back None"""
        self.assertIsNone(TimeUtils.epoch_to_datetime(None))
    def test_control_epoch(self):
        """If we pass in self.epoch_time, we should get self.datetime_time"""
        answer = self.datetime_time.replace(
            tzinfo=tz.tzlocal()).astimezone(tz=tz.tzutc())
        self.assertEqual(TimeUtils.epoch_to_datetime(self.epoch_time), answer)
    def test_units(self):
        """If we specify a valid unit, the correct conversion
        should take place"""
        answer = self.datetime_time.replace(
            tzinfo=tz.tzlocal()).astimezone(tz=tz.tzutc())
        conversions = {'second': 1, 'millisecond': 1e3, 'microsecond': 1e6}
        # BUGFIX: the original iterated with dict.iteritems(), which only
        # exists on Python 2; dict.items() behaves the same on both 2 and 3.
        units_inputs = {}
        for unit, factor in conversions.items():
            units_inputs[unit] = self.epoch_time * factor
        for unit_name, value in units_inputs.items():
            self.assertEqual(TimeUtils.epoch_to_datetime(value, unit=unit_name), answer)
    def test_unit_fail(self):
        """Raise InvalidUnitError if invalid unit is passed in"""
        self.assertRaises(TimeUtils.InvalidUnitError,
                          TimeUtils.epoch_to_datetime, self.epoch_time,
                          'hours')
class TestGetEpochTimeRangeUtcms(unittest.TestCase):
    """Test TimeUtils.get_epoch_time_range_utc_ms"""
    start = datetime(2018, 3, 27, 16, 8, 49)
    end = datetime(2018, 3, 28, 16, 8, 49)
    def test_invalid_args_type(self):
        """Raise generic Exception if we pass in invalid args"""
        # Strings are not datetimes, so the call must blow up.
        bad_args = ('hello', 'world')
        self.assertRaises(Exception,
                          TimeUtils.get_epoch_time_range_utc_ms, *bad_args)
    def test_invalid_args_value(self):
        """Raise AssertionError if we pass in bad values. Switching
        start and end should trip this"""
        # Note the reversed order: end before start.
        self.assertRaises(AssertionError,
                          TimeUtils.get_epoch_time_range_utc_ms,
                          self.end, self.start)
    def test_control_epoch_range(self):
        """Return epoch time range in ms for valid input range"""
        expected_range = (1522166929000, 1522253329000)
        actual_range = TimeUtils.get_epoch_time_range_utc_ms(self.start, self.end)
        self.assertTupleEqual(actual_range, expected_range)
# Allow this test module to be executed directly with `python <module>.py`.
if __name__ == '__main__':
    unittest.main()
| 39.366667 | 98 | 0.654107 |
acdf5d3100a82252433ce6847a5e39ee882c778a | 8,036 | py | Python | pipeline_config.py | jessicachung/rna_seq_pipeline | 7bc691c237b2520f278ccec4e0f5d4f206c7e7ae | [
"MIT"
] | 1 | 2015-11-12T12:31:40.000Z | 2015-11-12T12:31:40.000Z | pipeline_config.py | jessicachung/rna_seq_pipeline | 7bc691c237b2520f278ccec4e0f5d4f206c7e7ae | [
"MIT"
] | null | null | null | pipeline_config.py | jessicachung/rna_seq_pipeline | 7bc691c237b2520f278ccec4e0f5d4f206c7e7ae | [
"MIT"
] | null | null | null | #---------------------------------
# PIPELINE RUN
#---------------------------------
# The configuration settings to run the pipeline. These options are overwritten
# if a new setting is specified as an argument when running the pipeline.
# These settings include:
# - logDir: The directory where the batch queue scripts are stored, along with
# stdout and stderr dumps after the job is run.
# - logFile: Log file in logDir which all commands submitted are stored.
# - style: the style which the pipeline runs in. One of:
# - 'print': prints the stages which will be run to stdout,
# - 'run': runs the pipeline until the specified stages are finished, and
# - 'flowchart': outputs a flowchart of the pipeline stages specified and
# their dependencies.
# - procs: the number of python processes to run simultaneously. This
# determines the maximum parallelism of the pipeline. For distributed jobs
# it also constrains the maximum total jobs submitted to the queue at any one
# time.
# - verbose: one of 0 (quiet), 1 (normal), 2 (chatty).
# - end: the desired tasks to be run. Rubra will also run all tasks which are
# dependencies of these tasks.
# - force: tasks which will be forced to run, regardless of timestamps.
# - rebuild: one of 'fromstart','fromend'. Whether to calculate which
# dependencies will be rerun by working back from an end task to the latest
# up-to-date task, or forward from the earliest out-of-date task. 'fromstart'
# is the most conservative and commonly used as it brings all intermediate
# tasks up to date.
# - manager: "pbs" or "slurm"
# Rubra run settings; every key is documented in the header comment above.
pipeline = dict(
    logDir="log",
    logFile="pipeline_commands.log",
    style="print",
    procs=16,
    verbose=2,
    end=["fastQCSummary", "voom", "edgeR", "qcSummary"],
    force=[],
    rebuild="fromstart",
    manager="slurm",
)
# This option specifies whether or not you are using VLSCI's Merri or Barcoo
# cluster. If True, this changes java's tmpdir to the job's tmp dir on
# /scratch ($TMPDIR) instead of using the default /tmp which has limited space.
using_merri = True
# Optional parameter governing how Ruffus determines which part of the
# pipeline is out-of-date and needs to be re-run. If set to False, Ruffus
# will work back from the end target tasks and only execute the pipeline
# after the first up-to-date tasks that it encounters.
# Warning: Use with caution! If you don't understand what this option does,
# keep this option as True.
maximal_rebuild_mode = True
#---------------------------------
# CONFIG
#---------------------------------
# Name of analysis. Changing the name will create new sub-directories for
# voom, edgeR, and cuffdiff analysis.
analysis_name = "analysis_v1"
# The directory containing *.fastq.gz read files.
raw_seq_dir = "/path_to_project/fastq_files/"
# Path to the CSV file with sample information regarding condition and
# covariates if available.
samples_csv = "/path_to_project/fastq_files/samples.csv"
# Path to the CSV file with which comparisons to make.
comparisons_csv = "/path_to_project/fastq_files/comparisons.csv"
# The output directory.
output_dir = "/path_to_project/results/"
# Sequencing platform for read group information.
platform = "Illumina"
# If the experiment is paired-end or single-end: True (PE) or False (SE).
paired_end = False
# Whether the experiment is strand specific: "yes", "no", or "reverse".
# Passed through to HTSeq-style stranded counting.
stranded = "no"
#---------------------------------
# REFERENCE FILES
#---------------------------------
# Most reference files can be obtained from the Illumina iGenomes project:
# http://cufflinks.cbcb.umd.edu/igenomes.html
# Bowtie 2 index files: *.1.bt2, *.2.bt2, *.3.bt2, *.4.bt2, *.rev.1.bt2,
# *.rev.2.bt2. Supply the index prefix, not a file path.
genome_ref = "/vlsci/VR0002/shared/Reference_Files/Indexed_Ref_Genomes/bowtie_Indexed/human_g1k_v37"
# Genome reference FASTA. Also needs an indexed genome (.fai) and dictionary
# (.dict) file in the same directory.
genome_ref_fa = "/vlsci/VR0002/shared/Reference_Files/Indexed_Ref_Genomes/bowtie_Indexed/human_g1k_v37.fa"
# Gene set reference file (.gtf). Recommend using the GTF file obtained from
# Ensembl as Ensembl gene IDs are used for annotation (if specified).
gene_ref = "/vlsci/VR0002/shared/Reference_Files/Indexed_Ref_Genomes/TuxedoSuite_Ref_Files/Homo_sapiens/Ensembl/GRCh37/Annotation/Genes/genes.gtf"
# Either a rRNA reference fasta (ending in .fasta or .fa) or an GATK interval
# file (ending in .list) containing rRNA intervals to calculate the rRNA
# content. Can set as False if not available.
# rrna_ref = "/vlsci/VR0002/shared/Reference_Files/rRNA/human_all_rRNA.fasta"
rrna_ref = "/vlsci/VR0002/shared/jchung/human_reference_files/human_rRNA.list"
# Optional tRNA and rRNA sequences to filter out in Cuffdiff (.gtf or .gff).
# Set as False if not provided.
cuffdiff_mask_file = False
#---------------------------------
# TRIMMOMATIC PARAMETERS
#---------------------------------
# Parameters for Trimmomatic (a tool for trimming Illumina reads).
# http://www.usadellab.org/cms/index.php?page=trimmomatic
# Path of a FASTA file containing adapter sequences used in sequencing.
adapter_seq = "/vlsci/VR0002/shared/jchung/human_reference_files/TruSeqAdapters.fa"
# The maximum mismatch count which will still allow a full match to be
# performed.
seed_mismatches = 2
# How accurate the match between the two 'adapter ligated' reads must be for
# PE palindrome read alignment.
# NOTE(review): variable name misspells "palindrome", but renaming would break
# every script that imports this config — leave as is.
palendrome_clip_threshold = 30
# How accurate the match between any adapter etc. sequence must be against a
# read.
simple_clip_threshold = 10
# The minimum quality needed to keep a base and the minimum length of reads to
# be kept. Appended verbatim to the Trimmomatic command line.
extra_parameters = "LEADING:3 TRAILING:3 SLIDINGWINDOW:4:15 MINLEN:36"
# Output Trimmomatic log file
write_trimmomatic_log = True
#---------------------------------
# R PARAMETERS
#---------------------------------
# Get annotations from Ensembl BioMart. GTF file needs to use IDs from Ensembl.
# Set as False to skip annotation, else
# provide the name of the dataset that will be queried. Attributes to be
# obtained include gene symbol, chromosome name, description, and gene biotype.
# Commonly used datasets:
#   human: "hsapiens_gene_ensembl"
#   mouse: "mmusculus_gene_ensembl"
#   rat:   "rnorvegicus_gene_ensembl"
# You can list all available datasets in R by using the listDatasets fuction:
#   > library(biomaRt)
#   > listDatasets(useMart("ensembl"))
# The gene symbol is obtained from the attribute "hgnc_symbol" (human) or
# "mgi_symbol" (mice/rats) if available. If not, the "external_gene_id" is used
# to obtain the gene symbol. You can change this by editing the script:
# scripts/combine_and_annotate.r
annotation_dataset = "hsapiens_gene_ensembl"
#---------------------------------
# SCRIPT PATHS
#---------------------------------
# Paths to other wrapper scripts needed to run the pipeline. Make sure these
# paths are relative to the directory where you plan to run the pipeline in or
# change them to absolute paths.
html_index_script = "scripts/html_index.py"
index_script = "scripts/build_index.sh"
tophat_script = "scripts/run_tophat.sh"
merge_tophat_script = "scripts/merge_tophat.sh"
fix_tophat_unmapped_reads_script = "scripts/fix_tophat_unmapped_reads.py"
htseq_script = "scripts/run_htseq.sh"
fastqc_parse_script = "scripts/fastqc_parse.py"
qc_parse_script = "scripts/qc_parse.py"
alignment_stats_script = "scripts/alignment_stats.sh"
combine_and_annotate_script = "scripts/combine_and_annotate.R"
de_analysis_script = "scripts/de_analysis.R"
#---------------------------------
# PROGRAM PATHS
#---------------------------------
# Absolute paths to the third-party JARs invoked by the pipeline stages.
trimmomatic_path = "/usr/local/trimmomatic/0.30/trimmomatic-0.30.jar"
reorder_sam_path = "/usr/local/picard/1.69/lib/ReorderSam.jar"
mark_duplicates_path = "/usr/local/picard/1.69/lib/MarkDuplicates.jar"
rnaseqc_path = "/usr/local/rnaseqc/1.1.7/RNA-SeQC_v1.1.7.jar"
add_or_replace_read_groups_path = "/usr/local/picard/1.69/lib/AddOrReplaceReadGroups.jar"
| 40.585859 | 146 | 0.713166 |
acdf5e61fdedaf5f85186240d8a9d95f41cf5ad3 | 1,408 | py | Python | lab_7/lab_5/boards/views.py | jennifernolan/Software-for-the-Global-Market | 9a219dd0c0ceb284b3458cd7ad3fe103859fbfe8 | [
"MIT"
] | null | null | null | lab_7/lab_5/boards/views.py | jennifernolan/Software-for-the-Global-Market | 9a219dd0c0ceb284b3458cd7ad3fe103859fbfe8 | [
"MIT"
] | null | null | null | lab_7/lab_5/boards/views.py | jennifernolan/Software-for-the-Global-Market | 9a219dd0c0ceb284b3458cd7ad3fe103859fbfe8 | [
"MIT"
] | null | null | null | from django.http import HttpResponse
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from django.shortcuts import render, redirect, get_object_or_404
from .forms import NewTopicForm
from .models import Board, Topic, Post
def home(request):
    """Render the landing page listing every discussion board."""
    all_boards = Board.objects.all()
    context = {'boards': all_boards}
    return render(request, 'home.html', context)
def board_topics(request, pk):
    """Render the topic list for one board; 404 on an unknown pk."""
    board = get_object_or_404(Board, pk=pk)
    context = {'board': board}
    return render(request, 'topics.html', context)
def new_topic(request, pk):
    """Display and process the form for starting a new topic on a board."""
    board = get_object_or_404(Board, pk=pk)
    user = User.objects.first()  # TODO: get the currently logged in user
    if request.method != 'POST':
        # Initial GET request: show an unbound form.
        form = NewTopicForm()
        return render(request, 'new_topic.html', {'board': board, 'form': form})
    form = NewTopicForm(request.POST)
    if form.is_valid():
        topic = form.save(commit=False)
        topic.board = board
        topic.starter = user
        topic.save()
        Post.objects.create(
            message=form.cleaned_data.get('message'),
            topic=topic,
            created_by=user,
        )
        return redirect('board_topics', pk=board.pk)  # TODO: redirect to the created topic page
    # Invalid POST: re-render with the bound form so validation errors show.
    return render(request, 'new_topic.html', {'board': board, 'form': form})
def testlang(request):
    """Return a translated greeting, used to exercise i18n."""
    greeting = _('Welcome to the discussion board!')
    return HttpResponse(greeting)
acdf5fc93553057fee6ec93a8422c9af6ce1d20b | 6,040 | py | Python | auditlog/registry.py | stephanpoetschner/django-auditlog | bf93089a2b21eaa77eddbe25f770f5ad211f3abf | [
"MIT"
] | null | null | null | auditlog/registry.py | stephanpoetschner/django-auditlog | bf93089a2b21eaa77eddbe25f770f5ad211f3abf | [
"MIT"
] | null | null | null | auditlog/registry.py | stephanpoetschner/django-auditlog | bf93089a2b21eaa77eddbe25f770f5ad211f3abf | [
"MIT"
] | 1 | 2021-08-23T12:49:58.000Z | 2021-08-23T12:49:58.000Z | from collections import defaultdict
from typing import Callable, Collection, Dict, List, Optional, Tuple
from django.db.models import Model
from django.db.models.base import ModelBase
from django.db.models.signals import (
ModelSignal,
m2m_changed,
post_delete,
post_save,
pre_save,
)
DispatchUID = Tuple[int, int, int]
class AuditlogModelRegistry(object):
    """
    A registry that keeps track of the models that use Auditlog to track changes.

    Registering a model connects the configured Django signals (create/update/
    delete and, optionally, m2m_changed) so that mutations are logged.
    """
    def __init__(
        self,
        create: bool = True,
        update: bool = True,
        delete: bool = True,
        m2m: bool = True,
        custom: Optional[Dict[ModelSignal, Callable]] = None,
    ):
        # Imported here (not at module level) to avoid a circular import.
        from auditlog.receivers import log_create, log_delete, log_update
        # model class -> field-selection options passed at registration time.
        self._registry = {}
        # signal -> receiver, connected/disconnected per registered model.
        self._signals = {}
        # model class -> {m2m field name -> receiver}; kept so the exact same
        # receiver objects can be disconnected later.
        self._m2m_signals = defaultdict(dict)
        if create:
            self._signals[post_save] = log_create
        if update:
            self._signals[pre_save] = log_update
        if delete:
            self._signals[post_delete] = log_delete
        self._m2m = m2m
        if custom is not None:
            self._signals.update(custom)
    def register(
        self,
        model: ModelBase = None,
        include_fields: Optional[List[str]] = None,
        exclude_fields: Optional[List[str]] = None,
        mapping_fields: Optional[Dict[str, str]] = None,
        m2m_fields: Optional[Collection[str]] = None,
    ):
        """
        Register a model with auditlog. Auditlog will then track mutations on this model's instances.

        Can be used either as a plain call (``auditlog.register(MyModel)``) or
        as a class decorator (``@auditlog.register()``).

        :param model: The model to register.
        :param include_fields: The fields to include. Implicitly excludes all other fields.
        :param exclude_fields: The fields to exclude. Overrides the fields to include.
        :param mapping_fields: Mapping from field names to strings in diff.
        :param m2m_fields: The fields to map as many to many.
        """
        if include_fields is None:
            include_fields = []
        if exclude_fields is None:
            exclude_fields = []
        if mapping_fields is None:
            mapping_fields = {}
        if m2m_fields is None:
            m2m_fields = set()
        def registrar(cls):
            """Register models for a given class."""
            if not issubclass(cls, Model):
                raise TypeError("Supplied model is not a valid model.")
            self._registry[cls] = {
                "include_fields": include_fields,
                "exclude_fields": exclude_fields,
                "mapping_fields": mapping_fields,
                "m2m_fields": m2m_fields,
            }
            self._connect_signals(cls)
            # We need to return the class, as the decorator is basically
            # syntactic sugar for:
            # MyClass = auditlog.register(MyClass)
            return cls
        if model is None:
            # If we're being used as a decorator, return a callable with the
            # wrapper.
            return lambda cls: registrar(cls)
        else:
            # Otherwise, just register the model.
            registrar(model)
    def contains(self, model: ModelBase) -> bool:
        """
        Check if a model is registered with auditlog.

        :param model: The model to check.
        :return: Whether the model has been registered.
        :rtype: bool
        """
        return model in self._registry
    def unregister(self, model: ModelBase) -> None:
        """
        Unregister a model with auditlog. This will not affect the database.

        Silently does nothing if the model was never registered.

        :param model: The model to unregister.
        """
        try:
            del self._registry[model]
        except KeyError:
            pass
        else:
            self._disconnect_signals(model)
    def get_models(self) -> List[ModelBase]:
        """Get a list of all registered models."""
        return list(self._registry.keys())
    def get_model_fields(self, model: ModelBase):
        # Returns copies, so callers cannot mutate the registry's state.
        # Note: m2m_fields is intentionally not part of this mapping.
        return {
            "include_fields": list(self._registry[model]["include_fields"]),
            "exclude_fields": list(self._registry[model]["exclude_fields"]),
            "mapping_fields": dict(self._registry[model]["mapping_fields"]),
        }
    def _connect_signals(self, model):
        """
        Connect signals for the model.
        """
        from auditlog.receivers import make_log_m2m_changes
        for signal, receiver in self._signals.items():
            signal.connect(
                receiver,
                sender=model,
                dispatch_uid=self._dispatch_uid(signal, receiver),
            )
        if self._m2m:
            for field_name in self._registry[model]["m2m_fields"]:
                # One receiver per m2m field; connected to the through model
                # because that's the sender of m2m_changed.
                receiver = make_log_m2m_changes(field_name)
                self._m2m_signals[model][field_name] = receiver
                field = getattr(model, field_name)
                m2m_model = getattr(field, "through")
                m2m_changed.connect(
                    receiver,
                    sender=m2m_model,
                    dispatch_uid=self._dispatch_uid(m2m_changed, receiver),
                )
    def _disconnect_signals(self, model):
        """
        Disconnect signals for the model.
        """
        for signal, receiver in self._signals.items():
            signal.disconnect(
                sender=model, dispatch_uid=self._dispatch_uid(signal, receiver)
            )
        for field_name, receiver in self._m2m_signals[model].items():
            field = getattr(model, field_name)
            m2m_model = getattr(field, "through")
            m2m_changed.disconnect(
                sender=m2m_model,
                dispatch_uid=self._dispatch_uid(m2m_changed, receiver),
            )
        del self._m2m_signals[model]
    def _dispatch_uid(self, signal, receiver) -> DispatchUID:
        """Generate a dispatch_uid which is unique for a combination of self, signal, and receiver."""
        return id(self), id(signal), id(receiver)
auditlog = AuditlogModelRegistry()
| 33.005464 | 102 | 0.590563 |
acdf5fdf0b9891324f7cbcee3e2ba178f93e9310 | 8,064 | py | Python | tests/pipeline/nodes/base/test_weights_downloader_mixin.py | ericleehy/PeekingDuck | 8cf1be842235fa60bac13bc466cac09747a780ea | [
"Apache-2.0"
] | 1 | 2021-12-02T05:15:58.000Z | 2021-12-02T05:15:58.000Z | tests/pipeline/nodes/base/test_weights_downloader_mixin.py | ericleehy/PeekingDuck | 8cf1be842235fa60bac13bc466cac09747a780ea | [
"Apache-2.0"
] | null | null | null | tests/pipeline/nodes/base/test_weights_downloader_mixin.py | ericleehy/PeekingDuck | 8cf1be842235fa60bac13bc466cac09747a780ea | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 AI Singapore
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import logging
import tempfile
from pathlib import Path
from unittest import TestCase, mock
import pytest
import yaml
from peekingduck.pipeline.nodes.base import (
PEEKINGDUCK_WEIGHTS_SUBDIR,
WeightsDownloaderMixin,
)
from tests.conftest import PKD_DIR, do_nothing
@pytest.fixture(
    name="weights_model", params=(PKD_DIR / "configs" / "model").glob("*.yml")
)
def fixture_weights_model(request):
    """Parametrized fixture yielding a WeightsModel for every model config file."""
    return WeightsModel(request.param)
@pytest.fixture(name="weights_type_model", params=["csrnet", "yolox"])
def fixture_weights_type_model(request):
"""Selects 2 models with different weights structure:
- csrnet: SavedModel format (a folder of files)
- yolox: PyTorch (single weights file)
"""
return WeightsModel(PKD_DIR / "configs" / "model" / f"{request.param}.yml")
class WeightsModel(WeightsDownloaderMixin):
    """Minimal stand-in node exposing only the weights-downloading mixin."""
    def __init__(self, config_file):
        with open(config_file) as config_fp:
            loaded_config = yaml.safe_load(config_fp)
        loaded_config["root"] = Path.cwd()
        self.config = loaded_config
        self.logger = logging.getLogger("test_weights_downloader_mixin.WeightsModel")
class TestWeightsDownloaderMixin:
    """Unit tests for WeightsDownloaderMixin path resolution, checksum
    verification, and download/extract orchestration."""
    def test_parent_dir_not_exist(self, weights_model):
        """A non-existent weights_parent_dir must raise FileNotFoundError."""
        invalid_dir = "invalid_dir"
        weights_model.config["weights_parent_dir"] = invalid_dir
        with pytest.raises(FileNotFoundError) as excinfo:
            weights_model.download_weights()
        assert f"weights_parent_dir does not exist: {invalid_dir}" == str(excinfo.value)
    def test_parent_dir_not_absolute(self, weights_model):
        """A relative weights_parent_dir must raise ValueError."""
        relative_dir = PKD_DIR.relative_to(PKD_DIR.parent)
        weights_model.config["weights_parent_dir"] = relative_dir
        with pytest.raises(ValueError) as excinfo:
            weights_model.download_weights()
        assert f"weights_parent_dir must be an absolute path: {relative_dir}" == str(
            excinfo.value
        )
    def test_default_parent_dir(self, weights_model):
        """Checks that _find_paths() gives the correct path when
        `weights_parents_dir=None`.
        """
        parent_dir = Path.cwd().resolve().parent
        weights_model.config["weights_parent_dir"] = parent_dir
        actual = weights_model._find_paths()
        # Expected layout: <parent>/<weights subdir>/<model subdir>/<format>.
        assert (
            actual
            == parent_dir
            / PEEKINGDUCK_WEIGHTS_SUBDIR
            / weights_model.config["weights"][weights_model.config["model_format"]][
                "model_subdir"
            ]
            / weights_model.config["model_format"]
        )
    def test_custom_parent_dir(self, weights_model):
        """Checks that _find_paths() gives the correct path when
        `weights_parents_dir` is a valid custom path.
        """
        actual = weights_model._find_paths()
        assert (
            actual
            == weights_model.config["root"].parent
            / PEEKINGDUCK_WEIGHTS_SUBDIR
            / weights_model.config["weights"][weights_model.config["model_format"]][
                "model_subdir"
            ]
            / weights_model.config["model_format"]
        )
    def test_weights_not_found(self, weights_model):
        """Checks that the proper logging message is shown when weights are not
        found.
        """
        with tempfile.TemporaryDirectory() as tmp_dir, TestCase.assertLogs(
            "test_weights_downloader_mixin.WeightsModel"
        ) as captured:
            weights_model.config["weights_parent_dir"] = tmp_dir
            model_dir = weights_model._find_paths()
            assert not weights_model._has_weights(model_dir)
            assert captured.records[0].getMessage() == "No weights detected."
    def test_corrupted_weights(self, weights_model):
        """Checks that the proper logging message is shown when the weights
        file exists but fails checksum verification.
        """
        with tempfile.TemporaryDirectory() as tmp_dir, TestCase.assertLogs(
            "test_weights_downloader_mixin.WeightsModel"
        ) as captured:
            weights_model.config["weights_parent_dir"] = tmp_dir
            model_dir = weights_model._find_paths()
            # Create a temp weights file which doesn't match the checksum
            model_dir.mkdir(parents=True, exist_ok=True)
            (model_dir / weights_model.model_filename).touch()
            assert not weights_model._has_weights(model_dir)
            assert (
                captured.records[0].getMessage()
                == "Weights file is corrupted/out-of-date."
            )
    def test_valid_weights(self, weights_type_model):
        """Checks that verifying weights checksum works for both single weights
        file and a weights directory (SavedModel format).
        Currently, `weights_type_model` only hold 2 models to reduce bandwidth
        usage.
        """
        # NOTE(review): this test performs a real network download.
        with tempfile.TemporaryDirectory() as tmp_dir:
            weights_type_model.config["weights_parent_dir"] = tmp_dir
            model_dir = weights_type_model._find_paths()
            model_dir.mkdir(parents=True, exist_ok=True)
            weights_type_model._download_to(weights_type_model.blob_filename, model_dir)
            weights_type_model._extract_file(model_dir)
            assert weights_type_model._has_weights(model_dir)
    def test_sha256sum_ignores_macos_files(self):
        """Checks that extra files created on Mac OS is ignored by the
        sha256sum() method.
        """
        all_files = [".DS_Store", "__MACOSX", "file1", "file2"]
        with tempfile.TemporaryDirectory() as tmp_dir:
            tmp_directory = Path(tmp_dir)
            for i, file in enumerate(sorted(all_files)):
                (tmp_directory / file).write_text(str(i))
            # Hash only the non-macOS files, in sorted order, chaining the
            # accumulator the same way the directory walk does.
            expected = hashlib.sha256()
            for file in sorted(all_files[2:]):
                expected = WeightsDownloaderMixin.sha256sum(
                    tmp_directory / file, expected
                )
            assert (
                WeightsDownloaderMixin.sha256sum(tmp_directory).hexdigest()
                == expected.hexdigest()
            )
    @pytest.mark.usefixtures("tmp_dir")
    @mock.patch.object(WeightsDownloaderMixin, "_download_to", wraps=do_nothing)
    @mock.patch.object(WeightsDownloaderMixin, "_extract_file", wraps=do_nothing)
    def test_create_weights_dir(
        self, mock_download_to, mock_extract_file, weights_model
    ):
        """Checks the weights directory is created and the expected log
        messages are emitted, with download/extract stubbed out.

        NOTE(review): mock.patch.object decorators are applied bottom-up, so
        the first mock argument actually corresponds to _extract_file — the
        parameter names here look swapped. Harmless since both are only
        checked via `.called`, but worth confirming/renaming.
        """
        with tempfile.TemporaryDirectory() as tmp_dir, TestCase.assertLogs(
            "test_weights_downloader_mixin.WeightsModel"
        ) as captured:
            weights_parent_dir = Path(tmp_dir)
            model_dir = (
                weights_parent_dir
                / PEEKINGDUCK_WEIGHTS_SUBDIR
                / weights_model.model_subdir
                / weights_model.config["model_format"]
            )
            weights_model.config["weights_parent_dir"] = weights_parent_dir
            assert not (weights_parent_dir / PEEKINGDUCK_WEIGHTS_SUBDIR).exists()
            weights_model.download_weights()
            assert mock_download_to.called
            assert mock_extract_file.called
            assert (weights_parent_dir / PEEKINGDUCK_WEIGHTS_SUBDIR).exists()
            assert captured.records[0].getMessage() == "No weights detected."
            assert captured.records[1].getMessage() == "Proceeding to download..."
            assert (
                captured.records[2].getMessage()
                == f"Weights downloaded to {model_dir}."
            )
| 38.956522 | 88 | 0.65749 |
acdf609481a86b212afa14fb0c366fd32ec95dde | 52,008 | py | Python | python/paddle/fluid/layers/tensor.py | itminner/Paddle | f41da8a4ad3caf2a11d3cc2ca89bbcd243438c7f | [
"Apache-2.0"
] | 1 | 2019-11-25T08:04:08.000Z | 2019-11-25T08:04:08.000Z | python/paddle/fluid/layers/tensor.py | itminner/Paddle | f41da8a4ad3caf2a11d3cc2ca89bbcd243438c7f | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/layers/tensor.py | itminner/Paddle | f41da8a4ad3caf2a11d3cc2ca89bbcd243438c7f | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from six.moves import reduce
from ..layer_helper import LayerHelper
from ..param_attr import ParamAttr
from ..framework import convert_np_dtype_to_dtype_
from ..framework import Variable
from ..initializer import Constant, force_init_on_cpu
from ..core import VarDesc
from .layer_function_generator import templatedoc
from ..data_feeder import check_type_and_dtype, check_type, check_dtype, convert_dtype
import numpy
import warnings
# Public API of this module; these names are re-exported by
# paddle.fluid.layers.
__all__ = [
    'create_tensor', 'create_parameter', 'create_global_var', 'cast',
    'tensor_array_to_tensor', 'concat', 'sums', 'assign',
    'fill_constant_batch_size_like', 'fill_constant', 'argmin', 'argmax',
    'argsort', 'ones', 'zeros', 'reverse', 'has_inf', 'has_nan', 'isfinite',
    'range', 'linspace', 'zeros_like', 'ones_like', 'diag', 'eye'
]
def create_tensor(dtype, name=None, persistable=False):
    """
    Create a variable, which will hold a Tensor with data type dtype.

    Args:
        dtype(string|numpy.dtype): the data type of Tensor to be created, one
            of bool, float16, float32, float64, int8, int16, int32 and int64.
        name(string, optional): The default value is None. Normally there is no need for
            user to set this property. For more information, please refer to :ref:`api_guide_Name`
        persistable(bool): Set the persistable flag of the created tensor.
            Default value is False.

    Returns:
        Variable: The tensor to be created according to dtype.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          tensor = fluid.layers.create_tensor(dtype='float32')
    """
    # locals() here contains exactly the caller-supplied arguments.
    layer_helper = LayerHelper("create_tensor", **locals())
    tensor_var = layer_helper.create_variable(
        name=layer_helper.name, dtype=dtype, persistable=persistable)
    return tensor_var
def create_parameter(shape,
                     dtype,
                     name=None,
                     attr=None,
                     is_bias=False,
                     default_initializer=None):
    """
    This function creates a parameter. The parameter is a learnable variable,
    which can have gradient, and can be optimized.

    NOTE: this is a very low-level API. This API is useful when you create
    operator by your self, instead of using layers.

    Parameters:
        shape (list of int): Shape of the parameter
        dtype (str): Data type of the parameter
        name (str, optional): For detailed information, please refer to
           :ref:`api_guide_Name` . Usually name is no need to set and None by default.
        attr (ParamAttr, optional): Attributes of the parameter
        is_bias (bool, optional): This can affect which default initializer is chosen
                       when default_initializer is None. If is_bias,
                       initializer.Constant(0.0) will be used. Otherwise,
                       Xavier() will be used.
        default_initializer (Initializer, optional): Initializer for the parameter

    Returns:
        The created parameter.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.fluid.layers as layers
            W = layers.create_parameter(shape=[784, 200], dtype='float32')
    """
    # Capture locals() before introducing any new bindings.
    helper = LayerHelper("create_parameter", **locals())
    # Fall back to a ParamAttr carrying the requested name when none is given.
    param_attr = attr if attr is not None else ParamAttr(name=name)
    return helper.create_parameter(param_attr, shape, dtype, is_bias,
                                   default_initializer)
def create_global_var(shape,
                      value,
                      dtype,
                      persistable=False,
                      force_cpu=False,
                      name=None):
    """
    This function creates a new tensor variable with value in the global block(block 0).

    Parameters:
        shape (list of int): Shape of the variable
        value (float): The value of the variable. The new created
                      variable will be filled with it.
        dtype (str): Data type of the variable
        persistable (bool, optional): If this variable is persistable.
                           Default: False
        force_cpu (bool, optional): Force this variable to be on CPU.
                         Default: False
        name (str, optional): For detailed information, please refer to
           :ref:`api_guide_Name` . Usually name is no need to set and None by default.

    Returns:
        Variable: The created Variable

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.fluid.layers as layers
            var = layers.create_global_var(shape=[2,3], value=1.0, dtype='float32',
                                           persistable=True, force_cpu=True, name='new_var')
    """
    helper = LayerHelper("global_var", **locals())
    global_var = helper.create_global_variable(
        dtype=dtype,
        shape=shape,
        persistable=persistable,
        name=name,
        stop_gradient=True)
    # Fill the variable with the constant value at startup.
    fill_value = Constant(value=float(value), force_cpu=force_cpu)
    helper.set_variable_initializer(global_var, initializer=fill_value)
    return global_var
def cast(x, dtype):
    """
    This OP takes in the Variable :attr:`x` with :attr:`x.dtype` and casts it
    to the output with :attr:`dtype`. It's meaningless if the output dtype
    equals the input dtype, but it's fine if you do so.

    Args:
        x(Variable): An input N-D Tensor with data type bool, float16,
            float32, float64, int32, int64, uint8.
        dtype(np.dtype|core.VarDesc.VarType|str): Data type of the output:
            bool, float16, float32, float64, int8, int32, int64, uint8.

    Returns:
        Variable: A Tensor with the same shape as input's.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            place = fluid.core.CPUPlace()

            x_lod = fluid.data(name="x", shape=[2,2], lod_level=0)
            cast_res1 = fluid.layers.cast(x=x_lod, dtype="uint8")
            cast_res2 = fluid.layers.cast(x=x_lod, dtype=np.int32)

            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())

            x_i_lod = fluid.core.LoDTensor()
            x_i_lod.set(np.array([[1.3,-2.4],[0,4]]).astype("float32"), place)
            x_i_lod.set_recursive_sequence_lengths([[0,2]])
            res1 = exe.run(fluid.default_main_program(), feed={'x':x_i_lod}, fetch_list=[cast_res1], return_numpy=False)
            res2 = exe.run(fluid.default_main_program(), feed={'x':x_i_lod}, fetch_list=[cast_res2], return_numpy=False)
            print(np.array(res1[0]), np.array(res1[0]).dtype)
            # [[  1 254]
            #  [  0   4]] uint8
            print(np.array(res2[0]), np.array(res2[0]).dtype)
            # [[ 1 -2]
            #  [ 0  4]] int32
    """
    helper = LayerHelper('cast', **locals())
    # Validate the input variable and its dtype before emitting the op.
    check_type_and_dtype(
        x, 'x', Variable,
        ['bool', 'float16', 'float32', 'float64', 'int32', 'int64', 'uint8'],
        'cast')
    out = helper.create_variable_for_type_inference(dtype=dtype)
    helper.append_op(
        type='cast',
        inputs={'X': [x]},
        outputs={'Out': [out]},
        attrs={'in_dtype': x.dtype,
               'out_dtype': out.dtype})
    return out
def concat(input, axis=0, name=None):
    """
    **Concat**

    This OP concatenates the input along the axis.

    Args:
        input(list): List of input Tensors with data type float32, float64,
            int32, int64. A bare Tensor is accepted and treated as a
            one-element list (with a warning).
        axis(int32|Variable, optional): A scalar with type ``int32`` or a
            ``Tensor`` with shape [1] and type ``int32``. Axis to concatenate
            along. The effective range is [-R, R), where R is Rank(x); when
            axis<0, it works the same way as axis+R. Default is 0.
        name (str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Variable: A Tensor with the same data type as input's.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np

            in1 = np.array([[1,2,3],
                            [4,5,6]])
            in2 = np.array([[11,12,13],
                            [14,15,16]])
            with fluid.dygraph.guard():
                x1 = fluid.dygraph.to_variable(in1)
                x2 = fluid.dygraph.to_variable(in2)
                out = fluid.layers.concat(input=[x1,x2], axis=0)
    """
    # locals() must be captured before any new bindings are introduced.
    helper = LayerHelper('concat', **locals())
    if not isinstance(input, list):
        # Tolerate a single Variable by wrapping it, but warn the caller.
        warnings.warn(
            "The type of input in concat should be list, but received %s." %
            (type(input)))
        input = [input]
    # Validate every element of the list, then the axis argument.
    for idx, tensor in enumerate(input):
        check_type_and_dtype(
            tensor, 'input[' + str(idx) + ']', Variable,
            ['float16', 'float32', 'float64', 'int32', 'int64'], 'concat')
    check_type(axis, 'axis', (int, Variable), 'concat')
    op_inputs = {'X': input}
    op_attrs = {}
    if isinstance(axis, Variable):
        # A tensor-valued axis is fed through the AxisTensor input rather
        # than as a compile-time attribute.
        axis.stop_gradient = True
        op_inputs['AxisTensor'] = axis
    else:
        op_attrs['axis'] = axis
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    helper.append_op(
        type='concat', inputs=op_inputs, outputs={'Out': [out]}, attrs=op_attrs)
    return out
def tensor_array_to_tensor(input, axis=1, name=None, use_stack=False):
    """
    Concatenates (or, when ``use_stack`` is True, stacks) every tensor held
    in the input LoDTensorArray along ``axis`` and returns the result.

    For example, an array holding tensors of shapes [2, 3], [2, 1] and
    [2, 2] concatenated with axis=1 yields a [2, 6] tensor and the index
    tensor [3, 1, 2].

    Args:
        input(Variable): A LodTensorArray variable.
        axis(int): The axis along which the tensors in ``input`` will be
            concatenated or stacked.
        name(str|None): A name for this layer (optional). If set None, the
            layer will be named automatically.
        use_stack(bool): Act as stack_op instead of concat_op. In stack
            mode all tensors in the tensor array must have the same shape.

    Returns:
        Variable: The concatenated or stacked tensor variable.
        Variable: A 1-D int32 tensor holding the input tensors' sizes along
            ``axis``.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            x0 = fluid.layers.assign(np.random.rand(2, 2).astype("float32"))
            x1 = fluid.layers.assign(np.random.rand(2, 2).astype("float32"))
            i = fluid.layers.fill_constant(shape=[1], dtype="int64", value=0)
            array = fluid.layers.create_array(dtype='float32')
            fluid.layers.array_write(x0, i, array)
            fluid.layers.array_write(x1, i + 1, array)
            output, output_index = fluid.layers.tensor_array_to_tensor(input=array)
    """
    helper = LayerHelper('tensor_array_to_tensor', **locals())
    result = helper.create_variable_for_type_inference(
        dtype=helper.input_dtype())
    # Second output records each input tensor's extent along `axis`.
    sizes = helper.create_variable_for_type_inference(dtype="int32")
    helper.append_op(
        type='tensor_array_to_tensor',
        inputs={'X': input},
        outputs={'Out': [result],
                 'OutIndex': [sizes]},
        attrs={'axis': axis,
               'use_stack': use_stack})
    return result, sizes
def sums(input, out=None):
    """
    Computes the elementwise sum of multiple input Tensors.

    For example, summing three [2, 3] tensors produces a [2, 3] tensor whose
    every element is the sum of the corresponding input elements.

    Args:
        input (list): A list of Variables holding Tensors with the same data
            type and shape. Optional data types are: float32, float64,
            int32, int64.
        out (Variable, optional): Output Tensor; may be any existing
            Variable. When None (the default) a new Variable is created and
            returned.

    Returns:
        Variable: The sum of the inputs, with the same shape and data type
        as the input. If ``out`` is not None, ``out`` itself is returned.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            x0 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=1)
            x1 = fluid.layers.fill_constant(shape=[16, 32], dtype='int64', value=2)
            sum0 = fluid.layers.sums(input=[x0, x1])
    """
    helper = LayerHelper('sum', **locals())
    target = out
    if target is None:
        target = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype())
    helper.append_op(
        type='sum',
        inputs={'X': input},
        outputs={'Out': target},
        attrs={'use_mkldnn': False})
    return target
def assign(input, output=None):
    """
    Copies ``input`` into ``output``.

    Parameters:
        input (Variable|numpy.ndarray): A tensor or numpy ndarray whose data
            type is float32, float64, int32 or int64.
        output (Variable, optional): The destination tensor. When None (the
            default) a new tensor is created as the output.

    Returns:
        Variable: A tensor with the same shape, data type and value as
        ``input``.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            data = fluid.layers.fill_constant(shape=[3, 2], value=2.5, dtype='float64')
            result = fluid.layers.assign(data)
            result_np = fluid.layers.assign(
                np.array([[2.5, 2.5], [2.5, 2.5]], dtype='float32'))
    """
    helper = LayerHelper('assign', **locals())
    check_type(input, 'input', (Variable, numpy.ndarray), 'assign')

    if isinstance(input, Variable):
        check_dtype(input.dtype, 'input',
                    ['float32', 'float64', 'int32', 'int64', 'bool'], 'assign',
                    '(When the type of input in assign is Variable.)')
        if output is None:
            output = helper.create_variable_for_type_inference(
                dtype=input.dtype)
        helper.append_op(
            type='assign', inputs={'X': [input]}, outputs={'Out': [output]})
        return output

    # numpy.ndarray branch: flatten the values and feed them through the
    # assign_value op. Only float32 and int32 payloads are supported.
    dtype = convert_np_dtype_to_dtype_(input.dtype)
    if dtype == VarDesc.VarType.FP32:
        value_name = "fp32_values"
        values = [float(v) for v in input.flat]
    elif dtype == VarDesc.VarType.INT32:
        value_name = "int32_values"
        values = [int(v) for v in input.flat]
    else:
        raise TypeError(
            "When the type of 'input' in assign is numpy.ndarray, "
            "the data type of 'input' must be float32 or int32, but "
            "received %s." % convert_dtype(dtype))
    # Large constants would bloat the program desc; refuse them.
    if input.size > 1024 * 1024:
        raise ValueError("The size of input is too big. Please consider "
                         "saving it to file and 'load_op' to load it")
    if output is None:
        output = helper.create_variable_for_type_inference(
            dtype=input.dtype)
    helper.append_op(
        type='assign_value',
        outputs={'Out': [output]},
        attrs={
            'dtype': dtype,
            'shape': list(input.shape),
            value_name: values
        })
    return output
def fill_constant(shape, dtype, value, force_cpu=False, out=None):
    """
    Creates a Tensor of the given ``shape`` and ``dtype`` with every element
    initialized to the constant ``value``. The created Tensor's
    ``stop_gradient`` attribute is set to True.

    Args:
        shape(list|tuple|Variable): Shape of the Tensor to be created, with
            ``int32`` or ``int64`` elements. A list or tuple may mix plain
            integers and shape-[1] Tensors; a Variable must be a 1-D Tensor.
        dtype(np.dtype|core.VarDesc.VarType|str): Data type of the output
            tensor: float16, float32, float64, int32 or int64.
        value(float): The constant used to initialize the created Tensor.
        force_cpu(True): Keep the data on CPU if True; default is False.
        out(Variable, optional): An existing Variable that can store the
            result of the operation; when None a new Variable is created.

    Returns:
        Variable: The Tensor created according to shape and dtype.

    Raise:
        TypeError: The dtype must be one of bool, float16, float32, float64,
            int32 and int64, and the data type of ``out`` (if given) must be
            the same as ``dtype``.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            data1 = fluid.layers.fill_constant(shape=[2, 1], value=0, dtype='int64')
            positive_2 = fluid.layers.fill_constant([1], "int32", 2)
            data2 = fluid.layers.fill_constant(shape=[1, positive_2], dtype='float32', value=1.5)
            shape = fluid.layers.fill_constant([1, 2], "int32", 2)
            data3 = fluid.layers.fill_constant(shape=shape, dtype='bool', value=True)
    """
    helper = LayerHelper("fill_constant", **locals())
    check_dtype(dtype, 'create data type',
                ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
                'fill_constant')
    check_type(shape, 'shape', (Variable, list, tuple), 'fill_constant')

    inputs = {}
    attrs = {
        'value': float(value),
        'force_cpu': force_cpu or force_init_on_cpu()
    }

    def _holds_variable(seq):
        # True when at least one element of the shape list is a Tensor.
        return any(isinstance(item, Variable) for item in seq)

    def _static_shape(seq):
        # Tensor-valued dims are unknown at compile time; record them as -1.
        return [-1 if isinstance(item, Variable) else item for item in seq]

    def _shape_tensor_list(seq):
        # Turn every shape element into a shape-[1] int32 Tensor.
        tensors = []
        for idx, dim in enumerate(seq):
            if isinstance(dim, Variable):
                dim.stop_gradient = True
                check_dtype(
                    dim.dtype, 'shape[' + str(idx) + ']', ['int32', 'int64'],
                    'fill_constant',
                    '(When type of shape in fill_constant is list or tuple.)')
                if convert_dtype(dim.dtype) == 'int64':
                    dim = cast(x=dim, dtype='int32')
                tensors.append(dim)
            else:
                temp_out = helper.create_variable_for_type_inference('int32')
                fill_constant([1], 'int32', dim, force_cpu=True, out=temp_out)
                tensors.append(temp_out)
        return tensors

    if isinstance(shape, Variable):
        shape.stop_gradient = True
        check_dtype(shape.dtype, 'shape', ['int32', 'int64'], 'fill_constant',
                    '(When type of shape in fill_constant is Variable.)')
        if convert_dtype(shape.dtype) == 'int64':
            shape = cast(shape, 'int32')
        inputs["ShapeTensor"] = shape
    else:
        # check_type above guarantees shape is a list or tuple here.
        assert len(shape) > 0, (
            "The size of 'shape' in fill_constant can't be zero, "
            "but received %s." % len(shape))
        attrs["shape"] = _static_shape(shape)
        if _holds_variable(shape):
            inputs['ShapeTensorList'] = _shape_tensor_list(shape)

    if out is None:
        out = helper.create_variable_for_type_inference(dtype=dtype)
    else:
        check_dtype(
            dtype, 'create data type',
            convert_dtype(out.dtype), 'fill_constant',
            '(The create data type in fill_constant must be the same with out data type.)'
        )
    attrs['dtype'] = out.dtype
    helper.append_op(
        type='fill_constant',
        inputs=inputs,
        outputs={'Out': [out]},
        attrs=attrs,
        stop_gradient=True)
    out.stop_gradient = True
    return out
@templatedoc()
def fill_constant_batch_size_like(input,
                                  shape,
                                  dtype,
                                  value,
                                  input_dim_idx=0,
                                  output_dim_idx=0,
                                  force_cpu=False):
    """
    Creates a Tensor of the given shape and dtype, filled with the constant
    ``value``. When ``input`` is a LoDTensor and ``input_dim_idx`` is 0,
    dimension ``output_dim_idx`` of the result is set to the batch size
    taken from ``input``. The created Tensor's ``stop_gradient`` attribute
    is set to True (the original docstring incorrectly said False).

    Args:
        input(Variable): Tensor whose data type is float32, float64, int32
            or int64.
        shape(list): The shape of the Tensor to be created; it may be
            changed according to ``input``.
        dtype(np.dtype|core.VarDesc.VarType|str): The data type of the
            created Tensor: float32, float64, int32 or int64.
        value(float|int): The constant used to initialize the created
            Tensor.
        input_dim_idx(int): When the value is 0 and the input is a
            LoDTensor, the ``output_dim_idx`` dimension of the created
            Tensor is set to the batch_size value of input. Default is 0.
        output_dim_idx(int): The dimension of the created Tensor that
            receives the batch_size of the input Tensor. Default is 0.
        force_cpu(bool): Keep the data on CPU if True; default is False.

    Returns:
        Variable: The Tensor created according to dtype.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            like = fluid.layers.fill_constant(shape=[1, 2], value=10, dtype='int64')  # like=[[10, 10]]
            data = fluid.layers.fill_constant_batch_size_like(
                input=like, shape=[1], value=0, dtype='int64')  # data=[0]
    """
    helper = LayerHelper("fill_constant_batch_size_like", **locals())
    out = helper.create_variable_for_type_inference(dtype=dtype)
    helper.append_op(
        type='fill_constant_batch_size_like',
        inputs={'Input': input},
        outputs={'Out': [out]},
        attrs={
            'shape': shape,
            'dtype': out.dtype,
            'value': float(value),
            'input_dim_idx': input_dim_idx,
            'output_dim_idx': output_dim_idx,
            'force_cpu': force_cpu or force_init_on_cpu()
        })
    # The output is a constant; it never needs a gradient.
    out.stop_gradient = True
    return out
def argmin(x, axis=0):
    """
    **argmin**

    Computes the indices of the minimum elements of ``x`` along ``axis``.

    Args:
        x(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective
            range is [-R, R), where R is Rank(x); when axis<0 it works the
            same way as axis+R. Default is 0.

    Returns:
        Variable: A Tensor with data type int64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            in1 = np.array([[3, 1], [2, 4]])
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out = fluid.layers.argmin(x=x, axis=-1)
                print(out.numpy())  # [1 0]
    """
    helper = LayerHelper("arg_min", **locals())
    indices = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(
        type='arg_min',
        inputs={'X': x},
        attrs={'axis': axis},
        outputs={'Out': [indices]})
    return indices
def argmax(x, axis=0):
    """
    **argmax**

    Computes the indices of the maximum elements of ``x`` along ``axis``.

    Args:
        x(Variable): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective
            range is [-R, R), where R is Rank(x); when axis<0 it works the
            same way as axis+R. Default is 0.

    Returns:
        Variable: A Tensor with data type int64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            in1 = np.array([[3, 1], [2, 4]])
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out = fluid.layers.argmax(x=x, axis=-1)
                print(out.numpy())  # [0 1]
    """
    helper = LayerHelper("arg_max", **locals())
    indices = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(
        type='arg_max',
        inputs={'X': x},
        attrs={'axis': axis},
        outputs={'Out': [indices]})
    return indices
def argsort(input, axis=-1, name=None):
    """
    Sorts ``input`` along the given ``axis`` and returns both the sorted
    output data Variable and its corresponding index Variable, each with the
    same shape as ``input``.

    Args:
        input(Variable): An input N-D Tensor with type float32, float64,
            int16, int32, int64, uint8.
        axis(int, optional): Axis to sort along. The effective range is
            [-R, R), where R is Rank(x); when axis<0 it works the same way
            as axis+R. Default is -1 (the last dimension).
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        tuple: A tuple of the sorted data Variable (with the same shape and
        data type as input) and the sorted indices (with the same shape as
        input and data type int64).

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            in1 = np.array([[3., 1., 2.]]).astype(np.float32)
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(in1)
                out, idx = fluid.layers.argsort(input=x, axis=-1)
                # out  = [[1. 2. 3.]]
                # idx  = [[1 2 0]]
    """
    helper = LayerHelper("argsort", **locals())
    # Both outputs are created with stop_gradient=True, so they are detached
    # from gradient computation.
    out = helper.create_variable_for_type_inference(
        dtype=input.dtype, stop_gradient=True)
    ids = helper.create_variable_for_type_inference(
        VarDesc.VarType.INT64, stop_gradient=True)
    helper.append_op(
        type='argsort',
        inputs={'X': input},
        outputs={'Out': out,
                 'Indices': ids},
        attrs={'axis': axis})
    return out, ids
def ones(shape, dtype, force_cpu=False):
    """
    Creates a tensor of the specified ``shape`` and ``dtype`` filled with 1.
    Its ``stop_gradient`` attribute is set to True to stop gradient
    computation.

    Parameters:
        shape (tuple|list): Shape of the output tensor.
        dtype (np.dtype|core.VarDesc.VarType|str): Data type of the output
            tensor; it supports bool, float16, float32, float64, int32 and
            int64.
        force_cpu (bool, optional): Whether to force the output tensor into
            CPU memory. When False (the default) the output tensor is stored
            in running device memory.

    Returns:
        Variable: A tensor of data type ``dtype`` with shape ``shape`` and
        all elements set to 1.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            data = fluid.layers.ones(shape=[2, 4], dtype='float32')  # [[1., 1., 1., 1.], [1., 1., 1., 1.]]
    """
    # Validate dtype up front for a clear error message, mirroring zeros().
    check_dtype(dtype, 'create data type',
                ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
                'ones')
    assert isinstance(shape, list) or isinstance(
        shape, tuple), "The shape's type should be list or tuple."
    assert reduce(lambda x, y: x * y,
                  shape) > 0, "The shape is invalid: %s." % (str(shape))
    return fill_constant(value=1.0, **locals())
def zeros(shape, dtype, force_cpu=False):
    """
    Creates a tensor of the specified ``shape`` and ``dtype`` filled with 0.
    Its ``stop_gradient`` attribute is set to True to stop gradient
    computation.

    Parameters:
        shape (tuple|list): Shape of the output tensor.
        dtype (np.dtype|core.VarDesc.VarType|str): Data type of the output
            tensor; it supports bool, float16, float32, float64, int32 and
            int64.
        force_cpu (bool, optional): Whether to force the output tensor into
            CPU memory. When False (the default) the output tensor is stored
            in running device memory.

    Returns:
        Variable: A tensor of data type ``dtype`` with shape ``shape`` and
        all elements set to 0.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            data = fluid.layers.zeros(shape=[3, 2], dtype='float32')  # [[0., 0.], [0., 0.], [0., 0.]]
    """
    # Reject unsupported dtypes early, then delegate to fill_constant.
    check_dtype(dtype, 'create data type',
                ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
                'zeros')
    return fill_constant(value=0.0, **locals())
def reverse(x, axis):
    """
    Reverses the tensor ``x`` along the given ``axis``.

    Parameters:
        x (Variable): The tensor to be reversed; its data type supports
            bool, float32, float64, int32, int64 and uint8.
        axis (int|tuple|list): A dimension or a set of dimensions of ``x``
            to reverse. Each value must be in [-rank(x), rank(x)). With a
            tuple or list, reversing is applied on every listed axis.

    Returns:
        Variable: The reversed tensor with the same shape and data type as
        ``x``.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            data = fluid.layers.assign(np.array([[0, 1], [2, 3]], dtype='float32'))
            result = fluid.layers.reverse(data, 0)  # [[2., 3.], [0., 1.]]
    """
    # Normalize a scalar axis into the list form the op expects.
    if isinstance(axis, int):
        axis = [axis]
    helper = LayerHelper("reverse", **locals())
    reversed_out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='reverse',
        inputs={'X': x},
        attrs={'axis': axis},
        outputs={'Out': [reversed_out]})
    return reversed_out
def save(x, file_path, overwrite=True):
    """
    Saves a variable as a file.

    Args:
        x(variable): The Tensor/LoDTensor to be saved.
        file_path(str): The file path where the variable will be saved.
        overwrite(bool): Whether or not cover the given file when it has
            already existed. If it's set 'False' and the file is existed, a
            runtime error will be thrown.
    """
    helper = LayerHelper("save", **locals())
    # Fixed: operator attributes must be passed via `attrs` (as every other
    # append_op call in this file does); the previous `args` keyword never
    # delivered file_path/overwrite to the op.
    helper.append_op(
        type="save",
        inputs={"input": x},
        outputs={},
        attrs={"file_path": file_path,
               "overwrite": overwrite})
def save_combine(x, file_path, overwrite=True):
    """
    Saves a list of variables into a single file.

    Args:
        x(list): A list of Tensor/LoDTensor variables to be saved together
            in a single file.
        file_path(str): The file path where variables will be saved.
        overwrite(bool): Whether or not cover the given file when it has
            already existed. If it's set 'False' and the file is existed, a
            runtime error will be thrown.

    Returns:
        There is no return value.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            v1 = fluid.layers.data(name="data",
                                   shape=(4, 6),
                                   dtype="float32")
            v2 = fluid.layers.data(name="data",
                                   shape=(6, 8, 4),
                                   dtype="float32")
            normed = fluid.layers.save_combine([v1, v2], file_path="output")
    """
    helper = LayerHelper("save_combine", **locals())
    # Fixed: operator attributes must be passed via `attrs` (as every other
    # append_op call in this file does); the previous `args` keyword never
    # delivered file_path/overwrite to the op.
    helper.append_op(
        type="save_combine",
        inputs={"input": x},
        outputs={},
        attrs={"file_path": file_path,
               "overwrite": overwrite})
def load_combine(out, file_path):
    """
    Loads a list of variables from a single file.

    Args:
        out(list): The list of variables to be read from the disk file.
        file_path(str): The path of the disk file.
    """
    helper = LayerHelper("load_combine", **locals())
    # Fixed keywords: append_op expects `outputs` and `attrs` (as every
    # other op in this file uses); the previous `output`/`args` keywords
    # were wrong, so the op's outputs and file_path were never set.
    helper.append_op(
        type="load_combine",
        inputs={},
        outputs={"Out": out},
        attrs={"file_path": file_path})
def has_inf(x):
    """
    Tests whether ``x`` contains any infinity value.

    Args:
        x (Variable): The Tensor/LoDTensor to be checked.

    Returns:
        Variable: A tensor holding a single bool value that indicates
        whether there is an infinity number in ``x`` or not.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            data = fluid.layers.data(name="input", shape=[4, 32, 32], dtype="float32")
            res = fluid.layers.has_inf(data)
    """
    helper = LayerHelper("isinf", **locals())
    flag = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type="isinf", inputs={"X": x}, outputs={"Out": flag})
    return flag
def has_nan(x):
    """
    Tests whether ``x`` contains any NAN value.

    Args:
        x (Variable): The Tensor/LoDTensor to be checked.

    Returns:
        Variable: A tensor holding a single bool value that indicates
        whether there is a NAN in ``x`` or not.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            data = fluid.layers.data(name="input", shape=[4, 32, 32], dtype="float32")
            res = fluid.layers.has_nan(data)
    """
    helper = LayerHelper("isnan", **locals())
    flag = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type="isnan", inputs={"X": x}, outputs={"Out": flag})
    return flag
def isfinite(x):
    """
    Tests whether every element of ``x`` is finite. Returns true when no
    element is infinity or NAN, false otherwise.

    Args:
        x(variable): The Tensor/LoDTensor to be checked.

    Returns:
        Variable: A tensor variable storing the output, containing a single
        bool value.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            var = fluid.layers.data(name="data",
                                    shape=(4, 6),
                                    dtype="float32")
            out = fluid.layers.isfinite(var)
    """
    helper = LayerHelper("isfinite", **locals())
    flag = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type="isfinite", inputs={"X": x}, outputs={"Out": flag})
    return flag
def range(start, end, step, dtype):
    """
    Returns evenly spaced values within the half-open interval [start, end)
    (the interval includes start but excludes end).

    Parameters:
        start(float32|float64|int32|int64|Variable): Start of the interval;
            included. A Variable must be a 1-D Tensor with shape [1].
        end(float32|float64|int32|int64|Variable): End of the interval;
            excluded, except in some cases where step is not an integer and
            floating point round-off affects the length of the output. A
            Variable must be a 1-D Tensor with shape [1].
        step(float32|float64|int32|int64|Variable): Spacing between values.
            For any output out this is the distance between two adjacent
            values, out[i+1] - out[i].
        dtype(str): Data type of the output tensor: float32, float64, int32
            or int64.

    Returns: a 1-D Tensor of evenly spaced values within the interval, with
        its data type set by dtype.

    Return type: Variable

    examples:
        .. code-block:: python

            import paddle.fluid as fluid
            data = fluid.layers.range(0, 10, 2, 'int32')
    """
    helper = LayerHelper("range", **locals())
    # Promote python scalars into shape-[1] tensors of the requested dtype.
    if not isinstance(start, Variable):
        start = fill_constant([1], dtype, start)
    if not isinstance(end, Variable):
        end = fill_constant([1], dtype, end)
    if not isinstance(step, Variable):
        step = fill_constant([1], dtype, step)

    result = helper.create_variable_for_type_inference(dtype=start.dtype)
    helper.append_op(
        type='range',
        inputs={'Start': start,
                'End': end,
                'Step': step},
        outputs={'Out': [result]})
    result.stop_gradient = True
    return result
def linspace(start, stop, num, dtype):
    """
    Returns a fixed number of evenly spaced values within a given interval.

    Args:
        start(float|Variable): Start of the range; a float scalar or a
            shape-[1] tensor with data type float32 or float64.
        stop(float|Variable): End of the range; a float scalar or a
            shape-[1] tensor with data type float32 or float64.
        num(int|Variable): The number of values in the sequence; an int
            scalar or a shape-[1] tensor with type int32.
        dtype(string): Data type of the output tensor; it can be 'float32'
            or 'float64'.

    Returns:
        Variable: A 1-D tensor of shape [num] holding the evenly spaced
        values, with data type float32 or float64. When ``num`` is 1 the
        output tensor just holds the ``start`` value.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            data = fluid.layers.linspace(0, 10, 5, 'float32')  # [0.0, 2.5, 5.0, 7.5, 10.0]
            data = fluid.layers.linspace(0, 10, 1, 'float32')  # [0.0]
    """
    helper = LayerHelper("linspace", **locals())
    # Wrap python scalars as shape-[1] tensors; `num` is always int32.
    if not isinstance(start, Variable):
        start = fill_constant([1], dtype, start)
    if not isinstance(stop, Variable):
        stop = fill_constant([1], dtype, stop)
    if not isinstance(num, Variable):
        num = fill_constant([1], 'int32', num)

    result = helper.create_variable_for_type_inference(dtype=start.dtype)
    helper.append_op(
        type='linspace',
        inputs={'Start': start,
                'Stop': stop,
                'Num': num},
        outputs={'Out': [result]})
    return result
def zeros_like(x, out=None):
    """
    Creates a zeros tensor with the same shape and dtype as ``x``.

    Args:
        x(Variable): The input tensor which specifies shape and dtype; the
            input data dtype may be bool, float32, float64, int32 or int64.
        out(Variable, optional): The output variable. When None (the
            default) the op creates it; otherwise its data type and shape
            must be the same as ``x``.

    Returns:
        Variable: An N-D tensor with the same shape as the input; elements
        are False when the input data type is bool, zero otherwise.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.data(name='x', dtype='float32', shape=[3])
            data = fluid.layers.zeros_like(x)  # [0.0, 0.0, 0.0]
    """
    helper = LayerHelper("zeros_like", **locals())
    target = out
    if target is None:
        target = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='fill_zeros_like', inputs={'X': [x]}, outputs={'Out': [target]})
    target.stop_gradient = True
    return target
def diag(diagonal):
    """
    Builds a square matrix whose diagonal values are given by ``diagonal``.

    Args:
        diagonal(Variable|numpy.ndarray): A 1-D tensor of shape [N] holding
            the diagonal values; the input data type should be float32,
            float64, int32 or int64.

    Returns:
        Variable: The [N, N] matrix with the given diagonal and zeros
        elsewhere; the output data type is the same as the input.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            diagonal = np.arange(3, 6, dtype='int32')
            data = fluid.layers.diag(diagonal)
            # [[3, 0, 0]
            #  [0, 4, 0]
            #  [0, 0, 5]]
    """
    helper = LayerHelper("diag", **locals())
    # Accept raw numpy input by first materializing it as a Variable.
    if not isinstance(diagonal, Variable):
        diagonal = assign(diagonal)

    matrix = helper.create_variable_for_type_inference(dtype=diagonal.dtype)
    helper.append_op(
        type='diag', inputs={'Diagonal': [diagonal]},
        outputs={'Out': [matrix]})
    matrix.stop_gradient = True
    return matrix
def eye(num_rows, num_columns=None, batch_shape=None, dtype='float32'):
    """
    **eye**

    Constructs an identity tensor, or a batch of identity tensors.

    Args:
        num_rows(int): the number of rows in each batch tensor.
        num_columns(int): the number of columns in each batch tensor;
            defaults to ``num_rows`` when None.
        batch_shape(list(int)): If provided, the returned tensor will have a
            leading batch size of this shape.
        dtype(string): The data type of the returned tensor: int32, int64,
            float16, float32 or float64.

    Returns:
        Variable: An identity Tensor or LoDTensor of shape
        batch_shape + [num_rows, num_columns].

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            data = fluid.layers.eye(2, 3, dtype='int32')
            # [[1, 0, 0]
            #  [0, 1, 0]]
            batched = fluid.layers.eye(2, batch_shape=[3])
            # batched[i, :, :] is a 2 x 2 identity tensor, i = 0, 1, 2.
    """
    helper = LayerHelper("eye", **locals())
    if not isinstance(num_rows, int) or num_rows < 0:
        raise TypeError("num_rows should be a non-negative int")
    if num_columns is None:
        num_columns = num_rows
    elif not isinstance(num_columns, int) or num_columns < 0:
        raise TypeError("num_columns should be a non-negative int")

    out = helper.create_variable_for_type_inference(dtype=dtype)
    c_dtype = convert_np_dtype_to_dtype_(dtype)
    helper.append_op(
        type='eye',
        inputs={},
        outputs={'Out': [out]},
        attrs={
            'num_rows': num_rows,
            'num_columns': num_columns,
            'dtype': c_dtype
        },
        stop_gradient=True)
    out.stop_gradient = True

    if batch_shape is not None:
        if not isinstance(batch_shape, list):
            raise TypeError("batch_shape should be a list")
        from .nn import stack
        # Grow the batch dimensions innermost-first by stacking copies of
        # the current result along a new leading axis.
        for batch_val in reversed(batch_shape):
            if batch_val <= 0:
                raise TypeError("batch_shape should be a positive int list")
            out = stack([out for _ in numpy.arange(batch_val)], axis=0)
    return out
def ones_like(x, out=None):
    """
    **ones_like**

    Creates a ones tensor with the same shape and dtype as ``x``.

    Args:
        x(Variable): The input tensor which specifies shape and dtype.
        out(Variable): The output tensor; created automatically when None.

    Returns:
        out(Variable): The tensor variable storing the output.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.layers.data(name='x', dtype='float32', shape=[3], append_batch_size=False)
            data = fluid.layers.ones_like(x)  # [1.0, 1.0, 1.0]
    """
    helper = LayerHelper("ones_like", **locals())
    target = out
    if target is None:
        target = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='fill_any_like',
        inputs={'X': [x]},
        attrs={'value': 1.0},
        outputs={'Out': [target]})
    return target
| 37.255014 | 171 | 0.563163 |
acdf60ae4bc3c1b2a58b17a1193ad3cb26a00aa3 | 350 | py | Python | Leetcode/1000-2000/1441. Build an Array With Stack Operations/1441.py | Next-Gen-UI/Code-Dynamics | a9b9d5e3f27e870b3e030c75a1060d88292de01c | [
"MIT"
] | null | null | null | Leetcode/1000-2000/1441. Build an Array With Stack Operations/1441.py | Next-Gen-UI/Code-Dynamics | a9b9d5e3f27e870b3e030c75a1060d88292de01c | [
"MIT"
] | null | null | null | Leetcode/1000-2000/1441. Build an Array With Stack Operations/1441.py | Next-Gen-UI/Code-Dynamics | a9b9d5e3f27e870b3e030c75a1060d88292de01c | [
"MIT"
] | null | null | null | class Solution:
def buildArray(self, target: List[int], n: int) -> List[str]:
ans = []
i = 0 # target pointer
num = 1 # curr num
while i < len(target):
t = target[i]
if t == num:
ans.append("Push")
i += 1
else:
ans.append("Push")
ans.append("Pop")
num += 1
return ans
| 19.444444 | 63 | 0.482857 |
acdf60d910df23a5caa167c055ea7701d6b73bf3 | 4,557 | py | Python | jans-pycloudlib/jans/pycloudlib/meta/kubernetes_meta.py | JanssenProject/jans | 8d57d01b998bfe87a2377bbe9023dd97fb03cc9f | [
"Apache-2.0"
] | 18 | 2022-01-13T13:45:13.000Z | 2022-03-30T04:41:18.000Z | jans-pycloudlib/jans/pycloudlib/meta/kubernetes_meta.py | JanssenProject/jans | 8d57d01b998bfe87a2377bbe9023dd97fb03cc9f | [
"Apache-2.0"
] | 604 | 2022-01-13T12:32:50.000Z | 2022-03-31T20:27:36.000Z | jans-pycloudlib/jans/pycloudlib/meta/kubernetes_meta.py | JanssenProject/jans | 8d57d01b998bfe87a2377bbe9023dd97fb03cc9f | [
"Apache-2.0"
] | 8 | 2022-01-28T00:23:25.000Z | 2022-03-16T05:12:12.000Z | """This module consists of class to interact with Kubernetes API."""
import logging
import os
import shlex
import tarfile
from tempfile import TemporaryFile
import kubernetes.client
import kubernetes.config
from kubernetes.stream import stream
from jans.pycloudlib.meta.base_meta import BaseMeta
logger = logging.getLogger(__name__)
class KubernetesMeta(BaseMeta):
    """A class to interact with a subset of Kubernetes APIs."""
    def __init__(self):
        """Initialize kubernetes meta wrapper."""
        # The API client is created lazily by the ``client`` property.
        self._client = None
        self.kubeconfig_file = os.path.expanduser("~/.kube/config")
    @property
    def client(self):
        """Get (lazily creating and caching) the kubernetes CoreV1Api client."""
        if not self._client:
            # config loading priority: try the in-cluster service-account
            # config first, then fall back to the local kubeconfig file.
            try:
                kubernetes.config.load_incluster_config()
            except kubernetes.config.config_exception.ConfigException:
                kubernetes.config.load_kube_config(self.kubeconfig_file)
            self._client = kubernetes.client.CoreV1Api()
            # NOTE(review): TLS hostname verification is disabled here —
            # presumably to tolerate cluster certificates; confirm intended.
            self._client.api_client.configuration.assert_hostname = False
        return self._client
    def get_containers(self, label: str) -> list:
        """Get list of pods matching a label selector.
        :params label: Label name, i.e. ``APP_NAME=oxauth``.
        :returns: List of pod objects.
        """
        # Namespace is taken from the environment, defaulting to "default".
        namespace = os.environ.get("CN_CONTAINER_METADATA_NAMESPACE", "default")
        return self.client.list_namespaced_pod(namespace, label_selector=label).items
    def get_container_ip(self, container) -> str:
        """Get pod's IP address.
        :params container: Pod object.
        :returns: IP address associated with the pod.
        """
        return container.status.pod_ip
    def get_container_name(self, container):
        """Get pod's name.
        :params container: Pod object.
        :returns: Pod name.
        """
        return container.metadata.name
    def copy_to_container(self, container, path: str) -> None:
        """Copy a local file or directory into the pod at the same path.
        :params container: Pod object.
        :params path: Path to file or directory.
        """
        # make sure parent directory is created first
        dirname = os.path.dirname(path)
        self.exec_cmd(container, f"mkdir -p {dirname}")
        # copy file implementation: open an exec session that runs ``tar`` in
        # the pod and stream a tar archive of ``path`` over its stdin; the
        # pod side extracts the archive relative to /.
        resp = stream(
            self.client.connect_get_namespaced_pod_exec,
            container.metadata.name,
            container.metadata.namespace,
            command=["tar", "xmvf", "-", "-C", "/"],
            container=self._get_main_container_name(container),
            stderr=True,
            stdin=True,
            stdout=True,
            tty=False,
            _preload_content=False,
        )
        with TemporaryFile() as tar_buffer:
            # Build the archive in a temp file first, then replay it into
            # the exec session's stdin in a single chunk.
            with tarfile.open(fileobj=tar_buffer, mode="w") as tar:
                tar.add(path)
            tar_buffer.seek(0)
            commands = [tar_buffer.read()]
            while resp.is_open():
                resp.update(timeout=1)
                if resp.peek_stdout():
                    logger.debug(f"STDOUT: {resp.read_stdout()}")
                if resp.peek_stderr():
                    logger.debug(f"STDERR: {resp.read_stderr()}")
                if commands:
                    c = commands.pop(0)
                    resp.write_stdin(c)
                else:
                    # Archive fully written; stop pumping the connection.
                    break
            resp.close()
    def exec_cmd(self, container, cmd: str):
        """Run command inside the pod's main container.
        :params container: Pod object.
        :params cmd: String of command.
        """
        return stream(
            self.client.connect_get_namespaced_pod_exec,
            container.metadata.name,
            container.metadata.namespace,
            command=shlex.split(cmd),
            container=self._get_main_container_name(container),
            stderr=True,
            stdin=True,
            stdout=True,
            tty=False,
        )
    def _get_main_container_name(self, container) -> str:
        """Get the pod's main container name.
        The name is read from the ``CN_CONTAINER_MAIN_NAME`` env var; if
        several containers define it, the last one seen wins. Falls back to
        the pod's first container when the env var is absent.
        :param container: Pod object.
        """
        name = ""
        for cntr in container.spec.containers:
            if not cntr.env:
                continue
            for env in cntr.env:
                if env.name == "CN_CONTAINER_MAIN_NAME":
                    name = env.value
                    break
        # add fallback (if needed)
        return name or container.spec.containers[0].name
| 31.212329 | 85 | 0.589862 |
acdf613f505336001b892ce287cace923358f6b1 | 4,373 | py | Python | mux_python/models/video_view_response.py | gts-work/mux-python | 826e52730bad7acd08c31a3e1951a281521f1b4f | [
"MIT"
] | null | null | null | mux_python/models/video_view_response.py | gts-work/mux-python | 826e52730bad7acd08c31a3e1951a281521f1b4f | [
"MIT"
] | null | null | null | mux_python/models/video_view_response.py | gts-work/mux-python | 826e52730bad7acd08c31a3e1951a281521f1b4f | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Mux API
Mux is how developers build online video. This API encompasses both Mux Video and Mux Data functionality to help you build your video-related projects better and faster than ever before. # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
import inspect
import pprint
import re # noqa: F401
import six
from mux_python.configuration import Configuration
class VideoViewResponse(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'data': 'VideoView',
        'timeframe': 'list[int]'
    }
    attribute_map = {
        'data': 'data',
        'timeframe': 'timeframe'
    }
    def __init__(self, data=None, timeframe=None, local_vars_configuration=None):  # noqa: E501
        """VideoViewResponse - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration.get_default_copy()
        self.local_vars_configuration = local_vars_configuration
        self._data = None
        self._timeframe = None
        self.discriminator = None
        if data is not None:
            self.data = data
        if timeframe is not None:
            self.timeframe = timeframe
    @property
    def data(self):
        """Gets the data of this VideoViewResponse.  # noqa: E501
        :return: The data of this VideoViewResponse.  # noqa: E501
        :rtype: VideoView
        """
        return self._data
    @data.setter
    def data(self, data):
        """Sets the data of this VideoViewResponse.
        :param data: The data of this VideoViewResponse.  # noqa: E501
        :type data: VideoView
        """
        self._data = data
    @property
    def timeframe(self):
        """Gets the timeframe of this VideoViewResponse.  # noqa: E501
        :return: The timeframe of this VideoViewResponse.  # noqa: E501
        :rtype: list[int]
        """
        return self._timeframe
    @timeframe.setter
    def timeframe(self, timeframe):
        """Sets the timeframe of this VideoViewResponse.
        :param timeframe: The timeframe of this VideoViewResponse.  # noqa: E501
        :type timeframe: list[int]
        """
        self._timeframe = timeframe
    def to_dict(self, serialize=False):
        """Returns the model properties as a dict.
        When ``serialize`` is True, attribute names are mapped to their JSON
        keys via ``attribute_map``; nested models are converted recursively.
        """
        result = {}
        def convert(x):
            if hasattr(x, "to_dict"):
                # Pass `serialize` through only to models whose to_dict takes
                # more than just `self`.
                # BUG FIX: inspect.getargspec was removed in Python 3.11;
                # getfullargspec is the drop-in replacement here.
                args = inspect.getfullargspec(x.to_dict).args
                if len(args) == 1:
                    return x.to_dict()
                else:
                    return x.to_dict(serialize)
            else:
                return x
        # dict.items() behaves identically on Python 2 and 3, so the six
        # shim (six.iteritems) is unnecessary here.
        for attr, _ in self.openapi_types.items():
            value = getattr(self, attr)
            attr = self.attribute_map.get(attr, attr) if serialize else attr
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: convert(x),
                    value
                ))
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], convert(item[1])),
                    value.items()
                ))
            else:
                result[attr] = convert(value)
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, VideoViewResponse):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, VideoViewResponse):
            return True
        return self.to_dict() != other.to_dict()
| 28.212903 | 205 | 0.578779 |
acdf63dd0b1032c417e2ca8df89eac1c92a02c85 | 1,918 | py | Python | tests/providers/google/cloud/transfers/test_bigquery_to_mysql.py | emilioego/airflow | 3457c7847cd24413ff5b622e65c27d8370f94502 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 79 | 2021-10-15T07:32:27.000Z | 2022-03-28T04:10:19.000Z | tests/providers/google/cloud/transfers/test_bigquery_to_mysql.py | emilioego/airflow | 3457c7847cd24413ff5b622e65c27d8370f94502 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 210 | 2021-07-17T00:25:52.000Z | 2021-12-29T00:44:48.000Z | tests/providers/google/cloud/transfers/test_bigquery_to_mysql.py | emilioego/airflow | 3457c7847cd24413ff5b622e65c27d8370f94502 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 23 | 2021-10-15T02:36:37.000Z | 2022-03-17T02:59:27.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
from airflow.providers.google.cloud.transfers.bigquery_to_mysql import BigQueryToMySqlOperator
TASK_ID = 'test-bq-create-table-operator'
TEST_DATASET = 'test-dataset'
TEST_TABLE_ID = 'test-table-id'
TEST_DAG_ID = 'test-bigquery-operators'
class TestBigQueryToMySqlOperator(unittest.TestCase):
    """Unit tests for BigQueryToMySqlOperator with the BigQuery hook mocked."""
    @mock.patch('airflow.providers.google.cloud.transfers.bigquery_to_mysql.BigQueryHook')
    def test_execute_good_request_to_bq(self, mock_hook):
        """Executing the operator issues exactly one well-formed table-data
        request against the mocked BigQuery hook."""
        destination_table = 'table'
        operator = BigQueryToMySqlOperator(
            task_id=TASK_ID,
            dataset_table=f'{TEST_DATASET}.{TEST_TABLE_ID}',
            mysql_table=destination_table,
            replace=False,
        )
        operator.execute(None)
        # Assert on the mocked hook's cursor: the dataset_table string must be
        # split into dataset_id/table_id and defaults applied.
        # fmt: off
        mock_hook.return_value.get_conn.return_value.cursor.return_value.get_tabledata\
            .assert_called_once_with(
                dataset_id=TEST_DATASET,
                table_id=TEST_TABLE_ID,
                max_results=1000,
                selected_fields=None,
                start_index=0,
            )
        # fmt: on
| 37.607843 | 94 | 0.713764 |
acdf6425bc6edbf874a18f2711608a6f7f3ef56f | 5,622 | py | Python | Doc/includes/mp_benchmarks.py | deadsnakes/python3.2 | c0deccc710b5c1c8dd40a1c6d46a8271b60617f1 | [
"PSF-2.0"
] | 4 | 2016-05-04T07:05:22.000Z | 2020-09-24T00:21:05.000Z | Doc/includes/mp_benchmarks.py | deadsnakes/python3.2 | c0deccc710b5c1c8dd40a1c6d46a8271b60617f1 | [
"PSF-2.0"
] | 1 | 2019-07-04T09:18:21.000Z | 2019-07-04T19:14:03.000Z | Doc/includes/mp_benchmarks.py | deadsnakes/python3.2 | c0deccc710b5c1c8dd40a1c6d46a8271b60617f1 | [
"PSF-2.0"
] | 13 | 2015-04-02T16:49:38.000Z | 2021-10-17T20:14:14.000Z | #
# Simple benchmarks for the multiprocessing package
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
import time
import sys
import multiprocessing
import threading
import queue
import gc
if sys.platform == 'win32':
_timer = time.clock
else:
_timer = time.time
delta = 1
#### TEST_QUEUESPEED
def queuespeed_func(q, c, iterations):
    """Producer: signal readiness on condition `c`, then push `iterations`
    256-byte payloads onto queue `q`, followed by a 'STOP' sentinel."""
    payload = '0' * 256
    with c:
        c.notify()
    for _ in range(iterations):
        q.put(payload)
    q.put('STOP')
def test_queuespeed(Process, q, c):
    """Benchmark throughput of queue `q` with a producer run via `Process`
    (thread or process); `c` is the start-handshake condition."""
    # Keep doubling the workload until one full run takes at least `delta`
    # seconds, so the reported rate is measured over a meaningful interval.
    elapsed = 0
    iterations = 1
    while elapsed < delta:
        iterations *= 2
        p = Process(target=queuespeed_func, args=(q, c, iterations))
        c.acquire()
        p.start()
        c.wait()  # block until the producer signals it has started
        c.release()
        result = None
        t = _timer()
        # Time only the consumer side: drain until the sentinel arrives.
        while result != 'STOP':
            result = q.get()
        elapsed = _timer() - t
        p.join()
    print(iterations, 'objects passed through the queue in', elapsed, 'seconds')
    print('average number/sec:', iterations/elapsed)
#### TEST_PIPESPEED
def pipe_func(c, cond, iterations):
    """Producer: signal readiness on `cond`, then send `iterations` 256-byte
    strings down connection `c`, followed by a 'STOP' sentinel."""
    message = '0' * 256
    with cond:
        cond.notify()
    for _ in range(iterations):
        c.send(message)
    c.send('STOP')
def test_pipespeed():
    """Benchmark multiprocessing.Pipe throughput with a child-process producer."""
    c, d = multiprocessing.Pipe()
    cond = multiprocessing.Condition()
    # Double the workload until a run lasts at least `delta` seconds.
    elapsed = 0
    iterations = 1
    while elapsed < delta:
        iterations *= 2
        p = multiprocessing.Process(target=pipe_func,
                                    args=(d, cond, iterations))
        cond.acquire()
        p.start()
        cond.wait()  # wait for the producer's start signal
        cond.release()
        result = None
        t = _timer()
        # Time the receive loop only, up to and including the sentinel.
        while result != 'STOP':
            result = c.recv()
        elapsed = _timer() - t
        p.join()
    print(iterations, 'objects passed through connection in',elapsed,'seconds')
    print('average number/sec:', iterations/elapsed)
#### TEST_SEQSPEED
def test_seqspeed(seq):
    """Benchmark indexed access (``seq[5]``), doubling the iteration count
    until a run lasts at least `delta` seconds, then report the rate."""
    iterations = 1
    elapsed = 0.0
    while True:
        iterations *= 2
        start = _timer()
        for _ in range(iterations):
            item = seq[5]
        elapsed = _timer() - start
        if elapsed >= delta:
            break
    print(iterations, 'iterations in', elapsed, 'seconds')
    print('average number/sec:', iterations / elapsed)
#### TEST_LOCK
def test_lockspeed(l):
    """Benchmark acquire/release pairs on lock `l`, doubling the count until
    a run lasts at least `delta` seconds, then report the rate."""
    iterations = 1
    elapsed = 0.0
    while True:
        iterations *= 2
        start = _timer()
        for _ in range(iterations):
            l.acquire()
            l.release()
        elapsed = _timer() - start
        if elapsed >= delta:
            break
    print(iterations, 'iterations in', elapsed, 'seconds')
    print('average number/sec:', iterations / elapsed)
#### TEST_CONDITION
def conditionspeed_func(c, N):
    """Echo peer for the condition benchmark: signal readiness once, then
    answer each of the driver's N notifies with a notify of its own."""
    # Hold the condition for the whole exchange so no notify is lost between
    # the initial handshake and the wait loop.
    c.acquire()
    c.notify()  # tell the driver we are running and inside the monitor
    for i in range(N):
        c.wait()
        c.notify()
    c.release()
def test_conditionspeed(Process, c):
    """Benchmark notify/wait round-trips on condition `c` against an echo
    peer run via `Process` (thread or process)."""
    elapsed = 0
    iterations = 1
    while elapsed < delta:
        iterations *= 2
        c.acquire()
        p = Process(target=conditionspeed_func, args=(c, iterations))
        p.start()
        c.wait()  # peer signals that it is ready
        t = _timer()
        # Each loop turn is one ping (notify) / pong (wait) round-trip.
        for i in range(iterations):
            c.notify()
            c.wait()
        elapsed = _timer() - t
        c.release()
        p.join()
    print(iterations * 2, 'waits in', elapsed, 'seconds')
    print('average number/sec:', iterations * 2 / elapsed)
####
def test():
    """Run every benchmark: queues, pipes, sequences, locks and conditions,
    comparing threading, multiprocessing and manager-proxied variants."""
    manager = multiprocessing.Manager()
    # Disable the garbage collector so GC pauses don't skew the timings;
    # re-enabled at the end.
    gc.disable()
    print('\n\t######## testing Queue.Queue\n')
    test_queuespeed(threading.Thread, queue.Queue(),
                    threading.Condition())
    print('\n\t######## testing multiprocessing.Queue\n')
    test_queuespeed(multiprocessing.Process, multiprocessing.Queue(),
                    multiprocessing.Condition())
    print('\n\t######## testing Queue managed by server process\n')
    test_queuespeed(multiprocessing.Process, manager.Queue(),
                    manager.Condition())
    print('\n\t######## testing multiprocessing.Pipe\n')
    test_pipespeed()
    print()
    print('\n\t######## testing list\n')
    test_seqspeed(list(range(10)))
    print('\n\t######## testing list managed by server process\n')
    test_seqspeed(manager.list(list(range(10))))
    print('\n\t######## testing Array("i", ..., lock=False)\n')
    test_seqspeed(multiprocessing.Array('i', list(range(10)), lock=False))
    print('\n\t######## testing Array("i", ..., lock=True)\n')
    test_seqspeed(multiprocessing.Array('i', list(range(10)), lock=True))
    print()
    print('\n\t######## testing threading.Lock\n')
    test_lockspeed(threading.Lock())
    print('\n\t######## testing threading.RLock\n')
    test_lockspeed(threading.RLock())
    print('\n\t######## testing multiprocessing.Lock\n')
    test_lockspeed(multiprocessing.Lock())
    print('\n\t######## testing multiprocessing.RLock\n')
    test_lockspeed(multiprocessing.RLock())
    print('\n\t######## testing lock managed by server process\n')
    test_lockspeed(manager.Lock())
    print('\n\t######## testing rlock managed by server process\n')
    test_lockspeed(manager.RLock())
    print()
    print('\n\t######## testing threading.Condition\n')
    test_conditionspeed(threading.Thread, threading.Condition())
    print('\n\t######## testing multiprocessing.Condition\n')
    test_conditionspeed(multiprocessing.Process, multiprocessing.Condition())
    print('\n\t######## testing condition managed by a server process\n')
    test_conditionspeed(multiprocessing.Process, manager.Condition())
    gc.enable()
if __name__ == '__main__':
    # freeze_support() is needed for frozen Windows executables that spawn
    # child processes.
    multiprocessing.freeze_support()
    test()
| 23.040984 | 80 | 0.592494 |
acdf646875acedd91c9bd68a864c9334a5df4497 | 4,795 | py | Python | machine_learning/decision_tree.py | KirilBangachev/Python | 7ad45a46e02edda86a45969de8768f26ef44b306 | [
"MIT"
] | 3 | 2019-10-11T20:50:59.000Z | 2020-01-21T02:10:10.000Z | machine_learning/decision_tree.py | KirilBangachev/Python | 7ad45a46e02edda86a45969de8768f26ef44b306 | [
"MIT"
] | 1 | 2019-09-01T06:43:06.000Z | 2019-09-01T06:44:55.000Z | machine_learning/decision_tree.py | KirilBangachev/Python | 7ad45a46e02edda86a45969de8768f26ef44b306 | [
"MIT"
] | 3 | 2020-08-05T02:33:42.000Z | 2020-10-13T02:47:01.000Z | """
Implementation of a basic regression decision tree.
Input data set: The input data set must be 1-dimensional with continuous labels.
Output: The decision tree maps a real number input to a real number output.
"""
import numpy as np
class Decision_Tree:
    """A regression decision tree for one-dimensional, continuous data.

    Internal nodes hold a ``decision_boundary`` and route queries to the
    ``left`` (x < boundary) or ``right`` (x >= boundary) subtree; leaves hold
    a constant ``prediction`` — the mean label of their training samples.
    """

    def __init__(self, depth=5, min_leaf_size=5):
        """
        :param depth: maximum remaining depth of this (sub)tree; depth 1
            forces a leaf.
        :param min_leaf_size: minimum number of samples that must land on
            each side of any split.
        """
        self.depth = depth
        self.decision_boundary = 0
        self.left = None
        self.right = None
        self.min_leaf_size = min_leaf_size
        self.prediction = None

    def mean_squared_error(self, labels, prediction):
        """
        mean_squared_error:
        @param labels: a one dimensional numpy array
        @param prediction: a floating point value
        return value: the error incurred by using `prediction` as a constant
        estimate for every entry of `labels`
        """
        if labels.ndim != 1:
            print("Error: Input labels must be one dimensional")
        return np.mean((labels - prediction) ** 2)

    def train(self, X, y):
        """
        train:
        @param X: a one dimensional numpy array (assumed sorted ascending so
            that the index split can be interpreted as the threshold
            X[best_split] — TODO confirm with callers)
        @param y: a one dimensional numpy array of labels for X
        train does not have a return value
        """
        # Validate dimensionality constraints on the inputs.
        if X.ndim != 1:
            print("Error: Input data set must be one dimensional")
            return
        if len(X) != len(y):
            print("Error: X and y have different lengths")
            return
        if y.ndim != 1:
            print("Error: Data set labels must be one dimensional")
            return

        # Too few samples to split, or depth exhausted: become a leaf.
        if len(X) < 2 * self.min_leaf_size or self.depth == 1:
            self.prediction = np.mean(y)
            return

        best_split = 0
        # Baseline: twice the error of predicting the label mean everywhere;
        # a split is accepted only if it beats this.
        # BUG FIX: the error must be computed on the labels y, not on X.
        min_error = self.mean_squared_error(y, np.mean(y)) * 2

        # Scan all candidate split indices; keep the one minimizing the sum
        # of left/right label errors.
        for i in range(len(X)):
            # Skip splits that would leave an undersized leaf.
            if i < self.min_leaf_size or len(X) - i < self.min_leaf_size:
                continue
            # BUG FIX: both side errors were measured on X instead of y.
            error_left = self.mean_squared_error(y[:i], np.mean(y[:i]))
            error_right = self.mean_squared_error(y[i:], np.mean(y[i:]))
            error = error_left + error_right
            if error < min_error:
                best_split = i
                min_error = error

        if best_split != 0:
            self.decision_boundary = X[best_split]
            self.left = Decision_Tree(depth=self.depth - 1, min_leaf_size=self.min_leaf_size)
            self.right = Decision_Tree(depth=self.depth - 1, min_leaf_size=self.min_leaf_size)
            self.left.train(X[:best_split], y[:best_split])
            self.right.train(X[best_split:], y[best_split:])
        else:
            # No split improved on the baseline: become a leaf.
            self.prediction = np.mean(y)
        return

    def predict(self, x):
        """
        predict:
        @param x: a floating point value to predict the label of
        Recursively descends to the appropriate subtree based on the node's
        decision boundary; returns None (with an error message) if untrained.
        """
        if self.prediction is not None:
            return self.prediction
        # Precedence fix: the original `self.left or self.right is not None`
        # parsed as `self.left or (self.right is not None)`.
        if self.left is not None and self.right is not None:
            if x >= self.decision_boundary:
                return self.right.predict(x)
            return self.left.predict(x)
        print("Error: Decision tree not yet trained")
        return None
def main():
    """
    Demonstration: fit the tree to samples of sin(x) on [-1, 1), predict 10
    random test points, and display the mean squared prediction error.
    Output varies between runs because the test points are drawn with
    np.random and no seed is set.
    """
    X = np.arange(-1.0, 1.0, 0.005)
    y = np.sin(X)
    tree = Decision_Tree(depth=10, min_leaf_size=10)
    tree.train(X, y)
    test_cases = (np.random.rand(10) * 2) - 1
    predictions = np.array([tree.predict(x) for x in test_cases])
    # BUG FIX: measure error against the true labels sin(x), not against the
    # raw inputs x themselves.
    avg_error = np.mean((predictions - np.sin(test_cases)) ** 2)
    print("Test values: " + str(test_cases))
    print("Predictions: " + str(predictions))
    print("Average error: " + str(avg_error))
if __name__ == '__main__':
main() | 34.746377 | 106 | 0.589781 |
acdf651f881271d743ce827659ec677ebdd65c94 | 641 | py | Python | urls.py | ppai22/ncovid19-stats-data-analysis | 0bcf77adc7cc0065275b4301ca3de7452eaf635a | [
"MIT"
] | null | null | null | urls.py | ppai22/ncovid19-stats-data-analysis | 0bcf77adc7cc0065275b4301ca3de7452eaf635a | [
"MIT"
] | null | null | null | urls.py | ppai22/ncovid19-stats-data-analysis | 0bcf77adc7cc0065275b4301ca3de7452eaf635a | [
"MIT"
] | null | null | null | import requests
from bs4 import BeautifulSoup
import json
# Scrape the WHO novel-coronavirus situation-report index page and save
# [title, url] pairs (oldest first) to directory.json.
who_url = 'https://www.who.int/emergencies/diseases/novel-coronavirus-2019/situation-reports/'
base_page_source = requests.get(who_url).text
soup = BeautifulSoup(base_page_source, 'lxml')
directory = []
# NOTE(review): [-4] hard-codes which content block holds the report links;
# this is fragile against page-layout changes — verify against the live page.
for content_block in [soup.find_all('div', class_='sf-content-block content-block')[-4]]:
    for item in content_block.find_all('p'):
        try:
            directory.append([item.strong.text, 'https://www.who.int' + item.a['href']])
        except:
            # NOTE(review): bare except skips <p> tags lacking <strong>/<a>,
            # but also silently hides every other error.
            pass
# The page lists newest reports first; reverse to chronological order.
directory = directory[::-1]
with open('directory.json', 'w+') as f:
    json.dump(directory, f, indent=4)
| 30.52381 | 94 | 0.687988 |
acdf6529825e1a361b39a31d9960c537b684f3b7 | 4,482 | py | Python | tensorbay/opendataset/CACD/loader.py | machearn/tensorbay-python-sdk | 5c96a5f4c0028c7bec0764f2d0142b29597ec3a9 | [
"MIT"
] | 73 | 2021-02-24T12:23:26.000Z | 2022-03-12T13:00:31.000Z | tensorbay/opendataset/CACD/loader.py | machearn/tensorbay-python-sdk | 5c96a5f4c0028c7bec0764f2d0142b29597ec3a9 | [
"MIT"
] | 681 | 2021-02-25T07:34:17.000Z | 2022-03-25T07:08:23.000Z | tensorbay/opendataset/CACD/loader.py | machearn/tensorbay-python-sdk | 5c96a5f4c0028c7bec0764f2d0142b29597ec3a9 | [
"MIT"
] | 35 | 2021-02-24T12:00:45.000Z | 2022-03-30T06:43:13.000Z | #!/usr/bin/env python3
#
# Copyright 2021 Graviti. Licensed under MIT License.
#
# pylint: disable=invalid-name
"""Dataloader of CACD dataset."""
import os
from typing import Any, Dict, Tuple
from tensorbay.dataset import Data, Dataset
from tensorbay.exception import ModuleImportError
from tensorbay.label import CategoryInfo, Classification, ClassificationSubcatalog
from tensorbay.opendataset._utility import glob
from tensorbay.utility import NameList
DATASET_NAME = "CACD"
# Classification attribute schema shared by every sample; loaded into the
# subcatalog by _get_subcatalog().
_ATTRIBUTES = {
    "attributes": [
        {
            "name": "name",
            "type": "string",
            "description": "celebrity name",
        },
        {
            "name": "age",
            "type": "number",
            "minimum": 14,
            "maximum": 62,
            "description": "Estimated age of the celebrity",
        },
        {
            "name": "birth",
            "type": "number",
            "minimum": 1951,
            "maximum": 1990,
            "description": "Celebrity birth year",
        },
        {
            "name": "range",
            "type": "number",
            "minimum": 1,
            "maximum": 50,
            "description": "Rank of the celebrity with same birth year in IMDB.com",
        },
        {
            "name": "year",
            "type": "number",
            "minimum": 2004,
            "maximum": 2013,
            "description": "Estimated year of which the photo was taken",
        },
        {
            "name": "lfw",
            "type": "boolean",
            "description": "Whether the celebrity is in LFW dataset.",
        },
    ]
}
# Keys of the MATLAB struct arrays read from celebrity2000.mat, in the order
# they are unpacked by _get_labels_map().
_MAT_KEYS = ("name", "identity", "age", "birth", "lfw", "rank", "year")
def CACD(path: str) -> Dataset:
    """`Cross-Age Celebrity Dataset (CACD) <https://bcsiriuschen.github.io/CARC/>`_ dataset.
    The file structure should be like::
        <path>
            CACD2000/
                14_Aaron_Johnson_0001.jpg
                ...
            celebrity2000.mat
    Arguments:
        path: The root directory of the dataset.
    Returns:
        Loaded :class:`~tensorbay.dataset.dataset.Dataset` instance.
    """
    root = os.path.abspath(os.path.expanduser(path))
    dataset = Dataset(DATASET_NAME)
    dataset.catalog.classification = _get_subcatalog()
    segment = dataset.create_segment()
    jpg_paths = glob(os.path.join(root, "CACD2000", "*.jpg"))
    labels = _get_labels_map(os.path.join(root, "celebrity2000.mat"))
    for jpg_path in jpg_paths:
        # Each image's category/attributes are keyed by its file name.
        category, attribute = labels[os.path.basename(jpg_path)]
        sample = Data(jpg_path)
        sample.label.classification = Classification(category, attribute)
        segment.append(sample)
    return dataset
def _get_labels_map(path: str) -> Dict[str, Tuple[str, Dict[str, Any]]]:
    """Get celebrity_image_data from .mat file.
    Arguments:
        path: Path to the ``celebrity2000.mat`` file.
    Raises:
        ModuleImportError: When the module "h5py" can not be found.
    Returns:
        A dict mapping image file name to a (category, attributes) tuple,
        where category is the zero-padded celebrity identity.
    """
    try:
        from h5py import File  # pylint: disable=import-outside-toplevel
    except ModuleNotFoundError as error:
        raise ModuleImportError(module_name=error.name) from error
    mat_file = File(path, "r")
    celebrity_image_data = mat_file["celebrityImageData"]
    celebrity_data = mat_file["celebrityData"]
    # Name is a h5r object which can be searched in .mat file.
    # Build identity -> celebrity-name lookup by dereferencing each name ref.
    id2name_map = {
        identity: _hdf5_to_str(mat_file[name])
        for identity, name in zip(celebrity_data["identity"][0], celebrity_data["name"][0])
    }
    labels_map = {}
    # The "name" is not the name of the celebrity but the name of the image file.
    # Iterate the per-image columns of celebrityImageData in lockstep.
    for name, identity, *values in zip(*(celebrity_image_data[key][0] for key in _MAT_KEYS)):
        attribute = {"name": id2name_map[identity]}
        # Remaining _MAT_KEYS (age, birth, lfw, rank, year) pair with values.
        attribute.update(zip(_MAT_KEYS[2:], values))
        labels_map[_hdf5_to_str(mat_file[name])] = (str(int(identity)).zfill(4), attribute)
    return labels_map
def _get_subcatalog() -> ClassificationSubcatalog:
    """Build the classification subcatalog: the shared attribute schema plus
    one zero-padded category ("0001" .. "2000") per celebrity identity."""
    subcatalog = ClassificationSubcatalog.loads(_ATTRIBUTES)
    category_list: NameList[CategoryInfo] = NameList()
    for identity in range(1, 2001):
        category_list.append(CategoryInfo(f"{identity:04d}"))
    subcatalog.categories = category_list
    return subcatalog
def _hdf5_to_str(hdf5_string: Any) -> str:
return "".join(chr(c[0]) for c in hdf5_string)
| 31.342657 | 93 | 0.617581 |
acdf65513c41402fce754da4d8fe818f1dc68a69 | 10,084 | py | Python | stoq/tests/test_plugin_manager.py | ytreister/stoq | 8bfc78b226ee6500eb78e1bdf361fc83bc5005b7 | [
"Apache-2.0"
] | 385 | 2015-11-20T02:21:18.000Z | 2022-03-24T09:38:24.000Z | stoq/tests/test_plugin_manager.py | ytreister/stoq | 8bfc78b226ee6500eb78e1bdf361fc83bc5005b7 | [
"Apache-2.0"
] | 127 | 2016-07-08T20:23:20.000Z | 2022-02-23T13:52:19.000Z | stoq/tests/test_plugin_manager.py | ytreister/stoq | 8bfc78b226ee6500eb78e1bdf361fc83bc5005b7 | [
"Apache-2.0"
] | 68 | 2015-11-20T12:51:44.000Z | 2022-01-25T04:35:54.000Z | #!/usr/bin/env python3
# Copyright 2014-2018 PUNCH Cyber Analytics Group
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import unittest
from typing import Optional
import json
from stoq import Stoq, StoqException, StoqPluginNotFound
from stoq.data_classes import Payload, WorkerResponse, Request
from stoq.plugin_manager import StoqPluginManager
from stoq.plugins import WorkerPlugin
import stoq.tests.utils as utils
class TestPluginManager(unittest.TestCase):
DUMMY_PLUGINS = [
'dummy_archiver',
'dummy_connector',
'dummy_provider',
'dummy_worker',
'dummy_decorator',
]
    def setUp(self) -> None:
        """Silence all logging so expected plugin-load errors don't clutter
        the test output."""
        logging.disable(logging.CRITICAL)
    def tearDown(self) -> None:
        """Re-enable logging after each test."""
        logging.disable(logging.NOTSET)
def test_no_plugins(self):
pm = StoqPluginManager([])
self.assertEqual(len(pm.list_plugins()), 0)
def test_collect_plugins(self):
pm = StoqPluginManager([utils.get_plugins_dir()])
collected_plugins = pm.list_plugins()
for name in self.DUMMY_PLUGINS:
self.assertIn(name, collected_plugins)
def test_plugin_objects(self):
pm = StoqPluginManager([utils.get_plugins_dir()])
simple_worker = pm.load_plugin('simple_worker')
self.assertEqual('simple_worker', simple_worker.plugin_name)
self.assertEqual('Marcus LaFerrera', simple_worker.__author__)
self.assertEqual('0.1', simple_worker.__version__)
self.assertEqual(
'https://github.com/PUNCH-Cyber/stoq-plugins-public',
simple_worker.__website__,
)
self.assertEqual('Simple stoQ Worker plugin', simple_worker.__description__)
def test_plugin_missing_objects(self):
pm = StoqPluginManager([utils.get_invalid_plugins_dir()])
worker = pm.load_plugin('missing_config_objects')
self.assertEqual('missing_config_objects', worker.plugin_name)
self.assertEqual('', worker.__author__)
self.assertEqual('', worker.__version__)
self.assertEqual('', worker.__website__)
self.assertEqual('', worker.__description__)
def test_multiple_dirs(self):
pm = StoqPluginManager([utils.get_plugins_dir(), utils.get_plugins2_dir()])
collected_plugins = pm.list_plugins()
for name in self.DUMMY_PLUGINS + ['dummy_worker2']:
self.assertIn(name, collected_plugins)
def test_collect_one_invalid_dir(self):
# Verify that the invalid directory doesn't cause an exception
pm = StoqPluginManager([utils.get_plugins_dir(), '/no/way/this/exists'])
self.assertGreater(len(pm.list_plugins()), 0)
def test_collect_invalid_config(self):
pm = StoqPluginManager([utils.get_invalid_plugins_dir()])
collected_plugins = pm.list_plugins()
self.assertNotIn('missing_module', collected_plugins)
self.assertNotIn('invalid_config', collected_plugins)
def test_load_plugin(self):
pm = StoqPluginManager([utils.get_plugins_dir()])
for name in self.DUMMY_PLUGINS:
pm.load_plugin(name)
def test_load_plugin_nonexistent(self):
pm = StoqPluginManager([utils.get_plugins_dir()])
with self.assertRaises(StoqPluginNotFound):
pm.load_plugin('this_plugin_does_not_exist')
def test_load_non_plugin(self):
pm = StoqPluginManager([utils.get_invalid_plugins_dir()])
collected_plugins = pm.list_plugins()
# The plugin should be collected even though it is invalid at load time
self.assertIn('missing_plugin', collected_plugins)
with self.assertRaises(StoqException):
pm.load_plugin('missing_plugin')
def test_load_multiple_plugins_in_module(self):
pm = StoqPluginManager([utils.get_invalid_plugins_dir()])
collected_plugins = pm.list_plugins()
# The plugin should be collected even though it is invalid at load time
self.assertIn('multiple_plugins_in_module', collected_plugins)
with self.assertRaises(StoqException):
pm.load_plugin('multiple_plugins_in_module')
def test_no_reload(self):
pm = StoqPluginManager([utils.get_plugins_dir()])
worker = pm.load_plugin('dummy_worker')
self.assertIsNotNone(worker)
worker2 = pm.load_plugin('dummy_worker')
self.assertIs(worker, worker2) # Same object
def test_plugin_config(self):
    """Options declared in the plugin's config file are parsed correctly.

    Fix: dropped the unused ``as exc`` binding on the assertRaises context
    manager — the exception object was captured but never inspected.
    """
    manager = StoqPluginManager([utils.get_plugins_dir()])
    plugin = manager.load_plugin('configurable_worker')
    self.assertEqual(plugin.get_important_option(), 'cybercybercyber')
    # Test StoqConfigParser.getjson reading from configuration file
    self.assertEqual(plugin.getjson_option('list'), ['item1', 'item2'])
    self.assertEqual(plugin.getjson_option('dict'), {'key': 'value'})
    self.assertEqual(plugin.getjson_option('sq_dict'), {"bar'foo": "value"})
    with self.assertRaises(json.decoder.JSONDecodeError):
        plugin.getjson_option('invalid')
    # Test fallback: a missing option yields the default empty dict.
    self.assertEqual(plugin.getjson_option('doesnotexist'), {})
def test_plugin_opts(self):
    """plugin_opts passed to the manager override config-file options.

    Fix: dropped the unused ``as exc`` binding on the assertRaises context
    manager — the exception object was captured but never inspected.
    """
    manager = StoqPluginManager(
        [utils.get_plugins_dir()],
        {'configurable_worker': {
            'crazy_runtime_option': 16,
            'list': json.dumps(['item3', 'item4']),
            'dict': json.dumps({'key1': 'value1'}),
            'invalid': 'invalid json blob',
            'sq_dict': json.dumps({"foo'bar": "value"}),
            'dq_dict': json.dumps({'foo"bar': "value"}),
        }},
    )
    plugin = manager.load_plugin('configurable_worker')
    self.assertEqual(plugin.get_crazy_runtime_option(), 16)
    # Test StoqConfigParser.getjson reading from plugin_opts
    self.assertEqual(plugin.getjson_option('list'), ['item3', 'item4'])
    self.assertEqual(plugin.getjson_option('dict'), {'key1': 'value1'})
    self.assertEqual(plugin.getjson_option('sq_dict'), {"foo'bar": "value"})
    self.assertEqual(plugin.getjson_option('dq_dict'), {'foo"bar': "value"})
    with self.assertRaises(json.decoder.JSONDecodeError):
        plugin.getjson_option('invalid')
def test_plugin_opts_from_stoq_cfg(self):
    """Plugin options declared in stoq.cfg are visible to loaded plugins."""
    stoq = Stoq(base_dir=utils.get_data_dir())

    worker = stoq.load_plugin('configurable_worker')
    self.assertEqual(
        worker.config.getboolean('options', 'worker_test_option_bool'), True
    )
    self.assertEqual(
        worker.config.get('options', 'worker_test_option_str'),
        'Worker Testy McTest Face',
    )
    self.assertEqual(worker.config.getint('options', 'worker_test_option_int'), 10)

    connector = stoq.load_plugin('dummy_connector')
    self.assertEqual(
        connector.config.getboolean('options', 'connector_test_option_bool'), False
    )
    self.assertEqual(
        connector.config.get('options', 'Connector_test_option_str'),
        'Connector Testy McTest Face',
    )
    self.assertEqual(
        connector.config.getint('options', 'connector_test_option_int'), 5
    )
def test_plugin_opts_precedence(self):
    """plugin_opts given to Stoq() take precedence over stoq.cfg values."""
    overrides = {
        'configurable_worker': {
            'worker_test_option_bool': False,
            'worker_test_option_str': 'Test string',
            'worker_test_option_int': 20,
        }
    }
    stoq = Stoq(base_dir=utils.get_data_dir(), plugin_opts=overrides)

    plugin = stoq.load_plugin('configurable_worker')
    self.assertEqual(
        plugin.config.getboolean('options', 'worker_test_option_bool'), False
    )
    self.assertEqual(
        plugin.config.get('options', 'worker_test_option_str'), 'Test string'
    )
    self.assertEqual(plugin.config.getint('options', 'worker_test_option_int'), 20)
def test_min_stoq_version(self):
    """A plugin requiring a newer stoq version still loads, with a warning."""
    manager = StoqPluginManager([utils.get_invalid_plugins_dir()])
    # setUp() disables all log calls globally; re-enable logging here so
    # assertLogs can actually observe the warning emitted during load.
    logging.disable(logging.NOTSET)
    with self.assertLogs(level='WARNING'):
        loaded = manager.load_plugin('incompatible_min_stoq_version')
    self.assertIsNotNone(loaded)
def test_plugin_override(self):
    """
    Verify that if plugin directories have plugins with duplicate names,
    the one in the last specified directory will be used.

    Fixes: the original reused the plugin list collected from the FIRST
    manager when asserting on the second one (a stale variable); the list
    is now refreshed. The over-broad ``assertRaises(Exception)`` is also
    narrowed to ``AttributeError``, which is what a missing attribute raises.
    """
    manager = StoqPluginManager([utils.get_plugins_dir(), utils.get_plugins2_dir()])
    collected_plugins = manager.list_plugins()
    self.assertIn('dummy_worker', collected_plugins)
    worker = manager.load_plugin('dummy_worker')
    self.assertTrue(worker.PLUGINS2_DUP_MARKER)

    # Reverse the directory order: the plugins/ copy should now win.
    manager = StoqPluginManager([utils.get_plugins2_dir(), utils.get_plugins_dir()])
    collected_plugins = manager.list_plugins()  # refresh from the new manager
    self.assertIn('dummy_worker', collected_plugins)
    worker = manager.load_plugin('dummy_worker')
    # The plugins/ copy has no marker attribute at all.
    with self.assertRaises(AttributeError):
        worker.PLUGINS2_DUP_MARKER
class ExampleExternalPlugin(WorkerPlugin):
    """Minimal WorkerPlugin subclass used as an externally-defined fixture."""

    # Intentionally override this method to not require the config argument
    def __init__(self):
        pass

    async def scan(
        self, payload: Payload, request: Request, *args
    ) -> Optional[WorkerResponse]:
        # No-op scan; implicitly returns None.
        pass
class NoParentClassPlugin:
    """Plugin-shaped class that does NOT subclass a stoq plugin base.

    NOTE(review): presumably a fixture for verifying that plugin discovery
    rejects classes lacking the required parent class — confirm with callers.
    """

    async def scan(
        self, payload: Payload, request: Request, *args
    ) -> Optional[WorkerResponse]:
        # No-op scan; implicitly returns None.
        pass
| 40.175299 | 87 | 0.664518 |
acdf66e0c4bd9a8817227009ac2144ef912966b0 | 20,208 | py | Python | 2020/2020_16a.py | davidxiao93/Advent-of-Code | 29503100ae4eb46b048fc3ab68ff0181c6f00ee5 | [
"MIT"
] | null | null | null | 2020/2020_16a.py | davidxiao93/Advent-of-Code | 29503100ae4eb46b048fc3ab68ff0181c6f00ee5 | [
"MIT"
] | null | null | null | 2020/2020_16a.py | davidxiao93/Advent-of-Code | 29503100ae4eb46b048fc3ab68ff0181c6f00ee5 | [
"MIT"
] | null | null | null | from typing import List, Dict
input = """departure location: 27-840 or 860-957
departure station: 28-176 or 183-949
departure platform: 44-270 or 277-967
departure track: 33-197 or 203-957
departure date: 47-660 or 677-955
departure time: 45-744 or 758-971
arrival location: 42-636 or 642-962
arrival station: 44-243 or 252-962
arrival platform: 46-428 or 449-949
arrival track: 25-862 or 876-951
class: 26-579 or 585-963
duration: 38-683 or 701-949
price: 41-453 or 460-970
route: 48-279 or 292-963
row: 33-617 or 637-955
seat: 39-328 or 351-970
train: 35-251 or 264-957
type: 25-380 or 389-951
wagon: 42-461 or 480-965
zone: 33-768 or 789-954
your ticket:
83,53,73,139,127,131,97,113,61,101,107,67,79,137,89,109,103,59,149,71
nearby tickets:
541,797,657,536,243,821,805,607,97,491,714,170,714,533,363,491,896,399,710,865
351,879,143,113,228,415,393,714,163,171,233,726,422,469,706,264,83,354,309,915
590,56,148,311,729,76,884,352,590,419,205,393,287,761,305,838,76,762,390,914
77,828,197,946,568,599,610,145,307,741,536,4,617,491,158,879,895,794,718,219
828,483,885,827,313,539,602,928,604,74,199,595,733,519,238,909,681,77,238,356
881,557,93,240,193,509,488,767,561,912,553,909,333,185,510,428,720,804,409,765
944,242,652,683,876,592,315,225,378,231,862,975,647,371,707,657,726,937,577,509
251,196,933,499,911,718,800,71,128,891,595,723,121,401,158,494,928,886,212,937
406,168,815,115,643,95,909,878,313,50,656,146,706,904,300,177,505,114,799,521
400,76,450,113,305,98,490,558,475,941,449,104,139,392,508,790,571,79,85,557
488,378,898,461,737,127,144,99,221,212,555,174,454,535,717,736,209,216,928,643
405,532,390,61,118,838,633,506,304,726,266,502,306,311,144,88,74,586,171,654
423,890,925,611,63,762,169,659,318,539,498,404,926,814,541,619,404,392,655,118
51,899,946,736,429,701,547,603,893,946,609,300,568,361,50,299,902,905,311,94
394,556,125,351,366,940,589,215,5,243,570,725,452,794,767,610,524,645,823,613
899,268,420,238,161,602,113,363,322,74,363,413,140,208,11,374,491,322,376,184
415,922,592,98,729,352,511,879,846,523,145,219,301,357,820,163,268,145,320,307
201,484,234,368,451,322,316,763,299,902,815,159,116,133,419,912,389,314,585,120
884,353,903,83,396,417,652,807,223,659,538,540,943,377,151,130,908,347,120,302
598,712,636,711,610,94,558,409,95,659,713,918,509,120,169,99,370,352,563,135
319,874,277,419,324,264,534,600,481,313,176,815,67,660,493,389,616,91,827,719
654,167,659,104,932,571,300,737,226,758,996,369,565,460,220,140,568,709,461,322
112,943,186,293,499,267,209,517,940,449,493,407,261,216,397,881,813,707,95,824
530,409,499,390,477,801,398,375,940,518,426,215,421,910,79,206,461,115,942,532
126,573,654,75,124,218,647,766,542,734,577,224,679,911,107,546,418,199,394,236
136,892,204,806,125,389,876,834,159,185,549,280,140,175,792,553,69,299,397,154
724,654,937,593,677,998,215,605,766,823,174,358,561,277,401,309,239,758,196,807
653,534,574,999,208,767,206,405,565,718,73,908,558,914,360,50,80,208,889,268
600,99,145,391,371,141,710,643,882,592,232,579,370,710,390,672,185,485,702,399
887,78,156,558,415,681,63,242,99,57,603,451,327,821,229,612,602,758,721,747
644,561,312,306,531,809,88,212,550,794,169,813,553,373,845,376,701,808,318,306
289,708,731,551,65,358,63,791,725,765,190,609,143,706,306,732,410,603,517,929
723,605,561,103,4,679,524,124,159,57,239,561,481,507,313,724,502,826,154,821
210,301,0,94,89,730,932,939,401,105,569,378,197,703,313,327,642,410,512,503
608,345,538,241,90,896,50,827,536,803,921,902,535,723,359,482,935,122,704,266
734,589,417,279,167,730,312,86,159,813,733,305,511,611,425,756,268,602,915,587
409,503,521,205,714,416,642,488,191,896,922,504,887,134,589,948,889,90,802,472
206,70,82,710,318,418,411,821,374,925,150,865,320,568,376,725,909,559,128,67
368,576,899,325,833,871,84,520,762,183,404,216,545,888,317,679,908,742,52,409
733,268,265,579,419,557,948,125,563,239,659,727,571,997,537,609,364,322,160,53
662,948,394,209,905,811,55,495,509,828,405,233,150,307,826,608,93,714,449,485
126,643,811,679,714,373,549,270,198,225,103,327,824,270,902,936,703,828,326,647
838,89,562,419,325,410,800,196,805,586,976,703,107,589,861,796,354,861,322,514
237,682,935,403,410,942,554,324,75,352,702,313,486,680,183,251,570,892,813,572
252,103,64,712,713,604,197,805,67,305,914,727,556,702,224,361,151,65,601,97
943,498,499,537,140,305,305,679,550,353,76,57,571,825,436,413,107,394,415,190
721,906,69,487,88,389,928,552,525,537,320,730,529,309,584,731,321,708,715,56
140,491,884,567,929,228,571,109,915,509,817,832,570,913,19,120,710,609,827,883
797,60,491,351,551,498,90,646,278,569,715,406,492,374,177,194,554,54,794,87
577,550,223,110,917,563,815,326,149,241,17,878,222,683,913,399,606,207,541,613
900,752,730,709,380,593,324,236,533,726,306,400,173,650,66,570,102,278,830,307
145,650,614,559,948,496,57,562,533,299,527,795,568,505,749,354,91,215,81,115
543,373,520,703,120,711,768,91,112,152,308,876,479,126,486,69,303,209,836,825
328,325,596,84,683,267,393,892,701,184,373,565,654,505,647,597,23,828,188,536
876,653,389,4,66,615,220,485,416,513,923,60,796,804,942,172,917,587,789,838
367,231,705,936,933,524,361,920,439,293,302,941,50,792,678,721,211,650,609,188
839,676,501,536,482,929,508,213,926,542,115,105,419,298,921,537,761,737,223,147
820,561,768,529,293,304,121,51,338,220,214,206,701,524,243,578,938,918,306,406
91,204,805,555,911,379,304,883,458,914,562,813,205,143,531,650,325,410,71,80
547,93,227,402,735,226,194,426,721,741,515,96,426,554,616,289,324,916,218,923
940,537,535,552,905,65,568,949,634,94,523,679,898,103,938,157,191,265,709,713
311,216,653,789,317,506,515,492,551,976,826,389,532,609,312,912,762,132,743,944
452,605,325,834,921,592,888,760,917,176,534,704,805,895,280,266,97,131,130,98
80,564,417,795,819,543,554,313,816,150,677,589,653,600,609,276,735,101,906,907
16,574,153,592,135,220,372,562,152,876,919,395,768,600,235,542,412,297,522,227
173,86,76,875,157,732,452,173,648,789,649,326,649,613,68,413,84,410,744,53
514,17,314,413,292,517,450,890,575,710,391,269,564,880,916,367,809,111,898,814
240,603,515,367,719,198,883,558,936,391,449,295,743,241,412,496,372,896,594,679
188,575,817,818,558,494,69,586,169,596,134,394,353,335,421,800,565,293,304,563
416,474,763,323,759,588,883,526,902,827,528,102,500,353,542,234,681,615,822,495
597,107,369,948,546,292,571,530,944,92,916,118,792,767,204,61,273,316,919,490
891,642,127,830,544,483,104,494,928,223,395,906,665,114,941,323,132,75,573,560
212,75,485,53,740,185,488,539,861,303,102,199,183,799,490,945,352,428,238,157
321,127,882,657,212,236,425,188,908,941,657,662,933,295,554,352,104,101,531,358
698,105,705,604,129,325,452,597,575,574,129,680,400,366,427,103,125,121,266,566
106,151,175,61,212,806,276,453,370,806,494,832,175,321,265,807,539,495,139,159
379,568,186,587,93,947,748,495,53,940,727,882,542,143,918,896,484,395,137,213
575,536,486,83,217,58,169,98,577,679,517,878,854,705,736,731,96,51,301,807
722,921,188,807,488,555,311,170,643,480,229,736,16,266,426,501,900,100,593,65
206,814,449,789,116,236,3,679,133,87,495,391,312,596,567,427,417,121,183,795
107,326,96,399,149,485,204,215,171,570,644,517,275,837,921,533,231,295,128,293
106,151,74,607,920,391,50,320,680,411,720,629,826,547,514,277,227,277,566,552
301,148,917,682,155,121,712,909,495,105,842,195,239,313,764,234,906,302,209,119
795,278,913,612,807,223,301,547,608,351,185,297,808,941,775,269,93,449,705,135
406,509,678,608,365,153,81,608,426,409,940,533,626,124,818,577,105,650,134,266
10,116,99,838,568,356,375,944,564,823,947,824,542,553,939,534,371,507,905,882
544,74,827,77,164,224,169,84,751,593,730,512,300,556,227,152,167,538,225,531
173,368,680,210,343,680,323,605,159,549,558,509,560,414,82,601,762,702,920,657
611,515,735,306,71,363,921,456,926,142,586,614,129,566,293,948,304,564,71,372
106,130,372,363,561,305,942,75,683,549,216,394,363,719,831,420,626,763,96,63
738,546,896,602,198,943,601,54,526,316,924,548,677,410,489,85,373,85,371,798
941,136,474,881,510,314,461,301,491,500,105,540,309,375,234,575,879,576,795,389
132,516,568,324,390,835,526,243,205,155,77,656,688,678,487,185,231,792,890,916
727,807,221,14,836,763,450,727,368,889,547,229,592,127,106,424,86,279,143,193
178,833,228,212,794,534,321,710,544,410,644,531,317,712,722,714,484,616,265,404
570,410,948,312,823,678,811,843,833,794,802,142,366,82,235,712,142,884,367,510
378,902,278,796,358,396,421,936,564,836,935,852,707,732,428,371,304,228,681,924
794,125,133,598,867,88,278,192,525,564,647,551,109,876,119,577,740,196,596,562
742,649,923,596,408,497,151,294,135,542,892,402,122,146,393,491,871,651,655,862
720,677,372,191,573,11,114,413,74,395,721,416,365,193,812,131,91,643,360,608
235,658,490,118,498,804,243,181,505,861,81,396,505,595,817,742,411,733,678,215
236,420,146,210,524,598,741,94,144,140,389,415,267,400,647,689,811,743,211,553
545,418,799,653,650,567,560,68,551,102,376,339,511,267,503,130,890,374,51,118
520,701,547,509,528,588,196,16,278,713,643,797,163,718,650,886,830,561,512,714
811,129,311,75,506,370,377,825,540,59,720,710,609,765,258,170,224,173,819,299
560,532,997,711,546,59,499,163,412,482,235,83,681,52,922,646,316,899,160,659
552,885,703,793,650,91,390,210,612,376,422,471,677,495,300,919,141,712,392,496
907,575,898,924,594,742,875,418,98,56,160,643,826,139,123,533,208,73,794,412
212,76,421,74,548,896,830,238,209,730,394,648,199,265,352,552,509,138,213,616
604,391,822,380,60,176,277,814,893,359,652,893,818,934,201,944,131,547,732,64
947,824,609,70,362,608,569,588,911,828,218,612,401,451,244,586,130,209,501,270
807,761,525,157,417,749,529,655,422,302,190,395,892,145,947,417,129,917,511,828
279,378,717,948,630,647,145,351,937,617,209,893,744,366,176,491,568,233,420,173
535,889,730,766,96,11,133,903,113,679,762,940,512,99,176,593,899,78,534,810
461,533,427,491,711,470,214,363,205,878,508,401,77,103,884,816,566,147,183,890
391,824,701,494,594,596,61,418,103,22,193,924,577,419,918,450,927,884,524,494
110,594,156,727,274,146,655,814,819,352,518,934,52,939,323,567,740,839,899,810
731,60,568,878,241,213,146,544,732,184,133,556,732,203,634,562,228,884,681,600
761,725,169,926,239,223,428,828,880,900,491,557,947,268,882,429,324,656,399,485
409,518,365,235,449,730,895,899,867,296,518,361,138,506,365,520,682,514,928,713
717,104,406,834,95,132,126,765,613,356,866,818,323,714,152,411,650,377,923,193
96,209,87,113,392,98,392,895,907,840,296,228,650,208,303,872,409,893,270,825
652,367,417,899,507,375,526,231,806,67,545,826,76,306,687,400,885,554,295,652
722,514,361,916,517,220,236,657,801,878,458,102,367,117,543,212,304,364,571,60
827,53,511,882,589,214,302,949,512,807,664,150,243,394,131,159,106,317,372,554
496,496,866,298,104,803,502,189,561,402,909,739,241,901,545,498,922,123,522,577
578,178,230,528,325,765,723,946,600,176,242,123,599,231,113,925,461,357,531,612
539,621,683,505,823,597,86,742,513,710,763,497,161,830,374,709,543,138,816,323
365,274,487,734,302,593,590,57,511,423,643,131,379,887,701,154,292,380,730,759
163,896,825,194,407,806,596,122,557,417,179,766,565,644,234,491,653,916,899,131
738,482,733,678,391,744,650,830,544,578,604,830,328,490,574,380,726,204,765,630
554,217,564,809,702,178,798,216,73,111,67,165,153,612,267,879,319,895,328,503
323,541,221,420,918,239,450,512,231,118,279,542,297,814,663,766,317,322,801,226
929,590,136,222,642,596,879,111,275,728,425,175,523,423,607,789,647,143,522,892
300,184,498,154,457,899,224,574,878,204,789,297,101,493,653,303,169,389,833,416
54,496,607,898,3,133,791,558,917,828,190,81,861,310,810,225,521,105,725,831
73,460,927,303,215,79,926,894,220,575,910,548,893,395,520,812,472,716,526,876
233,907,216,95,821,537,155,831,327,703,703,219,171,993,935,306,818,560,718,502
293,545,890,893,653,272,492,825,881,305,126,119,482,739,138,609,818,516,358,886
720,929,314,537,313,231,722,199,876,572,414,642,586,884,817,94,134,172,759,421
62,789,808,939,554,588,713,375,226,821,536,603,736,203,821,610,178,742,295,409
134,828,762,830,524,355,555,948,824,230,806,150,640,911,530,189,608,61,925,513
657,681,517,576,126,683,310,924,766,822,544,445,361,493,821,372,928,903,530,556
316,107,507,920,111,21,912,232,543,893,353,803,521,396,53,538,528,117,766,826
84,947,862,567,281,836,801,84,197,562,317,481,938,394,215,649,550,831,617,758
124,977,495,880,192,362,605,192,758,759,59,556,483,135,216,238,907,317,910,501
320,683,353,358,277,815,934,266,159,614,239,257,185,149,404,239,885,169,801,533
85,511,481,460,766,396,499,403,420,768,919,646,359,826,708,903,902,476,141,117
453,396,173,103,729,228,911,667,296,574,277,821,546,808,210,106,556,552,278,324
572,508,894,606,826,910,325,427,740,63,491,860,429,494,646,806,298,482,527,885
802,227,143,726,390,592,931,61,179,653,380,896,219,920,400,929,839,823,497,701
493,204,817,489,234,69,229,319,800,808,421,813,775,607,735,792,419,494,219,210
79,923,359,391,240,914,85,529,450,577,274,397,140,731,194,862,240,730,553,894
803,316,606,53,130,940,861,728,401,796,86,566,132,194,66,567,350,704,832,765
642,803,395,408,732,151,79,341,596,269,587,106,297,150,83,943,175,327,537,943
549,915,678,224,656,82,159,239,574,534,724,459,607,550,614,565,360,376,68,933
136,520,654,707,930,121,471,943,415,720,646,135,195,214,489,268,644,63,939,187
538,317,162,917,990,377,528,366,937,174,165,894,563,572,265,835,173,922,529,373
134,321,614,617,420,914,818,113,572,218,746,91,321,741,729,913,806,489,237,189
735,76,415,989,243,881,321,505,427,555,922,713,551,172,558,59,736,402,98,560
543,76,645,106,212,742,74,304,192,237,115,274,379,359,529,650,549,678,803,924
942,566,933,836,831,292,108,678,324,574,67,178,63,229,67,269,733,513,356,424
515,88,538,172,12,829,563,733,564,896,205,118,174,228,58,606,720,355,188,714
480,52,570,115,214,550,518,168,79,131,315,726,654,211,890,789,149,978,235,536
442,427,93,427,209,451,378,830,860,808,364,820,68,766,268,410,141,125,52,930
118,62,61,418,177,303,157,396,494,947,806,304,144,407,192,138,389,557,723,159
815,904,922,560,148,542,308,492,558,50,619,711,378,235,396,212,768,838,497,419
792,884,758,925,390,103,997,511,743,234,860,146,186,421,203,62,161,186,326,601
919,206,266,570,707,678,767,884,818,710,560,133,879,145,720,526,915,620,299,511
918,579,380,404,547,143,812,942,81,592,408,602,891,642,728,86,886,63,492,343
237,389,64,462,838,881,656,893,839,318,920,906,720,215,184,57,88,562,527,267
799,353,728,363,96,299,822,205,282,566,93,936,362,607,921,517,578,703,268,191
819,152,617,937,642,738,197,560,174,426,603,508,804,739,722,458,806,264,491,726
905,861,108,793,295,323,840,74,663,654,327,565,129,161,836,896,92,427,652,837
917,232,88,203,900,174,604,139,575,943,924,606,8,605,482,267,537,209,377,724
520,838,175,895,881,895,929,629,373,499,717,832,704,642,452,379,617,944,559,649
883,193,902,293,897,265,946,238,295,117,442,701,140,120,132,815,655,411,725,526
646,226,907,65,807,191,399,275,677,175,223,790,656,393,371,809,102,740,647,313
125,302,943,278,593,297,350,72,416,861,377,791,266,277,726,325,188,554,794,308
217,357,79,371,263,921,237,714,830,521,124,184,154,73,677,59,507,563,556,308
918,243,797,94,98,665,168,92,594,213,608,798,377,652,715,502,939,926,170,168
683,373,302,814,423,898,264,722,723,601,575,164,327,303,927,201,568,240,241,313
268,86,163,511,713,933,372,877,98,243,397,927,660,818,157,165,588,510,980,84
328,116,882,334,561,209,791,312,100,76,548,614,941,894,365,174,305,508,125,706
92,165,73,654,362,217,97,450,981,327,718,911,147,831,741,53,723,239,60,415
365,312,556,765,766,378,946,587,372,126,901,553,873,903,945,143,739,159,147,806
812,189,293,421,231,452,876,522,392,679,314,392,217,512,637,102,828,566,520,679
51,861,740,706,551,196,327,301,400,797,585,532,209,107,504,911,752,823,328,510
948,551,419,929,146,533,197,559,610,407,343,230,542,304,741,718,537,578,701,544
454,797,235,743,189,120,789,154,883,915,547,61,305,913,540,564,882,300,743,878
567,321,767,497,82,856,483,58,238,837,932,900,930,830,390,799,225,323,312,485
272,803,572,210,901,652,362,426,309,900,790,522,98,569,812,216,356,146,79,941
820,796,532,929,368,83,213,885,738,61,191,698,70,155,230,500,208,721,108,520
319,242,921,514,324,710,323,560,325,84,551,914,833,156,795,783,899,602,368,535
800,482,567,307,101,545,233,815,937,792,376,723,811,400,311,462,428,196,607,940
722,85,369,612,213,125,157,835,452,412,763,210,266,306,273,725,544,227,655,924
498,718,826,488,731,191,392,510,835,758,791,165,919,427,523,856,172,797,729,764
680,811,939,190,761,711,68,563,932,75,551,553,303,119,112,724,763,172,364,198
756,73,95,760,127,503,744,172,921,406,704,294,81,924,653,839,592,738,135,911
942,191,159,926,214,176,396,423,579,879,511,103,749,795,302,323,832,920,425,378
335,551,205,903,502,212,392,88,561,579,915,815,357,169,189,728,657,611,790,359
578,111,934,497,109,344,840,360,761,320,935,588,393,158,128,887,310,837,136,541
877,789,125,482,305,828,202,493,148,94,889,824,481,116,305,305,124,220,608,107
58,75,136,545,551,613,313,566,723,107,112,754,534,605,122,67,220,194,716,268
617,241,488,102,605,837,209,93,917,173,305,813,834,522,360,370,391,874,452,81
470,113,657,164,313,657,760,511,653,914,116,519,908,703,213,729,197,924,267,354
528,201,376,143,461,172,237,648,519,364,707,224,68,706,817,767,91,219,113,452
729,937,554,186,721,720,886,467,221,814,761,375,681,736,607,144,398,901,104,225
231,153,537,225,886,368,567,882,397,331,914,588,355,796,737,800,154,920,533,157
364,391,677,758,372,551,653,545,311,926,307,893,358,238,94,124,880,498,483,23
914,653,711,311,172,593,833,921,948,861,299,907,920,911,561,256,607,607,221,557
733,188,102,658,527,209,54,948,611,294,216,400,742,57,938,243,201,732,375,649
117,416,151,554,900,539,547,719,451,316,112,307,234,171,927,336,820,484,725,497
54,114,403,913,120,53,243,112,600,721,94,146,738,22,213,311,173,894,546,526
632,407,615,929,409,551,729,911,801,229,918,512,240,767,378,267,929,121,148,95
214,522,421,816,576,904,789,152,114,358,506,7,361,915,949,424,892,911,726,205
800,554,975,682,532,716,295,585,919,453,904,600,508,391,149,577,371,817,122,163
134,108,71,578,742,573,206,793,405,890,710,660,94,538,470,762,394,606,369,368
150,793,728,138,768,632,839,565,937,164,714,317,615,369,323,228,268,793,86,920
503,596,527,66,71,168,525,801,391,491,572,829,196,158,865,318,397,607,823,562
90,143,53,300,536,983,597,88,825,105,946,568,320,408,327,400,910,133,96,804
366,918,277,84,308,945,916,421,144,886,460,491,178,610,133,399,138,124,425,401
712,906,340,616,159,156,882,160,744,818,882,717,922,763,508,215,108,898,73,114
520,903,515,613,913,525,570,157,542,761,727,536,877,703,350,585,567,516,727,883
219,239,710,736,941,605,71,864,449,217,892,836,92,706,295,486,428,193,912,934
236,702,55,559,673,166,72,878,798,546,535,84,766,559,312,510,74,301,790,391
302,682,726,711,565,157,136,576,943,221,941,214,681,732,458,539,616,939,904,677
144,494,883,211,752,643,839,537,722,881,76,520,493,224,565,659,578,216,679,655
853,862,829,424,586,376,646,815,917,423,924,728,657,887,886,396,128,499,886,167
860,218,530,278,504,650,215,826,64,377,742,376,72,372,802,353,994,166,311,69
377,824,422,372,919,397,195,812,887,809,469,724,65,314,716,703,706,681,837,423
809,267,600,395,857,406,366,150,615,531,541,423,608,84,413,799,609,834,533,712
820,814,222,904,405,74,81,749,90,683,61,354,419,552,910,496,565,651,823,371
873,831,550,312,418,126,147,293,374,243,574,225,876,552,318,359,306,229,763,210
196,494,73,806,648,121,839,449,187,554,424,928,250,797,227,189,127,125,173,424"""
# Split the puzzle input into its three blank-line-separated sections.
# (Note: `input` shadows the builtin; kept for compatibility with the
# data literal defined above.)
fields_str, my_ticket_str, other_tickets_str = input.split("\n\n")

# Map each field name to the full list of values it accepts.
fields: Dict[str, List[int]] = dict()
for line in fields_str.splitlines():
    left, right = line.split(": ")
    values: List[int] = []
    for r in right.split(" or "):
        lo, hi = (int(part) for part in r.split("-"))
        # Bug fix: ranges such as "27-840" are inclusive of BOTH endpoints.
        # The original used range(lo, hi), which dropped `hi` and wrongly
        # flagged boundary values (e.g. 840) as invalid.
        values.extend(range(lo, hi + 1))
    fields[left] = values

# My ticket: the single comma-separated line after the section header.
my_ticket = [int(x) for x in my_ticket_str.splitlines()[1].split(",")]

# Nearby tickets: one comma-separated line per ticket after the header.
other_tickets = [
    [int(x) for x in other_ticket_str.split(",")]
    for other_ticket_str in other_tickets_str.splitlines()[1:]
]
def get_invalid_values(ticket: List[int], fields: Dict[str, List[int]]) -> int:
    """Return the sum of ticket values that match no field's valid values.

    Args:
        ticket: the numeric values printed on one ticket.
        fields: mapping of field name -> list of every value valid for it.
            (Fix: the annotation was ``Dict[str, int]`` although the values
            are lists of ints.)

    Returns:
        The sum of all values on the ticket that are valid for no field.
    """
    # Union of every value accepted by at least one field, built in one pass.
    all_valid_values = set().union(*fields.values()) if fields else set()
    return sum(t for t in ticket if t not in all_valid_values)
# Part 1 answer: the ticket scanning error rate over all nearby tickets.
error_rate = sum(
    get_invalid_values(other_ticket, fields) for other_ticket in other_tickets
)
print(error_rate)
acdf6719acec00a549db298d2e5c6533869226bc | 11,610 | py | Python | adb/windows/platform-tools/systrace/catapult/telemetry/third_party/web-page-replay/httpclient_test.py | llaske/sugarizer-deployment-tool-desktop | 34df1a56b68b15b6771671f87ab66586d60c514a | [
"Apache-2.0"
] | 1 | 2019-01-17T19:03:17.000Z | 2019-01-17T19:03:17.000Z | adb/MACOS/platform-tools/systrace/catapult/telemetry/third_party/web-page-replay/httpclient_test.py | llaske/sugarizer-deployment-tool-desktop | 34df1a56b68b15b6771671f87ab66586d60c514a | [
"Apache-2.0"
] | 2 | 2017-09-08T20:26:05.000Z | 2017-09-08T20:29:07.000Z | adb/windows/platform-tools/systrace/catapult/telemetry/third_party/web-page-replay/httpclient_test.py | llaske/sugarizer-deployment-tool-desktop | 34df1a56b68b15b6771671f87ab66586d60c514a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
import datetime
import dnsproxy
import httparchive
import httpclient
import platformsettings
import script_injector
import test_utils
class RealHttpFetchTest(unittest.TestCase):
    """Tests for RealHttpFetch's static header-parsing helpers."""

    # Initialize test data
    CONTENT_TYPE = 'content-type: image/x-icon'
    COOKIE_1 = ('Set-Cookie: GMAIL_IMP=EXPIRED; '
                'Expires=Thu, 12-Jul-2012 22:41:22 GMT; '
                'Path=/mail; Secure')
    COOKIE_2 = ('Set-Cookie: GMAIL_STAT_205a=EXPIRED; '
                'Expires=Thu, 12-Jul-2012 22:42:24 GMT; '
                'Path=/mail; Secure')
    FIRST_LINE = 'fake-header: first line'
    # Continuation lines start with whitespace (a space or a tab).
    SECOND_LINE = ' second line'
    THIRD_LINE = '\tthird line'
    BAD_HEADER = 'this is a bad header'

    def test__GetHeaderNameValueBasic(self):
        """Test _GetHeaderNameValue with normal header."""
        real_http_fetch = httpclient.RealHttpFetch
        name_value = real_http_fetch._GetHeaderNameValue(self.CONTENT_TYPE)
        self.assertEqual(name_value, ('content-type', 'image/x-icon'))

    def test__GetHeaderNameValueLowercasesName(self):
        """_GetHeaderNameValue lowercases header name."""
        real_http_fetch = httpclient.RealHttpFetch
        header = 'X-Google-Gfe-Backend-Request-Info: eid=1KMAUMeiK4eMiAL52YyMBg'
        expected = ('x-google-gfe-backend-request-info',
                    'eid=1KMAUMeiK4eMiAL52YyMBg')
        name_value = real_http_fetch._GetHeaderNameValue(header)
        self.assertEqual(name_value, expected)

    def test__GetHeaderNameValueBadLineGivesNone(self):
        """_GetHeaderNameValue returns None for a header in wrong format."""
        real_http_fetch = httpclient.RealHttpFetch
        name_value = real_http_fetch._GetHeaderNameValue(self.BAD_HEADER)
        self.assertIsNone(name_value)

    def test__ToTuplesBasic(self):
        """Test _ToTuples with normal input."""
        real_http_fetch = httpclient.RealHttpFetch
        headers = [self.CONTENT_TYPE, self.COOKIE_1, self.FIRST_LINE]
        result = real_http_fetch._ToTuples(headers)
        # COOKIE_1[12:] drops the 12-character 'Set-Cookie: ' prefix.
        expected = [('content-type', 'image/x-icon'),
                    ('set-cookie', self.COOKIE_1[12:]),
                    ('fake-header', 'first line')]
        self.assertEqual(result, expected)

    def test__ToTuplesMultipleHeadersWithSameName(self):
        """Test mulitple headers with the same name."""
        real_http_fetch = httpclient.RealHttpFetch
        headers = [self.CONTENT_TYPE, self.COOKIE_1, self.COOKIE_2, self.FIRST_LINE]
        result = real_http_fetch._ToTuples(headers)
        # Both set-cookie headers are preserved as separate tuples.
        expected = [('content-type', 'image/x-icon'),
                    ('set-cookie', self.COOKIE_1[12:]),
                    ('set-cookie', self.COOKIE_2[12:]),
                    ('fake-header', 'first line')]
        self.assertEqual(result, expected)

    def test__ToTuplesAppendsContinuationLine(self):
        """Test continuation line is handled."""
        real_http_fetch = httpclient.RealHttpFetch
        headers = [self.CONTENT_TYPE, self.COOKIE_1, self.FIRST_LINE,
                   self.SECOND_LINE, self.THIRD_LINE]
        result = real_http_fetch._ToTuples(headers)
        # Continuation lines are joined to the preceding header with '\n '.
        expected = [('content-type', 'image/x-icon'),
                    ('set-cookie', self.COOKIE_1[12:]),
                    ('fake-header', 'first line\n second line\n third line')]
        self.assertEqual(result, expected)

    def test__ToTuplesIgnoresBadHeader(self):
        """Test bad header is ignored."""
        real_http_fetch = httpclient.RealHttpFetch
        bad_headers = [self.CONTENT_TYPE, self.BAD_HEADER, self.COOKIE_1]
        expected = [('content-type', 'image/x-icon'),
                    ('set-cookie', self.COOKIE_1[12:])]
        result = real_http_fetch._ToTuples(bad_headers)
        self.assertEqual(result, expected)

    def test__ToTuplesIgnoresMisplacedContinuationLine(self):
        """Test misplaced continuation line is ignored."""
        real_http_fetch = httpclient.RealHttpFetch
        # THIRD_LINE appears first, before any header it could continue.
        misplaced_headers = [self.THIRD_LINE, self.CONTENT_TYPE,
                             self.COOKIE_1, self.FIRST_LINE, self.SECOND_LINE]
        result = real_http_fetch._ToTuples(misplaced_headers)
        expected = [('content-type', 'image/x-icon'),
                    ('set-cookie', self.COOKIE_1[12:]),
                    ('fake-header', 'first line\n second line')]
        self.assertEqual(result, expected)
class RealHttpFetchGetConnectionTest(unittest.TestCase):
    """Test that a connection is made with request IP/port or proxy IP/port."""

    def setUp(self):
        # Stub DNS resolver with fixed answers; no network access needed.
        def real_dns_lookup(host):
            return {
                'example.com': '127.127.127.127',
                'proxy.com': '2.2.2.2',
            }[host]
        self.fetch = httpclient.RealHttpFetch(real_dns_lookup)
        self.https_proxy = None
        self.http_proxy = None

        # Replace system-proxy discovery so each test controls the proxy.
        def get_proxy(is_ssl):
            return self.https_proxy if is_ssl else self.http_proxy
        self.fetch._get_system_proxy = get_proxy

    def set_http_proxy(self, host, port):
        # Configure the proxy returned for non-SSL requests.
        self.http_proxy = platformsettings.SystemProxy(host, port)

    def set_https_proxy(self, host, port):
        # Configure the proxy returned for SSL requests.
        self.https_proxy = platformsettings.SystemProxy(host, port)

    def test_get_connection_without_proxy_connects_to_host_ip(self):
        """HTTP connection with no proxy connects to host IP."""
        self.set_http_proxy(host=None, port=None)
        connection = self.fetch._get_connection('example.com', None, is_ssl=False)
        self.assertEqual('127.127.127.127', connection.host)
        self.assertEqual(80, connection.port)  # default HTTP port

    def test_get_connection_without_proxy_uses_nondefault_request_port(self):
        """HTTP connection with no proxy connects with request port."""
        self.set_https_proxy(host=None, port=None)
        connection = self.fetch._get_connection('example.com', 8888, is_ssl=False)
        self.assertEqual('127.127.127.127', connection.host)
        self.assertEqual(8888, connection.port)  # request HTTP port

    def test_get_connection_with_proxy_uses_proxy_port(self):
        """HTTP connection with proxy connects used proxy port."""
        self.set_http_proxy(host='proxy.com', port=None)
        connection = self.fetch._get_connection('example.com', 8888, is_ssl=False)
        self.assertEqual('2.2.2.2', connection.host)  # proxy IP
        self.assertEqual(80, connection.port)  # proxy port (default HTTP)

    def test_ssl_get_connection_without_proxy_connects_to_host_ip(self):
        """HTTPS (SSL) connection with no proxy connects to host IP."""
        self.set_https_proxy(host=None, port=None)
        connection = self.fetch._get_connection('example.com', None, is_ssl=True)
        self.assertEqual('127.127.127.127', connection.host)
        self.assertEqual(443, connection.port)  # default SSL port

    def test_ssl_get_connection_with_proxy_connects_to_proxy_ip(self):
        """HTTPS (SSL) connection with proxy connects to proxy IP."""
        self.set_https_proxy(host='proxy.com', port=8443)
        connection = self.fetch._get_connection('example.com', None, is_ssl=True)
        self.assertEqual('2.2.2.2', connection.host)  # proxy IP
        self.assertEqual(8443, connection.port)  # SSL proxy port

    def test_ssl_get_connection_with_proxy_tunnels_to_host(self):
        """HTTPS (SSL) connection with proxy tunnels to target host."""
        self.set_https_proxy(host='proxy.com', port=8443)
        connection = self.fetch._get_connection('example.com', 9443, is_ssl=True)
        self.assertEqual('example.com', connection._tunnel_host)  # host name
        self.assertEqual(9443, connection._tunnel_port)  # host port
class ActualNetworkFetchTest(test_utils.RealNetworkFetchTest):
    """Integration tests that perform a real fetch through RealHttpFetch."""

    def _fetch_google(self, is_ssl):
        """Fetch google.com/search?q=dogs over HTTP(S) and assert a response.

        Shared by both test methods; only the is_ssl flag differs.
        """
        real_dns_lookup = dnsproxy.RealDnsLookup(
            name_servers=[platformsettings.get_original_primary_nameserver()],
            dns_forwarding=False, proxy_host='127.0.0.1', proxy_port=5353)
        fetch = httpclient.RealHttpFetch(real_dns_lookup)
        request = httparchive.ArchivedHttpRequest(
            command='GET', host='google.com', full_path='/search?q=dogs',
            request_body=None, headers={}, is_ssl=is_ssl)
        response = fetch(request)
        self.assertIsNotNone(response)

    def testFetchNonSSLRequest(self):
        self._fetch_google(is_ssl=False)

    def testFetchSSLRequest(self):
        self._fetch_google(is_ssl=True)
class HttpArchiveFetchTest(unittest.TestCase):
    """Shared fixture helpers for record/replay fetch tests."""

    # Fixed timestamp so request_time round-trips are deterministic.
    TEST_REQUEST_TIME = datetime.datetime(2016, 11, 17, 1, 2, 3, 456)

    def createTestResponse(self):
        """Build a canned 200 HTML response with the fixed request time."""
        return httparchive.ArchivedHttpResponse(
            11, 200, 'OK', [('content-type', 'text/html')],
            ['<body>test</body>'],
            request_time=HttpArchiveFetchTest.TEST_REQUEST_TIME)

    def checkTestResponse(self, actual_response, archive, request):
        """Assert the response matches the archive entry and the fixture data."""
        self.assertEqual(actual_response, archive[request])
        self.assertEqual(['<body>test</body>'], actual_response.response_data)
        self.assertEqual(HttpArchiveFetchTest.TEST_REQUEST_TIME,
                         actual_response.request_time)

    @staticmethod
    def dummy_injector(_):
        """Injector stub: returns the body unchanged (no script injected)."""
        return '<body>test</body>'
class RecordHttpArchiveFetchTest(HttpArchiveFetchTest):
    """Recording: a real fetch result must be stored into the archive."""

    @mock.patch('httpclient.RealHttpFetch')
    def testFetch(self, real_http_fetch):
        # The mocked RealHttpFetch instance returns our canned response.
        http_fetch_instance = real_http_fetch.return_value
        response = self.createTestResponse()
        http_fetch_instance.return_value = response
        archive = httparchive.HttpArchive()
        fetch = httpclient.RecordHttpArchiveFetch(archive, self.dummy_injector)
        request = httparchive.ArchivedHttpRequest(
            'GET', 'www.test.com', '/', None, {})
        self.checkTestResponse(fetch(request), archive, request)
class ReplayHttpArchiveFetchTest(HttpArchiveFetchTest):
    """Replay: responses come from the archive, optionally script-injected."""

    def testFetch(self):
        request = httparchive.ArchivedHttpRequest(
            'GET', 'www.test.com', '/', None, {})
        response = self.createTestResponse()
        archive = httparchive.HttpArchive()
        archive[request] = response
        fetch = httpclient.ReplayHttpArchiveFetch(
            archive, None, self.dummy_injector)
        self.checkTestResponse(fetch(request), archive, request)

    @mock.patch('script_injector.util.resource_string')
    @mock.patch('script_injector.util.resource_exists')
    @mock.patch('script_injector.os.path.exists')
    def testInjectedDate(self, os_path, util_exists, util_resource_string):
        # Decorators apply bottom-up: os_path is the innermost patch.
        os_path.return_value = False
        util_exists.return_value = True
        util_resource_string.return_value = \
            ["""var time_seed={}""".format(script_injector.TIME_SEED_MARKER)]
        request = httparchive.ArchivedHttpRequest(
            'GET', 'www.test.com', '/', None, {})
        response = self.createTestResponse()
        archive = httparchive.HttpArchive()
        archive[request] = response
        fetch = httpclient.ReplayHttpArchiveFetch(
            archive, None, script_injector.GetScriptInjector("time_script.js"))
        # 1479344523000 is TEST_REQUEST_TIME in epoch milliseconds.
        self.assertEqual(
            ['<script>var time_seed=1479344523000</script><body>test</body>'],
            fetch(request).response_data)
if __name__ == '__main__':
    # Bug fix: the original called the nonexistent ``unittest.presentation.main()``,
    # which raises AttributeError; the standard test entry point is unittest.main().
    unittest.main()
| 40.736842 | 80 | 0.718346 |
acdf675f717fb5cf5183d1673528d0aa3877d5ac | 20,352 | py | Python | python/ccxt/kkex.py | jknight/ccxt | 02cdef0247435a6c6557faad8a1793d3da67c085 | [
"MIT"
] | null | null | null | python/ccxt/kkex.py | jknight/ccxt | 02cdef0247435a6c6557faad8a1793d3da67c085 | [
"MIT"
] | null | null | null | python/ccxt/kkex.py | jknight/ccxt | 02cdef0247435a6c6557faad8a1793d3da67c085 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
class kkex(Exchange):
def describe(self):
return self.deep_extend(super(kkex, self).describe(), {
'id': 'kkex',
'name': 'KKEX',
'countries': ['CN', 'US', 'JP'],
'version': 'v2',
'has': {
'CORS': False,
'fetchBalance': True,
'fetchTickers': True,
'fetchOrders': True,
'fetchOpenOrders': True,
'fetchClosedOrders': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'createMarketOrder': True,
'fetchOrder': True,
},
'timeframes': {
'1m': '1min',
'5m': '5min',
'15m': '15min',
'30m': '30min',
'1h': '1hour',
'4h': '4hour',
'12h': '12hour',
'1d': '1day',
'1w': '1week',
'1M': '1month',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/47401462-2e59f800-d74a-11e8-814f-e4ae17b4968a.jpg',
'api': {
'public': 'https://kkex.com/api/v1',
'private': 'https://kkex.com/api/v2',
'v1': 'https://kkex.com/api/v1',
},
'www': 'https://kkex.com',
'doc': 'https://kkex.com/api_wiki/cn/',
'fees': 'https://intercom.help/kkex/fee',
},
'api': {
'public': {
'get': [
'exchange_rate',
'products',
'assets',
'tickers',
'ticker',
'depth',
'trades',
'kline',
],
},
'private': {
'post': [
'profile',
'trade',
'batch_trade',
'cancel_order',
'cancel_all_orders',
'order_history',
'userinfo',
'order_info',
'orders_info',
],
},
'v1': {
'post': [
'process_strategy',
],
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'taker': 0.002,
'maker': 0.002,
},
'funding': {
'tierBased': False,
'percentage': False,
'withdraw': {},
'deposit': {},
},
},
'options': {
'lastNonceTimestamp': 0,
},
})
def fetch_markets(self, params={}):
tickers = self.publicGetTickers(params)
tickers = tickers['tickers']
products = self.publicGetProducts(params)
products = products['products']
markets = []
for k in range(0, len(tickers)):
keys = list(tickers[k].keys())
markets.append(keys[0])
result = []
for i in range(0, len(markets)):
id = markets[i]
market = markets[i]
baseId = ''
quoteId = ''
precision = {}
limits = {}
for j in range(0, len(products)):
p = products[j]
if p['mark_asset'] + p['base_asset'] == market:
quoteId = p['base_asset']
baseId = p['mark_asset']
price_scale_str = str(p['price_scale'])
scale = len(price_scale_str) - 1
precision = {
'price': scale,
'amount': scale,
}
limits = {
'amount': {
'min': max(self.safe_float(p, 'min_bid_size'), self.safe_float(p, 'min_ask_size')),
'max': min(self.safe_float(p, 'max_bid_size'), self.safe_float(p, 'max_ask_size')),
},
'price': {
'min': self.safe_float(p, 'min_price'),
'max': self.safe_float(p, 'max_price'),
},
}
limits['cost'] = {
'min': self.safe_float(p, 'min_bid_amount'),
'max': self.safe_float(p, 'max_bid_amount'),
}
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': True,
'precision': precision,
'limits': limits,
'info': market,
})
return result
def parse_ticker(self, ticker, market=None):
    """Convert a raw kkex ticker payload into the unified ccxt ticker dict.

    :param dict ticker: raw ticker (keys: date, high, low, buy, sell, last, vol)
    :param dict market: unified market, used only to resolve the symbol
    """
    timestamp = self.safe_timestamp(ticker, 'date')
    symbol = market['symbol'] if market is not None else None
    last = self.safe_float(ticker, 'last')
    return {
        'symbol': symbol,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'high': self.safe_float(ticker, 'high'),
        'low': self.safe_float(ticker, 'low'),
        'bid': self.safe_float(ticker, 'buy'),
        'bidVolume': None,
        'ask': self.safe_float(ticker, 'sell'),
        'askVolume': None,
        'vwap': None,
        'open': None,
        'close': last,
        'last': last,
        'previousClose': None,
        'change': None,
        'percentage': None,
        'average': None,
        'baseVolume': self.safe_float(ticker, 'vol'),
        'quoteVolume': None,
        'info': ticker,
    }
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.markets[symbol]
request = {
'symbol': market['id'],
}
response = self.publicGetTicker(self.extend(request, params))
ticker = self.extend(response['ticker'], self.omit(response, 'ticker'))
return self.parse_ticker(ticker, market)
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.publicGetTickers(params)
#
# { date: 1540350657,
# tickers: [{ENUBTC: {sell: "0.00000256",
# buy: "0.00000253",
# last: "0.00000253",
# vol: "138686.828804",
# high: "0.00000278",
# low: "0.00000253",
# open: "0.0000027" }},
# {ENUEOS: {sell: "0.00335",
# buy: "0.002702",
# last: "0.0034",
# vol: "15084.9",
# high: "0.0034",
# low: "0.003189",
# open: "0.003189" }} ],
# result: True }
#
tickers = self.safe_value(response, 'tickers')
result = {}
for i in range(0, len(tickers)):
ids = list(tickers[i].keys())
id = ids[0]
market = self.safe_value(self.markets_by_id, id)
if market is not None:
symbol = market['symbol']
ticker = self.extend(tickers[i][id], self.omit(response, 'tickers'))
result[symbol] = self.parse_ticker(ticker, market)
return result
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
request = {
'symbol': self.market_id(symbol),
}
if limit is not None:
request['size'] = limit
response = self.publicGetDepth(self.extend(request, params))
return self.parse_order_book(response)
def parse_trade(self, trade, market=None):
    """Convert a raw public trade into the unified ccxt trade structure."""
    timestamp = self.safe_integer(trade, 'date_ms')
    price = self.safe_float(trade, 'price')
    amount = self.safe_float(trade, 'amount')
    # cost is only derivable when both price and amount are present
    cost = None
    if price is not None and amount is not None:
        cost = amount * price
    symbol = market['symbol'] if market is not None else None
    return {
        'info': trade,
        'id': self.safe_string(trade, 'tid'),
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'symbol': symbol,
        'order': None,
        'type': None,
        'side': self.safe_string(trade, 'type'),  # exchange uses 'type' for side
        'takerOrMaker': None,
        'price': price,
        'amount': amount,
        'cost': cost,
        'fee': None,
    }
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.publicGetTrades(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def fetch_balance(self, params={}):
self.load_markets()
response = self.privatePostUserinfo(params)
balances = self.safe_value(response, 'info')
result = {'info': response}
funds = self.safe_value(balances, 'funds')
free = self.safe_value(funds, 'free', {})
freezed = self.safe_value(funds, 'freezed', {})
currencyIds = list(free.keys())
for i in range(0, len(currencyIds)):
currencyId = currencyIds[i]
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_float(free, currencyId)
account['used'] = self.safe_float(freezed, currencyId)
result[code] = account
return self.parse_balance(result)
def fetch_order(self, id, symbol=None, params={}):
if not symbol:
raise ArgumentsRequired(self.id + ' fetchOrder requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'order_id': id,
'symbol': market['id'],
}
response = self.privatePostOrderInfo(self.extend(request, params))
if response['result']:
return self.parse_order(response['order'], market)
raise OrderNotFound(self.id + ' order ' + id + ' not found')
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
    """Convert one raw kline row into [timestamp, open, high, low, close, volume].

    Raw row example:
        ["1521072000000", "0.000002", "0.00003", "0.000002", "0.00003", "3.106889"]
    """
    return [
        self.safe_integer(ohlcv, 0),   # timestamp (ms)
        self.safe_float(ohlcv, 1),     # open
        self.safe_float(ohlcv, 2),     # high
        self.safe_float(ohlcv, 3),     # low
        self.safe_float(ohlcv, 4),     # close
        self.safe_float(ohlcv, 5),     # volume
    ]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'type': self.timeframes[timeframe],
}
if since is not None:
# since = self.milliseconds() - self.parse_timeframe(timeframe) * limit * 1000
request['since'] = int(since / 1000)
if limit is not None:
request['size'] = limit
response = self.publicGetKline(self.extend(request, params))
#
# [
# [
# "1521072000000",
# "0.000002",
# "0.00003",
# "0.000002",
# "0.00003",
# "3.106889"
# ],
# [
# "1517356800000",
# "0.1",
# "0.1",
# "0.00000013",
# "0.000001",
# "542832.83114"
# ]
# ]
#
return self.parse_ohlcvs(response, market)
def parse_order_status(self, status):
    """Map the exchange's numeric order status to a unified ccxt status.

    Unknown codes fall through unchanged (safe_string default).
    """
    statuses = {
        '-1': 'canceled',
        '0': 'open',
        '1': 'open',
        '2': 'closed',
        '3': 'open',
        '4': 'canceled',
    }
    return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# {
# "status": 2,
# "source": "NORMAL",
# "amount": "10.852019",
# "create_date": 1523938461036,
# "avg_price": "0.00096104",
# "order_id": "100",
# "price": "0.00096105",
# "type": "buy",
# "symbol": "READBTC",
# "deal_amount": "10.852019"
# }
#
symbol = None
if market is not None:
symbol = market['symbol']
side = self.safe_string(order, 'side')
if side is None:
side = self.safe_string(order, 'type')
timestamp = self.safe_integer(order, 'create_date')
id = self.safe_string_2(order, 'order_id', 'id')
status = self.parse_order_status(self.safe_string(order, 'status'))
price = self.safe_float(order, 'price')
amount = self.safe_float(order, 'amount')
filled = self.safe_float(order, 'deal_amount')
average = self.safe_float(order, 'avg_price')
average = self.safe_float(order, 'price_avg', average)
remaining = None
cost = None
if filled is not None:
if amount is not None:
remaining = amount - filled
if average is not None:
cost = average * filled
return {
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'average': average,
'type': 'limit',
'side': side,
'price': price,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'fee': None,
'info': order,
'trades': None,
}
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'type': side,
}
if type == 'market':
# for market buy it requires the amount of quote currency to spend
if side == 'buy':
if self.options['createMarketBuyOrderRequiresPrice']:
if price is None:
raise InvalidOrder(self.id + " createOrder() requires the price argument with market buy orders to calculate total order cost(amount to spend), where cost = amount * price. Supply a price argument to createOrder() call if you want the cost to be calculated for you from price and amount, or, alternatively, add .options['createMarketBuyOrderRequiresPrice'] = False to supply the cost in the amount argument(the exchange-specific behaviour)")
else:
request['amount'] = self.cost_to_precision(symbol, float(amount) * float(price))
request['price'] = self.cost_to_precision(symbol, amount)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
request['type'] += '_' + type
else:
request['amount'] = self.amount_to_precision(symbol, amount)
request['price'] = self.price_to_precision(symbol, price)
response = self.privatePostTrade(self.extend(request, params))
id = self.safe_string(response, 'order_id')
return {
'info': response,
'id': id,
'datetime': None,
'timestamp': None,
'lastTradeTimestamp': None,
'status': 'open',
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'cost': None,
'amount': amount,
'filled': None,
'remaining': None,
'trades': None,
'fee': None,
'clientOrderId': None,
'average': None,
}
def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'order_id': id,
'symbol': market['id'],
}
return self.privatePostCancelOrder(self.extend(request, params))
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['page_length'] = limit # 20 by default
response = self.privatePostOrderHistory(self.extend(request, params))
return self.parse_orders(response['orders'], market, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
    """Fetch orders filtered by exchange status 0 (see parse_order_status)."""
    request = {
        'status': 0,
    }
    return self.fetch_orders(symbol, since, limit, self.extend(request, params))


def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
    """Fetch orders filtered by exchange status 1 (see parse_order_status)."""
    request = {
        'status': 1,
    }
    return self.fetch_orders(symbol, since, limit, self.extend(request, params))
def nonce(self):
    """Millisecond timestamp used as the request nonce."""
    return self.milliseconds()


def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
    """Build the request URL, headers and body for a public or private call.

    Private requests are signed with an uppercase MD5 hash of the sorted,
    urlencoded parameters plus '&secret_key=<secret>'.
    """
    url = self.urls['api'][api] + '/' + path
    if api == 'public':
        url += '?' + self.urlencode(params)
        headers = {'Content-Type': 'application/json'}
    else:
        self.check_required_credentials()
        nonce = self.nonce()
        signature = self.extend({
            'nonce': nonce,
            'api_key': self.apiKey,
        }, params)
        signature = self.urlencode(self.keysort(signature))
        signature += '&secret_key=' + self.secret
        signature = self.hash(self.encode(signature), 'md5')
        signature = signature.upper()
        body = self.urlencode(self.extend({
            'api_key': self.apiKey,
            'sign': signature,
            'nonce': nonce,
        }, params))
        headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    return {'url': url, 'method': method, 'body': body, 'headers': headers}
| 37.138686 | 465 | 0.459758 |
acdf67ee4a99cebaddea000ef358ceed872f774f | 7,324 | py | Python | src/functions/import_security_hub.py | aws-samples/iot-device-defender-integration-with-securityhub | 8c73df013779361ceebb19eef171d1a2e4dfacaa | [
"MIT-0"
] | 2 | 2021-05-24T19:18:45.000Z | 2021-08-29T10:44:22.000Z | src/functions/import_security_hub.py | aws-samples/iot-device-defender-integration-with-securityhub | 8c73df013779361ceebb19eef171d1a2e4dfacaa | [
"MIT-0"
] | null | null | null | src/functions/import_security_hub.py | aws-samples/iot-device-defender-integration-with-securityhub | 8c73df013779361ceebb19eef171d1a2e4dfacaa | [
"MIT-0"
] | 1 | 2021-06-10T19:01:29.000Z | 2021-06-10T19:01:29.000Z | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import os
import json
import logging
import boto3
import datetime
logger = logging.getLogger()
logger.setLevel(logging.INFO)
securityhub = boto3.client('securityhub')
iot = boto3.client('iot')
# Security Hub record states and the ASFF finding-type prefix used for
# all AWS IoT Device Defender findings imported by this function.
RECORDSTATE_ARCHIVED = "ARCHIVED"
RECORDSTATE_ACTIVE = "ACTIVE"
TYPE_PREFIX = "Software and Configuration Checks/AWS IoT Device Defender"


def get_sh_resource_type(iot_finding):
    """Return the ASFF Resource type for an IoT Device Defender finding."""
    if iot_finding['nonCompliantResource']['resourceType'] == "IAM_ROLE":
        return "AwsIamRole"
    return "Other"


def get_resource_identifier(iot_finding):
    """Return the resource name from an IoT Device Defender finding."""
    resource = iot_finding['nonCompliantResource']['resourceIdentifier']
    # Policy findings nest the name one level deeper than other resources.
    if list(resource.keys())[0] == "policyVersionIdentifier":
        return resource["policyVersionIdentifier"]["policyName"]
    return list(resource.values())[0]


def map_iot_dd_to_security_hub(finding):
    """Create an ASFF Security Hub finding from an IoT Device Defender finding.

    The finding dict must carry severity, nonCompliantResource, accountId,
    region, checkName, taskId, auditARN and reasonForNonCompliance.
    """
    severity = finding['severity']
    resource_id = get_resource_identifier(finding)
    resource_type = get_sh_resource_type(finding)
    account_id = finding['accountId']
    region = finding['region']
    check_name = finding['checkName']
    # Deterministic id so re-imports update rather than duplicate.
    finding_id = "arn:aws:iot-device-defender:{0}:{1}:audits/finding/{2}-{3}".format(
        region, account_id, check_name, resource_id)
    task_id = finding['taskId']
    audit_arn = finding['auditARN']
    record_state = RECORDSTATE_ACTIVE
    status = "FAILED"
    description = finding['reasonForNonCompliance']
    title = "IoT Device Defender: resource {} non compliant to {}".format(
        resource_id, check_name)
    new_recorded_time = datetime.datetime.utcnow().isoformat() + "Z"
    remediation_url = ("https://console.aws.amazon.com/iot/home?region=" +
                       region + "#/dd/audit/" + task_id + "/" + check_name)
    new_finding = {
        "SchemaVersion": "2018-10-08",
        "Id": finding_id,
        "ProductArn": "arn:aws:securityhub:{0}:{1}:product/{1}/default".format(region, account_id),
        "GeneratorId": audit_arn,
        "AwsAccountId": account_id,
        "Compliance": {"Status": status},
        "Types": [
            f"{TYPE_PREFIX}/{check_name}"
        ],
        "CreatedAt": new_recorded_time,
        "UpdatedAt": new_recorded_time,
        "Severity": {
            "Label": severity
        },
        "Title": title,
        "Description": description,
        'Remediation': {
            'Recommendation': {
                'Text': 'For directions on how to fix this issue, start mitigation action in AWS IoT Device Defender console',
                'Url': remediation_url
            }
        },
        "ProductFields": {
            "ProviderName": "IoTDeviceDefender",
            "ProviderVersion": "1.0",
        },
        'Resources': [
            {
                'Id': resource_id,
                'Type': resource_type,
                'Partition': "aws",
                'Region': region
            }
        ],
        'Workflow': {'Status': 'NEW'},
        'RecordState': record_state
    }
    return new_finding
def import_new_findings(new_findings):
    """Import findings into Security Hub in batches of 100 (API limit)."""
    try:
        for i in range(0, len(new_findings), 100):
            response = securityhub.batch_import_findings(
                Findings=new_findings[i:i + 100])
            if response['FailedCount'] > 0:
                logger.warning("Failed to import {} findings".format(
                    response['FailedCount']))
            else:
                logger.info("Findings imported to Security Hub")
    except Exception as error:
        logger.error("Error: %s", error)
        raise
def archive_resolved_findings(new_findings):
    """Archive active Security Hub findings that are absent from the new set.

    Pages through all ACTIVE Device Defender findings (by type prefix); any
    whose Id is no longer reported is re-imported with RecordState ARCHIVED.
    """
    new_recorded_time = datetime.datetime.utcnow().isoformat() + "Z"
    archived = []
    new_ids = [finding['Id'] for finding in new_findings]
    paginator = securityhub.get_paginator('get_findings')
    findings_for_check_pages = paginator.paginate(Filters={
        "Type": [{'Value': TYPE_PREFIX, 'Comparison': 'PREFIX'}],
        'RecordState': [{'Value': RECORDSTATE_ACTIVE, 'Comparison': 'EQUALS'}],
    })
    for previous_findings in findings_for_check_pages:
        for finding in previous_findings["Findings"]:
            if finding['Id'] not in new_ids:
                finding['UpdatedAt'] = new_recorded_time
                finding['RecordState'] = RECORDSTATE_ARCHIVED
                archived.append(finding)
    if len(archived) > 0:
        import_new_findings(archived)
def lambda_handler(event, context):
    """SNS-triggered entry point: import completed Device Defender audits.

    For each completed (on-demand or scheduled) audit task, fetch the
    non-compliant findings, map them to ASFF, import them into Security Hub
    and archive findings that are no longer reported.
    """
    # Bug fix: the incoming event was logged at ERROR level with a misleading
    # "Error:" prefix (leftover debug); log it at DEBUG instead.
    logger.debug("Received event: %s", json.dumps(event))
    region = os.environ['AWS_REGION']
    for record in event["Records"]:
        msg = json.loads(record['Sns']['Message'])
        new_findings = []
        try:
            if msg.get("taskType") and msg.get("auditDetails"):
                task_id = msg['taskId']
                logger.info(msg['taskId'])
                task = iot.describe_audit_task(taskId=task_id)
                # On-demand tasks have no scheduled audit name.
                audit_name = task.get("scheduledAuditName", "OnDemand")
                if (msg['taskType'] == 'ON_DEMAND_AUDIT_TASK' or msg['taskType'] == 'SCHEDULED_AUDIT_TASK') \
                        and msg['taskStatus'] == 'COMPLETED':
                    for audit in msg['auditDetails']:
                        if audit['checkRunStatus'] == "COMPLETED_NON_COMPLIANT":
                            logger.info("NON_COMPLIANT_FINDING: {}".format(
                                audit['checkName']))
                            paginator = iot.get_paginator('list_audit_findings')
                            findings_for_check_pages = paginator.paginate(
                                taskId=task_id, checkName=audit['checkName'])
                            for page in findings_for_check_pages:
                                for finding in page['findings']:
                                    # Suppressed findings are imported as ARCHIVED.
                                    if not finding['isSuppressed']:
                                        finding['RecordState'] = RECORDSTATE_ACTIVE
                                    else:
                                        finding['RecordState'] = RECORDSTATE_ARCHIVED
                                    finding['accountId'] = msg['accountId']
                                    finding['region'] = region
                                    finding['auditARN'] = "arn:aws:iot:{}:{}:scheduledaudit/{}".format(
                                        region, msg['accountId'], audit_name)
                                    logger.info(finding)
                                    new_findings.append(
                                        map_iot_dd_to_security_hub(finding))
                if new_findings:
                    import_new_findings(new_findings)
                    archive_resolved_findings(new_findings)
            else:
                logger.info("Event not related to a completed audit task")
        except Exception as error:
            logger.error("Error: %s", error)
            raise
acdf68627faaab0580c7d031ebd06824ea26b2c1 | 953 | py | Python | day9.py | bloy/adventofcode-2017 | 24cc473cee40720477660574a5a929c9546c37bc | [
"MIT"
] | null | null | null | day9.py | bloy/adventofcode-2017 | 24cc473cee40720477660574a5a929c9546c37bc | [
"MIT"
] | 2 | 2019-12-01T15:44:32.000Z | 2019-12-01T15:44:32.000Z | day9.py | bloy/adventofcode-2017 | 24cc473cee40720477660574a5a929c9546c37bc | [
"MIT"
] | null | null | null | #!env python
import aoc
import re
import pprint
def parse_data(lines):
    """Return the first line of the input; the puzzle input is one line."""
    return list(lines)[0]
def solve1(data):
    """Score the group stream (AoC 2017 day 9 part 1).

    '!x' pairs are cancelled, garbage '<...>' is stripped, then each group
    scores its nesting depth (outermost = 1).
    """
    cancelre = re.compile(r'!.')
    garbagere = re.compile(r'<.*?>')
    data = cancelre.sub('', data)
    data = garbagere.sub('', data)
    depth = 0
    score = 0
    for c in data:
        if c == '{':
            depth += 1
        elif c == '}':
            score += depth  # a group's score equals its depth at close
            depth -= 1
    return score
def solve2(data):
    """Count non-cancelled garbage characters (AoC 2017 day 9 part 2).

    '!x' pairs are removed first; then each '<...>' span contributes its
    interior length (span length minus the two delimiters).
    """
    cancelre = re.compile(r'!.')
    garbagere = re.compile(r'<.*?>')
    data = cancelre.sub('', data)
    count = 0
    for match in garbagere.finditer(data):
        count += match.end() - match.start() - 2
    return count
if __name__ == '__main__':
    # Read today's input, then print both puzzle answers.
    lines = aoc.input_lines(day=9)
    data = parse_data(lines)
    pprint.pprint(solve1(data))
    pprint.pprint(solve2(data))
| 19.854167 | 48 | 0.548793 |
acdf68c5c3a73d2bb6176c1f5464449eff85ae02 | 3,531 | py | Python | app.py | streamlit-badge-bot/awair | d79c23890a90d559cddf4a22e6a3f35cbe3163e3 | [
"MIT"
] | null | null | null | app.py | streamlit-badge-bot/awair | d79c23890a90d559cddf4a22e6a3f35cbe3163e3 | [
"MIT"
] | null | null | null | app.py | streamlit-badge-bot/awair | d79c23890a90d559cddf4a22e6a3f35cbe3163e3 | [
"MIT"
] | 1 | 2020-11-28T19:46:35.000Z | 2020-11-28T19:46:35.000Z | # -*- coding: utf-8 -*-
import pandas as pd
import streamlit as st
import altair as alt
# Traffic-light palette used by all the *_color helpers below.
RED = '#D32F2F'
YELLOW = '#FFCA28'
ORANGE = '#F57C00'
LIGHT_ORANGE = '#FFAB00'
GREEN = '#7CB342'


def score_color(x):
    """Map an Awair score to a color: <=60 red, <=80 orange, else green."""
    if x <= 60:
        return RED
    if x <= 80:
        return ORANGE
    return GREEN
def _band_color(x, bands, default):
    """Return the color of the first (upper_bound, color) band containing x.

    bands must be sorted by upper bound; values above every bound get default.
    """
    for upper, color in bands:
        if x <= upper:
            return color
    return default


def temp_color(x):
    """Color for temperature: green band 64-77, degrading to red at extremes."""
    return _band_color(x, [
        (48, RED), (51, ORANGE), (62, LIGHT_ORANGE), (64, YELLOW),
        (77, GREEN), (79, YELLOW), (89, LIGHT_ORANGE), (92, ORANGE),
    ], RED)


def humid_color(x):
    """Color for humidity: green band 40-50, degrading to red at extremes."""
    return _band_color(x, [
        (15, RED), (20, ORANGE), (35, LIGHT_ORANGE), (40, YELLOW),
        (50, GREEN), (60, YELLOW), (65, LIGHT_ORANGE), (80, ORANGE),
    ], RED)


def co2_color(x):
    """Color for CO2 (ppm): green up to 600, red above 2500."""
    return _band_color(x, [
        (600, GREEN), (1000, YELLOW), (1500, LIGHT_ORANGE), (2500, ORANGE),
    ], RED)


def voc_color(x):
    """Color for VOC: green up to 333, red above 8332."""
    return _band_color(x, [
        (333, GREEN), (1000, YELLOW), (3333, LIGHT_ORANGE), (8332, ORANGE),
    ], RED)


def pm25_color(x):
    """Color for PM2.5: green up to 15, red above 75."""
    return _band_color(x, [
        (15, GREEN), (35, YELLOW), (55, LIGHT_ORANGE), (75, ORANGE),
    ], RED)
if __name__ == '__main__':
    st.beta_set_page_config(
        page_title="awair",
        layout="wide",
        page_icon="https://assets.website-files.com/5e740636238c35d731ff790a/5ebb634dacf6431494a020e0_awair.ico"  # noqa
    )
    st.header("Awair Sensor Data")
    df = get_df().round(2)
    dfr = df.reset_index()
    # Precompute a per-row color column for each sensor.
    dfr['score_color'] = dfr['score'].apply(score_color)
    dfr['temp_color'] = dfr['temp'].apply(temp_color)
    dfr['humid_color'] = dfr['humid'].apply(humid_color)
    dfr['co2_color'] = dfr['co2'].apply(co2_color)
    dfr['voc_color'] = dfr['voc'].apply(voc_color)
    dfr['pm25_color'] = dfr['pm25'].apply(pm25_color)
    sensors = st.sidebar.multiselect(
        "Select Sensors",
        options=list(df.columns),
        default=list(df.columns),
    )
    min_dt = df.index.min().date()
    max_dt = df.index.max().date()
    dt = max_dt - pd.Timedelta("7D")  # default window: last 7 days
    from_dt = st.sidebar.date_input("From Date", value=dt, min_value=min_dt, max_value=max_dt)
    # Bug fix: this widget was also labeled "From Date", which both mislabels
    # the control and collides with the first widget's ID in Streamlit.
    to_dt = st.sidebar.date_input("To Date", value=max_dt, min_value=min_dt, max_value=max_dt)
    df2 = dfr[(dfr['timestamp'].dt.date >= from_dt) & (dfr['timestamp'].dt.date <= to_dt)]
    st_cols = st.beta_columns(2)
    for i, s in enumerate(sensors):
        # Two-column layout: alternate charts between the columns.
        st_cols[i % 2].subheader(f"Average {s}: {round(df2[s].mean())}")
        chart = alt.Chart(df2).mark_circle().encode(
            x=alt.X('timestamp:T', axis=alt.Axis(format="%m/%d/%y %H:%M")),
            y=alt.Y(f'{s}:Q', scale=alt.Scale(domain=[df2[s].min(), df2[s].max()])),
            color=alt.Color(f'{s}_color', scale=None),
            tooltip=['timestamp', f'{s}:Q'],
        ).interactive()
        st_cols[i % 2].altair_chart(chart, use_container_width=True)
acdf691e69f54df10a310bd75e7a630ebe499a9c | 3,505 | py | Python | utils/digest_pgn.py | wrocket/Tulip-Chess | d029a53eac945f049cc9894e71a313a8e3a7c9a2 | [
"MIT"
] | null | null | null | utils/digest_pgn.py | wrocket/Tulip-Chess | d029a53eac945f049cc9894e71a313a8e3a7c9a2 | [
"MIT"
] | null | null | null | utils/digest_pgn.py | wrocket/Tulip-Chess | d029a53eac945f049cc9894e71a313a8e3a7c9a2 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# The MIT License (MIT)
#
# Copyright (c) 2015 Brian Wray (brian@wrocket.org)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Script to digest a series of PGN files in to a simple list of move strings.
# The output can be used to easily build up transposition tables or opening
# books in the main C code or elsewhere.
# The output is a simple list of moves to the given depth found in the PGN.
# No attempt to parse or otherwise validate the move strings is made, so
# checking for illegal moves needs to be done by the consumer of this
# output.
import sys
import re
from itertools import chain
_max_moves = 14
if len(sys.argv) <= 1:
print('Usage: %s [pgn file 1] [pgn file 2] ...' % sys.argv[0])
exit(-1)
# Turn a token like '1.e4' to 'e4'
def remove_move_numbers(t):
    """Turn a token like '1.e4' into 'e4'; other tokens pass through."""
    m = re.match(r'^\d+\.(.*)', t)
    return m.group(1) if m else t


def is_move_token(token):
    """True for real move tokens; filters '', move-number tokens and '*'."""
    # Renamed parameter from `str`, which shadowed the builtin.
    return bool(token) and not re.match(r'^\d+\.*', token) and token != '*'
def parse_moves(game_lines):
    """Extract up to _max_moves move strings from one game's movetext lines."""
    joined_lines = ' '.join(game_lines)
    comments_removed = re.sub(r'{.*?}', '', joined_lines)  # strip PGN comments
    tokens = re.split(r'\s+', comments_removed)
    no_numbers = [remove_move_numbers(t) for t in tokens]
    move_strs = filter(is_move_token, no_numbers)
    return list(move_strs)[:_max_moves]
def parse_pgn_lines(line_source):
    """Parse a PGN stream into a list of move lists, one per game.

    Blank lines and tag lines ('[...]') end the current movetext section;
    any other line starts or continues a game's movetext.
    """
    results = []
    inside_game = False
    game_lines = []
    for line in line_source:
        line = line.strip()
        if len(line) == 0 or line.startswith('['):
            # Flush the game collected so far, if any.
            if inside_game and len(game_lines):
                results.append(parse_moves(game_lines))
                game_lines = []
                inside_game = False
            continue
        elif inside_game:
            game_lines.append(line)
        else:
            inside_game = True
            game_lines = [line]
    # Final game may end at EOF without a trailing blank line.
    if len(game_lines):
        results.append(parse_moves(game_lines))
    return results
def read_pgn_file(file_name):
    """Parse one PGN file into a list of games.

    Returns [] when parsing raises ValueError; I/O errors from open()
    propagate to the caller.
    """
    with open(file_name, 'r') as in_file:
        try:
            parsed = parse_pgn_lines(in_file)
        except ValueError:
            return []
    return parsed
def write_digest_file(results):
    """Print the unique space-joined move sequences, sorted.

    *results* is a list (one entry per input file) of lists of games;
    one level of flattening yields the games themselves.
    """
    games = chain.from_iterable(results)
    unique_lines = {' '.join(moves) for moves in games if len(moves) > 0}
    for digest_line in sorted(unique_lines):
        print(digest_line)
# Digest every PGN file named on the command line and print the unique
# move-sequence lines in sorted order.
all_games = [read_pgn_file(f) for f in sys.argv[1:]]
write_digest_file(all_games)
| 33.066038 | 79 | 0.685021 |
acdf691ebacf6310b5003c53d801bdf166b5aadd | 526 | py | Python | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/test-29018 | 12c9568299a49ac85da3a82ef95e0c0faee06bbe | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/test-29018 | 12c9568299a49ac85da3a82ef95e0c0faee06bbe | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/test-29018 | 12c9568299a49ac85da3a82ef95e0c0faee06bbe | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | from django.db import migrations
def create_site(apps, schema_editor):
    """Seed (or refresh) the default django.contrib.sites record (id=1)."""
    site_model = apps.get_model("sites", "Site")
    domain = "test-29018.botics.co"
    defaults = {"name": "test"}
    if domain:
        defaults["domain"] = domain
    site_model.objects.update_or_create(defaults=defaults, id=1)
class Migration(migrations.Migration):
    """Data migration that seeds the default Site record via create_site."""

    # Depends on the sites framework's unique-domain migration so the
    # Site table schema is final before update_or_create runs.
    dependencies = [
        ("sites", "0002_alter_domain_unique"),
    ]

    operations = [
        migrations.RunPython(create_site),
    ]
| 20.230769 | 61 | 0.65019 |
acdf69b3d95ab6e98afb963c1417b349ba37b079 | 1,669 | py | Python | Data Science With Python/18-supervised-learning-with-scikit-learn/02-regression/04-5-fold-cross-validation.py | aimanahmedmoin1997/DataCamp | c6a6c4d59b83f14854bd76ed5c0c7f2dddd6de1d | [
"MIT"
] | 3 | 2019-05-12T04:49:24.000Z | 2020-05-06T00:40:28.000Z | Data Science With Python/18-supervised-learning-with-scikit-learn/02-regression/04-5-fold-cross-validation.py | aimanahmedmoin1997/DataCamp | c6a6c4d59b83f14854bd76ed5c0c7f2dddd6de1d | [
"MIT"
] | null | null | null | Data Science With Python/18-supervised-learning-with-scikit-learn/02-regression/04-5-fold-cross-validation.py | aimanahmedmoin1997/DataCamp | c6a6c4d59b83f14854bd76ed5c0c7f2dddd6de1d | [
"MIT"
] | 7 | 2018-11-06T17:43:31.000Z | 2020-11-07T21:08:16.000Z | '''
5-fold cross-validation
Cross-validation is a vital step in evaluating a model. It maximizes the amount of data that is used to train the model, as during the course of training, the model is not only trained, but also tested on all of the available data.
In this exercise, you will practice 5-fold cross validation on the Gapminder data. By default, scikit-learn's cross_val_score() function uses R^2 (the coefficient of determination)
as the metric of choice for regression. Since you are performing 5-fold cross-validation, the function will return 5 scores. Your job is to compute these 5 scores and then take their average.
The DataFrame has been loaded as df and split into the feature/target variable arrays X and y. The modules pandas and numpy have been imported as pd and np, respectively.
INSTRUCTIONS
100XP
Import LinearRegression from sklearn.linear_model and cross_val_score from sklearn.model_selection.
Create a linear regression regressor called reg.
Use the cross_val_score() function to perform 5-fold cross-validation on X and y.
Compute and print the average cross-validation score. You can use NumPy's mean() function to compute the average.
'''
# Import necessary modules
from sklearn.model_selection import cross_val_score
from sklearn.metrics import roc_auc_score

# NOTE(review): despite the module docstring (regression / R^2 with
# LinearRegression), this snippet evaluates a fitted classifier `logreg`
# with ROC AUC.  `logreg`, `X`, `y`, `X_test` and `y_test` are assumed to
# be provided by the surrounding exercise environment -- confirm before
# running this file standalone.

# Compute predicted probabilities: y_pred_prob
y_pred_prob = logreg.predict_proba(X_test)[:,1]

# Compute and print AUC score
print("AUC: {}".format(roc_auc_score(y_test, y_pred_prob)))

# Compute cross-validated AUC scores: cv_auc
cv_auc = cross_val_score(logreg, X, y, cv=5, scoring='roc_auc')

# Print list of AUC scores
print("AUC scores computed using 5-fold cross-validation: {}".format(cv_auc))
acdf69bc6ec767fa5ce7dd02c07b6dd13ed02e4e | 863 | py | Python | test/test_internal_tracker.py | awalker125/gitea-api | 2dea0493d4b6a92d6e63a7284afb2c80cbf35cf7 | [
"MIT"
] | null | null | null | test/test_internal_tracker.py | awalker125/gitea-api | 2dea0493d4b6a92d6e63a7284afb2c80cbf35cf7 | [
"MIT"
] | null | null | null | test/test_internal_tracker.py | awalker125/gitea-api | 2dea0493d4b6a92d6e63a7284afb2c80cbf35cf7 | [
"MIT"
] | 1 | 2022-01-27T14:12:40.000Z | 2022-01-27T14:12:40.000Z | # coding: utf-8
"""
Gitea API.
This documentation describes the Gitea API. # noqa: E501
OpenAPI spec version: 1.15.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import gitea_api
from gitea_api.models.internal_tracker import InternalTracker # noqa: E501
from gitea_api.rest import ApiException
class TestInternalTracker(unittest.TestCase):
    """Unit-test stubs for the generated InternalTracker model."""

    def setUp(self):
        # No fixtures needed yet.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testInternalTracker(self):
        """Placeholder test for InternalTracker construction."""
        # FIXME: construct object with mandatory attributes with example values
        # model = gitea_api.models.internal_tracker.InternalTracker()  # noqa: E501
        pass
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 21.04878 | 83 | 0.696408 |
acdf69f675b1c0fb6dc9719204aa0f42c76e2e75 | 2,846 | py | Python | service/selenium/friends_displayer/FriendsDisplayer.py | koravel/friends_displayer | d2505687171b142efff622e31fd3729376a2e86b | [
"Apache-2.0"
] | null | null | null | service/selenium/friends_displayer/FriendsDisplayer.py | koravel/friends_displayer | d2505687171b142efff622e31fd3729376a2e86b | [
"Apache-2.0"
] | null | null | null | service/selenium/friends_displayer/FriendsDisplayer.py | koravel/friends_displayer | d2505687171b142efff622e31fd3729376a2e86b | [
"Apache-2.0"
] | null | null | null | from selenium import webdriver
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from service.selenium import SeleniumService
from service.selenium.friends_displayer import *
class FriendsDisplayer(SeleniumService):
    """Selenium service that logs into Facebook and logs the current
    user's friends list as ``name -> profile link`` pairs.

    Page selectors/ids (``email_box_id``, ``friends_tab_css_selector``,
    etc.) come from the friends_displayer package constants imported at
    module level.
    """

    def __init__(self, driver_location, logger, actions_delay):
        """Start a Chrome session and open Facebook.

        :param driver_location: chromedriver path; empty string lets
            Selenium resolve the driver from PATH.
        :param logger: project logger exposing log_info/log_critical.
        :param actions_delay: max seconds to wait for each page element.
        :raises ConnectionError: when the Facebook page cannot be loaded.
        """
        logger.log_info("Initializing friends_displayer service...")
        if driver_location == "":
            browser = webdriver.Chrome()
        else:
            browser = webdriver.Chrome(driver_location)
        super().__init__(logger, browser)
        self.__actions_delay = actions_delay
        try:
            self._browser.get(facebook_link)
        except WebDriverException:
            self._logger.log_critical(f"Cannot pass through invalid link:{facebook_link}")
            raise ConnectionError("")
        self._browser.maximize_window()

    def login(self, login, password):
        """Fill in credentials, submit the login form and dismiss the
        post-login popup."""
        # BUG FIX: the original assigned ``self.login = login``, which
        # rebound the instance attribute over this bound method and made
        # any second ``login()`` call raise TypeError.  Store the
        # credential under a private name instead.
        self._login = login
        self.password = password
        self._logger.log_info("Entering login...")
        self.get_text_element(self.__actions_delay, By.ID, email_box_id).send_keys(login)
        self._logger.log_info("Entering password...")
        self.get_text_element(self.__actions_delay, By.ID, password_box_id).send_keys(password)
        self._logger.log_info("Pushing login button... I can't handle it, help me!")
        self.get_clickable_element(self.__actions_delay, By.ID, login_button_id).click()
        self._logger.log_info("Skipping annoying popup...")
        webdriver.ActionChains(self._browser).send_keys(Keys.ESCAPE).perform()

    def get_friends_dict(self):
        """Scrape the friends tab and log the name -> link mapping.

        NOTE: despite its name, this method logs the results and returns
        None (preserved from the original behavior).
        """
        self._logger.log_info("Go to profile...")
        self.get_clickable_element(self.__actions_delay, By.CSS_SELECTOR, profile_css_selector).click()
        self._logger.log_info("Go to friends tab...")
        self.get_clickable_element(self.__actions_delay, By.CSS_SELECTOR, friends_tab_css_selector).click()
        self._logger.log_info("Extracting friend info containers...")
        friend_containers = self.get_element_list(self.__actions_delay, By.CSS_SELECTOR, friend_containers_css_selector)
        friends = dict()
        self._logger.log_info("Formatting data...")
        for item in friend_containers:
            # Normalize the href into a bare profile link (drop tracking
            # prefix and trailing slash).
            link = item.get_attribute("href").replace(excess_profile_link_data, "")[:-1]
            friends[item.text] = link
        self._logger.log_info("Printing data...")
        text = ""
        for key, value in friends.items():
            text += f"\n{result_separator}{result_format.format(key, value)}"
        text += f"\n{result_separator}"
        self._logger.log_info(f"\n{text}")

    def finalize(self):
        """Shut down the browser session."""
        self._logger.log_info("Shutting down friends_displayer service...")
        self._browser.quit()
acdf6a2327aab5beeada64171a254eca5ecb5e5f | 146 | py | Python | teste/flask_teste.py | pinheiroo27/ontask_b | 23fee8caf4e1c5694a710a77f3004ca5d9effeac | [
"MIT"
] | null | null | null | teste/flask_teste.py | pinheiroo27/ontask_b | 23fee8caf4e1c5694a710a77f3004ca5d9effeac | [
"MIT"
] | null | null | null | teste/flask_teste.py | pinheiroo27/ontask_b | 23fee8caf4e1c5694a710a77f3004ca5d9effeac | [
"MIT"
] | null | null | null | from flask import Flask
# Minimal Flask demo application.
app = Flask(__name__)

@app.route("/teste")
def index():
    # Responds to GET /teste with a static greeting ("Hello World!" in Portuguese).
    return 'Olá Mundo!'
if __name__ == "__main__":
app.run() | 16.222222 | 26 | 0.650685 |
acdf6a96c07e4cfa9dffd3d853f6b52ad0c7ea99 | 5,331 | py | Python | python/orca/src/bigdl/orca/learn/pytorch/pytorch_ray_worker.py | sgwhat/BigDL | 25b402666fbb26b0bc18fc8100e9a00469844778 | [
"Apache-2.0"
] | null | null | null | python/orca/src/bigdl/orca/learn/pytorch/pytorch_ray_worker.py | sgwhat/BigDL | 25b402666fbb26b0bc18fc8100e9a00469844778 | [
"Apache-2.0"
] | null | null | null | python/orca/src/bigdl/orca/learn/pytorch/pytorch_ray_worker.py | sgwhat/BigDL | 25b402666fbb26b0bc18fc8100e9a00469844778 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2017 The Ray Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ray
from bigdl.orca.learn.pytorch.utils import find_free_port
from bigdl.orca.learn.pytorch.torch_runner import TorchRunner
import torch.nn as nn
from torch.utils.data import IterableDataset
import logging
from bigdl.dllib.utils.log4Error import *
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
class PytorchRayWorker(TorchRunner):
    """Manages a PyTorch model for training on a Ray worker.

    Extends TorchRunner with a Horovod setup path and a predict() that
    accepts either an IterableDataset or a Ray object ref of a partition.
    """

    def __init__(self,
                 model_creator,
                 optimizer_creator,
                 loss_creator=None,
                 metrics=None,
                 scheduler_creator=None,
                 training_operator_cls=None,
                 config=None,
                 use_tqdm=False,
                 scheduler_step_freq=None,
                 sync_stats=True,
                 log_level=logging.INFO):
        super().__init__(model_creator, optimizer_creator, loss_creator, metrics, scheduler_creator,
                         training_operator_cls, config, use_tqdm, scheduler_step_freq, sync_stats,
                         log_level=log_level)
        # Defaults for a single local worker; setup_horovod() overrides them.
        self.backend = "torch-local"
        self.rank = 0
        self.size = 0

    def setup_horovod(self):
        """Initialize Horovod and build model/optimizer/operator for it."""
        import horovod.torch as hvd
        hvd.init()
        self.backend = "horovod"
        self.rank = hvd.rank()
        self.size = hvd.size()
        self.setup_components_horovod()
        self.setup_operator(self.models)

    def setup_address(self):
        """Return a tcp:// address on this node with a free port."""
        ip = self.get_node_ip()
        port = find_free_port()
        return f"tcp://{ip}:{port}"

    def get_node_ip(self):
        """Returns the IP address of the current node."""
        # NOTE(review): relies on Ray's private services module; may break
        # across Ray versions.
        return ray._private.services.get_node_ip_address()

    def setup_components_horovod(self):
        """Create model and Horovod-distributed optimizer, then loss/schedulers.

        Only a single model and a single optimizer are supported; iterables
        are rejected via invalidInputError.
        """
        import horovod.torch as hvd

        self.logger.debug("Creating model")
        self.models = self.model_creator(self.config)
        if not isinstance(self.models, Iterable):
            # Normalize the single model into a one-element list.
            self.models = [self.models]
        else:
            invalidInputError(False,
                              "only support single model for now")

        invalidInputError(all(isinstance(model, nn.Module) for model in self.models),
                          ("All models must be PyTorch models: {}.".format(self.models)))

        self.logger.debug("Creating optimizer.")
        self.optimizers = self.optimizer_creator(self.given_models,
                                                 self.config)
        if not isinstance(self.optimizers, Iterable):
            # Broadcast initial state from rank 0 so all workers start
            # identically, then wrap in Horovod's DistributedOptimizer.
            hvd.broadcast_parameters(self.models[0].state_dict(), root_rank=0)
            hvd.broadcast_optimizer_state(self.optimizers, root_rank=0)
            parameters = self.models[0].named_parameters()
            self.optimizers = hvd.DistributedOptimizer(self.optimizers,
                                                       named_parameters=parameters)
            self.optimizers = [self.optimizers]
        else:
            invalidInputError(False,
                              "only support one optimizer for now")

        self._create_schedulers_if_available()
        self._create_loss()

    def predict(self, data_creator, batch_size=32, profile=False):
        """Evaluates the model on the validation data set.

        :param data_creator: callable(config, batch_size) returning either an
            IterableDataset or a Ray object ref to a partition.
        :param batch_size: per-batch prediction size.
        :param profile: toggle runner profiling.
        """
        config = self.config.copy()
        self._toggle_profiling(profile=profile)

        shards_ref = data_creator(config, batch_size)
        if isinstance(shards_ref, IterableDataset):
            pred_stats = super().predict(partition=shards_ref, batch_size=batch_size,
                                         profile=profile)
            # NOTE(review): pred_stat.update(pred_stat) is a no-op, and only
            # the last element's "prediction" is returned -- confirm intent.
            for pred_stat in pred_stats:
                pred_stat.update(pred_stat)
            worker_stats = pred_stat["prediction"]
        else:
            if not isinstance(shards_ref, ray.ObjectID):
                invalidInputError(False,
                                  "Only xshards and Ray Dataset is supported for predict")
            # Materialize the partition from the Ray object store.
            partition = ray.get(shards_ref)
            worker_stats = super().predict(partition=partition, batch_size=batch_size,
                                           profile=profile)
        return worker_stats
| 38.352518 | 100 | 0.634403 |
acdf6aa427fa28d7a51ad3b140fa458f811c4e5c | 53,464 | py | Python | exchange/binance/binance.py | iCoder333/Coinbot | d62a6968116e586753e8160fe07d495c8b285b94 | [
"Apache-2.0"
] | 1 | 2021-06-03T14:23:42.000Z | 2021-06-03T14:23:42.000Z | exchange/binance/binance.py | cristianver333/Coinbot | d62a6968116e586753e8160fe07d495c8b285b94 | [
"Apache-2.0"
] | null | null | null | exchange/binance/binance.py | cristianver333/Coinbot | d62a6968116e586753e8160fe07d495c8b285b94 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
import hashlib
import hmac
import requests
import six
import time
from .BinanceExceptions import BinanceAPIException, BinanceRequestException, BinanceWithdrawException
from ..exchange import Exchange
if six.PY2:
from urllib import urlencode
elif six.PY3:
from urllib.parse import urlencode
class Binance(Exchange):
    """Binance implementation of the project's generic ``Exchange`` API.

    Wraps a low-level :class:`BinanceAPI` client with balance, price and
    market-order helpers.  ``get_my_pair``, ``get_coin_balance`` and
    ``market_bases`` are presumably inherited from ``Exchange`` (defined
    elsewhere in the project) -- confirm against that base class.
    """

    def __init__(self, key, secret):
        """Create the REST client and mark this exchange as connected."""
        self.api = BinanceAPI(key, secret)
        super().__init__('binance')
        self.connect_success()

    def get_step_size(self, pair):
        """Return the LOT_SIZE step for *pair*, or -1 when unknown.

        NOTE(review): assumes the LOT_SIZE filter is always at index 1 of
        the symbol's ``filters`` list -- confirm against current API output.
        """
        info = self.api.get_symbol_info(pair)
        if info:
            return float(info['filters'][1]['stepSize'])
        else:
            return -1

    def get_pair(self, coin, base):
        """Binance pair symbols are simply coin + base, e.g. ``ETHBTC``."""
        return coin + base

    def get_BTC_price(self):
        """Current best-bid BTC price quoted in USDT."""
        return self.get_price('BTC', base='USDT')

    def get_price(self, coin, base='BTC', _type=0):
        """Return the top-of-book price for coin/base, or 0 if unlisted.

        :param _type: 0 = best bid, 1 = best ask.  The ``2: 'last'``
            mapping is kept for interface compatibility, but the depth
            endpoint has no ``last`` key, so ``_type=2`` raises KeyError.
        """
        TYPES = {0: 'bids', 1: 'asks', 2: 'last'}
        pair = self.get_pair(coin, base)
        if self.api.get_symbol_info(pair):
            return float(self.api.get_order_book(symbol=pair)[TYPES[_type]][0][0])
        else:
            return 0

    def get_full_balance(self, allow_zero=False):
        """Return per-coin balances with BTC and USD valuations.

        USDT is reported under the 'USD' key.  The 'total' entry
        accumulates BTC/USD value across all coins (its 'num' stays 0).
        """
        balances = self.api.get_account()['balances']
        BTC_price = self.get_BTC_price()
        coins = {
            'total': {'BTC': 0, 'USD': 0, 'num': 0},
            'USD': {'BTC': 0, 'USD': 0, 'num': 0}
        }
        for coin in balances:
            coinName = coin['asset']
            num = float(coin['free']) + float(coin['locked'])
            if allow_zero or num > 0:
                if coinName == 'USDT':
                    coinName = 'USD'
                    BTC_value = num / BTC_price
                elif coinName == 'BTC':
                    BTC_value = num
                else:
                    BTC_value = self.get_price(coinName) * num
                USD_value = BTC_value * BTC_price
                # update info
                coins[coinName] = {
                    'num': num,
                    'BTC': BTC_value,
                    'USD': USD_value
                }
                coins['total']['BTC'] += BTC_value
                coins['total']['USD'] += USD_value
        return coins

    def get_all_coin_balance(self, allow_zero=False):
        """Return {coin: free+locked amount}; USDT is keyed as 'USD'."""
        balances = self.api.get_account()['balances']
        coins = {}
        for coin in balances:
            coinName = coin['asset']
            num = float(coin['free']) + float(coin['locked'])
            if coinName == 'USDT':
                coinName = 'USD'
            if allow_zero or num > 0:
                coins[coinName] = num
        return coins

    def get_trading_pairs(self):
        """Return {base: set(coins)} for each base in self.market_bases."""
        markets = {}
        # Fetch the product list once instead of once per base (the
        # original re-queried the API inside the loop).
        binance_markets = self.api.get_products()['data']
        for base in self.market_bases:
            markets[base] = set()
            for info in binance_markets:
                if info['quoteAsset'] == base:
                    markets[base].add(info['baseAsset'])
        return markets

    def get_market(self, coin, base='BTC'):
        """Return the top-10 order book for coin/base."""
        pair = self.get_pair(coin, base)
        return self.api.get_order_book(symbol=pair, limit=10)

    def market_buy(self, coin, base='BTC', quantity=0):
        """Place a market BUY order and return a normalized order dict."""
        pair = self.get_pair(coin, base)
        response = self.api.order_market_buy(symbol=pair, quantity=quantity)
        return {
            'exchange': self.name,
            # BUG FIX: this dict previously reported 'sell' for buy orders.
            'side': 'buy',
            'pair': self.get_my_pair(coin, base),
            'price': response['price'],
            'quantity': response['executedQty'],
            'total': None,
            'fee': None,
            'id': response['orderId'],
            'id2': response['clientOrderId']
        }

    def market_sell(self, coin, base='BTC', quantity=0):
        """Place a market SELL order and return a normalized order dict."""
        pair = self.get_pair(coin, base)
        response = self.api.order_market_sell(symbol=pair, quantity=quantity)
        return {
            'exchange': self.name,
            'side': 'sell',
            'pair': self.get_my_pair(coin, base),
            'price': response['price'],
            'quantity': response['executedQty'],
            'total': None,
            'fee': None,
            'id': response['orderId'],
            'id2': response['clientOrderId']
        }

    def market_sell_all(self, coin, base='BTC'):
        """Sell the entire balance of *coin*, rounded down to the pair's
        LOT_SIZE step.  Returns None when there is nothing to sell."""
        quantity = self.get_coin_balance(coin)
        if quantity <= 0:
            # BUG FIX: the '%s' placeholder was never interpolated before.
            print('%s does not have enough balance to sell' % coin)
            return None
        else:
            step = self.get_step_size(self.get_pair(coin, base))
            if step > 0 and quantity >= step:
                quantity = quantity - (quantity % float(step))
            return self.market_sell(coin, base=base, quantity=quantity)
# ------------------------------------------------------------------ #
# --------------------------- API Wrapper -------------------------- #
# ------------------------------------------------------------------ #
class BinanceAPI(object):
    """Low-level REST client for the Binance exchange API."""

    # API roots and versions.
    API_URL = 'https://api.binance.com/api'
    WITHDRAW_API_URL = 'https://api.binance.com/wapi'
    WEBSITE_URL = 'https://www.binance.com'
    PUBLIC_API_VERSION = 'v1'
    PRIVATE_API_VERSION = 'v3'
    WITHDRAW_API_VERSION = 'v3'

    SYMBOL_TYPE_SPOT = 'SPOT'

    # Order lifecycle statuses as returned by the API.
    ORDER_STATUS_NEW = 'NEW'
    ORDER_STATUS_PARTIALLY_FILLED = 'PARTIALLY_FILLED'
    ORDER_STATUS_FILLED = 'FILLED'
    ORDER_STATUS_CANCELED = 'CANCELED'
    ORDER_STATUS_PENDING_CANCEL = 'PENDING_CANCEL'
    ORDER_STATUS_REJECTED = 'REJECTED'
    ORDER_STATUS_EXPIRED = 'EXPIRED'

    # Candlestick interval codes accepted by the klines endpoint.
    KLINE_INTERVAL_1MINUTE = '1m'
    KLINE_INTERVAL_3MINUTE = '3m'
    KLINE_INTERVAL_5MINUTE = '5m'
    KLINE_INTERVAL_15MINUTE = '15m'
    KLINE_INTERVAL_30MINUTE = '30m'
    KLINE_INTERVAL_1HOUR = '1h'
    KLINE_INTERVAL_2HOUR = '2h'
    KLINE_INTERVAL_4HOUR = '4h'
    KLINE_INTERVAL_6HOUR = '6h'
    KLINE_INTERVAL_8HOUR = '8h'
    KLINE_INTERVAL_12HOUR = '12h'
    KLINE_INTERVAL_1DAY = '1d'
    KLINE_INTERVAL_3DAY = '3d'
    KLINE_INTERVAL_1WEEK = '1w'
    KLINE_INTERVAL_1MONTH = '1M'

    # Order sides and types.
    SIDE_BUY = 'BUY'
    SIDE_SELL = 'SELL'

    ORDER_TYPE_LIMIT = 'LIMIT'
    ORDER_TYPE_MARKET = 'MARKET'
    ORDER_TYPE_STOP_LOSS = 'STOP_LOSS'
    ORDER_TYPE_STOP_LOSS_LIMIT = 'STOP_LOSS_LIMIT'
    ORDER_TYPE_TAKE_PROFIT = 'TAKE_PROFIT'
    ORDER_TYPE_TAKE_PROFIT_LIMIT = 'TAKE_PROFIT_LIMIT'
    ORDER_TYPE_LIMIT_MAKER = 'LIMIT_MAKER'

    TIME_IN_FORCE_GTC = 'GTC'  # Good till cancelled
    TIME_IN_FORCE_IOC = 'IOC'  # Immediate or cancel
    TIME_IN_FORCE_FOK = 'FOK'  # Fill or kill

    # newOrderRespType values controlling response verbosity.
    ORDER_RESP_TYPE_ACK = 'ACK'
    ORDER_RESP_TYPE_RESULT = 'RESULT'
    ORDER_RESP_TYPE_FULL = 'FULL'
    def __init__(self, api_key, api_secret):
        """Binance API Client constructor.

        :param api_key: Api Key
        :type api_key: str.
        :param api_secret: Api Secret
        :type api_secret: str.

        Note: performs a network call (ping) at construction time.
        """
        self.API_KEY = api_key
        self.API_SECRET = api_secret
        self.session = self._init_session()

        # init DNS and SSL cert
        self.ping()
def _init_session(self):
session = requests.session()
session.headers.update({'Accept': 'application/json',
'User-Agent': 'binance/python',
'X-MBX-APIKEY': self.API_KEY})
return session
def _create_api_uri(self, path, signed=True, version=PUBLIC_API_VERSION):
v = self.PRIVATE_API_VERSION if signed else version
return self.API_URL + '/' + v + '/' + path
def _create_withdraw_api_uri(self, path):
return self.WITHDRAW_API_URL + '/' + self.WITHDRAW_API_VERSION + '/' + path
def _create_website_uri(self, path):
return self.WEBSITE_URL + '/' + path
def _generate_signature(self, data):
query_string = urlencode(data)
m = hmac.new(self.API_SECRET.encode('utf-8'), query_string.encode('utf-8'), hashlib.sha256)
return m.hexdigest()
def _order_params(self, data):
"""Convert params to list with signature as last element
:param data:
:return:
"""
has_signature = False
params = []
for key, value in data.items():
if key == 'signature':
has_signature = True
else:
params.append((key, value))
if has_signature:
params.append(('signature', data['signature']))
return params
    def _request(self, method, uri, signed, force_params=False, **kwargs):
        """Core HTTP dispatcher shared by all endpoint helpers.

        Signed requests get a timestamp and HMAC signature appended to
        their data; GET (and force_params) requests move the data into
        query parameters with the signature placed last.
        """
        data = kwargs.get('data', None)
        if data and isinstance(data, dict):
            kwargs['data'] = data
        if signed:
            # generate signature -- the timestamp must be added before signing
            kwargs['data']['timestamp'] = int(time.time() * 1000)
            kwargs['data']['signature'] = self._generate_signature(kwargs['data'])

        if data and (method == 'get' or force_params):
            # Send data as query params instead of a request body.
            kwargs['params'] = self._order_params(kwargs['data'])
            del(kwargs['data'])

        response = getattr(self.session, method)(uri, **kwargs)
        return self._handle_response(response)
def _request_api(self, method, path, signed=False, version=PUBLIC_API_VERSION, **kwargs):
uri = self._create_api_uri(path, signed, version)
return self._request(method, uri, signed, **kwargs)
def _request_withdraw_api(self, method, path, signed=False, **kwargs):
uri = self._create_withdraw_api_uri(path)
return self._request(method, uri, signed, True, **kwargs)
def _request_website(self, method, path, signed=False, **kwargs):
uri = self._create_website_uri(path)
return self._request(method, uri, signed, **kwargs)
def _handle_response(self, response):
"""Internal helper for handling API responses from the Binance server.
Raises the appropriate exceptions when necessary; otherwise, returns the
response.
"""
if not str(response.status_code).startswith('2'):
raise BinanceAPIException(response)
try:
return response.json()
except ValueError:
raise BinanceRequestException('Invalid Response: %s' % response.text)
    def _get(self, path, signed=False, version=PUBLIC_API_VERSION, **kwargs):
        # Convenience wrapper: HTTP GET against the main API.
        return self._request_api('get', path, signed, version, **kwargs)
    def _post(self, path, signed=False, version=PUBLIC_API_VERSION, **kwargs):
        # Convenience wrapper: HTTP POST against the main API.
        return self._request_api('post', path, signed, version, **kwargs)
    def _put(self, path, signed=False, version=PUBLIC_API_VERSION, **kwargs):
        # Convenience wrapper: HTTP PUT against the main API.
        return self._request_api('put', path, signed, version, **kwargs)
    def _delete(self, path, signed=False, version=PUBLIC_API_VERSION, **kwargs):
        # Convenience wrapper: HTTP DELETE against the main API.
        return self._request_api('delete', path, signed, version, **kwargs)
# Exchange Endpoints
def get_products(self):
"""Return list of products currently listed on Binance
Use get_exchange_info() call instead
:returns: list - List of product dictionaries
:raises: BinanceResponseException, BinanceAPIException
"""
products = self._request_website('get', 'exchange/public/product')
return products
def get_exchange_info(self):
"""Return rate limits and list of symbols
:returns: list - List of product dictionaries
.. code-block:: python
{
"timezone": "UTC",
"serverTime": 1508631584636,
"rateLimits": [
{
"rateLimitType": "REQUESTS",
"interval": "MINUTE",
"limit": 1200
},
{
"rateLimitType": "ORDERS",
"interval": "SECOND",
"limit": 10
},
{
"rateLimitType": "ORDERS",
"interval": "DAY",
"limit": 100000
}
],
"exchangeFilters": [],
"symbols": [
{
"symbol": "ETHBTC",
"status": "TRADING",
"baseAsset": "ETH",
"baseAssetPrecision": 8,
"quoteAsset": "BTC",
"quotePrecision": 8,
"orderTypes": ["LIMIT", "MARKET"],
"icebergAllowed": false,
"filters": [
{
"filterType": "PRICE_FILTER",
"minPrice": "0.00000100",
"maxPrice": "100000.00000000",
"tickSize": "0.00000100"
}, {
"filterType": "LOT_SIZE",
"minQty": "0.00100000",
"maxQty": "100000.00000000",
"stepSize": "0.00100000"
}, {
"filterType": "MIN_NOTIONAL",
"minNotional": "0.00100000"
}
]
}
]
}
:raises: BinanceResponseException, BinanceAPIException
"""
return self._get('exchangeInfo')
def get_symbol_info(self, symbol):
"""Return information about a symbol
:param symbol: required e.g BNBBTC
:type symbol: str
:returns: Dict if found, None if not
.. code-block:: python
{
"symbol": "ETHBTC",
"status": "TRADING",
"baseAsset": "ETH",
"baseAssetPrecision": 8,
"quoteAsset": "BTC",
"quotePrecision": 8,
"orderTypes": ["LIMIT", "MARKET"],
"icebergAllowed": false,
"filters": [
{
"filterType": "PRICE_FILTER",
"minPrice": "0.00000100",
"maxPrice": "100000.00000000",
"tickSize": "0.00000100"
}, {
"filterType": "LOT_SIZE",
"minQty": "0.00100000",
"maxQty": "100000.00000000",
"stepSize": "0.00100000"
}, {
"filterType": "MIN_NOTIONAL",
"minNotional": "0.00100000"
}
]
}
:raises: BinanceResponseException, BinanceAPIException
"""
res = self._get('exchangeInfo')
for item in res['symbols']:
if item['symbol'] == symbol.upper():
return item
return None
# General Endpoints
def ping(self):
"""Test connectivity to the Rest API.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#test-connectivity
:returns: Empty array
.. code-block:: python
{}
:raises: BinanceResponseException, BinanceAPIException
"""
return self._get('ping')
def get_server_time(self):
"""Test connectivity to the Rest API and get the current server time.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#check-server-time
:returns: Current server time
.. code-block:: python
{
"serverTime": 1499827319559
}
:raises: BinanceResponseException, BinanceAPIException
"""
return self._get('time')
# Market Data Endpoints
def get_all_tickers(self):
"""Latest price for all symbols.
https://www.binance.com/restapipub.html#symbols-price-ticker
:returns: List of market tickers
.. code-block:: python
[
{
"symbol": "LTCBTC",
"price": "4.00000200"
},
{
"symbol": "ETHBTC",
"price": "0.07946600"
}
]
:raises: BinanceResponseException, BinanceAPIException
"""
return self._get('ticker/allPrices')
def get_orderbook_tickers(self):
"""Best price/qty on the order book for all symbols.
https://www.binance.com/restapipub.html#symbols-order-book-ticker
:returns: List of order book market entries
.. code-block:: python
[
{
"symbol": "LTCBTC",
"bidPrice": "4.00000000",
"bidQty": "431.00000000",
"askPrice": "4.00000200",
"askQty": "9.00000000"
},
{
"symbol": "ETHBTC",
"bidPrice": "0.07946700",
"bidQty": "9.00000000",
"askPrice": "100000.00000000",
"askQty": "1000.00000000"
}
]
:raises: BinanceResponseException, BinanceAPIException
"""
return self._get('ticker/allBookTickers')
def get_order_book(self, **params):
"""Get the Order Book for the market
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#order-book
:param symbol: required
:type symbol: str
:param limit: Default 100; max 100
:type limit: int
:returns: API response
.. code-block:: python
{
"lastUpdateId": 1027024,
"bids": [
[
"4.00000000", # PRICE
"431.00000000", # QTY
[] # Can be ignored
]
],
"asks": [
[
"4.00000200",
"12.00000000",
[]
]
]
}
:raises: BinanceResponseException, BinanceAPIException
"""
return self._get('depth', data=params)
def get_recent_trades(self, **params):
"""Get recent trades (up to last 500).
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#recent-trades-list
:param symbol: required
:type symbol: str
:param limit: Default 500; max 500.
:type limit: int
:returns: API response
.. code-block:: python
[
{
"id": 28457,
"price": "4.00000100",
"qty": "12.00000000",
"time": 1499865549590,
"isBuyerMaker": true,
"isBestMatch": true
}
]
:raises: BinanceResponseException, BinanceAPIException
"""
return self._get('trades', data=params)
def get_historical_trades(self, **params):
"""Get older trades.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#recent-trades-list
:param symbol: required
:type symbol: str
:param limit: Default 500; max 500.
:type limit: int
:param fromId: TradeId to fetch from. Default gets most recent trades.
:type fromId: str
:returns: API response
.. code-block:: python
[
{
"id": 28457,
"price": "4.00000100",
"qty": "12.00000000",
"time": 1499865549590,
"isBuyerMaker": true,
"isBestMatch": true
}
]
:raises: BinanceResponseException, BinanceAPIException
"""
return self._get('historicalTrades', data=params)
def get_aggregate_trades(self, **params):
"""Get compressed, aggregate trades. Trades that fill at the time,
from the same order, with the same price will have the quantity aggregated.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#compressedaggregate-trades-list
:param symbol: required
:type symbol: str
:param fromId: ID to get aggregate trades from INCLUSIVE.
:type fromId: str
:param startTime: Timestamp in ms to get aggregate trades from INCLUSIVE.
:type startTime: int
:param endTime: Timestamp in ms to get aggregate trades until INCLUSIVE.
:type endTime: int
:param limit: Default 500; max 500.
:type limit: int
:returns: API response
.. code-block:: python
[
{
"a": 26129, # Aggregate tradeId
"p": "0.01633102", # Price
"q": "4.70443515", # Quantity
"f": 27781, # First tradeId
"l": 27781, # Last tradeId
"T": 1498793709153, # Timestamp
"m": true, # Was the buyer the maker?
"M": true # Was the trade the best price match?
}
]
:raises: BinanceResponseException, BinanceAPIException
"""
return self._get('aggTrades', data=params)
def get_klines(self, **params):
"""Kline/candlestick bars for a symbol. Klines are uniquely identified by their open time.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#klinecandlestick-data
:param symbol: required
:type symbol: str
:param interval: -
:type interval: enum
:param limit: - Default 500; max 500.
:type limit: int
:param startTime:
:type startTime: int
:param endTime:
:type endTime: int
:returns: API response
.. code-block:: python
[
[
1499040000000, # Open time
"0.01634790", # Open
"0.80000000", # High
"0.01575800", # Low
"0.01577100", # Close
"148976.11427815", # Volume
1499644799999, # Close time
"2434.19055334", # Quote asset volume
308, # Number of trades
"1756.87402397", # Taker buy base asset volume
"28.46694368", # Taker buy quote asset volume
"17928899.62484339" # Can be ignored
]
]
:raises: BinanceResponseException, BinanceAPIException
"""
return self._get('klines', data=params)
def get_ticker(self, **params):
"""24 hour price change statistics.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#24hr-ticker-price-change-statistics
:param symbol:
:type symbol: str
:returns: API response
.. code-block:: python
{
"priceChange": "-94.99999800",
"priceChangePercent": "-95.960",
"weightedAvgPrice": "0.29628482",
"prevClosePrice": "0.10002000",
"lastPrice": "4.00000200",
"bidPrice": "4.00000000",
"askPrice": "4.00000200",
"openPrice": "99.00000000",
"highPrice": "100.00000000",
"lowPrice": "0.10000000",
"volume": "8913.30000000",
"openTime": 1499783499040,
"closeTime": 1499869899040,
"fristId": 28385, # First tradeId
"lastId": 28460, # Last tradeId
"count": 76 # Trade count
}
OR
.. code-block:: python
[
{
"priceChange": "-94.99999800",
"priceChangePercent": "-95.960",
"weightedAvgPrice": "0.29628482",
"prevClosePrice": "0.10002000",
"lastPrice": "4.00000200",
"bidPrice": "4.00000000",
"askPrice": "4.00000200",
"openPrice": "99.00000000",
"highPrice": "100.00000000",
"lowPrice": "0.10000000",
"volume": "8913.30000000",
"openTime": 1499783499040,
"closeTime": 1499869899040,
"fristId": 28385, # First tradeId
"lastId": 28460, # Last tradeId
"count": 76 # Trade count
}
]
:raises: BinanceResponseException, BinanceAPIException
"""
return self._get('ticker/24hr', data=params)
def get_symbol_ticker(self, **params):
    """Fetch the latest trade price for a symbol, or for all symbols.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#symbol-price-ticker

    :param symbol: symbol to query; when omitted a list of
        ``{"symbol": ..., "price": ...}`` dicts is returned instead
    :type symbol: str

    :returns: API response -- a single price dict, or a list of them

    :raises: BinanceResponseException, BinanceAPIException
    """
    # Unsigned request, but pinned to the private API version used by the
    # rest of the client.
    return self._get('ticker/price', data=params, version=self.PRIVATE_API_VERSION)
def get_orderbook_ticker(self, **params):
    """Fetch the best (top-of-book) bid/ask price and quantity.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#symbol-order-book-ticker

    :param symbol: symbol to query; when omitted a list with one entry per
        traded symbol is returned
    :type symbol: str

    :returns: API response -- a dict with bidPrice/bidQty/askPrice/askQty
        for one symbol, otherwise a list of such dicts

    :raises: BinanceResponseException, BinanceAPIException
    """
    # Unsigned request, pinned to the client's private API version.
    return self._get('ticker/bookTicker', data=params, version=self.PRIVATE_API_VERSION)
# Account Endpoints
def create_order(self, **params):
    """Send in a new order.

    Any order carrying an ``icebergQty`` MUST set ``timeInForce`` to GTC.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#new-order--trade

    :param symbol: required
    :type symbol: str
    :param side: required (BUY/SELL)
    :type side: enum
    :param type: required (LIMIT, MARKET, ...)
    :type type: enum
    :param timeInForce: required for limit orders
    :type timeInForce: enum
    :param quantity: required
    :type quantity: decimal
    :param price: required for limit orders
    :type price: decimal
    :param newClientOrderId: unique client id; auto-generated when omitted
    :type newClientOrderId: str
    :param icebergQty: iceberg quantity for LIMIT/STOP_LOSS_LIMIT/TAKE_PROFIT_LIMIT
    :type icebergQty: decimal
    :param newOrderRespType: response detail level -- ACK, RESULT or FULL
    :type newOrderRespType: enum

    :returns: API response; shape depends on ``newOrderRespType`` (ACK is the
        bare order ids, RESULT adds execution summary, FULL adds the fills)

    :raises: BinanceResponseException, BinanceAPIException,
        BinanceOrderException, BinanceOrderMinAmountException,
        BinanceOrderMinPriceException, BinanceOrderMinTotalException,
        BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
    """
    # Signed POST -- this hits the live matching engine.
    return self._post('order', True, data=params)
def order_limit(self, timeInForce=TIME_IN_FORCE_GTC, **params):
    """Send in a new limit order.

    Any order carrying an ``icebergQty`` MUST set ``timeInForce`` to GTC.

    :param symbol: required
    :type symbol: str
    :param side: required (BUY/SELL)
    :type side: enum
    :param quantity: required
    :type quantity: decimal
    :param price: required
    :type price: decimal
    :param timeInForce: defaults to Good-Till-Cancelled
    :type timeInForce: enum
    :param newClientOrderId: unique client id; auto-generated when omitted
    :type newClientOrderId: str
    :param icebergQty: iceberg quantity for LIMIT/STOP_LOSS_LIMIT/TAKE_PROFIT_LIMIT
    :type icebergQty: decimal
    :param newOrderRespType: response detail level -- ACK, RESULT or FULL
    :type newOrderRespType: enum

    :returns: API response; see :meth:`create_order` for the full shape

    :raises: BinanceResponseException, BinanceAPIException,
        BinanceOrderException, BinanceOrderMinAmountException,
        BinanceOrderMinPriceException, BinanceOrderMinTotalException,
        BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
    """
    # Pin the order type and propagate the time-in-force before delegating.
    params['type'] = self.ORDER_TYPE_LIMIT
    params['timeInForce'] = timeInForce
    return self.create_order(**params)
def order_limit_buy(self, timeInForce=TIME_IN_FORCE_GTC, **params):
    """Send in a new limit buy order.

    Any order carrying an ``icebergQty`` MUST set ``timeInForce`` to GTC.

    :param symbol: required
    :type symbol: str
    :param quantity: required
    :type quantity: decimal
    :param price: required
    :type price: decimal
    :param timeInForce: defaults to Good-Till-Cancelled
    :type timeInForce: enum
    :param newClientOrderId: unique client id; auto-generated when omitted
    :type newClientOrderId: str
    :param stopPrice: used with stop orders
    :type stopPrice: decimal
    :param icebergQty: used with iceberg orders
    :type icebergQty: decimal
    :param newOrderRespType: response detail level -- ACK, RESULT or FULL
    :type newOrderRespType: enum

    :returns: API response; see :meth:`create_order` for the full shape

    :raises: BinanceResponseException, BinanceAPIException,
        BinanceOrderException, BinanceOrderMinAmountException,
        BinanceOrderMinPriceException, BinanceOrderMinTotalException,
        BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
    """
    # Fix the side to BUY and delegate everything else to order_limit().
    params['side'] = self.SIDE_BUY
    return self.order_limit(timeInForce=timeInForce, **params)
def order_limit_sell(self, timeInForce=TIME_IN_FORCE_GTC, **params):
    """Send in a new limit sell order.

    :param symbol: required
    :type symbol: str
    :param quantity: required
    :type quantity: decimal
    :param price: required
    :type price: decimal
    :param timeInForce: defaults to Good-Till-Cancelled
    :type timeInForce: enum
    :param newClientOrderId: unique client id; auto-generated when omitted
    :type newClientOrderId: str
    :param stopPrice: used with stop orders
    :type stopPrice: decimal
    :param icebergQty: used with iceberg orders
    :type icebergQty: decimal

    :returns: API response; see :meth:`create_order` for the full shape

    :raises: BinanceResponseException, BinanceAPIException,
        BinanceOrderException, BinanceOrderMinAmountException,
        BinanceOrderMinPriceException, BinanceOrderMinTotalException,
        BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
    """
    # Fix the side to SELL and delegate everything else to order_limit().
    params['side'] = self.SIDE_SELL
    return self.order_limit(timeInForce=timeInForce, **params)
def order_market(self, **params):
    """Send in a new market order.

    :param symbol: required
    :type symbol: str
    :param side: required (BUY/SELL)
    :type side: enum
    :param quantity: required
    :type quantity: decimal
    :param newClientOrderId: unique client id; auto-generated when omitted
    :type newClientOrderId: str
    :param newOrderRespType: response detail level -- ACK, RESULT or FULL
    :type newOrderRespType: enum

    :returns: API response; see :meth:`create_order` for the full shape

    :raises: BinanceResponseException, BinanceAPIException,
        BinanceOrderException, BinanceOrderMinAmountException,
        BinanceOrderMinPriceException, BinanceOrderMinTotalException,
        BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
    """
    # Pin the order type to MARKET before delegating.
    params['type'] = self.ORDER_TYPE_MARKET
    return self.create_order(**params)
def order_market_buy(self, **params):
    """Send in a new market buy order.

    :param symbol: required
    :type symbol: str
    :param quantity: required
    :type quantity: decimal
    :param newClientOrderId: unique client id; auto-generated when omitted
    :type newClientOrderId: str
    :param newOrderRespType: response detail level -- ACK, RESULT or FULL
    :type newOrderRespType: enum

    :returns: API response; see :meth:`create_order` for the full shape

    :raises: BinanceResponseException, BinanceAPIException,
        BinanceOrderException, BinanceOrderMinAmountException,
        BinanceOrderMinPriceException, BinanceOrderMinTotalException,
        BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
    """
    # Fix the side to BUY and delegate to order_market().
    params['side'] = self.SIDE_BUY
    return self.order_market(**params)
def order_market_sell(self, **params):
    """Send in a new market sell order.

    :param symbol: required
    :type symbol: str
    :param quantity: required
    :type quantity: decimal
    :param newClientOrderId: unique client id; auto-generated when omitted
    :type newClientOrderId: str
    :param newOrderRespType: response detail level -- ACK, RESULT or FULL
    :type newOrderRespType: enum

    :returns: API response; see :meth:`create_order` for the full shape

    :raises: BinanceResponseException, BinanceAPIException,
        BinanceOrderException, BinanceOrderMinAmountException,
        BinanceOrderMinPriceException, BinanceOrderMinTotalException,
        BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
    """
    # Fix the side to SELL and delegate to order_market().
    params['side'] = self.SIDE_SELL
    return self.order_market(**params)
def create_test_order(self, **params):
    """Validate a new order without sending it to the matching engine.

    Creates and validates the order (signature, recvWindow, filters) but
    never executes it; a successful call returns an empty dict.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#test-new-order-trade

    :param symbol: required
    :type symbol: str
    :param side: required (BUY/SELL)
    :type side: enum
    :param type: required
    :type type: enum
    :param timeInForce: required for limit orders
    :type timeInForce: enum
    :param quantity: required
    :type quantity: decimal
    :param price: required for limit orders
    :type price: decimal
    :param newClientOrderId: unique client id; auto-generated when omitted
    :type newClientOrderId: str
    :param icebergQty: used with iceberg orders
    :type icebergQty: decimal
    :param newOrderRespType: response detail level -- ACK, RESULT or FULL
    :type newOrderRespType: enum
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response -- ``{}`` on success

    :raises: BinanceResponseException, BinanceAPIException,
        BinanceOrderException, BinanceOrderMinAmountException,
        BinanceOrderMinPriceException, BinanceOrderMinTotalException,
        BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
    """
    # Same signed POST as create_order(), but against the dry-run endpoint.
    return self._post('order/test', True, data=params)
def get_order(self, **params):
    """Check an order's status.

    Either ``orderId`` or ``origClientOrderId`` must be supplied.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#query-order-user_data

    :param symbol: required
    :type symbol: str
    :param orderId: the unique order id
    :type orderId: int
    :param origClientOrderId: optional
    :type origClientOrderId: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response -- a dict describing the order (price, origQty,
        executedQty, status, side, ...)

    :raises: BinanceResponseException, BinanceAPIException
    """
    # Signed GET against the order lookup endpoint.
    return self._get('order', True, data=params)
def get_all_orders(self, **params):
    """Get all account orders: active, canceled, or filled.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#all-orders-user_data

    :param symbol: required
    :type symbol: str
    :param orderId: return orders >= this order id
    :type orderId: int
    :param limit: default 500; max 500
    :type limit: int
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response -- a list of order dicts

    :raises: BinanceResponseException, BinanceAPIException
    """
    # Signed GET; pagination is driven by orderId/limit.
    return self._get('allOrders', True, data=params)
def cancel_order(self, **params):
    """Cancel an active order.

    Either ``orderId`` or ``origClientOrderId`` must be supplied.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#cancel-order-trade

    :param symbol: required
    :type symbol: str
    :param orderId: the unique order id
    :type orderId: int
    :param origClientOrderId: optional
    :type origClientOrderId: str
    :param newClientOrderId: unique id for this cancel; auto-generated by default
    :type newClientOrderId: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response -- a dict echoing the canceled order's ids

    :raises: BinanceResponseException, BinanceAPIException
    """
    # Signed DELETE against the order endpoint.
    return self._delete('order', True, data=params)
def get_open_orders(self, **params):
    """Get all currently open orders on a symbol.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#current-open-orders-user_data

    :param symbol: required
    :type symbol: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response -- a list of open-order dicts

    :raises: BinanceResponseException, BinanceAPIException
    """
    # Signed GET; only unfilled/partially-filled orders come back.
    return self._get('openOrders', True, data=params)
# Account Endpoints (continued)
def get_account(self, **params):
    """Get current account information.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#account-information-user_data

    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response -- a dict with commission rates, permission
        flags (canTrade/canWithdraw/canDeposit) and per-asset balances

    :raises: BinanceResponseException, BinanceAPIException
    """
    # Signed GET; requires a valid API key/secret pair.
    return self._get('account', True, data=params)
def get_my_trades(self, **params):
    """Get trades for a specific symbol on the current account.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#account-trade-list-user_data

    :param symbol: required
    :type symbol: str
    :param limit: default 500; max 500
    :type limit: int
    :param fromId: trade id to fetch from; defaults to the most recent trades
    :type fromId: int
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response -- a list of trade dicts (price, qty, commission,
        isBuyer/isMaker flags, ...)

    :raises: BinanceResponseException, BinanceAPIException
    """
    # Signed GET against the account trade-list endpoint.
    return self._get('myTrades', True, data=params)
# Withdraw Endpoints
def withdraw(self, **params):
    """Submit a withdraw request.

    https://www.binance.com/restapipub.html

    Assumptions:

    - The API key must have Withdraw permissions enabled
    - The target address must already have been used/approved via the
      website (email confirmation)

    :param asset: required
    :type asset: str
    :param address: required
    :type address: str
    :param amount: required
    :type amount: decimal
    :param name: optional description of the address
    :type name: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response -- e.g. ``{"msg": "success", "success": true}``

    :raises: BinanceResponseException, BinanceAPIException, BinanceWithdrawException
    """
    response = self._request_withdraw_api('post', 'withdraw.html', True, data=params)
    if response['success']:
        return response
    # The withdraw API reports failures in-band rather than via HTTP status,
    # so surface them as an explicit exception.
    raise BinanceWithdrawException(response['msg'])
def get_deposit_history(self, **params):
    """Fetch deposit history.

    https://www.binance.com/restapipub.html

    :param asset: optional
    :type asset: str
    :param status: optional -- 0 (pending) or 1 (success)
    :type status: int
    :param startTime: optional
    :type startTime: long
    :param endTime: optional
    :type endTime: long
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response -- ``{"depositList": [...], "success": true}``

    :raises: BinanceResponseException, BinanceAPIException
    """
    # Withdraw-API GET (the .html endpoints live on a separate API root).
    return self._request_withdraw_api('get', 'depositHistory.html', True, data=params)
def get_withdraw_history(self, **params):
    """Fetch withdraw history.

    https://www.binance.com/restapipub.html

    :param asset: optional
    :type asset: str
    :param status: optional -- 0: Email Sent, 1: Cancelled, 2: Awaiting
        Approval, 3: Rejected, 4: Processing, 5: Failure, 6: Completed
    :type status: int
    :param startTime: optional
    :type startTime: long
    :param endTime: optional
    :type endTime: long
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response -- ``{"withdrawList": [...], "success": true}``

    :raises: BinanceResponseException, BinanceAPIException
    """
    # Withdraw-API GET; each entry carries amount, address, asset, applyTime
    # and status, plus txId once broadcast.
    return self._request_withdraw_api('get', 'withdrawHistory.html', True, data=params)
def get_deposit_address(self, **params):
    """Fetch a deposit address for an asset.

    https://www.binance.com/restapipub.html

    :param asset: required
    :type asset: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int

    :returns: API response -- a dict with the address, an optional
        addressTag (memo), the asset, and a success flag

    :raises: BinanceResponseException, BinanceAPIException
    """
    # Withdraw-API GET against the deposit-address endpoint.
    return self._request_withdraw_api('get', 'depositAddress.html', True, data=params)
# User Stream Endpoints
def stream_get_listen_key(self):
    """Start a new user data stream and return its listen key.

    If a stream already exists the same key is returned; if the stream has
    become invalid a fresh key is issued. Calling this periodically can
    therefore also serve to keep the stream alive.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#start-user-data-stream-user_stream

    :returns: the listen key string extracted from the API response

    :raises: BinanceResponseException, BinanceAPIException
    """
    # Unsigned POST with an empty payload; only the key itself is returned
    # to the caller.
    response = self._post('userDataStream', False, data={})
    return response['listenKey']
def stream_keepalive(self, **params):
    """PING a user data stream to prevent it from timing out.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#keepalive-user-data-stream-user_stream

    :param listenKey: required
    :type listenKey: str

    :returns: API response -- ``{}`` on success

    :raises: BinanceResponseException, BinanceAPIException
    """
    # Unsigned PUT; the listenKey identifies the stream to refresh.
    return self._put('userDataStream', False, data=params)
def stream_close(self, **params):
    """Close out a user data stream.

    https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#close-user-data-stream-user_stream

    :param listenKey: required
    :type listenKey: str

    :returns: API response -- ``{}`` on success

    :raises: BinanceResponseException, BinanceAPIException
    """
    # Unsigned DELETE; the listenKey identifies the stream to close.
    return self._delete('userDataStream', False, data=params)
| 32.860479 | 252 | 0.536641 |
acdf6acf5af9bbeadd827684ea692eb1762fdb42 | 7,511 | py | Python | corehq/tabs/uitab.py | dannyroberts/commcare-hq | 4b0b8ecbe851e46307d3a0e635d6d5d6e31c3598 | [
"BSD-3-Clause"
] | null | null | null | corehq/tabs/uitab.py | dannyroberts/commcare-hq | 4b0b8ecbe851e46307d3a0e635d6d5d6e31c3598 | [
"BSD-3-Clause"
] | null | null | null | corehq/tabs/uitab.py | dannyroberts/commcare-hq | 4b0b8ecbe851e46307d3a0e635d6d5d6e31c3598 | [
"BSD-3-Clause"
] | 5 | 2015-11-30T13:12:45.000Z | 2019-07-01T19:27:07.000Z | from __future__ import absolute_import
from __future__ import unicode_literals
from django.conf import settings
from django.core.cache import cache
from django.urls import reverse, resolve, Resolver404
from django.utils.translation import get_language
from corehq.apps.domain.models import Domain
from corehq.tabs.exceptions import UrlPrefixFormatError, UrlPrefixFormatsSuggestion
from corehq.tabs.utils import sidebar_to_dropdown
from memoized import memoized
from dimagi.utils.django.cache import make_template_fragment_key
from dimagi.utils.web import get_url_base
def url_is_location_safe(url):
    """Return True when the view behind ``url`` is safe for location-restricted users.

    The URL is first stripped of the site base, then resolved to a view; a
    URL that does not resolve is treated as unsafe.
    """
    # Imported locally to avoid a circular import with the locations app.
    from corehq.apps.locations.permissions import is_location_safe
    relative_url = url.split(get_url_base())[-1] if url else None
    try:
        resolved = resolve(relative_url)
    except Resolver404:
        return False
    # Pass an empty request, since we should exclude any url that requires
    # request context.
    return is_location_safe(resolved.func, None, resolved.args, resolved.kwargs)
class UITab(object):
    """Base class for a top-navigation tab in the HQ UI.

    Subclasses declare ``title``, ``view``, ``url_prefix_formats`` (and
    optionally ``dispatcher``) and implement ``_is_viewable``; instances are
    constructed per-request and queried for visibility, sidebar and
    dropdown content.
    """

    # Display title of the tab (set by subclasses).
    title = None
    # Named view used by `url` to reverse the tab's landing page.
    view = None
    # Optional report dispatcher that supplies `sidebar_items`.
    dispatcher = None

    # Tuple of prefixes that this UITab claims e.g.
    # ('/a/{domain}/reports/', '/a/{domain}/otherthing/')
    # This is a required field.
    url_prefix_formats = ()

    # When False the tab is only rendered while it is the active tab.
    show_by_default = True

    # must be instance of GaTracker
    ga_tracker = None

    def __init__(self, request, domain=None, couch_user=None, project=None):
        """Bind the tab to a request/domain/user triple.

        :param request: the current Django request
        :param domain: domain (project space) name, if any
        :param couch_user: the requesting user
        :param project: optional pre-fetched Domain object (lazily loaded
            from ``domain`` otherwise)
        """
        self.domain = domain
        self.couch_user = couch_user
        self._project = project

        # This should not be considered as part of the subclass API unless it
        # is necessary. Try to add new explicit parameters instead.
        self._request = request

        # must be set manually (i.e. `tab.is_active_tab = True`)
        self.is_active_tab = False

        # Do some preemptive checks on the subclass's configuration (if DEBUG)
        if settings.DEBUG:
            if not self.url_prefix_formats:
                raise UrlPrefixFormatsSuggestion(
                    'Class {} must define url_prefix_formats. Try\n'
                    'url_prefix_formats = {}'
                    .format(self.__class__.__name__,
                            self.get_url_prefix_formats_suggestion()))
            # NOTE(review): the loop variable shadows the class attribute
            # `url_prefix_formats` (harmless here, but confusing), and the
            # error message grammar below looks garbled
            # ("has url_prefix_format has an issue") -- consider fixing.
            for url_prefix_formats in self.url_prefix_formats:
                try:
                    # Each prefix must be format()-able with a `domain` kwarg
                    # and nothing else (no positional placeholders).
                    url_prefix_formats.format(domain='')
                except (IndexError, KeyError):
                    raise UrlPrefixFormatError(
                        'Class {} has url_prefix_format has an issue: {}'
                        .format(self.__class__.__name__, url_prefix_formats))

    @property
    def project(self):
        """Lazily fetch and cache the Domain object for ``self.domain``."""
        if not self._project and self.domain:
            self._project = Domain.get_by_name(self.domain)
        return self._project

    @property
    def request_path(self):
        """Full path (including query string) of the bound request."""
        return self._request.get_full_path()

    @property
    def can_access_all_locations(self):
        """Is this a web user who can access project-wide data?"""
        # Defaults to True when middleware did not annotate the request.
        return getattr(self._request, 'can_access_all_locations', True)

    @property
    def dropdown_items(self):
        """Dropdown menu entries derived from the sidebar structure."""
        return sidebar_to_dropdown(sidebar_items=self.sidebar_items,
                                   domain=self.domain, current_url=self.url)

    @property
    def filtered_dropdown_items(self):
        """Dropdown items, restricted to location-safe URLs when needed."""
        if self.can_access_all_locations:
            return self.dropdown_items
        filtered = []
        for item in self.dropdown_items:
            if url_is_location_safe(item['url']):
                filtered.append(item)
        return filtered

    @property
    @memoized
    def sidebar_items(self):
        """Sidebar sections for this tab, delegated to the dispatcher (if any)."""
        if self.dispatcher:
            return self.dispatcher.navigation_sections(request=self._request, domain=self.domain)
        else:
            return []

    @property
    @memoized
    def filtered_sidebar_items(self):
        """Sidebar sections with location-unsafe pages (and emptied sections) removed."""
        if self.can_access_all_locations:
            return self.sidebar_items
        filtered = []
        for heading, pages in self.sidebar_items:
            safe_pages = [p for p in pages if url_is_location_safe(p['url'])]
            if safe_pages:
                filtered.append((heading, safe_pages))
        return filtered

    @property
    def _is_viewable(self):
        """
        Whether the tab should be displayed. Subclass implementations can skip
        checking whether domain, couch_user, or project is not None before
        accessing an attribute of them -- this property is accessed in
        should_show and wrapped in a try block that returns False in the
        case of an AttributeError for any of those variables.
        """
        raise NotImplementedError()

    @memoized
    def should_show(self):
        """Decide whether to render the tab for the current request."""
        if not self.show_by_default and not self.is_active_tab:
            return False
        if not self.can_access_all_locations:
            if self.dropdown_items and not self.filtered_dropdown_items:
                # location-safe filtering makes this whole tab inaccessible
                return False
            # Just a button tab, determine if it's location safe
            if not self.dropdown_items and not url_is_location_safe(self.url):
                return False
        try:
            return self._is_viewable
        except AttributeError:
            # See _is_viewable: missing domain/couch_user/project means hidden.
            return False

    @property
    @memoized
    def url(self):
        """Reverse the tab's landing URL; domain-scoped when possible, else None."""
        try:
            if self.domain:
                return reverse(self.view, args=[self.domain])
        except Exception:
            pass

        try:
            return reverse(self.view)
        except Exception:
            return None

    @property
    def url_prefixes(self):
        """URL prefixes this tab claims, with ``{domain}`` filled in."""
        # Use self._request.domain instead of self.domain to generate url-prefixes
        # because the latter might have a normalized domain name which might not match the
        # domain name mentioned in the URL. for example domain-name 'hq_test' is normalized to
        # 'hq-test'
        return [url_prefix_format.format(domain=getattr(self._request, 'domain', None))
                for url_prefix_format in self.url_prefix_formats]

    def get_url_prefix_formats_suggestion(self):
        """Suggest a minimal ``url_prefix_formats`` tuple from the tab's known URLs.

        Used only by the DEBUG-time configuration check in __init__ to give
        the developer a ready-made value.
        """
        import six.moves.urllib.parse
        accepted_urls = []
        # sorted shortest first
        all_urls = sorted(
            six.moves.urllib.parse.urlparse(url).path
            # replace the actual domain with {domain}
            .replace('/a/{}'.format(self.domain), '/a/{domain}')
            for url in self._get_inferred_urls
        )
        # accept only urls that don't start with an already-accepted prefix
        for url in all_urls:
            for prefix in accepted_urls:
                if url.startswith(prefix):
                    break
            else:
                accepted_urls.append(url)
        return tuple(accepted_urls)

    @property
    @memoized
    def _get_inferred_urls(self):
        """All URLs this tab links to: its own landing URL plus sidebar links."""
        urls = [self.url] if self.url else []
        for name, section in self.sidebar_items:
            urls.extend(item['url'] for item in section)
        return urls

    @classmethod
    def clear_dropdown_cache(cls, domain, user_id):
        """Invalidate the cached 'header_tab' template fragments for this tab.

        Both the active and inactive renderings are keyed separately, so
        delete both for the (domain, user, language) combination.
        """
        for is_active in True, False:
            key = make_template_fragment_key('header_tab', [
                cls.class_name(),
                domain,
                is_active,
                user_id,
                get_language(),
            ])
            cache.delete(key)

    @property
    def css_id(self):
        """DOM id used for the tab element (the concrete class name)."""
        return self.__class__.__name__

    @classmethod
    def class_name(cls):
        """Name of the concrete tab class (used in cache keys)."""
        return cls.__name__
acdf6d8556951cdf166406fc7ee9122a7d8ced48 | 31,670 | py | Python | SqlmapCelery/sqlmap/lib/core/target.py | tt9133github/hunter | 7a1be4ae1fbbadff291742f513d47f0921159648 | [
"Apache-2.0"
] | 322 | 2020-01-10T09:08:31.000Z | 2022-01-21T06:43:24.000Z | SqlmapCelery/sqlmap/lib/core/target.py | tt9133github/hunter | 7a1be4ae1fbbadff291742f513d47f0921159648 | [
"Apache-2.0"
] | 27 | 2020-01-10T10:12:21.000Z | 2022-03-08T23:38:23.000Z | SqlmapCelery/sqlmap/lib/core/target.py | tt9133github/hunter | 7a1be4ae1fbbadff291742f513d47f0921159648 | [
"Apache-2.0"
] | 128 | 2020-01-10T09:08:14.000Z | 2022-03-18T06:56:01.000Z | #!/usr/bin/env python
"""
Copyright (c) 2006-2017 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import codecs
import functools
import os
import re
import tempfile
import time
import urlparse
from lib.core.common import Backend
from lib.core.common import getSafeExString
from lib.core.common import getUnicode
from lib.core.common import hashDBRetrieve
from lib.core.common import intersect
from lib.core.common import normalizeUnicode
from lib.core.common import openFile
from lib.core.common import paramToDict
from lib.core.common import randomStr
from lib.core.common import readInput
from lib.core.common import resetCookieJar
from lib.core.common import urldecode
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.data import mergedOptions
from lib.core.data import paths
from lib.core.datatype import InjectionDict
from lib.core.dicts import DBMS_DICT
from lib.core.dump import dumper
from lib.core.enums import HASHDB_KEYS
from lib.core.enums import HTTP_HEADER
from lib.core.enums import HTTPMETHOD
from lib.core.enums import MKSTEMP_PREFIX
from lib.core.enums import PLACE
from lib.core.enums import POST_HINT
from lib.core.exception import SqlmapFilePathException
from lib.core.exception import SqlmapGenericException
from lib.core.exception import SqlmapMissingPrivileges
from lib.core.exception import SqlmapSystemException
from lib.core.exception import SqlmapUserQuitException
from lib.core.option import _setDBMS
from lib.core.option import _setKnowledgeBaseAttributes
from lib.core.option import _setAuthCred
from lib.core.settings import ASTERISK_MARKER
from lib.core.settings import CSRF_TOKEN_PARAMETER_INFIXES
from lib.core.settings import CUSTOM_INJECTION_MARK_CHAR
from lib.core.settings import DEFAULT_GET_POST_DELIMITER
from lib.core.settings import HOST_ALIASES
from lib.core.settings import ARRAY_LIKE_RECOGNITION_REGEX
from lib.core.settings import JSON_RECOGNITION_REGEX
from lib.core.settings import JSON_LIKE_RECOGNITION_REGEX
from lib.core.settings import MULTIPART_RECOGNITION_REGEX
from lib.core.settings import PROBLEMATIC_CUSTOM_INJECTION_PATTERNS
from lib.core.settings import REFERER_ALIASES
from lib.core.settings import RESTORE_MERGED_OPTIONS
from lib.core.settings import RESULTS_FILE_FORMAT
from lib.core.settings import SUPPORTED_DBMS
from lib.core.settings import UNENCODED_ORIGINAL_VALUE
from lib.core.settings import UNICODE_ENCODING
from lib.core.settings import UNKNOWN_DBMS_VERSION
from lib.core.settings import URI_INJECTABLE_REGEX
from lib.core.settings import USER_AGENT_ALIASES
from lib.core.settings import XML_RECOGNITION_REGEX
from lib.utils.hashdb import HashDB
from thirdparty.odict.odict import OrderedDict
def _setRequestParams():
    """
    Check and set the parameters and perform checks on 'data' option for
    HTTP method POST.

    Populates conf.parameters and conf.paramDict for every injectable place
    (GET, POST body, URI, cookie, headers and user-marked custom injection
    points) and raises SqlmapGenericException when nothing testable remains.

    Fix: the prompts at the '--data' custom-mark question and the JSON
    detection question previously compared the raw readInput() result
    against 'Q'/'Y' without .upper(), unlike every other prompt in this
    function, so lowercase answers were silently treated as "no" there.
    """
    if conf.direct:
        # Direct database connection ('-d') bypasses request parsing entirely
        conf.parameters[None] = "direct connection"
        return
    testableParameters = False
    # Perform checks on GET parameters
    if conf.parameters.get(PLACE.GET):
        parameters = conf.parameters[PLACE.GET]
        paramDict = paramToDict(PLACE.GET, parameters)
        if paramDict:
            conf.paramDict[PLACE.GET] = paramDict
            testableParameters = True
    # Perform checks on POST parameters
    if conf.method == HTTPMETHOD.POST and conf.data is None:
        logger.warn("detected empty POST body")
        conf.data = ""
    if conf.data is not None:
        conf.method = HTTPMETHOD.POST if not conf.method or conf.method == HTTPMETHOD.GET else conf.method
        hintNames = []
        def process(match, repl):
            # re.sub() callback: expands \g<..> backreferences in 'repl'
            # by hand (so named groups from the recognition regexes work),
            # and records (value prefix, parameter name) pairs in hintNames
            # so marked parameters can be given readable names later
            retVal = match.group(0)
            if not (conf.testParameter and match.group("name") not in conf.testParameter):
                retVal = repl
                while True:
                    _ = re.search(r"\\g<([^>]+)>", retVal)
                    if _:
                        retVal = retVal.replace(_.group(0), match.group(int(_.group(1)) if _.group(1).isdigit() else _.group(1)))
                    else:
                        break
                if CUSTOM_INJECTION_MARK_CHAR in retVal:
                    hintNames.append((retVal.split(CUSTOM_INJECTION_MARK_CHAR)[0], match.group("name")))
            return retVal
        if kb.processUserMarks is None and CUSTOM_INJECTION_MARK_CHAR in conf.data:
            message = "custom injection marking character ('%s') found in option " % CUSTOM_INJECTION_MARK_CHAR
            message += "'--data'. Do you want to process it? [Y/n/q] "
            # Fix: .upper() added for case-insensitive answers (consistent
            # with all other prompts in this function)
            choice = readInput(message, default='Y').upper()
            if choice == 'Q':
                raise SqlmapUserQuitException
            else:
                kb.processUserMarks = choice == 'Y'
                if kb.processUserMarks:
                    kb.testOnlyCustom = True
        if not (kb.processUserMarks and CUSTOM_INJECTION_MARK_CHAR in conf.data):
            # Auto-detect the POST body format and insert custom injection
            # marks into the likely injectable values
            if re.search(JSON_RECOGNITION_REGEX, conf.data):
                message = "JSON data found in %s data. " % conf.method
                message += "Do you want to process it? [Y/n/q] "
                # Fix: .upper() added for case-insensitive answers
                choice = readInput(message, default='Y').upper()
                if choice == 'Q':
                    raise SqlmapUserQuitException
                elif choice == 'Y':
                    conf.data = getattr(conf.data, UNENCODED_ORIGINAL_VALUE, conf.data)
                    conf.data = conf.data.replace(CUSTOM_INJECTION_MARK_CHAR, ASTERISK_MARKER)
                    conf.data = re.sub(r'("(?P<name>[^"]+)"\s*:\s*"[^"]+)"', functools.partial(process, repl=r'\g<1>%s"' % CUSTOM_INJECTION_MARK_CHAR), conf.data)
                    conf.data = re.sub(r'("(?P<name>[^"]+)"\s*:\s*)(-?\d[\d\.]*\b)', functools.partial(process, repl=r'\g<0>%s' % CUSTOM_INJECTION_MARK_CHAR), conf.data)
                    match = re.search(r'(?P<name>[^"]+)"\s*:\s*\[([^\]]+)\]', conf.data)
                    if match and not (conf.testParameter and match.group("name") not in conf.testParameter):
                        _ = match.group(2)
                        _ = re.sub(r'("[^"]+)"', '\g<1>%s"' % CUSTOM_INJECTION_MARK_CHAR, _)
                        _ = re.sub(r'(\A|,|\s+)(-?\d[\d\.]*\b)', '\g<0>%s' % CUSTOM_INJECTION_MARK_CHAR, _)
                        conf.data = conf.data.replace(match.group(0), match.group(0).replace(match.group(2), _))
                    kb.postHint = POST_HINT.JSON
            elif re.search(JSON_LIKE_RECOGNITION_REGEX, conf.data):
                message = "JSON-like data found in %s data. " % conf.method
                message += "Do you want to process it? [Y/n/q] "
                choice = readInput(message, default='Y').upper()
                if choice == 'Q':
                    raise SqlmapUserQuitException
                elif choice == 'Y':
                    conf.data = getattr(conf.data, UNENCODED_ORIGINAL_VALUE, conf.data)
                    conf.data = conf.data.replace(CUSTOM_INJECTION_MARK_CHAR, ASTERISK_MARKER)
                    conf.data = re.sub(r"('(?P<name>[^']+)'\s*:\s*'[^']+)'", functools.partial(process, repl=r"\g<1>%s'" % CUSTOM_INJECTION_MARK_CHAR), conf.data)
                    conf.data = re.sub(r"('(?P<name>[^']+)'\s*:\s*)(-?\d[\d\.]*\b)", functools.partial(process, repl=r"\g<0>%s" % CUSTOM_INJECTION_MARK_CHAR), conf.data)
                    kb.postHint = POST_HINT.JSON_LIKE
            elif re.search(ARRAY_LIKE_RECOGNITION_REGEX, conf.data):
                message = "Array-like data found in %s data. " % conf.method
                message += "Do you want to process it? [Y/n/q] "
                choice = readInput(message, default='Y').upper()
                if choice == 'Q':
                    raise SqlmapUserQuitException
                elif choice == 'Y':
                    conf.data = conf.data.replace(CUSTOM_INJECTION_MARK_CHAR, ASTERISK_MARKER)
                    conf.data = re.sub(r"(=[^%s]+)" % DEFAULT_GET_POST_DELIMITER, r"\g<1>%s" % CUSTOM_INJECTION_MARK_CHAR, conf.data)
                    kb.postHint = POST_HINT.ARRAY_LIKE
            elif re.search(XML_RECOGNITION_REGEX, conf.data):
                message = "SOAP/XML data found in %s data. " % conf.method
                message += "Do you want to process it? [Y/n/q] "
                choice = readInput(message, default='Y').upper()
                if choice == 'Q':
                    raise SqlmapUserQuitException
                elif choice == 'Y':
                    conf.data = getattr(conf.data, UNENCODED_ORIGINAL_VALUE, conf.data)
                    conf.data = conf.data.replace(CUSTOM_INJECTION_MARK_CHAR, ASTERISK_MARKER)
                    conf.data = re.sub(r"(<(?P<name>[^>]+)( [^<]*)?>)([^<]+)(</\2)", functools.partial(process, repl=r"\g<1>\g<4>%s\g<5>" % CUSTOM_INJECTION_MARK_CHAR), conf.data)
                    kb.postHint = POST_HINT.SOAP if "soap" in conf.data.lower() else POST_HINT.XML
            elif re.search(MULTIPART_RECOGNITION_REGEX, conf.data):
                message = "Multipart-like data found in %s data. " % conf.method
                message += "Do you want to process it? [Y/n/q] "
                choice = readInput(message, default='Y').upper()
                if choice == 'Q':
                    raise SqlmapUserQuitException
                elif choice == 'Y':
                    conf.data = getattr(conf.data, UNENCODED_ORIGINAL_VALUE, conf.data)
                    conf.data = conf.data.replace(CUSTOM_INJECTION_MARK_CHAR, ASTERISK_MARKER)
                    conf.data = re.sub(r"(?si)((Content-Disposition[^\n]+?name\s*=\s*[\"'](?P<name>[^\n]+?)[\"']).+?)(((\r)?\n)+--)", functools.partial(process, repl=r"\g<1>%s\g<4>" % CUSTOM_INJECTION_MARK_CHAR), conf.data)
                    kb.postHint = POST_HINT.MULTIPART
            if not kb.postHint:
                if CUSTOM_INJECTION_MARK_CHAR in conf.data:  # later processed
                    pass
                else:
                    # Plain urlencoded body
                    place = PLACE.POST
                    conf.parameters[place] = conf.data
                    paramDict = paramToDict(place, conf.data)
                    if paramDict:
                        conf.paramDict[place] = paramDict
                        testableParameters = True
            else:
                if CUSTOM_INJECTION_MARK_CHAR not in conf.data:  # in case that no usable parameter values has been found
                    conf.parameters[PLACE.POST] = conf.data
    kb.processUserMarks = True if (kb.postHint and CUSTOM_INJECTION_MARK_CHAR in conf.data) else kb.processUserMarks
    # No classic parameters anywhere - offer injecting into the URI path itself
    if re.search(URI_INJECTABLE_REGEX, conf.url, re.I) and not any(place in conf.parameters for place in (PLACE.GET, PLACE.POST)) and not kb.postHint and not CUSTOM_INJECTION_MARK_CHAR in (conf.data or "") and conf.url.startswith("http"):
        warnMsg = "you've provided target URL without any GET "
        warnMsg += "parameters (e.g. 'http://www.site.com/article.php?id=1') "
        warnMsg += "and without providing any POST parameters "
        warnMsg += "through option '--data'"
        logger.warn(warnMsg)
        message = "do you want to try URI injections "
        message += "in the target URL itself? [Y/n/q] "
        choice = readInput(message, default='Y').upper()
        if choice == 'Q':
            raise SqlmapUserQuitException
        elif choice == 'Y':
            conf.url = "%s%s" % (conf.url, CUSTOM_INJECTION_MARK_CHAR)
            kb.processUserMarks = True
    # Translate custom injection marks in URI, POST body and headers into
    # named entries of conf.paramDict
    for place, value in ((PLACE.URI, conf.url), (PLACE.CUSTOM_POST, conf.data), (PLACE.CUSTOM_HEADER, str(conf.httpHeaders))):
        _ = re.sub(PROBLEMATIC_CUSTOM_INJECTION_PATTERNS, "", value or "") if place == PLACE.CUSTOM_HEADER else value or ""
        if CUSTOM_INJECTION_MARK_CHAR in _:
            if kb.processUserMarks is None:
                lut = {PLACE.URI: '-u', PLACE.CUSTOM_POST: '--data', PLACE.CUSTOM_HEADER: '--headers/--user-agent/--referer/--cookie'}
                message = "custom injection marking character ('%s') found in option " % CUSTOM_INJECTION_MARK_CHAR
                message += "'%s'. Do you want to process it? [Y/n/q] " % lut[place]
                choice = readInput(message, default='Y').upper()
                if choice == 'Q':
                    raise SqlmapUserQuitException
                else:
                    kb.processUserMarks = choice == 'Y'
                    if kb.processUserMarks:
                        kb.testOnlyCustom = True
            if "=%s" % CUSTOM_INJECTION_MARK_CHAR in _:
                warnMsg = "it seems that you've provided empty parameter value(s) "
                warnMsg += "for testing. Please, always use only valid parameter values "
                warnMsg += "so sqlmap could be able to run properly"
                logger.warn(warnMsg)
            if not kb.processUserMarks:
                # User declined: fall back to treating the marked data as
                # ordinary GET/POST parameters
                if place == PLACE.URI:
                    query = urlparse.urlsplit(value).query
                    if query:
                        parameters = conf.parameters[PLACE.GET] = query
                        paramDict = paramToDict(PLACE.GET, parameters)
                        if paramDict:
                            conf.url = conf.url.split('?')[0]
                            conf.paramDict[PLACE.GET] = paramDict
                            testableParameters = True
                elif place == PLACE.CUSTOM_POST:
                    conf.parameters[PLACE.POST] = conf.data
                    paramDict = paramToDict(PLACE.POST, conf.data)
                    if paramDict:
                        conf.paramDict[PLACE.POST] = paramDict
                        testableParameters = True
            else:
                conf.parameters[place] = value
                conf.paramDict[place] = OrderedDict()
                if place == PLACE.CUSTOM_HEADER:
                    # One paramDict entry per mark inside each header value;
                    # the mark character is stripped from the live headers
                    for index in xrange(len(conf.httpHeaders)):
                        header, value = conf.httpHeaders[index]
                        if CUSTOM_INJECTION_MARK_CHAR in re.sub(PROBLEMATIC_CUSTOM_INJECTION_PATTERNS, "", value):
                            parts = value.split(CUSTOM_INJECTION_MARK_CHAR)
                            for i in xrange(len(parts) - 1):
                                conf.paramDict[place]["%s #%d%s" % (header, i + 1, CUSTOM_INJECTION_MARK_CHAR)] = "%s,%s" % (header, "".join("%s%s" % (parts[j], CUSTOM_INJECTION_MARK_CHAR if i == j else "") for j in xrange(len(parts))))
                            conf.httpHeaders[index] = (header, value.replace(CUSTOM_INJECTION_MARK_CHAR, ""))
                else:
                    parts = value.split(CUSTOM_INJECTION_MARK_CHAR)
                    for i in xrange(len(parts) - 1):
                        name = None
                        if kb.postHint:
                            # Prefer a readable name recorded by process()
                            for ending, _ in hintNames:
                                if parts[i].endswith(ending):
                                    name = "%s %s" % (kb.postHint, _)
                                    break
                        if name is None:
                            name = "%s#%s%s" % (("%s " % kb.postHint) if kb.postHint else "", i + 1, CUSTOM_INJECTION_MARK_CHAR)
                        conf.paramDict[place][name] = "".join("%s%s" % (parts[j], CUSTOM_INJECTION_MARK_CHAR if i == j else "") for j in xrange(len(parts)))
                if place == PLACE.URI and PLACE.GET in conf.paramDict:
                    del conf.paramDict[PLACE.GET]
                elif place == PLACE.CUSTOM_POST and PLACE.POST in conf.paramDict:
                    del conf.paramDict[PLACE.POST]
                testableParameters = True
    if kb.processUserMarks:
        # Strip the marks from the stored options once they are registered
        for item in ("url", "data", "agent", "referer", "cookie"):
            if conf.get(item):
                conf[item] = conf[item].replace(CUSTOM_INJECTION_MARK_CHAR, "")
    # Perform checks on Cookie parameters
    if conf.cookie:
        conf.parameters[PLACE.COOKIE] = conf.cookie
        paramDict = paramToDict(PLACE.COOKIE, conf.cookie)
        if paramDict:
            conf.paramDict[PLACE.COOKIE] = paramDict
            testableParameters = True
    # Perform checks on header values
    if conf.httpHeaders:
        for httpHeader, headerValue in list(conf.httpHeaders):
            # Url encoding of the header values should be avoided
            # Reference: http://stackoverflow.com/questions/5085904/is-ok-to-urlencode-the-value-in-headerlocation-value
            if httpHeader.title() == HTTP_HEADER.USER_AGENT:
                conf.parameters[PLACE.USER_AGENT] = urldecode(headerValue)
                condition = any((not conf.testParameter, intersect(conf.testParameter, USER_AGENT_ALIASES, True)))
                if condition:
                    conf.paramDict[PLACE.USER_AGENT] = {PLACE.USER_AGENT: headerValue}
                    testableParameters = True
            elif httpHeader.title() == HTTP_HEADER.REFERER:
                conf.parameters[PLACE.REFERER] = urldecode(headerValue)
                condition = any((not conf.testParameter, intersect(conf.testParameter, REFERER_ALIASES, True)))
                if condition:
                    conf.paramDict[PLACE.REFERER] = {PLACE.REFERER: headerValue}
                    testableParameters = True
            elif httpHeader.title() == HTTP_HEADER.HOST:
                conf.parameters[PLACE.HOST] = urldecode(headerValue)
                condition = any((not conf.testParameter, intersect(conf.testParameter, HOST_ALIASES, True)))
                if condition:
                    conf.paramDict[PLACE.HOST] = {PLACE.HOST: headerValue}
                    testableParameters = True
            else:
                condition = intersect(conf.testParameter, [httpHeader], True)
                if condition:
                    conf.parameters[PLACE.CUSTOM_HEADER] = str(conf.httpHeaders)
                    conf.paramDict[PLACE.CUSTOM_HEADER] = {httpHeader: "%s,%s%s" % (httpHeader, headerValue, CUSTOM_INJECTION_MARK_CHAR)}
                    conf.httpHeaders = [(header, value.replace(CUSTOM_INJECTION_MARK_CHAR, "")) for header, value in conf.httpHeaders]
                    testableParameters = True
    if not conf.parameters:
        errMsg = "you did not provide any GET, POST and Cookie "
        errMsg += "parameter, neither an User-Agent, Referer or Host header value"
        raise SqlmapGenericException(errMsg)
    elif not testableParameters:
        errMsg = "all testable parameters you provided are not present "
        errMsg += "within the given request data"
        raise SqlmapGenericException(errMsg)
    if conf.csrfToken:
        # The user-supplied anti-CSRF token must exist somewhere in the request
        if not any(conf.csrfToken in _ for _ in (conf.paramDict.get(PLACE.GET, {}), conf.paramDict.get(PLACE.POST, {}))) and not re.search(r"\b%s\b" % re.escape(conf.csrfToken), conf.data or "") and not conf.csrfToken in set(_[0].lower() for _ in conf.httpHeaders) and not conf.csrfToken in conf.paramDict.get(PLACE.COOKIE, {}):
            errMsg = "anti-CSRF token parameter '%s' not " % conf.csrfToken
            errMsg += "found in provided GET, POST, Cookie or header values"
            raise SqlmapGenericException(errMsg)
    else:
        # Heuristically offer anti-CSRF handling for likely token parameters
        for place in (PLACE.GET, PLACE.POST, PLACE.COOKIE):
            for parameter in conf.paramDict.get(place, {}):
                if any(parameter.lower().count(_) for _ in CSRF_TOKEN_PARAMETER_INFIXES):
                    message = "%s parameter '%s' appears to hold anti-CSRF token. " % (place, parameter)
                    message += "Do you want sqlmap to automatically update it in further requests? [y/N] "
                    if readInput(message, default='N', boolean=True):
                        conf.csrfToken = parameter
                        break
def _setHashDB():
    """
    Check and set the HashDB SQLite file for query resume functionality.
    """
    if not conf.hashDBFile:
        # Reuse the user-supplied session file path when given; otherwise
        # keep the resume database next to the other per-target output files
        conf.hashDBFile = conf.sessionFile or os.path.join(conf.outputPath, "session.sqlite")
    if os.path.exists(conf.hashDBFile):
        if conf.flushSession:
            # '--flush-session' discards all previously resumable data
            try:
                os.remove(conf.hashDBFile)
                logger.info("flushing session file")
            except OSError, msg:
                errMsg = "unable to flush the session file (%s)" % msg
                raise SqlmapFilePathException(errMsg)
    conf.hashDB = HashDB(conf.hashDBFile)
def _resumeHashDBValues():
    """
    Resume stored data values from HashDB
    """
    # Each retrieve falls back to the current (default) kb value when the
    # session database holds nothing under the given key
    kb.absFilePaths = hashDBRetrieve(HASHDB_KEYS.KB_ABS_FILE_PATHS, True) or kb.absFilePaths
    kb.brute.tables = hashDBRetrieve(HASHDB_KEYS.KB_BRUTE_TABLES, True) or kb.brute.tables
    kb.brute.columns = hashDBRetrieve(HASHDB_KEYS.KB_BRUTE_COLUMNS, True) or kb.brute.columns
    kb.chars = hashDBRetrieve(HASHDB_KEYS.KB_CHARS, True) or kb.chars
    kb.dynamicMarkings = hashDBRetrieve(HASHDB_KEYS.KB_DYNAMIC_MARKINGS, True) or kb.dynamicMarkings
    kb.xpCmdshellAvailable = hashDBRetrieve(HASHDB_KEYS.KB_XP_CMDSHELL_AVAILABLE) or kb.xpCmdshellAvailable
    # Stored value is a string; coerce to int or reset when absent/malformed
    kb.errorChunkLength = hashDBRetrieve(HASHDB_KEYS.KB_ERROR_CHUNK_LENGTH)
    if kb.errorChunkLength and kb.errorChunkLength.isdigit():
        kb.errorChunkLength = int(kb.errorChunkLength)
    else:
        kb.errorChunkLength = None
    conf.tmpPath = conf.tmpPath or hashDBRetrieve(HASHDB_KEYS.CONF_TMP_PATH)
    # Re-register previously found injection points that still match the
    # current parameter dictionary (and the requested techniques, if any)
    for injection in hashDBRetrieve(HASHDB_KEYS.KB_INJECTIONS, True) or []:
        if isinstance(injection, InjectionDict) and injection.place in conf.paramDict and \
            injection.parameter in conf.paramDict[injection.place]:
            if not conf.tech or intersect(conf.tech, injection.data.keys()):
                if intersect(conf.tech, injection.data.keys()):
                    # Keep only the data for the user-requested techniques
                    injection.data = dict(_ for _ in injection.data.items() if _[0] in conf.tech)
                if injection not in kb.injections:
                    kb.injections.append(injection)
    _resumeDBMS()
    _resumeOS()
def _resumeDBMS():
    """
    Resume stored DBMS information from HashDB
    """
    value = hashDBRetrieve(HASHDB_KEYS.DBMS)
    if not value:
        return
    dbms = value.lower()
    dbmsVersion = [UNKNOWN_DBMS_VERSION]
    # Stored value may be of the form "<dbms alias> <version>"
    versioned = re.search(r"\A(%s) (.*)" % "|".join(alias for alias in SUPPORTED_DBMS), dbms, re.I)
    if versioned:
        dbms = versioned.group(1).lower()
        dbmsVersion = [versioned.group(2)]
    if conf.dbms:
        # User forced a DBMS: check whether it contradicts the resumed one
        consistent = True
        for aliases, _, _, _ in DBMS_DICT.values():
            if conf.dbms.lower() in aliases and dbms not in aliases:
                consistent = False
                break
        if not consistent:
            message = "you provided '%s' as a back-end DBMS, " % conf.dbms
            message += "but from a past scan information on the target URL "
            message += "sqlmap assumes the back-end DBMS is '%s'. " % dbms
            message += "Do you really want to force the back-end "
            message += "DBMS value? [y/N] "
            if not readInput(message, default='N', boolean=True):
                # User prefers the resumed value over the forced one
                conf.dbms = None
                Backend.setDbms(dbms)
                Backend.setVersionList(dbmsVersion)
    else:
        logger.info("resuming back-end DBMS '%s' " % dbms)
        Backend.setDbms(dbms)
        Backend.setVersionList(dbmsVersion)
def _resumeOS():
    """
    Resume stored OS information from HashDB

    Fix: the local variable previously named 'os' shadowed the stdlib
    'os' module inside this function (any os.path/os.* call here would
    have failed); renamed to 'osName'.
    """
    value = hashDBRetrieve(HASHDB_KEYS.OS)
    if not value:
        return
    osName = value
    # hashDBRetrieve may hand back the string 'None' for an unset value
    if osName and osName != 'None':
        infoMsg = "resuming back-end DBMS operating system '%s' " % osName
        logger.info(infoMsg)
        if conf.os and conf.os.lower() != osName.lower():
            message = "you provided '%s' as back-end DBMS operating " % conf.os
            message += "system, but from a past scan information on the "
            message += "target URL sqlmap assumes the back-end DBMS "
            message += "operating system is %s. " % osName
            message += "Do you really want to force the back-end DBMS "
            message += "OS value? [y/N] "
            if not readInput(message, default='N', boolean=True):
                # User prefers the resumed OS over the forced one
                conf.os = osName
        else:
            conf.os = osName
        Backend.setOs(conf.os)
def _setResultsFile():
    """
    Create results file for storing results of running in a
    multiple target mode.
    """
    if not conf.multipleTargets:
        return
    if not conf.resultsFP:
        conf.resultsFilename = os.path.join(paths.SQLMAP_OUTPUT_PATH, time.strftime(RESULTS_FILE_FORMAT).lower())
        try:
            # Unbuffered (buffering=0) so partial results survive a crash
            conf.resultsFP = openFile(conf.resultsFilename, "w+", UNICODE_ENCODING, buffering=0)
        except (OSError, IOError), ex:
            # Output directory not writable: fall back to a temporary file
            try:
                warnMsg = "unable to create results file '%s' ('%s'). " % (conf.resultsFilename, getUnicode(ex))
                handle, conf.resultsFilename = tempfile.mkstemp(prefix=MKSTEMP_PREFIX.RESULTS, suffix=".csv")
                os.close(handle)
                conf.resultsFP = openFile(conf.resultsFilename, "w+", UNICODE_ENCODING, buffering=0)
                warnMsg += "Using temporary file '%s' instead" % conf.resultsFilename
                logger.warn(warnMsg)
            except IOError, _:
                errMsg = "unable to write to the temporary directory ('%s'). " % _
                errMsg += "Please make sure that your disk is not full and "
                errMsg += "that you have sufficient write permissions to "
                errMsg += "create temporary files and/or directories"
                raise SqlmapSystemException(errMsg)
        # CSV header row
        conf.resultsFP.writelines("Target URL,Place,Parameter,Technique(s),Note(s)%s" % os.linesep)
        logger.info("using '%s' as the CSV results file in multiple targets mode" % conf.resultsFilename)
def _createFilesDir():
    """
    Create the file directory.
    """
    # Only needed when file retrieval ('--file-read') was requested
    if not conf.rFile:
        return
    conf.filePath = paths.SQLMAP_FILES_PATH % conf.hostname
    if not os.path.isdir(conf.filePath):
        try:
            os.makedirs(conf.filePath, 0755)
        except OSError, ex:
            # Creation failed (e.g. permissions): fall back to a temp directory
            tempDir = tempfile.mkdtemp(prefix="sqlmapfiles")
            warnMsg = "unable to create files directory "
            warnMsg += "'%s' (%s). " % (conf.filePath, getUnicode(ex))
            warnMsg += "Using temporary directory '%s' instead" % tempDir
            logger.warn(warnMsg)
            conf.filePath = tempDir
def _createDumpDir():
    """
    Create the dump directory.
    """
    # Only needed when table dumping or searching was requested
    if not conf.dumpTable and not conf.dumpAll and not conf.search:
        return
    conf.dumpPath = paths.SQLMAP_DUMP_PATH % conf.hostname
    if not os.path.isdir(conf.dumpPath):
        try:
            os.makedirs(conf.dumpPath, 0755)
        except OSError, ex:
            # Creation failed (e.g. permissions): fall back to a temp directory
            tempDir = tempfile.mkdtemp(prefix="sqlmapdump")
            warnMsg = "unable to create dump directory "
            warnMsg += "'%s' (%s). " % (conf.dumpPath, getUnicode(ex))
            warnMsg += "Using temporary directory '%s' instead" % tempDir
            logger.warn(warnMsg)
            conf.dumpPath = tempDir
def _configureDumper():
    # Expose the shared dumper instance via the configuration and open
    # its per-target output file
    conf.dumper = dumper
    conf.dumper.setOutputFile()
def _createTargetDirs():
    """
    Create the output directory.
    """
    try:
        if not os.path.isdir(paths.SQLMAP_OUTPUT_PATH):
            os.makedirs(paths.SQLMAP_OUTPUT_PATH, 0755)
        # Probe writability by creating and removing a random file
        _ = os.path.join(paths.SQLMAP_OUTPUT_PATH, randomStr())
        open(_, "w+b").close()
        os.remove(_)
        if conf.outputDir:
            warnMsg = "using '%s' as the output directory" % paths.SQLMAP_OUTPUT_PATH
            logger.warn(warnMsg)
    except (OSError, IOError), ex:
        # Base output directory unusable: fall back to a temp directory
        try:
            tempDir = tempfile.mkdtemp(prefix="sqlmapoutput")
        except Exception, _:
            errMsg = "unable to write to the temporary directory ('%s'). " % _
            errMsg += "Please make sure that your disk is not full and "
            errMsg += "that you have sufficient write permissions to "
            errMsg += "create temporary files and/or directories"
            raise SqlmapSystemException(errMsg)
        warnMsg = "unable to %s output directory " % ("create" if not os.path.isdir(paths.SQLMAP_OUTPUT_PATH) else "write to the")
        warnMsg += "'%s' (%s). " % (paths.SQLMAP_OUTPUT_PATH, getUnicode(ex))
        warnMsg += "Using temporary directory '%s' instead" % getUnicode(tempDir)
        logger.warn(warnMsg)
        paths.SQLMAP_OUTPUT_PATH = tempDir
    # Per-target subdirectory named after the (normalized) hostname
    conf.outputPath = os.path.join(getUnicode(paths.SQLMAP_OUTPUT_PATH), normalizeUnicode(getUnicode(conf.hostname)))
    if not os.path.isdir(conf.outputPath):
        try:
            os.makedirs(conf.outputPath, 0755)
        except (OSError, IOError), ex:
            # Same temp-directory fallback for the per-target directory
            try:
                tempDir = tempfile.mkdtemp(prefix="sqlmapoutput")
            except Exception, _:
                errMsg = "unable to write to the temporary directory ('%s'). " % _
                errMsg += "Please make sure that your disk is not full and "
                errMsg += "that you have sufficient write permissions to "
                errMsg += "create temporary files and/or directories"
                raise SqlmapSystemException(errMsg)
            warnMsg = "unable to create output directory "
            warnMsg += "'%s' (%s). " % (conf.outputPath, getUnicode(ex))
            warnMsg += "Using temporary directory '%s' instead" % getUnicode(tempDir)
            logger.warn(warnMsg)
            conf.outputPath = tempDir
    try:
        # Record the target URL (and POST body, if any) for later reference
        with codecs.open(os.path.join(conf.outputPath, "target.txt"), "w+", UNICODE_ENCODING) as f:
            f.write(kb.originalUrls.get(conf.url) or conf.url or conf.hostname)
            f.write(" (%s)" % (HTTPMETHOD.POST if conf.data else HTTPMETHOD.GET))
            if conf.data:
                f.write("\n\n%s" % getUnicode(conf.data))
    except IOError, ex:
        if "denied" in getUnicode(ex):
            errMsg = "you don't have enough permissions "
        else:
            errMsg = "something went wrong while trying "
        errMsg += "to write to the output directory '%s' (%s)" % (paths.SQLMAP_OUTPUT_PATH, getSafeExString(ex))
        raise SqlmapMissingPrivileges(errMsg)
    _createDumpDir()
    _createFilesDir()
    _configureDumper()
def _restoreMergedOptions():
    """
    Restore merged options (command line, configuration file and default
    values) that might have been modified while testing a previous target.
    """
    for name in RESTORE_MERGED_OPTIONS:
        conf[name] = mergedOptions[name]
def initTargetEnv():
    """
    Initialize target environment.
    """
    if conf.multipleTargets:
        # Reset all per-target state before moving on to the next target
        if conf.hashDB:
            conf.hashDB.close()
        if conf.cj:
            resetCookieJar(conf.cj)
        conf.paramDict = {}
        conf.parameters = {}
        conf.hashDBFile = None
        _setKnowledgeBaseAttributes(False)
        _restoreMergedOptions()
        _setDBMS()
    if conf.data:
        # unicode subclass so a custom attribute (the raw, still-encoded
        # original body) can be attached to the decoded POST data
        class _(unicode):
            pass
        kb.postUrlEncode = True
        # Only urldecode the body when the Content-Type says it is urlencoded
        for key, value in conf.httpHeaders:
            if key.upper() == HTTP_HEADER.CONTENT_TYPE.upper():
                kb.postUrlEncode = "urlencoded" in value
                break
        if kb.postUrlEncode:
            original = conf.data
            conf.data = _(urldecode(conf.data))
            setattr(conf.data, UNENCODED_ORIGINAL_VALUE, original)
            # Remember whether '+' was used for spaces in the original body
            kb.postSpaceToPlus = '+' in original
def setupTargetEnv():
    """
    Set up the per-target environment: output directories, request
    parameters, session HashDB, results file and authentication
    credentials. Order matters: directories must exist before the
    session database and results file are opened inside them.
    """
    _createTargetDirs()
    _setRequestParams()
    _setHashDB()
    _resumeHashDBValues()
    _setResultsFile()
    _setAuthCred()
| 43.324213 | 328 | 0.604231 |
acdf6ef677481a3f276aecc59bdd56e7e641defd | 22,638 | py | Python | python-profiles/STANDA/8CMA16DC-25_15.py | EPC-MSU/libximc | b0349721f57c8274b098a7b646d7ae67b8e70b9d | [
"BSD-2-Clause"
] | 3 | 2020-12-08T14:41:48.000Z | 2022-02-23T13:42:42.000Z | python-profiles/STANDA/8CMA16DC-25_15.py | EPC-MSU/libximc | b0349721f57c8274b098a7b646d7ae67b8e70b9d | [
"BSD-2-Clause"
] | 4 | 2020-12-08T20:15:06.000Z | 2021-12-08T14:15:24.000Z | python-profiles/STANDA/8CMA16DC-25_15.py | EPC-MSU/libximc | b0349721f57c8274b098a7b646d7ae67b8e70b9d | [
"BSD-2-Clause"
] | 2 | 2020-11-02T02:17:35.000Z | 2021-03-18T14:13:56.000Z | def set_profile_8CMA16DC_25_15(lib, id):
worst_result = Result.Ok
result = Result.Ok
feedback_settings = feedback_settings_t()
feedback_settings.IPS = 1024
class FeedbackType_:
FEEDBACK_ENCODER_MEDIATED = 6
FEEDBACK_NONE = 5
FEEDBACK_EMF = 4
FEEDBACK_ENCODER = 1
feedback_settings.FeedbackType = FeedbackType_.FEEDBACK_ENCODER
class FeedbackFlags_:
FEEDBACK_ENC_TYPE_BITS = 192
FEEDBACK_ENC_TYPE_DIFFERENTIAL = 128
FEEDBACK_ENC_TYPE_SINGLE_ENDED = 64
FEEDBACK_ENC_REVERSE = 1
FEEDBACK_ENC_TYPE_AUTO = 0
feedback_settings.FeedbackFlags = FeedbackFlags_.FEEDBACK_ENC_TYPE_SINGLE_ENDED | FeedbackFlags_.FEEDBACK_ENC_REVERSE | FeedbackFlags_.FEEDBACK_ENC_TYPE_AUTO
feedback_settings.CountsPerTurn = 1024
result = lib.set_feedback_settings(id, byref(feedback_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
home_settings = home_settings_t()
home_settings.FastHome = 500
home_settings.uFastHome = 0
home_settings.SlowHome = 500
home_settings.uSlowHome = 0
home_settings.HomeDelta = 3000
home_settings.uHomeDelta = 0
class HomeFlags_:
HOME_USE_FAST = 256
HOME_STOP_SECOND_BITS = 192
HOME_STOP_SECOND_LIM = 192
HOME_STOP_SECOND_SYN = 128
HOME_STOP_SECOND_REV = 64
HOME_STOP_FIRST_BITS = 48
HOME_STOP_FIRST_LIM = 48
HOME_STOP_FIRST_SYN = 32
HOME_STOP_FIRST_REV = 16
HOME_HALF_MV = 8
HOME_MV_SEC_EN = 4
HOME_DIR_SECOND = 2
HOME_DIR_FIRST = 1
home_settings.HomeFlags = HomeFlags_.HOME_USE_FAST | HomeFlags_.HOME_STOP_SECOND_REV | HomeFlags_.HOME_STOP_FIRST_BITS | HomeFlags_.HOME_DIR_SECOND
result = lib.set_home_settings(id, byref(home_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
move_settings = move_settings_t()
move_settings.Speed = 2500
move_settings.uSpeed = 0
move_settings.Accel = 25000
move_settings.Decel = 25000
move_settings.AntiplaySpeed = 2500
move_settings.uAntiplaySpeed = 0
class MoveFlags_:
RPM_DIV_1000 = 1
result = lib.set_move_settings(id, byref(move_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
engine_settings = engine_settings_t()
engine_settings.NomVoltage = 600
engine_settings.NomCurrent = 430
engine_settings.NomSpeed = 5000
engine_settings.uNomSpeed = 0
class EngineFlags_:
ENGINE_LIMIT_RPM = 128
ENGINE_LIMIT_CURR = 64
ENGINE_LIMIT_VOLT = 32
ENGINE_ACCEL_ON = 16
ENGINE_ANTIPLAY = 8
ENGINE_MAX_SPEED = 4
ENGINE_CURRENT_AS_RMS = 2
ENGINE_REVERSE = 1
engine_settings.EngineFlags = EngineFlags_.ENGINE_LIMIT_RPM | EngineFlags_.ENGINE_LIMIT_CURR | EngineFlags_.ENGINE_LIMIT_VOLT | EngineFlags_.ENGINE_ACCEL_ON | EngineFlags_.ENGINE_REVERSE
engine_settings.Antiplay = 12800
class MicrostepMode_:
MICROSTEP_MODE_FRAC_256 = 9
MICROSTEP_MODE_FRAC_128 = 8
MICROSTEP_MODE_FRAC_64 = 7
MICROSTEP_MODE_FRAC_32 = 6
MICROSTEP_MODE_FRAC_16 = 5
MICROSTEP_MODE_FRAC_8 = 4
MICROSTEP_MODE_FRAC_4 = 3
MICROSTEP_MODE_FRAC_2 = 2
MICROSTEP_MODE_FULL = 1
engine_settings.MicrostepMode = MicrostepMode_.MICROSTEP_MODE_FULL
engine_settings.StepsPerRev = 1
result = lib.set_engine_settings(id, byref(engine_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
entype_settings = entype_settings_t()
class EngineType_:
ENGINE_TYPE_BRUSHLESS = 5
ENGINE_TYPE_TEST = 4
ENGINE_TYPE_STEP = 3
ENGINE_TYPE_2DC = 2
ENGINE_TYPE_DC = 1
ENGINE_TYPE_NONE = 0
entype_settings.EngineType = EngineType_.ENGINE_TYPE_DC | EngineType_.ENGINE_TYPE_NONE
class DriverType_:
DRIVER_TYPE_EXTERNAL = 3
DRIVER_TYPE_INTEGRATE = 2
DRIVER_TYPE_DISCRETE_FET = 1
entype_settings.DriverType = DriverType_.DRIVER_TYPE_INTEGRATE
result = lib.set_entype_settings(id, byref(entype_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
power_settings = power_settings_t()
power_settings.HoldCurrent = 60
power_settings.CurrReductDelay = 1500
power_settings.PowerOffDelay = 3600
power_settings.CurrentSetTime = 600
class PowerFlags_:
POWER_SMOOTH_CURRENT = 4
POWER_OFF_ENABLED = 2
POWER_REDUCT_ENABLED = 1
result = lib.set_power_settings(id, byref(power_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
secure_settings = secure_settings_t()
secure_settings.LowUpwrOff = 800
secure_settings.CriticalIpwr = 4000
secure_settings.CriticalUpwr = 5500
secure_settings.CriticalT = 800
secure_settings.CriticalIusb = 450
secure_settings.CriticalUusb = 520
secure_settings.MinimumUusb = 420
class Flags_:
ALARM_ENGINE_RESPONSE = 128
ALARM_WINDING_MISMATCH = 64
USB_BREAK_RECONNECT = 32
ALARM_FLAGS_STICKING = 16
ALARM_ON_BORDERS_SWAP_MISSET = 8
H_BRIDGE_ALERT = 4
LOW_UPWR_PROTECTION = 2
ALARM_ON_DRIVER_OVERHEATING = 1
secure_settings.Flags = Flags_.ALARM_ENGINE_RESPONSE | Flags_.ALARM_FLAGS_STICKING | Flags_.ALARM_ON_BORDERS_SWAP_MISSET | Flags_.H_BRIDGE_ALERT | Flags_.ALARM_ON_DRIVER_OVERHEATING
result = lib.set_secure_settings(id, byref(secure_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
edges_settings = edges_settings_t()
class BorderFlags_:
BORDERS_SWAP_MISSET_DETECTION = 8
BORDER_STOP_RIGHT = 4
BORDER_STOP_LEFT = 2
BORDER_IS_ENCODER = 1
edges_settings.BorderFlags = BorderFlags_.BORDER_STOP_RIGHT | BorderFlags_.BORDER_STOP_LEFT
class EnderFlags_:
ENDER_SW2_ACTIVE_LOW = 4
ENDER_SW1_ACTIVE_LOW = 2
ENDER_SWAP = 1
edges_settings.EnderFlags = EnderFlags_.ENDER_SWAP
edges_settings.LeftBorder = 357960
edges_settings.uLeftBorder = 0
edges_settings.RightBorder = 14074440
edges_settings.uRightBorder = 0
result = lib.set_edges_settings(id, byref(edges_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
pid_settings = pid_settings_t()
pid_settings.KpU = 500
pid_settings.KiU = 1600
pid_settings.KdU = 1000
pid_settings.Kpf = 0
pid_settings.Kif = 0
pid_settings.Kdf = 0
result = lib.set_pid_settings(id, byref(pid_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
sync_in_settings = sync_in_settings_t()
class SyncInFlags_:
SYNCIN_GOTOPOSITION = 4
SYNCIN_INVERT = 2
SYNCIN_ENABLED = 1
sync_in_settings.ClutterTime = 4
sync_in_settings.Position = 0
sync_in_settings.uPosition = 0
sync_in_settings.Speed = 0
sync_in_settings.uSpeed = 0
result = lib.set_sync_in_settings(id, byref(sync_in_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
sync_out_settings = sync_out_settings_t()
class SyncOutFlags_:
SYNCOUT_ONPERIOD = 64
SYNCOUT_ONSTOP = 32
SYNCOUT_ONSTART = 16
SYNCOUT_IN_STEPS = 8
SYNCOUT_INVERT = 4
SYNCOUT_STATE = 2
SYNCOUT_ENABLED = 1
sync_out_settings.SyncOutFlags = SyncOutFlags_.SYNCOUT_ONSTOP | SyncOutFlags_.SYNCOUT_ONSTART
sync_out_settings.SyncOutPulseSteps = 100
sync_out_settings.SyncOutPeriod = 2000
sync_out_settings.Accuracy = 0
sync_out_settings.uAccuracy = 0
result = lib.set_sync_out_settings(id, byref(sync_out_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
extio_settings = extio_settings_t()
class EXTIOSetupFlags_:
EXTIO_SETUP_INVERT = 2
EXTIO_SETUP_OUTPUT = 1
extio_settings.EXTIOSetupFlags = EXTIOSetupFlags_.EXTIO_SETUP_OUTPUT
class EXTIOModeFlags_:
EXTIO_SETUP_MODE_OUT_BITS = 240
EXTIO_SETUP_MODE_OUT_MOTOR_ON = 64
EXTIO_SETUP_MODE_OUT_ALARM = 48
EXTIO_SETUP_MODE_OUT_MOVING = 32
EXTIO_SETUP_MODE_OUT_ON = 16
EXTIO_SETUP_MODE_IN_BITS = 15
EXTIO_SETUP_MODE_IN_ALARM = 5
EXTIO_SETUP_MODE_IN_HOME = 4
EXTIO_SETUP_MODE_IN_MOVR = 3
EXTIO_SETUP_MODE_IN_PWOF = 2
EXTIO_SETUP_MODE_IN_STOP = 1
EXTIO_SETUP_MODE_IN_NOP = 0
EXTIO_SETUP_MODE_OUT_OFF = 0
extio_settings.EXTIOModeFlags = EXTIOModeFlags_.EXTIO_SETUP_MODE_IN_STOP | EXTIOModeFlags_.EXTIO_SETUP_MODE_IN_NOP | EXTIOModeFlags_.EXTIO_SETUP_MODE_OUT_OFF
result = lib.set_extio_settings(id, byref(extio_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
brake_settings = brake_settings_t()
brake_settings.t1 = 300
brake_settings.t2 = 500
brake_settings.t3 = 300
brake_settings.t4 = 400
class BrakeFlags_:
BRAKE_ENG_PWROFF = 2
BRAKE_ENABLED = 1
brake_settings.BrakeFlags = BrakeFlags_.BRAKE_ENG_PWROFF
result = lib.set_brake_settings(id, byref(brake_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
control_settings = control_settings_t()
control_settings.MaxSpeed[0] = 25
control_settings.MaxSpeed[1] = 250
control_settings.MaxSpeed[2] = 2500
control_settings.MaxSpeed[3] = 0
control_settings.MaxSpeed[4] = 0
control_settings.MaxSpeed[5] = 0
control_settings.MaxSpeed[6] = 0
control_settings.MaxSpeed[7] = 0
control_settings.MaxSpeed[8] = 0
control_settings.MaxSpeed[9] = 0
control_settings.uMaxSpeed[0] = 0
control_settings.uMaxSpeed[1] = 0
control_settings.uMaxSpeed[2] = 0
control_settings.uMaxSpeed[3] = 0
control_settings.uMaxSpeed[4] = 0
control_settings.uMaxSpeed[5] = 0
control_settings.uMaxSpeed[6] = 0
control_settings.uMaxSpeed[7] = 0
control_settings.uMaxSpeed[8] = 0
control_settings.uMaxSpeed[9] = 0
control_settings.Timeout[0] = 1000
control_settings.Timeout[1] = 1000
control_settings.Timeout[2] = 1000
control_settings.Timeout[3] = 1000
control_settings.Timeout[4] = 1000
control_settings.Timeout[5] = 1000
control_settings.Timeout[6] = 1000
control_settings.Timeout[7] = 1000
control_settings.Timeout[8] = 1000
control_settings.MaxClickTime = 300
class Flags_:
CONTROL_BTN_RIGHT_PUSHED_OPEN = 8
CONTROL_BTN_LEFT_PUSHED_OPEN = 4
CONTROL_MODE_BITS = 3
CONTROL_MODE_LR = 2
CONTROL_MODE_JOY = 1
CONTROL_MODE_OFF = 0
control_settings.Flags = Flags_.CONTROL_MODE_LR | Flags_.CONTROL_MODE_OFF
control_settings.DeltaPosition = 1
control_settings.uDeltaPosition = 0
result = lib.set_control_settings(id, byref(control_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
joystick_settings = joystick_settings_t()
joystick_settings.JoyLowEnd = 0
joystick_settings.JoyCenter = 5000
joystick_settings.JoyHighEnd = 10000
joystick_settings.ExpFactor = 100
joystick_settings.DeadZone = 50
class JoyFlags_:
JOY_REVERSE = 1
result = lib.set_joystick_settings(id, byref(joystick_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
ctp_settings = ctp_settings_t()
ctp_settings.CTPMinError = 3
class CTPFlags_:
CTP_ERROR_CORRECTION = 16
REV_SENS_INV = 8
CTP_ALARM_ON_ERROR = 4
CTP_BASE = 2
CTP_ENABLED = 1
result = lib.set_ctp_settings(id, byref(ctp_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
uart_settings = uart_settings_t()
uart_settings.Speed = 115200
class UARTSetupFlags_:
UART_STOP_BIT = 8
UART_PARITY_BIT_USE = 4
UART_PARITY_BITS = 3
UART_PARITY_BIT_MARK = 3
UART_PARITY_BIT_SPACE = 2
UART_PARITY_BIT_ODD = 1
UART_PARITY_BIT_EVEN = 0
uart_settings.UARTSetupFlags = UARTSetupFlags_.UART_PARITY_BIT_EVEN
result = lib.set_uart_settings(id, byref(uart_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
controller_name = controller_name_t()
controller_name.ControllerName = bytes([0, 113, 252, 118, 36, 0, 72, 0, 3, 0, 0, 0, 104, 101, 103, 0])
class CtrlFlags_:
EEPROM_PRECEDENCE = 1
result = lib.set_controller_name(id, byref(controller_name))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
emf_settings = emf_settings_t()
emf_settings.L = 0.07000000029802322
emf_settings.R = 5.099999904632568
emf_settings.Km = 0
class BackEMFFlags_:
BACK_EMF_KM_AUTO = 4
BACK_EMF_RESISTANCE_AUTO = 2
BACK_EMF_INDUCTANCE_AUTO = 1
result = lib.set_emf_settings(id, byref(emf_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
engine_advansed_setup = engine_advansed_setup_t()
engine_advansed_setup.stepcloseloop_Kw = 50
engine_advansed_setup.stepcloseloop_Kp_low = 1000
engine_advansed_setup.stepcloseloop_Kp_high = 33
result = lib.set_engine_advansed_setup(id, byref(engine_advansed_setup))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
extended_settings = extended_settings_t()
extended_settings.Param1 = 0
result = lib.set_extended_settings(id, byref(extended_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
stage_name = stage_name_t()
stage_name.PositionerName = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_stage_name(id, byref(stage_name))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
stage_information = stage_information_t()
stage_information.Manufacturer = bytes([0, 116, 97, 110, 100, 97, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
stage_information.PartNumber = bytes([56, 67, 77, 65, 49, 54, 68, 67, 45, 50, 53, 95, 49, 53, 0, 48, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_stage_information(id, byref(stage_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
stage_settings = stage_settings_t()
stage_settings.LeadScrewPitch = 0.25
stage_settings.Units = bytes([0, 109, 0, 114, 101, 101, 0, 0])
stage_settings.MaxSpeed = 0.15000000596046448
stage_settings.TravelRange = 25
stage_settings.SupplyVoltageMin = 0
stage_settings.SupplyVoltageMax = 0
stage_settings.MaxCurrentConsumption = 0
stage_settings.HorizontalLoadCapacity = 0
stage_settings.VerticalLoadCapacity = 0
result = lib.set_stage_settings(id, byref(stage_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
motor_information = motor_information_t()
motor_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
motor_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_motor_information(id, byref(motor_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
motor_settings = motor_settings_t()
class MotorType_:
MOTOR_TYPE_BLDC = 3
MOTOR_TYPE_DC = 2
MOTOR_TYPE_STEP = 1
MOTOR_TYPE_UNKNOWN = 0
motor_settings.MotorType = MotorType_.MOTOR_TYPE_STEP | MotorType_.MOTOR_TYPE_UNKNOWN
motor_settings.ReservedField = 0
motor_settings.Poles = 0
motor_settings.Phases = 0
motor_settings.NominalVoltage = 0
motor_settings.NominalCurrent = 0
motor_settings.NominalSpeed = 0
motor_settings.NominalTorque = 0
motor_settings.NominalPower = 0
motor_settings.WindingResistance = 0
motor_settings.WindingInductance = 0
motor_settings.RotorInertia = 0
motor_settings.StallTorque = 0
motor_settings.DetentTorque = 0
motor_settings.TorqueConstant = 0
motor_settings.SpeedConstant = 0
motor_settings.SpeedTorqueGradient = 0
motor_settings.MechanicalTimeConstant = 0
motor_settings.MaxSpeed = 7000
motor_settings.MaxCurrent = 0
motor_settings.MaxCurrentTime = 0
motor_settings.NoLoadCurrent = 0
motor_settings.NoLoadSpeed = 0
result = lib.set_motor_settings(id, byref(motor_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
encoder_information = encoder_information_t()
encoder_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
encoder_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_encoder_information(id, byref(encoder_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
encoder_settings = encoder_settings_t()
encoder_settings.MaxOperatingFrequency = 80
encoder_settings.SupplyVoltageMin = 0
encoder_settings.SupplyVoltageMax = 0
encoder_settings.MaxCurrentConsumption = 0
encoder_settings.PPR = 256
class EncoderSettings_:
ENCSET_REVOLUTIONSENSOR_ACTIVE_HIGH = 256
ENCSET_REVOLUTIONSENSOR_PRESENT = 64
ENCSET_INDEXCHANNEL_PRESENT = 16
ENCSET_PUSHPULL_OUTPUT = 4
ENCSET_DIFFERENTIAL_OUTPUT = 1
result = lib.set_encoder_settings(id, byref(encoder_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
hallsensor_information = hallsensor_information_t()
hallsensor_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
hallsensor_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_hallsensor_information(id, byref(hallsensor_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
hallsensor_settings = hallsensor_settings_t()
hallsensor_settings.MaxOperatingFrequency = 0
hallsensor_settings.SupplyVoltageMin = 0
hallsensor_settings.SupplyVoltageMax = 0
hallsensor_settings.MaxCurrentConsumption = 0
hallsensor_settings.PPR = 0
result = lib.set_hallsensor_settings(id, byref(hallsensor_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
gear_information = gear_information_t()
gear_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
gear_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_gear_information(id, byref(gear_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
gear_settings = gear_settings_t()
gear_settings.ReductionIn = 141
gear_settings.ReductionOut = 1
gear_settings.RatedInputTorque = 0
gear_settings.RatedInputSpeed = 5000
gear_settings.MaxOutputBacklash = 0
gear_settings.InputInertia = 0
gear_settings.Efficiency = 0
result = lib.set_gear_settings(id, byref(gear_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
accessories_settings = accessories_settings_t()
accessories_settings.MagneticBrakeInfo = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
accessories_settings.MBRatedVoltage = 0
accessories_settings.MBRatedCurrent = 0
accessories_settings.MBTorque = 0
class MBSettings_:
MB_POWERED_HOLD = 2
MB_AVAILABLE = 1
accessories_settings.TemperatureSensorInfo = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
accessories_settings.TSMin = 0
accessories_settings.TSMax = 0
accessories_settings.TSGrad = 0
class TSSettings_:
TS_AVAILABLE = 8
TS_TYPE_BITS = 7
TS_TYPE_SEMICONDUCTOR = 2
TS_TYPE_THERMOCOUPLE = 1
TS_TYPE_UNKNOWN = 0
accessories_settings.TSSettings = TSSettings_.TS_TYPE_THERMOCOUPLE | TSSettings_.TS_TYPE_UNKNOWN
class LimitSwitchesSettings_:
LS_SHORTED = 16
LS_SW2_ACTIVE_LOW = 8
LS_SW1_ACTIVE_LOW = 4
LS_ON_SW2_AVAILABLE = 2
LS_ON_SW1_AVAILABLE = 1
result = lib.set_accessories_settings(id, byref(accessories_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
return worst_result
| 35.81962 | 190 | 0.693215 |
acdf6f4600bc4074bf198a659585628959b29966 | 2,606 | py | Python | big_data/bokeh_examples/ad_sku_ex.py | paulhtremblay/big-data | dfa2aa9877300a57e7a9368af59c07fcc5841b4f | [
"MIT"
] | null | null | null | big_data/bokeh_examples/ad_sku_ex.py | paulhtremblay/big-data | dfa2aa9877300a57e7a9368af59c07fcc5841b4f | [
"MIT"
] | 7 | 2020-06-05T18:13:25.000Z | 2022-03-11T23:19:48.000Z | big_data/bokeh_examples/ad_sku_ex.py | paulhtremblay/big-data | dfa2aa9877300a57e7a9368af59c07fcc5841b4f | [
"MIT"
] | 1 | 2020-11-25T18:24:37.000Z | 2020-11-25T18:24:37.000Z | import sys
import networkx as nx
from bokeh.models import Plot, ColumnDataSource, Range1d, from_networkx, Circle,MultiLine
from bokeh.models import ColumnDataSource, LabelSet
from bokeh.io import show, output_file
from bokeh.palettes import Viridis
import pprint
pp = pprint.PrettyPrinter(indent=4)
#define graph
G = nx.DiGraph()
G.add_edges_from( [('User', 'ad1'), ('User', 'ad2'), ('ad1', 'sku1'), ('ad2', 'sku2')])
G['User']['ad1']['weight'] = 1
G['User']['ad2']['weight'] = 5
G['ad1']['sku1']['weight'] = 1
G['ad2']['sku2']['weight'] = 1
node_size = [50, 20, 20, 20, 20]
node_initial_pos = [(-0.5,0), (0.5,0), (0,0.25),(0,-0.25), (.6, 0)]
node_color = [Viridis[10][0], Viridis[10][9],Viridis[10][9],'blue', 'blue']
index= ['User', 'ad1', 'ad2', 'sku1', 'sku2']
G.nodes['User']['name'] = 'User'
G.nodes['ad1']['name'] = 'ad1'
G.nodes['ad2']['name'] = 'ad2'
G.nodes['sku1']['name'] = 'sku1'
G.nodes['sku2']['name'] = 'sku2'
graph_renderer = from_networkx(G, nx.spring_layout, scale=0.5, center=(0,0))
plot = Plot(plot_height=1000, plot_width=1000,
x_range=Range1d(-1.1,1.1), y_range=Range1d(-1.1,1.1))
x, y = zip(*graph_renderer.layout_provider.graph_layout.values())
#node_labels = nx.get_node_attributes(G, 'club')
#node_labels = {key:key for key in G.nodes}
node_labels = nx.get_node_attributes(G, 'name')
line_size = [1,2, 3, 4, 5]
source = ColumnDataSource(data=dict(
node_size = node_size,
node_color = node_color,
node_initial_pos = node_initial_pos,
index= index,
names = [node_labels[i] for i in index],
x = x,
y = y,
line_size = line_size,
))
labels = LabelSet(x='x', y='y', text='names', source=source,
background_fill_color='white')
node_initial_pos = {'User':(-0.5,0), 'ad1':(.5,.50),'ad2':(0,0),'sku1':(0,-0.25), 'sku2':(0,.25)}
#graph_renderer = from_networkx(G, nx.shell_layout, scale=0.5, center=(0,0))
#style
graph_renderer.node_renderer.data_source = source
graph_renderer.node_renderer.glyph = Circle(fill_color = 'node_color',size = 'node_size', line_color = None)
graph_renderer.edge_renderer.glyph = MultiLine(line_color="blue", line_alpha=0.8)
graph_renderer.edge_renderer.data_source.data["line_width"] = [G.get_edge_data(a,b)['weight'] for a, b in G.edges()]
graph_renderer.edge_renderer.glyph.line_width = {'field': 'line_width'}
plot.renderers.append(graph_renderer)
plot.renderers.append(graph_renderer)
plot.renderers.append(labels)
output_file('test.html')
show(plot)
| 34.289474 | 116 | 0.643899 |
acdf6f9cb11239a67ce6f6eb6f8d42e9d071fcfa | 2,437 | py | Python | src/backend/plotepochlog.py | scopeInfinity/Video2Description | 588a72959caba2e19fdd6eba9a6a342e400d7dff | [
"Apache-2.0"
] | 152 | 2018-04-29T12:25:03.000Z | 2022-03-31T07:13:17.000Z | src/backend/plotepochlog.py | scopeInfinity/Video2Description | 588a72959caba2e19fdd6eba9a6a342e400d7dff | [
"Apache-2.0"
] | 23 | 2018-12-21T09:48:48.000Z | 2022-03-15T00:54:34.000Z | src/backend/plotepochlog.py | scopeInfinity/Video2Description | 588a72959caba2e19fdd6eba9a6a342e400d7dff | [
"Apache-2.0"
] | 56 | 2018-07-26T04:57:24.000Z | 2021-12-29T10:46:47.000Z | import matplotlib.pyplot as plt
import numpy as np
import sys
MXPOINT = 100
assert len(sys.argv)>=2
fname=sys.argv[1]
showtrain = True
if len(sys.argv)>=3:
showtrain = (sys.argv[2] == 'yo')
showval = True
if len(sys.argv)>=4:
showval = (sys.argv[3] == 'yo')
showepoch = True
if len(sys.argv)>=5:
showepoch = (sys.argv[4] == 'yo')
print("Fname %s " % fname)
batch = []
loss = []
acc = []
val_loss = []
val_acc = []
ndata = []
with open(fname,'r') as f:
for row in f:
rr =[float(x) for x in row.split(',')]
ndata.append(rr)
ndata = np.array(ndata, dtype='float')
print(np.shape(ndata))
step = 1
if len(ndata[0]) > MXPOINT:
step = len(ndata[0]) // MXPOINT
[batch, loss, acc, val_loss, val_acc,cider,bleu4,rouge,meteor] = [y[::step] for y in np.matrix.transpose(ndata)][:9]
x = range(len(batch))
fig = plt.figure()
host = fig.add_subplot(111)
pscores = host.twinx()
pacc = host.twinx()
ploss = host.twinx()
if showepoch:
_b,=host.plot(x,batch,color= plt.cm.viridis(0.95),label='Batches')
if showtrain:
_a,=pacc.plot(x,acc,'-.',label="Accuracy",color= plt.cm.viridis(0))
_l,=ploss.plot(x,loss, '-', label="Loss", color = plt.cm.viridis(0))
if showval:
ploss.plot(x,val_loss,'-', label="Val Loss",color= plt.cm.viridis(0.5))
pacc.plot(x,val_acc,'-.',label="Val Accuracy",color= plt.cm.viridis(0.5))
if showtrain or showval:
ploss.legend(loc='lower right')
pacc.legend(loc='lower left')
ploss.spines['right'].set_position(('outward', 30))
score_total = cider+bleu4+rouge+meteor
pscores.plot(x,cider,'-', label="Cider",color= plt.cm.viridis(0.0))
pscores.plot(x,bleu4,'-', label="Bleu4",color= plt.cm.viridis(0.2))
pscores.plot(x,rouge,'-', label="Rouge",color= plt.cm.viridis(0.4))
pscores.plot(x,meteor,'-', label="Meteor",color= plt.cm.viridis(0.6))
pscores.plot(x,score_total,'-', label="Total",color= plt.cm.viridis(0.8))
pscores.legend(loc='upper left')
#host.yaxis.label.set_color(_b.get_color())
#ploss.yaxis.label.set_color(_l.get_color())
#pacc.yaxis.label.set_color(_a.get_color())
#plt.savefig("plot.png", bbox_inches='tight')
best_iter = np.argmax(score_total)
print("Best Iteration %d " % best_iter)
print("\tCIDER %.4f " % cider[best_iter])
print("\tBLEU4 %.4f " % bleu4[best_iter])
print("\tROUGE %.4f " % rouge[best_iter])
print("\tMETEOR %.4f " % meteor[best_iter])
print("\tTotalScore %.4f " % score_total[best_iter])
plt.show()
| 28.337209 | 116 | 0.657366 |
acdf707a3fcdb3c71742cc805495ed05394b01bf | 5,718 | py | Python | example/sanitizer_experiment.py | ailabstw/blurnn | da7a6f622fab8b3619f7d8c1025b5860e282ef18 | [
"MIT"
] | 5 | 2020-02-24T08:59:36.000Z | 2022-03-30T01:06:53.000Z | example/sanitizer_experiment.py | ailabstw/blurnn | da7a6f622fab8b3619f7d8c1025b5860e282ef18 | [
"MIT"
] | null | null | null | example/sanitizer_experiment.py | ailabstw/blurnn | da7a6f622fab8b3619f7d8c1025b5860e282ef18 | [
"MIT"
] | null | null | null | from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
from blurnn import ModelSanitizer
class Net(nn.Module):
    """Small CNN classifier for 28x28 single-channel images (MNIST).

    Two conv layers feed a two-layer fully-connected head; the output is
    per-class log-probabilities (``log_softmax``), so it pairs with
    ``F.nll_loss``.
    """

    def __init__(self):
        super(Net, self).__init__()
        # Submodule creation order is kept identical to preserve seeded
        # parameter initialisation.
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        self.dropout1 = nn.Dropout2d(0.25)
        self.dropout2 = nn.Dropout2d(0.5)
        self.fc1 = nn.Linear(9216, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        """Return log-probabilities of shape (batch, 10)."""
        # Feature extractor: conv1 -> relu -> conv2 -> 2x2 max-pool -> dropout.
        features = self.conv2(F.relu(self.conv1(x)))
        features = self.dropout1(F.max_pool2d(features, 2))
        # Classifier head on the flattened 64x12x12 = 9216 features.
        flat = torch.flatten(features, 1)
        hidden = self.dropout2(F.relu(self.fc1(flat)))
        return F.log_softmax(self.fc2(hidden), dim=1)
def train(args, model, device, train_loader, optimizer, epoch, round_idx):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Round {} Epoch {}: [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
round_idx, epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
def test(args, model, device, test_loader):
    """Evaluate `model` on `test_loader` and print average loss and accuracy.

    Runs under ``torch.no_grad`` with the model in eval mode; returns nothing.
    """
    model.eval()
    total_loss = 0
    hits = 0
    with torch.no_grad():
        for inputs, labels in test_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            log_probs = model(inputs)
            # Accumulate the un-averaged batch loss; divide by dataset size
            # at the end for the overall mean.
            total_loss += F.nll_loss(log_probs, labels, reduction='sum').item()
            predictions = log_probs.argmax(dim=1, keepdim=True)
            hits += predictions.eq(labels.view_as(predictions)).sum().item()
    total_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        total_loss, hits, len(test_loader.dataset),
        100. * hits / len(test_loader.dataset)))
def main():
    """Parse CLI arguments, then run `--rounds` rounds of MNIST training,
    applying blurnn's ModelSanitizer to the model after every round and
    optionally saving the final weights."""
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--rounds', type=int, default=1, metavar='N',
                        help='number of rounds to train (default: 1)')
    # BUG FIX: the help text previously claimed a default of 14 while the
    # actual default is 1.
    parser.add_argument('--epochs', type=int, default=1, metavar='N',
                        help='number of epochs to train (default: 1)')
    parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
                        help='learning rate (default: 1.0)')
    parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=False,
                        help='For Saving the current Model')
    args = parser.parse_args()
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")
    # Loader workers / pinned memory only pay off when CUDA is in use.
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.batch_size, shuffle=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=False, transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ])),
        batch_size=args.test_batch_size, shuffle=True, **kwargs)
    model = Net().to(device)
    optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
    sanitizer = ModelSanitizer(epochs=args.epochs, eps1=10, eps3=10, sensitivity=1e-2, gamma=5e-3, release_proportion=0.4, tau=2.5e-3)
    for _round in range(1, args.rounds + 1):
        sanitizer.base_model = model
        for epoch in range(1, args.epochs + 1):
            train(args, model, device, train_loader, optimizer, epoch, _round)
            test(args, model, device, test_loader)
            scheduler.step()
        # After each round, sanitize the trained weights in place and
        # report the post-sanitization metrics.
        sanitizer.sanitize(model)
        print(f'Round {_round} Result:')
        test(args, model, device, test_loader)
    model.eval()
    if args.save_model:
        torch.save(model.state_dict(), "mnist_cnn.pt")
if __name__ == '__main__':
    # Only run the experiment when executed directly as a script.
    main()
| 40.267606 | 134 | 0.597062 |
acdf70c8d7ccbf768d31923c5627273de2e026e4 | 35,685 | py | Python | sanic/app.py | cmcaine/sanic | 076cf51fb295e7624968db766fd613aa3948289d | [
"MIT"
] | 1 | 2018-08-30T06:30:11.000Z | 2018-08-30T06:30:11.000Z | sanic/app.py | cmcaine/sanic | 076cf51fb295e7624968db766fd613aa3948289d | [
"MIT"
] | null | null | null | sanic/app.py | cmcaine/sanic | 076cf51fb295e7624968db766fd613aa3948289d | [
"MIT"
] | null | null | null | import os
import logging
import logging.config
import re
import warnings
from asyncio import get_event_loop, ensure_future, CancelledError
from collections import deque, defaultdict
from functools import partial
from inspect import getmodulename, isawaitable, signature, stack
from traceback import format_exc
from urllib.parse import urlencode, urlunparse
from ssl import create_default_context, Purpose
from sanic.config import Config
from sanic.constants import HTTP_METHODS
from sanic.exceptions import ServerError, URLBuildError, SanicException
from sanic.handlers import ErrorHandler
from sanic.log import logger, error_logger, LOGGING_CONFIG_DEFAULTS
from sanic.response import HTTPResponse, StreamingHTTPResponse
from sanic.router import Router
from sanic.server import serve, serve_multiple, HttpProtocol, Signal
from sanic.static import register as static_register
from sanic.testing import SanicTestClient
from sanic.views import CompositionView
from sanic.websocket import WebSocketProtocol, ConnectionClosed
import sanic.reloader_helpers as reloader_helpers
class Sanic:
    def __init__(self, name=None, router=None, error_handler=None,
                 load_env=True, request_class=None,
                 strict_slashes=False, log_config=None,
                 configure_logging=True):
        """Create a Sanic application.

        :param name: application name; when ``None`` it is derived from the
            module name of the calling frame
        :param router: custom ``Router`` instance (a default is built when
            ``None``)
        :param error_handler: custom ``ErrorHandler`` (default built when
            ``None``)
        :param load_env: forwarded to ``Config``
        :param request_class: class used for incoming requests (stored here,
            consumed elsewhere)
        :param strict_slashes: app-wide default for trailing-slash matching
        :param log_config: dict for ``logging.config.dictConfig``; falls back
            to ``LOGGING_CONFIG_DEFAULTS``
        :param configure_logging: skip logging configuration when ``False``
        """
        # Get name from previous stack frame
        if name is None:
            frame_records = stack()[1]
            name = getmodulename(frame_records[1])
        # logging
        if configure_logging:
            logging.config.dictConfig(log_config or LOGGING_CONFIG_DEFAULTS)
        self.name = name
        self.router = router or Router()
        self.request_class = request_class
        self.error_handler = error_handler or ErrorHandler()
        self.config = Config(load_env=load_env)
        self.request_middleware = deque()
        self.response_middleware = deque()
        self.blueprints = {}
        self._blueprint_order = []
        self.configure_logging = configure_logging
        self.debug = None
        self.sock = None
        self.strict_slashes = strict_slashes
        # Listener callables keyed by event name ('before_server_start', ...).
        self.listeners = defaultdict(list)
        self.is_running = False
        self.is_request_stream = False
        self.websocket_enabled = False
        # In-flight websocket handler futures, cancelled on server stop.
        self.websocket_tasks = set()
        # Register alternative method names
        self.go_fast = self.run
@property
def loop(self):
"""Synonymous with asyncio.get_event_loop().
Only supported when using the `app.run` method.
"""
if not self.is_running:
raise SanicException(
'Loop can only be retrieved after the app has started '
'running. Not supported with `create_server` function')
return get_event_loop()
# -------------------------------------------------------------------- #
# Registration
# -------------------------------------------------------------------- #
    def add_task(self, task):
        """Schedule a task to run later, after the loop has started.

        Different from asyncio.ensure_future in that it does not
        also return a future, and the actual ensure_future call
        is delayed until before server start.

        :param task: future, coroutine or awaitable
        """
        try:
            # `self.loop` raises SanicException while the app is not running,
            # which sends us to the deferred branch below.
            if callable(task):
                try:
                    # Prefer passing the app instance to the callable ...
                    self.loop.create_task(task(self))
                except TypeError:
                    # ... falling back to a zero-argument callable.
                    self.loop.create_task(task())
            else:
                self.loop.create_task(task)
        except SanicException:
            # Server not running yet: defer scheduling until the
            # 'before_server_start' event fires.
            @self.listener('before_server_start')
            def run(app, loop):
                if callable(task):
                    try:
                        loop.create_task(task(self))
                    except TypeError:
                        loop.create_task(task())
                else:
                    loop.create_task(task)
# Decorator
def listener(self, event):
"""Create a listener from a decorated function.
:param event: event to listen to
"""
def decorator(listener):
self.listeners[event].append(listener)
return listener
return decorator
def register_listener(self, listener, event):
"""
Register the listener for a given event.
Args:
listener: callable i.e. setup_db(app, loop)
event: when to register listener i.e. 'before_server_start'
Returns: listener
"""
return self.listener(event)(listener)
# Decorator
def route(self, uri, methods=frozenset({'GET'}), host=None,
strict_slashes=None, stream=False, version=None, name=None):
"""Decorate a function to be registered as a route
:param uri: path of the URL
:param methods: list or tuple of methods allowed
:param host:
:param strict_slashes:
:param stream:
:param version:
:param name: user defined route name for url_for
:return: decorated function
"""
# Fix case where the user did not prefix the URL with a /
# and will probably get confused as to why it's not working
if not uri.startswith('/'):
uri = '/' + uri
if stream:
self.is_request_stream = True
if strict_slashes is None:
strict_slashes = self.strict_slashes
def response(handler):
args = [key for key in signature(handler).parameters.keys()]
if args:
if stream:
handler.is_stream = stream
self.router.add(uri=uri, methods=methods, handler=handler,
host=host, strict_slashes=strict_slashes,
version=version, name=name)
return handler
else:
raise ValueError(
'Required parameter `request` missing'
'in the {0}() route?'.format(
handler.__name__))
return response
# Shorthand method decorators
def get(self, uri, host=None, strict_slashes=None, version=None,
name=None):
return self.route(uri, methods=frozenset({"GET"}), host=host,
strict_slashes=strict_slashes, version=version,
name=name)
def post(self, uri, host=None, strict_slashes=None, stream=False,
version=None, name=None):
return self.route(uri, methods=frozenset({"POST"}), host=host,
strict_slashes=strict_slashes, stream=stream,
version=version, name=name)
def put(self, uri, host=None, strict_slashes=None, stream=False,
version=None, name=None):
return self.route(uri, methods=frozenset({"PUT"}), host=host,
strict_slashes=strict_slashes, stream=stream,
version=version, name=name)
def head(self, uri, host=None, strict_slashes=None, version=None,
name=None):
return self.route(uri, methods=frozenset({"HEAD"}), host=host,
strict_slashes=strict_slashes, version=version,
name=name)
def options(self, uri, host=None, strict_slashes=None, version=None,
name=None):
return self.route(uri, methods=frozenset({"OPTIONS"}), host=host,
strict_slashes=strict_slashes, version=version,
name=name)
def patch(self, uri, host=None, strict_slashes=None, stream=False,
version=None, name=None):
return self.route(uri, methods=frozenset({"PATCH"}), host=host,
strict_slashes=strict_slashes, stream=stream,
version=version, name=name)
def delete(self, uri, host=None, strict_slashes=None, version=None,
name=None):
return self.route(uri, methods=frozenset({"DELETE"}), host=host,
strict_slashes=strict_slashes, version=version,
name=name)
    def add_route(self, handler, uri, methods=frozenset({'GET'}), host=None,
                  strict_slashes=None, version=None, name=None, stream=False):
        """A helper method to register class instance or
        functions as a handler to the application url
        routes.

        :param handler: function or class instance
        :param uri: path of the URL
        :param methods: list or tuple of methods allowed, these are overridden
                        if using a HTTPMethodView
        :param host: optional host to match the route against
        :param strict_slashes: trailing-slash sensitivity; falls back to the
            app-wide default when ``None``
        :param version: optional API version for the route
        :param name: user defined route name for url_for
        :param stream: boolean specifying if the handler is a stream handler
        :return: function or class instance
        """
        # Handle HTTPMethodView differently: derive the allowed methods from
        # the handlers the view class actually implements.
        if hasattr(handler, 'view_class'):
            methods = set()
            for method in HTTP_METHODS:
                _handler = getattr(handler.view_class, method.lower(), None)
                if _handler:
                    methods.add(method)
                    # Any stream-marked method enables streaming for the route.
                    if hasattr(_handler, 'is_stream'):
                        stream = True
        # handle composition view differently
        if isinstance(handler, CompositionView):
            methods = handler.handlers.keys()
            for _handler in handler.handlers.values():
                if hasattr(_handler, 'is_stream'):
                    stream = True
                    break
        if strict_slashes is None:
            strict_slashes = self.strict_slashes
        # Delegate to the regular `route` decorator and apply it immediately.
        self.route(uri=uri, methods=methods, host=host,
                   strict_slashes=strict_slashes, stream=stream,
                   version=version, name=name)(handler)
        return handler
# Decorator
    def websocket(self, uri, host=None, strict_slashes=None,
                  subprotocols=None, name=None):
        """Decorate a function to be registered as a websocket route

        :param uri: path of the URL
        :param subprotocols: optional list of strings with the supported
            subprotocols
        :param host: optional host to match the route against
        :param name: user defined route name for url_for
        :return: decorated function
        """
        # Registering any websocket route turns on websocket support
        # (and installs the shutdown task-cancellation listener).
        self.enable_websocket()
        # Fix case where the user did not prefix the URL with a /
        # and will probably get confused as to why it's not working
        if not uri.startswith('/'):
            uri = '/' + uri
        if strict_slashes is None:
            strict_slashes = self.strict_slashes
        def response(handler):
            async def websocket_handler(request, *args, **kwargs):
                request.app = self
                try:
                    protocol = request.transport.get_protocol()
                except AttributeError:
                    # On Python3.5 the Transport classes in asyncio do not
                    # have a get_protocol() method as in uvloop
                    protocol = request.transport._protocol
                ws = await protocol.websocket_handshake(request, subprotocols)
                # schedule the application handler
                # its future is kept in self.websocket_tasks in case it
                # needs to be cancelled due to the server being stopped
                fut = ensure_future(handler(request, ws, *args, **kwargs))
                self.websocket_tasks.add(fut)
                try:
                    await fut
                except (CancelledError, ConnectionClosed):
                    # Expected on server shutdown or client disconnect; the
                    # connection is closed below either way.
                    pass
                finally:
                    self.websocket_tasks.remove(fut)
                await ws.close()
            self.router.add(uri=uri, handler=websocket_handler,
                            methods=frozenset({'GET'}), host=host,
                            strict_slashes=strict_slashes, name=name)
            return handler
        return response
def add_websocket_route(self, handler, uri, host=None,
strict_slashes=None, subprotocols=None, name=None):
"""A helper method to register a function as a websocket route."""
if strict_slashes is None:
strict_slashes = self.strict_slashes
return self.websocket(uri, host=host, strict_slashes=strict_slashes,
subprotocols=subprotocols, name=name)(handler)
def enable_websocket(self, enable=True):
"""Enable or disable the support for websocket.
Websocket is enabled automatically if websocket routes are
added to the application.
"""
if not self.websocket_enabled:
# if the server is stopped, we want to cancel any ongoing
# websocket tasks, to allow the server to exit promptly
@self.listener('before_server_stop')
def cancel_websocket_tasks(app, loop):
for task in self.websocket_tasks:
task.cancel()
self.websocket_enabled = enable
    def remove_route(self, uri, clean_cache=True, host=None):
        """Remove a route previously registered under *uri*.

        All arguments are forwarded unchanged to ``self.router.remove``;
        *clean_cache* presumably clears the router's lookup cache — see
        Router.remove for the authoritative semantics.
        """
        self.router.remove(uri, clean_cache, host)
# Decorator
def exception(self, *exceptions):
"""Decorate a function to be registered as a handler for exceptions
:param exceptions: exceptions
:return: decorated function
"""
def response(handler):
for exception in exceptions:
if isinstance(exception, (tuple, list)):
for e in exception:
self.error_handler.add(e, handler)
else:
self.error_handler.add(exception, handler)
return handler
return response
def register_middleware(self, middleware, attach_to='request'):
if attach_to == 'request':
self.request_middleware.append(middleware)
if attach_to == 'response':
self.response_middleware.appendleft(middleware)
return middleware
# Decorator
def middleware(self, middleware_or_request):
"""Decorate and register middleware to be called before a request.
Can either be called as @app.middleware or @app.middleware('request')
"""
# Detect which way this was called, @middleware or @middleware('AT')
if callable(middleware_or_request):
return self.register_middleware(middleware_or_request)
else:
return partial(self.register_middleware,
attach_to=middleware_or_request)
    # Static Files
    def static(self, uri, file_or_directory, pattern=r'/?.+',
               use_modified_since=True, use_content_range=False,
               stream_large_files=False, name='static', host=None,
               strict_slashes=None, content_type=None):
        """Register a root to serve files from. The input can either be a
        file or a directory.

        All arguments are forwarded positionally to ``static_register``,
        which performs the actual route registration; see that helper for
        the exact semantics of each option.
        """
        static_register(self, uri, file_or_directory, pattern,
                        use_modified_since, use_content_range,
                        stream_large_files, name, host, strict_slashes,
                        content_type)
def blueprint(self, blueprint, **options):
"""Register a blueprint on the application.
:param blueprint: Blueprint object or (list, tuple) thereof
:param options: option dictionary with blueprint defaults
:return: Nothing
"""
if isinstance(blueprint, (list, tuple)):
for item in blueprint:
self.blueprint(item, **options)
return
if blueprint.name in self.blueprints:
assert self.blueprints[blueprint.name] is blueprint, \
'A blueprint with the name "%s" is already registered. ' \
'Blueprint names must be unique.' % \
(blueprint.name,)
else:
self.blueprints[blueprint.name] = blueprint
self._blueprint_order.append(blueprint)
blueprint.register(self, options)
def register_blueprint(self, *args, **kwargs):
# TODO: deprecate 1.0
if self.debug:
warnings.simplefilter('default')
warnings.warn("Use of register_blueprint will be deprecated in "
"version 1.0. Please use the blueprint method"
" instead",
DeprecationWarning)
return self.blueprint(*args, **kwargs)
    def url_for(self, view_name: str, **kwargs):
        """Build a URL based on a view name and the values provided.

        In order to build a URL, all request parameters must be supplied as
        keyword arguments, and each parameter must pass the test for the
        specified parameter type. If these conditions are not met, a
        `URLBuildError` will be thrown.

        Keyword arguments that are not request parameters will be included in
        the output URL's query string. The special keyword arguments
        ``_method``, ``_anchor``, ``_external``, ``_scheme`` and ``_server``
        control URL assembly and are never emitted as query parameters.

        :param view_name: string referencing the view name
        :param kwargs: keys and values that are used to build request
            parameters and query string arguments.
        :return: the built URL
        Raises:
            URLBuildError
        """
        # find the route by the supplied view name
        kw = {}
        # special static files url_for: resolve by route *name* rather than
        # by handler name
        if view_name == 'static':
            kw.update(name=kwargs.pop('name', 'static'))
        elif view_name.endswith('.static'): # blueprint.static
            kwargs.pop('name', None)
            kw.update(name=view_name)
        uri, route = self.router.find_route_by_view_name(view_name, **kw)
        if not (uri and route):
            raise URLBuildError('Endpoint with name `{}` was not found'.format(
                view_name))
        if view_name == 'static' or view_name.endswith('.static'):
            filename = kwargs.pop('filename', None)
            # it's static folder
            # NOTE(review): if 'filename' was not supplied, filename is None
            # and the startswith() below would raise AttributeError —
            # presumably callers always pass filename for folder routes;
            # confirm before relying on it.
            if '<file_uri:' in uri:
                folder_ = uri.split('<file_uri:', 1)[0]
                if folder_.endswith('/'):
                    folder_ = folder_[:-1]
                if filename.startswith('/'):
                    filename = filename[1:]
                uri = '{}/{}'.format(folder_, filename)
        # normalize: strip a trailing slash except for the root URL
        if uri != '/' and uri.endswith('/'):
            uri = uri[:-1]
        out = uri
        # find all the parameters we will need to build in the URL
        matched_params = re.findall(
            self.router.parameter_pattern, uri)
        # _method is only a placeholder now, don't know how to support it
        kwargs.pop('_method', None)
        anchor = kwargs.pop('_anchor', '')
        # _external need SERVER_NAME in config or pass _server arg
        external = kwargs.pop('_external', False)
        scheme = kwargs.pop('_scheme', '')
        if scheme and not external:
            raise ValueError('When specifying _scheme, _external must be True')
        netloc = kwargs.pop('_server', None)
        if netloc is None and external:
            netloc = self.config.get('SERVER_NAME', '')
        if external:
            # derive the scheme from the netloc prefix when present
            # (e.g. "https://example.com"), defaulting to plain http
            if not scheme:
                if ':' in netloc[:8]:
                    scheme = netloc[:8].split(':', 1)[0]
                else:
                    scheme = 'http'
            if '://' in netloc[:8]:
                netloc = netloc.split('://', 1)[-1]
        for match in matched_params:
            name, _type, pattern = self.router.parse_parameter_string(
                match)
            # we only want to match against each individual parameter
            specific_pattern = '^{}$'.format(pattern)
            supplied_param = None
            if name in kwargs:
                supplied_param = kwargs.get(name)
                del kwargs[name]
            else:
                raise URLBuildError(
                    'Required parameter `{}` was not passed to url_for'.format(
                        name))
            supplied_param = str(supplied_param)
            # determine if the parameter supplied by the caller passes the test
            # in the URL
            passes_pattern = re.match(specific_pattern, supplied_param)
            if not passes_pattern:
                if _type != str:
                    msg = (
                        'Value "{}" for parameter `{}` does not '
                        'match pattern for type `{}`: {}'.format(
                            supplied_param, name, _type.__name__, pattern))
                else:
                    msg = (
                        'Value "{}" for parameter `{}` '
                        'does not satisfy pattern {}'.format(
                            supplied_param, name, pattern))
                raise URLBuildError(msg)
            # replace the parameter in the URL with the supplied value
            replacement_regex = '(<{}.*?>)'.format(name)
            out = re.sub(
                replacement_regex, supplied_param, out)
        # parse the remainder of the keyword arguments into a querystring
        query_string = urlencode(kwargs, doseq=True) if kwargs else ''
        # scheme://netloc/path;parameters?query#fragment
        out = urlunparse((scheme, netloc, out, '', query_string, anchor))
        return out
    # -------------------------------------------------------------------- #
    # Request Handling
    # -------------------------------------------------------------------- #
    def converted_response_type(self, response):
        """Placeholder hook; currently a no-op."""
        pass
    async def handle_request(self, request, write_callback, stream_callback):
        """Take a request from the HTTP Server and return a response object
        to be sent back. The HTTP Server only expects a response object, so
        exception handling must be done here.

        :param request: HTTP Request object
        :param write_callback: Synchronous response function to be
            called with the response as the only argument
        :param stream_callback: Coroutine that handles streaming a
            StreamingHTTPResponse if produced by the handler.
        :return: Nothing
        """
        # Define `response` var here to remove warnings about
        # allocation before assignment below.
        response = None
        cancelled = False
        try:
            # -------------------------------------------- #
            # Request Middleware
            # -------------------------------------------- #
            request.app = self
            response = await self._run_request_middleware(request)
            # No middleware results
            if not response:
                # -------------------------------------------- #
                # Execute Handler
                # -------------------------------------------- #
                # Fetch handler from router
                handler, args, kwargs, uri = self.router.get(request)
                request.uri_template = uri
                if handler is None:
                    raise ServerError(
                        ("'None' was returned while requesting a "
                         "handler from the router"))
                # Run response handler
                response = handler(request, *args, **kwargs)
                if isawaitable(response):
                    response = await response
        except CancelledError:
            # If response handler times out, the server handles the error
            # and cancels the handle_request job.
            # In this case, the transport is already closed and we cannot
            # issue a response.
            response = None
            cancelled = True
        except Exception as e:
            # -------------------------------------------- #
            # Response Generation Failed
            # -------------------------------------------- #
            try:
                response = self.error_handler.response(request, e)
                if isawaitable(response):
                    response = await response
            except Exception as e:
                # The error handler itself failed; fall back to default
                # handling, or a generic 500 response.
                if isinstance(e, SanicException):
                    response = self.error_handler.default(request=request,
                                                          exception=e)
                elif self.debug:
                    response = HTTPResponse(
                        "Error while handling error: {}\nStack: {}".format(
                            e, format_exc()), status=500)
                else:
                    response = HTTPResponse(
                        "An error occurred while handling an error",
                        status=500)
        finally:
            # -------------------------------------------- #
            # Response Middleware
            # -------------------------------------------- #
            # Runs even when the handler raised, so error responses are
            # also passed through the response middleware chain.
            # Don't run response middleware if response is None
            if response is not None:
                try:
                    response = await self._run_response_middleware(request,
                                                                   response)
                except CancelledError:
                    # Response middleware can timeout too, as above.
                    response = None
                    cancelled = True
                except BaseException:
                    error_logger.exception(
                        'Exception occurred in one of response '
                        'middleware handlers'
                    )
            if cancelled:
                raise CancelledError()
        # pass the response to the correct callback
        if isinstance(response, StreamingHTTPResponse):
            await stream_callback(response)
        else:
            write_callback(response)
    # -------------------------------------------------------------------- #
    # Testing
    # -------------------------------------------------------------------- #
    @property
    def test_client(self):
        """Return a :class:`SanicTestClient` bound to this application.

        Note: a fresh client instance is created on every attribute access.
        """
        return SanicTestClient(self)
    # -------------------------------------------------------------------- #
    # Execution
    # -------------------------------------------------------------------- #
    def run(self, host=None, port=None, debug=False, ssl=None,
            sock=None, workers=1, protocol=None,
            backlog=100, stop_event=None, register_sys_signals=True,
            access_log=True, **kwargs):
        """Run the HTTP Server and listen until keyboard interrupt or term
        signal. On termination, drain connections before closing.

        :param host: Address to host on
        :param port: Port to host on
        :param debug: Enables debug output (slows server)
        :param ssl: SSLContext, or location of certificate and key
            for SSL encryption of worker(s)
        :param sock: Socket for the server to accept connections from
        :param workers: Number of worker processes
        :param backlog: passed through to the server settings
        :param stop_event: deprecated; only triggers a warning when given
        :param register_sys_signals: whether to register system signal
            handlers
        :param protocol: Subclass of asyncio protocol class
        :param access_log: enables writing access logs
        :return: Nothing
        """
        # Default auto_reload to false
        auto_reload = False
        # If debug is set, default it to true (unless on windows)
        if debug and os.name == 'posix':
            auto_reload = True
        # Allow for overriding either of the defaults
        auto_reload = kwargs.get("auto_reload", auto_reload)
        if sock is None:
            host, port = host or "127.0.0.1", port or 8000
        if protocol is None:
            # Websocket-enabled apps need the websocket-aware protocol.
            protocol = (WebSocketProtocol if self.websocket_enabled
                        else HttpProtocol)
        if stop_event is not None:
            if debug:
                warnings.simplefilter('default')
            warnings.warn("stop_event will be removed from future versions.",
                          DeprecationWarning)
        # compatibility old access_log params
        self.config.ACCESS_LOG = access_log
        server_settings = self._helper(
            host=host, port=port, debug=debug, ssl=ssl, sock=sock,
            workers=workers, protocol=protocol, backlog=backlog,
            register_sys_signals=register_sys_signals, auto_reload=auto_reload)
        try:
            self.is_running = True
            if workers == 1:
                if auto_reload and os.name != 'posix':
                    # This condition must be removed after implementing
                    # auto reloader for other operating systems.
                    raise NotImplementedError
                # The reloader process supervises a child that has
                # SANIC_SERVER_RUNNING set; only the child actually serves.
                if auto_reload and \
                        os.environ.get('SANIC_SERVER_RUNNING') != 'true':
                    reloader_helpers.watchdog(2)
                else:
                    serve(**server_settings)
            else:
                serve_multiple(server_settings, workers)
        except BaseException:
            error_logger.exception(
                'Experienced exception while trying to serve')
            raise
        finally:
            self.is_running = False
        logger.info("Server Stopped")
def stop(self):
"""This kills the Sanic"""
get_event_loop().stop()
    def __call__(self):
        """gunicorn compatibility

        Gunicorn expects a callable application factory; returning the
        app instance itself satisfies that contract.
        """
        return self
    async def create_server(self, host=None, port=None, debug=False,
                            ssl=None, sock=None, protocol=None,
                            backlog=100, stop_event=None,
                            access_log=True):
        """Asynchronous version of `run`.

        NOTE: This does not support multiprocessing and is not the preferred
        way to run a Sanic application.
        Only the ``before_start`` listeners are triggered here; the caller
        is responsible for driving the returned server.
        """
        if sock is None:
            host, port = host or "127.0.0.1", port or 8000
        if protocol is None:
            # Websocket-enabled apps need the websocket-aware protocol.
            protocol = (WebSocketProtocol if self.websocket_enabled
                        else HttpProtocol)
        if stop_event is not None:
            if debug:
                warnings.simplefilter('default')
            warnings.warn("stop_event will be removed from future versions.",
                          DeprecationWarning)
        # compatibility old access_log params
        self.config.ACCESS_LOG = access_log
        server_settings = self._helper(
            host=host, port=port, debug=debug, ssl=ssl, sock=sock,
            loop=get_event_loop(), protocol=protocol,
            backlog=backlog, run_async=True)
        # Trigger before_start events
        await self.trigger_events(
            server_settings.get('before_start', []),
            server_settings.get('loop')
        )
        return await serve(**server_settings)
async def trigger_events(self, events, loop):
"""Trigger events (functions or async)
:param events: one or more sync or async functions to execute
:param loop: event loop
"""
for event in events:
result = event(loop)
if isawaitable(result):
await result
async def _run_request_middleware(self, request):
# The if improves speed. I don't know why
if self.request_middleware:
for middleware in self.request_middleware:
response = middleware(request)
if isawaitable(response):
response = await response
if response:
return response
return None
async def _run_response_middleware(self, request, response):
if self.response_middleware:
for middleware in self.response_middleware:
_response = middleware(request, response)
if isawaitable(_response):
_response = await _response
if _response:
response = _response
break
return response
    def _helper(self, host=None, port=None, debug=False,
                ssl=None, sock=None, workers=1, loop=None,
                protocol=HttpProtocol, backlog=100, stop_event=None,
                register_sys_signals=True, run_async=False, auto_reload=False):
        """Helper function used by `run` and `create_server`.

        Normalizes SSL configuration, wires up listener callbacks, and
        returns the settings dict consumed by ``serve``/``serve_multiple``.
        """
        if isinstance(ssl, dict):
            # try common aliases
            cert = ssl.get('cert') or ssl.get('certificate')
            key = ssl.get('key') or ssl.get('keyfile')
            if cert is None or key is None:
                raise ValueError("SSLContext or certificate and key required.")
            context = create_default_context(purpose=Purpose.CLIENT_AUTH)
            context.load_cert_chain(cert, keyfile=key)
            ssl = context
        if stop_event is not None:
            if debug:
                warnings.simplefilter('default')
            warnings.warn("stop_event will be removed from future versions.",
                          DeprecationWarning)
        self.error_handler.debug = debug
        self.debug = debug
        server_settings = {
            'protocol': protocol,
            'request_class': self.request_class,
            'is_request_stream': self.is_request_stream,
            'router': self.router,
            'host': host,
            'port': port,
            'sock': sock,
            'ssl': ssl,
            'signal': Signal(),
            'debug': debug,
            'request_handler': self.handle_request,
            'error_handler': self.error_handler,
            'request_timeout': self.config.REQUEST_TIMEOUT,
            'response_timeout': self.config.RESPONSE_TIMEOUT,
            'keep_alive_timeout': self.config.KEEP_ALIVE_TIMEOUT,
            'request_max_size': self.config.REQUEST_MAX_SIZE,
            'keep_alive': self.config.KEEP_ALIVE,
            'loop': loop,
            'register_sys_signals': register_sys_signals,
            'backlog': backlog,
            'access_log': self.config.ACCESS_LOG,
            'websocket_max_size': self.config.WEBSOCKET_MAX_SIZE,
            'websocket_max_queue': self.config.WEBSOCKET_MAX_QUEUE,
            'websocket_read_limit': self.config.WEBSOCKET_READ_LIMIT,
            'websocket_write_limit': self.config.WEBSOCKET_WRITE_LIMIT,
            'graceful_shutdown_timeout': self.config.GRACEFUL_SHUTDOWN_TIMEOUT
        }
        # -------------------------------------------- #
        # Register start/stop events
        # -------------------------------------------- #
        for event_name, settings_name, reverse in (
                ("before_server_start", "before_start", False),
                ("after_server_start", "after_start", False),
                ("before_server_stop", "before_stop", True),
                ("after_server_stop", "after_stop", True),
        ):
            # Stop listeners run in reverse registration order so teardown
            # mirrors setup.
            listeners = self.listeners[event_name].copy()
            if reverse:
                listeners.reverse()
            # Prepend sanic to the arguments when listeners are triggered
            listeners = [partial(listener, self) for listener in listeners]
            server_settings[settings_name] = listeners
        if self.configure_logging and debug:
            logger.setLevel(logging.DEBUG)
        if self.config.LOGO is not None and \
                os.environ.get('SANIC_SERVER_RUNNING') != 'true':
            logger.debug(self.config.LOGO)
        if run_async:
            server_settings['run_async'] = True
        # Serve
        if host and port and os.environ.get('SANIC_SERVER_RUNNING') != 'true':
            proto = "http"
            if ssl is not None:
                proto = "https"
            logger.info('Goin\' Fast @ {}://{}:{}'.format(proto, host, port))
        return server_settings
| 39.474558 | 79 | 0.555836 |
acdf70f9781033be565b4365f7f79f2ac49ec9ea | 10,384 | py | Python | airflow/providers/google/cloud/example_dags/example_automl_tables.py | ChaseKnowlden/airflow | 6b71eac1997a7c0db3b8e3aed6b4e65d01871440 | [
"Apache-2.0"
] | 15,947 | 2019-01-05T13:51:02.000Z | 2022-03-31T23:33:16.000Z | airflow/providers/google/cloud/example_dags/example_automl_tables.py | ChaseKnowlden/airflow | 6b71eac1997a7c0db3b8e3aed6b4e65d01871440 | [
"Apache-2.0"
] | 14,603 | 2019-01-05T09:43:19.000Z | 2022-03-31T23:11:59.000Z | airflow/providers/google/cloud/example_dags/example_automl_tables.py | ChaseKnowlden/airflow | 6b71eac1997a7c0db3b8e3aed6b4e65d01871440 | [
"Apache-2.0"
] | 8,429 | 2019-01-05T19:45:47.000Z | 2022-03-31T22:13:01.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that uses Google AutoML services.
"""
import os
from copy import deepcopy
from typing import Dict, List
from airflow import models
from airflow.providers.google.cloud.hooks.automl import CloudAutoMLHook
from airflow.providers.google.cloud.operators.automl import (
AutoMLBatchPredictOperator,
AutoMLCreateDatasetOperator,
AutoMLDeleteDatasetOperator,
AutoMLDeleteModelOperator,
AutoMLDeployModelOperator,
AutoMLGetModelOperator,
AutoMLImportDataOperator,
AutoMLListDatasetOperator,
AutoMLPredictOperator,
AutoMLTablesListColumnSpecsOperator,
AutoMLTablesListTableSpecsOperator,
AutoMLTablesUpdateDatasetOperator,
AutoMLTrainModelOperator,
)
from airflow.utils.dates import days_ago
# Configuration is read from the environment, with example fallbacks.
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "your-project-id")
GCP_AUTOML_LOCATION = os.environ.get("GCP_AUTOML_LOCATION", "us-central1")
GCP_AUTOML_DATASET_BUCKET = os.environ.get(
    "GCP_AUTOML_DATASET_BUCKET", "gs://INVALID BUCKET NAME/bank-marketing.csv"
)
# Display name of the target (label) column in the training data.
TARGET = os.environ.get("GCP_AUTOML_TARGET", "Deposit")
# Example values
MODEL_ID = "TBL123456"
DATASET_ID = "TBL123456"
# Example model
MODEL = {
    "display_name": "auto_model_1",
    "dataset_id": DATASET_ID,
    "tables_model_metadata": {"train_budget_milli_node_hours": 1000},
}
# Example dataset
DATASET = {
    "display_name": "test_set",
    "tables_dataset_metadata": {"target_column_spec_id": ""},
}
IMPORT_INPUT_CONFIG = {"gcs_source": {"input_uris": [GCP_AUTOML_DATASET_BUCKET]}}
# Shorthand for the hook's static helper that pulls the object id out of an
# AutoML resource dict; also exposed to Jinja via user_defined_macros below.
extract_object_id = CloudAutoMLHook.extract_object_id
def get_target_column_spec(columns_specs: List[Dict], column_name: str) -> str:
    """
    Using column name returns spec of the column.

    :param columns_specs: column spec dicts, each with a ``display_name`` key.
    :param column_name: display name of the column to look up.
    :return: the object id extracted from the matching column spec.
    :raises ValueError: if no column with the given display name exists.
    """
    for column in columns_specs:
        if column["display_name"] == column_name:
            return extract_object_id(column)
    # Raise a specific exception type instead of the bare ``Exception`` so
    # callers can handle the lookup failure precisely (still caught by any
    # existing ``except Exception`` handlers).
    raise ValueError(f"Unknown target column: {column_name}")
# Example DAG to create dataset, train model_id and deploy it.
# Pipeline: create dataset -> import data -> inspect table/column specs ->
# mark the target column -> train a model; teardown deletes model + dataset.
with models.DAG(
    "example_create_and_deploy",
    schedule_interval='@once', # Override to match your needs
    start_date=days_ago(1),
    user_defined_macros={
        # Exposed to Jinja templates used in the operator arguments below.
        "get_target_column_spec": get_target_column_spec,
        "target": TARGET,
        "extract_object_id": extract_object_id,
    },
    tags=['example'],
) as create_deploy_dag:
    # [START howto_operator_automl_create_dataset]
    create_dataset_task = AutoMLCreateDatasetOperator(
        task_id="create_dataset_task",
        dataset=DATASET,
        location=GCP_AUTOML_LOCATION,
        project_id=GCP_PROJECT_ID,
    )
    dataset_id = create_dataset_task.output['dataset_id']
    # [END howto_operator_automl_create_dataset]
    # NOTE(review): this stores an XComArg into the module-level MODEL dict,
    # mutating it for anything else that imports this module — confirm that
    # is intended for this example.
    MODEL["dataset_id"] = dataset_id
    # [START howto_operator_automl_import_data]
    import_dataset_task = AutoMLImportDataOperator(
        task_id="import_dataset_task",
        dataset_id=dataset_id,
        location=GCP_AUTOML_LOCATION,
        input_config=IMPORT_INPUT_CONFIG,
    )
    # [END howto_operator_automl_import_data]
    # [START howto_operator_automl_specs]
    list_tables_spec_task = AutoMLTablesListTableSpecsOperator(
        task_id="list_tables_spec_task",
        dataset_id=dataset_id,
        location=GCP_AUTOML_LOCATION,
        project_id=GCP_PROJECT_ID,
    )
    # [END howto_operator_automl_specs]
    # [START howto_operator_automl_column_specs]
    list_columns_spec_task = AutoMLTablesListColumnSpecsOperator(
        task_id="list_columns_spec_task",
        dataset_id=dataset_id,
        table_spec_id="{{ extract_object_id(task_instance.xcom_pull('list_tables_spec_task')[0]) }}",
        location=GCP_AUTOML_LOCATION,
        project_id=GCP_PROJECT_ID,
    )
    # [END howto_operator_automl_column_specs]
    # [START howto_operator_automl_update_dataset]
    # Deep-copy so the shared DATASET constant is not mutated; then point
    # the dataset's target column at the spec id resolved from XCom.
    update = deepcopy(DATASET)
    update["name"] = '{{ task_instance.xcom_pull("create_dataset_task")["name"] }}'
    update["tables_dataset_metadata"][ # type: ignore
        "target_column_spec_id"
    ] = "{{ get_target_column_spec(task_instance.xcom_pull('list_columns_spec_task'), target) }}"
    update_dataset_task = AutoMLTablesUpdateDatasetOperator(
        task_id="update_dataset_task",
        dataset=update,
        location=GCP_AUTOML_LOCATION,
    )
    # [END howto_operator_automl_update_dataset]
    # [START howto_operator_automl_create_model]
    create_model_task = AutoMLTrainModelOperator(
        task_id="create_model_task",
        model=MODEL,
        location=GCP_AUTOML_LOCATION,
        project_id=GCP_PROJECT_ID,
    )
    model_id = create_model_task.output['model_id']
    # [END howto_operator_automl_create_model]
    # [START howto_operator_automl_delete_model]
    delete_model_task = AutoMLDeleteModelOperator(
        task_id="delete_model_task",
        model_id=model_id,
        location=GCP_AUTOML_LOCATION,
        project_id=GCP_PROJECT_ID,
    )
    # [END howto_operator_automl_delete_model]
    delete_datasets_task = AutoMLDeleteDatasetOperator(
        task_id="delete_datasets_task",
        dataset_id=dataset_id,
        location=GCP_AUTOML_LOCATION,
        project_id=GCP_PROJECT_ID,
    )
    (
        import_dataset_task
        >> list_tables_spec_task
        >> list_columns_spec_task
        >> update_dataset_task
        >> create_model_task
    )
    delete_model_task >> delete_datasets_task
    # Task dependencies created via `XComArgs`:
    #   create_dataset_task >> import_dataset_task
    #   create_dataset_task >> list_tables_spec_task
    #   create_dataset_task >> list_columns_spec_task
    #   create_dataset_task >> create_model_task
    #   create_model_task >> delete_model_task
    #   create_dataset_task >> delete_datasets_task
# Example DAG for AutoML datasets operations
# Pipeline: create + populate a dataset, inspect its specs, then list and
# delete every dataset returned by the listing task.
with models.DAG(
    "example_automl_dataset",
    schedule_interval='@once', # Override to match your needs
    start_date=days_ago(1),
    user_defined_macros={"extract_object_id": extract_object_id},
) as example_dag:
    create_dataset_task = AutoMLCreateDatasetOperator(
        task_id="create_dataset_task",
        dataset=DATASET,
        location=GCP_AUTOML_LOCATION,
        project_id=GCP_PROJECT_ID,
    )
    dataset_id = create_dataset_task.output['dataset_id']
    import_dataset_task = AutoMLImportDataOperator(
        task_id="import_dataset_task",
        dataset_id=dataset_id,
        location=GCP_AUTOML_LOCATION,
        input_config=IMPORT_INPUT_CONFIG,
    )
    list_tables_spec_task = AutoMLTablesListTableSpecsOperator(
        task_id="list_tables_spec_task",
        dataset_id=dataset_id,
        location=GCP_AUTOML_LOCATION,
        project_id=GCP_PROJECT_ID,
    )
    list_columns_spec_task = AutoMLTablesListColumnSpecsOperator(
        task_id="list_columns_spec_task",
        dataset_id=dataset_id,
        table_spec_id="{{ extract_object_id(task_instance.xcom_pull('list_tables_spec_task')[0]) }}",
        location=GCP_AUTOML_LOCATION,
        project_id=GCP_PROJECT_ID,
    )
    # [START howto_operator_list_dataset]
    list_datasets_task = AutoMLListDatasetOperator(
        task_id="list_datasets_task",
        location=GCP_AUTOML_LOCATION,
        project_id=GCP_PROJECT_ID,
    )
    # [END howto_operator_list_dataset]
    # [START howto_operator_delete_dataset]
    # Deletes every dataset id collected by list_datasets_task via XCom.
    delete_datasets_task = AutoMLDeleteDatasetOperator(
        task_id="delete_datasets_task",
        dataset_id="{{ task_instance.xcom_pull('list_datasets_task', key='dataset_id_list') | list }}",
        location=GCP_AUTOML_LOCATION,
        project_id=GCP_PROJECT_ID,
    )
    # [END howto_operator_delete_dataset]
    (
        import_dataset_task
        >> list_tables_spec_task
        >> list_columns_spec_task
        >> list_datasets_task
        >> delete_datasets_task
    )
    # Task dependencies created via `XComArgs`:
    #   create_dataset_task >> import_dataset_task
    #   create_dataset_task >> list_tables_spec_task
    #   create_dataset_task >> list_columns_spec_task
# Example DAG that fetches an existing model and deploys it.
with models.DAG(
    "example_gcp_get_deploy",
    schedule_interval='@once', # Override to match your needs
    start_date=days_ago(1),
    tags=["example"],
) as get_deploy_dag:
    # [START howto_operator_get_model]
    get_model_task = AutoMLGetModelOperator(
        task_id="get_model_task",
        model_id=MODEL_ID,
        location=GCP_AUTOML_LOCATION,
        project_id=GCP_PROJECT_ID,
    )
    # [END howto_operator_get_model]
    # [START howto_operator_deploy_model]
    deploy_model_task = AutoMLDeployModelOperator(
        task_id="deploy_model_task",
        model_id=MODEL_ID,
        location=GCP_AUTOML_LOCATION,
        project_id=GCP_PROJECT_ID,
    )
    # [END howto_operator_deploy_model]
# Example DAG demonstrating online and batch prediction.
with models.DAG(
    "example_gcp_predict",
    schedule_interval='@once', # Override to match your needs
    start_date=days_ago(1),
    tags=["example"],
) as predict_dag:
    # [START howto_operator_prediction]
    predict_task = AutoMLPredictOperator(
        task_id="predict_task",
        model_id=MODEL_ID,
        payload={}, # Add your own payload, the used model_id must be deployed
        location=GCP_AUTOML_LOCATION,
        project_id=GCP_PROJECT_ID,
    )
    # [END howto_operator_prediction]
    # [START howto_operator_batch_prediction]
    batch_predict_task = AutoMLBatchPredictOperator(
        task_id="batch_predict_task",
        model_id=MODEL_ID,
        input_config={}, # Add your config
        output_config={}, # Add your config
        location=GCP_AUTOML_LOCATION,
        project_id=GCP_PROJECT_ID,
    )
    # [END howto_operator_batch_prediction]
| 32.860759 | 103 | 0.723228 |
acdf7169e44873faedfd60f4e4ab6105a4fb1a65 | 16,998 | py | Python | src/oci/data_integration/models/dependent_object.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/data_integration/models/dependent_object.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/data_integration/models/dependent_object.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class DependentObject(object):
"""
The information about a dependent object.
"""
    def __init__(self, **kwargs):
        """
        Initializes a new DependentObject object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):

        :param key:
            The value to assign to the key property of this DependentObject.
        :type key: str

        :param model_type:
            The value to assign to the model_type property of this DependentObject.
        :type model_type: str

        :param model_version:
            The value to assign to the model_version property of this DependentObject.
        :type model_version: str

        :param name:
            The value to assign to the name property of this DependentObject.
        :type name: str

        :param description:
            The value to assign to the description property of this DependentObject.
        :type description: str

        :param application_version:
            The value to assign to the application_version property of this DependentObject.
        :type application_version: int

        :param object_status:
            The value to assign to the object_status property of this DependentObject.
        :type object_status: int

        :param identifier:
            The value to assign to the identifier property of this DependentObject.
        :type identifier: str

        :param parent_ref:
            The value to assign to the parent_ref property of this DependentObject.
        :type parent_ref: oci.data_integration.models.ParentReference

        :param object_version:
            The value to assign to the object_version property of this DependentObject.
        :type object_version: int

        :param dependent_object_metadata:
            The value to assign to the dependent_object_metadata property of this DependentObject.
        :type dependent_object_metadata: list[oci.data_integration.models.PatchObjectMetadata]

        :param published_object_metadata:
            The value to assign to the published_object_metadata property of this DependentObject.
        :type published_object_metadata: dict(str, PatchObjectMetadata)

        :param source_application_info:
            The value to assign to the source_application_info property of this DependentObject.
        :type source_application_info: oci.data_integration.models.SourceApplicationInfo

        :param time_patched:
            The value to assign to the time_patched property of this DependentObject.
        :type time_patched: datetime

        :param metadata:
            The value to assign to the metadata property of this DependentObject.
        :type metadata: oci.data_integration.models.ObjectMetadata

        :param key_map:
            The value to assign to the key_map property of this DependentObject.
        :type key_map: dict(str, str)
        """
        # Attribute name -> declared swagger type; presumably consumed by the
        # SDK's (de)serialization machinery — confirm against oci.util usage.
        self.swagger_types = {
            'key': 'str',
            'model_type': 'str',
            'model_version': 'str',
            'name': 'str',
            'description': 'str',
            'application_version': 'int',
            'object_status': 'int',
            'identifier': 'str',
            'parent_ref': 'ParentReference',
            'object_version': 'int',
            'dependent_object_metadata': 'list[PatchObjectMetadata]',
            'published_object_metadata': 'dict(str, PatchObjectMetadata)',
            'source_application_info': 'SourceApplicationInfo',
            'time_patched': 'datetime',
            'metadata': 'ObjectMetadata',
            'key_map': 'dict(str, str)'
        }
        # Python attribute name -> JSON/camelCase field name.
        self.attribute_map = {
            'key': 'key',
            'model_type': 'modelType',
            'model_version': 'modelVersion',
            'name': 'name',
            'description': 'description',
            'application_version': 'applicationVersion',
            'object_status': 'objectStatus',
            'identifier': 'identifier',
            'parent_ref': 'parentRef',
            'object_version': 'objectVersion',
            'dependent_object_metadata': 'dependentObjectMetadata',
            'published_object_metadata': 'publishedObjectMetadata',
            'source_application_info': 'sourceApplicationInfo',
            'time_patched': 'timePatched',
            'metadata': 'metadata',
            'key_map': 'keyMap'
        }
        # Backing fields for the properties below; the class decorator
        # populates them from **kwargs.
        self._key = None
        self._model_type = None
        self._model_version = None
        self._name = None
        self._description = None
        self._application_version = None
        self._object_status = None
        self._identifier = None
        self._parent_ref = None
        self._object_version = None
        self._dependent_object_metadata = None
        self._published_object_metadata = None
        self._source_application_info = None
        self._time_patched = None
        self._metadata = None
        self._key_map = None
    @property
    def key(self):
        """
        Gets the key of this DependentObject.
        Generated key that can be used in API calls to identify application.

        :return: The key of this DependentObject.
        :rtype: str
        """
        return self._key

    @key.setter
    def key(self, key):
        """
        Sets the key of this DependentObject.
        Generated key that can be used in API calls to identify application.
        No validation is performed; the value is stored as-is.

        :param key: The key of this DependentObject.
        :type: str
        """
        self._key = key
    @property
    def model_type(self):
        """
        Gets the model_type of this DependentObject.
        The object type.

        :return: The model_type of this DependentObject.
        :rtype: str
        """
        return self._model_type

    @model_type.setter
    def model_type(self, model_type):
        """
        Sets the model_type of this DependentObject.
        The object type. No validation is performed; the value is stored as-is.

        :param model_type: The model_type of this DependentObject.
        :type: str
        """
        self._model_type = model_type
    @property
    def model_version(self):
        """
        Gets the model_version of this DependentObject.
        The object's model version.

        :return: The model_version of this DependentObject.
        :rtype: str
        """
        return self._model_version

    @model_version.setter
    def model_version(self, model_version):
        """
        Sets the model_version of this DependentObject.
        The object's model version. No validation is performed.

        :param model_version: The model_version of this DependentObject.
        :type: str
        """
        self._model_version = model_version
    # name -- free-form display name, editable, up to 1000 characters (str; ``None`` until assigned).
    @property
    def name(self):
        """
        Gets the name of this DependentObject.
        Free form text without any restriction on permitted characters. Name can have letters, numbers, and special characters. The value is editable and is restricted to 1000 characters.
        :return: The name of this DependentObject.
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """
        Sets the name of this DependentObject.
        Free form text without any restriction on permitted characters. Name can have letters, numbers, and special characters. The value is editable and is restricted to 1000 characters.
        :param name: The name of this DependentObject.
        :type: str
        """
        self._name = name
    # description -- detailed description text (str; ``None`` until assigned).
    @property
    def description(self):
        """
        Gets the description of this DependentObject.
        Detailed description for the object.
        :return: The description of this DependentObject.
        :rtype: str
        """
        return self._description
    @description.setter
    def description(self, description):
        """
        Sets the description of this DependentObject.
        Detailed description for the object.
        :param description: The description of this DependentObject.
        :type: str
        """
        self._description = description
    # application_version -- version of the owning application (int; ``None`` until assigned).
    @property
    def application_version(self):
        """
        Gets the application_version of this DependentObject.
        The application's version.
        :return: The application_version of this DependentObject.
        :rtype: int
        """
        return self._application_version
    @application_version.setter
    def application_version(self, application_version):
        """
        Sets the application_version of this DependentObject.
        The application's version.
        :param application_version: The application_version of this DependentObject.
        :type: int
        """
        self._application_version = application_version
    # object_status -- 1 marks a shallow cross-object reference; other values reserved (int).
    @property
    def object_status(self):
        """
        Gets the object_status of this DependentObject.
        The status of an object that can be set to value 1 for shallow references across objects, other values reserved.
        :return: The object_status of this DependentObject.
        :rtype: int
        """
        return self._object_status
    @object_status.setter
    def object_status(self, object_status):
        """
        Sets the object_status of this DependentObject.
        The status of an object that can be set to value 1 for shallow references across objects, other values reserved.
        :param object_status: The object_status of this DependentObject.
        :type: int
        """
        self._object_status = object_status
    # identifier -- editable id; upper-case letters, digits, underscore only, and must
    # begin with an upper-case letter or underscore (str; ``None`` until assigned).
    @property
    def identifier(self):
        """
        Gets the identifier of this DependentObject.
        Value can only contain upper case letters, underscore, and numbers. It should begin with upper case letter or underscore. The value can be modified.
        :return: The identifier of this DependentObject.
        :rtype: str
        """
        return self._identifier
    @identifier.setter
    def identifier(self, identifier):
        """
        Sets the identifier of this DependentObject.
        Value can only contain upper case letters, underscore, and numbers. It should begin with upper case letter or underscore. The value can be modified.
        :param identifier: The identifier of this DependentObject.
        :type: str
        """
        self._identifier = identifier
    # parent_ref -- reference to the parent object (ParentReference model; ``None`` until assigned).
    @property
    def parent_ref(self):
        """
        Gets the parent_ref of this DependentObject.
        :return: The parent_ref of this DependentObject.
        :rtype: oci.data_integration.models.ParentReference
        """
        return self._parent_ref
    @parent_ref.setter
    def parent_ref(self, parent_ref):
        """
        Sets the parent_ref of this DependentObject.
        :param parent_ref: The parent_ref of this DependentObject.
        :type: oci.data_integration.models.ParentReference
        """
        self._parent_ref = parent_ref
    # object_version -- tracks changes to this object instance (int; ``None`` until assigned).
    @property
    def object_version(self):
        """
        Gets the object_version of this DependentObject.
        The version of the object that is used to track changes in the object instance.
        :return: The object_version of this DependentObject.
        :rtype: int
        """
        return self._object_version
    @object_version.setter
    def object_version(self, object_version):
        """
        Sets the object_version of this DependentObject.
        The version of the object that is used to track changes in the object instance.
        :param object_version: The object_version of this DependentObject.
        :type: int
        """
        self._object_version = object_version
    # dependent_object_metadata -- dependent objects included in this patch
    # (list of PatchObjectMetadata; ``None`` until assigned).
    @property
    def dependent_object_metadata(self):
        """
        Gets the dependent_object_metadata of this DependentObject.
        A list of dependent objects in this patch.
        :return: The dependent_object_metadata of this DependentObject.
        :rtype: list[oci.data_integration.models.PatchObjectMetadata]
        """
        return self._dependent_object_metadata
    @dependent_object_metadata.setter
    def dependent_object_metadata(self, dependent_object_metadata):
        """
        Sets the dependent_object_metadata of this DependentObject.
        A list of dependent objects in this patch.
        :param dependent_object_metadata: The dependent_object_metadata of this DependentObject.
        :type: list[oci.data_integration.models.PatchObjectMetadata]
        """
        self._dependent_object_metadata = dependent_object_metadata
    # published_object_metadata -- objects published/unpublished by this patch,
    # keyed by string (dict of str -> PatchObjectMetadata; ``None`` until assigned).
    @property
    def published_object_metadata(self):
        """
        Gets the published_object_metadata of this DependentObject.
        A list of objects that are published or unpublished in this patch.
        :return: The published_object_metadata of this DependentObject.
        :rtype: dict(str, PatchObjectMetadata)
        """
        return self._published_object_metadata
    @published_object_metadata.setter
    def published_object_metadata(self, published_object_metadata):
        """
        Sets the published_object_metadata of this DependentObject.
        A list of objects that are published or unpublished in this patch.
        :param published_object_metadata: The published_object_metadata of this DependentObject.
        :type: dict(str, PatchObjectMetadata)
        """
        self._published_object_metadata = published_object_metadata
    # source_application_info -- SourceApplicationInfo model (``None`` until assigned).
    @property
    def source_application_info(self):
        """
        Gets the source_application_info of this DependentObject.
        :return: The source_application_info of this DependentObject.
        :rtype: oci.data_integration.models.SourceApplicationInfo
        """
        return self._source_application_info
    @source_application_info.setter
    def source_application_info(self, source_application_info):
        """
        Sets the source_application_info of this DependentObject.
        :param source_application_info: The source_application_info of this DependentObject.
        :type: oci.data_integration.models.SourceApplicationInfo
        """
        self._source_application_info = source_application_info
    # time_patched -- when the application was patched, RFC3339 timestamp
    # (datetime; ``None`` until assigned).
    @property
    def time_patched(self):
        """
        Gets the time_patched of this DependentObject.
        The date and time the application was patched, in the timestamp format defined by RFC3339.
        :return: The time_patched of this DependentObject.
        :rtype: datetime
        """
        return self._time_patched
    @time_patched.setter
    def time_patched(self, time_patched):
        """
        Sets the time_patched of this DependentObject.
        The date and time the application was patched, in the timestamp format defined by RFC3339.
        :param time_patched: The time_patched of this DependentObject.
        :type: datetime
        """
        self._time_patched = time_patched
    # metadata -- ObjectMetadata model (``None`` until assigned).
    @property
    def metadata(self):
        """
        Gets the metadata of this DependentObject.
        :return: The metadata of this DependentObject.
        :rtype: oci.data_integration.models.ObjectMetadata
        """
        return self._metadata
    @metadata.setter
    def metadata(self, metadata):
        """
        Sets the metadata of this DependentObject.
        :param metadata: The metadata of this DependentObject.
        :type: oci.data_integration.models.ObjectMetadata
        """
        self._metadata = metadata
    # key_map -- maps user-provided keys to service-generated keys
    # (dict of str -> str; ``None`` until assigned).
    @property
    def key_map(self):
        """
        Gets the key_map of this DependentObject.
        A key map. If provided, key is replaced with generated key. This structure provides mapping between user provided key and generated key.
        :return: The key_map of this DependentObject.
        :rtype: dict(str, str)
        """
        return self._key_map
    @key_map.setter
    def key_map(self, key_map):
        """
        Sets the key_map of this DependentObject.
        A key map. If provided, key is replaced with generated key. This structure provides mapping between user provided key and generated key.
        :param key_map: The key_map of this DependentObject.
        :type: dict(str, str)
        """
        self._key_map = key_map
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 32.438931 | 245 | 0.653018 |
acdf71ad6290ba02585f7086ff6bcc651d603b25 | 5,093 | py | Python | tests/validation/tests/v3_api/test_k3s_airgap.py | ursinnDev/rancher_rancher | 152db95e885cd0c5a52a21c5e8e8ee73e327885f | [
"Apache-2.0"
] | 18,697 | 2015-07-09T17:56:35.000Z | 2022-03-31T21:10:25.000Z | tests/validation/tests/v3_api/test_k3s_airgap.py | ursinnDev/rancher_rancher | 152db95e885cd0c5a52a21c5e8e8ee73e327885f | [
"Apache-2.0"
] | 31,132 | 2015-07-09T17:28:32.000Z | 2022-03-31T23:31:26.000Z | tests/validation/tests/v3_api/test_k3s_airgap.py | ursinnDev/rancher_rancher | 152db95e885cd0c5a52a21c5e8e8ee73e327885f | [
"Apache-2.0"
] | 2,979 | 2015-07-12T02:48:07.000Z | 2022-03-31T13:35:39.000Z | import os
from lib.aws import AWS_USER
from .common import AmazonWebServices
from .test_airgap import (AG_HOST_NAME, ARCH, NUMBER_OF_INSTANCES,
TARBALL_TYPE,
get_bastion_node, prepare_registries_mirror_on_node,
run_command_on_airgap_node, prepare_private_registry,
copy_certs_to_node, deploy_airgap_cluster,
trust_certs_on_node, add_tarball_to_node,
optionally_add_cluster_to_rancher)
# Test configuration read from the environment; all optional, default empty.
RANCHER_K3S_VERSION = os.environ.get("RANCHER_K3S_VERSION", "")  # k3s release tag used in download URLs
K3S_SERVER_OPTIONS = os.environ.get("K3S_SERVER_OPTIONS", "")  # extra options for the k3s server install
K3S_AGENT_OPTIONS = os.environ.get("K3S_AGENT_OPTIONS", "")  # extra options for the k3s agent install
def test_deploy_airgap_k3s_system_default_registry():
    """Deploy an airgapped k3s cluster that pulls its images through the
    bastion's registry via k3s' --system-default-registry flag."""
    bastion = get_bastion_node(auth=False)
    prepare_private_registry(bastion, RANCHER_K3S_VERSION)
    nodes = prepare_airgap_k3s(bastion, NUMBER_OF_INSTANCES,
                               'system_default_registry')
    server_options = "{} --system-default-registry={}".format(
        K3S_SERVER_OPTIONS, bastion.host_name)
    deploy_airgap_cluster(bastion, nodes, "k3s", server_options,
                          K3S_AGENT_OPTIONS)
def test_deploy_airgap_k3s_private_registry():
    """Deploy an airgapped k3s cluster whose nodes use a registries.yaml
    mirror pointing at the bastion's private registry."""
    bastion = get_bastion_node(auth=True)
    prepare_private_registry(bastion, RANCHER_K3S_VERSION)
    nodes = prepare_airgap_k3s(bastion, NUMBER_OF_INSTANCES,
                               'private_registry')
    deploy_airgap_cluster(bastion, nodes, "k3s",
                          K3S_SERVER_OPTIONS, K3S_AGENT_OPTIONS)
    optionally_add_cluster_to_rancher(bastion, nodes)
def test_deploy_airgap_k3s_tarball():
    """Deploy an airgapped k3s cluster seeded with the airgap image
    tarball copied onto every node."""
    bastion = get_bastion_node()
    add_k3s_tarball_to_bastion(bastion, RANCHER_K3S_VERSION)
    nodes = prepare_airgap_k3s(bastion, NUMBER_OF_INSTANCES, 'tarball')
    deploy_airgap_cluster(bastion, nodes, "k3s",
                          K3S_SERVER_OPTIONS, K3S_AGENT_OPTIONS)
    optionally_add_cluster_to_rancher(bastion, nodes, prep="k3s")
def add_k3s_tarball_to_bastion(bastion_node, k3s_version):
    """Download the k3s binary, the install script and the airgap image
    tarball for *k3s_version* onto the bastion node."""
    # arm64 releases publish the binary under a different asset name.
    binary_asset = 'k3s-arm64' if ARCH == 'arm64' else 'k3s'
    release_base = 'https://github.com/k3s-io/k3s/releases/download'
    downloads = (
        'wget -O k3s-airgap-images-{arch}.{ext} '
        '{base}/{ver}/k3s-airgap-images-{arch}.{ext}'.format(
            arch=ARCH, ext=TARBALL_TYPE, base=release_base, ver=k3s_version),
        'wget -O k3s-install.sh https://get.k3s.io/',
        'wget -O k3s {base}/{ver}/{binary}'.format(
            base=release_base, ver=k3s_version, binary=binary_asset),
    )
    bastion_node.execute_command(' && '.join(downloads))
def prepare_airgap_k3s(bastion_node, number_of_nodes, method):
    """Create `number_of_nodes` private (no public IP) EC2 instances and
    prepare each of them for an airgapped k3s install.

    `method` selects how container images will reach the nodes:
    'private_registry', 'system_default_registry' or 'tarball'.
    Returns the list of created node objects.
    """
    node_name = AG_HOST_NAME + "-k3s-airgap"
    # Create Airgap Node in AWS
    ag_nodes = AmazonWebServices().create_multiple_nodes(
        number_of_nodes, node_name, public_ip=False)
    for num, ag_node in enumerate(ag_nodes):
        # Copy relevant k3s files to airgapped node
        # (install script, binary and certs are pushed from the bastion,
        # which is the only host with network reach to the private nodes).
        ag_node_copy_files = \
            'scp -i "{0}.pem" -o StrictHostKeyChecking=no ./k3s-install.sh ' \
            '{1}@{2}:~/install.sh && ' \
            'scp -i "{0}.pem" -o StrictHostKeyChecking=no ./k3s ' \
            '{1}@{2}:~/k3s && ' \
            'scp -i "{0}.pem" -o StrictHostKeyChecking=no certs/* ' \
            '{1}@{2}:~/'.format(bastion_node.ssh_key_name, AWS_USER,
                                ag_node.private_ip_address)
        bastion_node.execute_command(ag_node_copy_files)
        # Install the binary on the node's PATH and mark scripts executable.
        ag_node_make_executable = \
            'sudo mv ./k3s /usr/local/bin/k3s && ' \
            'sudo chmod +x /usr/local/bin/k3s && sudo chmod +x install.sh'
        run_command_on_airgap_node(bastion_node, ag_node,
                                   ag_node_make_executable)
        # Per-method image delivery setup.
        if method == 'private_registry':
            prepare_registries_mirror_on_node(bastion_node, ag_node, 'k3s')
        elif method == 'system_default_registry':
            copy_certs_to_node(bastion_node, ag_node)
            trust_certs_on_node(bastion_node, ag_node)
        elif method == 'tarball':
            add_tarball_to_node(bastion_node, ag_node,
                                'k3s-airgap-images-{0}.{1}'.format(
                                    ARCH, TARBALL_TYPE), 'k3s')
        print("Airgapped K3S Instance Details:\nNAME: {}-{}\nPRIVATE IP: {}\n"
              "".format(node_name, num, ag_node.private_ip_address))
    assert len(ag_nodes) == NUMBER_OF_INSTANCES
    print(
        '{} airgapped k3s instance(s) created.\n'
        'Connect to these and run commands by connecting to bastion node, '
        'then connecting to these:\n'
        'ssh -i {}.pem {}@NODE_PRIVATE_IP'.format(
            NUMBER_OF_INSTANCES, bastion_node.ssh_key_name, AWS_USER))
    # Sanity-check: airgapped nodes must be private-only.
    for ag_node in ag_nodes:
        assert ag_node.private_ip_address is not None
        assert ag_node.public_ip_address is None
    return ag_nodes
| 45.882883 | 79 | 0.650893 |
acdf71f88a3ebb0d289279df464afd75fe09fb58 | 11,364 | py | Python | azure-devops/azext_devops/dev/team/wiki.py | doggy8088/azure-devops-cli-extension | 2f6b1a6ffbc49ae454df640a8bb00dac991d6514 | [
"MIT"
] | 326 | 2019-04-10T12:38:23.000Z | 2022-03-31T23:07:49.000Z | azure-devops/azext_devops/dev/team/wiki.py | doggy8088/azure-devops-cli-extension | 2f6b1a6ffbc49ae454df640a8bb00dac991d6514 | [
"MIT"
] | 562 | 2019-04-10T07:36:12.000Z | 2022-03-28T07:37:54.000Z | azure-devops/azext_devops/dev/team/wiki.py | doggy8088/azure-devops-cli-extension | 2f6b1a6ffbc49ae454df640a8bb00dac991d6514 | [
"MIT"
] | 166 | 2019-04-10T07:59:40.000Z | 2022-03-16T14:17:13.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import webbrowser
from knack.util import CLIError
from azext_devops.dev.common.services import (get_wiki_client,
get_git_client,
get_project_id_from_name,
resolve_instance,
resolve_instance_and_project,
resolve_instance_project_and_repo)
# Default commit messages used when the caller does not pass --comment.
_DEFAULT_PAGE_ADD_MESSAGE = 'Added a new page using Azure DevOps CLI'
_DEFAULT_PAGE_UPDATE_MESSAGE = 'Updated the page using Azure DevOps CLI'
_DEFAULT_PAGE_DELETE_MESSAGE = 'Deleted the page using Azure DevOps CLI'
def create_wiki(name=None, wiki_type='projectwiki', mapped_path=None, version=None,
                organization=None, project=None, repository=None, detect=None):
    """Create a wiki.
    :param name: Name of the new wiki.
    :type name: str
    :param wiki_type: Type of wiki to create.
    :type wiki_type: str
    :param version: [Required for codewiki type] Repository branch name to publish the code wiki from.
    :type version: str
    :param mapped_path: [Required for codewiki type] Mapped path of the new wiki
    e.g. '/' to publish from root of repository.
    :type mapped_path: str
    :param repository: [Required for codewiki type] Name or ID of the repository to publish the wiki from.
    :type repository: str
    """
    repository_id = None
    if wiki_type == 'codewiki':
        # Code wikis are backed by a git repository, so the repo must be
        # resolved (and converted to its id) in addition to org/project.
        if not name:
            raise CLIError('--name is required for wiki type \'codewiki\'')
        organization, project, repository = resolve_instance_project_and_repo(detect=detect,
                                                                             organization=organization,
                                                                             project=project,
                                                                             repo=repository,
                                                                             repo_required=True)
        repository_id = _get_repository_id_from_name(organization=organization,
                                                     project=project,
                                                     repository=repository)
    else:
        # Project wikis only need the organization/project context.
        organization, project = resolve_instance_and_project(detect=detect,
                                                             organization=organization,
                                                             project=project)
    wiki_client = get_wiki_client(organization)
    from azext_devops.devops_sdk.v5_0.wiki.models import WikiCreateParametersV2
    wiki_params = WikiCreateParametersV2()
    wiki_params.name = name
    wiki_params.type = wiki_type
    # The create API expects the project id, not its display name.
    project_id = get_project_id_from_name(organization=organization,
                                          project=project)
    wiki_params.project_id = project_id
    wiki_params.repository_id = repository_id
    if mapped_path:
        wiki_params.mapped_path = mapped_path
    if version:
        # Branch to publish the code wiki from (codewiki only).
        from azext_devops.devops_sdk.v5_0.wiki.models import GitVersionDescriptor
        version_descriptor = GitVersionDescriptor()
        version_descriptor.version = version
        wiki_params.version = version_descriptor
    return wiki_client.create_wiki(wiki_create_params=wiki_params, project=project)
def delete_wiki(wiki, organization=None, project=None, detect=None):
    """Delete a wiki.
    :param wiki: Name or Id of the wiki to delete.
    :type wiki: str
    """
    organization, project = resolve_instance_and_project(
        detect=detect, organization=organization, project=project)
    client = get_wiki_client(organization)
    return client.delete_wiki(wiki_identifier=wiki, project=project)
def list_wiki(scope='project', organization=None, project=None, detect=None):
    """List all the wikis in a project or organization.
    :param scope: List the wikis at project or organization level.
    :type scope: str
    """
    if scope != 'project':
        # Organization-level listing: the project argument is passed through
        # unchanged (may be None).
        organization = resolve_instance(detect=detect,
                                        organization=organization)
    else:
        organization, project = resolve_instance_and_project(
            detect=detect, organization=organization, project=project)
    client = get_wiki_client(organization)
    return client.get_all_wikis(project=project)
def show_wiki(wiki, open=False, organization=None, project=None, detect=None):  # pylint: disable=redefined-builtin
    """Show details of a wiki.
    :param wiki: Name or Id of the wiki.
    :type wiki: str
    :param open: Open the wiki in your web browser.
    :type open: bool
    """
    organization, project = resolve_instance_and_project(
        detect=detect, organization=organization, project=project)
    client = get_wiki_client(organization)
    result = client.get_wiki(wiki_identifier=wiki, project=project)
    if open:
        webbrowser.open_new(url=result.remote_url)
    return result
def add_page(wiki, path, comment=_DEFAULT_PAGE_ADD_MESSAGE, content=None, file_path=None,
             encoding='utf-8', organization=None, project=None, detect=None):
    """Add a new page.
    :param wiki: Name or Id of the wiki.
    :type wiki: str
    :param path: Path of the wiki page.
    :type path: str
    :param content: Content of the wiki page. Ignored if --file-path is specified.
    :type content: str
    :param file_path: Path of the file input if content is specified in the file.
    :type file_path: str
    :param encoding: Encoding of the file. Used in conjunction with --file-path parameter.
    :type encoding: str
    :param comment: Comment in the commit message of file add operation.
    :type comment: str
    """
    if not content and not file_path:
        raise CLIError('Either --file-path or --content must be specified.')
    organization, project = resolve_instance_and_project(
        detect=detect, organization=organization, project=project)
    client = get_wiki_client(organization)
    from azext_devops.devops_sdk.v5_0.wiki.models import WikiPageCreateOrUpdateParameters
    page_params = WikiPageCreateOrUpdateParameters()
    if content:
        # Inline content takes precedence over --file-path.
        page_params.content = content
    else:
        from azext_devops.dev.common.utils import read_file_content
        page_params.content = read_file_content(file_path=file_path,
                                                encoding=encoding)
    return client.create_or_update_page(
        parameters=page_params, wiki_identifier=wiki, project=project,
        path=path, version=None, comment=comment)
def update_page(wiki, path, version, comment=_DEFAULT_PAGE_UPDATE_MESSAGE, content=None, file_path=None,
                encoding='utf-8', organization=None, project=None, detect=None):
    """Edit a page.
    :param wiki: Name or Id of the wiki.
    :type wiki: str
    :param path: Path of the wiki page.
    :type path: str
    :param content: Content of the wiki page. Ignored if --file-path is specified.
    :type content: str
    :param file_path: Path of the file input if content is specified in the file.
    :type file_path: str
    :param encoding: Encoding of the file. Used in conjunction with --file-path parameter.
    :type encoding: str
    :param comment: Comment in the commit message of file edit operation.
    :type comment: str
    :param version: Version (ETag) of file to edit.
    :type version: str
    """
    if not content and not file_path:
        raise CLIError('Either --file-path or --content must be specified.')
    organization, project = resolve_instance_and_project(
        detect=detect, organization=organization, project=project)
    client = get_wiki_client(organization)
    from azext_devops.devops_sdk.v5_0.wiki.models import WikiPageCreateOrUpdateParameters
    page_params = WikiPageCreateOrUpdateParameters()
    if content:
        # Inline content takes precedence over --file-path.
        page_params.content = content
    else:
        from azext_devops.dev.common.utils import read_file_content
        page_params.content = read_file_content(file_path=file_path,
                                                encoding=encoding)
    return client.create_or_update_page(
        parameters=page_params, wiki_identifier=wiki, project=project,
        path=path, version=version, comment=comment)
def get_page(wiki, path, version=None, open=False,  # pylint: disable=redefined-builtin
             include_content=False, organization=None, project=None, detect=None):
    """Get the content of a page or open a page.
    :param wiki: Name or Id of the wiki.
    :type wiki: str
    :param path: Path of the wiki page.
    :type path: str
    :param version: Version (ETag) of the wiki page.
    :type version: str
    :param include_content: Include content of the page.
    :type include_content: str
    :param open: Open the wiki page in your web browser.
    :type open: bool
    """
    organization, project = resolve_instance_and_project(
        detect=detect, organization=organization, project=project)
    client = get_wiki_client(organization)
    page = client.get_page(wiki_identifier=wiki, project=project, path=path,
                           recursion_level=None, version_descriptor=version,
                           include_content=include_content)
    if open:
        webbrowser.open_new(url=page.page.remote_url)
    return page
def delete_page(wiki, path, comment=_DEFAULT_PAGE_DELETE_MESSAGE, organization=None, project=None, detect=None):
    """Delete a page.
    :param wiki: Name or Id of the wiki.
    :type wiki: str
    :param path: Path of the wiki page.
    :type path: str
    :param comment: Comment in the commit message of delete operation.
    :type comment: str
    """
    organization, project = resolve_instance_and_project(
        detect=detect, organization=organization, project=project)
    return get_wiki_client(organization).delete_page(
        wiki_identifier=wiki, path=path, comment=comment, project=project)
def _get_repository_id_from_name(organization, project, repository):
    # Resolve a repository name (or id) to the canonical id the wiki
    # create API requires.
    git_client = get_git_client(organization)
    repo = git_client.get_repository(project=project, repository_id=repository)
    return repo.id
| 49.194805 | 115 | 0.616156 |
acdf7213e0479d4ee869b948d59995baeaf488c3 | 1,383 | py | Python | tests/configuration/test_default_configuration.py | cclauss/statue | ac582dcf1cbcb4379028ef15339ac400bfe8a642 | [
"Apache-2.0"
] | 8 | 2020-09-29T12:31:08.000Z | 2022-02-28T08:49:48.000Z | tests/configuration/test_default_configuration.py | cclauss/statue | ac582dcf1cbcb4379028ef15339ac400bfe8a642 | [
"Apache-2.0"
] | 20 | 2020-06-13T20:21:27.000Z | 2022-03-29T12:52:36.000Z | tests/configuration/test_default_configuration.py | saroad2/statue | ac582dcf1cbcb4379028ef15339ac400bfe8a642 | [
"Apache-2.0"
] | null | null | null | import pytest
from statue.configuration import Configuration
DUMMY_CONFIGURATION = {"a": "b"}
@pytest.fixture
def mock_default_configuration_path(mocker):
return mocker.patch("statue.configuration.DEFAULT_CONFIGURATION_FILE")
def test_set_default_configuration(clear_configuration):
Configuration.set_default_configuration(DUMMY_CONFIGURATION)
assert (
Configuration.default_configuration() == DUMMY_CONFIGURATION
), "Default configuration was not set"
def test_read_default_configuration_from_file_success(
clear_configuration, mock_default_configuration_path, mock_toml_load
):
Configuration.set_default_configuration(None)
mock_default_configuration_path.exists.return_value = True
mock_toml_load.return_value = DUMMY_CONFIGURATION
assert (
Configuration.default_configuration() == DUMMY_CONFIGURATION
), "Default configuration was not set"
mock_toml_load.assert_called_once_with(mock_default_configuration_path)
def test_read_default_configuration_from_file_failure(
clear_configuration, mock_default_configuration_path, mock_toml_load
):
Configuration.set_default_configuration(None)
mock_default_configuration_path.exists.return_value = False
assert (
Configuration.default_configuration() is None
), "Default configuration should be None"
mock_toml_load.assert_not_called()
| 33.731707 | 75 | 0.809834 |
acdf7256ab48647c13d5746ca8e2e6dd817589aa | 768 | py | Python | imblearn/utils/tests/test_testing.py | christophe-rannou/imbalanced-learn | c3f3b0fd9815e206ea63f3f11728f097608bf580 | [
"MIT"
] | null | null | null | imblearn/utils/tests/test_testing.py | christophe-rannou/imbalanced-learn | c3f3b0fd9815e206ea63f3f11728f097608bf580 | [
"MIT"
] | null | null | null | imblearn/utils/tests/test_testing.py | christophe-rannou/imbalanced-learn | c3f3b0fd9815e206ea63f3f11728f097608bf580 | [
"MIT"
] | null | null | null | from sklearn.utils.testing import assert_raises_regex
from imblearn.base import SamplerMixin
from imblearn.utils.testing import all_estimators
def test_all_estimators():
    # The type_filter argument must accept both a single string and a list.
    all_estimators(type_filter='sampler')
    estimators = all_estimators(type_filter=['sampler'])
    # Every returned (name, class) pair must be a sampler.
    for entry in estimators:
        assert issubclass(entry[1], SamplerMixin)
    # An unknown filter value must raise a descriptive ValueError.
    assert_raises_regex(ValueError, "Parameter type_filter must be 'sampler'",
                        all_estimators, type_filter='rnd')
acdf727fdbdc6a46cf1a1614c7a7771b0f8971e9 | 189 | py | Python | tiger_app/tiger_app/doctype/phonebook/phonebook.py | amran-quantum/tiger_app | da29ef4148186b9ea4636a0aaa0758a933dddcbb | [
"MIT"
] | null | null | null | tiger_app/tiger_app/doctype/phonebook/phonebook.py | amran-quantum/tiger_app | da29ef4148186b9ea4636a0aaa0758a933dddcbb | [
"MIT"
] | null | null | null | tiger_app/tiger_app/doctype/phonebook/phonebook.py | amran-quantum/tiger_app | da29ef4148186b9ea4636a0aaa0758a933dddcbb | [
"MIT"
] | null | null | null | # Copyright (c) 2021, amran and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class Phonebook(Document):
	"""Frappe DocType controller for Phonebook entries.

	No custom behaviour; everything is inherited from
	``frappe.model.document.Document``.
	"""
	pass
| 21 | 49 | 0.78836 |
acdf73892a7acd3a886218ecfdddab3214d6c18b | 1,348 | py | Python | main.py | aosothra/l6_api_dev | fd4a80aa6f99e018ce4827c1017c528153057118 | [
"MIT"
] | null | null | null | main.py | aosothra/l6_api_dev | fd4a80aa6f99e018ce4827c1017c528153057118 | [
"MIT"
] | null | null | null | main.py | aosothra/l6_api_dev | fd4a80aa6f99e018ce4827c1017c528153057118 | [
"MIT"
] | null | null | null | import os
from random import Random
import requests
from dotenv import load_dotenv
import vk_basic as vk
from xkcd_fetcher import get_xkcd_count, get_xkcd_by_index
def download_image(url, filename='xkcd.png'):
    """Download the image at *url* and save it to *filename*.

    The default filename matches the hard-coded path the rest of the
    script uses, so existing callers are unaffected.

    Raises requests.HTTPError on a non-2xx response.
    """
    response = requests.get(url)
    response.raise_for_status()
    with open(filename, 'wb') as file:
        file.write(response.content)
with open('xkcd.png', 'rb') as file:
files = {
'photo': file
}
response = requests.post(url, files=files)
response.raise_for_status()
return response.json()
def post_image_to_vk(vk_token, vk_group, caption):
    """Upload the previously downloaded comic image to VK and publish it
    on the group wall with *caption*."""
    upload_url = vk.get_wall_upload_server(vk_token, vk_group)
    uploaded_photo = upload_image(upload_url)
    photo_id, owner_id = vk.save_wall_photo(vk_token, vk_group, uploaded_photo)
    vk.wall_post(vk_token, vk_group, photo_id, owner_id, caption)
def main():
    """Fetch a random xkcd comic and post it to the configured VK group."""
    load_dotenv()
    vk_token = os.getenv('VK_ACCESS_TOKEN')
    vk_group = int(os.getenv('VK_GROUP_ID'))
    comic_count = get_xkcd_count()
    # NOTE(review): lower bound 100 skips the first 99 comics; the reason is
    # not documented here -- confirm before changing.
    comic_index = Random().randint(100, comic_count)
    comic_url, comic_caption = get_xkcd_by_index(comic_index)
    download_image(comic_url)
    try:
        post_image_to_vk(vk_token, vk_group, comic_caption)
    finally:
        # Always remove the temporary image, even if posting failed.
        os.remove('xkcd.png')
if __name__ == '__main__':
main()
| 22.466667 | 81 | 0.695104 |
acdf744106141c038bfcdd49921c6cb140572c23 | 19,585 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_03_01/operations/_load_balancers_operations.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 1 | 2021-06-02T08:01:35.000Z | 2021-06-02T08:01:35.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_03_01/operations/_load_balancers_operations.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 226 | 2019-07-24T07:57:21.000Z | 2019-10-15T01:07:24.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2017_03_01/operations/_load_balancers_operations.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 1 | 2019-06-17T22:18:23.000Z | 2019-06-17T22:18:23.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class LoadBalancersOperations(object):
"""LoadBalancersOperations operations.
You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-03-01".
"""
models = models
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client used to build and send all HTTP requests.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Service API version sent with every request from this class.
        self.api_version = "2017-03-01"
        self.config = config
    def _delete_initial(
            self, resource_group_name, load_balancer_name, custom_headers=None, raw=False, **operation_config):
        """Send the initial DELETE request for a load balancer.

        Returns as soon as the service accepts the request; long-running
        operation polling is handled by the public :meth:`delete`.
        Raises CloudError on any status other than 200, 202 or 204.
        """
        # Construct URL
        url = self.delete.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        if self.config.generate_client_request_id:
            # Correlation id so the request can be traced on the service side.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.delete(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)
        if response.status_code not in [200, 202, 204]:
            # Anything other than OK / Accepted / No Content is an error.
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        if raw:
            # Caller asked for the raw transport response (no body for DELETE).
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    def delete(
            self, resource_group_name, load_balancer_name, custom_headers=None, raw=False, polling=True, **operation_config):
        """Deletes the specified load balancer.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns None or
         ClientRawResponse<None> if raw==True
        :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Kick off the long-running operation with the raw initial response.
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            load_balancer_name=load_balancer_name,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )

        def get_long_running_output(response):
            # DELETE returns no body; only wrap the raw response if asked.
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response

        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'}
    def get(
            self, resource_group_name, load_balancer_name, expand=None, custom_headers=None, raw=False, **operation_config):
        """Gets the specified load balancer.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :param expand: Expands referenced resources.
        :type expand: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: LoadBalancer or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.network.v2017_03_01.models.LoadBalancer or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        # Deserialize the body only on success.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('LoadBalancer', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'}
    def _create_or_update_initial(
            self, resource_group_name, load_balancer_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Send the initial PUT request; ``create_or_update`` polls it."""
        # Construct URL
        url = self.create_or_update.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'LoadBalancer')

        # Construct and send request
        request = self._client.put(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        # 200 = updated existing resource, 201 = created a new one.
        if response.status_code not in [200, 201]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('LoadBalancer', response)
        if response.status_code == 201:
            deserialized = self._deserialize('LoadBalancer', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    def create_or_update(
            self, resource_group_name, load_balancer_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
        """Creates or updates a load balancer.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :param parameters: Parameters supplied to the create or update load
         balancer operation.
        :type parameters: ~azure.mgmt.network.v2017_03_01.models.LoadBalancer
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns LoadBalancer or
         ClientRawResponse<LoadBalancer> if raw==True
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2017_03_01.models.LoadBalancer]
         or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2017_03_01.models.LoadBalancer]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Kick off the long-running PUT with the raw initial response.
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            load_balancer_name=load_balancer_name,
            parameters=parameters,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )

        def get_long_running_output(response):
            # Final response carries the resulting LoadBalancer resource.
            deserialized = self._deserialize('LoadBalancer', response)

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response

            return deserialized

        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'}
    def list_all(
            self, custom_headers=None, raw=False, **operation_config):
        """Gets all the load balancers in a subscription.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of LoadBalancer
        :rtype:
         ~azure.mgmt.network.v2017_03_01.models.LoadBalancerPaged[~azure.mgmt.network.v2017_03_01.models.LoadBalancer]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def prepare_request(next_link=None):
            # First page is built from metadata; later pages reuse the
            # server-provided nextLink URL verbatim.
            if not next_link:
                # Construct URL
                url = self.list_all.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request

        def internal_paging(next_link=None):
            # Fetch one page; the paged deserializer calls this repeatedly.
            request = prepare_request(next_link)

            response = self._client.send(request, stream=False, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        header_dict = None
        if raw:
            header_dict = {}
        deserialized = models.LoadBalancerPaged(internal_paging, self._deserialize.dependencies, header_dict)

        return deserialized
    list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/loadBalancers'}
    def list(
            self, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """Gets all the load balancers in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of LoadBalancer
        :rtype:
         ~azure.mgmt.network.v2017_03_01.models.LoadBalancerPaged[~azure.mgmt.network.v2017_03_01.models.LoadBalancer]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def prepare_request(next_link=None):
            # First page is built from metadata; later pages reuse the
            # server-provided nextLink URL verbatim.
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            return request

        def internal_paging(next_link=None):
            # Fetch one page; the paged deserializer calls this repeatedly.
            request = prepare_request(next_link)

            response = self._client.send(request, stream=False, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        header_dict = None
        if raw:
            header_dict = {}
        deserialized = models.LoadBalancerPaged(internal_paging, self._deserialize.dependencies, header_dict)

        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers'}
| 46.082353 | 170 | 0.664948 |
acdf76199f60bfecdf949681fd5870a1ce135720 | 1,263 | py | Python | mwlib/myjson.py | pediapress/mwlib | 1074fbc429e65180de09671dbc369ed1eeb2345d | [
"Unlicense"
] | 53 | 2015-02-17T16:20:06.000Z | 2022-03-18T09:22:00.000Z | mwlib/myjson.py | pediapress/mwlib | 1074fbc429e65180de09671dbc369ed1eeb2345d | [
"Unlicense"
] | 23 | 2015-01-30T16:26:20.000Z | 2022-03-11T23:26:03.000Z | mwlib/myjson.py | pediapress/mwlib | 1074fbc429e65180de09671dbc369ed1eeb2345d | [
"Unlicense"
] | 19 | 2015-01-21T13:55:46.000Z | 2019-02-23T23:14:31.000Z | # Copyright (c) 2007-2009 PediaPress GmbH
# See README.rst for additional licensing information.
"""custom json encoder/decoder, which can handle metabook objects"""
from mwlib import metabook
try:
import simplejson as json
except ImportError:
import json
from json import loads # protect us against http://pypi.python.org/pypi/python-json/
def object_hook(dct):
    """JSON object_hook that rebuilds metabook objects from plain dicts.

    A dict whose ``"type"`` value names a known metabook class is converted
    to an instance of that class; any other dict is returned unchanged.
    """
    # Avoid shadowing the builtin ``type`` and the try/except dance.
    obj_type = dct.get("type")
    if obj_type in ("collection", "article", "chapter", "source",
                    "interwiki", "license", "wikiconf", "custom"):
        klass = getattr(metabook, obj_type)
        # JSON keys may be unicode; constructors expect plain str kwargs.
        kwargs = {str(k): v for k, v in dct.items()}
        kwargs["type"] = obj_type
        return klass(**kwargs)
    return dct
class mbencoder(json.JSONEncoder):
    """JSONEncoder that understands objects exposing a ``_json`` method."""

    def default(self, obj):
        # metabook objects serialize themselves via ``_json()``.
        if hasattr(obj, "_json"):
            return obj._json()
        # Anything else is unsupported; the base implementation raises.
        return json.JSONEncoder.default(self, obj)
def loads(data):
    """Deserialize *data*, converting known dicts to metabook objects."""
    return json.loads(data, object_hook=object_hook)
def dump(obj, fp, **kw):
    """Serialize *obj* to file-like *fp*, handling metabook objects."""
    return json.dump(obj, fp, cls=mbencoder, **kw)
def dumps(obj, **kw):
    """Serialize *obj* to a JSON string, handling metabook objects."""
    return json.dumps(obj, cls=mbencoder, **kw)
def load(fp):
    """Deserialize file-like *fp*, converting known dicts to metabook objects."""
    return json.load(fp, object_hook=object_hook)
| 22.157895 | 89 | 0.617577 |
acdf778932c42cba2be01db176854c93762ee7af | 6,123 | py | Python | captioning/utils.py | divyanshu25/ImageQuery | 4d2679fc0f06ead60f6a1e4aeb5693e155186703 | [
"Apache-2.0"
] | null | null | null | captioning/utils.py | divyanshu25/ImageQuery | 4d2679fc0f06ead60f6a1e4aeb5693e155186703 | [
"Apache-2.0"
] | null | null | null | captioning/utils.py | divyanshu25/ImageQuery | 4d2679fc0f06ead60f6a1e4aeb5693e155186703 | [
"Apache-2.0"
] | null | null | null | # ================================================================
# Copyright 2020 Image Query Team
# #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# #
# http://www.apache.org/licenses/LICENSE-2.0
# #
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================
from captioning.captioning_config import Config
import matplotlib.pyplot as plt
import numpy as np
import nltk
import torch
import math
from collections import Counter
from torchtext.data.metrics import bleu_score, _compute_ngram_counter
def imshow(img, txt=None):
    """Display a normalized CHW tensor image with an optional text overlay."""
    # Undo the dataset normalization (unnormalize back into [0, 1]).
    unnormalized = img / 2 + 0.5
    # Matplotlib expects HWC ordering, so move the channel axis last.
    pixels = np.transpose(unnormalized.numpy(), (1, 2, 0))
    plt.imshow(pixels)
    plt.text(0, 0, s=txt, bbox=dict(facecolor="red", alpha=0.5))
    plt.show()
def clean_sentence(output, vocab, bert=None, use_bert=False):
    """Convert a sequence of token ids into a human-readable sentence.

    Decoding stops after the end-of-sequence id (102 for the BERT path,
    1 for the plain vocabulary); the first and last collected tokens
    (start/end markers) are dropped before joining.
    """
    tokens = []
    if use_bert:
        for idx in output:
            tokens.append(bert.get_tokenizer().convert_ids_to_tokens(idx))
            if idx == 102:  # BERT [SEP] id marks the end of the caption
                break
    else:
        for idx in output:
            tokens.append(vocab.idx2word[idx])
            if idx == 1:  # <end> token of the custom vocabulary
                break
    # Strip the leading start token and trailing end token.
    return " ".join(tokens[1:-1]).capitalize()
def convert_captions(images, target, vocab, config, bert=None):
    """Tokenize raw caption strings into fixed-length id tensors.

    Returns ``(images, all_captions, caption_lengths)`` where the last two
    are ``None`` when *target* is empty. Captions are padded (or truncated,
    re-appending the end token) to ``config.max_length``.
    ``caption_lengths`` records the pre-padding length of each caption.
    """
    # images, target = input
    all_captions = None
    caption_lengths = None
    if len(target) > 0:
        # NOTE(review): when the number of caption groups differs from the
        # batch size, only the first group is used — confirm this matches
        # the data loader's output shape.
        if not len(target) == config.batch_size:
            target = target[0]
        all_captions = []
        caption_lengths = []
        for c in target:
            caption = []
            if not config.enable_bert:
                # Custom-vocabulary path: <start> + word ids + <end>.
                tokens = nltk.tokenize.word_tokenize(str(c).lower())
                caption.append(vocab(vocab.start_word))
                caption.extend([vocab(token) for token in tokens])
                caption.append(vocab(vocab.end_word))
                cap_length = len(caption)
                if cap_length < config.max_length:
                    # Pad up to the fixed length with the <pad> id.
                    for i in range(config.max_length - len(caption)):
                        caption.append(vocab(vocab.pad_word))
                else:
                    # Truncate and re-append the end token so the caption
                    # always terminates properly.
                    caption = caption[0 : config.max_length - 1]
                    caption.append(vocab(vocab.end_word))
                    cap_length = len(caption)
                caption_lengths.append(cap_length)
            else:
                # BERT path: [CLS] + wordpiece ids + [SEP], padded with [SEP].
                tokenizer = bert.get_tokenizer()
                tokens = tokenizer.tokenize(str(c).lower())
                caption.append(tokenizer.cls_token_id)
                caption.extend(
                    [tokenizer.convert_tokens_to_ids(token) for token in tokens]
                )
                caption.append(tokenizer.sep_token_id)
                cap_length = len(caption)
                if cap_length < config.max_length:
                    for i in range(config.max_length - len(caption)):
                        caption.append(tokenizer.sep_token_id)
                else:
                    caption = caption[0 : config.max_length - 1]
                    caption.append(tokenizer.sep_token_id)
                    cap_length = len(caption)
                caption_lengths.append(cap_length)
            all_captions.append(caption)
        # caption = caption[:1]
        all_captions = torch.Tensor(all_captions).long()
        caption_lengths = torch.Tensor(caption_lengths).unsqueeze(1).long()
    return images, all_captions, caption_lengths
def get_term_weights(references_corpus):
    """Weight each unigram by its rarity across the reference corpus.

    Frequent terms receive weights near (or below) zero while rare terms
    approach one, so ``custom_bleu`` can reward matches on uncommon words.

    :param references_corpus: iterable of tokenized reference sentences.
    :return: dict mapping unigram tuples to float weights.
    """
    term_count = Counter()
    for ref in references_corpus:
        # In-place update is linear per reference, whereas the previous
        # ``term_count + count`` rebuilt a new Counter on every iteration.
        term_count.update(_compute_ngram_counter(ref, 1))
    total_count = sum(term_count.values())
    if not total_count:
        # Empty corpus: no terms, no weights (avoids division by zero).
        return {}
    return {k: 1 - (v * 2 / total_count) for k, v in term_count.items()}
def custom_bleu(candidate_corpus, references_corpus, max_n=4, weights=None, term_weights=None):
    """BLEU with optional per-term weighting of clipped n-gram matches.

    :param candidate_corpus: iterable of tokenized candidate sentences.
    :param references_corpus: iterable of lists of tokenized references.
    :param max_n: maximum n-gram order considered.
    :param weights: per-order log weights; defaults to uniform 0.25 x 4.
    :param term_weights: optional unigram weight map (see get_term_weights).
    :return: float BLEU score (0.0 when any order has no clipped matches).
    """
    # Avoid mutable default arguments ([0.25]*4 / {}) shared across calls.
    if weights is None:
        weights = [0.25] * max_n
    if term_weights is None:
        term_weights = {}
    assert max_n == len(weights), 'Length of the "weights" list has be equal to max_n'
    assert len(candidate_corpus) == len(references_corpus), \
        'The length of candidate and reference corpus should be the same'
    clipped_counts = torch.zeros(max_n)
    total_counts = torch.zeros(max_n)
    weights = torch.tensor(weights)
    candidate_len = 0.0
    refs_len = 0.0
    for (candidate, refs) in zip(candidate_corpus, references_corpus):
        candidate_len += len(candidate)
        # Get the length of the reference that's closest in length to the candidate
        refs_len_list = [float(len(ref)) for ref in refs]
        refs_len += min(refs_len_list, key=lambda x: abs(len(candidate) - x))
        # Element-wise max over all references gives the clipping ceiling.
        reference_counters = _compute_ngram_counter(refs[0], max_n)
        for ref in refs[1:]:
            reference_counters = reference_counters | _compute_ngram_counter(ref, max_n)
        candidate_counter = _compute_ngram_counter(candidate, max_n)
        clipped_counter = candidate_counter & reference_counters
        for ngram in clipped_counter:
            score = clipped_counter[ngram]
            # Scale matches on weighted (typically rare) unigrams.
            if ngram in term_weights:
                score = clipped_counter[ngram] * term_weights[ngram]
            clipped_counts[len(ngram) - 1] += score
        for ngram in candidate_counter:  # TODO: no need to loop through the whole counter
            total_counts[len(ngram) - 1] += candidate_counter[ngram]
    if min(clipped_counts) == 0:
        return 0.0
    else:
        pn = clipped_counts / total_counts
        log_pn = weights * torch.log(pn)
        score = torch.exp(sum(log_pn))
        # Brevity penalty for candidates shorter than their references.
        bp = math.exp(min(1 - refs_len / candidate_len, 0))
        return bp * score.item()
acdf77a8981b4110abf47da60e56100c2fdf221a | 1,312 | py | Python | test.py | arthurgtllr/doggos-classifier | 23002b011bebdba1248c2f50fba5070a9f82c919 | [
"MIT"
] | null | null | null | test.py | arthurgtllr/doggos-classifier | 23002b011bebdba1248c2f50fba5070a9f82c919 | [
"MIT"
] | null | null | null | test.py | arthurgtllr/doggos-classifier | 23002b011bebdba1248c2f50fba5070a9f82c919 | [
"MIT"
] | null | null | null | import pandas as pd
import constants
import matplotlib.pyplot as plt
import numpy as np
df = pd.read_csv(constants.RAW_DATA_PATH + '/../raw_data.csv')
datasets = list(set(df['dataset'].values.tolist()))
classes = list(set(df['class'].values.tolist()))
N = len(classes)
ind = np.arange(N) # the x locations for the groups
width = 0.5 # the width of the bars: can also be len(x) sequence
plt.rcParams.update({'font.size': 25})
nbs_oxford = []
for doggo_class in classes:
new_df = df[(df['class']==doggo_class)&(df['dataset']=='oxford')]
nb = new_df.shape[0]
nbs_oxford.append(nb)
p1 = plt.bar(ind, nbs_oxford, width)
nbs_stanford = []
for doggo_class in classes:
new_df = df[(df['class']==doggo_class)&(df['dataset']=='stanford')]
nb = new_df.shape[0]
nbs_stanford.append(nb)
p2 = plt.bar(ind, nbs_stanford, width, bottom=nbs_oxford)
nbs_udacity = []
for doggo_class in classes:
new_df = df[(df['class']==doggo_class)&(df['dataset']=='udacity')]
nb = new_df.shape[0]
nbs_udacity.append(nb)
p3 = plt.bar(ind, nbs_udacity, width, bottom=[nbs_stanford[i] + nbs_oxford[i] for i in range(len(classes))])
plt.xticks(ind, classes)
plt.yticks(np.arange(0, 2000, 500))
plt.legend((p1[0], p2[0], p3[0]), ['oxford', 'stanford', 'udacity'])
plt.show() | 27.914894 | 110 | 0.664634 |
acdf77de3237768ee8fe1a7f44622b998eeb4750 | 625 | py | Python | gammapy/utils/tests/test_array.py | grburgess/gammapy | 609e460698caca7223afeef5e71826c7b32728d1 | [
"BSD-3-Clause"
] | 3 | 2019-01-28T12:21:14.000Z | 2019-02-10T19:58:07.000Z | gammapy/utils/tests/test_array.py | grburgess/gammapy | 609e460698caca7223afeef5e71826c7b32728d1 | [
"BSD-3-Clause"
] | null | null | null | gammapy/utils/tests/test_array.py | grburgess/gammapy | 609e460698caca7223afeef5e71826c7b32728d1 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from ..array import array_stats_str, shape_2N
def test_array_stats_str():
    """array_stats_str formats size/min/max for scalars and sequences."""
    # A scalar is reported as a size-1 array with equal min and max.
    actual = array_stats_str(np.pi, 'pi')
    assert actual == 'pi : size = 1, min = 3.142, max = 3.142\n'

    # Without a label the summary omits the leading name.
    actual = array_stats_str([np.pi, 42])
    assert actual == 'size = 2, min = 3.142, max = 42.000\n'
def test_shape_2N():
    """shape_2N pads each axis length up to a multiple of 2 ** N."""
    original = (34, 89, 120, 444)
    padded = shape_2N(shape=original, N=3)
    assert padded == (40, 96, 128, 448)
acdf784ed8483a6672ac5eba9f5f9a6b1aa9877d | 5,655 | py | Python | tptp/utils/concurrent/threadedTaskExecuter.py | leoprover/tptp | a670c903bd81990a59ffa7ec19cc0666dd3b5c55 | [
"BSD-3-Clause"
] | 6 | 2019-08-15T13:12:13.000Z | 2021-08-09T12:07:51.000Z | tptp/utils/concurrent/threadedTaskExecuter.py | leoprover/tptp | a670c903bd81990a59ffa7ec19cc0666dd3b5c55 | [
"BSD-3-Clause"
] | null | null | null | tptp/utils/concurrent/threadedTaskExecuter.py | leoprover/tptp | a670c903bd81990a59ffa7ec19cc0666dd3b5c55 | [
"BSD-3-Clause"
] | null | null | null | import logging
import collections
import subprocess
from concurrent import futures
logger = logging.getLogger(__name__)
class ThreadedTaskExecuter:
    '''
    Usage:
    * submit(self, task) to queue a "task" for executing. Each task needs a "run()" method
    * wait(self) to wait for the termination of all submitted tasks
    * onStart(self, task) needs to be overloaded
      - is called directly after the execution of the task is started
    * onFinish(self, task, result) needs to be overloaded
      - is called iff the "task" finished successfully. "result" contains the result of the task
      - allows submitting a task using self.submit(task) inside this function call
    * onCanceled(self, task) needs to be overloaded
      - is called iff the task is canceled
      - allows submitting a task using self.submit(task) inside this function call
    * onError(self, error) needs to be overloaded
      - is called iff the task run method has thrown an exception

    Behaviour:
    * all callbacks will be called in the same thread as 'wait(self)' is called
    * a submitted task has several states
      1. SCHEDULED: the task is enqueued for execution but the execution is not yet started.
         - @see scheduled(self)
      2. ACTIVE: the task is started but is waiting to be executed (by the underlying ThreadPoolExecutor).
         - onStart(self, task) is called directly after submitting the task to the underlying ThreadPoolExecutor
         - a task is submitted to the underlying ThreadPoolExecutor iff the number of tasks currently executed by
           the ThreadPoolExecutor is smaller than the threads available to the executer. Hence, the
           time a task is in this state is really short (context change time to the actual executed thread)
         - @see active(self)
      3. RUNNING: the task is currently executed (by the underlying ThreadPoolExecutor)
         - @see running(self)

    Warning:
    * methods of this class are NOT threadsafe themselves, call all methods as well as the constructor from the SAME thread!
    '''
    def __init__(self, *,
                 threads=2
                 ):
        # Worker pool that actually runs task.run().
        self.executor = futures.ThreadPoolExecutor(max_workers=threads)
        # Tasks accepted via submit() but not yet handed to the pool.
        self._scheduledTasks = collections.deque()
        # Futures of tasks currently handed to the pool (active or running).
        self._activeFutures = set()
        self._threads = threads

    def scheduled(self):
        '''
        Get all tasks which are scheduled but not yet active (not submitted to the executer pool yet).
        '''
        return self._scheduledTasks

    def active(self):
        '''
        Get all tasks which are send to execution (submitted to the underlying executer pool)
        '''
        scheduled = []
        for future in self._activeFutures:
            scheduled.append(future.task)
        return scheduled

    def running(self):
        '''
        Get all tasks which execution is currently running (running in the underlying executer pool)
        '''
        running = []
        for future in self._activeFutures:
            if future.running():
                running.append(future.task)
        return running

    def submit(self, task):
        '''
        Submit a new task, for execution.
        '''
        logger.debug('schedule {}'.format(task))
        self._scheduledTasks.append(task)
        self._refillActiveTasks()

    def _refillActiveTasks(self):
        '''
        Add threads to the executer as long as the maximum threadcount is not reached.
        This ensures that:
        - onStart(self, task) is called close to the actual start of the task
        - onStart(self, task) is called in the main thread (the thread where this TaskExecuter is used)
        '''
        numUsed = len(self._activeFutures)
        numOpen = self._threads - numUsed
        numScheduled = len(self._scheduledTasks)
        numToAdd = min(numOpen, numScheduled)
        logger.debug('refill used,open,scheduled: [{}/{}/{}]: {}/{}'.format(numUsed, numOpen, numScheduled, self._activeFutures, self._scheduledTasks))
        for i in range(0, numToAdd):
            task = self._scheduledTasks.popleft()
            future = self.executor.submit(task.run)
            # Cross-link task and future so callbacks and cancle() can
            # navigate from one to the other.
            future.task = task
            task.future = future
            self._activeFutures.add(future)
            self.onStart(task)

    def wait(self):
        '''
        Wait for all tasks to be finished.
        '''
        while len(self._scheduledTasks) + len(self._activeFutures) > 0:
            done, not_done = futures.wait(self._activeFutures,
                                          return_when=futures.FIRST_COMPLETED
                                          )
            for future in done:
                self._activeFutures.remove(future)
                self._onFinish(future)
            # Callbacks may have submitted new tasks; top up the pool.
            self._refillActiveTasks()

    def cancle(self, task):
        # NOTE(review): the misspelled name ("cancle") is part of the public
        # interface and is kept for compatibility.
        # NOTE(review): ``task.future`` is only assigned once the task became
        # active (_refillActiveTasks); cancelling a still-scheduled task
        # would raise AttributeError here — confirm intended usage.
        task.future.cancel()

    def _onFinish(self, future):
        # Dispatch a completed future to the matching user callback.
        task = future.task
        try:
            result = future.result()
            logger.debug('onFinish {} {}'.format(task, result))
            self.onFinish(task, result)
        except futures.CancelledError as error:
            logger.debug('onCanceled {}'.format(task))
            self.onCanceled(task)
        except Exception as error:
            logger.debug('onError {} {}'.format(task, repr(error)))
            self.onError(task, error)

    def onStart(self, task):
        raise NotImplementedError()

    def onFinish(self, task, result):
        raise NotImplementedError()

    def onCanceled(self, task):
        raise NotImplementedError()

    def onError(self, task, error):
        raise NotImplementedError()
acdf7939bfd0a90701449cf2ec9148cb51902adc | 1,010 | py | Python | box/kodi/addons/plugin.video.bilibili2/resources/lib/utils.py | ddggkk/NAS-System-for-Raspberry-Pi | 62b63ee027896a68cc4bc3962811e6c49fc0d0d1 | [
"Apache-2.0"
] | 2 | 2019-08-16T01:47:34.000Z | 2021-06-16T05:09:56.000Z | box/kodi/addons/plugin.video.bilibili2/resources/lib/utils.py | ddggkk/NAS-System-for-Raspberry-Pi | 62b63ee027896a68cc4bc3962811e6c49fc0d0d1 | [
"Apache-2.0"
] | null | null | null | box/kodi/addons/plugin.video.bilibili2/resources/lib/utils.py | ddggkk/NAS-System-for-Raspberry-Pi | 62b63ee027896a68cc4bc3962811e6c49fc0d0d1 | [
"Apache-2.0"
] | null | null | null | #coding: utf8
import urllib2
import zlib
import gzip
from io import BytesIO
def _get_gzip_content(content):
bytes_buffer = BytesIO(content)
return gzip.GzipFile(fileobj=bytes_buffer).read()
def _get_zlib_content(content):
page_content = zlib.decompress(content)
return page_content
def get_page_content(page_full_url, data = None, headers = {}):
try:
ua = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0.2) Gecko/20100101 Firefox/6.0.2'}
ua.update(headers)
req = urllib2.Request(page_full_url, headers=ua, data = data)
print (req.headers.items())
response = urllib2.urlopen(req)
print(response.info())
if response.headers.get('content-encoding', '') == 'gzip':
return _get_gzip_content(response.read())
elif response.headers.get('content-encoding', '') == 'deflate':
return _get_zlib_content(response.read())
else:
return response.read()
except:
return ''
| 32.580645 | 105 | 0.652475 |
acdf7e47df9fe94188bef9eeede62d28e84949b6 | 43,009 | py | Python | cca_zoo/model_selection/_search.py | LegrandNico/cca_zoo | 03497ef4d8434f847435b572bff92b2095fbc4fc | [
"MIT"
] | null | null | null | cca_zoo/model_selection/_search.py | LegrandNico/cca_zoo | 03497ef4d8434f847435b572bff92b2095fbc4fc | [
"MIT"
] | null | null | null | cca_zoo/model_selection/_search.py | LegrandNico/cca_zoo | 03497ef4d8434f847435b572bff92b2095fbc4fc | [
"MIT"
] | 1 | 2021-06-28T18:31:45.000Z | 2021-06-28T18:31:45.000Z | # Author: James Chapman
# This code heavily leans on the scikit-learn original
# Original Authors:
# Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Olivier Grisel <olivier.grisel@ensta.org>
# Raghav RV <rvraghav93@gmail.com>
import itertools
import numbers
import time
from collections import defaultdict
from itertools import product
from typing import Mapping, Iterable
import numpy as np
from joblib import Parallel
from mvlearn.compose import SimpleSplitter
from mvlearn.utils import check_Xs
from sklearn import clone
from sklearn.base import is_classifier
from sklearn.metrics import check_scoring
from sklearn.metrics._scorer import _check_multimetric_scoring
from sklearn.model_selection import check_cv
from sklearn.model_selection._search import (
BaseSearchCV as SKBaseSearchCV,
ParameterGrid,
_check_param_grid,
)
from sklearn.model_selection._validation import _fit_and_score, _insert_error_scores
from sklearn.pipeline import Pipeline
from sklearn.utils import indexable
from sklearn.utils.fixes import delayed
from sklearn.utils.validation import _check_fit_params, check_random_state
def param2grid(params):
    """Convert multi-view parameter lists into a scikit-learn style grid.

    Any entry whose value contains per-view lists (a list of lists) is
    expanded into the cartesian product over views, so every grid candidate
    becomes either a single shared value or one value per view — the form
    expected by scikit-learn's ``ParameterGrid``.

    Parameters
    ----------
    params : dict
        Parameter dictionary; values may mix scalars and per-view lists.

    Returns
    -------
    dict
        Grid in which every multi-view entry has been expanded into
        explicit per-view combinations.

    Examples
    ---------
    >>> params = {'regs': [[1, 2], [3, 4]]}
    >>> param2grid(params)
    {'regs': [[1, 3], [1, 4], [2, 3], [2, 4]]}
    """
    grid = dict(params)
    for key, value in grid.items():
        # Only entries holding at least one per-view list need expanding.
        if any(isinstance(item, list) for item in value):
            # Wrap bare scalars so every position is a list before taking
            # the cartesian product across views.
            per_view = [item if isinstance(item, list) else [item] for item in value]
            grid[key] = [list(combo) for combo in itertools.product(*per_view)]
    return grid
class ParameterSampler:
    """Generator on parameters sampled from given distributions.

    Non-deterministic iterable over random candidate combinations for hyper-
    parameter search. If all parameters are presented as a list,
    sampling without replacement is performed. If at least one parameter
    is given as a distribution, sampling with replacement is used.
    It is highly recommended to use continuous distributions for continuous
    parameters.

    In this multiview variant a parameter value may also be a list with one
    entry per view, where each entry is itself a list or a distribution;
    a value is then drawn independently for every view.

    Parameters
    ----------
    param_distributions : dict
        Dictionary with parameters names (`str`) as keys and distributions
        or lists of parameters to try. Distributions must provide a ``rvs``
        method for sampling (such as those from scipy.stats.distributions).
        If a list is given, it is sampled uniformly.
        If a list of dicts is given, first a dict is sampled uniformly, and
        then a parameter is sampled using that dict as above.
    n_iter : int
        Number of parameter settings that are produced.
    random_state : int, RandomState instance or None, default=None
        Pseudo random number generator state used for random uniform sampling
        from lists of possible values instead of scipy.stats distributions.
        Pass an int for reproducible output across multiple
        function calls.

    Returns
    -------
    params : dict of str to any
        **Yields** dictionaries mapping each estimator parameter to a
        sampled value.
    """
    def __init__(self, param_distributions, n_iter, *, random_state=None):
        if not isinstance(param_distributions, (Mapping, Iterable)):
            raise TypeError(
                "Parameter distribution is not a dict or a list ({!r})".format(
                    param_distributions
                )
            )
        if isinstance(param_distributions, Mapping):
            # wrap dictionary in a singleton list to support either dict
            # or list of dicts
            param_distributions = [param_distributions]
        for dist in param_distributions:
            if not isinstance(dist, dict):
                raise TypeError(
                    "Parameter distribution is not a dict ({!r})".format(dist)
                )
            for key in dist:
                if isinstance(dist[key], Iterable):
                    # Per-view spec: every element must itself be sampleable
                    # (an iterable of choices or an ``rvs`` distribution).
                    if any(
                        not isinstance(view_param, Iterable)
                        and not hasattr(view_param, "rvs")
                        for view_param in dist[key]
                    ):
                        raise TypeError(
                            "Parameter value for at least one view is not iterable "
                            "or distribution (key={!r}, value={!r})".format(
                                key, dist[key]
                            )
                        )
                elif not hasattr(dist[key], "rvs"):
                    raise TypeError(
                        "Parameter value is not iterable "
                        "or distribution (key={!r}, value={!r})".format(key, dist[key])
                    )
        self.n_iter = n_iter
        self.random_state = random_state
        self.param_distributions = param_distributions
    def __iter__(self):
        # A single RNG drives the whole iteration so that an integer seed
        # yields a reproducible *sequence* of distinct samples.
        rng = check_random_state(self.random_state)
        for _ in range(self.n_iter):
            dist = rng.choice(self.param_distributions)
            # Always sort the keys of a dictionary, for reproducibility
            items = sorted(dist.items())
            params = dict()
            for k, v in items:
                # if value is an iterable then either the elements are the
                # choices themselves or each element is a spec for one view
                if isinstance(v, Iterable):
                    # per-view spec: at least one element is a non-string
                    # iterable or a distribution -> draw once per view
                    if any(
                        (isinstance(v_, Iterable) and not isinstance(v_, str))
                        or hasattr(v_, "rvs")
                        for v_ in v
                    ):
                        params[k] = [self.return_param(v_, rng=rng) for v_ in v]
                    # shared across views: the iterable holds plain values
                    else:
                        params[k] = self.return_param(v, rng=rng)
                # a bare distribution or constant
                else:
                    params[k] = self.return_param(v, rng=rng)
            yield params
    def return_param(self, v, rng=None):
        """Draw one value from ``v`` (a distribution, a list of choices, or a
        constant).

        Bug fix: previously this method re-seeded a fresh RNG from
        ``self.random_state`` on every call, so an integer seed made every
        draw return the identical value (the random stream never advanced).
        The caller's RNG is now threaded through; ``rng=None`` preserves the
        old call signature for external callers.
        """
        if rng is None:
            rng = check_random_state(self.random_state)
        if hasattr(v, "rvs"):
            param = v.rvs(random_state=rng)
        elif isinstance(v, Iterable) and not isinstance(v, str):
            param = v[rng.randint(len(v))]
        else:
            param = v
        return param
    def __len__(self):
        """Number of points that will be sampled."""
        return self.n_iter
class BaseSearchCV(SKBaseSearchCV):
    """Base class for multiview hyper-parameter search with cross-validation.

    Extends scikit-learn's ``BaseSearchCV`` so that ``fit`` accepts a list of
    views ``Xs`` instead of a single data matrix: the views are horizontally
    stacked and a ``SimpleSplitter`` pipeline step re-splits the columns back
    into views inside every CV fit, which lets the standard scikit-learn
    cross-validation machinery be reused unchanged.
    """
    def __init__(
        self,
        estimator,
        *,
        scoring=None,
        n_jobs=None,
        refit=True,
        cv=None,
        verbose=0,
        pre_dispatch="2*n_jobs",
        error_score=np.nan,
        return_train_score=True,
    ):
        # All options are forwarded untouched to the scikit-learn base class;
        # this subclass only changes how the data reaches the estimator.
        super().__init__(
            estimator=estimator,
            scoring=scoring,
            n_jobs=n_jobs,
            refit=refit,
            cv=cv,
            verbose=verbose,
            pre_dispatch=pre_dispatch,
            error_score=error_score,
            return_train_score=return_train_score,
        )
    def fit(self, Xs, y=None, *, groups=None, **fit_params):
        """Run fit with all sets of parameters.
        Parameters
        ----------
        Xs : list of array-likes, each of shape (n_samples, n_features_i)
            Training views, where `n_samples` is the number of samples and
            `n_features_i` is the number of features of view i.
        y : array-like of shape (n_samples, n_output) \
            or (n_samples,), default=None
            Target relative to X for classification or regression;
            None for unsupervised learning.
        groups : array-like of shape (n_samples,), default=None
            Group labels for the samples used while splitting the dataset into
            train/test set. Only used in conjunction with a "Group" :term:`cv`
            instance (e.g., :class:`~sklearn.model_selection.GroupKFold`).
        **fit_params : dict of str -> object
            Parameters passed to the ``fit`` method of the estimator.
        Returns
        -------
        self : object
            Instance of fitted estimator.
        """
        estimator = self.estimator
        refit_metric = "score"
        # Resolve the scoring spec into scorer callable(s); only the explicit
        # multimetric (list/tuple/dict) form needs refit validation here.
        if callable(self.scoring):
            scorers = self.scoring
        elif self.scoring is None or isinstance(self.scoring, str):
            scorers = check_scoring(self.estimator, self.scoring)
        else:
            scorers = _check_multimetric_scoring(self.estimator, self.scoring)
            self._check_refit_for_multimetric(scorers)
            refit_metric = self.refit
        # Only the first view goes through indexable/_check_fit_params because
        # CV indices are generated from Xs[0] below; the full list of views is
        # validated later by check_Xs inside evaluate_candidates.
        Xs[0], y, groups = indexable(Xs[0], y, groups)
        fit_params = _check_fit_params(Xs[0], fit_params)
        cv_orig = check_cv(self.cv, y, classifier=is_classifier(estimator))
        n_splits = cv_orig.get_n_splits(Xs[0], y, groups)
        base_estimator = clone(self.estimator)
        parallel = Parallel(n_jobs=self.n_jobs, pre_dispatch=self.pre_dispatch)
        fit_and_score_kwargs = dict(
            scorer=scorers,
            fit_params=fit_params,
            return_train_score=self.return_train_score,
            return_n_test_samples=True,
            return_times=True,
            return_parameters=False,
            error_score=self.error_score,
            verbose=self.verbose,
        )
        results = {}
        with parallel:
            all_candidate_params = []
            all_out = []
            all_more_results = defaultdict(list)
            # Closure handed to _run_search; it may be invoked multiple times
            # and accumulates every candidate/fold result into the lists above.
            def evaluate_candidates(candidate_params, cv=None, more_results=None):
                cv = cv or cv_orig
                candidate_params = list(candidate_params)
                n_candidates = len(candidate_params)
                if self.verbose > 0:
                    print(
                        "Fitting {0} folds for each of {1} candidates,"
                        " totalling {2} fits".format(
                            n_splits, n_candidates, n_candidates * n_splits
                        )
                    )
                # check_Xs validates the views; only the per-view feature
                # counts are used here (to configure the splitter step).
                X_transformed, _, _, n_features = check_Xs(
                    Xs, copy=True, return_dimensions=True
                )
                # Views are hstacked into one matrix for sklearn's CV
                # utilities; SimpleSplitter re-splits the columns back into
                # views before the wrapped estimator sees them.
                pipeline = Pipeline(
                    [
                        ("splitter", SimpleSplitter(n_features)),
                        ("estimator", clone(base_estimator)),
                    ]
                )
                out = parallel(
                    delayed(_fit_and_score)(
                        pipeline,
                        np.hstack(Xs),
                        y,
                        train=train,
                        test=test,
                        # candidate params address the estimator step inside
                        # the pipeline, hence the "estimator__" prefix
                        parameters={
                            f"estimator__{k}": v for k, v in parameters.items()
                        },
                        split_progress=(split_idx, n_splits),
                        candidate_progress=(cand_idx, n_candidates),
                        **fit_and_score_kwargs,
                    )
                    for (cand_idx, parameters), (split_idx, (train, test)) in product(
                        enumerate(candidate_params),
                        enumerate(cv.split(Xs[0], y, groups)),
                    )
                )
                if len(out) < 1:
                    raise ValueError(
                        "No fits were performed. "
                        "Was the CV iterator empty? "
                        "Were there no candidates?"
                    )
                elif len(out) != n_candidates * n_splits:
                    raise ValueError(
                        "cv.split and cv.get_n_splits returned "
                        "inconsistent results. Expected {} "
                        "splits, got {}".format(n_splits, len(out) // n_candidates)
                    )
                # For callable self.scoring, the return type is only know after
                # calling. If the return type is a dictionary, the error scores
                # can now be inserted with the correct key. The type checking
                # of out will be done in `_insert_error_scores`.
                if callable(self.scoring):
                    _insert_error_scores(out, self.error_score)
                all_candidate_params.extend(candidate_params)
                all_out.extend(out)
                if more_results is not None:
                    for key, value in more_results.items():
                        all_more_results[key].extend(value)
                # ``results`` lives in fit's scope so repeated calls keep
                # overwriting it with the cumulative table.
                nonlocal results
                results = self._format_results(
                    all_candidate_params, n_splits, all_out, all_more_results
                )
                return results
            self._run_search(evaluate_candidates)
            # multimetric is determined here because in the case of a callable
            # self.scoring the return type is only known after calling
            first_test_score = all_out[0]["test_scores"]
            self.multimetric_ = isinstance(first_test_score, dict)
            # check refit_metric now for a callabe scorer that is multimetric
            if callable(self.scoring) and self.multimetric_:
                self._check_refit_for_multimetric(first_test_score)
                refit_metric = self.refit
        # For multi-metric evaluation, store the best_index_, best_params_ and
        # best_score_ iff refit is one of the scorer names
        # In single metric evaluation, refit_metric is "score"
        if self.refit or not self.multimetric_:
            self.best_index_ = self._select_best_index(
                self.refit, refit_metric, results
            )
            if not callable(self.refit):
                # With a non-custom callable, we can select the best score
                # based on the best index
                self.best_score_ = results[f"mean_test_{refit_metric}"][
                    self.best_index_
                ]
            self.best_params_ = results["params"][self.best_index_]
        if self.refit:
            # we clone again after setting params in case some
            # of the params are estimators as well.
            self.best_estimator_ = clone(
                clone(base_estimator).set_params(**self.best_params_)
            )
            refit_start_time = time.time()
            # NOTE(review): the refit uses the raw list of views (no splitter
            # pipeline); the wrapped estimator accepts Xs directly here.
            if y is not None:
                self.best_estimator_.fit(Xs, y, **fit_params)
            else:
                self.best_estimator_.fit(Xs, **fit_params)
            refit_end_time = time.time()
            self.refit_time_ = refit_end_time - refit_start_time
            if hasattr(self.best_estimator_, "feature_names_in_"):
                self.feature_names_in_ = self.best_estimator_.feature_names_in_
        # Store the only scorer not as a dict for single metric evaluation
        self.scorer_ = scorers
        self.cv_results_ = results
        self.n_splits_ = n_splits
        return self
    @staticmethod
    def _select_best_index(refit, refit_metric, results):
        """Select index of the best combination of hyperparemeters."""
        if callable(refit):
            # If callable, refit is expected to return the index of the best
            # parameter set.
            best_index = refit(results)
            if not isinstance(best_index, numbers.Integral):
                raise TypeError("best_index_ returned is not an integer")
            if best_index < 0 or best_index >= len(results["params"]):
                raise IndexError("best_index_ index out of range")
        else:
            # Smallest rank (rank 1) marks the best mean test score.
            best_index = results[f"rank_test_{refit_metric}"].argmin()
        return best_index
class GridSearchCV(BaseSearchCV):
    """Exhaustive search over specified parameter values for an estimator.
    Important members are fit, predict.
    GridSearchCV implements a "fit" and a "score" method.
    It also implements "score_samples", "predict", "predict_proba",
    "decision_function", "transform" and "inverse_transform" if they are
    implemented in the estimator used.
    The parameters of the estimator used to apply these methods are optimized
    by cross-validated grid-search over a parameter grid.
    Parameters
    ----------
    estimator : estimator object
        This is assumed to implement the scikit-learn estimator interface.
        Either estimator needs to provide a ``score`` function,
        or ``scoring`` must be passed.
    param_grid : dict or list of dictionaries
        Dictionary with parameters names (`str`) as keys and lists of
        parameter settings to try as values, or a list of such
        dictionaries, in which case the grids spanned by each dictionary
        in the list are explored. This enables searching over any sequence
        of parameter settings. A value may also be a list with one list of
        settings per view; such entries are expanded over all per-view
        combinations (see :func:`param2grid`).
    scoring : str, callable, list, tuple or dict, default=None
        Strategy to evaluate the performance of the cross-validated model on
        the test set.
        If `scoring` represents a single score, one can use:
        - a single string (see :ref:`scoring_parameter`);
        - a callable (see :ref:`scoring`) that returns a single value.
        If `scoring` represents multiple scores, one can use:
        - a list or tuple of unique strings;
        - a callable returning a dictionary where the keys are the metric
          names and the values are the metric scores;
        - a dictionary with metric names as keys and callables as values.
    n_jobs : int, default=None
        Number of jobs to run in parallel.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors.
    refit : bool, str, or callable, default=True
        Refit an estimator using the best found parameters on the whole
        dataset.
        For multiple metric evaluation, this needs to be a `str` denoting the
        scorer that would be used to find the best parameters for refitting
        the estimator at the end.
        Where there are considerations other than maximum score in
        choosing a best estimator, ``refit`` can be set to a function which
        returns the selected ``best_index_`` given ``cv_results_``. In that
        case, the ``best_estimator_`` and ``best_params_`` will be set
        according to the returned ``best_index_`` while the ``best_score_``
        attribute will not be available.
        The refitted estimator is made available at the ``best_estimator_``
        attribute and permits using ``predict`` directly on this
        ``GridSearchCV`` instance.
        Also for multiple metric evaluation, the attributes ``best_index_``,
        ``best_score_`` and ``best_params_`` will only be available if
        ``refit`` is set and all of them will be determined w.r.t this specific
        scorer.
        See ``scoring`` parameter to know more about multiple metric
        evaluation.
    cv : int, cross-validation generator or an iterable, default=None
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 5-fold cross validation,
        - integer, to specify the number of folds in a `(Stratified)KFold`,
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used. These splitters are instantiated
        with `shuffle=False` so the splits will be the same across calls.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    verbose : int
        Controls the verbosity: the higher, the more messages.
        - >1 : the computation time for each fold and parameter candidate is
          displayed;
        - >2 : the score is also displayed;
        - >3 : the fold and candidate parameter indexes are also displayed
          together with the starting time of the computation.
    pre_dispatch : int, or str, default='2*n_jobs'
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:
        - None, in which case all the jobs are immediately
          created and spawned. Use this for lightweight and
          fast-running jobs, to avoid delays due to on-demand
          spawning of the jobs
        - An int, giving the exact number of total jobs that are
          spawned
        - A str, giving an expression as a function of n_jobs,
          as in '2*n_jobs'
    error_score : 'raise' or numeric, default=np.nan
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.
    return_train_score : bool, default=False
        If ``False``, the ``cv_results_`` attribute will not include training
        scores.
        Computing training scores is used to get insights on how different
        parameter settings impact the overfitting/underfitting trade-off.
        However computing the scores on the training set can be computationally
        expensive and is not strictly required to select the parameters that
        yield the best generalization performance.
    Attributes
    ----------
    cv_results_ : dict of numpy (masked) ndarrays
        A dict with keys as column headers and values as columns, that can be
        imported into a pandas ``DataFrame``
        will be represented by a ``cv_results_`` dict of::
            {
            'param_kernel': masked_array(data = ['poly', 'poly', 'rbf', 'rbf'],
                                         mask = [False False False False]...)
            'param_gamma': masked_array(data = [-- -- 0.1 0.2],
                                        mask = [ True True False False]...),
            'param_degree': masked_array(data = [2.0 3.0 -- --],
                                         mask = [False False True True]...),
            'split0_test_score' : [0.80, 0.70, 0.80, 0.93],
            'split1_test_score' : [0.82, 0.50, 0.70, 0.78],
            'mean_test_score' : [0.81, 0.60, 0.75, 0.85],
            'std_test_score' : [0.01, 0.10, 0.05, 0.08],
            'rank_test_score' : [2, 4, 3, 1],
            'split0_train_score' : [0.80, 0.92, 0.70, 0.93],
            'split1_train_score' : [0.82, 0.55, 0.70, 0.87],
            'mean_train_score' : [0.81, 0.74, 0.70, 0.90],
            'std_train_score' : [0.01, 0.19, 0.00, 0.03],
            'mean_fit_time' : [0.73, 0.63, 0.43, 0.49],
            'std_fit_time' : [0.01, 0.02, 0.01, 0.01],
            'mean_score_time' : [0.01, 0.06, 0.04, 0.04],
            'std_score_time' : [0.00, 0.00, 0.00, 0.01],
            'params' : [{'kernel': 'poly', 'degree': 2}, ...],
            }
        NOTE
        The key ``'params'`` is used to store a list of parameter
        settings dicts for all the parameter candidates.
        The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and
        ``std_score_time`` are all in seconds.
        For multi-metric evaluation, the scores for all the scorers are
        available in the ``cv_results_`` dict at the keys ending with that
        scorer's name (``'_<scorer_name>'``) instead of ``'_score'`` shown
        above. ('split0_test_precision', 'mean_train_precision' etc.)
    best_estimator_ : estimator
        Estimator that was chosen by the search, i.e. estimator
        which gave highest score (or smallest loss if specified)
        on the left out data. Not available if ``refit=False``.
        See ``refit`` parameter for more information on allowed values.
    best_score_ : float
        Mean cross-validated score of the best_estimator
        For multi-metric evaluation, this is present only if ``refit`` is
        specified.
        This attribute is not available if ``refit`` is a function.
    best_params_ : dict
        Parameter setting that gave the best results on the hold out data.
        For multi-metric evaluation, this is present only if ``refit`` is
        specified.
    best_index_ : int
        The index (of the ``cv_results_`` arrays) which corresponds to the best
        candidate parameter setting.
        The dict at ``search.cv_results_['params'][search.best_index_]`` gives
        the parameter setting for the best model, that gives the highest
        mean score (``search.best_score_``).
        For multi-metric evaluation, this is present only if ``refit`` is
        specified.
    scorer_ : function or a dict
        Scorer function used on the held out data to choose the best
        parameters for the model.
        For multi-metric evaluation, this attribute holds the validated
        ``scoring`` dict which maps the scorer key to the scorer callable.
    n_splits_ : int
        The number of cross-validation splits (folds/iterations).
    refit_time_ : float
        Seconds used for refitting the best model on the whole dataset.
        This is present only if ``refit`` is not False.
    multimetric_ : bool
        Whether or not the scorers compute several metrics.
    classes_ : ndarray of shape (n_classes,)
        The classes labels. This is present only if ``refit`` is specified and
        the underlying estimator is a classifier.
    n_features_in_ : int
        Number of features seen during :term:`fit`. Only defined if
        `best_estimator_` is defined (see the documentation for the `refit`
        parameter for more details) and that `best_estimator_` exposes
        `n_features_in_` when fit.
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Only defined if
        `best_estimator_` is defined (see the documentation for the `refit`
        parameter for more details) and that `best_estimator_` exposes
        `feature_names_in_` when fit.
    Examples
    ---------
    >>> from cca_zoo.model_selection import GridSearchCV
    >>> from cca_zoo.models import MCCA
    >>> X1 = [[0, 0, 1], [1, 0, 0], [2, 2, 2], [3, 5, 4]]
    >>> X2 = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    >>> X3 = [[0, 1, 0], [1, 9, 0], [4, 3, 3,], [12, 8, 10]]
    >>> model = MCCA()
    >>> params = {'c': [[0.1, 0.2], [0.3, 0.4], 0.1]}
    >>> GridSearchCV(model,param_grid=params, cv=3).fit([X1,X2,X3]).best_estimator_.c
    [0.1, 0.3, 0.1]
    Notes
    -----
    The parameters selected are those that maximize the score of the left out
    data, unless an explicit score is passed in which case it is used instead.
    If `n_jobs` was set to a value higher than one, the data is copied for each
    point in the grid (and not `n_jobs` times). This is done for efficiency
    reasons if individual jobs take very little time, but may raise errors if
    the dataset is large and not enough memory is available. A workaround in
    this case is to set `pre_dispatch`. Then, the memory is copied only
    `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
    n_jobs`.
    """
    _required_parameters = ["estimator", "param_grid"]
    def __init__(
        self,
        estimator,
        param_grid,
        *,
        scoring=None,
        n_jobs=None,
        refit=True,
        cv=None,
        verbose=0,
        pre_dispatch="2*n_jobs",
        error_score=np.nan,
        return_train_score=False,
    ):
        super().__init__(
            estimator=estimator,
            scoring=scoring,
            n_jobs=n_jobs,
            refit=refit,
            cv=cv,
            verbose=verbose,
            pre_dispatch=pre_dispatch,
            error_score=error_score,
            return_train_score=return_train_score,
        )
        # Bug fix: store the grid exactly as the user passed it so that
        # ``get_params()``/``clone()`` round-trip faithfully. Previously
        # ``param2grid(param_grid)`` was stored, so cloning this estimator
        # re-applied the conversion to an already-converted grid and took
        # the per-view cartesian product twice, corrupting multi-view
        # entries. This also matches RandomizedSearchCV, which stores its
        # constructor arguments untouched.
        self.param_grid = param_grid
        _check_param_grid(param_grid)
    def _run_search(self, evaluate_candidates):
        """Search all candidates in param_grid"""
        # Expand per-view parameter lists only at search time;
        # ``self.param_grid`` keeps the raw user specification.
        evaluate_candidates(ParameterGrid(param2grid(self.param_grid)))
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" and a "score" method.
It also implements "score_samples", "predict", "predict_proba",
"decision_function", "transform" and "inverse_transform" if they are
implemented in the estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated search over parameter settings.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Parameters
----------
estimator : estimator object.
A object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_distributions : dict or list of dicts
Dictionary with parameters names (`str`) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
If a list of dicts is given, first a dict is sampled uniformly, and
then a parameter is sampled using that dict as above.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : str, callable, list, tuple or dict, default=None
Strategy to evaluate the performance of the cross-validated model on
the test set.
If `scoring` represents a single score, one can use:
- a single string (see :ref:`scoring_parameter`);
- a callable (see :ref:`scoring`) that returns a single value.
If `scoring` represents multiple scores, one can use:
- a list or tuple of unique strings;
- a callable returning a dictionary where the keys are the metric
names and the values are the metric scores;
- a dictionary with metric names as keys and callables a values.
If None, the estimator's score method is used.
n_jobs : int, default=None
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors.
refit : bool, str, or callable, default=True
Refit an estimator using the best found parameters on the whole
dataset.
For multiple metric evaluation, this needs to be a `str` denoting the
scorer that would be used to find the best parameters for refitting
the estimator at the end.
Where there are considerations other than maximum score in
choosing a best estimator, ``refit`` can be set to a function which
returns the selected ``best_index_`` given the ``cv_results``. In that
case, the ``best_estimator_`` and ``best_params_`` will be set
according to the returned ``best_index_`` while the ``best_score_``
attribute will not be available.
The refitted estimator is made available at the ``best_estimator_``
attribute and permits using ``predict`` directly on this
``RandomizedSearchCV`` instance.
Also for multiple metric evaluation, the attributes ``best_index_``,
``best_score_`` and ``best_params_`` will only be available if
``refit`` is set and all of them will be determined w.r.t this specific
scorer.
See ``scoring`` parameter to know more about multiple metric
evaluation.
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used. These splitters are instantiated
with `shuffle=False` so the splits will be the same across calls.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
verbose : int
Controls the verbosity: the higher, the more messages.
pre_dispatch : int, or str, default='2*n_jobs'
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A str, giving an expression as a function of n_jobs,
as in '2*n_jobs'
random_state : int, RandomState instance or None, default=None
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Pass an int for reproducible output across multiple
function calls.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
return_train_score : bool, default=False
If ``False``, the ``cv_results_`` attribute will not include training
scores.
Computing training scores is used to get insights on how different
parameter settings impact the overfitting/underfitting trade-off.
However computing the scores on the training set can be computationally
expensive and is not strictly required to select the parameters that
yield the best generalization performance.
Attributes
----------
cv_results_ : dict of numpy (masked) ndarrays
A dict with keys as column headers and values as columns, that can be
imported into a pandas ``DataFrame``.
will be represented by a ``cv_results_`` dict of::
{
'param_kernel' : masked_array(data = ['rbf', 'rbf', 'rbf'],
mask = False),
'param_gamma' : masked_array(data = [0.1 0.2 0.3], mask = False),
'split0_test_score' : [0.80, 0.84, 0.70],
'split1_test_score' : [0.82, 0.50, 0.70],
'mean_test_score' : [0.81, 0.67, 0.70],
'std_test_score' : [0.01, 0.24, 0.00],
'rank_test_score' : [1, 3, 2],
'split0_train_score' : [0.80, 0.92, 0.70],
'split1_train_score' : [0.82, 0.55, 0.70],
'mean_train_score' : [0.81, 0.74, 0.70],
'std_train_score' : [0.01, 0.19, 0.00],
'mean_fit_time' : [0.73, 0.63, 0.43],
'std_fit_time' : [0.01, 0.02, 0.01],
'mean_score_time' : [0.01, 0.06, 0.04],
'std_score_time' : [0.00, 0.00, 0.00],
'params' : [{'kernel' : 'rbf', 'gamma' : 0.1}, ...],
}
NOTE
The key ``'params'`` is used to store a list of parameter
settings dicts for all the parameter candidates.
The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and
``std_score_time`` are all in seconds.
For multi-metric evaluation, the scores for all the scorers are
available in the ``cv_results_`` dict at the keys ending with that
scorer's name (``'_<scorer_name>'``) instead of ``'_score'`` shown
above. ('split0_test_precision', 'mean_train_precision' etc.)
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if ``refit=False``.
For multi-metric evaluation, this attribute is present only if
``refit`` is specified.
See ``refit`` parameter for more information on allowed values.
best_score_ : float
Mean cross-validated score of the best_estimator.
For multi-metric evaluation, this is not available if ``refit`` is
``False``. See ``refit`` parameter for more information.
This attribute is not available if ``refit`` is a function.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
For multi-metric evaluation, this is not available if ``refit`` is
``False``. See ``refit`` parameter for more information.
best_index_ : int
The index (of the ``cv_results_`` arrays) which corresponds to the best
candidate parameter setting.
The dict at ``search.cv_results_['params'][search.best_index_]`` gives
the parameter setting for the best model, that gives the highest
mean score (``search.best_score_``).
For multi-metric evaluation, this is not available if ``refit`` is
``False``. See ``refit`` parameter for more information.
scorer_ : function or a dict
Scorer function used on the held out data to choose the best
parameters for the model.
For multi-metric evaluation, this attribute holds the validated
``scoring`` dict which maps the scorer key to the scorer callable.
n_splits_ : int
The number of cross-validation splits (folds/iterations).
refit_time_ : float
Seconds used for refitting the best model on the whole dataset.
This is present only if ``refit`` is not False.
multimetric_ : bool
Whether or not the scorers compute several metrics.
classes_ : ndarray of shape (n_classes,)
The classes labels. This is present only if ``refit`` is specified and
the underlying estimator is a classifier.
n_features_in_ : int
Number of features seen during :term:`fit`. Only defined if
`best_estimator_` is defined and that `best_estimator_` exposes
`n_features_in_` when fit.
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Only defined if
`best_estimator_` is defined and that `best_estimator_` exposes
`feature_names_in_` when fit.
Examples
---------
>>> from cca_zoo.model_selection import RandomizedSearchCV
>>> from cca_zoo.models import MCCA
>>> from sklearn.utils.fixes import loguniform
>>> X1 = [[0, 0, 1], [1, 0, 0], [2, 2, 2], [3, 5, 4]]
>>> X2 = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> X3 = [[0, 1, 0], [1, 9, 0], [4, 3, 3,], [12, 8, 10]]
>>> model = MCCA()
>>> params = {'c': [loguniform(1e-4, 1e0), loguniform(1e-4, 1e0), [0.1]]}
>>> def scorer(estimator, X):
... scores = estimator.score(X)
... return np.mean(scores)
>>> RandomizedSearchCV(model,param_distributions=params, cv=3, scoring=scorer,n_iter=10).fit([X1,X2,X3]).n_iter
10
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
parameter setting(and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
"""
_required_parameters = ["estimator", "param_distributions"]
    def __init__(
        self,
        estimator,
        param_distributions,
        *,
        n_iter=10,
        scoring=None,
        n_jobs=None,
        refit=True,
        cv=None,
        verbose=0,
        pre_dispatch="2*n_jobs",
        random_state=None,
        error_score=np.nan,
        return_train_score=False,
    ):
        """Initialise the randomized search.

        The three arguments specific to the randomized strategy
        (``param_distributions``, ``n_iter``, ``random_state``) are stored on
        this instance and consumed by ``_run_search``; every other argument is
        forwarded unchanged to the base search class.
        """
        # Sampling configuration used by _run_search.
        self.param_distributions = param_distributions
        self.n_iter = n_iter
        self.random_state = random_state
        super().__init__(
            estimator=estimator,
            scoring=scoring,
            n_jobs=n_jobs,
            refit=refit,
            cv=cv,
            verbose=verbose,
            pre_dispatch=pre_dispatch,
            error_score=error_score,
            return_train_score=return_train_score,
        )
    def _run_search(self, evaluate_candidates):
        """Search n_iter candidates from param_distributions"""
        # ParameterSampler draws ``n_iter`` parameter settings from the given
        # distributions; ``random_state`` makes the draw reproducible.
        evaluate_candidates(
            ParameterSampler(
                self.param_distributions, self.n_iter, random_state=self.random_state
            )
        )
| 46.296017 | 141 | 0.614453 |
acdf7f7f21a00dfde4a30ffc4197f767ed8f405e | 1,095 | py | Python | pytglib/api/types/photo.py | iTeam-co/pytglib | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 6 | 2019-10-30T08:57:27.000Z | 2021-02-08T14:17:43.000Z | pytglib/api/types/photo.py | iTeam-co/python-telegram | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 1 | 2021-08-19T05:44:10.000Z | 2021-08-19T07:14:56.000Z | pytglib/api/types/photo.py | iTeam-co/python-telegram | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 5 | 2019-12-04T05:30:39.000Z | 2021-05-21T18:23:32.000Z |
from ..utils import Object
class Photo(Object):
    """
    Describes a photo

    Attributes:
        ID (:obj:`str`): ``Photo``

    Args:
        has_stickers (:obj:`bool`):
            True, if stickers were added to the photo
        minithumbnail (:class:`telegram.api.types.minithumbnail`):
            Photo minithumbnail; may be null
        sizes (List of :class:`telegram.api.types.photoSize`):
            Available variants of the photo, in different sizes

    Returns:
        Photo

    Raises:
        :class:`telegram.Error`
    """
    ID = "photo"

    def __init__(self, has_stickers, minithumbnail, sizes, **kwargs):
        # Extra keys from the raw payload are accepted but ignored.
        self.has_stickers = has_stickers  # bool
        self.minithumbnail = minithumbnail  # Minithumbnail
        self.sizes = sizes  # list of photoSize

    @staticmethod
    def read(q: dict, *args) -> "Photo":
        """Construct a Photo from a raw dictionary ``q``."""
        return Photo(
            q.get('has_stickers'),
            Object.read(q.get('minithumbnail')),
            [Object.read(item) for item in q.get('sizes', [])],
        )
| 26.707317 | 69 | 0.60274 |
acdf82c468feec1710ece19fb70885b20a8c2026 | 3,000 | py | Python | CNN/ColorNet.py | aman-chauhan/Image-Coloring | 469da3f52eddbfdec1600d5790c89e3c48eb2298 | [
"Apache-2.0"
] | 4 | 2018-07-03T17:00:34.000Z | 2019-12-27T14:02:44.000Z | CNN/ColorNet.py | aman-chauhan/Image-Coloring | 469da3f52eddbfdec1600d5790c89e3c48eb2298 | [
"Apache-2.0"
] | null | null | null | CNN/ColorNet.py | aman-chauhan/Image-Coloring | 469da3f52eddbfdec1600d5790c89e3c48eb2298 | [
"Apache-2.0"
] | null | null | null | from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import UpSampling2D
from keras.layers.convolutional import Conv2D
from keras.layers import Input
from keras.models import Model
from keras import regularizers
def color():
    """Build the colorization decoder model.

    The network maps an input tensor with 128 feature channels (arbitrary
    spatial size) to a 2-channel output, upsampling the spatial dimensions
    by a factor of 8 overall (three 2x upsampling stages).

    Returns:
        Model: Keras model named ``color_model`` whose input is
        ``(None, None, None, 128)`` and whose output comes from a final
        2-filter convolution.
    """
    def _conv_bn(tensor, filters):
        # The repeated building block of this decoder: a 3x3 same-padding
        # convolution (stride 1, ReLU, he_uniform init) followed by batch
        # normalization.
        tensor = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same',
                        activation='relu', kernel_initializer='he_uniform',
                        bias_initializer='he_uniform')(tensor)
        return BatchNormalization()(tensor)

    # Input tensor: arbitrary spatial size, 128 feature channels.
    color_input = Input(batch_shape=(None, None, None, 128), name='color_input')

    # Stage 1: 128 -> 64 channels, then 2x spatial upsampling.
    x = _conv_bn(color_input, 128)
    x = _conv_bn(x, 64)
    x = UpSampling2D(size=2)(x)

    # Stage 2: 64 -> 32 channels, then 2x spatial upsampling.
    x = _conv_bn(x, 64)
    x = _conv_bn(x, 32)
    x = UpSampling2D(size=2)(x)

    # Stage 3: 32 -> 16 channels, then 2x spatial upsampling.
    x = _conv_bn(x, 32)
    x = _conv_bn(x, 16)
    x = UpSampling2D(size=2)(x)

    # Final 2-channel convolution; deliberately no batch norm on the output.
    color_conv7 = Conv2D(filters=2, kernel_size=3, strides=1, padding='same',
                         activation='relu', kernel_initializer='he_uniform',
                         bias_initializer='he_uniform')(x)

    # Model definition
    color_model = Model(inputs=color_input, outputs=color_conv7, name='color_model')
    return color_model
| 50 | 120 | 0.726 |
acdf84c193f146b83803bf3e6ed9ea61ede5aa32 | 4,797 | py | Python | s_appearance/forms.py | AmatanHead/collective-blog | 9bf040faac43feae08b33900e30bf7d17b817ae4 | [
"MIT"
] | null | null | null | s_appearance/forms.py | AmatanHead/collective-blog | 9bf040faac43feae08b33900e30bf7d17b817ae4 | [
"MIT"
] | 4 | 2016-09-22T06:37:20.000Z | 2016-09-22T16:49:48.000Z | s_appearance/forms.py | AmatanHead/collective-blog | 9bf040faac43feae08b33900e30bf7d17b817ae4 | [
"MIT"
] | null | null | null | """Base form classes for rendering light-compatible html"""
from django.db import models
from django.forms.forms import BaseForm
from django.utils.html import conditional_escape
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
import re
class BaseFormRenderer(BaseForm):
    """Mixin that provides functions for rendering light-compatible html"""

    def _render_group(self, group):
        """Render several fields at the same line

        Responsive grid is used.

        :param group: tuple/list of 2 or 3 field names; ``None`` entries
            leave an empty grid cell (the next real field is offset past it).
        :return: HTML string for one grid row.
        :raises AssertionError: if the group does not have 2 or 3 items.
        """
        if len(group) > 3:
            raise AssertionError('group \'%s\' is too long' % repr(group))
        elif len(group) == 3:
            # Three-column row: each cell is one third wide.
            cols = 'one-third'
            offset_classes = ['',
                              ' offset-by-one-third',
                              ' offset-by-two-thirds']
        elif len(group) == 2:
            # Two-column row: each cell is one half wide.
            cols = 'one-half'
            offset_classes = ['',
                              ' offset-by-one-half']
        else:
            raise AssertionError('group \'%s\' is too short' % repr(group))

        html_output = '<div class="row">'
        # ``offset`` counts consecutive ``None`` placeholders; the next real
        # field gets the matching offset-by-* CSS class, then the counter
        # resets.  Trailing ``None`` entries simply produce no markup.
        offset = 0
        for field in group:
            if field is None:
                offset += 1
            else:
                offset_class = offset_classes[offset]
                offset = 0
                html_output += '<div class="%s column%s">' % (
                    cols, offset_class)
                html_output += self._render_field(field)
                html_output += '</div>'
        html_output += '</div>'
        return html_output

    # Matches `renderer` elements that will expand to <hr>s
    _separator = re.compile(r'-+')

    def _render_field(self, field):
        """Render a <section> with a single field

        :param field: a field name, a run of hyphens (rendered as ``<hr>``),
            or ``None`` (rendered as an empty section).
        :raises IndexError: if ``field`` names an unknown form field.
        """
        if field is None:
            return '<section> </section>'
        elif self._separator.match(field):
            return '<hr>'
        elif field not in self.fields:
            raise IndexError('unknown field %s' % field)

        # Rebind the name to the corresponding bound field.
        field = self[field]

        errors = self.error_class(
            [conditional_escape(error) for error in field.errors])

        classes = field.css_classes().split()
        if hasattr(self, 'additional_section_classes'):
            # Subclasses may declare extra CSS classes applied to every section.
            classes.extend(getattr(self, 'additional_section_classes'))
        if classes:
            css_classes = ' class="%s"' % ' '.join(classes)
        else:
            css_classes = ''

        if field.label:
            label = conditional_escape(force_text(field.label))
            label = field.label_tag(label) or ''
        else:
            label = ''

        if field.help_text:
            help_text = '<div class="desc">%s</div>' % force_text(field.help_text)
        else:
            help_text = ''

        # Hidden fields stay in the markup (so they still submit) but are not
        # displayed — unless they carry errors the user must see.
        if field.is_hidden and not errors:
            hidden = ' style="display: none;"'
        else:
            hidden = ''

        return ('<section%(hidden)s%(css_classes)s>'
                ' %(label)s %(field)s'
                ' %(help_text)s'
                ' %(errors)s'
                '</section>' % {
                    'hidden': hidden,
                    'css_classes': css_classes,
                    'label': label,
                    'help_text': help_text,
                    'errors': errors,
                    'field': str(field),
                })

    def as_section(self):
        """Returns this form rendered as HTML <section>s

        You can set up `renderer` array in your class.
        This array should contain a list of field names.
        If such one is specified, this function will first render that fields.

        Each element of the `renderer` list can be:

        * A string of hyphens (e.g. '-----')
          will render the `<hr>` tag.
        * A name of a field will render that field.
        * A tuple of two or three field names will render that fields
          in a single row (using flexible grid).
        * A `None` object will render an empty `<section>`.
        """
        renderer = getattr(self, 'renderer', [])
        # Track the fields already emitted by the explicit layout so the
        # fallback pass below renders each remaining field exactly once.
        served_fields = set()

        if self.non_field_errors():
            html_output = '<section class="separate">%s</section>' % force_text(self.non_field_errors())
        else:
            html_output = ''

        # First pass: honour the explicit `renderer` layout.
        for field in renderer:
            if isinstance(field, (list, tuple)):
                served_fields |= set(field)
                html_output += self._render_group(field)
            else:
                served_fields.add(field)
                html_output += self._render_field(field)

        # Second pass: append any fields the layout did not mention.
        for field in self.fields.keys():
            if field not in served_fields:
                served_fields.add(field)
                html_output += self._render_field(field)

        return mark_safe(html_output)
| 31.98 | 104 | 0.534084 |
acdf85530d73c5ba71a22d11bb181fcf48ce3176 | 1,187 | py | Python | deeptables/utils/consts.py | lixfz/DeepTables | c8cf97f5e13dfc8f6895fe3a2235ea9beea4f5fa | [
"Apache-2.0"
] | 828 | 2020-05-24T02:42:33.000Z | 2022-03-31T01:37:36.000Z | deeptables/utils/consts.py | lixfz/DeepTables | c8cf97f5e13dfc8f6895fe3a2235ea9beea4f5fa | [
"Apache-2.0"
] | 36 | 2020-06-02T14:20:20.000Z | 2022-02-23T11:05:09.000Z | deeptables/utils/consts.py | lixfz/DeepTables | c8cf97f5e13dfc8f6895fe3a2235ea9beea4f5fa | [
"Apache-2.0"
] | 170 | 2020-05-26T15:43:13.000Z | 2022-03-25T06:35:37.000Z | # -*- coding:utf-8 -*-
"""
DeepTables constants module.
"""
from hypernets.utils.const import *
PROJECT_NAME = 'deeptables'
# TASK_AUTO = 'auto'
# TASK_BINARY = 'binary'
# TASK_MULTICLASS = 'multiclass'
# TASK_REGRESSION = 'regression'
# TASK_MULTILABEL = 'multilabel'
INPUT_PREFIX_CAT = 'cat_'
INPUT_PREFIX_NUM = 'input_continuous_'
INPUT_PREFIX_SEQ = 'seq_'
LAYER_PREFIX_EMBEDDING = 'emb_'
# COLUMNNAME_POSTFIX_DISCRETE = '_discrete'
# COLUMNNAME_POSTFIX_CATEGORIZE = '_cat'
DATATYPE_TENSOR_FLOAT = 'float32'
DATATYPE_PREDICT_CLASS = 'int32'
# DATATYPE_LABEL = 'int16'
LAYER_NAME_BN_DENSE_ALL = 'bn_dense_all'
LAYER_NAME_CONCAT_CONT_INPUTS = 'concat_continuous_inputs'
MODEL_SELECT_MODE_MIN = 'min'
MODEL_SELECT_MODE_MAX = 'max'
MODEL_SELECT_MODE_AUTO = 'auto'
METRIC_NAME_AUC = 'AUC'
METRIC_NAME_ACCURACY = 'accuracy'
METRIC_NAME_MSE = 'mse'
MODEL_SELECTOR_BEST = 'best'
MODEL_SELECTOR_CURRENT = 'current'
MODEL_SELECTOR_ALL = 'all'
EMBEDDING_OUT_DIM_DEFAULT = 4
GBM_FEATURE_TYPE_EMB = 'embedding'
GBM_FEATURE_TYPE_DENSE = 'dense'
STACKING_OP_CONCAT = 'concat'
STACKING_OP_ADD = 'add'
GPU_USAGE_STRATEGY_GROWTH = 'memory_growth'
ENV_DEEPTABLES_HOME = 'DEEPTABLES_HOME'
| 21.981481 | 58 | 0.781803 |
acdf85e29b93c80ba4cd12bac12d8fa6c6a508b2 | 200 | py | Python | test_main.py | oscarmcm/pypi-tokens-demo | 9ccbbfac4c1d1a9b88c78f8c0f5d26ba875bccfe | [
"MIT"
] | null | null | null | test_main.py | oscarmcm/pypi-tokens-demo | 9ccbbfac4c1d1a9b88c78f8c0f5d26ba875bccfe | [
"MIT"
] | null | null | null | test_main.py | oscarmcm/pypi-tokens-demo | 9ccbbfac4c1d1a9b88c78f8c0f5d26ba875bccfe | [
"MIT"
] | null | null | null | import pytest
from main import main, rand_name
@pytest.mark.parametrize('arg1', [5])
def test_main(arg1):
    """Smoke test: the parametrized value is delivered unchanged."""
    # NOTE(review): this does not exercise main() itself — presumably a
    # parametrize demo; confirm intent.
    assert arg1 == 5
def test_rand_name():
    """rand_name() must produce a non-empty name."""
    generated = rand_name()
    assert len(generated) > 0
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.