hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f730a9b7a1db7a1618efa71421a99e18b273e3d6 | 2,861 | py | Python | flask_app.py | mnksingh94/Flipkart-Review-Scraper | 63ab0d4e9c640d3f88e81dc520b66dcb9931b5dc | [
"MIT"
] | null | null | null | flask_app.py | mnksingh94/Flipkart-Review-Scraper | 63ab0d4e9c640d3f88e81dc520b66dcb9931b5dc | [
"MIT"
] | null | null | null | flask_app.py | mnksingh94/Flipkart-Review-Scraper | 63ab0d4e9c640d3f88e81dc520b66dcb9931b5dc | [
"MIT"
] | null | null | null | from flask import Flask, render_template, request, jsonify
from flask_cors import CORS, cross_origin
import requests
from bs4 import BeautifulSoup as bs
from urllib.request import urlopen as uReq
import pymongo
app = Flask(__name__)
@app.route('/', methods=['POST', 'GET'])
def index():
    """
    Serve the search form (GET) and scrape/serve Flipkart reviews (POST).

    On POST the submitted product name (spaces removed) is used both as the
    Flipkart search term and as the MongoDB collection name.  Previously
    scraped reviews are served from MongoDB when present; otherwise the first
    Flipkart search result's review page is scraped, stored, and rendered.
    """
    if request.method == 'POST':
        # Spaces are stripped so the term doubles as a collection name
        searchString = request.form['content'].replace(" ", "")
        try:
            # Local MongoDB instance; one collection per search term
            dbConn = pymongo.MongoClient("mongodb://localhost:27017")
            db =dbConn['crawlerDB']
            reviews = db[searchString].find({})
            # NOTE(review): Cursor.count() is deprecated/removed in newer
            # PyMongo releases -- presumably an old PyMongo is pinned;
            # verify against requirements.
            print(reviews.count())
            if reviews.count() > 0:
                # Cache hit: render the stored reviews directly
                return render_template('results.html', reviews=reviews)
            else:
                # Cache miss: scrape the Flipkart search results page
                flipkart_url = "https://www.flipkart.com/search?q=" + searchString
                uClient = uReq(flipkart_url)
                flipkartPage = uClient.read()
                uClient.close()
                flipkart_html = bs(flipkartPage, "html.parser")
                # CSS class names below are Flipkart-specific and will break
                # whenever the site's markup changes
                bigboxes = flipkart_html.find_all("div", {"class": "_1AtVbE col-12-12"})
                # The leading boxes are page furniture, not products
                del bigboxes[0:3]
                # Only the first search result is scraped
                box = bigboxes[0]
                productLink = "https://www.flipkart.com" + box.div.div.div.a['href']
                prodRes = requests.get(productLink)
                prod_html = bs(prodRes.text, "html.parser")
                commentboxs = prod_html.find_all('div', {'class': "_16PBlm"})
                table = db[searchString]
                reviews = []
                for commentbox in commentboxs:
                    # Each field is scraped defensively: any parse failure
                    # falls back to a placeholder string
                    try:
                        name = commentbox.div.div.find_all('p', {'class':'_2sc7ZR _2V5EHH'})[0].text
                    except:
                        name = 'No name'
                    try:
                        rating = commentbox.div.div.div.div.text
                    except:
                        rating = 'No rating'
                    try:
                        commentHead = commentbox.div.find_all('p', {'class':'_2-N8zT'})[0].text
                    except:
                        commentHead = 'No comment heading'
                    try:
                        comtag = commentbox.div.div.find_all('div', {'class':''})
                        custComment = comtag[0].div.text
                    except:
                        custComment = 'no customer comment'
                    mydict = {'Product':searchString, 'Name':name, 'Rating':rating, 'CommentHead': commentHead,
                              'Comment':custComment}
                    # Persist for future cache hits and collect for rendering
                    x = table.insert_one(mydict)
                    reviews.append(mydict)
                return render_template('results.html', reviews=reviews)
        except:
            # NOTE(review): bare except hides every failure (including bugs
            # in this function); consider logging the exception at minimum
            return 'something is wrong'
    else:
        return render_template('index.html')
# Run the Flask development server when executed directly (not when imported)
if __name__ == "__main__":
    app.run(port=8000, debug=True)
| 40.871429 | 112 | 0.51136 | from flask import Flask, render_template, request, jsonify
from flask_cors import CORS, cross_origin
import requests
from bs4 import BeautifulSoup as bs
from urllib.request import urlopen as uReq
import pymongo
app = Flask(__name__)
@app.route('/', methods=['POST', 'GET'])
def index():
if request.method == 'POST':
searchString = request.form['content'].replace(" ", "")
try:
dbConn = pymongo.MongoClient("mongodb://localhost:27017")
db =dbConn['crawlerDB']
reviews = db[searchString].find({})
print(reviews.count())
if reviews.count() > 0:
return render_template('results.html', reviews=reviews)
else:
flipkart_url = "https://www.flipkart.com/search?q=" + searchString
uClient = uReq(flipkart_url)
flipkartPage = uClient.read()
uClient.close()
flipkart_html = bs(flipkartPage, "html.parser")
bigboxes = flipkart_html.find_all("div", {"class": "_1AtVbE col-12-12"})
del bigboxes[0:3]
box = bigboxes[0]
productLink = "https://www.flipkart.com" + box.div.div.div.a['href']
prodRes = requests.get(productLink)
prod_html = bs(prodRes.text, "html.parser")
commentboxs = prod_html.find_all('div', {'class': "_16PBlm"})
table = db[searchString]
reviews = []
for commentbox in commentboxs:
try:
name = commentbox.div.div.find_all('p', {'class':'_2sc7ZR _2V5EHH'})[0].text
except:
name = 'No name'
try:
rating = commentbox.div.div.div.div.text
except:
rating = 'No rating'
try:
commentHead = commentbox.div.find_all('p', {'class':'_2-N8zT'})[0].text
except:
commentHead = 'No comment heading'
try:
comtag = commentbox.div.div.find_all('div', {'class':''})
custComment = comtag[0].div.text
except:
custComment = 'no customer comment'
mydict = {'Product':searchString, 'Name':name, 'Rating':rating, 'CommentHead': commentHead,
'Comment':custComment}
x = table.insert_one(mydict)
reviews.append(mydict)
return render_template('results.html', reviews=reviews)
except:
return 'something is wrong'
else:
return render_template('index.html')
if __name__ == "__main__":
app.run(port=8000, debug=True)
| true | true |
f730a9e2fce406cfb29f69a52f9dfdd07eb5199b | 96 | py | Python | Python Pattern Programs/Alphabetic Patterns/Pattern 5.py | trial1user/Printing-Pattern-Programs | dde29e056b8e067fb3a824edb7ecb7dd9c9a776a | [
"MIT"
] | 61 | 2021-01-07T03:56:25.000Z | 2022-02-26T14:39:52.000Z | PythonPatternPrograms/AlphabeticPatterns/Pattern 5.py | Ankur-586/Printing-Pattern-Programs | 33e534ed66a02705e6cd6bc1992d4818a44d1b6b | [
"MIT"
] | 51 | 2020-12-25T17:06:26.000Z | 2021-05-07T12:52:56.000Z | PythonPatternPrograms/AlphabeticPatterns/Pattern 5.py | Ankur-586/Printing-Pattern-Programs | 33e534ed66a02705e6cd6bc1992d4818a44d1b6b | [
"MIT"
] | 13 | 2021-01-07T09:50:21.000Z | 2021-12-17T11:03:57.000Z | for x in range(65, 70):
for y in range(65, x + 1):
print(chr(x), end="")
print() | 24 | 30 | 0.489583 | for x in range(65, 70):
for y in range(65, x + 1):
print(chr(x), end="")
print() | true | true |
f730a9e647e5d1432afdb7a94d9a14a04264fc9b | 5,738 | py | Python | platform.py | arhi/platform-nordicnrf51 | 267cd3fe97a6dd694c7c8b5fbae0919caeef5304 | [
"Apache-2.0"
] | null | null | null | platform.py | arhi/platform-nordicnrf51 | 267cd3fe97a6dd694c7c8b5fbae0919caeef5304 | [
"Apache-2.0"
] | null | null | null | platform.py | arhi/platform-nordicnrf51 | 267cd3fe97a6dd694c7c8b5fbae0919caeef5304 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import platform
from platformio.managers.platform import PlatformBase
from platformio.util import get_systype
class Nordicnrf51Platform(PlatformBase):
    """
    PlatformIO platform definition for Nordic nRF51 targets.

    Responsibilities visible here: tune the optional package set based on
    build targets/framework, inject default debug-tool server configurations
    into board manifests, and map the generic ``speed`` debug option onto
    tool-specific server arguments.
    """

    def is_embedded(self):
        # nRF51 is always an embedded (cross-compiled) target
        return True

    def configure_default_packages(self, variables, targets):
        """
        Enable/disable optional packages depending on targets and framework.

        :param variables: Project configuration variables.
        :param targets: Requested build targets.
        :return: Result of the base class implementation.
        """
        # Flash-erase needs Nordic's nrfjprog tool
        if "erase" in targets:
            self.packages["tool-nrfjprog"]["optional"] = False

        # Zephyr needs its helper packages plus a pinned GCC toolchain
        if "zephyr" in variables.get("pioframework", []):
            for p in self.packages:
                if p.startswith("framework-zephyr-") or p in (
                    "tool-cmake", "tool-dtc", "tool-ninja"):
                    self.packages[p]["optional"] = False
            self.packages["toolchain-gccarmnoneeabi"]["version"] = "~1.80201.0"
            # gperf is shipped separately on non-Windows hosts
            if "windows" not in get_systype():
                self.packages["tool-gperf"]["optional"] = False

        # configure J-LINK tool: keep tool-jlink only when some option
        # (upload protocol, debug tool, or board defaults) mentions jlink
        jlink_conds = [
            "jlink" in variables.get(option, "")
            for option in ("upload_protocol", "debug_tool")
        ]
        if variables.get("board"):
            board_config = self.board_config(variables.get("board"))
            jlink_conds.extend([
                "jlink" in board_config.get(key, "")
                for key in ("debug.default_tools", "upload.protocol")
            ])
        jlink_pkgname = "tool-jlink"
        if not any(jlink_conds) and jlink_pkgname in self.packages:
            del self.packages[jlink_pkgname]

        return PlatformBase.configure_default_packages(self, variables,
                                                       targets)

    def get_boards(self, id_=None):
        """
        Return board configuration(s) with default debug tools filled in.

        :param id_: Optional board id; when omitted all boards are returned.
        """
        result = PlatformBase.get_boards(self, id_)
        if not result:
            return result
        if id_:
            return self._add_default_debug_tools(result)
        else:
            for key, value in result.items():
                result[key] = self._add_default_debug_tools(result[key])
        return result

    def _add_default_debug_tools(self, board):
        """
        Populate ``board.manifest['debug']['tools']`` with server configs
        for every supported debug probe the board's upload protocols allow.

        :param board: Board configuration object (mutated in place).
        :return: The same board object.
        """
        debug = board.manifest.get("debug", {})
        upload_protocols = board.manifest.get("upload", {}).get(
            "protocols", [])
        if "tools" not in debug:
            debug["tools"] = {}

        # J-Link / ST-Link / BlackMagic Probe / CMSIS-DAP
        for link in ("blackmagic", "jlink", "stlink", "cmsis-dap"):
            # Skip probes the board cannot use or that are already configured
            if link not in upload_protocols or link in debug["tools"]:
                continue
            if link == "blackmagic":
                # BlackMagic needs only its USB ids; GDB talks to it directly
                debug["tools"]["blackmagic"] = {
                    "hwids": [["0x1d50", "0x6018"]],
                    "require_debug_port": True
                }
            elif link == "jlink":
                assert debug.get("jlink_device"), (
                    "Missed J-Link Device ID for %s" % board.id)
                debug["tools"][link] = {
                    "server": {
                        "package": "tool-jlink",
                        "arguments": [
                            "-singlerun",
                            "-if", "SWD",
                            "-select", "USB",
                            "-device", debug.get("jlink_device"),
                            "-port", "2331"
                        ],
                        # GDB server binary name differs per host OS
                        "executable": ("JLinkGDBServerCL.exe"
                                       if platform.system() == "Windows" else
                                       "JLinkGDBServer")
                    }
                }
            else:
                # Remaining probes (stlink, cmsis-dap) go through OpenOCD
                server_args = [
                    "-s", "$PACKAGE_DIR/scripts",
                    "-f", "interface/%s.cfg" % link
                ]
                if link == "stlink":
                    server_args.extend([
                        "-c",
                        "transport select hla_swd; set WORKAREASIZE 0x4000"
                    ])
                server_args.extend(["-f", "target/nrf51.cfg"])
                debug["tools"][link] = {
                    "server": {
                        "package": "tool-openocd",
                        "executable": "bin/openocd",
                        "arguments": server_args
                    }
                }

            debug["tools"][link]["onboard"] = link in debug.get("onboard_tools", [])
            debug["tools"][link]["default"] = link in debug.get("default_tools", [])

        board.manifest['debug'] = debug
        return board

    def configure_debug_options(self, initial_debug_options, ide_data):
        """
        Translate the generic ``speed`` debug option into the server
        argument each tool expects.

        :param initial_debug_options: Options dict (not mutated; deep-copied).
        :param ide_data: IDE metadata (unused here).
        :return: New debug options dict.
        """
        debug_options = copy.deepcopy(initial_debug_options)
        adapter_speed = initial_debug_options.get("speed")
        if adapter_speed:
            server_options = debug_options.get("server") or {}
            server_executable = server_options.get("executable", "").lower()
            if "openocd" in server_executable:
                debug_options["server"]["arguments"].extend(
                    ["-c", "adapter speed %s" % adapter_speed]
                )
            elif "jlink" in server_executable:
                debug_options["server"]["arguments"].extend(
                    ["-speed", adapter_speed]
                )

        return debug_options
| 39.30137 | 84 | 0.518822 |
import copy
import platform
from platformio.managers.platform import PlatformBase
from platformio.util import get_systype
class Nordicnrf51Platform(PlatformBase):
def is_embedded(self):
return True
def configure_default_packages(self, variables, targets):
if "erase" in targets:
self.packages["tool-nrfjprog"]["optional"] = False
if "zephyr" in variables.get("pioframework", []):
for p in self.packages:
if p.startswith("framework-zephyr-") or p in (
"tool-cmake", "tool-dtc", "tool-ninja"):
self.packages[p]["optional"] = False
self.packages["toolchain-gccarmnoneeabi"]["version"] = "~1.80201.0"
if "windows" not in get_systype():
self.packages["tool-gperf"]["optional"] = False
jlink_conds = [
"jlink" in variables.get(option, "")
for option in ("upload_protocol", "debug_tool")
]
if variables.get("board"):
board_config = self.board_config(variables.get("board"))
jlink_conds.extend([
"jlink" in board_config.get(key, "")
for key in ("debug.default_tools", "upload.protocol")
])
jlink_pkgname = "tool-jlink"
if not any(jlink_conds) and jlink_pkgname in self.packages:
del self.packages[jlink_pkgname]
return PlatformBase.configure_default_packages(self, variables,
targets)
def get_boards(self, id_=None):
result = PlatformBase.get_boards(self, id_)
if not result:
return result
if id_:
return self._add_default_debug_tools(result)
else:
for key, value in result.items():
result[key] = self._add_default_debug_tools(result[key])
return result
def _add_default_debug_tools(self, board):
debug = board.manifest.get("debug", {})
upload_protocols = board.manifest.get("upload", {}).get(
"protocols", [])
if "tools" not in debug:
debug["tools"] = {}
for link in ("blackmagic", "jlink", "stlink", "cmsis-dap"):
if link not in upload_protocols or link in debug["tools"]:
continue
if link == "blackmagic":
debug["tools"]["blackmagic"] = {
"hwids": [["0x1d50", "0x6018"]],
"require_debug_port": True
}
elif link == "jlink":
assert debug.get("jlink_device"), (
"Missed J-Link Device ID for %s" % board.id)
debug["tools"][link] = {
"server": {
"package": "tool-jlink",
"arguments": [
"-singlerun",
"-if", "SWD",
"-select", "USB",
"-device", debug.get("jlink_device"),
"-port", "2331"
],
"executable": ("JLinkGDBServerCL.exe"
if platform.system() == "Windows" else
"JLinkGDBServer")
}
}
else:
server_args = [
"-s", "$PACKAGE_DIR/scripts",
"-f", "interface/%s.cfg" % link
]
if link == "stlink":
server_args.extend([
"-c",
"transport select hla_swd; set WORKAREASIZE 0x4000"
])
server_args.extend(["-f", "target/nrf51.cfg"])
debug["tools"][link] = {
"server": {
"package": "tool-openocd",
"executable": "bin/openocd",
"arguments": server_args
}
}
debug["tools"][link]["onboard"] = link in debug.get("onboard_tools", [])
debug["tools"][link]["default"] = link in debug.get("default_tools", [])
board.manifest['debug'] = debug
return board
def configure_debug_options(self, initial_debug_options, ide_data):
debug_options = copy.deepcopy(initial_debug_options)
adapter_speed = initial_debug_options.get("speed")
if adapter_speed:
server_options = debug_options.get("server") or {}
server_executable = server_options.get("executable", "").lower()
if "openocd" in server_executable:
debug_options["server"]["arguments"].extend(
["-c", "adapter speed %s" % adapter_speed]
)
elif "jlink" in server_executable:
debug_options["server"]["arguments"].extend(
["-speed", adapter_speed]
)
return debug_options
| true | true |
f730a9f146914b909d2007a03b578d74b8950165 | 248 | py | Python | setup.py | decisionscients/Cancer | cdf86ed654881a1d113b9623d5e21f76dc9d36d2 | [
"BSD-3-Clause"
] | null | null | null | setup.py | decisionscients/Cancer | cdf86ed654881a1d113b9623d5e21f76dc9d36d2 | [
"BSD-3-Clause"
] | null | null | null | setup.py | decisionscients/Cancer | cdf86ed654881a1d113b9623d5e21f76dc9d36d2 | [
"BSD-3-Clause"
] | null | null | null | from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='Learning Eligibility in Clinical Trials Using Neural Networks',
author='John James',
license='BSD-3',
)
| 22.545455 | 80 | 0.693548 | from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='Learning Eligibility in Clinical Trials Using Neural Networks',
author='John James',
license='BSD-3',
)
| true | true |
f730aa6163e999ba3e3dedeced39133e1bbdaa3b | 4,662 | py | Python | src/aoikprojectstarter/mediator.py | AoiKuiyuyou/AoikProjectStarter-Python | 448789ad012be8774d4a3792639b155394b16048 | [
"MIT"
] | 3 | 2016-09-15T12:49:08.000Z | 2016-10-22T11:47:11.000Z | src/aoikprojectstarter/mediator.py | AoiKuiyuyou/AoikProjectStarter-Python | 448789ad012be8774d4a3792639b155394b16048 | [
"MIT"
] | null | null | null | src/aoikprojectstarter/mediator.py | AoiKuiyuyou/AoikProjectStarter-Python | 448789ad012be8774d4a3792639b155394b16048 | [
"MIT"
] | null | null | null | # coding: utf-8
#
"""
Mediator module.
This module puts together other modules to implement the program logic.
"""
from __future__ import absolute_import
# Standard-library imports
from argparse import ArgumentParser
from argparse import ArgumentTypeError
import sys
import traceback
# Local imports
from .func import factorial
def int_ge0(text):
    """
    Convert given text to an integer greater than or equal to 0.

    Used by `ArgumentParser` as an argument `type` callable.

    :param text: Text to convert to integer.

    :return: An integer greater than or equal to 0.

    :raise ArgumentTypeError: If the text is not such an integer.
    """
    try:
        # Convert to int
        int_value = int(text)
    except (TypeError, ValueError):
        # Mark conversion failure; reported below
        int_value = None

    # Validate explicitly instead of using `assert`, which is silently
    # stripped when Python runs with the `-O` flag and would let negative
    # values through.
    if int_value is None or int_value < 0:
        # Raise an exception to notify ArgumentParser
        raise ArgumentTypeError(
            '`%s` is not an integer greater than or equal to 0.' % text)

    # Return the valid value
    return int_value
def get_cmdargs_parser():
    """
    Build the command line arguments parser for this program.

    :return: An `ArgumentParser` instance with all arguments registered.
    """
    # The program name appears in the generated usage and help text
    cmdargs_parser = ArgumentParser(prog='aoikprojectstarter')

    # Positional argument: the number whose factorial is computed.
    # `int_ge0` rejects anything that is not an integer >= 0.
    cmdargs_parser.add_argument(
        'number',
        type=int_ge0,
        default=None,
        metavar='NUMBER',
        help='the number for which to compute factorial.',
    )

    return cmdargs_parser
def main_core(args=None, step_func=None):
    """
    Implement program core logic.

    :param args: Command line arguments list.

    :param step_func: Step info setter function.

    :return: Exit code.
    """
    # A step info setter function is mandatory
    if step_func is None:
        raise ValueError('Error (3P92V): Argument `step_func` is not given.')

    step_func(title='Parse command line arguments')

    parser = get_cmdargs_parser()

    # Fall back to the process' own command line arguments
    cmdargs_list = sys.argv[1:] if args is None else args

    # With no arguments at all, show usage and finish successfully
    if not cmdargs_list:
        parser.print_help()

        return 0

    parsed_args = parser.parse_args(cmdargs_list)

    step_func(title='Compute factorial')

    # Compute the factorial and report the result
    number = parsed_args.number

    result = factorial(number)

    print('Factorial of {} is {}'.format(number, result))

    return 0
def main_wrap(args=None):
    """
    Wrap `main_core` to provide uncaught exception handling.

    :param args: Command line arguments list.

    :return: Exit code.
    """
    # Mutable state shared with the setter closure below
    step_state = {
        # Step title
        'title': '',
        # Exit code
        'exit_code': 0
    }

    def _set_step_info(title=None, exit_code=None):
        """
        Step info setter function.

        :param title: Step title.

        :param exit_code: Exit code.

        :return: None.
        """
        if title is not None:
            step_state['title'] = title

        if exit_code is not None:
            step_state['exit_code'] = exit_code

    try:
        # Delegate to the core logic
        return main_core(args=args, step_func=_set_step_info)
    except SystemExit:
        # Let deliberate exits propagate unchanged
        raise
    except KeyboardInterrupt:
        # Treat Ctrl-C as a normal termination
        return 0
    except BaseException:
        # Report the failure, naming the step that was in progress (if any)
        tb_msg = traceback.format_exc()

        step_title = step_state.get('title', '')

        if step_title:
            msg = '# Error (5QDEX): {0}\n---\n{1}---\n'.format(
                step_title, tb_msg
            )
        else:
            msg = '# Error (5QDEX)\n---\n{0}---\n'.format(tb_msg)

        sys.stderr.write(msg)

        # Report the exit code recorded for the failing step
        return step_state.get('exit_code', 8)
| 22.631068 | 77 | 0.611111 |
from __future__ import absolute_import
from argparse import ArgumentParser
from argparse import ArgumentTypeError
import sys
import traceback
from .func import factorial
def int_ge0(text):
try:
int_value = int(text)
assert int_value >= 0
except Exception:
raise ArgumentTypeError(
'`%s` is not an integer greater than or equal to 0.' % text)
return int_value
def get_cmdargs_parser():
parser = ArgumentParser(prog='aoikprojectstarter')
parser.add_argument(
'number',
type=int_ge0,
default=None,
metavar='NUMBER',
help='the number for which to compute factorial.',
)
return parser
def main_core(args=None, step_func=None):
if step_func is None:
raise ValueError('Error (3P92V): Argument `step_func` is not given.')
step_func(title='Parse command line arguments')
cmdargs_parser = get_cmdargs_parser()
if args is None:
args = sys.argv[1:]
if not args:
cmdargs_parser.print_help()
return 0
cmdargs = cmdargs_parser.parse_args(args)
step_func(title='Compute factorial')
number = cmdargs.number
result = factorial(number)
# Get message
msg = 'Factorial of {} is {}'.format(number, result)
# Print the message
print(msg)
# Return without error
return 0
def main_wrap(args=None):
# Dict that contains step info
step_info = {
# Step title
'title': '',
# Exit code
'exit_code': 0
}
# Create step info setter function
def _step_func(title=None, exit_code=None):
# If step title is given
if title is not None:
# Update step title
step_info['title'] = title
# If exit code is given
if exit_code is not None:
# Update exit code
step_info['exit_code'] = exit_code
#
try:
# Call `main_core` to implement program core logic
return main_core(args=args, step_func=_step_func)
# Catch exit
except SystemExit:
# Re-raise
raise
# Catch keyboard interrupt
except KeyboardInterrupt:
# Return without error
return 0
# Catch other exceptions
except BaseException:
# Get step title
step_title = step_info.get('title', '')
# Get traceback
tb_msg = traceback.format_exc()
# If step title is not empty
if step_title:
# Get message
msg = '
step_title, tb_msg
)
else:
# Get message
msg = '
# Print message
sys.stderr.write(msg)
# Get exit code
exit_code = step_info.get('exit_code', 8)
# Return exit code
return exit_code
| true | true |
f730abdde75909808210df1f3886c99110f87f72 | 11,682 | py | Python | poetry/repositories/legacy_repository.py | jancespivo/poetry | 4aee3fb9c2e99c189d2723a81fce2356c3589047 | [
"MIT"
] | null | null | null | poetry/repositories/legacy_repository.py | jancespivo/poetry | 4aee3fb9c2e99c189d2723a81fce2356c3589047 | [
"MIT"
] | null | null | null | poetry/repositories/legacy_repository.py | jancespivo/poetry | 4aee3fb9c2e99c189d2723a81fce2356c3589047 | [
"MIT"
] | null | null | null | import cgi
import re
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
try:
from html import unescape
except ImportError:
try:
from html.parser import HTMLParser
except ImportError:
from HTMLParser import HTMLParser
unescape = HTMLParser().unescape
from typing import Generator
from typing import Union
import html5lib
import requests
from cachecontrol import CacheControl
from cachecontrol.caches.file_cache import FileCache
from cachy import CacheManager
import poetry.packages
from poetry.config import Config
from poetry.locations import CACHE_DIR
from poetry.masonry.publishing.uploader import wheel_file_re
from poetry.packages import Package
from poetry.packages import dependency_from_pep_508
from poetry.packages.utils.link import Link
from poetry.semver import parse_constraint
from poetry.semver import Version
from poetry.semver import VersionConstraint
from poetry.utils._compat import Path
from poetry.utils.helpers import canonicalize_name, get_http_basic_auth
from poetry.version.markers import InvalidMarker
from .pypi_repository import PyPiRepository
class Page:
    """
    A parsed HTML index page of a legacy (PEP 503 style) package repository.

    Exposes the distribution links found in the page's anchors and the
    package versions that can be parsed out of their filenames.
    """

    # Matches "<name>-<version>" in a distribution filename
    VERSION_REGEX = re.compile(r"(?i)([a-z0-9_\-.]+?)-(?=\d)([a-z0-9_.!+-]+)")

    # Distribution file extensions this page will yield links for
    SUPPORTED_FORMATS = [
        ".tar.gz",
        ".whl",
        ".zip",
        ".tar.bz2",
        ".tar.xz",
        ".tar.Z",
        ".tar",
    ]

    def __init__(self, url, content, headers):
        """
        :param url: Base URL of the page (a trailing slash is enforced).
        :param content: Raw HTML body.
        :param headers: Response headers; Content-Type charset, when present,
                        drives the decoding of *content*.
        """
        if not url.endswith("/"):
            url += "/"
        self._url = url
        encoding = None
        if headers and "Content-Type" in headers:
            content_type, params = cgi.parse_header(headers["Content-Type"])
            if "charset" in params:
                encoding = params["charset"]
        self._content = content
        if encoding is None:
            # Let html5lib sniff the encoding itself
            self._parsed = html5lib.parse(content, namespaceHTMLElements=False)
        else:
            self._parsed = html5lib.parse(
                content, transport_encoding=encoding, namespaceHTMLElements=False
            )

    @property
    def versions(self):  # type: () -> Generator[Version]
        """Yield each distinct version parsed from the page's links."""
        seen = set()
        for link in self.links:
            version = self.link_version(link)
            if not version:
                continue
            if version in seen:
                continue
            seen.add(version)
            yield version

    @property
    def links(self):  # type: () -> Generator[Link]
        """Yield a Link for every anchor pointing at a supported archive."""
        for anchor in self._parsed.findall(".//a"):
            if anchor.get("href"):
                href = anchor.get("href")
                url = self.clean_link(urlparse.urljoin(self._url, href))
                # PEP 503: anchors may carry a data-requires-python attribute
                pyrequire = anchor.get("data-requires-python")
                pyrequire = unescape(pyrequire) if pyrequire else None
                link = Link(url, self, requires_python=pyrequire)
                if link.ext not in self.SUPPORTED_FORMATS:
                    continue
                yield link

    def links_for_version(self, version):  # type: (Version) -> Generator[Link]
        """Yield only the links whose filename parses to *version*."""
        for link in self.links:
            if self.link_version(link) == version:
                yield link

    def link_version(self, link):  # type: (Link) -> Union[Version, None]
        """
        Parse the version out of a link's filename.

        Wheel filenames are parsed with the wheel regex; anything else falls
        back to VERSION_REGEX. Returns None when no valid version is found.
        """
        m = wheel_file_re.match(link.filename)
        if m:
            version = m.group("ver")
        else:
            info, ext = link.splitext()
            match = self.VERSION_REGEX.match(info)
            if not match:
                return
            version = match.group(2)
        try:
            version = Version.parse(version)
        except ValueError:
            return
        return version

    # Characters NOT in this class get percent-encoded by clean_link
    _clean_re = re.compile(r"[^a-z0-9$&+,/:;=?@.#%_\\|-]", re.I)

    def clean_link(self, url):
        """Makes sure a link is fully encoded. That is, if a ' ' shows up in
        the link, it will be rewritten to %20 (while not over-quoting
        % or other characters)."""
        return self._clean_re.sub(lambda match: "%%%2x" % ord(match.group(0)), url)
class LegacyRepository(PyPiRepository):
    """
    Repository backed by a legacy (PEP 503 "simple") package index.

    Package metadata is scraped from HTML index pages, with results cached
    on disk (releases, HTTP responses) and in memory (packages, matches).
    """

    def __init__(self, name, url, disable_cache=False):
        """
        :param name: Repository name ("pypi" is reserved).
        :param url: Base URL of the index (trailing slash stripped).
        :param disable_cache: When True, callers should bypass caching.
        """
        if name == "pypi":
            raise ValueError("The name [pypi] is reserved for repositories")

        self._packages = []
        self._name = name
        self._url = url.rstrip("/")
        self._cache_dir = Path(CACHE_DIR) / "cache" / "repositories" / name
        # Release info persists on disk; package/match lookups stay in memory
        self._cache = CacheManager(
            {
                "default": "releases",
                "serializer": "json",
                "stores": {
                    "releases": {"driver": "file", "path": str(self._cache_dir)},
                    "packages": {"driver": "dict"},
                    "matches": {"driver": "dict"},
                },
            }
        )

        # HTTP session with a file-backed cache for index responses
        self._session = CacheControl(
            requests.session(), cache=FileCache(str(self._cache_dir / "_http"))
        )

        # Fall back to credentials from auth.toml only when the URL itself
        # carries no username
        url_parts = urlparse.urlparse(self._url)
        if not url_parts.username:
            self._session.auth = get_http_basic_auth(
                Config.create("auth.toml"), self.name
            )

        self._disable_cache = disable_cache

    @property
    def name(self):
        # Repository name as configured by the user
        return self._name

    def find_packages(
        self, name, constraint=None, extras=None, allow_prereleases=False
    ):
        """
        Return Package stubs for every version of *name* matching
        *constraint*, using the in-memory "matches" cache when possible.
        """
        packages = []

        if constraint is not None and not isinstance(constraint, VersionConstraint):
            constraint = parse_constraint(constraint)

        # Cache key includes the constraint so different queries don't collide
        key = name
        if constraint:
            key = "{}:{}".format(key, str(constraint))

        if self._cache.store("matches").has(key):
            versions = self._cache.store("matches").get(key)
        else:
            page = self._get("/{}/".format(canonicalize_name(name).replace(".", "-")))
            if page is None:
                return []

            versions = []
            for version in page.versions:
                if not constraint or (constraint and constraint.allows(version)):
                    versions.append(version)

            # TTL value 5 -- presumably minutes per cachy's convention; confirm
            self._cache.store("matches").put(key, versions, 5)

        for version in versions:
            package = Package(name, version)
            package.source_type = "legacy"
            package.source_url = self._url

            if extras is not None:
                package.requires_extras = extras

            packages.append(package)

        self._log(
            "{} packages found for {} {}".format(len(packages), name, str(constraint)),
            level="debug",
        )

        return packages

    def package(
        self, name, version, extras=None
    ):  # type: (...) -> poetry.packages.Package
        """
        Retrieve the release information.

        This is a heavy task which takes time.
        We have to download a package to get the dependencies.
        We also need to download every file matching this release
        to get the various hashes.

        Note that, this will be cached so the subsequent operations
        should be much faster.

        :param name: Package name.
        :param version: Exact version string.
        :param extras: Extras to activate on the returned package.
        """
        try:
            # Reuse a previously built package object when available
            index = self._packages.index(
                poetry.packages.Package(name, version, version)
            )

            return self._packages[index]
        except ValueError:
            if extras is None:
                extras = []

            release_info = self.get_release_info(name, version)
            package = poetry.packages.Package(name, version, version)
            package.source_type = "legacy"
            package.source_url = self._url
            package.source_reference = self.name
            requires_dist = release_info["requires_dist"] or []
            for req in requires_dist:
                try:
                    dependency = dependency_from_pep_508(req)
                except InvalidMarker:
                    # Invalid marker
                    # We strip the markers hoping for the best
                    req = req.split(";")[0]
                    dependency = dependency_from_pep_508(req)

                if dependency.extras:
                    for extra in dependency.extras:
                        if extra not in package.extras:
                            package.extras[extra] = []

                        package.extras[extra].append(dependency)

                if not dependency.is_optional():
                    package.requires.append(dependency)

            # Adding description
            package.description = release_info.get("summary", "")

            # Adding hashes information
            package.hashes = release_info["digests"]

            # Activate extra dependencies
            for extra in extras:
                if extra in package.extras:
                    for dep in package.extras[extra]:
                        dep.activate()

                    package.requires += package.extras[extra]

            self._packages.append(package)

            return package

    def _get_release_info(self, name, version):  # type: (str, str) -> dict
        """
        Build a PyPI-style release info dict (summary, requires_dist,
        requires_python, digests) from the index page's links for *version*.

        :raise ValueError: If the package or version has no usable links.
        """
        page = self._get("/{}/".format(canonicalize_name(name).replace(".", "-")))
        if page is None:
            raise ValueError('No package named "{}"'.format(name))

        data = {
            "name": name,
            "version": version,
            "summary": "",
            "requires_dist": [],
            "requires_python": [],
            "digests": [],
        }

        links = list(page.links_for_version(Version.parse(version)))
        if not links:
            raise ValueError(
                'No valid distribution links found for package: "{}" version: "{}"'.format(
                    name, version
                )
            )
        urls = {}
        hashes = []
        default_link = links[0]
        for link in links:
            # Classify each distribution file; the first .tar.gz wins as sdist
            if link.is_wheel:
                urls["bdist_wheel"] = link.url
            elif link.filename.endswith(".tar.gz"):
                urls["sdist"] = link.url
            elif (
                link.filename.endswith((".zip", ".bz2", ".xz", ".Z", ".tar"))
                and "sdist" not in urls
            ):
                urls["sdist"] = link.url

            # NOTE(review): `hash` shadows the builtin; harmless locally
            hash = link.hash
            if link.hash_name == "sha256":
                hashes.append(hash)

        data["digests"] = hashes

        if not urls:
            # Fallback: accept only a universal wheel or a recognized sdist
            # from the first link; otherwise return the bare data dict
            if default_link.is_wheel:
                m = wheel_file_re.match(default_link.filename)
                python = m.group("pyver")
                platform = m.group("plat")
                if python == "py2.py3" and platform == "any":
                    urls["bdist_wheel"] = default_link.url
            elif default_link.filename.endswith(".tar.gz"):
                urls["sdist"] = default_link.url
            elif (
                default_link.filename.endswith((".zip", ".bz2")) and "sdist" not in urls
            ):
                urls["sdist"] = default_link.url
            else:
                return data

        # Download/inspect the chosen files for metadata (inherited helper)
        info = self._get_info_from_urls(urls)

        data["summary"] = info["summary"]
        data["requires_dist"] = info["requires_dist"]
        data["requires_python"] = info["requires_python"]

        return data

    def _download(self, url, dest):  # type: (str, str) -> None
        """Stream *url* to the file path *dest* in 1 KiB chunks."""
        r = self._session.get(url, stream=True)
        with open(dest, "wb") as f:
            for chunk in r.raw.stream(1024):
                if chunk:
                    f.write(chunk)

    def _get(self, endpoint):  # type: (str) -> Union[Page, None]
        """Fetch an index endpoint; return a parsed Page, or None on 404."""
        url = self._url + endpoint
        response = self._session.get(url)
        if response.status_code == 404:
            return

        return Page(url, response.content, response.headers)
| 31.069149 | 91 | 0.54982 | import cgi
import re
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
try:
from html import unescape
except ImportError:
try:
from html.parser import HTMLParser
except ImportError:
from HTMLParser import HTMLParser
unescape = HTMLParser().unescape
from typing import Generator
from typing import Union
import html5lib
import requests
from cachecontrol import CacheControl
from cachecontrol.caches.file_cache import FileCache
from cachy import CacheManager
import poetry.packages
from poetry.config import Config
from poetry.locations import CACHE_DIR
from poetry.masonry.publishing.uploader import wheel_file_re
from poetry.packages import Package
from poetry.packages import dependency_from_pep_508
from poetry.packages.utils.link import Link
from poetry.semver import parse_constraint
from poetry.semver import Version
from poetry.semver import VersionConstraint
from poetry.utils._compat import Path
from poetry.utils.helpers import canonicalize_name, get_http_basic_auth
from poetry.version.markers import InvalidMarker
from .pypi_repository import PyPiRepository
class Page:
VERSION_REGEX = re.compile(r"(?i)([a-z0-9_\-.]+?)-(?=\d)([a-z0-9_.!+-]+)")
SUPPORTED_FORMATS = [
".tar.gz",
".whl",
".zip",
".tar.bz2",
".tar.xz",
".tar.Z",
".tar",
]
def __init__(self, url, content, headers):
if not url.endswith("/"):
url += "/"
self._url = url
encoding = None
if headers and "Content-Type" in headers:
content_type, params = cgi.parse_header(headers["Content-Type"])
if "charset" in params:
encoding = params["charset"]
self._content = content
if encoding is None:
self._parsed = html5lib.parse(content, namespaceHTMLElements=False)
else:
self._parsed = html5lib.parse(
content, transport_encoding=encoding, namespaceHTMLElements=False
)
@property
def versions(self):
seen = set()
for link in self.links:
version = self.link_version(link)
if not version:
continue
if version in seen:
continue
seen.add(version)
yield version
@property
def links(self):
for anchor in self._parsed.findall(".//a"):
if anchor.get("href"):
href = anchor.get("href")
url = self.clean_link(urlparse.urljoin(self._url, href))
pyrequire = anchor.get("data-requires-python")
pyrequire = unescape(pyrequire) if pyrequire else None
link = Link(url, self, requires_python=pyrequire)
if link.ext not in self.SUPPORTED_FORMATS:
continue
yield link
def links_for_version(self, version):
for link in self.links:
if self.link_version(link) == version:
yield link
def link_version(self, link):
m = wheel_file_re.match(link.filename)
if m:
version = m.group("ver")
else:
info, ext = link.splitext()
match = self.VERSION_REGEX.match(info)
if not match:
return
version = match.group(2)
try:
version = Version.parse(version)
except ValueError:
return
return version
_clean_re = re.compile(r"[^a-z0-9$&+,/:;=?@.#%_\\|-]", re.I)
def clean_link(self, url):
return self._clean_re.sub(lambda match: "%%%2x" % ord(match.group(0)), url)
class LegacyRepository(PyPiRepository):
def __init__(self, name, url, disable_cache=False):
if name == "pypi":
raise ValueError("The name [pypi] is reserved for repositories")
self._packages = []
self._name = name
self._url = url.rstrip("/")
self._cache_dir = Path(CACHE_DIR) / "cache" / "repositories" / name
self._cache = CacheManager(
{
"default": "releases",
"serializer": "json",
"stores": {
"releases": {"driver": "file", "path": str(self._cache_dir)},
"packages": {"driver": "dict"},
"matches": {"driver": "dict"},
},
}
)
self._session = CacheControl(
requests.session(), cache=FileCache(str(self._cache_dir / "_http"))
)
url_parts = urlparse.urlparse(self._url)
if not url_parts.username:
self._session.auth = get_http_basic_auth(
Config.create("auth.toml"), self.name
)
self._disable_cache = disable_cache
@property
def name(self):
return self._name
def find_packages(
self, name, constraint=None, extras=None, allow_prereleases=False
):
packages = []
if constraint is not None and not isinstance(constraint, VersionConstraint):
constraint = parse_constraint(constraint)
key = name
if constraint:
key = "{}:{}".format(key, str(constraint))
if self._cache.store("matches").has(key):
versions = self._cache.store("matches").get(key)
else:
page = self._get("/{}/".format(canonicalize_name(name).replace(".", "-")))
if page is None:
return []
versions = []
for version in page.versions:
if not constraint or (constraint and constraint.allows(version)):
versions.append(version)
self._cache.store("matches").put(key, versions, 5)
for version in versions:
package = Package(name, version)
package.source_type = "legacy"
package.source_url = self._url
if extras is not None:
package.requires_extras = extras
packages.append(package)
self._log(
"{} packages found for {} {}".format(len(packages), name, str(constraint)),
level="debug",
)
return packages
def package(
self, name, version, extras=None
):
try:
index = self._packages.index(
poetry.packages.Package(name, version, version)
)
return self._packages[index]
except ValueError:
if extras is None:
extras = []
release_info = self.get_release_info(name, version)
package = poetry.packages.Package(name, version, version)
package.source_type = "legacy"
package.source_url = self._url
package.source_reference = self.name
requires_dist = release_info["requires_dist"] or []
for req in requires_dist:
try:
dependency = dependency_from_pep_508(req)
except InvalidMarker:
req = req.split(";")[0]
dependency = dependency_from_pep_508(req)
if dependency.extras:
for extra in dependency.extras:
if extra not in package.extras:
package.extras[extra] = []
package.extras[extra].append(dependency)
if not dependency.is_optional():
package.requires.append(dependency)
package.description = release_info.get("summary", "")
package.hashes = release_info["digests"]
for extra in extras:
if extra in package.extras:
for dep in package.extras[extra]:
dep.activate()
package.requires += package.extras[extra]
self._packages.append(package)
return package
def _get_release_info(self, name, version):
page = self._get("/{}/".format(canonicalize_name(name).replace(".", "-")))
if page is None:
raise ValueError('No package named "{}"'.format(name))
data = {
"name": name,
"version": version,
"summary": "",
"requires_dist": [],
"requires_python": [],
"digests": [],
}
links = list(page.links_for_version(Version.parse(version)))
if not links:
raise ValueError(
'No valid distribution links found for package: "{}" version: "{}"'.format(
name, version
)
)
urls = {}
hashes = []
default_link = links[0]
for link in links:
if link.is_wheel:
urls["bdist_wheel"] = link.url
elif link.filename.endswith(".tar.gz"):
urls["sdist"] = link.url
elif (
link.filename.endswith((".zip", ".bz2", ".xz", ".Z", ".tar"))
and "sdist" not in urls
):
urls["sdist"] = link.url
hash = link.hash
if link.hash_name == "sha256":
hashes.append(hash)
data["digests"] = hashes
if not urls:
if default_link.is_wheel:
m = wheel_file_re.match(default_link.filename)
python = m.group("pyver")
platform = m.group("plat")
if python == "py2.py3" and platform == "any":
urls["bdist_wheel"] = default_link.url
elif default_link.filename.endswith(".tar.gz"):
urls["sdist"] = default_link.url
elif (
default_link.filename.endswith((".zip", ".bz2")) and "sdist" not in urls
):
urls["sdist"] = default_link.url
else:
return data
info = self._get_info_from_urls(urls)
data["summary"] = info["summary"]
data["requires_dist"] = info["requires_dist"]
data["requires_python"] = info["requires_python"]
return data
def _download(self, url, dest):
r = self._session.get(url, stream=True)
with open(dest, "wb") as f:
for chunk in r.raw.stream(1024):
if chunk:
f.write(chunk)
def _get(self, endpoint):
url = self._url + endpoint
response = self._session.get(url)
if response.status_code == 404:
return
return Page(url, response.content, response.headers)
| true | true |
f730abe304c37698a7a929d72c33c9de240a15f7 | 9,543 | py | Python | config/settings/base.py | szymanskirafal/tryread | f7e395c318e33de84992c79eaa5844028c378a2d | [
"MIT"
] | null | null | null | config/settings/base.py | szymanskirafal/tryread | f7e395c318e33de84992c79eaa5844028c378a2d | [
"MIT"
] | null | null | null | config/settings/base.py | szymanskirafal/tryread | f7e395c318e33de84992c79eaa5844028c378a2d | [
"MIT"
] | null | null | null | """
Base settings to build other settings files upon.
"""
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (tryread/config/settings/base.py - 3 = tryread/)
APPS_DIR = ROOT_DIR.path('tryread')
env = environ.Env()
READ_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=False)
if READ_DOT_ENV_FILE:
# OS environment variables take precedence over variables from .env
env.read_env(str(ROOT_DIR.path('.env')))
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# Local time zone. Choices are
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# though not all of them may be available with every OS.
# In Windows, this must be set to your system time zone.
TIME_ZONE = 'UTC'
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': env.db('DATABASE_URL', default='postgres:///tryread'),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = 'config.urls'
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# APPS
# ------------------------------------------------------------------------------
DJANGO_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# 'django.contrib.humanize', # Handy template tags
'django.contrib.admin',
]
THIRD_PARTY_APPS = [
'crispy_forms',
'allauth',
'allauth.account',
'allauth.socialaccount',
'rest_framework',
'django_pdb',
]
LOCAL_APPS = [
'tryread.users.apps.UsersConfig',
# Your stuff: custom apps go here
'books',
'home',
'read',
'writer',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIGRATIONS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules
MIGRATION_MODULES = {
'sites': 'tryread.contrib.sites.migrations'
}
# AUTHENTICATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model
AUTH_USER_MODEL = 'users.User'
# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url
LOGIN_REDIRECT_URL = 'users:redirect'
# https://docs.djangoproject.com/en/dev/ref/settings/#login-url
LOGIN_URL = 'account_login'
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = [
# https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
'django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django_pdb.middleware.PdbMiddleware',
]
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [
str(APPS_DIR.path('static')),
]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL.
ADMIN_URL = 'admin/'
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [
("""Rafal Szymanski""", 'r.szymansky@gmail.com'),
]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# django-allauth
# ------------------------------------------------------------------------------
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_AUTHENTICATION_METHOD = 'username'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_REQUIRED = True
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_ADAPTER = 'tryread.users.adapters.AccountAdapter'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
SOCIALACCOUNT_ADAPTER = 'tryread.users.adapters.SocialAccountAdapter'
# Your stuff...
# ------------------------------------------------------------------------------
| 38.635628 | 98 | 0.625799 |
import environ
ROOT_DIR = environ.Path(__file__) - 3
APPS_DIR = ROOT_DIR.path('tryread')
env = environ.Env()
READ_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=False)
if READ_DOT_ENV_FILE:
env.read_env(str(ROOT_DIR.path('.env')))
= env.bool('DJANGO_DEBUG', False)
TIME_ZONE = 'UTC'
= 'en-us'
= 1
= True
= True
= True
S = {
'default': env.db('DATABASE_URL', default='postgres:///tryread'),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
= 'config.urls'
= 'config.wsgi.application'
DJANGO_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
admin',
]
THIRD_PARTY_APPS = [
'crispy_forms',
'allauth',
'allauth.account',
'allauth.socialaccount',
'rest_framework',
'django_pdb',
]
LOCAL_APPS = [
'tryread.users.apps.UsersConfig',
'books',
'home',
'read',
'writer',
]
= DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
= {
'sites': 'tryread.contrib.sites.migrations'
}
= [
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
]
= 'users.User'
= 'users:redirect'
= 'account_login'
= [
.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
]
= [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
= [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django_pdb.middleware.PdbMiddleware',
]
= str(ROOT_DIR('staticfiles'))
= '/static/'
(APPS_DIR.path('static')),
]
= [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
= str(APPS_DIR('media'))
= '/media/'
= [
{
mplate.backends.django.DjangoTemplates',
': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
ebug': DEBUG,
ders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
sors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
E_PACK = 'bootstrap4'
= (
str(APPS_DIR.path('fixtures')),
)
= env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
ADMIN_URL = 'admin/'
= [
("""Rafal Szymanski""", 'r.szymansky@gmail.com'),
]
= ADMINS
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_ADAPTER = 'tryread.users.adapters.AccountAdapter'
SOCIALACCOUNT_ADAPTER = 'tryread.users.adapters.SocialAccountAdapter'
| true | true |
f730acfe327038eafb73c8469ea9eb060bf5ea02 | 4,297 | py | Python | inference.py | trainOwn/yolov3-Inference | 5695469292b82ca54209b99b75115c1aabad0b9e | [
"MIT"
] | null | null | null | inference.py | trainOwn/yolov3-Inference | 5695469292b82ca54209b99b75115c1aabad0b9e | [
"MIT"
] | null | null | null | inference.py | trainOwn/yolov3-Inference | 5695469292b82ca54209b99b75115c1aabad0b9e | [
"MIT"
] | null | null | null | import numpy as np
import argparse
import cv2 as cv
import subprocess
import time
import os
from support import infer_image, show_image
FLAGS = []
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model-path',
type=str,
default='./model/',
help='The directory where the model weights and \
configuration files are.')
parser.add_argument('-w', '--weights',
type=str,
default='./model/yolov3.weights',
help='Path to the file which contains the weights \
for YOLOv3.')
parser.add_argument('-cfg', '--config',
type=str,
default='./model/yolov3.cfg',
help='Path to the configuration file for the YOLOv3 model.')
parser.add_argument('-i', '--image-path',
type=str,
help='The path to the image file')
parser.add_argument('-v', '--video-path',
type=str,
help='The path to the video file')
parser.add_argument('-vo', '--video-output-path',
type=str,
default='./output.avi',
help='The path of the output video file')
parser.add_argument('-l', '--labels',
type=str,
default='./model/coco-labels',
help='Path to the file having the \
labels in a new-line seperated way.')
parser.add_argument('-c', '--confidence',
type=float,
default=0.5,
help='The model will reject boundaries which has a \
probabiity less than the confidence value. \
default: 0.5')
parser.add_argument('-th', '--threshold',
type=float,
default=0.3,
help='The threshold to use when applying the \
Non-Max Suppresion')
parser.add_argument('--download-model',
type=bool,
default=False,
help='Set to True, if the model weights and configurations \
are not present on your local machine.')
parser.add_argument('-t', '--show-time',
type=bool,
default=False,
help='Show the time taken to infer each image.')
FLAGS, unparsed = parser.parse_known_args()
if FLAGS.download_model:
subprocess.call(['./model/get_model.sh'])
labels = open(FLAGS.labels).read().strip().split('\n')
colors = np.random.randint(0, 255, size=(len(labels), 3), dtype='uint8')
net = cv.dnn.readNetFromDarknet(FLAGS.config, FLAGS.weights)
layer_names = net.getLayerNames()
layer_names = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
if FLAGS.image_path is None and FLAGS.video_path is None:
print ('Neither path to an image or path to video provided')
print ('Starting Inference on Webcam')
if FLAGS.image_path:
try:
img = cv.imread(FLAGS.image_path)
height, width = img.shape[:2]
except:
raise 'Image cannot be loaded!\n\
Please check the path provided!'
finally:
img, _, _, _, _ = infer_image(net, layer_names, height, width, img, colors, labels, FLAGS)
show_image(img)
elif FLAGS.video_path:
# Read the video
try:
vid = cv.VideoCapture(FLAGS.video_path)
height, width = None, None
writer = None
except:
raise 'Video cannot be loaded!\n\
Please check the path provided!'
finally:
print("Inferencing on video:", FLAGS.video_path)
while True:
grabbed, frame = vid.read()
if not grabbed:
break
if width is None or height is None:
height, width = frame.shape[:2]
frame, _, _, _, _ = infer_image(net, layer_names, height, width, frame, colors, labels, FLAGS)
if writer is None:
# Initialize the video writer
fourcc = cv.VideoWriter_fourcc(*"MJPG")
writer = cv.VideoWriter(FLAGS.video_output_path, fourcc, 30,
(frame.shape[1], frame.shape[0]), True)
writer.write(frame)
print ("[INFO] Cleaning up...")
writer.release()
vid.release()
else:
count = 0
vid = cv.VideoCapture(0)
while True:
_, frame = vid.read()
height, width = frame.shape[:2]
if count == 0:
frame, boxes, confidences, classids, idxs = infer_image(net, layer_names, \
height, width, frame, colors, labels, FLAGS)
count += 1
else:
frame, boxes, confidences, classids, idxs = infer_image(net, layer_names, \
height, width, frame, colors, labels, FLAGS, boxes, confidences, classids, idxs, infer=False)
count = (count + 1) % 6
cv.imshow('webcam', frame)
if cv.waitKey(1) & 0xFF == ord('q'):
break
vid.release()
cv.destroyAllWindows()
| 26.20122 | 105 | 0.658366 | import numpy as np
import argparse
import cv2 as cv
import subprocess
import time
import os
from support import infer_image, show_image
FLAGS = []
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model-path',
type=str,
default='./model/',
help='The directory where the model weights and \
configuration files are.')
parser.add_argument('-w', '--weights',
type=str,
default='./model/yolov3.weights',
help='Path to the file which contains the weights \
for YOLOv3.')
parser.add_argument('-cfg', '--config',
type=str,
default='./model/yolov3.cfg',
help='Path to the configuration file for the YOLOv3 model.')
parser.add_argument('-i', '--image-path',
type=str,
help='The path to the image file')
parser.add_argument('-v', '--video-path',
type=str,
help='The path to the video file')
parser.add_argument('-vo', '--video-output-path',
type=str,
default='./output.avi',
help='The path of the output video file')
parser.add_argument('-l', '--labels',
type=str,
default='./model/coco-labels',
help='Path to the file having the \
labels in a new-line seperated way.')
parser.add_argument('-c', '--confidence',
type=float,
default=0.5,
help='The model will reject boundaries which has a \
probabiity less than the confidence value. \
default: 0.5')
parser.add_argument('-th', '--threshold',
type=float,
default=0.3,
help='The threshold to use when applying the \
Non-Max Suppresion')
parser.add_argument('--download-model',
type=bool,
default=False,
help='Set to True, if the model weights and configurations \
are not present on your local machine.')
parser.add_argument('-t', '--show-time',
type=bool,
default=False,
help='Show the time taken to infer each image.')
FLAGS, unparsed = parser.parse_known_args()
if FLAGS.download_model:
subprocess.call(['./model/get_model.sh'])
labels = open(FLAGS.labels).read().strip().split('\n')
colors = np.random.randint(0, 255, size=(len(labels), 3), dtype='uint8')
net = cv.dnn.readNetFromDarknet(FLAGS.config, FLAGS.weights)
layer_names = net.getLayerNames()
layer_names = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
if FLAGS.image_path is None and FLAGS.video_path is None:
print ('Neither path to an image or path to video provided')
print ('Starting Inference on Webcam')
if FLAGS.image_path:
try:
img = cv.imread(FLAGS.image_path)
height, width = img.shape[:2]
except:
raise 'Image cannot be loaded!\n\
Please check the path provided!'
finally:
img, _, _, _, _ = infer_image(net, layer_names, height, width, img, colors, labels, FLAGS)
show_image(img)
elif FLAGS.video_path:
try:
vid = cv.VideoCapture(FLAGS.video_path)
height, width = None, None
writer = None
except:
raise 'Video cannot be loaded!\n\
Please check the path provided!'
finally:
print("Inferencing on video:", FLAGS.video_path)
while True:
grabbed, frame = vid.read()
if not grabbed:
break
if width is None or height is None:
height, width = frame.shape[:2]
frame, _, _, _, _ = infer_image(net, layer_names, height, width, frame, colors, labels, FLAGS)
if writer is None:
fourcc = cv.VideoWriter_fourcc(*"MJPG")
writer = cv.VideoWriter(FLAGS.video_output_path, fourcc, 30,
(frame.shape[1], frame.shape[0]), True)
writer.write(frame)
print ("[INFO] Cleaning up...")
writer.release()
vid.release()
else:
count = 0
vid = cv.VideoCapture(0)
while True:
_, frame = vid.read()
height, width = frame.shape[:2]
if count == 0:
frame, boxes, confidences, classids, idxs = infer_image(net, layer_names, \
height, width, frame, colors, labels, FLAGS)
count += 1
else:
frame, boxes, confidences, classids, idxs = infer_image(net, layer_names, \
height, width, frame, colors, labels, FLAGS, boxes, confidences, classids, idxs, infer=False)
count = (count + 1) % 6
cv.imshow('webcam', frame)
if cv.waitKey(1) & 0xFF == ord('q'):
break
vid.release()
cv.destroyAllWindows()
| true | true |
f730ad586dc64abb0dc959278121e0408d62bbe2 | 1,041 | py | Python | src/Python/12_Napisy_anagramy_palindromy/Zad6.py | djeada/Nauka-programowania | b1eb6840c15b830acf552f0a0fc5cc692759152f | [
"MIT"
] | 3 | 2020-09-19T21:38:30.000Z | 2022-03-30T11:02:26.000Z | src/Python/12_Napisy_anagramy_palindromy/Zad6.py | djeada/Nauka-programowania | b1eb6840c15b830acf552f0a0fc5cc692759152f | [
"MIT"
] | null | null | null | src/Python/12_Napisy_anagramy_palindromy/Zad6.py | djeada/Nauka-programowania | b1eb6840c15b830acf552f0a0fc5cc692759152f | [
"MIT"
] | 1 | 2022-02-04T09:13:20.000Z | 2022-02-04T09:13:20.000Z | """
Sprawdz czy istnieje permutacja danego slowa bedaca palindromem.
"""
# Wersja 1
def znajdz_permutacje(napis, start, koniec, wynik=[]):
if start >= koniec:
if "".join(napis) not in wynik:
wynik.append("".join(napis))
else:
for i in range(start, koniec):
napis[start], napis[i] = napis[i], napis[start]
znajdz_permutacje(napis, start + 1, koniec, wynik)
napis[start], napis[i] = napis[i], napis[start]
return wynik
def czy_palindrom(slowo):
for i in range(len(slowo) // 2):
if slowo[i] != slowo[-i - 1]:
return False
return True
def czy_istnieje_permutacja_bedaca_palindromem_v1(slowo):
permutacje = znajdz_permutacje(list(slowo), 0, len(slowo))
wynik = []
for p in permutacje:
if czy_palindrom(p):
wynik.append(p)
return wynik
# testy poprawnosci
slowo = "adamm"
wynik = ["madam", "amdma"]
assert sorted(czy_istnieje_permutacja_bedaca_palindromem_v1(slowo)) == sorted(wynik)
| 20.82 | 84 | 0.621518 |
def znajdz_permutacje(napis, start, koniec, wynik=[]):
if start >= koniec:
if "".join(napis) not in wynik:
wynik.append("".join(napis))
else:
for i in range(start, koniec):
napis[start], napis[i] = napis[i], napis[start]
znajdz_permutacje(napis, start + 1, koniec, wynik)
napis[start], napis[i] = napis[i], napis[start]
return wynik
def czy_palindrom(slowo):
for i in range(len(slowo) // 2):
if slowo[i] != slowo[-i - 1]:
return False
return True
def czy_istnieje_permutacja_bedaca_palindromem_v1(slowo):
permutacje = znajdz_permutacje(list(slowo), 0, len(slowo))
wynik = []
for p in permutacje:
if czy_palindrom(p):
wynik.append(p)
return wynik
slowo = "adamm"
wynik = ["madam", "amdma"]
assert sorted(czy_istnieje_permutacja_bedaca_palindromem_v1(slowo)) == sorted(wynik)
| true | true |
f730adbf9c1f3a579711a5941736814a6d65e98c | 7,139 | py | Python | Tiny-ImageNet/SSKD/cifar.py | UBCDingXin/cGAN-KD | c32a4b014fe024222101ff11d63de518448f7f8d | [
"MIT"
] | 1 | 2021-08-21T09:19:17.000Z | 2021-08-21T09:19:17.000Z | CIFAR/CIFAR_50K/SSKD/cifar.py | UBCDingXin/cGAN-KD | c32a4b014fe024222101ff11d63de518448f7f8d | [
"MIT"
] | null | null | null | CIFAR/CIFAR_50K/SSKD/cifar.py | UBCDingXin/cGAN-KD | c32a4b014fe024222101ff11d63de518448f7f8d | [
"MIT"
] | 2 | 2021-04-15T08:23:49.000Z | 2021-09-15T06:52:25.000Z | from __future__ import print_function
from PIL import Image
import os
import os.path
import numpy as np
import sys
import pickle
import torch
import torch.utils.data as data
from itertools import permutations
class VisionDataset(data.Dataset):
    """Base class for the vision datasets below.

    Handles ``root`` path expansion and the mutually exclusive
    ``transforms`` vs ``transform``/``target_transform`` bookkeeping.
    Subclasses must implement ``__getitem__`` and ``__len__``.
    """
    _repr_indent = 4  # spaces used to indent the body of __repr__

    def __init__(self, root, transforms=None, transform=None, target_transform=None):
        # ``torch._six.string_classes`` was removed in torch >= 1.13; on
        # Python 3 a plain ``str`` check is equivalent.
        if isinstance(root, str):
            root = os.path.expanduser(root)
        self.root = root
        has_transforms = transforms is not None
        has_separate_transform = transform is not None or target_transform is not None
        if has_transforms and has_separate_transform:
            raise ValueError("Only transforms or transform/target_transform can "
                             "be passed as argument")
        # for backwards-compatibility
        self.transform = transform
        self.target_transform = target_transform
        if has_separate_transform:
            # NOTE(review): StandardTransform is neither defined nor imported
            # in this file, so this branch raises NameError at runtime --
            # confirm against the torchvision source this was copied from.
            transforms = StandardTransform(transform, target_transform)
        self.transforms = transforms

    def __getitem__(self, index):
        raise NotImplementedError

    def __len__(self):
        raise NotImplementedError

    def __repr__(self):
        # "Dataset <name>" header plus an indented summary of size, root
        # location, subclass extras and the transform pipeline.
        head = "Dataset " + self.__class__.__name__
        body = ["Number of datapoints: {}".format(self.__len__())]
        if self.root is not None:
            body.append("Root location: {}".format(self.root))
        body += self.extra_repr().splitlines()
        if self.transforms is not None:
            body += [repr(self.transforms)]
        lines = [head] + [" " * self._repr_indent + line for line in body]
        return '\n'.join(lines)

    def _format_transform_repr(self, transform, head):
        # Prefix the first repr line with *head* and align the rest under it.
        lines = transform.__repr__().splitlines()
        return (["{}{}".format(head, lines[0])] +
                ["{}{}".format(" " * len(head), line) for line in lines[1:]])

    def extra_repr(self):
        """Extra lines for __repr__; subclasses may override."""
        return ""
class CIFAR10(VisionDataset):
    """`CIFAR10 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.

    Expects the extracted pickled batch files to already exist under
    ``root/base_folder``.  ``__getitem__`` applies a random horizontal flip
    in training mode and always returns each image as a stack of its four
    90-degree rotations (the self-supervision input format used by SSKD).
    """
    base_folder = 'cifar-10-batches-py'
    url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
    filename = "cifar-10-python.tar.gz"
    tgz_md5 = 'c58f30108f718f92721af3b95e74349a'
    # (file name, md5 checksum) pairs for the pickled batches.
    train_list = [
        ['data_batch_1', 'c99cafc152244af753f735de768cd75f'],
        ['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'],
        ['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'],
        ['data_batch_4', '634d18415352ddfa80567beed471001a'],
        ['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'],
    ]
    test_list = [
        ['test_batch', '40351d587109b95175f43aff81a1287e'],
    ]
    meta = {
        'filename': 'batches.meta',
        'key': 'label_names',
        'md5': '5ff9c542aee3614f3951f8cda6e48888',
    }

    def __init__(self, root, train=True,
                 transform=None, download=False):
        """
        Args:
            root: dataset root directory containing ``base_folder``.
            train: if True load the five training batches, else the test batch.
            transform: transform applied to each of the four rotated copies.
            download: unsupported in this fork; passing True raises ValueError.
        """
        super(CIFAR10, self).__init__(root)
        self.transform = transform
        self.train = train  # training set or test set
        if download:
            # Downloading is deliberately disabled in this fork; the data
            # must be prepared ahead of time.  (An unreachable exit() that
            # followed this raise was removed.)
            raise ValueError('cannot download.')
        if self.train:
            downloaded_list = self.train_list
        else:
            downloaded_list = self.test_list
        self.data = []
        self.targets = []
        # Load the pickled numpy arrays batch by batch.
        for file_name, checksum in downloaded_list:
            file_path = os.path.join(self.root, self.base_folder, file_name)
            with open(file_path, 'rb') as f:
                if sys.version_info[0] == 2:
                    entry = pickle.load(f)
                else:
                    entry = pickle.load(f, encoding='latin1')
                self.data.append(entry['data'])
                if 'labels' in entry:
                    self.targets.extend(entry['labels'])
                else:
                    # CIFAR-100 style batches store labels as 'fine_labels'.
                    self.targets.extend(entry['fine_labels'])
        self.data = np.vstack(self.data).reshape(-1, 3, 32, 32)
        self.data = self.data.transpose((0, 2, 3, 1))  # convert to HWC
        self._load_meta()

    def _load_meta(self):
        """Read the class-name metadata pickle and build ``class_to_idx``."""
        path = os.path.join(self.root, self.base_folder, self.meta['filename'])
        with open(path, 'rb') as infile:
            if sys.version_info[0] == 2:
                data = pickle.load(infile)
            else:
                data = pickle.load(infile, encoding='latin1')
            self.classes = data[self.meta['key']]
        self.class_to_idx = {_class: i for i, _class in enumerate(self.classes)}

    def __getitem__(self, index):
        """Return ``(img, target)`` where ``img`` is a (4, C, H, W) stack of
        the 0/90/180/270-degree rotations, each passed through
        ``self.transform``.  Training mode adds a 50% horizontal flip first.
        """
        img, target = self.data[index], self.targets[index]
        if self.train:
            if np.random.rand() < 0.5:
                img = img[:,::-1,:]
        img0 = np.rot90(img, 0).copy()
        img0 = Image.fromarray(img0)
        img0 = self.transform(img0)
        img1 = np.rot90(img, 1).copy()
        img1 = Image.fromarray(img1)
        img1 = self.transform(img1)
        img2 = np.rot90(img, 2).copy()
        img2 = Image.fromarray(img2)
        img2 = self.transform(img2)
        img3 = np.rot90(img, 3).copy()
        img3 = Image.fromarray(img3)
        img3 = self.transform(img3)
        img = torch.stack([img0, img1, img2, img3])
        return img, target

    def __len__(self):
        return len(self.data)

    def _check_integrity(self):
        # NOTE(review): ``check_integrity`` is not defined or imported in this
        # file, so calling this raises NameError -- confirm against the
        # torchvision source this loader was copied from.
        root = self.root
        for fentry in (self.train_list + self.test_list):
            filename, md5 = fentry[0], fentry[1]
            fpath = os.path.join(root, self.base_folder, filename)
            if not check_integrity(fpath, md5):
                return False
        return True

    def download(self):
        """Download and extract the archive.

        Dead code in this fork: ``download_url`` is not imported here and
        ``__init__`` refuses ``download=True``.
        """
        import tarfile
        if self._check_integrity():
            print('Files already downloaded and verified')
            return
        download_url(self.url, self.root, self.filename, self.tgz_md5)
        # extract file
        with tarfile.open(os.path.join(self.root, self.filename), "r:gz") as tar:
            tar.extractall(path=self.root)

    def extra_repr(self):
        return "Split: {}".format("Train" if self.train is True else "Test")
class CIFAR100(CIFAR10):
    """`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.
    This is a subclass of the `CIFAR10` Dataset.
    """
    # Same loading logic as CIFAR10; only the archive layout and metadata
    # differ.  CIFAR-100 ships a single 'train' and a single 'test' pickle.
    base_folder = 'cifar-100-python'
    url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
    filename = "cifar-100-python.tar.gz"
    tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
    # (file name, md5 checksum) pairs consumed by __init__/_check_integrity.
    train_list = [
        ['train', '16019d7e3df5f24257cddd939b257f8d'],
    ]
    test_list = [
        ['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
    ]
    meta = {
        'filename': 'meta',
        'key': 'fine_label_names',  # use the fine (100-class) label names
        'md5': '7973b15100ade9c7d40fb424638fde48',
    }
| 32.747706 | 86 | 0.595321 | from __future__ import print_function
from PIL import Image
import os
import os.path
import numpy as np
import sys
import pickle
import torch
import torch.utils.data as data
from itertools import permutations
class VisionDataset(data.Dataset):
    """Base class for the vision datasets below.

    Handles ``root`` path expansion and the mutually exclusive
    ``transforms`` vs ``transform``/``target_transform`` bookkeeping.
    Subclasses must implement ``__getitem__`` and ``__len__``.
    """
    _repr_indent = 4  # spaces used to indent the body of __repr__

    def __init__(self, root, transforms=None, transform=None, target_transform=None):
        # ``torch._six.string_classes`` was removed in torch >= 1.13; on
        # Python 3 a plain ``str`` check is equivalent.
        if isinstance(root, str):
            root = os.path.expanduser(root)
        self.root = root
        has_transforms = transforms is not None
        has_separate_transform = transform is not None or target_transform is not None
        if has_transforms and has_separate_transform:
            raise ValueError("Only transforms or transform/target_transform can "
                             "be passed as argument")
        self.transform = transform
        self.target_transform = target_transform
        if has_separate_transform:
            # NOTE(review): StandardTransform is neither defined nor imported
            # in this file, so this branch raises NameError at runtime --
            # confirm against the torchvision source this was copied from.
            transforms = StandardTransform(transform, target_transform)
        self.transforms = transforms

    def __getitem__(self, index):
        raise NotImplementedError

    def __len__(self):
        raise NotImplementedError

    def __repr__(self):
        # "Dataset <name>" header plus an indented summary of size, root
        # location, subclass extras and the transform pipeline.
        head = "Dataset " + self.__class__.__name__
        body = ["Number of datapoints: {}".format(self.__len__())]
        if self.root is not None:
            body.append("Root location: {}".format(self.root))
        body += self.extra_repr().splitlines()
        if self.transforms is not None:
            body += [repr(self.transforms)]
        lines = [head] + [" " * self._repr_indent + line for line in body]
        return '\n'.join(lines)

    def _format_transform_repr(self, transform, head):
        # Prefix the first repr line with *head* and align the rest under it.
        lines = transform.__repr__().splitlines()
        return (["{}{}".format(head, lines[0])] +
                ["{}{}".format(" " * len(head), line) for line in lines[1:]])

    def extra_repr(self):
        """Extra lines for __repr__; subclasses may override."""
        return ""
class CIFAR10(VisionDataset):
    """`CIFAR10 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.

    Expects the extracted pickled batch files to already exist under
    ``root/base_folder``.  ``__getitem__`` applies a random horizontal flip
    in training mode and always returns each image as a stack of its four
    90-degree rotations (the self-supervision input format used by SSKD).
    """
    base_folder = 'cifar-10-batches-py'
    url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
    filename = "cifar-10-python.tar.gz"
    tgz_md5 = 'c58f30108f718f92721af3b95e74349a'
    # (file name, md5 checksum) pairs for the pickled batches.
    train_list = [
        ['data_batch_1', 'c99cafc152244af753f735de768cd75f'],
        ['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'],
        ['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'],
        ['data_batch_4', '634d18415352ddfa80567beed471001a'],
        ['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'],
    ]
    test_list = [
        ['test_batch', '40351d587109b95175f43aff81a1287e'],
    ]
    meta = {
        'filename': 'batches.meta',
        'key': 'label_names',
        'md5': '5ff9c542aee3614f3951f8cda6e48888',
    }

    def __init__(self, root, train=True,
                 transform=None, download=False):
        """
        Args:
            root: dataset root directory containing ``base_folder``.
            train: if True load the five training batches, else the test batch.
            transform: transform applied to each of the four rotated copies.
            download: unsupported in this fork; passing True raises ValueError.
        """
        super(CIFAR10, self).__init__(root)
        self.transform = transform
        self.train = train  # training set or test set
        if download:
            # Downloading is deliberately disabled in this fork; the data
            # must be prepared ahead of time.  (An unreachable exit() that
            # followed this raise was removed.)
            raise ValueError('cannot download.')
        if self.train:
            downloaded_list = self.train_list
        else:
            downloaded_list = self.test_list
        self.data = []
        self.targets = []
        # Load the pickled numpy arrays batch by batch.
        for file_name, checksum in downloaded_list:
            file_path = os.path.join(self.root, self.base_folder, file_name)
            with open(file_path, 'rb') as f:
                if sys.version_info[0] == 2:
                    entry = pickle.load(f)
                else:
                    entry = pickle.load(f, encoding='latin1')
                self.data.append(entry['data'])
                if 'labels' in entry:
                    self.targets.extend(entry['labels'])
                else:
                    # CIFAR-100 style batches store labels as 'fine_labels'.
                    self.targets.extend(entry['fine_labels'])
        self.data = np.vstack(self.data).reshape(-1, 3, 32, 32)
        self.data = self.data.transpose((0, 2, 3, 1))  # convert to HWC
        self._load_meta()

    def _load_meta(self):
        """Read the class-name metadata pickle and build ``class_to_idx``."""
        path = os.path.join(self.root, self.base_folder, self.meta['filename'])
        with open(path, 'rb') as infile:
            if sys.version_info[0] == 2:
                data = pickle.load(infile)
            else:
                data = pickle.load(infile, encoding='latin1')
            self.classes = data[self.meta['key']]
        self.class_to_idx = {_class: i for i, _class in enumerate(self.classes)}

    def __getitem__(self, index):
        """Return ``(img, target)`` where ``img`` is a (4, C, H, W) stack of
        the 0/90/180/270-degree rotations, each passed through
        ``self.transform``.  Training mode adds a 50% horizontal flip first.
        """
        img, target = self.data[index], self.targets[index]
        if self.train:
            if np.random.rand() < 0.5:
                img = img[:,::-1,:]
        img0 = np.rot90(img, 0).copy()
        img0 = Image.fromarray(img0)
        img0 = self.transform(img0)
        img1 = np.rot90(img, 1).copy()
        img1 = Image.fromarray(img1)
        img1 = self.transform(img1)
        img2 = np.rot90(img, 2).copy()
        img2 = Image.fromarray(img2)
        img2 = self.transform(img2)
        img3 = np.rot90(img, 3).copy()
        img3 = Image.fromarray(img3)
        img3 = self.transform(img3)
        img = torch.stack([img0, img1, img2, img3])
        return img, target

    def __len__(self):
        return len(self.data)

    def _check_integrity(self):
        # NOTE(review): ``check_integrity`` is not defined or imported in this
        # file, so calling this raises NameError -- confirm against the
        # torchvision source this loader was copied from.
        root = self.root
        for fentry in (self.train_list + self.test_list):
            filename, md5 = fentry[0], fentry[1]
            fpath = os.path.join(root, self.base_folder, filename)
            if not check_integrity(fpath, md5):
                return False
        return True

    def download(self):
        """Download and extract the archive.

        Dead code in this fork: ``download_url`` is not imported here and
        ``__init__`` refuses ``download=True``.
        """
        import tarfile
        if self._check_integrity():
            print('Files already downloaded and verified')
            return
        download_url(self.url, self.root, self.filename, self.tgz_md5)
        with tarfile.open(os.path.join(self.root, self.filename), "r:gz") as tar:
            tar.extractall(path=self.root)

    def extra_repr(self):
        return "Split: {}".format("Train" if self.train is True else "Test")
class CIFAR100(CIFAR10):
    """`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.
    This is a subclass of the `CIFAR10` Dataset: same loading logic, only
    the archive layout and metadata differ.
    """
    base_folder = 'cifar-100-python'
    url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
    filename = "cifar-100-python.tar.gz"
    tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
    # CIFAR-100 ships a single 'train' and a single 'test' pickle.
    train_list = [
        ['train', '16019d7e3df5f24257cddd939b257f8d'],
    ]
    test_list = [
        ['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
    ]
    meta = {
        'filename': 'meta',
        'key': 'fine_label_names',  # use the fine (100-class) label names
        'md5': '7973b15100ade9c7d40fb424638fde48',
    }
| true | true |
f730ae6a31dcc419b37475dbdeeafb8f61dff61c | 3,593 | py | Python | src/diffwave/inference.py | egaebel/diffwave | c5d7d8d90b662f208ecdfba616782559146dc116 | [
"Apache-2.0"
] | null | null | null | src/diffwave/inference.py | egaebel/diffwave | c5d7d8d90b662f208ecdfba616782559146dc116 | [
"Apache-2.0"
] | null | null | null | src/diffwave/inference.py | egaebel/diffwave | c5d7d8d90b662f208ecdfba616782559146dc116 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 LMNT, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import os
import torch
import torchaudio
from argparse import ArgumentParser
from diffwave.params import AttrDict, params as base_params
from diffwave.model import DiffWave
models = {}  # cache: model_dir -> loaded, eval-mode DiffWave instance


def load_model(model_dir, device):
    """Load a DiffWave checkpoint into the module-level ``models`` cache.

    ``model_dir`` may be a directory containing ``weights.pt`` or a direct
    path to a checkpoint file.
    """
    global models
    weights_path = f"{model_dir}/weights.pt"
    checkpoint_path = weights_path if os.path.exists(weights_path) else model_dir
    checkpoint = torch.load(checkpoint_path, map_location=device)
    model = DiffWave(AttrDict(base_params)).to(device)
    model.load_state_dict(checkpoint["model"])
    model.eval()
    models[model_dir] = model
def predict(spectrogram, model_dir=None, params=None, device=torch.device("cuda")):
    """Vocode a mel ``spectrogram`` into a waveform with a cached DiffWave model.

    Runs the reverse diffusion process over the model's noise schedule.

    Args:
        spectrogram: mel tensor of shape (mels, frames) or (batch, mels, frames).
        model_dir: key passed to ``load_model`` (directory or checkpoint path).
        params: overrides applied to the model's params via ``override``.
        device: device to run inference on.

    Returns:
        (audio, sample_rate) where ``audio`` is clamped to [-1, 1].
    """
    global models
    # Lazy load model.
    if model_dir not in models:
        load_model(model_dir, device)
    model = models[model_dir]
    model.params.override(params)
    with torch.no_grad():
        beta = np.array(model.params.noise_schedule)
        alpha = 1 - beta
        alpha_cum = np.cumprod(alpha)
        # Expand rank 2 tensors by adding a batch dimension.
        if len(spectrogram.shape) == 2:
            spectrogram = spectrogram.unsqueeze(0)
        spectrogram = spectrogram.to(device)
        # Start from Gaussian noise: hop_samples audio samples per mel frame.
        audio = torch.randn(
            spectrogram.shape[0],
            model.params.hop_samples * spectrogram.shape[-1],
            device=device,
        )
        # (An unused ``noise_scale`` precomputation was removed here.)
        # Reverse diffusion: iterate the schedule from the last step to 0.
        for n in range(len(alpha) - 1, -1, -1):
            c1 = 1 / alpha[n] ** 0.5
            c2 = beta[n] / (1 - alpha_cum[n]) ** 0.5
            audio = c1 * (
                audio
                - c2
                * model(
                    audio, spectrogram, torch.tensor([n], device=audio.device)
                ).squeeze(1)
            )
            if n > 0:
                # Posterior noise term, added at every step except the last.
                noise = torch.randn_like(audio)
                sigma = (
                    (1.0 - alpha_cum[n - 1]) / (1.0 - alpha_cum[n]) * beta[n]
                ) ** 0.5
                audio += sigma * noise
        audio = torch.clamp(audio, -1.0, 1.0)
    return audio, model.params.sample_rate
def main(args):
    """CLI entry point: vocode a saved spectrogram and write a wav file."""
    mel = torch.from_numpy(np.load(args.spectrogram_path))
    waveform, sample_rate = predict(mel, model_dir=args.model_dir)
    torchaudio.save(args.output, waveform.cpu(), sample_rate=sample_rate)
if __name__ == "__main__":
    # Command line: diffwave.inference MODEL_DIR SPECTROGRAM [-o OUTPUT.wav]
    parser = ArgumentParser(
        description="runs inference on a spectrogram file generated by diffwave.preprocess"
    )
    parser.add_argument(
        "model_dir",
        help="directory containing a trained model (or full path to weights.pt file)",
    )
    parser.add_argument(
        "spectrogram_path",
        help="path to a spectrogram file generated by diffwave.preprocess",
    )
    parser.add_argument("--output", "-o", default="output.wav", help="output file name")
    main(parser.parse_args())
| 33.579439 | 91 | 0.618981 |
import numpy as np
import os
import torch
import torchaudio
from argparse import ArgumentParser
from diffwave.params import AttrDict, params as base_params
from diffwave.model import DiffWave
# Cache of loaded DiffWave models, keyed by model_dir.
models = {}
def load_model(model_dir, device):
    """Load a DiffWave checkpoint into the module-level ``models`` cache.

    ``model_dir`` may be a directory containing ``weights.pt`` or a direct
    path to a checkpoint file.
    """
    global models
    if os.path.exists(f"{model_dir}/weights.pt"):
        checkpoint = torch.load(f"{model_dir}/weights.pt", map_location=device)
    else:
        checkpoint = torch.load(model_dir, map_location=device)
    model = DiffWave(AttrDict(base_params)).to(device)
    model.load_state_dict(checkpoint["model"])
    model.eval()
    models[model_dir] = model
def predict(spectrogram, model_dir=None, params=None, device=torch.device("cuda")):
    """Vocode a mel ``spectrogram`` into a waveform with a cached DiffWave model.

    Runs the reverse diffusion process over the model's noise schedule and
    returns ``(audio, sample_rate)`` with ``audio`` clamped to [-1, 1].
    """
    global models
    # Lazily load and cache the model on first use.
    if not model_dir in models:
        load_model(model_dir, device)
    model = models[model_dir]
    model.params.override(params)
    with torch.no_grad():
        beta = np.array(model.params.noise_schedule)
        alpha = 1 - beta
        alpha_cum = np.cumprod(alpha)
        # Expand rank-2 input by adding a batch dimension.
        if len(spectrogram.shape) == 2:
            spectrogram = spectrogram.unsqueeze(0)
        spectrogram = spectrogram.to(device)
        # Start from Gaussian noise: hop_samples audio samples per mel frame.
        audio = torch.randn(
            spectrogram.shape[0],
            model.params.hop_samples * spectrogram.shape[-1],
            device=device,
        )
        # NOTE: noise_scale is computed but never used below.
        noise_scale = torch.from_numpy(alpha_cum ** 0.5).float().unsqueeze(1).to(device)
        # Reverse diffusion: iterate the schedule from the last step to 0.
        for n in range(len(alpha) - 1, -1, -1):
            c1 = 1 / alpha[n] ** 0.5
            c2 = beta[n] / (1 - alpha_cum[n]) ** 0.5
            audio = c1 * (
                audio
                - c2
                * model(
                    audio, spectrogram, torch.tensor([n], device=audio.device)
                ).squeeze(1)
            )
            if n > 0:
                # Posterior noise term, added at every step except the last.
                noise = torch.randn_like(audio)
                sigma = (
                    (1.0 - alpha_cum[n - 1]) / (1.0 - alpha_cum[n]) * beta[n]
                ) ** 0.5
                audio += sigma * noise
        audio = torch.clamp(audio, -1.0, 1.0)
    return audio, model.params.sample_rate
def main(args):
    """CLI entry point: vocode a saved spectrogram and write a wav file."""
    spectrogram = torch.from_numpy(np.load(args.spectrogram_path))
    audio, sr = predict(spectrogram, model_dir=args.model_dir)
    torchaudio.save(args.output, audio.cpu(), sample_rate=sr)
if __name__ == "__main__":
    # Command line: diffwave.inference MODEL_DIR SPECTROGRAM [-o OUTPUT.wav]
    parser = ArgumentParser(
        description="runs inference on a spectrogram file generated by diffwave.preprocess"
    )
    parser.add_argument(
        "model_dir",
        help="directory containing a trained model (or full path to weights.pt file)",
    )
    parser.add_argument(
        "spectrogram_path",
        help="path to a spectrogram file generated by diffwave.preprocess",
    )
    parser.add_argument("--output", "-o", default="output.wav", help="output file name")
    main(parser.parse_args())
| true | true |
f730ae92f94c6a1c98bcbfd1b9c656dbf9f0d677 | 576 | py | Python | emr_mine_python_scipts/pq_tree/Queue.py | debprakash/emr-view | 6b5690c2335482e97b8dabbdec616c8a1d7df898 | [
"MIT"
] | null | null | null | emr_mine_python_scipts/pq_tree/Queue.py | debprakash/emr-view | 6b5690c2335482e97b8dabbdec616c8a1d7df898 | [
"MIT"
] | null | null | null | emr_mine_python_scipts/pq_tree/Queue.py | debprakash/emr-view | 6b5690c2335482e97b8dabbdec616c8a1d7df898 | [
"MIT"
] | 1 | 2018-10-24T02:54:40.000Z | 2018-10-24T02:54:40.000Z | '''
Created on Dec 30, 2010
@author: patnaik
'''
from collections import deque
class Queue(object):
    """FIFO queue backed by ``collections.deque``."""

    def __init__(self, data=None):
        # A falsy ``data`` (None or an empty iterable) starts the queue empty.
        self.internal_queue = deque(data) if data else deque()

    def enqueue(self, value):
        """Append *value* to the back of the queue."""
        self.internal_queue.append(value)

    def dequeue(self):
        """Remove and return the front element; raises IndexError when empty."""
        return self.internal_queue.popleft()

    def __len__(self):
        return len(self.internal_queue)

    def __str__(self):
        return "%s" % (list(self.internal_queue))
from collections import deque
class Queue(object):
    """FIFO queue backed by ``collections.deque``."""
    def __init__(self, data = None):
        # A falsy ``data`` (None or an empty iterable) starts the queue empty.
        if data:
            self.internal_queue = deque(data)
        else:
            self.internal_queue = deque()
    def enqueue(self, value):
        # Append to the back of the queue.
        self.internal_queue.append(value)
    def dequeue(self):
        # Remove and return the front element; raises IndexError when empty.
        return self.internal_queue.popleft()
    def __len__(self):
        return len(self.internal_queue)
    def __str__(self):
        return "%s" % (list(self.internal_queue))
f730af67a6528163daebb523295431d1e6c54c82 | 16,885 | py | Python | intersight/model/storage_base_host.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 5 | 2021-12-16T15:13:32.000Z | 2022-03-29T16:09:54.000Z | intersight/model/storage_base_host.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 4 | 2022-01-25T19:05:51.000Z | 2022-03-29T20:18:37.000Z | intersight/model/storage_base_host.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 2 | 2020-07-07T15:01:08.000Z | 2022-01-31T04:27:35.000Z | """
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
    """Import the related intersight model modules on first use.

    These modules participate in import cycles with this one, so the
    generated code defers their import and publishes the names through
    globals() instead of importing them at module load time.
    """
    from intersight.model.display_names import DisplayNames
    from intersight.model.mo_base_mo import MoBaseMo
    from intersight.model.mo_base_mo_relationship import MoBaseMoRelationship
    from intersight.model.mo_tag import MoTag
    from intersight.model.mo_version_context import MoVersionContext
    from intersight.model.storage_base_capacity import StorageBaseCapacity
    from intersight.model.storage_base_host_all_of import StorageBaseHostAllOf
    from intersight.model.storage_base_initiator import StorageBaseInitiator
    from intersight.model.storage_hitachi_host import StorageHitachiHost
    from intersight.model.storage_net_app_initiator_group import StorageNetAppInitiatorGroup
    from intersight.model.storage_pure_host import StoragePureHost
    # Publish the lazily imported classes at module scope.
    globals()['DisplayNames'] = DisplayNames
    globals()['MoBaseMo'] = MoBaseMo
    globals()['MoBaseMoRelationship'] = MoBaseMoRelationship
    globals()['MoTag'] = MoTag
    globals()['MoVersionContext'] = MoVersionContext
    globals()['StorageBaseCapacity'] = StorageBaseCapacity
    globals()['StorageBaseHostAllOf'] = StorageBaseHostAllOf
    globals()['StorageBaseInitiator'] = StorageBaseInitiator
    globals()['StorageHitachiHost'] = StorageHitachiHost
    globals()['StorageNetAppInitiatorGroup'] = StorageNetAppInitiatorGroup
    globals()['StoragePureHost'] = StoragePureHost
class StorageBaseHost(ModelComposed):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # The ClassId/ObjectType discriminator may only take one of these
    # concrete-subtype names.
    allowed_values = {
        ('class_id',): {
            'HITACHIHOST': "storage.HitachiHost",
            'NETAPPINITIATORGROUP': "storage.NetAppInitiatorGroup",
            'PUREHOST': "storage.PureHost",
        },
        ('object_type',): {
            'HITACHIHOST': "storage.HitachiHost",
            'NETAPPINITIATORGROUP': "storage.NetAppInitiatorGroup",
            'PUREHOST': "storage.PureHost",
        },
    }
    # No field-level validation constraints are generated for this model.
    validations = {
    }
    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'class_id': (str,),  # noqa: E501
            'object_type': (str,),  # noqa: E501
            'description': (str,),  # noqa: E501
            'initiators': ([StorageBaseInitiator], none_type,),  # noqa: E501
            'name': (str,),  # noqa: E501
            'os_type': (str,),  # noqa: E501
            'storage_utilization': (StorageBaseCapacity,),  # noqa: E501
            'account_moid': (str,),  # noqa: E501
            'create_time': (datetime,),  # noqa: E501
            'domain_group_moid': (str,),  # noqa: E501
            'mod_time': (datetime,),  # noqa: E501
            'moid': (str,),  # noqa: E501
            'owners': ([str], none_type,),  # noqa: E501
            'shared_scope': (str,),  # noqa: E501
            'tags': ([MoTag], none_type,),  # noqa: E501
            'version_context': (MoVersionContext,),  # noqa: E501
            'ancestors': ([MoBaseMoRelationship], none_type,),  # noqa: E501
            'parent': (MoBaseMoRelationship,),  # noqa: E501
            'permission_resources': ([MoBaseMoRelationship], none_type,),  # noqa: E501
            'display_names': (DisplayNames,),  # noqa: E501
        }
    # Maps the ClassId payload value to the concrete model class; resolved
    # lazily because the subtype modules import this one.
    @cached_property
    def discriminator():
        lazy_import()
        val = {
            'storage.HitachiHost': StorageHitachiHost,
            'storage.NetAppInitiatorGroup': StorageNetAppInitiatorGroup,
            'storage.PureHost': StoragePureHost,
        }
        if not val:
            return None
        return {'class_id': val}
    # Python attribute name -> JSON property name used on the wire.
    attribute_map = {
        'class_id': 'ClassId',  # noqa: E501
        'object_type': 'ObjectType',  # noqa: E501
        'description': 'Description',  # noqa: E501
        'initiators': 'Initiators',  # noqa: E501
        'name': 'Name',  # noqa: E501
        'os_type': 'OsType',  # noqa: E501
        'storage_utilization': 'StorageUtilization',  # noqa: E501
        'account_moid': 'AccountMoid',  # noqa: E501
        'create_time': 'CreateTime',  # noqa: E501
        'domain_group_moid': 'DomainGroupMoid',  # noqa: E501
        'mod_time': 'ModTime',  # noqa: E501
        'moid': 'Moid',  # noqa: E501
        'owners': 'Owners',  # noqa: E501
        'shared_scope': 'SharedScope',  # noqa: E501
        'tags': 'Tags',  # noqa: E501
        'version_context': 'VersionContext',  # noqa: E501
        'ancestors': 'Ancestors',  # noqa: E501
        'parent': 'Parent',  # noqa: E501
        'permission_resources': 'PermissionResources',  # noqa: E501
        'display_names': 'DisplayNames',  # noqa: E501
    }
    # Internal bookkeeping attributes that bypass the model's own
    # attribute handling.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
        '_composed_instances',
        '_var_name_to_model_instances',
        '_additional_properties_model_instances',
    ])
    @convert_js_args_to_python_args
    def __init__(self, class_id, object_type, *args, **kwargs):  # noqa: E501
        """StorageBaseHost - a model defined in OpenAPI
        Args:
            class_id (str): The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data. The enum values provides the list of concrete types that can be instantiated from this abstract type.
            object_type (str): The fully-qualified name of the instantiated, concrete type. The value should be the same as the 'ClassId' property. The enum values provides the list of concrete types that can be instantiated from this abstract type.
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            description (str): Short description about the host.. [optional]  # noqa: E501
            initiators ([StorageBaseInitiator], none_type): [optional]  # noqa: E501
            name (str): Name of the host in storage array.. [optional]  # noqa: E501
            os_type (str): Operating system running on the host.. [optional]  # noqa: E501
            storage_utilization (StorageBaseCapacity): [optional]  # noqa: E501
            account_moid (str): The Account ID for this managed object.. [optional]  # noqa: E501
            create_time (datetime): The time when this managed object was created.. [optional]  # noqa: E501
            domain_group_moid (str): The DomainGroup ID for this managed object.. [optional]  # noqa: E501
            mod_time (datetime): The time when this managed object was last modified.. [optional]  # noqa: E501
            moid (str): The unique identifier of this Managed Object instance.. [optional]  # noqa: E501
            owners ([str], none_type): [optional]  # noqa: E501
            shared_scope (str): Intersight provides pre-built workflows, tasks and policies to end users through global catalogs. Objects that are made available through global catalogs are said to have a 'shared' ownership. Shared objects are either made globally available to all end users or restricted to end users based on their license entitlement. Users can use this property to differentiate the scope (global or a specific license tier) to which a shared MO belongs.. [optional]  # noqa: E501
            tags ([MoTag], none_type): [optional]  # noqa: E501
            version_context (MoVersionContext): [optional]  # noqa: E501
            ancestors ([MoBaseMoRelationship], none_type): An array of relationships to moBaseMo resources.. [optional]  # noqa: E501
            parent (MoBaseMoRelationship): [optional]  # noqa: E501
            permission_resources ([MoBaseMoRelationship], none_type): An array of relationships to moBaseMo resources.. [optional]  # noqa: E501
            display_names (DisplayNames): [optional]  # noqa: E501
        """
        # Pop framework-internal keyword arguments before validating the rest.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        required_args = {
            'class_id': class_id,
            'object_type': object_type,
        }
        model_args = {}
        model_args.update(required_args)
        model_args.update(kwargs)
        # Distribute the arguments across the composed (allOf) schemas and
        # collect the instances plus any leftover/unknown arguments.
        composed_info = validate_get_composed_info(
            constant_args, model_args, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        unused_args = composed_info[3]
        for var_name, var_value in required_args.items():
            setattr(self, var_name, var_value)
        for var_name, var_value in kwargs.items():
            if var_name in unused_args and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        not self._additional_properties_model_instances:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
    @cached_property
    def _composed_schemas():
        # we need this here to make our import statements work
        # we must store _composed_schemas in here so the code is only run
        # when we invoke this method. If we kept this at the class
        # level we would get an error beause the class level
        # code would be run when this module is imported, and these composed
        # classes don't exist yet because their module has not finished
        # loading
        lazy_import()
        return {
          'anyOf': [
          ],
          'allOf': [
              MoBaseMo,
              StorageBaseHostAllOf,
          ],
          'oneOf': [
          ],
        }
| 52.601246 | 1,678 | 0.640687 |
import re
import sys
from intersight.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
    """Import the related intersight model modules on first use.

    These modules participate in import cycles with this one, so the
    generated code defers their import and publishes the names through
    globals() instead of importing them at module load time.
    """
    from intersight.model.display_names import DisplayNames
    from intersight.model.mo_base_mo import MoBaseMo
    from intersight.model.mo_base_mo_relationship import MoBaseMoRelationship
    from intersight.model.mo_tag import MoTag
    from intersight.model.mo_version_context import MoVersionContext
    from intersight.model.storage_base_capacity import StorageBaseCapacity
    from intersight.model.storage_base_host_all_of import StorageBaseHostAllOf
    from intersight.model.storage_base_initiator import StorageBaseInitiator
    from intersight.model.storage_hitachi_host import StorageHitachiHost
    from intersight.model.storage_net_app_initiator_group import StorageNetAppInitiatorGroup
    from intersight.model.storage_pure_host import StoragePureHost
    # Publish the lazily imported classes at module scope.
    globals()['DisplayNames'] = DisplayNames
    globals()['MoBaseMo'] = MoBaseMo
    globals()['MoBaseMoRelationship'] = MoBaseMoRelationship
    globals()['MoTag'] = MoTag
    globals()['MoVersionContext'] = MoVersionContext
    globals()['StorageBaseCapacity'] = StorageBaseCapacity
    globals()['StorageBaseHostAllOf'] = StorageBaseHostAllOf
    globals()['StorageBaseInitiator'] = StorageBaseInitiator
    globals()['StorageHitachiHost'] = StorageHitachiHost
    globals()['StorageNetAppInitiatorGroup'] = StorageNetAppInitiatorGroup
    globals()['StoragePureHost'] = StoragePureHost
class StorageBaseHost(ModelComposed):
    """Generated OpenAPI composed model for a storage host.

    Composed (allOf) from MoBaseMo and StorageBaseHostAllOf — see
    `_composed_schemas` — and discriminated on `class_id` into one of
    StorageHitachiHost, StorageNetAppInitiatorGroup or StoragePureHost
    (see `discriminator`).
    """
    # Enumerated values allowed per variable. Keys are tuple paths to the
    # variable; values map uppercase enum names to the wire values.
    allowed_values = {
        ('class_id',): {
            'HITACHIHOST': "storage.HitachiHost",
            'NETAPPINITIATORGROUP': "storage.NetAppInitiatorGroup",
            'PUREHOST': "storage.PureHost",
        },
        ('object_type',): {
            'HITACHIHOST': "storage.HitachiHost",
            'NETAPPINITIATORGROUP': "storage.NetAppInitiatorGroup",
            'PUREHOST': "storage.PureHost",
        },
    }
    # No extra validation rules (regex/min/max) for this model.
    validations = {
    }
    @cached_property
    def additional_properties_type():
        """Types accepted for properties not declared in `openapi_types`."""
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)
    # The model instance itself may not be serialized as null.
    _nullable = False
    @cached_property
    def openapi_types():
        """Map declared property name -> tuple of accepted types.

        Computed lazily so the referenced model classes are imported first.
        """
        lazy_import()
        return {
            'class_id': (str,),
            'object_type': (str,),
            'description': (str,),
            'initiators': ([StorageBaseInitiator], none_type,),
            'name': (str,),
            'os_type': (str,),
            'storage_utilization': (StorageBaseCapacity,),
            'account_moid': (str,),
            'create_time': (datetime,),
            'domain_group_moid': (str,),
            'mod_time': (datetime,),
            'moid': (str,),
            'owners': ([str], none_type,),
            'shared_scope': (str,),
            'tags': ([MoTag], none_type,),
            'version_context': (MoVersionContext,),
            'ancestors': ([MoBaseMoRelationship], none_type,),
            'parent': (MoBaseMoRelationship,),
            'permission_resources': ([MoBaseMoRelationship], none_type,),
            'display_names': (DisplayNames,),
        }
    @cached_property
    def discriminator():
        """Map the `class_id` wire value to the concrete model class."""
        lazy_import()
        val = {
            'storage.HitachiHost': StorageHitachiHost,
            'storage.NetAppInitiatorGroup': StorageNetAppInitiatorGroup,
            'storage.PureHost': StoragePureHost,
        }
        if not val:
            return None
        return {'class_id': val}
    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'class_id': 'ClassId',
        'object_type': 'ObjectType',
        'description': 'Description',
        'initiators': 'Initiators',
        'name': 'Name',
        'os_type': 'OsType',
        'storage_utilization': 'StorageUtilization',
        'account_moid': 'AccountMoid',
        'create_time': 'CreateTime',
        'domain_group_moid': 'DomainGroupMoid',
        'mod_time': 'ModTime',
        'moid': 'Moid',
        'owners': 'Owners',
        'shared_scope': 'SharedScope',
        'tags': 'Tags',
        'version_context': 'VersionContext',
        'ancestors': 'Ancestors',
        'parent': 'Parent',
        'permission_resources': 'PermissionResources',
        'display_names': 'DisplayNames',
    }
    # Internal bookkeeping attributes assigned in __init__ below; these are
    # instance state, not schema properties.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
        '_composed_instances',
        '_var_name_to_model_instances',
        '_additional_properties_model_instances',
    ])
    @convert_js_args_to_python_args
    def __init__(self, class_id, object_type, *args, **kwargs):
        """StorageBaseHost - a model defined in OpenAPI.

        Args:
            class_id (str): discriminator value selecting the concrete type.
            object_type (str): concrete object type of this managed object.

        Keyword Args:
            _check_type (bool): validate input types (default True).
            _spec_property_naming (bool): True when keyword names use the
                spec's JSON names instead of python names (default False).
            _path_to_item (tuple): location of this object relative to its
                parent, used in error paths (default ()).
            _configuration: client configuration; when it sets
                discard_unknown_keys, unrecognized kwargs are dropped
                (default None).
            _visited_composed_classes (tuple): composed classes seen so far;
                this class is appended and the tuple threaded through
                composed validation (default ()).
            Any other keyword is stored as a model property.
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        # Properties are keyword-only; any positional argument is an error.
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        required_args = {
            'class_id': class_id,
            'object_type': object_type,
        }
        model_args = {}
        model_args.update(required_args)
        model_args.update(kwargs)
        # Distribute the arguments over the composed (allOf) instances.
        composed_info = validate_get_composed_info(
            constant_args, model_args, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        unused_args = composed_info[3]
        for var_name, var_value in required_args.items():
            setattr(self, var_name, var_value)
        for var_name, var_value in kwargs.items():
            if var_name in unused_args and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        not self._additional_properties_model_instances:
                # Configuration asked to silently drop unknown keys.
                continue
            setattr(self, var_name, var_value)
    @cached_property
    def _composed_schemas():
        # Resolved lazily: the composed classes' modules may still be loading
        # when this module is first imported.
        lazy_import()
        return {
            'anyOf': [
            ],
            'allOf': [
                MoBaseMo,
                StorageBaseHostAllOf,
            ],
            'oneOf': [
            ],
        }
| true | true |
f730af851f20a268b6a0f44fd441c1856d5e489a | 8,362 | py | Python | transformers/modeling_tf_transfo_xl_utilities.py | wietsedv/transformers | 8efc6dd544bf1a30d99d4b5abfc5e214699eab2b | [
"Apache-2.0"
] | 4 | 2020-02-18T22:33:35.000Z | 2021-06-24T20:34:50.000Z | transformers/modeling_tf_transfo_xl_utilities.py | eangelica2014/transformers | 5e289f69bc564c94132f77c89a34e5f1dd69a592 | [
"Apache-2.0"
] | null | null | null | transformers/modeling_tf_transfo_xl_utilities.py | eangelica2014/transformers | 5e289f69bc564c94132f77c89a34e5f1dd69a592 | [
"Apache-2.0"
] | 1 | 2020-07-01T01:16:11.000Z | 2020-07-01T01:16:11.000Z | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" A TF 2.0 Adaptive Softmax for Transformer XL model.
"""
from collections import defaultdict
import numpy as np
import tensorflow as tf
from .modeling_tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    """Adaptive softmax output layer with a shortlist/tail-cluster split, as
    used by Transformer-XL.

    The vocabulary is partitioned at `cutoffs`: the first partition (the
    "head" shortlist) is scored together with one pseudo-logit per tail
    cluster, and each tail partition has its own output weights. With
    `div_val != 1` the embedding width of successive tails shrinks by that
    factor, which is what makes the softmax "adaptive".

    `call` returns log-probabilities over the full vocabulary; when targets
    are provided, the negative log-likelihood is registered on the layer via
    `add_loss` / `add_metric`.
    """

    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1,
                 keep_order=False, **kwargs):
        super(TFAdaptiveSoftmaxMask, self).__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        # Close the last partition at the full vocabulary size.
        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        # The head scores the shortlist plus one logit per tail cluster.
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        # Filled in build(): one (weight, bias) pair and one projection per
        # partition (a single shared pair when div_val == 1).
        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(shape=(self.n_clusters, self.d_embed),
                                                  initializer='zeros',
                                                  trainable=True,
                                                  name='cluster_weight')
            self.cluster_bias = self.add_weight(shape=(self.n_clusters,),
                                                initializer='zeros',
                                                trainable=True,
                                                name='cluster_bias')

        if self.div_val == 1:
            # Uniform embedding width: one (weight, bias) over the whole
            # vocabulary, optionally projected when d_proj != d_embed.
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(shape=(self.d_embed, self.d_proj),
                                             initializer='zeros',
                                             trainable=True,
                                             name='out_projs_._{}'.format(i))
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(shape=(self.vocab_size, self.d_embed,),
                                         initializer='zeros',
                                         trainable=True,
                                         name='out_layers_._{}_._weight'.format(i))
                bias = self.add_weight(shape=(self.vocab_size,),
                                       initializer='zeros',
                                       trainable=True,
                                       name='out_layers_._{}_._bias'.format(i))
                self.out_layers.append((weight, bias))
        else:
            # Shrinking width per cluster: each partition gets its own
            # (weight, bias) of width d_embed // div_val**i plus a projection.
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1]
                d_emb_i = self.d_embed // (self.div_val ** i)
                weight = self.add_weight(shape=(d_emb_i, self.d_proj),
                                         initializer='zeros',
                                         trainable=True,
                                         name='out_projs_._{}'.format(i))
                self.out_projs.append(weight)
                weight = self.add_weight(shape=(r_idx-l_idx, d_emb_i,),
                                         initializer='zeros',
                                         trainable=True,
                                         name='out_layers_._{}_._weight'.format(i))
                bias = self.add_weight(shape=(r_idx-l_idx,),
                                       initializer='zeros',
                                       trainable=True,
                                       name='out_layers_._{}_._bias'.format(i))
                self.out_layers.append((weight, bias))
        super(TFAdaptiveSoftmaxMask, self).build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        "Optionally project `x` through `proj`, then score against rows of `W` plus bias `b`."
        y = x
        if proj is not None:
            y = tf.einsum('ibd,ed->ibe', y, proj)
        return tf.einsum('ibd,nd->ibn', y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        "Pick `logprob[j, target[j]]` for every row j."
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0])
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, inputs, return_mean=True, training=False):
        hidden, target = inputs
        head_logprob = 0
        if self.n_clusters == 0:
            # Plain (non-adaptive) softmax over the whole vocabulary.
            # Fix: removed a dead `softmax_b = tf.get_variable(...)` line that
            # used the TF1-only variable API, read a nonexistent
            # `self.config`, and whose result was never used.
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2], dtype=tf.float32)
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    # Head: shortlist tokens plus one pseudo-logit per cluster.
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., :self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)

                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, tf.cast(shape_list(loss), dtype=tf.int64))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
            self.add_metric(loss, name=self.name, aggregation='mean' if return_mean else '')
        return out
| 47.511364 | 110 | 0.527864 |
from collections import defaultdict
import numpy as np
import tensorflow as tf
from .modeling_tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    """Adaptive softmax output layer (shortlist + tail clusters) for
    Transformer-XL.

    The vocabulary is split at `cutoffs`; the head scores the shortlist plus
    one pseudo-logit per tail cluster, and each tail partition has its own
    output weights (narrower by `div_val` per cluster when `div_val != 1`).
    `call` returns full-vocabulary log-probabilities and, given targets,
    registers the NLL via `add_loss` / `add_metric`.
    """

    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1,
                 keep_order=False, **kwargs):
        super(TFAdaptiveSoftmaxMask, self).__init__(**kwargs)
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj
        # Close the last partition at the full vocabulary size.
        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        # Head scores the shortlist plus one logit per tail cluster.
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order
        # Filled in build().
        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(shape=(self.n_clusters, self.d_embed),
                                                  initializer='zeros',
                                                  trainable=True,
                                                  name='cluster_weight')
            self.cluster_bias = self.add_weight(shape=(self.n_clusters,),
                                                initializer='zeros',
                                                trainable=True,
                                                name='cluster_bias')
        if self.div_val == 1:
            # Uniform width: one (weight, bias) over the whole vocabulary.
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(shape=(self.d_embed, self.d_proj),
                                             initializer='zeros',
                                             trainable=True,
                                             name='out_projs_._{}'.format(i))
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(shape=(self.vocab_size, self.d_embed,),
                                         initializer='zeros',
                                         trainable=True,
                                         name='out_layers_._{}_._weight'.format(i))
                bias = self.add_weight(shape=(self.vocab_size,),
                                       initializer='zeros',
                                       trainable=True,
                                       name='out_layers_._{}_._bias'.format(i))
                self.out_layers.append((weight, bias))
        else:
            # Per-cluster weights of width d_embed // div_val**i plus a projection.
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1]
                d_emb_i = self.d_embed // (self.div_val ** i)
                weight = self.add_weight(shape=(d_emb_i, self.d_proj),
                                         initializer='zeros',
                                         trainable=True,
                                         name='out_projs_._{}'.format(i))
                self.out_projs.append(weight)
                weight = self.add_weight(shape=(r_idx-l_idx, d_emb_i,),
                                         initializer='zeros',
                                         trainable=True,
                                         name='out_layers_._{}_._weight'.format(i))
                bias = self.add_weight(shape=(r_idx-l_idx,),
                                       initializer='zeros',
                                       trainable=True,
                                       name='out_layers_._{}_._bias'.format(i))
                self.out_layers.append((weight, bias))
        super(TFAdaptiveSoftmaxMask, self).build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        "Optionally project `x`, then score against rows of `W` plus bias `b`."
        y = x
        if proj is not None:
            y = tf.einsum('ibd,ed->ibe', y, proj)
        return tf.einsum('ibd,nd->ibn', y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        "Pick `logprob[j, target[j]]` for every row j."
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0])
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, inputs, return_mean=True, training=False):
        hidden, target = inputs
        head_logprob = 0
        if self.n_clusters == 0:
            # Plain softmax over the whole vocabulary.
            # Fix: removed a dead `softmax_b = tf.get_variable(...)` line that
            # used the TF1-only variable API, read a nonexistent `self.config`,
            # and whose result was never used.
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2], dtype=tf.float32)
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx
                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]
                if i == 0:
                    # Head: shortlist tokens plus one pseudo-logit per cluster.
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)
                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., :self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # no probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, tf.cast(shape_list(loss), dtype=tf.int64))
            out = tf.concat(out, axis=-1)
        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Register the training-time loss and metric on the layer.
            self.add_loss(loss)
            self.add_metric(loss, name=self.name, aggregation='mean' if return_mean else '')
        return out
| true | true |
f730b057549fb7fa3a7ab3af3435630749ad857a | 131 | py | Python | program1.py | saenzzzup/criptografia-UNAM-2020 | 05a19e076ea19218753fdc6aa8525e297cbb0b12 | [
"MIT"
] | null | null | null | program1.py | saenzzzup/criptografia-UNAM-2020 | 05a19e076ea19218753fdc6aa8525e297cbb0b12 | [
"MIT"
] | null | null | null | program1.py | saenzzzup/criptografia-UNAM-2020 | 05a19e076ea19218753fdc6aa8525e297cbb0b12 | [
"MIT"
] | null | null | null | import fileinput
suma = sum(float(num) for num in fileinput.input())
if (suma).is_integer():
print(int(suma))
else:
print(suma) | 16.375 | 51 | 0.709924 | import fileinput
suma = sum(float(num) for num in fileinput.input())
if (suma).is_integer():
print(int(suma))
else:
print(suma) | true | true |
f730b0aa8d6d0ccc56c5c91946429f4bb15d48a4 | 218 | py | Python | src/redistool/basic.py | vinthony/racpider | 56cea984b69ed4faf0f1c5d264ce38690d4b449e | [
"MIT"
] | 4 | 2015-05-27T04:07:59.000Z | 2016-03-08T15:13:35.000Z | src/redistool/basic.py | vinthony/racpider | 56cea984b69ed4faf0f1c5d264ce38690d4b449e | [
"MIT"
] | null | null | null | src/redistool/basic.py | vinthony/racpider | 56cea984b69ed4faf0f1c5d264ce38690d4b449e | [
"MIT"
] | null | null | null | from redis import Redis
from config.getconfig import getconfig
def conn():
config = config()
rec = getconfig()["redis"]
self.conn = Redis(host=rec["host"],port=int(rec["port"]),db=int(rec['db']))
return self.conn | 27.25 | 77 | 0.697248 | from redis import Redis
from config.getconfig import getconfig
def conn():
config = config()
rec = getconfig()["redis"]
self.conn = Redis(host=rec["host"],port=int(rec["port"]),db=int(rec['db']))
return self.conn | true | true |
f730b168fcf7dec7355c788c5f9285fee330b4ac | 325 | py | Python | 1876.py | romanvelichkin/acm.timus.ru | ff2e946b04fec857be142dca18e1ac3c49b14cf5 | [
"Apache-2.0"
] | null | null | null | 1876.py | romanvelichkin/acm.timus.ru | ff2e946b04fec857be142dca18e1ac3c49b14cf5 | [
"Apache-2.0"
] | null | null | null | 1876.py | romanvelichkin/acm.timus.ru | ff2e946b04fec857be142dca18e1ac3c49b14cf5 | [
"Apache-2.0"
] | null | null | null | # 1876. Утро сороконожки
# solved
boots = input().split(' ')
left_boots = int(boots[0])
right_boots = int(boots[1])
left_legs = 40
right_legs = 40
result = 0
if right_boots >= left_boots:
result = right_boots*2 + left_legs
else:
result = (right_legs - 1)*2 + left_legs + (left_boots - left_legs)*2 + 1
print(result) | 21.666667 | 76 | 0.683077 |
boots = input().split(' ')
left_boots = int(boots[0])
right_boots = int(boots[1])
left_legs = 40
right_legs = 40
result = 0
if right_boots >= left_boots:
result = right_boots*2 + left_legs
else:
result = (right_legs - 1)*2 + left_legs + (left_boots - left_legs)*2 + 1
print(result) | true | true |
f730b1d1edd280ab8da6c0db314c7a6d740157a1 | 12,189 | py | Python | fastai/callback.py | fish5421/fastai_update | c3dbdfba59512b5004093119f7676f224eb1d15c | [
"Apache-2.0"
] | null | null | null | fastai/callback.py | fish5421/fastai_update | c3dbdfba59512b5004093119f7676f224eb1d15c | [
"Apache-2.0"
] | null | null | null | fastai/callback.py | fish5421/fastai_update | c3dbdfba59512b5004093119f7676f224eb1d15c | [
"Apache-2.0"
] | 1 | 2018-08-26T02:48:51.000Z | 2018-08-26T02:48:51.000Z | "Callbacks provides extensibility to the `basic_train` loop. See `train` for examples of custom callbacks."
from .data import *
from .torch_core import *
__all__ = ['Callback', 'CallbackHandler', 'OptimWrapper', 'SmoothenValue', 'Stepper', 'annealing_cos', 'CallbackList',
'annealing_exp', 'annealing_linear', 'annealing_no', 'annealing_poly', 'do_annealing_poly']
class OptimWrapper():
    """Basic wrapper around a PyTorch optimizer to simplify hyper-parameter changes.

    `create` builds the optimizer over groups produced by `split_bn_bias`, so
    `opt.param_groups` is laid out in pairs: even indices ([::2]) are the main
    parameter groups and the following odd indices ([1::2]) their companion
    groups (presumably batchnorm/bias, per `split_bn_bias` — the slicing below
    relies on this pairing).
    """
    def __init__(self, opt:optim.Optimizer, wd:Floats=0., true_wd:bool=False, bn_wd:bool=True):
        self.opt,self.true_wd,self.bn_wd = opt,true_wd,bn_wd
        # Keys of tunable hyper-parameters, taken from the first param group.
        self.opt_keys = list(self.opt.param_groups[0].keys())
        self.opt_keys.remove('params')
        self.read_defaults()
        self.wd = wd
    @classmethod
    def create(cls, opt_fn:Union[type,Callable], lr:Union[float,Tuple,List],
               layer_groups:ModuleList, **kwargs:Any)->optim.Optimizer:
        "Create an `optim.Optimizer` from `opt_fn` with learning rate `lr`, one pair of param groups per layer group."
        split_groups = split_bn_bias(layer_groups)
        opt = opt_fn([{'params': trainable_params(l), 'lr':0} for l in split_groups])
        opt = cls(opt, **kwargs)
        opt.lr = listify(lr, layer_groups)
        return opt
    def __repr__(self)->str:
        return f'OptimWrapper over {repr(self.opt)}.\nTrue weight decay: {self.true_wd}'
    #Pytorch optimizer methods
    def step(self)->None:
        "Apply (decoupled) weight decay if enabled, then step the optimizer."
        # weight decay outside of optimizer step (AdamW-style): multiply the
        # weights directly by (1 - wd*lr), then zero the optimizer's own
        # weight_decay so the decay is not applied a second time.
        if self.true_wd:
            for lr,wd,pg1,pg2 in zip(self._lr,self._wd,self.opt.param_groups[::2],self.opt.param_groups[1::2]):
                for p in pg1['params']: p.data.mul_(1 - wd*lr)
                if self.bn_wd:
                    for p in pg2['params']: p.data.mul_(1 - wd*lr)
            self.set_val('weight_decay', listify(0, self._wd))
        self.opt.step()
    def zero_grad(self)->None:
        "Clear optimizer gradients."
        self.opt.zero_grad()
    #Hyperparameters as properties (getters return the last group's value)
    @property
    def lr(self)->float:
        "Get learning rate."
        return self._lr[-1]
    @lr.setter
    def lr(self, val:float)->None:
        "Set learning rate (broadcast over param groups via `listify`)."
        self._lr = self.set_val('lr', listify(val, self._lr))
    @property
    def mom(self)->float:
        "Get momentum."
        return self._mom[-1]
    @mom.setter
    def mom(self, val:float)->None:
        "Set momentum (stored as `momentum` or as the first element of `betas`, depending on the optimizer)."
        if 'momentum' in self.opt_keys: self.set_val('momentum', listify(val, self._mom))
        elif 'betas' in self.opt_keys: self.set_val('betas', (listify(val, self._mom), self._beta))
        self._mom = listify(val, self._mom)
    @property
    def beta(self)->float:
        "Get beta (or alpha as makes sense for given optimizer)."
        return None if self._beta is None else self._beta[-1]
    @beta.setter
    def beta(self, val:float)->None:
        "Set beta (second element of `betas`) or `alpha`, whichever the optimizer exposes."
        if val is None: return
        if 'betas' in self.opt_keys: self.set_val('betas', (self._mom, listify(val, self._beta)))
        elif 'alpha' in self.opt_keys: self.set_val('alpha', listify(val, self._beta))
        self._beta = listify(val, self._beta)
    @property
    def wd(self)->float:
        "Get weight decay."
        return self._wd[-1]
    @wd.setter
    def wd(self, val:float)->None:
        "Set weight decay; when `true_wd` the optimizer's own weight_decay is left alone (decay is applied in `step`)."
        if not self.true_wd: self.set_val('weight_decay', listify(val, self._wd), bn_groups=self.bn_wd)
        self._wd = listify(val, self._wd)
    #Helper functions
    def read_defaults(self)->None:
        "Read the values inside the optimizer for the hyper-parameters."
        self._beta = None
        if 'lr' in self.opt_keys: self._lr = self.read_val('lr')
        if 'momentum' in self.opt_keys: self._mom = self.read_val('momentum')
        if 'alpha' in self.opt_keys: self._beta = self.read_val('alpha')
        if 'betas' in self.opt_keys: self._mom,self._beta = self.read_val('betas')
        if 'weight_decay' in self.opt_keys: self._wd = self.read_val('weight_decay')
    def set_val(self, key:str, val:Any, bn_groups:bool=True)->Any:
        "Write `val` under `key` into each pair of param groups (odd groups only when `bn_groups`)."
        # A (list, list) tuple (e.g. betas) is re-zipped into per-group pairs.
        if is_tuple(val): val = [(v1,v2) for v1,v2 in zip(*val)]
        for v,pg1,pg2 in zip(val,self.opt.param_groups[::2],self.opt.param_groups[1::2]):
            pg1[key] = v
            if bn_groups: pg2[key] = v
        return val
    def read_val(self, key:str) -> Union[List[float],Tuple[List[float],List[float]]]:
        "Read `key` from the even (main) param groups; tuple values are unzipped into two lists."
        val = [pg[key] for pg in self.opt.param_groups[::2]]
        if is_tuple(val[0]): val = [o[0] for o in val], [o[1] for o in val]
        return val
class Callback():
    """Abstract hook interface for the training loop.

    Subclasses override any subset of the `on_*` events below; every default
    implementation is a no-op, so a callback only implements the events it
    cares about. `_order` is the sort key a handler can use to decide firing
    order (lower fires first).
    """
    _order=0
    def on_train_begin(self, **kwargs:Any)->None:
        "Event: training is about to start; initialize constants/state here."
        pass
    def on_epoch_begin(self, **kwargs:Any)->None:
        "Event: a new epoch is starting."
        pass
    def on_batch_begin(self, **kwargs:Any)->None:
        "Event: a batch was drawn, before the step; may return replacement (xb, yb)."
        pass
    def on_loss_begin(self, **kwargs:Any)->None:
        "Event: forward pass done, loss not yet computed; may return a modified output."
        pass
    def on_backward_begin(self, **kwargs:Any)->None:
        "Event: loss computed, backprop not yet run; may return a modified loss (e.g. for regularization)."
        pass
    def on_backward_end(self, **kwargs:Any)->None:
        "Event: backprop done, optimizer step not yet taken (e.g. for true weight decay in AdamW)."
        pass
    def on_step_end(self, **kwargs:Any)->None:
        "Event: optimizer stepped, gradients not yet zeroed."
        pass
    def on_batch_end(self, **kwargs:Any)->None:
        "Event: batch finished."
        pass
    def on_epoch_end(self, **kwargs:Any)->bool:
        "Event: epoch finished; return True to request stopping training."
        return False
    def on_train_end(self, **kwargs:Any)->None:
        "Event: training finished; clean up and save files/models."
        pass
class SmoothenValue():
    "Exponential moving average of a value (loss, etc.) with bias correction."
    def __init__(self, beta:float):
        "Create a smoother with momentum `beta` (0 < beta < 1)."
        self.beta,self.n,self.mov_avg = beta,0,0
    def add_value(self, val:float)->None:
        "Fold `val` into the running average and refresh `self.smooth`."
        self.n += 1
        # Debiased EMA (as in Adam): dividing by 1 - beta**n keeps early
        # values from being pulled toward the zero initialization.
        self.mov_avg = self.beta * self.mov_avg + (1 - self.beta) * val
        debias = 1 - self.beta ** self.n
        self.smooth = self.mov_avg / debias
CallbackList = Collection[Callback]
def _get_init_state(): return {'epoch':0, 'iteration':0, 'num_batch':0}
@dataclass
class CallbackHandler():
    "Manage all of the registered callback objects, smoothing loss by momentum `beta`."
    callbacks:CallbackList
    beta:float=0.98
    def __post_init__(self)->None:
        "Initialize smoother and learning stats."
        # Sort once by `_order` so callbacks fire in a deterministic order.
        self.callbacks = sorted(self.callbacks, key=lambda o: getattr(o, '_order', 0))
        self.smoothener = SmoothenValue(self.beta)
        self.state_dict:Dict[str,Union[int,float,Tensor]]=_get_init_state()
    def __call__(self, cb_name, **kwargs)->None:
        "Call `on_{cb_name}` on every callback, passing the shared state plus `kwargs`; collect the returns."
        return [getattr(cb, f'on_{cb_name}')(**self.state_dict, **kwargs) for cb in self.callbacks]
    def on_train_begin(self, epochs:int, pbar:PBar, metrics:MetricFuncList)->None:
        "About to start learning: reset state and record run-level settings."
        self.state_dict = _get_init_state()
        self.state_dict['n_epochs'],self.state_dict['pbar'],self.state_dict['metrics'] = epochs,pbar,metrics
        self('train_begin')
    def on_epoch_begin(self)->None:
        "Handle new epoch."
        self.state_dict['num_batch'] = 0
        self('epoch_begin')
    def on_batch_begin(self, xb:Tensor, yb:Tensor)->None:
        "Handle new batch `xb`,`yb`; callbacks may replace the pair by returning a new one."
        self.state_dict['last_input'], self.state_dict['last_target'] = xb, yb
        for cb in self.callbacks:
            a = cb.on_batch_begin(**self.state_dict)
            # A non-None return replaces (last_input, last_target) for the
            # remaining callbacks and for the training step.
            if a is not None: self.state_dict['last_input'], self.state_dict['last_target'] = a
        return self.state_dict['last_input'], self.state_dict['last_target']
    def on_loss_begin(self, out:Tensor)->None:
        "Handle start of loss calculation with model output `out`; callbacks may replace it."
        self.state_dict['last_output'] = out
        for cb in self.callbacks:
            a = cb.on_loss_begin(**self.state_dict)
            if a is not None: self.state_dict['last_output'] = a
        return self.state_dict['last_output']
    def on_backward_begin(self, loss:Tensor)->None:
        "Handle gradient calculation on `loss`; updates the smoothed loss, callbacks may replace the loss."
        self.smoothener.add_value(loss.detach())
        self.state_dict['last_loss'], self.state_dict['smooth_loss'] = loss, self.smoothener.smooth
        for cb in self.callbacks:
            a = cb.on_backward_begin(**self.state_dict)
            if a is not None: self.state_dict['last_loss'] = a
        return self.state_dict['last_loss']
    def on_backward_end(self)->None:
        "Handle end of gradient calculation."
        self('backward_end')
    def on_step_end(self)->None:
        "Handle end of optimization step."
        self('step_end')
    def on_batch_end(self, loss:Tensor)->None:
        "Handle end of processing one batch with `loss`; returns True if any callback asks to stop."
        self.state_dict['last_loss'] = loss
        stop = np.any(self('batch_end'))
        self.state_dict['iteration'] += 1
        self.state_dict['num_batch'] += 1
        return stop
    def on_epoch_end(self, val_metrics:MetricsList)->bool:
        "Epoch is done, process `val_metrics`; returns True if any callback asks to stop."
        self.state_dict['last_metrics'] = val_metrics
        stop = np.any(self('epoch_end'))
        self.state_dict['epoch'] += 1
        return stop
    def on_train_end(self, exception:Union[bool,Exception])->None:
        "Handle end of training, `exception` is an `Exception` or False if no exceptions during training."
        self('train_end', exception=exception)
def annealing_no(start:Number, end:Number, pct:float)->Number:
    "Constant schedule: ignore `end` and `pct`, always return `start`."
    return start
def annealing_linear(start:Number, end:Number, pct:float)->Number:
    "Linear interpolation between `start` and `end` as `pct` goes 0.0 -> 1.0."
    delta = end - start
    return start + pct * delta
def annealing_exp(start:Number, end:Number, pct:float)->Number:
    "Geometric (exponential) interpolation from `start` to `end` as `pct` goes 0.0 -> 1.0."
    ratio = end / start
    return start * ratio ** pct
def annealing_cos(start:Number, end:Number, pct:float)->Number:
    "Half-cosine schedule: `start` at pct=0 easing smoothly into `end` at pct=1."
    # cos(pi*pct) + 1 sweeps from 2 down to 0, so the result sweeps start -> end.
    scale = (start-end)/2
    return end + scale * (np.cos(np.pi * pct) + 1)
def do_annealing_poly(start:Number, end:Number, pct:float, degree:Number)->Number:
    "Polynomial interpolation of the given `degree` (helper for `annealing_poly`)."
    decay = (1-pct)**degree
    return end + (start-end) * decay
def annealing_poly(degree:Number)->Number:
    "Build an annealing function `f(start, end, pct)` using a polynomial of the given degree."
    # Freeze `degree` so the result has the standard (start, end, pct) signature.
    return functools.partial(do_annealing_poly, degree=degree)
class Stepper():
    "Walk a hyper-parameter from start to end (`vals`) over `n_iter` iterations following schedule `func`."
    def __init__(self, vals:StartOptEnd, n_iter:int, func:Optional[AnnealFunc]=None):
        if is_tuple(vals): self.start,self.end = vals[0],vals[1]
        else:              self.start,self.end = vals,0
        self.n_iter = n_iter
        # Default schedule: interpolate linearly when a (start, end) pair was
        # given, otherwise hold the single value constant.
        if func is not None:  self.func = func
        elif is_tuple(vals):  self.func = annealing_linear
        else:                 self.func = annealing_no
        self.n = 0
    def step(self)->Number:
        "Advance one iteration and return the next value along the schedule."
        self.n += 1
        return self.func(self.start, self.end, self.n/self.n_iter)
    @property
    def is_done(self)->bool:
        "True once `n_iter` steps have been taken."
        return self.n >= self.n_iter
| 42.618881 | 124 | 0.644844 | from .data import *
from .torch_core import *
__all__ = ['Callback', 'CallbackHandler', 'OptimWrapper', 'SmoothenValue', 'Stepper', 'annealing_cos', 'CallbackList',
'annealing_exp', 'annealing_linear', 'annealing_no', 'annealing_poly', 'do_annealing_poly']
class OptimWrapper():
    """Basic wrapper around a PyTorch optimizer to simplify hyper-parameter changes.

    `create` builds the optimizer over groups produced by `split_bn_bias`, so
    `opt.param_groups` is laid out in pairs: even indices ([::2]) are the main
    groups and the following odd indices ([1::2]) their companions (presumably
    batchnorm/bias, per `split_bn_bias` — the slicing below relies on this).
    """
    def __init__(self, opt:optim.Optimizer, wd:Floats=0., true_wd:bool=False, bn_wd:bool=True):
        self.opt,self.true_wd,self.bn_wd = opt,true_wd,bn_wd
        # Keys of tunable hyper-parameters, taken from the first param group.
        self.opt_keys = list(self.opt.param_groups[0].keys())
        self.opt_keys.remove('params')
        self.read_defaults()
        self.wd = wd
    @classmethod
    def create(cls, opt_fn:Union[type,Callable], lr:Union[float,Tuple,List],
               layer_groups:ModuleList, **kwargs:Any)->optim.Optimizer:
        "Create an `optim.Optimizer` from `opt_fn`, then set learning rate `lr` on the layer groups."
        split_groups = split_bn_bias(layer_groups)
        opt = opt_fn([{'params': trainable_params(l), 'lr':0} for l in split_groups])
        opt = cls(opt, **kwargs)
        opt.lr = listify(lr, layer_groups)
        return opt
    def __repr__(self)->str:
        "Debug representation including the wrapped optimizer and the weight-decay mode."
        return f'OptimWrapper over {repr(self.opt)}.\nTrue weight decay: {self.true_wd}'
    def step(self)->None:
        "Apply decoupled (AdamW-style) weight decay if enabled, then step the optimizer."
        # Multiply weights by (1 - wd*lr) directly, then zero the optimizer's
        # own weight_decay so the decay is not applied a second time.
        if self.true_wd:
            for lr,wd,pg1,pg2 in zip(self._lr,self._wd,self.opt.param_groups[::2],self.opt.param_groups[1::2]):
                for p in pg1['params']: p.data.mul_(1 - wd*lr)
                if self.bn_wd:
                    for p in pg2['params']: p.data.mul_(1 - wd*lr)
            self.set_val('weight_decay', listify(0, self._wd))
        self.opt.step()
    def zero_grad(self)->None:
        "Clear optimizer gradients."
        self.opt.zero_grad()
    @property
    def lr(self)->float:
        "Get learning rate (value of the last param-group pair)."
        return self._lr[-1]
    @lr.setter
    def lr(self, val:float)->None:
        "Set learning rate (broadcast over param groups via `listify`)."
        self._lr = self.set_val('lr', listify(val, self._lr))
    @property
    def mom(self)->float:
        "Get momentum."
        return self._mom[-1]
    @mom.setter
    def mom(self, val:float)->None:
        "Set momentum (as `momentum` or the first element of `betas`, depending on the optimizer)."
        if 'momentum' in self.opt_keys: self.set_val('momentum', listify(val, self._mom))
        elif 'betas' in self.opt_keys: self.set_val('betas', (listify(val, self._mom), self._beta))
        self._mom = listify(val, self._mom)
    @property
    def beta(self)->float:
        "Get beta (or alpha, as makes sense for the wrapped optimizer); None when unset."
        return None if self._beta is None else self._beta[-1]
    @beta.setter
    def beta(self, val:float)->None:
        "Set beta (second element of `betas`) or `alpha`, whichever the optimizer exposes."
        if val is None: return
        if 'betas' in self.opt_keys: self.set_val('betas', (self._mom, listify(val, self._beta)))
        elif 'alpha' in self.opt_keys: self.set_val('alpha', listify(val, self._beta))
        self._beta = listify(val, self._beta)
    @property
    def wd(self)->float:
        "Get weight decay."
        return self._wd[-1]
    @wd.setter
    def wd(self, val:float)->None:
        "Set weight decay; with `true_wd` the optimizer's own weight_decay is left alone (applied in `step`)."
        if not self.true_wd: self.set_val('weight_decay', listify(val, self._wd), bn_groups=self.bn_wd)
        self._wd = listify(val, self._wd)
    def read_defaults(self)->None:
        "Read the hyper-parameter values currently stored inside the optimizer."
        self._beta = None
        if 'lr' in self.opt_keys: self._lr = self.read_val('lr')
        if 'momentum' in self.opt_keys: self._mom = self.read_val('momentum')
        if 'alpha' in self.opt_keys: self._beta = self.read_val('alpha')
        if 'betas' in self.opt_keys: self._mom,self._beta = self.read_val('betas')
        if 'weight_decay' in self.opt_keys: self._wd = self.read_val('weight_decay')
    def set_val(self, key:str, val:Any, bn_groups:bool=True)->Any:
        "Write `val` under `key` into each pair of param groups (odd groups only when `bn_groups`)."
        # A (list, list) tuple (e.g. betas) is re-zipped into per-group pairs.
        if is_tuple(val): val = [(v1,v2) for v1,v2 in zip(*val)]
        for v,pg1,pg2 in zip(val,self.opt.param_groups[::2],self.opt.param_groups[1::2]):
            pg1[key] = v
            if bn_groups: pg2[key] = v
        return val
    def read_val(self, key:str) -> Union[List[float],Tuple[List[float],List[float]]]:
        "Read `key` from the even (main) param groups; tuple values are unzipped into two lists."
        val = [pg[key] for pg in self.opt.param_groups[::2]]
        if is_tuple(val[0]): val = [o[0] for o in val], [o[1] for o in val]
        return val
class Callback():
_order=0
def on_train_begin(self, **kwargs:Any)->None:
pass
def on_epoch_begin(self, **kwargs:Any)->None:
pass
def on_batch_begin(self, **kwargs:Any)->None:
pass
def on_loss_begin(self, **kwargs:Any)->None:
pass
def on_backward_begin(self, **kwargs:Any)->None:
pass
def on_backward_end(self, **kwargs:Any)->None:
pass
def on_step_end(self, **kwargs:Any)->None:
pass
def on_batch_end(self, **kwargs:Any)->None:
pass
def on_epoch_end(self, **kwargs:Any)->bool:
return False
def on_train_end(self, **kwargs:Any)->None:
pass
class SmoothenValue():
def __init__(self, beta:float):
self.beta,self.n,self.mov_avg = beta,0,0
def add_value(self, val:float)->None:
self.n += 1
self.mov_avg = self.beta * self.mov_avg + (1 - self.beta) * val
self.smooth = self.mov_avg / (1 - self.beta ** self.n)
CallbackList = Collection[Callback]
def _get_init_state(): return {'epoch':0, 'iteration':0, 'num_batch':0}
@dataclass
class CallbackHandler():
callbacks:CallbackList
beta:float=0.98
def __post_init__(self)->None:
self.callbacks = sorted(self.callbacks, key=lambda o: getattr(o, '_order', 0))
self.smoothener = SmoothenValue(self.beta)
self.state_dict:Dict[str,Union[int,float,Tensor]]=_get_init_state()
def __call__(self, cb_name, **kwargs)->None:
return [getattr(cb, f'on_{cb_name}')(**self.state_dict, **kwargs) for cb in self.callbacks]
def on_train_begin(self, epochs:int, pbar:PBar, metrics:MetricFuncList)->None:
self.state_dict = _get_init_state()
self.state_dict['n_epochs'],self.state_dict['pbar'],self.state_dict['metrics'] = epochs,pbar,metrics
self('train_begin')
def on_epoch_begin(self)->None:
self.state_dict['num_batch'] = 0
self('epoch_begin')
def on_batch_begin(self, xb:Tensor, yb:Tensor)->None:
self.state_dict['last_input'], self.state_dict['last_target'] = xb, yb
for cb in self.callbacks:
a = cb.on_batch_begin(**self.state_dict)
if a is not None: self.state_dict['last_input'], self.state_dict['last_target'] = a
return self.state_dict['last_input'], self.state_dict['last_target']
def on_loss_begin(self, out:Tensor)->None:
self.state_dict['last_output'] = out
for cb in self.callbacks:
a = cb.on_loss_begin(**self.state_dict)
if a is not None: self.state_dict['last_output'] = a
return self.state_dict['last_output']
def on_backward_begin(self, loss:Tensor)->None:
self.smoothener.add_value(loss.detach())
self.state_dict['last_loss'], self.state_dict['smooth_loss'] = loss, self.smoothener.smooth
for cb in self.callbacks:
a = cb.on_backward_begin(**self.state_dict)
if a is not None: self.state_dict['last_loss'] = a
return self.state_dict['last_loss']
def on_backward_end(self)->None:
self('backward_end')
def on_step_end(self)->None:
self('step_end')
def on_batch_end(self, loss:Tensor)->None:
self.state_dict['last_loss'] = loss
stop = np.any(self('batch_end'))
self.state_dict['iteration'] += 1
self.state_dict['num_batch'] += 1
return stop
def on_epoch_end(self, val_metrics:MetricsList)->bool:
self.state_dict['last_metrics'] = val_metrics
stop = np.any(self('epoch_end'))
self.state_dict['epoch'] += 1
return stop
def on_train_end(self, exception:Union[bool,Exception])->None:
self('train_end', exception=exception)
def annealing_no(start:Number, end:Number, pct:float)->Number:
return start
def annealing_linear(start:Number, end:Number, pct:float)->Number:
return start + pct * (end-start)
def annealing_exp(start:Number, end:Number, pct:float)->Number:
return start * (end/start) ** pct
def annealing_cos(start:Number, end:Number, pct:float)->Number:
cos_out = np.cos(np.pi * pct) + 1
return end + (start-end)/2 * cos_out
def do_annealing_poly(start:Number, end:Number, pct:float, degree:Number)->Number:
return end + (start-end) * (1-pct)**degree
def annealing_poly(degree:Number)->Number:
return functools.partial(do_annealing_poly, degree=degree)
class Stepper():
def __init__(self, vals:StartOptEnd, n_iter:int, func:Optional[AnnealFunc]=None):
self.start,self.end = (vals[0],vals[1]) if is_tuple(vals) else (vals,0)
self.n_iter = n_iter
if func is None: self.func = annealing_linear if is_tuple(vals) else annealing_no
else: self.func = func
self.n = 0
def step(self)->Number:
self.n += 1
return self.func(self.start, self.end, self.n/self.n_iter)
@property
def is_done(self)->bool:
return self.n >= self.n_iter
| true | true |
f730b2ef46405f107ae301a6a6737279f08c4c9d | 2,465 | py | Python | main.py | wiseleywu/Conference-Central-API | dedb765097c3f0378352c54def575d5ce598cd84 | [
"Apache-2.0"
] | null | null | null | main.py | wiseleywu/Conference-Central-API | dedb765097c3f0378352c54def575d5ce598cd84 | [
"Apache-2.0"
] | null | null | null | main.py | wiseleywu/Conference-Central-API | dedb765097c3f0378352c54def575d5ce598cd84 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
main.py -- Udacity conference server-side Python App Engine
HTTP controller handlers for memcache & task queue access
"""
import webapp2
from google.appengine.api import app_identity
from google.appengine.api import mail
from google.appengine.api import memcache
from google.appengine.ext import ndb
from conference import ConferenceApi
from settings import MEMCACHE_SPEAKER_KEY
from models import Conference, Session, Speaker
__author__ = 'wiseleywu@gmail.com (Wiseley Wu)'
class SetAnnouncementHandler(webapp2.RequestHandler):
def get(self):
"""Set Announcement in Memcache."""
ConferenceApi._cacheAnnouncement()
self.response.set_status(204)
class SendConfirmationEmailHandler(webapp2.RequestHandler):
def post(self):
"""Send email confirming Conference creation."""
mail.send_mail(
'noreply@%s.appspotmail.com' % (
app_identity.get_application_id()), # from
self.request.get('email'), # to
'You created a new Conference!', # subj
'Hi, you have created a following ' # body
'conference:\r\n\r\n%s' % self.request.get(
'conferenceInfo')
)
class checkedFeaturedSpeaker(webapp2.RequestHandler):
def post(self):
"""Check Featured Speaker within a Conference"""
conf = ndb.Key(urlsafe=self.request.get('wsck')).get()
speaker = ndb.Key(Speaker, int(self.request.get('speakerId'))).get()
sessions = Session.query(ancestor=conf.key)
sessions = sessions.filter(
Session.speakerId == int(self.request.get('speakerId')))
# don't featured speaker if only in 0 or 1 session
if sessions.count() <= 1:
announcement = ""
else:
announcement = '%s %s %s %s' % (
'Featured Speaker - ',
speaker.displayName,
'. You can find the speaker in the following sessions: ',
', '.join(
session.name for session in sessions)
)
memcache.set(MEMCACHE_SPEAKER_KEY, announcement)
self.response.set_status(204)
app = webapp2.WSGIApplication([
('/crons/set_announcement', SetAnnouncementHandler),
('/tasks/send_confirmation_email', SendConfirmationEmailHandler),
('/tasks/check_featured_speaker', checkedFeaturedSpeaker),
], debug=True)
| 33.767123 | 76 | 0.636917 |
import webapp2
from google.appengine.api import app_identity
from google.appengine.api import mail
from google.appengine.api import memcache
from google.appengine.ext import ndb
from conference import ConferenceApi
from settings import MEMCACHE_SPEAKER_KEY
from models import Conference, Session, Speaker
__author__ = 'wiseleywu@gmail.com (Wiseley Wu)'
class SetAnnouncementHandler(webapp2.RequestHandler):
def get(self):
ConferenceApi._cacheAnnouncement()
self.response.set_status(204)
class SendConfirmationEmailHandler(webapp2.RequestHandler):
def post(self):
mail.send_mail(
'noreply@%s.appspotmail.com' % (
app_identity.get_application_id()),
self.request.get('email'),
'You created a new Conference!',
'Hi, you have created a following '
'conference:\r\n\r\n%s' % self.request.get(
'conferenceInfo')
)
class checkedFeaturedSpeaker(webapp2.RequestHandler):
def post(self):
conf = ndb.Key(urlsafe=self.request.get('wsck')).get()
speaker = ndb.Key(Speaker, int(self.request.get('speakerId'))).get()
sessions = Session.query(ancestor=conf.key)
sessions = sessions.filter(
Session.speakerId == int(self.request.get('speakerId')))
if sessions.count() <= 1:
announcement = ""
else:
announcement = '%s %s %s %s' % (
'Featured Speaker - ',
speaker.displayName,
'. You can find the speaker in the following sessions: ',
', '.join(
session.name for session in sessions)
)
memcache.set(MEMCACHE_SPEAKER_KEY, announcement)
self.response.set_status(204)
app = webapp2.WSGIApplication([
('/crons/set_announcement', SetAnnouncementHandler),
('/tasks/send_confirmation_email', SendConfirmationEmailHandler),
('/tasks/check_featured_speaker', checkedFeaturedSpeaker),
], debug=True)
| true | true |
f730b4147de5dae0eab5ed3e717e1a146221dfe5 | 972 | py | Python | src/sage/combinat/posets/forest.py | hsm207/sage | 020bd59ec28717bfab9af44d2231c53da1ff99f1 | [
"BSL-1.0"
] | 1,742 | 2015-01-04T07:06:13.000Z | 2022-03-30T11:32:52.000Z | src/sage/combinat/posets/forest.py | hsm207/sage | 020bd59ec28717bfab9af44d2231c53da1ff99f1 | [
"BSL-1.0"
] | 66 | 2015-03-19T19:17:24.000Z | 2022-03-16T11:59:30.000Z | src/sage/combinat/posets/forest.py | hsm207/sage | 020bd59ec28717bfab9af44d2231c53da1ff99f1 | [
"BSL-1.0"
] | 495 | 2015-01-10T10:23:18.000Z | 2022-03-24T22:06:11.000Z | r"""
Forest Posets
AUTHORS:
- Stefan Grosser (06-2020): initial implementation
"""
# ****************************************************************************
# Copyright (C) 2020 Stefan Grosser <stefan.grosser1@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from sage.combinat.posets.posets import FinitePoset
from sage.combinat.posets.linear_extensions import LinearExtensionsOfForest
class ForestPoset(FinitePoset):
r"""
A forest poset is a poset where the underlying Hasse diagram and is
directed acyclic graph.
"""
_lin_ext_type = LinearExtensionsOfForest
_desc = 'Finite forest poset'
| 32.4 | 78 | 0.616255 |
from sage.combinat.posets.posets import FinitePoset
from sage.combinat.posets.linear_extensions import LinearExtensionsOfForest
class ForestPoset(FinitePoset):
_lin_ext_type = LinearExtensionsOfForest
_desc = 'Finite forest poset'
| true | true |
f730b4360fcf30609dec2be0e7dc23ff7f04436d | 190 | py | Python | torch_inception_resnet_v2/utils/convolution_config.py | mhconradt/inception-resnet-v2 | 0816e9885eb7034d99a67519efa0642578526a3e | [
"MIT"
] | 9 | 2019-11-28T01:33:43.000Z | 2021-09-06T06:51:47.000Z | torch_inception_resnet_v2/utils/convolution_config.py | mhconradt/inception-resnet-v2 | 0816e9885eb7034d99a67519efa0642578526a3e | [
"MIT"
] | 1 | 2022-02-06T12:03:00.000Z | 2022-02-07T02:30:47.000Z | torch_inception_resnet_v2/utils/convolution_config.py | mhconradt/inception-resnet-v2 | 0816e9885eb7034d99a67519efa0642578526a3e | [
"MIT"
] | 4 | 2019-12-12T05:35:28.000Z | 2021-04-30T18:41:41.000Z | from collections import namedtuple
ConvolutionConfig = namedtuple('ConvolutionConfig', ['n_filters', 'kernel_size', 'stride', 'padding'])
PadConfig = namedtuple('PadConfig', ['padding'])
| 27.142857 | 102 | 0.752632 | from collections import namedtuple
ConvolutionConfig = namedtuple('ConvolutionConfig', ['n_filters', 'kernel_size', 'stride', 'padding'])
PadConfig = namedtuple('PadConfig', ['padding'])
| true | true |
f730b62321ee0ae029cfec6153d97c708713be29 | 37,777 | py | Python | web2py/gluon/packages/dal/pydal/base.py | aduckworth1969/smc | b1771d9ed68f0e35f46271aab5b1e1fab363e3d9 | [
"MIT"
] | 1 | 2018-04-19T05:09:06.000Z | 2018-04-19T05:09:06.000Z | web2py/gluon/packages/dal/pydal/base.py | aduckworth1969/smc | b1771d9ed68f0e35f46271aab5b1e1fab363e3d9 | [
"MIT"
] | 14 | 2018-03-04T22:56:41.000Z | 2020-12-10T19:49:43.000Z | web2py/gluon/packages/dal/pydal/base.py | aduckworth1969/smc | b1771d9ed68f0e35f46271aab5b1e1fab363e3d9 | [
"MIT"
] | 2 | 2020-09-18T15:12:26.000Z | 2020-11-10T22:09:59.000Z | # -*- coding: utf-8 -*-
# pylint: disable=no-member
"""
| This file is part of the web2py Web Framework
| Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
| License: BSD
|
This file contains the DAL support for many relational databases, including:
- SQLite & SpatiaLite
- MySQL
- Postgres
- Firebird
- Oracle
- MS SQL
- DB2
- Interbase
- Ingres
- Informix (9+ and SE)
- SapDB (experimental)
- Cubrid (experimental)
- CouchDB (experimental)
- MongoDB (in progress)
- Google:nosql
- Google:sql
- Teradata
- IMAP (experimental)
Example of usage::
>>> # from dal import DAL, Field
### create DAL connection (and create DB if it doesn't exist)
>>> db = DAL(('sqlite://storage.sqlite','mysql://a:b@localhost/x'),
... folder=None)
### define a table 'person' (create/alter as necessary)
>>> person = db.define_table('person',Field('name','string'))
### insert a record
>>> id = person.insert(name='James')
### retrieve it by id
>>> james = person(id)
### retrieve it by name
>>> james = person(name='James')
### retrieve it by arbitrary query
>>> query = (person.name=='James') & (person.name.startswith('J'))
>>> james = db(query).select(person.ALL)[0]
### update one record
>>> james.update_record(name='Jim')
<Row {'id': 1, 'name': 'Jim'}>
### update multiple records by query
>>> db(person.name.like('J%')).update(name='James')
1
### delete records by query
>>> db(person.name.lower() == 'jim').delete()
0
### retrieve multiple records (rows)
>>> people = db(person).select(orderby=person.name,
... groupby=person.name, limitby=(0,100))
### further filter them
>>> james = people.find(lambda row: row.name == 'James').first()
>>> print james.id, james.name
1 James
### check aggregates
>>> counter = person.id.count()
>>> print db(person).select(counter).first()(counter)
1
### delete one record
>>> james.delete_record()
1
### delete (drop) entire database table
>>> person.drop()
Supported DAL URI strings::
'sqlite://test.db'
'spatialite://test.db'
'sqlite:memory'
'spatialite:memory'
'jdbc:sqlite://test.db'
'mysql://root:none@localhost/test'
'postgres://mdipierro:password@localhost/test'
'postgres:psycopg2://mdipierro:password@localhost/test'
'postgres:pg8000://mdipierro:password@localhost/test'
'jdbc:postgres://mdipierro:none@localhost/test'
'mssql://web2py:none@A64X2/web2py_test'
'mssql2://web2py:none@A64X2/web2py_test' # alternate mappings
'mssql3://web2py:none@A64X2/web2py_test' # better pagination (requires >= 2005)
'mssql4://web2py:none@A64X2/web2py_test' # best pagination (requires >= 2012)
'pytds://user:password@server:port/database' # python-tds
'oracle://username:password@database'
'firebird://user:password@server:3050/database'
'db2:ibm_db_dbi://DSN=dsn;UID=user;PWD=pass'
'db2:pyodbc://driver=DB2;hostname=host;database=database;uid=user;pwd=password;port=port'
'firebird://username:password@hostname/database'
'firebird_embedded://username:password@c://path'
'informix://user:password@server:3050/database'
'informixu://user:password@server:3050/database' # unicode informix
'ingres://database' # or use an ODBC connection string, e.g. 'ingres://dsn=dsn_name'
'google:datastore' # for google app engine datastore (uses ndb by default)
'google:sql' # for google app engine with sql (mysql compatible)
'teradata://DSN=dsn;UID=user;PWD=pass; DATABASE=database' # experimental
'imap://user:password@server:port' # experimental
'mongodb://user:password@server:port/database' # experimental
For more info::
help(DAL)
help(Field)
"""
import glob
import logging
import socket
import threading
import time
import traceback
import urllib
from ._compat import (
PY2,
pickle,
hashlib_md5,
pjoin,
copyreg,
integer_types,
with_metaclass,
long,
unquote,
iteritems,
)
from ._globals import GLOBAL_LOCKER, THREAD_LOCAL, DEFAULT
from ._load import OrderedDict
from .helpers.classes import (
Serializable,
SQLCallableList,
BasicStorage,
RecordUpdater,
RecordDeleter,
TimingHandler,
)
from .helpers.methods import hide_password, smart_query, auto_validators, auto_represent, uuidstr
from .helpers.regex import REGEX_PYTHON_KEYWORDS, REGEX_DBNAME
from .helpers.rest import RestParser
from .helpers.serializers import serializers
from .objects import Table, Field, Rows, Row, Set
from .adapters.base import BaseAdapter, NullAdapter
from .default_validators import default_validators
#: Keyword arguments accepted by `DAL.define_table`; anything else raises
#: a SyntaxError there.
TABLE_ARGS = {
    "migrate",
    "primarykey",
    "fake_migrate",
    "format",
    "redefine",
    "singular",
    "plural",
    "trigger_name",
    "sequence_name",
    "fields",
    "common_filter",
    "polymodel",
    "table_class",
    "on_define",
    "rname",
}
class MetaDAL(type):
    """Metaclass that lets ``DAL(...)`` override class-level hooks per call.

    Keyword arguments whose names appear in the hook list below are removed
    from the constructor kwargs and installed as attributes on the class
    itself before the instance is built; all remaining kwargs are forwarded
    to the normal constructor.
    """

    def __call__(cls, *args, **kwargs):
        # class-level customisation points accepted at call time
        hook_names = (
            "logger",
            "representers",
            "serializers",
            "uuid",
            "validators",
            "validators_method",
            "Table",
            "Row",
        )
        for hook in hook_names:
            value = kwargs.get(hook)
            # only truthy values are intercepted; falsy ones stay in kwargs
            if value:
                setattr(cls, hook, value)
                del kwargs[hook]
        return super(MetaDAL, cls).__call__(*args, **kwargs)
class DAL(with_metaclass(MetaDAL, Serializable, BasicStorage)):
"""
An instance of this class represents a database connection
Args:
uri(str): contains information for connecting to a database.
Defaults to `'sqlite://dummy.db'`
Note:
experimental: you can specify a dictionary as uri
parameter i.e. with::
db = DAL({"uri": "sqlite://storage.sqlite",
"tables": {...}, ...})
for an example of dict input you can check the output
of the scaffolding db model with
db.as_dict()
Note that for compatibility with Python older than
version 2.6.5 you should cast your dict input keys
to str due to a syntax limitation on kwarg names.
for proper DAL dictionary input you can use one of::
obj = serializers.cast_keys(dict, [encoding="utf-8"])
#or else (for parsing json input)
obj = serializers.loads_json(data, unicode_keys=False)
pool_size: How many open connections to make to the database object.
folder: where .table files will be created. Automatically set within
web2py. Use an explicit path when using DAL outside web2py
db_codec: string encoding of the database (default: 'UTF-8')
table_hash: database identifier with .tables. If your connection hash
change you can still using old .tables if they have db_hash
as prefix
check_reserved: list of adapters to check tablenames and column names
against sql/nosql reserved keywords. Defaults to `None`
- 'common' List of sql keywords that are common to all database
types such as "SELECT, INSERT". (recommended)
- 'all' Checks against all known SQL keywords
- '<adaptername>'' Checks against the specific adapters list of
keywords
- '<adaptername>_nonreserved' Checks against the specific adapters
list of nonreserved keywords. (if available)
migrate: sets default migrate behavior for all tables
fake_migrate: sets default fake_migrate behavior for all tables
migrate_enabled: If set to False disables ALL migrations
fake_migrate_all: If set to True fake migrates ALL tables
attempts: Number of times to attempt connecting
auto_import: If set to True, tries import automatically table
definitions from the databases folder (works only for simple models)
bigint_id: If set, turn on bigint instead of int for id and reference
fields
lazy_tables: delays table definition until table access
after_connection: can a callable that will be executed after the
connection
Example:
Use as::
db = DAL('sqlite://test.db')
or::
db = DAL(**{"uri": ..., "tables": [...]...}) # experimental
db.define_table('tablename', Field('fieldname1'),
Field('fieldname2'))
"""
serializers = None
validators = None
representers = {}
validators_method = default_validators
uuid = uuidstr
logger = logging.getLogger("pyDAL")
Field = Field
Table = Table
Rows = Rows
Row = Row
record_operators = {"update_record": RecordUpdater, "delete_record": RecordDeleter}
execution_handlers = [TimingHandler]
    def __new__(cls, uri="sqlite://dummy.db", *args, **kwargs):
        """Return a (possibly cached) DAL instance.

        Instances are cached per thread, keyed by ``db_uid`` (a hash of the
        uri by default).  The special uri ``"<zombie>"`` retrieves an
        instance created earlier in the same thread without opening a new
        connection.
        """
        if not hasattr(THREAD_LOCAL, "_pydal_db_instances_"):
            THREAD_LOCAL._pydal_db_instances_ = {}
        if not hasattr(THREAD_LOCAL, "_pydal_db_instances_zombie_"):
            THREAD_LOCAL._pydal_db_instances_zombie_ = {}
        if uri == "<zombie>":
            db_uid = kwargs["db_uid"]  # a zombie must have a db_uid!
            if db_uid in THREAD_LOCAL._pydal_db_instances_:
                # reuse the most recently created instance for this uid
                db_group = THREAD_LOCAL._pydal_db_instances_[db_uid]
                db = db_group[-1]
            elif db_uid in THREAD_LOCAL._pydal_db_instances_zombie_:
                db = THREAD_LOCAL._pydal_db_instances_zombie_[db_uid]
            else:
                # nothing cached yet: park a bare object in the zombie registry
                db = super(DAL, cls).__new__(cls)
                THREAD_LOCAL._pydal_db_instances_zombie_[db_uid] = db
        else:
            db_uid = kwargs.get("db_uid", hashlib_md5(repr(uri)).hexdigest())
            if db_uid in THREAD_LOCAL._pydal_db_instances_zombie_:
                # promote a previously parked zombie to a real instance
                db = THREAD_LOCAL._pydal_db_instances_zombie_[db_uid]
                del THREAD_LOCAL._pydal_db_instances_zombie_[db_uid]
            else:
                db = super(DAL, cls).__new__(cls)
            db_group = THREAD_LOCAL._pydal_db_instances_.get(db_uid, [])
            db_group.append(db)
            THREAD_LOCAL._pydal_db_instances_[db_uid] = db_group
        db._db_uid = db_uid
        return db
@staticmethod
def set_folder(folder):
# ## this allows gluon to set a folder for this thread
# ## <<<<<<<<< Should go away as new DAL replaces old sql.py
BaseAdapter.set_folder(folder)
@staticmethod
def get_instances():
"""
Returns a dictionary with uri as key with timings and defined tables::
{'sqlite://storage.sqlite': {
'dbstats': [(select auth_user.email from auth_user, 0.02009)],
'dbtables': {
'defined': ['auth_cas', 'auth_event', 'auth_group',
'auth_membership', 'auth_permission', 'auth_user'],
'lazy': '[]'
}
}
}
"""
dbs = getattr(THREAD_LOCAL, "_pydal_db_instances_", {}).items()
infos = {}
for db_uid, db_group in dbs:
for db in db_group:
if not db._uri:
continue
k = hide_password(db._adapter.uri)
infos[k] = dict(
dbstats=[(row[0], row[1]) for row in db._timings],
dbtables={
"defined": sorted(
list(set(db.tables) - set(db._LAZY_TABLES.keys()))
),
"lazy": sorted(db._LAZY_TABLES.keys()),
},
)
return infos
@staticmethod
def distributed_transaction_begin(*instances):
if not instances:
return
thread_key = "%s.%s" % (socket.gethostname(), threading.currentThread())
instances = enumerate(instances)
keys = ["%s.%i" % (thread_key, i) for (i, db) in instances]
for (i, db) in instances:
if not db._adapter.support_distributed_transaction():
raise SyntaxError(
"distributed transaction not suported by %s" % db._dbname
)
for (i, db) in instances:
db._adapter.distributed_transaction_begin(keys[i])
@staticmethod
def distributed_transaction_commit(*instances):
if not instances:
return
instances = enumerate(instances)
thread_key = "%s.%s" % (socket.gethostname(), threading.currentThread())
keys = ["%s.%i" % (thread_key, i) for (i, db) in instances]
for (i, db) in instances:
if not db._adapter.support_distributed_transaction():
raise SyntaxError(
"distributed transaction not suported by %s" % db._dbanme
)
try:
for (i, db) in instances:
db._adapter.prepare(keys[i])
except:
for (i, db) in instances:
db._adapter.rollback_prepared(keys[i])
raise RuntimeError("failure to commit distributed transaction")
else:
for (i, db) in instances:
db._adapter.commit_prepared(keys[i])
return
    def __init__(
        self,
        uri="sqlite://dummy.db",
        pool_size=0,
        folder=None,
        db_codec="UTF-8",
        check_reserved=None,
        migrate=True,
        fake_migrate=False,
        migrate_enabled=True,
        fake_migrate_all=False,
        decode_credentials=False,
        driver_args=None,
        adapter_args=None,
        attempts=5,
        auto_import=False,
        bigint_id=False,
        debug=False,
        lazy_tables=False,
        db_uid=None,
        after_connection=None,
        tables=None,
        ignore_field_case=True,
        entity_quoting=True,
        table_hash=None,
    ):
        """Initialize the DAL and open the database connection.

        See the class docstring for the meaning of each parameter.  A
        "<zombie>" uri with a db_uid means __new__ returned an already
        initialized cached instance, so initialization is skipped.
        """
        if uri == "<zombie>" and db_uid is not None:
            # instance was fully built by a previous call; nothing to do
            return
        super(DAL, self).__init__()
        # Rows/Row may be overridden via MetaDAL kwargs; enforce that the
        # replacements are still compatible subclasses
        if not issubclass(self.Rows, Rows):
            raise RuntimeError("`Rows` class must be a subclass of pydal.objects.Rows")
        if not issubclass(self.Row, Row):
            raise RuntimeError("`Row` class must be a subclass of pydal.objects.Row")
        from .drivers import DRIVERS, is_jdbc

        self._drivers_available = DRIVERS
        # optionally URL-unquote user/password parsed out of the uri
        if not decode_credentials:
            credential_decoder = lambda cred: cred
        else:
            credential_decoder = lambda cred: unquote(cred)
        self._folder = folder
        if folder:
            self.set_folder(folder)
        self._uri = uri
        self._pool_size = pool_size
        self._db_codec = db_codec
        self._pending_references = {}
        self._request_tenant = "request_tenant"
        self._common_fields = []
        self._referee_name = "%(table)s"
        self._bigint_id = bigint_id
        self._debug = debug
        self._migrated = []
        # lazy-table registry: name -> (name, fields, kwargs), realised on
        # first attribute access (see __getattr__ / lazy_define_table)
        self._LAZY_TABLES = {}
        self._lazy_tables = lazy_tables
        self._tables = SQLCallableList()
        # per-thread registry of table aliases, consulted by __getattr__
        self._aliased_tables = threading.local()
        self._driver_args = driver_args
        self._adapter_args = adapter_args
        self._check_reserved = check_reserved
        self._decode_credentials = decode_credentials
        self._attempts = attempts
        self._ignore_field_case = ignore_field_case
        # sanitize attempts (note: self._attempts above keeps the raw value)
        if not str(attempts).isdigit() or attempts < 0:
            attempts = 5
        if uri:
            # uri may be a single string or a list of fallback uris; each
            # connection attempt cycles through all of them
            uris = isinstance(uri, (list, tuple)) and uri or [uri]
            connected = False
            for k in range(attempts):
                for uri in uris:
                    try:
                        from .adapters import adapters

                        if is_jdbc and not uri.startswith("jdbc:"):
                            uri = "jdbc:" + uri
                        self._dbname = REGEX_DBNAME.match(uri).group()
                        # notice that driver args or {} else driver_args
                        # defaults to {} global, not correct
                        kwargs = dict(
                            db=self,
                            uri=uri,
                            pool_size=pool_size,
                            folder=folder,
                            db_codec=db_codec,
                            credential_decoder=credential_decoder,
                            driver_args=driver_args or {},
                            adapter_args=adapter_args or {},
                            after_connection=after_connection,
                            entity_quoting=entity_quoting,
                        )
                        adapter = adapters.get_for(self._dbname)
                        self._adapter = adapter(**kwargs)
                        # self._adapter.ignore_field_case = ignore_field_case
                        if bigint_id:
                            self._adapter.dialect._force_bigints()
                        connected = True
                        break
                    except SyntaxError:
                        # malformed uri / unsupported backend: fail fast
                        raise
                    except Exception:
                        tb = traceback.format_exc()
                        self.logger.debug(
                            "DEBUG: connect attempt %i, connection error:\n%s" % (k, tb)
                        )
                if connected:
                    break
                else:
                    # back off before retrying the whole uri list
                    time.sleep(1)
            if not connected:
                raise RuntimeError(
                    "Failure to connect, tried %d times:\n%s" % (attempts, tb)
                )
        else:
            # no uri: dummy adapter, no real connection, migrations disabled
            self._adapter = NullAdapter(
                db=self,
                pool_size=0,
                uri="None",
                folder=folder,
                db_codec=db_codec,
                after_connection=after_connection,
                entity_quoting=entity_quoting,
            )
            migrate = fake_migrate = False
            self.validators_method = None
            self.validators = None
        adapter = self._adapter
        # identifies this connection's .table migration files on disk
        self._uri_hash = table_hash or hashlib_md5(adapter.uri).hexdigest()
        if check_reserved:
            from .contrib.reserved_sql_keywords import ADAPTERS as RSK

            self.RSK = RSK
        self._migrate = migrate
        self._fake_migrate = fake_migrate
        self._migrate_enabled = migrate_enabled
        self._fake_migrate_all = fake_migrate_all
        if self.serializers is not None:
            # merge user-supplied serializers into the shared registry
            for k, v in self.serializers.items():
                serializers._custom_[k] = v
        if auto_import or tables:
            self.import_table_definitions(adapter.folder, tables=tables)
@property
def tables(self):
return self._tables
@property
def _timings(self):
return getattr(THREAD_LOCAL, "_pydal_timings_", [])
@property
def _lastsql(self):
return self._timings[-1] if self._timings else None
    def import_table_definitions(
        self, path, migrate=False, fake_migrate=False, tables=None
    ):
        """Re-create table definitions on this DAL.

        Either from an explicit list of table dicts (each one is a set of
        `define_table` keyword arguments), or by loading the pickled
        ``<uri_hash>_<name>.table`` migration files found under `path`.
        """
        if tables:
            for table in tables:
                self.define_table(**table)
        else:
            pattern = pjoin(path, self._uri_hash + "_*.table")
            for filename in glob.glob(pattern):
                # .table files are pickled dicts; open in binary mode on PY3
                tfile = self._adapter.migrator.file_open(filename, "r" if PY2 else "rb")
                try:
                    sql_fields = pickle.load(tfile)
                    # slice out the table name sitting between the
                    # "<uri_hash>_" prefix and the ".table" suffix
                    name = filename[len(pattern) - 7 : -6]
                    mf = [
                        (
                            value["sortable"],
                            Field(
                                key,
                                type=value["type"],
                                length=value.get("length", None),
                                notnull=value.get("notnull", False),
                                unique=value.get("unique", False),
                            ),
                        )
                        for key, value in iteritems(sql_fields)
                    ]
                    # restore original field order via the "sortable" index
                    mf.sort(key=lambda a: a[0])
                    self.define_table(
                        name,
                        *[item[1] for item in mf],
                        **dict(migrate=migrate, fake_migrate=fake_migrate)
                    )
                finally:
                    self._adapter.migrator.file_close(tfile)
def check_reserved_keyword(self, name):
"""
Validates `name` against SQL keywords
Uses self._check_reserved which is a list of operators to use.
"""
for backend in self._check_reserved:
if name.upper() in self.RSK[backend]:
raise SyntaxError(
'invalid table/column name "%s" is a "%s" reserved SQL/NOSQL keyword'
% (name, backend.upper())
)
def parse_as_rest(self, patterns, args, vars, queries=None, nested_select=True):
return RestParser(self).parse(patterns, args, vars, queries, nested_select)
def define_table(self, tablename, *fields, **kwargs):
invalid_kwargs = set(kwargs) - TABLE_ARGS
if invalid_kwargs:
raise SyntaxError(
'invalid table "%s" attributes: %s' % (tablename, invalid_kwargs)
)
if not fields and "fields" in kwargs:
fields = kwargs.get("fields", ())
if not isinstance(tablename, str):
if isinstance(tablename, unicode):
try:
tablename = str(tablename)
except UnicodeEncodeError:
raise SyntaxError("invalid unicode table name")
else:
raise SyntaxError("missing table name")
redefine = kwargs.get("redefine", False)
if tablename in self.tables:
if redefine:
try:
delattr(self, tablename)
except:
pass
else:
raise SyntaxError("table already defined: %s" % tablename)
elif (
tablename.startswith("_")
or tablename in dir(self)
or REGEX_PYTHON_KEYWORDS.match(tablename)
):
raise SyntaxError("invalid table name: %s" % tablename)
elif self._check_reserved:
self.check_reserved_keyword(tablename)
if self._lazy_tables:
if tablename not in self._LAZY_TABLES or redefine:
self._LAZY_TABLES[tablename] = (tablename, fields, kwargs)
table = None
else:
table = self.lazy_define_table(tablename, *fields, **kwargs)
if tablename not in self.tables:
self.tables.append(tablename)
return table
    def lazy_define_table(self, tablename, *fields, **kwargs):
        """Actually build the Table object (deferred when lazy_tables=True).

        Creates the Table, attaches it to the DAL, wires up references and
        default validators/representers, and (when migrations are enabled)
        lets the adapter create/alter the physical table.
        """
        kwargs_get = kwargs.get
        common_fields = self._common_fields
        if common_fields:
            # fields shared by all tables get appended to every definition;
            # plain Fields are cloned so tables don't share Field objects
            fields = list(fields) + [
                f if isinstance(f, Table) else f.clone() for f in common_fields
            ]
        table_class = kwargs_get("table_class", Table)
        table = table_class(self, tablename, *fields, **kwargs)
        table._actual = True
        self[tablename] = table
        # must follow above line to handle self references
        table._create_references()
        # fill in default validators/representers for fields that did not
        # specify their own
        for field in table:
            if field.requires is DEFAULT:
                field.requires = auto_validators(field)
            if field.represent is None:
                field.represent = auto_represent(field)
        migrate = self._migrate_enabled and kwargs_get("migrate", self._migrate)
        if (
            migrate
            and self._uri not in (None, "None")
            or self._adapter.dbengine == "google:datastore"
        ):
            fake_migrate = self._fake_migrate_all or kwargs_get(
                "fake_migrate", self._fake_migrate
            )
            polymodel = kwargs_get("polymodel", None)
            try:
                # serialize DDL/migration work across threads
                GLOBAL_LOCKER.acquire()
                self._adapter.create_table(
                    table,
                    migrate=migrate,
                    fake_migrate=fake_migrate,
                    polymodel=polymodel,
                )
            finally:
                GLOBAL_LOCKER.release()
        else:
            # no migration: no .table file is associated with this table
            table._dbt = None
        on_define = kwargs_get("on_define", None)
        if on_define:
            on_define(table)
        return table
def as_dict(self, flat=False, sanitize=True):
db_uid = uri = None
if not sanitize:
uri, db_uid = (self._uri, self._db_uid)
db_as_dict = dict(
tables=[],
uri=uri,
db_uid=db_uid,
**dict(
[
(k, getattr(self, "_" + k, None))
for k in [
"pool_size",
"folder",
"db_codec",
"check_reserved",
"migrate",
"fake_migrate",
"migrate_enabled",
"fake_migrate_all",
"decode_credentials",
"driver_args",
"adapter_args",
"attempts",
"bigint_id",
"debug",
"lazy_tables",
]
]
)
)
for table in self:
db_as_dict["tables"].append(table.as_dict(flat=flat, sanitize=sanitize))
return db_as_dict
def __contains__(self, tablename):
try:
return tablename in self.tables
except AttributeError:
# The instance has no .tables attribute yet
return False
def __iter__(self):
for tablename in self.tables:
yield self[tablename]
    def __getitem__(self, key):
        # Dictionary-style access: db["tablename"] -> Table.
        # Calls __getattr__ directly (not getattr) so lazy tables and
        # per-thread aliases are resolved via the same code path.
        return self.__getattr__(str(key))
    def __getattr__(self, key):
        # Attribute fallback (also backs __getitem__), in priority order:
        # 1) a lazily-defined table is materialized on first access;
        # 2) a per-thread table alias shadows the real attribute;
        # 3) otherwise defer to BasicStorage's lookup.
        # object.__getattribute__ is used to avoid re-entering this hook.
        if object.__getattribute__(
            self, "_lazy_tables"
        ) and key in object.__getattribute__(self, "_LAZY_TABLES"):
            # pop() so the table is only ever built once
            tablename, fields, kwargs = self._LAZY_TABLES.pop(key)
            return self.lazy_define_table(tablename, *fields, **kwargs)
        aliased_tables = object.__getattribute__(self, "_aliased_tables")
        aliased = getattr(aliased_tables, key, None)
        if aliased:
            return aliased
        return BasicStorage.__getattribute__(self, key)
def __setattr__(self, key, value):
if key[:1] != "_" and key in self:
raise SyntaxError("Object %s exists and cannot be redefined" % key)
return super(DAL, self).__setattr__(key, value)
def __repr__(self):
if hasattr(self, "_uri"):
return '<DAL uri="%s">' % hide_password(self._adapter.uri)
else:
return '<DAL db_uid="%s">' % self._db_uid
def smart_query(self, fields, text):
return Set(self, smart_query(fields, text))
def __call__(self, query=None, ignore_common_filters=None):
return self.where(query, ignore_common_filters)
def where(self, query=None, ignore_common_filters=None):
if isinstance(query, Table):
query = self._adapter.id_query(query)
elif isinstance(query, Field):
query = query != None
elif isinstance(query, dict):
icf = query.get("ignore_common_filters")
if icf:
ignore_common_filters = icf
return Set(self, query, ignore_common_filters=ignore_common_filters)
    def commit(self):
        # Commit the current transaction and drop this thread's table
        # aliases (they are only valid within one unit of work).
        self._adapter.commit()
        object.__getattribute__(self, "_aliased_tables").__dict__.clear()
    def rollback(self):
        # Roll back the current transaction and drop this thread's table
        # aliases, mirroring commit().
        self._adapter.rollback()
        object.__getattribute__(self, "_aliased_tables").__dict__.clear()
    def close(self):
        # Close the adapter connection and unregister this instance from
        # the per-thread group of DALs sharing the same db_uid.
        self._adapter.close()
        if self._db_uid in THREAD_LOCAL._pydal_db_instances_:
            db_group = THREAD_LOCAL._pydal_db_instances_[self._db_uid]
            db_group.remove(self)
            # Last instance gone: delete the group entry entirely.
            if not db_group:
                del THREAD_LOCAL._pydal_db_instances_[self._db_uid]
        # Release any adapter state stored in thread-locals.
        self._adapter._clean_tlocals()
    def executesql(
        self,
        query,
        placeholders=None,
        as_dict=False,
        fields=None,
        colnames=None,
        as_ordered_dict=False,
    ):
        """
        Executes an arbitrary query

        Args:
            query (str): the query to submit to the backend
            placeholders: is optional and will always be None.
                If using raw SQL with placeholders, placeholders may be
                a sequence of values to be substituted in
                or, (if supported by the DB driver), a dictionary with keys
                matching named placeholders in your SQL.
            as_dict: will always be None when using DAL.
                If using raw SQL can be set to True and the results cursor
                returned by the DB driver will be converted to a sequence of
                dictionaries keyed with the db field names. Results returned
                with as_dict=True are the same as those returned when applying
                .to_list() to a DAL query. If "as_ordered_dict"=True the
                behaviour is the same as when "as_dict"=True with the keys
                (field names) guaranteed to be in the same order as returned
                by the select name executed on the database.
            fields: list of DAL Fields that match the fields returned from the
                DB. The Field objects should be part of one or more Table
                objects defined on the DAL object. The "fields" list can include
                one or more DAL Table objects in addition to or instead of
                including Field objects, or it can be just a single table
                (not in a list). In that case, the Field objects will be
                extracted from the table(s).

                Note:
                    if either `fields` or `colnames` is provided, the results
                    will be converted to a DAL `Rows` object using the
                    `db._adapter.parse()` method
            colnames: list of field names in tablename.fieldname format

        Note:
            It is also possible to specify both "fields" and the associated
            "colnames". In that case, "fields" can also include DAL Expression
            objects in addition to Field objects. For Field objects in "fields",
            the associated "colnames" must still be in tablename.fieldname
            format. For Expression objects in "fields", the associated
            "colnames" can be any arbitrary labels.

        DAL Table objects referred to by "fields" or "colnames" can be dummy
        tables and do not have to represent any real tables in the database.
        Also, note that the "fields" and "colnames" must be in the
        same order as the fields in the results cursor returned from the DB.
        """
        adapter = self._adapter
        if placeholders:
            adapter.execute(query, placeholders)
        else:
            adapter.execute(query)
        if as_dict or as_ordered_dict:
            if not hasattr(adapter.cursor, "description"):
                raise RuntimeError(
                    "database does not support executesql(...,as_dict=True)"
                )
            # Non-DAL legacy db query, converts cursor results to dict.
            # sequence of 7-item sequences. each sequence tells about a column.
            # first item is always the field name according to Python Database API specs
            columns = adapter.cursor.description
            # reduce the column info down to just the field names
            fields = colnames or [f[0] for f in columns]
            if len(fields) != len(set(fields)):
                raise RuntimeError(
                    "Result set includes duplicate column names. Specify unique column names using the 'colnames' argument"
                )
            #: avoid bytes strings in columns names (py3)
            if columns and not PY2:
                for i in range(0, len(fields)):
                    if isinstance(fields[i], bytes):
                        fields[i] = fields[i].decode("utf8")
            # will hold our finished resultset in a list
            data = adapter.fetchall()
            # convert the list for each row into a dictionary so it's
            # easier to work with. row['field_name'] rather than row[0]
            if as_ordered_dict:
                _dict = OrderedDict
            else:
                _dict = dict
            return [_dict(zip(fields, row)) for row in data]
        # NOTE(review): bare except deliberately preserved — some drivers
        # raise on fetchall() for statements that return no result set
        # (e.g. DDL/UPDATE), in which case None is returned.
        try:
            data = adapter.fetchall()
        except:
            return None
        if fields or colnames:
            # Caller supplied schema info: parse raw rows into a DAL Rows
            # object instead of returning driver tuples.
            fields = [] if fields is None else fields
            if not isinstance(fields, list):
                fields = [fields]
            extracted_fields = []
            for field in fields:
                # Tables contribute all of their Fields, in order.
                if isinstance(field, Table):
                    extracted_fields.extend([f for f in field])
                else:
                    extracted_fields.append(field)
            if not colnames:
                colnames = [f.sqlsafe for f in extracted_fields]
            else:
                #: extracted_fields is empty we should make it from colnames
                # what 'col_fields' is for
                col_fields = []  # [[tablename, fieldname], ....]
                newcolnames = []
                for tf in colnames:
                    if "." in tf:
                        t_f = tf.split(".")
                        # re-quote tablename.fieldname per the dialect
                        tf = ".".join(adapter.dialect.quote(f) for f in t_f)
                    else:
                        t_f = None
                    if not extracted_fields:
                        col_fields.append(t_f)
                    newcolnames.append(tf)
                colnames = newcolnames
            data = adapter.parse(
                data,
                fields = extracted_fields or [tf and self[tf[0]][tf[1]] for tf in col_fields],
                colnames=colnames
            )
        return data
def _remove_references_to(self, thistable):
for table in self:
table._referenced_by = [
field for field in table._referenced_by if not field.table == thistable
]
def has_representer(self, name):
return callable(self.representers.get(name))
def represent(self, name, *args, **kwargs):
return self.representers[name](*args, **kwargs)
def export_to_csv_file(self, ofile, *args, **kwargs):
step = long(kwargs.get("max_fetch_rows,", 500))
write_colnames = kwargs["write_colnames"] = kwargs.get("write_colnames", True)
for table in self.tables:
ofile.write("TABLE %s\r\n" % table)
query = self._adapter.id_query(self[table])
nrows = self(query).count()
kwargs["write_colnames"] = write_colnames
for k in range(0, nrows, step):
self(query).select(limitby=(k, k + step)).export_to_csv_file(
ofile, *args, **kwargs
)
kwargs["write_colnames"] = False
ofile.write("\r\n\r\n")
ofile.write("END")
    def import_from_csv_file(
        self,
        ifile,
        id_map=None,
        null="<NULL>",
        unique="uuid",
        map_tablenames=None,
        ignore_missing_tables=False,
        *args,
        **kwargs
    ):
        """
        Load data previously produced by ``export_to_csv_file``.

        The stream is a sequence of "TABLE <name>" headers, each followed
        by CSV rows, terminated by an "END" line.  ``map_tablenames`` can
        rename tables on the fly; mapping a name to ``None`` (or passing
        ``ignore_missing_tables=True`` for unknown targets) skips that
        table's rows.
        """
        # if id_map is None: id_map={}
        id_offset = {}  # only used if id_map is None
        map_tablenames = map_tablenames or {}
        for line in ifile:
            line = line.strip()
            if not line:
                continue
            elif line == "END":
                return
            elif not line.startswith("TABLE "):
                raise SyntaxError("Invalid file format")
            elif not line[6:] in self.tables:
                raise SyntaxError("Unknown table : %s" % line[6:])
            else:
                tablename = line[6:]
                tablename = map_tablenames.get(tablename, tablename)
                if tablename is not None and tablename in self.tables:
                    # Delegate the CSV body to the Table-level importer;
                    # presumably it consumes rows up to the blank line
                    # separating tables (cf. the skip branch below) —
                    # TODO confirm against Table.import_from_csv_file.
                    self[tablename].import_from_csv_file(
                        ifile, id_map, null, unique, id_offset, *args, **kwargs
                    )
                elif tablename is None or ignore_missing_tables:
                    # skip all non-empty lines
                    for line in ifile:
                        if not line.strip():
                            break
                else:
                    raise RuntimeError(
                        "Unable to import table that does not exist.\nTry db.import_from_csv_file(..., map_tablenames={'table':'othertable'},ignore_missing_tables=True)"
                    )
def can_join(self):
return self._adapter.can_join()
def DAL_unpickler(db_uid):
    # Recreate a "zombie" DAL bound to the existing connection group
    # identified by db_uid (see DAL.__new__'s "<zombie>" handling).
    return DAL("<zombie>", db_uid=db_uid)
def DAL_pickler(db):
    # Pickle a DAL as just its db_uid; DAL_unpickler rebuilds it.
    return DAL_unpickler, (db._db_uid,)
copyreg.pickle(DAL, DAL_pickler, DAL_unpickler)
| 36.819688 | 169 | 0.556953 |
import glob
import logging
import socket
import threading
import time
import traceback
import urllib
from ._compat import (
PY2,
pickle,
hashlib_md5,
pjoin,
copyreg,
integer_types,
with_metaclass,
long,
unquote,
iteritems,
)
from ._globals import GLOBAL_LOCKER, THREAD_LOCAL, DEFAULT
from ._load import OrderedDict
from .helpers.classes import (
Serializable,
SQLCallableList,
BasicStorage,
RecordUpdater,
RecordDeleter,
TimingHandler,
)
from .helpers.methods import hide_password, smart_query, auto_validators, auto_represent, uuidstr
from .helpers.regex import REGEX_PYTHON_KEYWORDS, REGEX_DBNAME
from .helpers.rest import RestParser
from .helpers.serializers import serializers
from .objects import Table, Field, Rows, Row, Set
from .adapters.base import BaseAdapter, NullAdapter
from .default_validators import default_validators
TABLE_ARGS = set(
(
"migrate",
"primarykey",
"fake_migrate",
"format",
"redefine",
"singular",
"plural",
"trigger_name",
"sequence_name",
"fields",
"common_filter",
"polymodel",
"table_class",
"on_define",
"rname",
)
)
class MetaDAL(type):
def __call__(cls, *args, **kwargs):
intercepts = [
"logger",
"representers",
"serializers",
"uuid",
"validators",
"validators_method",
"Table",
"Row",
]
intercepted = []
for name in intercepts:
val = kwargs.get(name)
if val:
intercepted.append((name, val))
del kwargs[name]
for tup in intercepted:
setattr(cls, tup[0], tup[1])
obj = super(MetaDAL, cls).__call__(*args, **kwargs)
return obj
class DAL(with_metaclass(MetaDAL, Serializable, BasicStorage)):
serializers = None
validators = None
representers = {}
validators_method = default_validators
uuid = uuidstr
logger = logging.getLogger("pyDAL")
Field = Field
Table = Table
Rows = Rows
Row = Row
record_operators = {"update_record": RecordUpdater, "delete_record": RecordDeleter}
execution_handlers = [TimingHandler]
def __new__(cls, uri="sqlite://dummy.db", *args, **kwargs):
if not hasattr(THREAD_LOCAL, "_pydal_db_instances_"):
THREAD_LOCAL._pydal_db_instances_ = {}
if not hasattr(THREAD_LOCAL, "_pydal_db_instances_zombie_"):
THREAD_LOCAL._pydal_db_instances_zombie_ = {}
if uri == "<zombie>":
db_uid = kwargs["db_uid"]
if db_uid in THREAD_LOCAL._pydal_db_instances_:
db_group = THREAD_LOCAL._pydal_db_instances_[db_uid]
db = db_group[-1]
elif db_uid in THREAD_LOCAL._pydal_db_instances_zombie_:
db = THREAD_LOCAL._pydal_db_instances_zombie_[db_uid]
else:
db = super(DAL, cls).__new__(cls)
THREAD_LOCAL._pydal_db_instances_zombie_[db_uid] = db
else:
db_uid = kwargs.get("db_uid", hashlib_md5(repr(uri)).hexdigest())
if db_uid in THREAD_LOCAL._pydal_db_instances_zombie_:
db = THREAD_LOCAL._pydal_db_instances_zombie_[db_uid]
del THREAD_LOCAL._pydal_db_instances_zombie_[db_uid]
else:
db = super(DAL, cls).__new__(cls)
db_group = THREAD_LOCAL._pydal_db_instances_.get(db_uid, [])
db_group.append(db)
THREAD_LOCAL._pydal_db_instances_[db_uid] = db_group
db._db_uid = db_uid
return db
@staticmethod
def set_folder(folder):
s:
for db in db_group:
if not db._uri:
continue
k = hide_password(db._adapter.uri)
infos[k] = dict(
dbstats=[(row[0], row[1]) for row in db._timings],
dbtables={
"defined": sorted(
list(set(db.tables) - set(db._LAZY_TABLES.keys()))
),
"lazy": sorted(db._LAZY_TABLES.keys()),
},
)
return infos
@staticmethod
def distributed_transaction_begin(*instances):
if not instances:
return
thread_key = "%s.%s" % (socket.gethostname(), threading.currentThread())
instances = enumerate(instances)
keys = ["%s.%i" % (thread_key, i) for (i, db) in instances]
for (i, db) in instances:
if not db._adapter.support_distributed_transaction():
raise SyntaxError(
"distributed transaction not suported by %s" % db._dbname
)
for (i, db) in instances:
db._adapter.distributed_transaction_begin(keys[i])
@staticmethod
def distributed_transaction_commit(*instances):
if not instances:
return
instances = enumerate(instances)
thread_key = "%s.%s" % (socket.gethostname(), threading.currentThread())
keys = ["%s.%i" % (thread_key, i) for (i, db) in instances]
for (i, db) in instances:
if not db._adapter.support_distributed_transaction():
raise SyntaxError(
"distributed transaction not suported by %s" % db._dbanme
)
try:
for (i, db) in instances:
db._adapter.prepare(keys[i])
except:
for (i, db) in instances:
db._adapter.rollback_prepared(keys[i])
raise RuntimeError("failure to commit distributed transaction")
else:
for (i, db) in instances:
db._adapter.commit_prepared(keys[i])
return
def __init__(
self,
uri="sqlite://dummy.db",
pool_size=0,
folder=None,
db_codec="UTF-8",
check_reserved=None,
migrate=True,
fake_migrate=False,
migrate_enabled=True,
fake_migrate_all=False,
decode_credentials=False,
driver_args=None,
adapter_args=None,
attempts=5,
auto_import=False,
bigint_id=False,
debug=False,
lazy_tables=False,
db_uid=None,
after_connection=None,
tables=None,
ignore_field_case=True,
entity_quoting=True,
table_hash=None,
):
if uri == "<zombie>" and db_uid is not None:
return
super(DAL, self).__init__()
if not issubclass(self.Rows, Rows):
raise RuntimeError("`Rows` class must be a subclass of pydal.objects.Rows")
if not issubclass(self.Row, Row):
raise RuntimeError("`Row` class must be a subclass of pydal.objects.Row")
from .drivers import DRIVERS, is_jdbc
self._drivers_available = DRIVERS
if not decode_credentials:
credential_decoder = lambda cred: cred
else:
credential_decoder = lambda cred: unquote(cred)
self._folder = folder
if folder:
self.set_folder(folder)
self._uri = uri
self._pool_size = pool_size
self._db_codec = db_codec
self._pending_references = {}
self._request_tenant = "request_tenant"
self._common_fields = []
self._referee_name = "%(table)s"
self._bigint_id = bigint_id
self._debug = debug
self._migrated = []
self._LAZY_TABLES = {}
self._lazy_tables = lazy_tables
self._tables = SQLCallableList()
self._aliased_tables = threading.local()
self._driver_args = driver_args
self._adapter_args = adapter_args
self._check_reserved = check_reserved
self._decode_credentials = decode_credentials
self._attempts = attempts
self._ignore_field_case = ignore_field_case
if not str(attempts).isdigit() or attempts < 0:
attempts = 5
if uri:
uris = isinstance(uri, (list, tuple)) and uri or [uri]
connected = False
for k in range(attempts):
for uri in uris:
try:
from .adapters import adapters
if is_jdbc and not uri.startswith("jdbc:"):
uri = "jdbc:" + uri
self._dbname = REGEX_DBNAME.match(uri).group()
kwargs = dict(
db=self,
uri=uri,
pool_size=pool_size,
folder=folder,
db_codec=db_codec,
credential_decoder=credential_decoder,
driver_args=driver_args or {},
adapter_args=adapter_args or {},
after_connection=after_connection,
entity_quoting=entity_quoting,
)
adapter = adapters.get_for(self._dbname)
self._adapter = adapter(**kwargs)
if bigint_id:
self._adapter.dialect._force_bigints()
connected = True
break
except SyntaxError:
raise
except Exception:
tb = traceback.format_exc()
self.logger.debug(
"DEBUG: connect attempt %i, connection error:\n%s" % (k, tb)
)
if connected:
break
else:
time.sleep(1)
if not connected:
raise RuntimeError(
"Failure to connect, tried %d times:\n%s" % (attempts, tb)
)
else:
self._adapter = NullAdapter(
db=self,
pool_size=0,
uri="None",
folder=folder,
db_codec=db_codec,
after_connection=after_connection,
entity_quoting=entity_quoting,
)
migrate = fake_migrate = False
self.validators_method = None
self.validators = None
adapter = self._adapter
self._uri_hash = table_hash or hashlib_md5(adapter.uri).hexdigest()
if check_reserved:
from .contrib.reserved_sql_keywords import ADAPTERS as RSK
self.RSK = RSK
self._migrate = migrate
self._fake_migrate = fake_migrate
self._migrate_enabled = migrate_enabled
self._fake_migrate_all = fake_migrate_all
if self.serializers is not None:
for k, v in self.serializers.items():
serializers._custom_[k] = v
if auto_import or tables:
self.import_table_definitions(adapter.folder, tables=tables)
@property
def tables(self):
return self._tables
@property
def _timings(self):
return getattr(THREAD_LOCAL, "_pydal_timings_", [])
@property
def _lastsql(self):
return self._timings[-1] if self._timings else None
def import_table_definitions(
self, path, migrate=False, fake_migrate=False, tables=None
):
if tables:
for table in tables:
self.define_table(**table)
else:
pattern = pjoin(path, self._uri_hash + "_*.table")
for filename in glob.glob(pattern):
tfile = self._adapter.migrator.file_open(filename, "r" if PY2 else "rb")
try:
sql_fields = pickle.load(tfile)
name = filename[len(pattern) - 7 : -6]
mf = [
(
value["sortable"],
Field(
key,
type=value["type"],
length=value.get("length", None),
notnull=value.get("notnull", False),
unique=value.get("unique", False),
),
)
for key, value in iteritems(sql_fields)
]
mf.sort(key=lambda a: a[0])
self.define_table(
name,
*[item[1] for item in mf],
**dict(migrate=migrate, fake_migrate=fake_migrate)
)
finally:
self._adapter.migrator.file_close(tfile)
def check_reserved_keyword(self, name):
for backend in self._check_reserved:
if name.upper() in self.RSK[backend]:
raise SyntaxError(
'invalid table/column name "%s" is a "%s" reserved SQL/NOSQL keyword'
% (name, backend.upper())
)
def parse_as_rest(self, patterns, args, vars, queries=None, nested_select=True):
return RestParser(self).parse(patterns, args, vars, queries, nested_select)
def define_table(self, tablename, *fields, **kwargs):
invalid_kwargs = set(kwargs) - TABLE_ARGS
if invalid_kwargs:
raise SyntaxError(
'invalid table "%s" attributes: %s' % (tablename, invalid_kwargs)
)
if not fields and "fields" in kwargs:
fields = kwargs.get("fields", ())
if not isinstance(tablename, str):
if isinstance(tablename, unicode):
try:
tablename = str(tablename)
except UnicodeEncodeError:
raise SyntaxError("invalid unicode table name")
else:
raise SyntaxError("missing table name")
redefine = kwargs.get("redefine", False)
if tablename in self.tables:
if redefine:
try:
delattr(self, tablename)
except:
pass
else:
raise SyntaxError("table already defined: %s" % tablename)
elif (
tablename.startswith("_")
or tablename in dir(self)
or REGEX_PYTHON_KEYWORDS.match(tablename)
):
raise SyntaxError("invalid table name: %s" % tablename)
elif self._check_reserved:
self.check_reserved_keyword(tablename)
if self._lazy_tables:
if tablename not in self._LAZY_TABLES or redefine:
self._LAZY_TABLES[tablename] = (tablename, fields, kwargs)
table = None
else:
table = self.lazy_define_table(tablename, *fields, **kwargs)
if tablename not in self.tables:
self.tables.append(tablename)
return table
def lazy_define_table(self, tablename, *fields, **kwargs):
kwargs_get = kwargs.get
common_fields = self._common_fields
if common_fields:
fields = list(fields) + [
f if isinstance(f, Table) else f.clone() for f in common_fields
]
table_class = kwargs_get("table_class", Table)
table = table_class(self, tablename, *fields, **kwargs)
table._actual = True
self[tablename] = table
table._create_references()
for field in table:
if field.requires is DEFAULT:
field.requires = auto_validators(field)
if field.represent is None:
field.represent = auto_represent(field)
migrate = self._migrate_enabled and kwargs_get("migrate", self._migrate)
if (
migrate
and self._uri not in (None, "None")
or self._adapter.dbengine == "google:datastore"
):
fake_migrate = self._fake_migrate_all or kwargs_get(
"fake_migrate", self._fake_migrate
)
polymodel = kwargs_get("polymodel", None)
try:
GLOBAL_LOCKER.acquire()
self._adapter.create_table(
table,
migrate=migrate,
fake_migrate=fake_migrate,
polymodel=polymodel,
)
finally:
GLOBAL_LOCKER.release()
else:
table._dbt = None
on_define = kwargs_get("on_define", None)
if on_define:
on_define(table)
return table
def as_dict(self, flat=False, sanitize=True):
db_uid = uri = None
if not sanitize:
uri, db_uid = (self._uri, self._db_uid)
db_as_dict = dict(
tables=[],
uri=uri,
db_uid=db_uid,
**dict(
[
(k, getattr(self, "_" + k, None))
for k in [
"pool_size",
"folder",
"db_codec",
"check_reserved",
"migrate",
"fake_migrate",
"migrate_enabled",
"fake_migrate_all",
"decode_credentials",
"driver_args",
"adapter_args",
"attempts",
"bigint_id",
"debug",
"lazy_tables",
]
]
)
)
for table in self:
db_as_dict["tables"].append(table.as_dict(flat=flat, sanitize=sanitize))
return db_as_dict
def __contains__(self, tablename):
try:
return tablename in self.tables
except AttributeError:
return False
def __iter__(self):
for tablename in self.tables:
yield self[tablename]
def __getitem__(self, key):
return self.__getattr__(str(key))
def __getattr__(self, key):
if object.__getattribute__(
self, "_lazy_tables"
) and key in object.__getattribute__(self, "_LAZY_TABLES"):
tablename, fields, kwargs = self._LAZY_TABLES.pop(key)
return self.lazy_define_table(tablename, *fields, **kwargs)
aliased_tables = object.__getattribute__(self, "_aliased_tables")
aliased = getattr(aliased_tables, key, None)
if aliased:
return aliased
return BasicStorage.__getattribute__(self, key)
def __setattr__(self, key, value):
if key[:1] != "_" and key in self:
raise SyntaxError("Object %s exists and cannot be redefined" % key)
return super(DAL, self).__setattr__(key, value)
def __repr__(self):
if hasattr(self, "_uri"):
return '<DAL uri="%s">' % hide_password(self._adapter.uri)
else:
return '<DAL db_uid="%s">' % self._db_uid
def smart_query(self, fields, text):
return Set(self, smart_query(fields, text))
def __call__(self, query=None, ignore_common_filters=None):
return self.where(query, ignore_common_filters)
def where(self, query=None, ignore_common_filters=None):
if isinstance(query, Table):
query = self._adapter.id_query(query)
elif isinstance(query, Field):
query = query != None
elif isinstance(query, dict):
icf = query.get("ignore_common_filters")
if icf:
ignore_common_filters = icf
return Set(self, query, ignore_common_filters=ignore_common_filters)
def commit(self):
self._adapter.commit()
object.__getattribute__(self, "_aliased_tables").__dict__.clear()
def rollback(self):
self._adapter.rollback()
object.__getattribute__(self, "_aliased_tables").__dict__.clear()
def close(self):
self._adapter.close()
if self._db_uid in THREAD_LOCAL._pydal_db_instances_:
db_group = THREAD_LOCAL._pydal_db_instances_[self._db_uid]
db_group.remove(self)
if not db_group:
del THREAD_LOCAL._pydal_db_instances_[self._db_uid]
self._adapter._clean_tlocals()
def executesql(
self,
query,
placeholders=None,
as_dict=False,
fields=None,
colnames=None,
as_ordered_dict=False,
):
adapter = self._adapter
if placeholders:
adapter.execute(query, placeholders)
else:
adapter.execute(query)
if as_dict or as_ordered_dict:
if not hasattr(adapter.cursor, "description"):
raise RuntimeError(
"database does not support executesql(...,as_dict=True)"
)
columns = adapter.cursor.description
fields = colnames or [f[0] for f in columns]
if len(fields) != len(set(fields)):
raise RuntimeError(
"Result set includes duplicate column names. Specify unique column names using the 'colnames' argument"
)
if columns and not PY2:
for i in range(0, len(fields)):
if isinstance(fields[i], bytes):
fields[i] = fields[i].decode("utf8")
data = adapter.fetchall()
# easier to work with. row['field_name'] rather than row[0]
if as_ordered_dict:
_dict = OrderedDict
else:
_dict = dict
return [_dict(zip(fields, row)) for row in data]
try:
data = adapter.fetchall()
except:
return None
if fields or colnames:
fields = [] if fields is None else fields
if not isinstance(fields, list):
fields = [fields]
extracted_fields = []
for field in fields:
if isinstance(field, Table):
extracted_fields.extend([f for f in field])
else:
extracted_fields.append(field)
if not colnames:
colnames = [f.sqlsafe for f in extracted_fields]
else:
#: extracted_fields is empty we should make it from colnames
# what 'col_fields' is for
col_fields = [] # [[tablename, fieldname], ....]
newcolnames = []
for tf in colnames:
if "." in tf:
t_f = tf.split(".")
tf = ".".join(adapter.dialect.quote(f) for f in t_f)
else:
t_f = None
if not extracted_fields:
col_fields.append(t_f)
newcolnames.append(tf)
colnames = newcolnames
data = adapter.parse(
data,
fields = extracted_fields or [tf and self[tf[0]][tf[1]] for tf in col_fields],
colnames=colnames
)
return data
def _remove_references_to(self, thistable):
for table in self:
table._referenced_by = [
field for field in table._referenced_by if not field.table == thistable
]
def has_representer(self, name):
return callable(self.representers.get(name))
def represent(self, name, *args, **kwargs):
return self.representers[name](*args, **kwargs)
def export_to_csv_file(self, ofile, *args, **kwargs):
step = long(kwargs.get("max_fetch_rows,", 500))
write_colnames = kwargs["write_colnames"] = kwargs.get("write_colnames", True)
for table in self.tables:
ofile.write("TABLE %s\r\n" % table)
query = self._adapter.id_query(self[table])
nrows = self(query).count()
kwargs["write_colnames"] = write_colnames
for k in range(0, nrows, step):
self(query).select(limitby=(k, k + step)).export_to_csv_file(
ofile, *args, **kwargs
)
kwargs["write_colnames"] = False
ofile.write("\r\n\r\n")
ofile.write("END")
def import_from_csv_file(
self,
ifile,
id_map=None,
null="<NULL>",
unique="uuid",
map_tablenames=None,
ignore_missing_tables=False,
*args,
**kwargs
):
# if id_map is None: id_map={}
id_offset = {} # only used if id_map is None
map_tablenames = map_tablenames or {}
for line in ifile:
line = line.strip()
if not line:
continue
elif line == "END":
return
elif not line.startswith("TABLE "):
raise SyntaxError("Invalid file format")
elif not line[6:] in self.tables:
raise SyntaxError("Unknown table : %s" % line[6:])
else:
tablename = line[6:]
tablename = map_tablenames.get(tablename, tablename)
if tablename is not None and tablename in self.tables:
self[tablename].import_from_csv_file(
ifile, id_map, null, unique, id_offset, *args, **kwargs
)
elif tablename is None or ignore_missing_tables:
# skip all non-empty lines
for line in ifile:
if not line.strip():
break
else:
raise RuntimeError(
"Unable to import table that does not exist.\nTry db.import_from_csv_file(..., map_tablenames={'table':'othertable'},ignore_missing_tables=True)"
)
def can_join(self):
return self._adapter.can_join()
def DAL_unpickler(db_uid):
return DAL("<zombie>", db_uid=db_uid)
def DAL_pickler(db):
return DAL_unpickler, (db._db_uid,)
copyreg.pickle(DAL, DAL_pickler, DAL_unpickler)
| true | true |
f730b66e7a44d8029420d31e049ca3f917f8e10f | 10,128 | py | Python | billforward/models/data_synchronization_job_paged_metadata.py | billforward/bf-python | d2b812329ca3ed1fd94364d7f46f69ad74665596 | [
"Apache-2.0"
] | 2 | 2016-11-23T17:32:37.000Z | 2022-02-24T05:13:20.000Z | billforward/models/data_synchronization_job_paged_metadata.py | billforward/bf-python | d2b812329ca3ed1fd94364d7f46f69ad74665596 | [
"Apache-2.0"
] | null | null | null | billforward/models/data_synchronization_job_paged_metadata.py | billforward/bf-python | d2b812329ca3ed1fd94364d7f46f69ad74665596 | [
"Apache-2.0"
] | 1 | 2016-12-30T20:02:48.000Z | 2016-12-30T20:02:48.000Z | # coding: utf-8
"""
BillForward REST API
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class DataSynchronizationJobPagedMetadata(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, next_page=None, current_page=None, current_offset=None, records_requested=None, records_returned=None, execution_time=None, results=None):
"""
DataSynchronizationJobPagedMetadata - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'next_page': 'str',
'current_page': 'int',
'current_offset': 'int',
'records_requested': 'int',
'records_returned': 'int',
'execution_time': 'int',
'results': 'list[MutableBillingEntity]'
}
self.attribute_map = {
'next_page': 'nextPage',
'current_page': 'currentPage',
'current_offset': 'currentOffset',
'records_requested': 'recordsRequested',
'records_returned': 'recordsReturned',
'execution_time': 'executionTime',
'results': 'results'
}
self._next_page = next_page
self._current_page = current_page
self._current_offset = current_offset
self._records_requested = records_requested
self._records_returned = records_returned
self._execution_time = execution_time
self._results = results
    @property
    def next_page(self):
        """
        Gets the next_page of this DataSynchronizationJobPagedMetadata.

        Paging parameter: URL fragment that can be used to fetch the next
        page of results.  Verbs: GET, PUT, POST.

        :return: The next_page of this DataSynchronizationJobPagedMetadata.
        :rtype: str
        """
        return self._next_page
    @next_page.setter
    def next_page(self, next_page):
        """
        Sets the next_page of this DataSynchronizationJobPagedMetadata.

        Paging parameter: URL fragment that can be used to fetch the next
        page of results.  Verbs: GET, PUT, POST.

        :param next_page: The next_page of this DataSynchronizationJobPagedMetadata.
        :type: str
        """
        self._next_page = next_page
    @property
    def current_page(self):
        """
        Gets the current_page of this DataSynchronizationJobPagedMetadata.

        Paging parameter, 0-indexed: which page (given a page size of
        `recordsRequested`) of the result set you are viewing.
        Verbs: GET, PUT, POST.

        :return: The current_page of this DataSynchronizationJobPagedMetadata.
        :rtype: int
        """
        return self._current_page
    @current_page.setter
    def current_page(self, current_page):
        """
        Sets the current_page of this DataSynchronizationJobPagedMetadata.

        Paging parameter, 0-indexed: which page (given a page size of
        `recordsRequested`) of the result set you are viewing.
        Verbs: GET, PUT, POST.

        :param current_page: The current_page of this DataSynchronizationJobPagedMetadata.
        :type: int
        """
        self._current_page = current_page
    @property
    def current_offset(self):
        """
        Gets the current_offset of this DataSynchronizationJobPagedMetadata.

        Paging parameter, 0-indexed: your current location within a
        pageable list of query results.  Verbs: GET, PUT, POST.

        :return: The current_offset of this DataSynchronizationJobPagedMetadata.
        :rtype: int
        """
        return self._current_offset
    @current_offset.setter
    def current_offset(self, current_offset):
        """
        Sets the current_offset of this DataSynchronizationJobPagedMetadata.

        Paging parameter, 0-indexed: your current location within a
        pageable list of query results.  Verbs: GET, PUT, POST.

        :param current_offset: The current_offset of this DataSynchronizationJobPagedMetadata.
        :type: int
        """
        self._current_offset = current_offset
@property
def records_requested(self):
"""
Gets the records_requested of this DataSynchronizationJobPagedMetadata.
{\"default\":10,\"description\":\"Paging parameter. Describes how many records you requested.\",\"verbs\":[\"GET\",\"PUT\",\"POST\"]}
:return: The records_requested of this DataSynchronizationJobPagedMetadata.
:rtype: int
"""
return self._records_requested
@records_requested.setter
def records_requested(self, records_requested):
"""
Sets the records_requested of this DataSynchronizationJobPagedMetadata.
{\"default\":10,\"description\":\"Paging parameter. Describes how many records you requested.\",\"verbs\":[\"GET\",\"PUT\",\"POST\"]}
:param records_requested: The records_requested of this DataSynchronizationJobPagedMetadata.
:type: int
"""
self._records_requested = records_requested
@property
def records_returned(self):
"""
Gets the records_returned of this DataSynchronizationJobPagedMetadata.
{\"description\":\"Describes how many records were returned by your query.\",\"verbs\":[\"GET\",\"PUT\",\"POST\"]}
:return: The records_returned of this DataSynchronizationJobPagedMetadata.
:rtype: int
"""
return self._records_returned
@records_returned.setter
def records_returned(self, records_returned):
"""
Sets the records_returned of this DataSynchronizationJobPagedMetadata.
{\"description\":\"Describes how many records were returned by your query.\",\"verbs\":[\"GET\",\"PUT\",\"POST\"]}
:param records_returned: The records_returned of this DataSynchronizationJobPagedMetadata.
:type: int
"""
self._records_returned = records_returned
@property
def execution_time(self):
"""
Gets the execution_time of this DataSynchronizationJobPagedMetadata.
{\"description\":\"Number of milliseconds taken by API to calculate response.\",\"verbs\":[\"GET\",\"PUT\",\"POST\"]}
:return: The execution_time of this DataSynchronizationJobPagedMetadata.
:rtype: int
"""
return self._execution_time
@execution_time.setter
def execution_time(self, execution_time):
"""
Sets the execution_time of this DataSynchronizationJobPagedMetadata.
{\"description\":\"Number of milliseconds taken by API to calculate response.\",\"verbs\":[\"GET\",\"PUT\",\"POST\"]}
:param execution_time: The execution_time of this DataSynchronizationJobPagedMetadata.
:type: int
"""
self._execution_time = execution_time
@property
def results(self):
"""
Gets the results of this DataSynchronizationJobPagedMetadata.
{\"description\":\"The results returned by your query.\",\"verbs\":[\"GET\",\"PUT\",\"POST\"]}
:return: The results of this DataSynchronizationJobPagedMetadata.
:rtype: list[MutableBillingEntity]
"""
return self._results
@results.setter
def results(self, results):
"""
Sets the results of this DataSynchronizationJobPagedMetadata.
{\"description\":\"The results returned by your query.\",\"verbs\":[\"GET\",\"PUT\",\"POST\"]}
:param results: The results of this DataSynchronizationJobPagedMetadata.
:type: list[MutableBillingEntity]
"""
self._results = results
    def to_dict(self):
        """
        Returns the model properties as a dict
        Any value (or list/dict member) that itself exposes a `to_dict`
        method is serialized recursively; everything else is copied as-is.
        Attribute names are taken from the keys of `self.swagger_types`.
        """
        result = {}
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Serialize each list element that is itself a model.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Serialize model values inside plain dicts, keeping keys.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        Delegates to `to_str`, so the repr is the pretty-printed model dict.
        """
        return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 36.042705 | 193 | 0.626086 |
from pprint import pformat
from six import iteritems
import re
class DataSynchronizationJobPagedMetadata(object):
    """Swagger-generated model: one page of data-synchronization job results.

    Holds paging bookkeeping (next-page cursor, current page/offset,
    requested/returned record counts, server execution time) plus the
    result records themselves.
    """
    def __init__(self, next_page=None, current_page=None, current_offset=None, records_requested=None, records_returned=None, execution_time=None, results=None):
        # swagger type of each attribute, consumed by to_dict().
        self.swagger_types = {
            'next_page': 'str',
            'current_page': 'int',
            'current_offset': 'int',
            'records_requested': 'int',
            'records_returned': 'int',
            'execution_time': 'int',
            'results': 'list[MutableBillingEntity]'
        }
        # python attribute name -> JSON key used on the wire.
        self.attribute_map = {
            'next_page': 'nextPage',
            'current_page': 'currentPage',
            'current_offset': 'currentOffset',
            'records_requested': 'recordsRequested',
            'records_returned': 'recordsReturned',
            'execution_time': 'executionTime',
            'results': 'results'
        }
        self._next_page = next_page
        self._current_page = current_page
        self._current_offset = current_offset
        self._records_requested = records_requested
        self._records_returned = records_returned
        self._execution_time = execution_time
        self._results = results
    # The properties below are plain pass-through accessors for the
    # underscored fields assigned in __init__.
    @property
    def next_page(self):
        return self._next_page
    @next_page.setter
    def next_page(self, next_page):
        self._next_page = next_page
    @property
    def current_page(self):
        return self._current_page
    @current_page.setter
    def current_page(self, current_page):
        self._current_page = current_page
    @property
    def current_offset(self):
        return self._current_offset
    @current_offset.setter
    def current_offset(self, current_offset):
        self._current_offset = current_offset
    @property
    def records_requested(self):
        return self._records_requested
    @records_requested.setter
    def records_requested(self, records_requested):
        self._records_requested = records_requested
    @property
    def records_returned(self):
        return self._records_returned
    @records_returned.setter
    def records_returned(self, records_returned):
        self._records_returned = records_returned
    @property
    def execution_time(self):
        return self._execution_time
    @execution_time.setter
    def execution_time(self, execution_time):
        self._execution_time = execution_time
    @property
    def results(self):
        return self._results
    @results.setter
    def results(self, results):
        self._results = results
    def to_dict(self):
        """Return the model properties as a dict, recursively serializing
        any value (or list/dict member) that exposes a to_dict method."""
        result = {}
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Return the pretty-printed string form of the model dict."""
        return pformat(self.to_dict())
    def __repr__(self):
        """Used by print/pprint; same output as to_str()."""
        return self.to_str()
    def __eq__(self, other):
        """True when both objects have equal attribute dicts."""
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Negation of == (needed explicitly on Python 2)."""
        return not self == other
| true | true |
f730b975efb7291bf0ef3c3242905078b950a0fa | 45 | py | Python | packages/pyolite-kernel/py/nbformat/nbformat/__init__.py | marimeireles/jupyterlite | 65c9304cf89d311b8a48f227a0cbb2b7f50cf4bd | [
"BSD-3-Clause"
] | 1,392 | 2021-03-28T01:11:50.000Z | 2022-03-23T21:46:27.000Z | packages/pyolite-kernel/py/nbformat/nbformat/__init__.py | marimeireles/jupyterlite | 65c9304cf89d311b8a48f227a0cbb2b7f50cf4bd | [
"BSD-3-Clause"
] | 195 | 2021-03-27T20:38:54.000Z | 2021-10-01T12:43:17.000Z | packages/pyolite-kernel/py/nbformat/nbformat/__init__.py | marimeireles/jupyterlite | 65c9304cf89d311b8a48f227a0cbb2b7f50cf4bd | [
"BSD-3-Clause"
] | 50 | 2021-04-16T07:08:03.000Z | 2022-02-21T05:06:47.000Z | """A nbformat mock"""
__version__ = "4.2.0"
| 11.25 | 21 | 0.6 |
__version__ = "4.2.0"
| true | true |
f730ba393656175bda415e196e26904e2b289597 | 1,606 | py | Python | release/xgboost_tests/workloads/train_gpu.py | manishnish138/ray | 790dca28cf6972edebc44826c8aa3d62cbee1a5e | [
"Apache-2.0"
] | 3 | 2021-08-29T20:41:21.000Z | 2022-01-31T18:47:51.000Z | release/xgboost_tests/workloads/train_gpu.py | QPC-database/amazon-ray | 55aa4cac02a412b96252aea4e8c3f177a28324a1 | [
"Apache-2.0"
] | 59 | 2021-01-14T14:59:36.000Z | 2022-03-25T23:07:05.000Z | release/xgboost_tests/workloads/train_gpu.py | majacQ/ray | bc08c6cdcc7ddf4da751ca2a972defd3db509061 | [
"Apache-2.0"
] | null | null | null | """Training on a GPU cluster.
This will train a small dataset on a distributed GPU cluster.
Test owner: krfricke
Acceptance criteria: Should run through and report final results.
Notes: The test will report output such as this:
```
[05:14:49] WARNING: ../src/gbm/gbtree.cc:350: Loading from a raw memory buffer
on CPU only machine. Changing tree_method to hist.
[05:14:49] WARNING: ../src/learner.cc:222: No visible GPU is found, setting
`gpu_id` to -1
```
This is _not_ an error. This is due to the checkpoints being loaded on the
XGBoost driver, and since the driver lives on the head node (which has no
GPU), XGBoost warns that it can't use the GPU. Training still happened using
the GPUs.
"""
import json
import os
import time
import ray
from xgboost_ray import RayParams
from _train import train_ray
if __name__ == "__main__":
    # Connect to the already-running Ray cluster (started by the release
    # test harness) rather than launching a local one.
    ray.init(address="auto")
    # One XGBoost actor per GPU: 4 actors x (4 CPUs, 1 GPU) each.
    ray_params = RayParams(
        elastic_training=False,
        max_actor_restarts=2,
        num_actors=4,
        cpus_per_actor=4,
        gpus_per_actor=1)
    start = time.time()
    train_ray(
        path="/data/classification.parquet",
        num_workers=4,
        num_boost_rounds=100,
        num_files=25,
        regression=False,
        use_gpu=True,
        ray_params=ray_params,
        xgboost_params=None,
    )
    taken = time.time() - start
    result = {
        "time_taken": taken,
    }
    # Persist the wall-clock time so the release pipeline can collect it.
    test_output_json = os.environ.get("TEST_OUTPUT_JSON",
                                      "/tmp/train_gpu.json")
    with open(test_output_json, "wt") as f:
        json.dump(result, f)
    print("PASSED.")
| 25.492063 | 78 | 0.663138 | import json
import os
import time
import ray
from xgboost_ray import RayParams
from _train import train_ray
if __name__ == "__main__":
ray.init(address="auto")
ray_params = RayParams(
elastic_training=False,
max_actor_restarts=2,
num_actors=4,
cpus_per_actor=4,
gpus_per_actor=1)
start = time.time()
train_ray(
path="/data/classification.parquet",
num_workers=4,
num_boost_rounds=100,
num_files=25,
regression=False,
use_gpu=True,
ray_params=ray_params,
xgboost_params=None,
)
taken = time.time() - start
result = {
"time_taken": taken,
}
test_output_json = os.environ.get("TEST_OUTPUT_JSON",
"/tmp/train_gpu.json")
with open(test_output_json, "wt") as f:
json.dump(result, f)
print("PASSED.")
| true | true |
f730bb742a33abcb801bbbb522892e6ea4f4e546 | 2,152 | py | Python | DeepJanus-BNG/self_driving/road_storage.py | zohdit/DeepJanus | c32022bdff2994e91df7af8af64a022d3e7e6a75 | [
"MIT"
] | 7 | 2020-10-12T10:46:30.000Z | 2021-06-23T10:42:30.000Z | DeepJanus-BNG/self_driving/road_storage.py | zohdit/DeepJanus | c32022bdff2994e91df7af8af64a022d3e7e6a75 | [
"MIT"
] | null | null | null | DeepJanus-BNG/self_driving/road_storage.py | zohdit/DeepJanus | c32022bdff2994e91df7af8af64a022d3e7e6a75 | [
"MIT"
] | 2 | 2021-04-26T12:46:44.000Z | 2021-09-16T08:27:53.000Z | import json
import os
from typing import Tuple, List, Callable
from core.folders import folders
class RoadStorage:
    """Filesystem-backed storage for road definitions serialized as JSON.

    Roads are stored as lists of (x, y, z, width) node tuples, one JSON
    file per road, inside ``folders.member_seeds/<path>``.
    """

    def __init__(self, path: str = None):
        """Create (if needed) and bind the storage folder under member_seeds."""
        if path is None:
            path = 'test_driving'
        self.folder = str(folders.member_seeds.joinpath(path))
        os.makedirs(self.folder, exist_ok=True)

    def all_files(self) -> List[str]:
        """Return full paths of all regular files in the storage folder."""
        expanded = [os.path.join(self.folder, filename) for filename in os.listdir(self.folder)]
        return [path for path in expanded if os.path.isfile(path)]

    def get_road_path_by_index(self, index) -> str:
        """Return the JSON path for 1-based road `index` (e.g. road001_nodes.json)."""
        assert index > 0
        return os.path.join(self.folder, 'road{:03}_nodes.json'.format(index))

    def get_road_nodes_by_index(self, index) -> List[Tuple[float, float, float, float]]:
        """Load and return the node list for the 1-based road `index`."""
        path = self.get_road_path_by_index(index)
        return self.get_road_nodes(path)

    def get_road_nodes(self, path) -> List[Tuple[float, float, float, float]]:
        """Load and return the node list stored in the JSON file at `path`."""
        assert os.path.exists(path), path
        with open(path, 'r') as f:
            return json.loads(f.read())

    def cache(self, road_name: str, get_points: Callable) -> List[Tuple[float, float, float, float]]:
        """Return cached nodes for `road_name`; on a miss, compute them with
        `get_points()` and persist the result for next time."""
        path = os.path.join(self.folder, road_name + '.json')
        if os.path.exists(path):
            with open(path, 'r') as f:
                nodes = json.loads(f.read())
        else:
            nodes = get_points()
            with open(path, 'w') as f:
                f.write(json.dumps(nodes))
        return nodes

    def save(self, road_name: str, contents: str) -> None:
        """Write the raw JSON string `contents` to `<road_name>.json`.

        BUG FIX: the return annotation previously claimed a node list was
        returned, but this method writes the file and returns None.
        """
        path = os.path.join(self.folder, road_name + '.json')
        with open(path, 'w') as f:
            f.write(contents)

    def read(self, path) -> List[Tuple[float, float, float, float]]:
        """Parse and return the JSON content of the member file at `path`."""
        assert os.path.exists(path), path
        with open(path, 'r') as f:
            return json.loads(f.read())
if __name__ == '__main__':
    # Smoke check: print the node count of the first 30 stored roads
    # (indices are 1-based; missing files will trip the assert in
    # get_road_nodes).
    for i in range(1, 31):
        nodes = RoadStorage().get_road_nodes_by_index(i)
        print(i, len(nodes))
| 34.15873 | 101 | 0.608271 | import json
import os
from typing import Tuple, List, Callable
from core.folders import folders
class RoadStorage:
def __init__(self, path: str = None):
if path is None:
path='test_driving'
self.folder = str(folders.member_seeds.joinpath(path))
os.makedirs(self.folder, exist_ok=True)
def all_files(self) -> List[str]:
expanded = [os.path.join(self.folder, filename) for filename in os.listdir(self.folder)]
return [path for path in expanded if os.path.isfile(path)]
def get_road_path_by_index(self, index) -> str:
assert index > 0
path = os.path.join(self.folder, 'road{:03}_nodes.json'.format(index))
return path
def get_road_nodes_by_index(self, index) -> List[Tuple[float, float, float, float]]:
path = self.get_road_path_by_index(index)
nodes = self.get_road_nodes(path)
return nodes
def get_road_nodes(self, path) -> List[Tuple[float, float, float, float]]:
assert os.path.exists(path), path
with open(path, 'r') as f:
nodes = json.loads(f.read())
return nodes
def cache(self, road_name: str, get_points: Callable) -> List[Tuple[float, float, float, float]]:
path = os.path.join(self.folder, road_name + '.json')
if os.path.exists(path):
with open(path, 'r') as f:
nodes = json.loads(f.read())
else:
nodes = get_points()
with open(path, 'w') as f:
f.write(json.dumps(nodes))
return nodes
def save(self, road_name: str, contents: str) -> List[Tuple[float, float, float, float]]:
path = os.path.join(self.folder, road_name + '.json')
with open(path, 'w') as f:
f.write(contents)
def read(self, path) -> List[Tuple[float, float, float, float]]:
assert os.path.exists(path), path
with open(path, 'r') as f:
beamng_member = json.loads(f.read())
return beamng_member
if __name__ == '__main__':
for i in range(1, 31):
nodes = RoadStorage().get_road_nodes_by_index(i)
print(i, len(nodes))
| true | true |
f730bbfbb24db521361e11a81c81fd782b3e533d | 10,665 | py | Python | inbreast.py | wentaozhu/deep-mil-for-whole-mammogram-classification | 8c046bbd77d268499849319cf57254015778549c | [
"MIT"
] | 106 | 2017-03-12T17:26:49.000Z | 2022-02-12T01:37:17.000Z | inbreast.py | huhansan666666/deep-mil-for-whole-mammogram-classification | 8c046bbd77d268499849319cf57254015778549c | [
"MIT"
] | 17 | 2017-04-11T14:49:34.000Z | 2022-03-19T07:57:37.000Z | inbreast.py | huhansan666666/deep-mil-for-whole-mammogram-classification | 8c046bbd77d268499849319cf57254015778549c | [
"MIT"
] | 41 | 2017-03-21T09:48:39.000Z | 2021-11-29T06:51:16.000Z | #import dicom # some machines not install pydicom
import scipy.misc
import numpy as np
from sklearn.model_selection import StratifiedKFold
import cPickle
#import matplotlib
#import matplotlib.pyplot as plt
from skimage.filters import threshold_otsu
import os
from os.path import join as join
import csv
import scipy.ndimage
import dicom
#import cv2
path = '../AllDICOMs/'  # directory holding the raw INbreast DICOM files
preprocesspath = '../preprocesspath/'  # output directory for pickled, resized images
labelfile = './label.txt'  # one "<image_id> <label>" pair per line
def readlabel():
  '''Read image labels from ``labelfile`` into a {name: label} dict.

  Raw labels are 0, 1 or 2; classes 1 and 2 are merged into a single
  positive class, so stored values are 0 or 1.
  '''
  labels = {}
  with open(labelfile, 'r') as handle:
    for row in handle.readlines():
      fields = row.split()
      raw = int(fields[1])
      if raw == 0:
        labels[fields[0]] = raw
      else:
        assert(raw == 1 or raw == 2)
        labels[fields[0]] = raw - 1
  return labels
def readdicom(mydict):
  '''read the dicom image, rename it consistently with the name in labels, crop and resize, and save as pickle.
  mydict is the returned value of readlabel'''
  img_ext = '.dcm'
  img_fnames = [x for x in os.listdir(path) if x.endswith(img_ext)]
  for f in img_fnames:
    # Filenames are underscore-separated; names[0] is the image id used in
    # the label file, names[3] is 'L'/'R' (breast side) — presumably; see
    # the names[3] == 'R' branch below.
    names = f.split('_')
    if names[0] not in mydict:
      print(names[0]+'occur error')
    dicom_content = dicom.read_file(join(path,f))
    img = dicom_content.pixel_array
    '''fig = plt.figure()
    ax1 = plt.subplot(3,3,1)
    ax2 = plt.subplot(3,3,2)
    ax3 = plt.subplot(3,3,3)
    ax4 = plt.subplot(3,3,4)
    ax5 = plt.subplot(3,3,5)
    ax6 = plt.subplot(3,3,6)
    ax7 = plt.subplot(3,3,7)
    ax8 = plt.subplot(3,3,8)
    ax9 = plt.subplot(3,3,9)
    ax1.imshow(img, cmap='Greys_r')
    ax1.set_title('Original')
    ax1.axis('off')'''
    # Otsu threshold separates breast tissue (foreground) from background.
    thresh = threshold_otsu(img)
    binary = img > thresh
    #ax2.imshow(binary, cmap='Greys_r')
    #ax2.set_title('mask')
    #ax2.axis('off')
    # Scan the binary mask for a tight bounding box around the tissue; the
    # magic margins (60 / 10 pixels) tolerate noisy border pixels.
    # NOTE(review): the first loop indexes rows (binary[xx, :]) but ranges
    # over img.shape[1] (columns) — likely only safe because mammograms are
    # taller than wide; confirm.
    minx, miny = 0, 0
    maxx, maxy = img.shape[0], img.shape[1]
    for xx in xrange(img.shape[1]):
      if sum(binary[xx, :]==0) < binary.shape[1]-60:
        minx = xx
        break
    for xx in xrange(img.shape[0]-1,0,-1):
      if sum(binary[xx, :]==0) < binary.shape[1]-60:
        maxx = xx
        break
    if names[3] == 'R':
      # Right-side image: tissue hugs the right edge, so scan right-to-left
      # for the far column boundary.
      maxy = img.shape[1]
      for yy in xrange(int(img.shape[1]*3.0/4), -1, -1):
        if sum(binary[:,yy]==0) > binary.shape[0]-10:
          miny = yy
          break
    else:
      # Left-side image: scan left-to-right instead.
      miny = 0
      for yy in xrange(int(img.shape[1]/4.0), img.shape[1], 1):
        if sum(binary[:,yy]==0) > binary.shape[0]-10:
          maxy = yy
          break
    print(minx, maxx, miny, maxy)
    #ax3.set_title('Foreground')
    #ax3.imshow(img[minx:maxx+1, miny:maxy+1], cmap='Greys_r')
    #ax3.axis('off')
    # Crop to the bounding box and save 227x227 and 299x299 resized copies
    # as pickles keyed by the image id (consumed by loadim/loaddataenhance).
    img = img.astype(np.float32)
    img1 = scipy.misc.imresize(img[minx:maxx+1, miny:maxy+1], (227, 227), interp='cubic')
    with open(join(preprocesspath, names[0])+'227.pickle', 'wb') as outfile:
      cPickle.dump(img1, outfile)
    img1 = scipy.misc.imresize(img[minx:maxx+1, miny:maxy+1], (299, 299), interp='cubic')
    with open(join(preprocesspath, names[0])+'299.pickle', 'wb') as outfile:
      cPickle.dump(img1, outfile)
    '''ax4.set_title('Resize')
    ax4.imshow(img, cmap='Greys_r')
    ax4.axis('off')
    img = img.astype(np.float32)
    img -= np.mean(img)
    img /= np.std(img)
    ax5.set_title('Norm')
    ax5.imshow(img, cmap='Greys_r')
    ax5.axis('off')
    with open(join(preprocesspath, names[0])+'norm.pickle', 'wb') as outfile:
      cPickle.dump(img, outfile)
    #imgshape = img.shape
    img = np.fliplr(img)
    ax6.set_title('Flip')
    ax6.imshow(img, cmap='Greys_r')
    ax6.axis('off')
    num_rot = np.random.choice(4) #rotate 90 randomly
    img = np.rot90(img, num_rot)
    ax7.set_title('Rotation')
    ax7.imshow(img, cmap='Greys_r')
    ax7.axis('off')
    fig.savefig(join(preprocesspath, names[0])+'.jpg')
    plt.close(fig)'''
def cvsplit(fold, totalfold, mydict):
  '''Return (train, test) index arrays for the `fold`-th of `totalfold`
  stratified cross-validation splits over mydict's keys/labels.'''
  splitter = StratifiedKFold(n_splits=totalfold)  # shuffle stays False: deterministic folds
  names = mydict.keys()
  labels = mydict.values()
  for idx, (train_idx, test_idx) in enumerate(splitter.split(names, labels)):
    print(len(train_idx), len(test_idx))
    if idx == fold:
      return train_idx, test_idx
def cvsplitenhance(fold, totalfold, mydict, valfold=-1):
  '''Split mydict's samples into train/validation/test index lists using
  stratified K-fold CV: fold `fold` becomes the test set, fold `valfold`
  (default: the next fold) becomes validation, all remaining folds are
  merged into the training set.'''
  splitter = StratifiedKFold(n_splits=totalfold)  # shuffle stays False: deterministic folds
  names = mydict.keys()
  labels = mydict.values()
  if valfold == -1:
    valfold = (fold + 1) % totalfold
  print('valfold' + str(valfold))
  train_ids, val_ids, test_ids = [], [], []
  for idx, (train_part, test_part) in enumerate(splitter.split(names, labels)):
    print(len(train_part), len(test_part))
    if idx == fold:
      test_ids = test_part[:]
    elif idx == valfold:
      val_ids = test_part[:]
    else:
      train_ids.extend(test_part)
  return train_ids, val_ids, test_ids
def loadim(fname, preprocesspath=preprocesspath):
  '''Load one preprocessed image pickle and return it wrapped in a list.

  fname: file name inside `preprocesspath`.
  preprocesspath: directory holding the pickled images.
  Returns a single-element list so callers can iterate uniformly (the
  list formerly carried flip/rotation augmentations as well; the stale
  `aug` wording in the old docstring described that removed behavior).
  '''
  ims = []
  # The `with` block closes the file itself; the old explicit close() was
  # redundant, and the dead random-crop code has been removed.
  with open(join(preprocesspath, fname), 'rb') as inputfile:
    ims.append(cPickle.load(inputfile))
  return ims
def loaddata(fold, totalfold, usedream=True, aug=True):
  '''Load the fold-th train/test arrays from the INbreast pickles.

  fold: which fold (0..totalfold-1) to hold out as the test set.
  totalfold: number of cross-validation folds.
  usedream: if True, append the DREAM-challenge data to the training set.
  aug: kept for backward compatibility; `loadim` currently returns a
       single image per file, so no augmentation happens here.
  Returns (traindata, trainlabel, testdata, testlabel) numpy arrays.
  '''
  mydict = readlabel()
  mydictkey = mydict.keys()
  mydictvalue = mydict.values()
  trainindex, testindex = cvsplit(fold, totalfold, mydict)
  # NOTE(review): readdicom writes '227.pickle'/'299.pickle' files; confirm
  # plain '.pickle' files exist or switch this suffix to '227.pickle'.
  # Collect into lists first so the array size always matches the number of
  # images loadim actually yields (the old code pre-sized for 6 augmented
  # copies that loadim no longer produces).
  trainims, trainlbls = [], []
  for i in xrange(len(trainindex)):
    # BUG FIX: loadim takes no `aug` keyword; passing it raised TypeError.
    for im in loadim(mydictkey[trainindex[i]]+'.pickle'):
      trainims.append(im)
      trainlbls.append(mydictvalue[trainindex[i]])
  traindata = np.asarray(trainims, dtype=np.float64)
  trainlabel = np.asarray(trainlbls, dtype=np.float64)
  testims, testlbls = [], []
  for i in xrange(len(testindex)):
    ims = loadim(mydictkey[testindex[i]]+'.pickle')
    testims.append(ims[0])
    testlbls.append(mydictvalue[testindex[i]])
  testdata = np.asarray(testims, dtype=np.float64)
  testlabel = np.asarray(testlbls, dtype=np.float64)
  if usedream:
    # NOTE(review): extractdreamdata is not defined in this module; confirm
    # it is provided elsewhere before calling with usedream=True.
    outx, outy = extractdreamdata()
    traindata = np.concatenate((traindata, outx), axis=0)
    trainlabel = np.concatenate((trainlabel, outy), axis=0)
  return traindata, trainlabel, testdata, testlabel
def loaddataenhance(fold, totalfold, valfold=-1, valnum=60):
  '''get the fold th train and test data from inbreast
  fold is the returned fold th data, from 0 to totalfold-1
  total fold is for the cross validation
  valfold: validation fold index (default -1 -> fold+1 mod totalfold)
  valnum: how many shuffled validation samples to keep; the remainder of
          the validation fold is folded back into the training set.
  Returns (traindata, trainlabel, valdata, vallabel, testdata, testlabel),
  with all images scaled by the training-set maximum.'''
  mydict = readlabel()
  mydictkey = mydict.keys()
  mydictvalue = mydict.values()
  trainindex, valindex, testindex = cvsplitenhance(fold, totalfold, mydict, valfold=valfold)
  # Pre-size the arrays; each index yields exactly one 227x227 image.
  traindata, trainlabel = np.zeros((len(trainindex),227,227)), np.zeros((len(trainindex),))
  valdata, vallabel = np.zeros((len(valindex),227,227)), np.zeros((len(valindex),))
  testdata, testlabel = np.zeros((len(testindex),227,227)), np.zeros((len(testindex),))
  traincount = 0
  for i in xrange(len(trainindex)):
    ims = loadim(mydictkey[trainindex[i]]+'227.pickle')
    for im in ims:
      traindata[traincount, :, :] = im
      trainlabel[traincount] = int(mydictvalue[trainindex[i]])
      traincount += 1
  assert(traincount==traindata.shape[0])
  valcount = 0
  for i in xrange(len(valindex)):
    ims = loadim(mydictkey[valindex[i]]+'227.pickle')
    valdata[valcount,:,:] = ims[0]
    vallabel[valcount] = int(mydictvalue[valindex[i]])
    valcount += 1
  assert(valcount==valdata.shape[0])
  testcount = 0
  for i in xrange(len(testindex)):
    #print mydictkey[testindex[i]]
    ims = loadim(mydictkey[testindex[i]]+'227.pickle')
    testdata[testcount,:,:] = ims[0]
    testlabel[testcount] = int(mydictvalue[testindex[i]])
    testcount += 1
  assert(testcount==testdata.shape[0])
  #print(valdata.shape)
  # Shuffle the validation fold, keep only `valnum` samples for validation
  # and move the rest into the training set.
  randindex = np.random.permutation(valdata.shape[0])
  valdata = valdata[randindex,:,:]
  vallabel = vallabel[randindex]
  #print(valdata.shape)
  traindata = np.concatenate((traindata, valdata[valnum:,:,:]), axis=0)
  trainlabel = np.concatenate((trainlabel, vallabel[valnum:]), axis=0)
  valdata = valdata[:valnum,:,:]
  vallabel = vallabel[:valnum]
  # Scale all splits by the training maximum (into roughly [0, 1]).
  maxvalue = (traindata.max()*1.0)
  # NOTE(review): this print passes a tuple; '%'-formatting was probably
  # intended ("print('inbreast max %f' % maxvalue)").
  print('inbreast max %f', maxvalue)
  traindata = traindata / maxvalue
  valdata = valdata / maxvalue
  testdata = testdata / maxvalue
  print('train data feature')
  #meanx = traindata.mean()
  #stdx = traindata.std()
  #traindata -= meanx
  #traindata /= stdx
  #valdata -= meanx
  #valdata /= stdx
  #testdata -= meanx
  #testdata /= stdx
  print(traindata.mean(), traindata.std(), traindata.max(), traindata.min())
  print('val data feature')
  print(valdata.mean(), valdata.std(), valdata.max(), valdata.min())
  print('test data feature')
  print(testdata.mean(), testdata.std(), testdata.max(), testdata.min())
  #meandata = traindata.mean()
  #stddata = traindata.std()
  #traindata = traindata - meandata
  #traindata = traindata / stddata
  #valdata = valdata - meandata
  #valdata = valdata / stddata
  #testdata = testdata - meandata
  #testdata = testdata / stddata
  return traindata, trainlabel, valdata, vallabel, testdata, testlabel
if __name__ == '__main__':
  # Smoke test: load fold 0 of 5 and report positives per split (labels
  # are 0/1, so sum == positive count).
  # NOTE(review): loaddata's usedream default requires extractdreamdata,
  # which is not defined in this module; may need usedream=False.
  traindata, trainlabel, testdata, testlabel = loaddata(0, 5)
  print(sum(trainlabel), sum(testlabel))
  traindata, trainlabel, valdata, vallabel, testdata, testlabel = loaddataenhance(0, 5)
  print(sum(trainlabel), sum(vallabel), sum(testlabel))
| 35.909091 | 112 | 0.634974 | np
from sklearn.model_selection import StratifiedKFold
import cPickle
from skimage.filters import threshold_otsu
import os
from os.path import join as join
import csv
import scipy.ndimage
import dicom
path = '../AllDICOMs/'
preprocesspath = '../preprocesspath/'
labelfile = './label.txt'
def readlabel():
mydict = {}
with open(labelfile, 'r') as f:
flines = f.readlines()
for line in flines:
data = line.split()
if int(data[1]) == 0:
mydict[data[0]] = int(data[1])
else:
assert(int(data[1])==2 or int(data[1])==1)
mydict[data[0]] = int(data[1])-1
return mydict
def readdicom(mydict):
img_ext = '.dcm'
img_fnames = [x for x in os.listdir(path) if x.endswith(img_ext)]
for f in img_fnames:
names = f.split('_')
if names[0] not in mydict:
print(names[0]+'occur error')
dicom_content = dicom.read_file(join(path,f))
img = dicom_content.pixel_array
thresh = threshold_otsu(img)
binary = img > thresh
minx, miny = 0, 0
maxx, maxy = img.shape[0], img.shape[1]
for xx in xrange(img.shape[1]):
if sum(binary[xx, :]==0) < binary.shape[1]-60:
minx = xx
break
for xx in xrange(img.shape[0]-1,0,-1):
if sum(binary[xx, :]==0) < binary.shape[1]-60:
maxx = xx
break
if names[3] == 'R':
maxy = img.shape[1]
for yy in xrange(int(img.shape[1]*3.0/4), -1, -1):
if sum(binary[:,yy]==0) > binary.shape[0]-10:
miny = yy
break
else:
miny = 0
for yy in xrange(int(img.shape[1]/4.0), img.shape[1], 1):
if sum(binary[:,yy]==0) > binary.shape[0]-10:
maxy = yy
break
print(minx, maxx, miny, maxy)
img = img.astype(np.float32)
img1 = scipy.misc.imresize(img[minx:maxx+1, miny:maxy+1], (227, 227), interp='cubic')
with open(join(preprocesspath, names[0])+'227.pickle', 'wb') as outfile:
cPickle.dump(img1, outfile)
img1 = scipy.misc.imresize(img[minx:maxx+1, miny:maxy+1], (299, 299), interp='cubic')
with open(join(preprocesspath, names[0])+'299.pickle', 'wb') as outfile:
cPickle.dump(img1, outfile)
def cvsplit(fold, totalfold, mydict):
skf = StratifiedKFold(n_splits=totalfold)
y = mydict.values()
x = mydict.keys()
count = 0
for train, test in skf.split(x,y):
print(len(train), len(test))
if count == fold:
return train, test
count += 1
def cvsplitenhance(fold, totalfold, mydict, valfold=-1):
skf = StratifiedKFold(n_splits=totalfold)
y = mydict.values()
x = mydict.keys()
count = 0
if valfold == -1:
valfold = (fold+1) % totalfold
print('valfold'+str(valfold))
trainls, valls, testls = [], [], []
for train, test in skf.split(x,y):
print(len(train), len(test))
if count == fold:
testls = test[:]
elif count == valfold:
valls = test[:]
else:
for i in test:
trainls.append(i)
count += 1
return trainls, valls, testls
def loadim(fname, preprocesspath=preprocesspath):
ims = []
with open(join(preprocesspath, fname), 'rb') as inputfile:
im = cPickle.load(inputfile)
m
ims.append(img)
inputfile.close()
return ims
def loaddata(fold, totalfold, usedream=True, aug=True):
mydict = readlabel()
mydictkey = mydict.keys()
mydictvalue = mydict.values()
trainindex, testindex = cvsplit(fold, totalfold, mydict)
if aug == True:
traindata, trainlabel = np.zeros((6*len(trainindex),227,227)), np.zeros((6*len(trainindex),))
else:
traindata, trainlabel = np.zeros((len(trainindex),227,227)), np.zeros((len(trainindex),))
testdata, testlabel = np.zeros((len(testindex),227,227)), np.zeros((len(testindex),))
traincount = 0
for i in xrange(len(trainindex)):
ims = loadim(mydictkey[trainindex[i]]+'.pickle', aug=aug)
for im in ims:
traindata[traincount, :, :] = im
trainlabel[traincount] = mydictvalue[trainindex[i]]
traincount += 1
assert(traincount==traindata.shape[0])
testcount = 0
for i in xrange(len(testindex)):
ims = loadim(mydictkey[testindex[i]]+'.pickle', aug=aug)
testdata[testcount,:,:] = ims[0]
testlabel[testcount] = mydictvalue[testindex[i]]
testcount += 1
assert(testcount==testdata.shape[0])
if usedream:
outx, outy = extractdreamdata()
traindata = np.concatenate((traindata,outx), axis=0)
trainlabel = np.concatenate((trainlabel,outy), axis=0)
return traindata, trainlabel, testdata, testlabel
def loaddataenhance(fold, totalfold, valfold=-1, valnum=60):
mydict = readlabel()
mydictkey = mydict.keys()
mydictvalue = mydict.values()
trainindex, valindex, testindex = cvsplitenhance(fold, totalfold, mydict, valfold=valfold)
traindata, trainlabel = np.zeros((len(trainindex),227,227)), np.zeros((len(trainindex),))
valdata, vallabel = np.zeros((len(valindex),227,227)), np.zeros((len(valindex),))
testdata, testlabel = np.zeros((len(testindex),227,227)), np.zeros((len(testindex),))
traincount = 0
for i in xrange(len(trainindex)):
ims = loadim(mydictkey[trainindex[i]]+'227.pickle')
for im in ims:
traindata[traincount, :, :] = im
trainlabel[traincount] = int(mydictvalue[trainindex[i]])
traincount += 1
assert(traincount==traindata.shape[0])
valcount = 0
for i in xrange(len(valindex)):
ims = loadim(mydictkey[valindex[i]]+'227.pickle')
valdata[valcount,:,:] = ims[0]
vallabel[valcount] = int(mydictvalue[valindex[i]])
valcount += 1
assert(valcount==valdata.shape[0])
testcount = 0
for i in xrange(len(testindex)):
ims = loadim(mydictkey[testindex[i]]+'227.pickle')
testdata[testcount,:,:] = ims[0]
testlabel[testcount] = int(mydictvalue[testindex[i]])
testcount += 1
assert(testcount==testdata.shape[0])
randindex = np.random.permutation(valdata.shape[0])
valdata = valdata[randindex,:,:]
vallabel = vallabel[randindex]
traindata = np.concatenate((traindata, valdata[valnum:,:,:]), axis=0)
trainlabel = np.concatenate((trainlabel, vallabel[valnum:]), axis=0)
valdata = valdata[:valnum,:,:]
vallabel = vallabel[:valnum]
maxvalue = (traindata.max()*1.0)
print('inbreast max %f', maxvalue)
traindata = traindata / maxvalue
valdata = valdata / maxvalue
testdata = testdata / maxvalue
print('train data feature')
print(traindata.mean(), traindata.std(), traindata.max(), traindata.min())
print('val data feature')
print(valdata.mean(), valdata.std(), valdata.max(), valdata.min())
print('test data feature')
print(testdata.mean(), testdata.std(), testdata.max(), testdata.min())
return traindata, trainlabel, valdata, vallabel, testdata, testlabel
if __name__ == '__main__':
traindata, trainlabel, testdata, testlabel = loaddata(0, 5)
print(sum(trainlabel), sum(testlabel))
traindata, trainlabel, valdata, vallabel, testdata, testlabel = loaddataenhance(0, 5)
print(sum(trainlabel), sum(vallabel), sum(testlabel))
| true | true |
f730bcb2483c07c6b27c50e0f64f975de88ddef7 | 1,707 | py | Python | sdks/python/apache_beam/io/__init__.py | charithe/beam | f085cb500730cf0c67c467ac55f92b3c59f52b39 | [
"Apache-2.0"
] | 2 | 2019-12-14T04:24:33.000Z | 2020-02-21T07:17:40.000Z | sdks/python/apache_beam/io/__init__.py | charithe/beam | f085cb500730cf0c67c467ac55f92b3c59f52b39 | [
"Apache-2.0"
] | 14 | 2020-02-12T22:20:41.000Z | 2021-11-09T19:41:23.000Z | sdks/python/apache_beam/io/__init__.py | charithe/beam | f085cb500730cf0c67c467ac55f92b3c59f52b39 | [
"Apache-2.0"
] | 2 | 2020-06-22T11:17:44.000Z | 2020-11-04T04:11:59.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A package defining several input sources and output sinks."""
# pylint: disable=wildcard-import
from __future__ import absolute_import
from apache_beam.io.avroio import *
from apache_beam.io.filebasedsink import *
from apache_beam.io.iobase import Read
from apache_beam.io.iobase import Sink
from apache_beam.io.iobase import Write
from apache_beam.io.iobase import Writer
from apache_beam.io.mongodbio import *
from apache_beam.io.parquetio import *
from apache_beam.io.textio import *
from apache_beam.io.tfrecordio import *
from apache_beam.io.range_trackers import *
# Protect against environments where clientslibrary is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from apache_beam.io.gcp.bigquery import *
from apache_beam.io.gcp.pubsub import *
from apache_beam.io.gcp import gcsio
except ImportError:
pass
# pylint: enable=wrong-import-order, wrong-import-position
| 39.697674 | 74 | 0.794376 |
from __future__ import absolute_import
from apache_beam.io.avroio import *
from apache_beam.io.filebasedsink import *
from apache_beam.io.iobase import Read
from apache_beam.io.iobase import Sink
from apache_beam.io.iobase import Write
from apache_beam.io.iobase import Writer
from apache_beam.io.mongodbio import *
from apache_beam.io.parquetio import *
from apache_beam.io.textio import *
from apache_beam.io.tfrecordio import *
from apache_beam.io.range_trackers import *
try:
from apache_beam.io.gcp.bigquery import *
from apache_beam.io.gcp.pubsub import *
from apache_beam.io.gcp import gcsio
except ImportError:
pass
| true | true |
f730bd8c30f682808cde1355d3c15659e66ef93c | 582 | py | Python | src/tests/spider_test.py | volvet/spider | c374de5ad299423eb47b662e8c2f1d16ead58a9f | [
"MIT"
] | null | null | null | src/tests/spider_test.py | volvet/spider | c374de5ad299423eb47b662e8c2f1d16ead58a9f | [
"MIT"
] | null | null | null | src/tests/spider_test.py | volvet/spider | c374de5ad299423eb47b662e8c2f1d16ead58a9f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon May 10 21:23:13 2021
@author: Administrator
"""
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../')
from utils import SpiderFormat # noqa: E402
from spider_factory import SpiderFactory # noqa: E402
def test_onnxspider():
    """The factory must produce a concrete spider for the ONNX format."""
    created = SpiderFactory.create(SpiderFormat.ONNX)
    assert created is not None
def test_torchspider():
    """The TORCH format is unsupported, so the factory must return None."""
    created = SpiderFactory.create(SpiderFormat.TORCH)
    assert created is None
if __name__ == '__main__':
    # Allow running the smoke tests directly as a script (without pytest).
    print('Hello, test')
    test_onnxspider()
    test_torchspider()
| 20.068966 | 68 | 0.725086 |
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../')
from utils import SpiderFormat
from spider_factory import SpiderFactory
def test_onnxspider():
spider = SpiderFactory.create(SpiderFormat.ONNX)
assert spider is not None
def test_torchspider():
spider = SpiderFactory.create(SpiderFormat.TORCH)
assert spider is None
if __name__ == '__main__':
print('Hello, test')
test_onnxspider()
test_torchspider()
| true | true |
f730bdfa184e875f2155e79716a3aed6b404a24c | 1,861 | py | Python | tests/config.py | armandomeeuwenoord/freight | 31ae2fa9252ab0b25385abd04742475e6671e3b1 | [
"Apache-2.0"
] | 562 | 2015-02-20T08:25:24.000Z | 2021-11-12T19:58:44.000Z | tests/config.py | armandomeeuwenoord/freight | 31ae2fa9252ab0b25385abd04742475e6671e3b1 | [
"Apache-2.0"
] | 129 | 2015-02-20T07:41:14.000Z | 2022-02-17T21:14:40.000Z | tests/config.py | armandomeeuwenoord/freight | 31ae2fa9252ab0b25385abd04742475e6671e3b1 | [
"Apache-2.0"
] | 54 | 2015-02-28T01:12:23.000Z | 2021-03-02T11:14:52.000Z | SQLALCHEMY_DATABASE_URI = "postgresql:///test_freight"
LOG_LEVEL = "INFO"
WORKSPACE_ROOT = "/tmp/freight-tests"
SSH_PRIVATE_KEY = "-----BEGIN RSA PRIVATE KEY-----\nMIIEowIBAAKCAQEArvyc+vZVxUjC5ZcFg1VN3jQOCOjO94gwQKFxlz0zOCrCz+Sq\nnWk28YdUpOU016Zinlh4ZZk2136nCKKTMnNMjd6cTTCn5fWomjR+F2CSdaYYpYfO\nNtVnq0SIDUgGmjyPncOGrxVT6EzjjSvgE8W8YIc5rVJqNMAH5OywUH0nqISYN2yP\nwbUPVf8zqu3kpnTt7YcWZ+Ye4b3jX6Fo2Xw5P1TTwQ92K9JdVAltBRpwSLtBQUYC\nMkwtNf6QIbRYKoVZuEhi/8XCxT0zG78Lsqpbld8IEnLWUGifCtx9mKqVi8Y3QTsT\nknMWFaf+Su8htgw/W7tufmrtTKNJYDtPTGiBeQIDAQABAoIBABYsC/gAnn2Q6qEM\nsbYiaOtuzRhz50WWDAckbbAsIQFM6cJNxxCK9FtGOoNqR3fLrVNDAn5dG4XSlneR\nofUShvCy9DsTnzKUHfjsDc4IfoZJtXXD720jPS+GT3bfWXbRlaD31Wj52tfkZjDN\nDmdy9puEhtpfRvXIHzfyhaStNwkzDh0jp8e8yok1mLA+3FPqkJPF6ptxPs6HEQS8\npY75jxvypbux2+W9249J/HqMmd5/+r7tt62vciqnXb2LG2AmUxLhTAQU9mGM2OSL\nrh2j+7/2apEQLdJ0DbS19IkQZRpO/DLPyhg6C29ZuNQffQWoLiZlfgIEaBT939aM\nkFdzy8ECgYEA4BdisLRCyCdm2M7fMDsV7j71z48Q1Kdl5A6/ngiK1dCwnjRMvkLx\nKOHtmvpJxHTH+JAewrrGUg0GF1YpM3gi0FQ7f9qTlAeFIrU3udV8F/m6+rIOpx92\nB2FSrYTaonLX8g4OzXKNtQcwzx91mFWTIEmfQl9let0WMrCRzReXp0sCgYEAx+dC\ncbERCVcJvs9+SUwVXXOreCF4PedLrg7bjkfYSpmAJk9c36EOi1jIGO5rat5/k7Nb\n0plWghADjtcb4r8oO6pzhMR81cESgFOk1UasP4rPYX4mEYPBwVGgN7ECUXj9XFPZ\n/tk7lgneBc1/6eV978MTprXiHU5Rv7yZBMuf68sCgYAd6YE27Rjs9rV3w0VvfrOS\ntbzCE+q/OAkVxBI32hQOLmkk9P45d14RgvbgdQBbxOrcdwBkJeJLGYnym4GsaSDc\nhiHbEyYX4FkZJO9nUuPZn3Ah/pqOHFj46zjKCK3WeVXx7YZ0ThI0U91kCGL+Do4x\nBSLJDUrSd6h6467SnY+UuQKBgGV0/AYT5h+lay7KxL+Su+04Pbi01AAnGgP3SnuF\n/0KtcZsAAJUHewhCQRxWNXKCBqICEAJtDLjqQ8QFbQPCHTtbIVIrH2ilmyxCR5Bv\nVBDT9Lj4e328L2Rcd0KMti5/h6eKb0OnIVTfIS40xE0Dys0bZyfffCl/jIIRyF/k\nsP/NAoGBAIfxtr881cDFrxahrTJ3AtGXxjJjMUW/S6+gKd7Lj9i+Uadb9vjD8Wt8\ngWrUDwXVAhD5Sxv+OCBizPF1CxXTgC3+/ophkUcy5VTcBchgQI7JrItujxUc0EvR\nCwA7/JPyO8DaUtvpodUKO27vr11G/NmXYrOohCP6VxH/Y6p5L9o4\n-----END RSA PRIVATE KEY-----"
GITHUB_TOKEN = "a" * 40  # dummy 40-character string matching a GitHub token's shape
| 186.1 | 1,720 | 0.922085 | SQLALCHEMY_DATABASE_URI = "postgresql:///test_freight"
LOG_LEVEL = "INFO"
WORKSPACE_ROOT = "/tmp/freight-tests"
SSH_PRIVATE_KEY = "-----BEGIN RSA PRIVATE KEY-----\nMIIEowIBAAKCAQEArvyc+vZVxUjC5ZcFg1VN3jQOCOjO94gwQKFxlz0zOCrCz+Sq\nnWk28YdUpOU016Zinlh4ZZk2136nCKKTMnNMjd6cTTCn5fWomjR+F2CSdaYYpYfO\nNtVnq0SIDUgGmjyPncOGrxVT6EzjjSvgE8W8YIc5rVJqNMAH5OywUH0nqISYN2yP\nwbUPVf8zqu3kpnTt7YcWZ+Ye4b3jX6Fo2Xw5P1TTwQ92K9JdVAltBRpwSLtBQUYC\nMkwtNf6QIbRYKoVZuEhi/8XCxT0zG78Lsqpbld8IEnLWUGifCtx9mKqVi8Y3QTsT\nknMWFaf+Su8htgw/W7tufmrtTKNJYDtPTGiBeQIDAQABAoIBABYsC/gAnn2Q6qEM\nsbYiaOtuzRhz50WWDAckbbAsIQFM6cJNxxCK9FtGOoNqR3fLrVNDAn5dG4XSlneR\nofUShvCy9DsTnzKUHfjsDc4IfoZJtXXD720jPS+GT3bfWXbRlaD31Wj52tfkZjDN\nDmdy9puEhtpfRvXIHzfyhaStNwkzDh0jp8e8yok1mLA+3FPqkJPF6ptxPs6HEQS8\npY75jxvypbux2+W9249J/HqMmd5/+r7tt62vciqnXb2LG2AmUxLhTAQU9mGM2OSL\nrh2j+7/2apEQLdJ0DbS19IkQZRpO/DLPyhg6C29ZuNQffQWoLiZlfgIEaBT939aM\nkFdzy8ECgYEA4BdisLRCyCdm2M7fMDsV7j71z48Q1Kdl5A6/ngiK1dCwnjRMvkLx\nKOHtmvpJxHTH+JAewrrGUg0GF1YpM3gi0FQ7f9qTlAeFIrU3udV8F/m6+rIOpx92\nB2FSrYTaonLX8g4OzXKNtQcwzx91mFWTIEmfQl9let0WMrCRzReXp0sCgYEAx+dC\ncbERCVcJvs9+SUwVXXOreCF4PedLrg7bjkfYSpmAJk9c36EOi1jIGO5rat5/k7Nb\n0plWghADjtcb4r8oO6pzhMR81cESgFOk1UasP4rPYX4mEYPBwVGgN7ECUXj9XFPZ\n/tk7lgneBc1/6eV978MTprXiHU5Rv7yZBMuf68sCgYAd6YE27Rjs9rV3w0VvfrOS\ntbzCE+q/OAkVxBI32hQOLmkk9P45d14RgvbgdQBbxOrcdwBkJeJLGYnym4GsaSDc\nhiHbEyYX4FkZJO9nUuPZn3Ah/pqOHFj46zjKCK3WeVXx7YZ0ThI0U91kCGL+Do4x\nBSLJDUrSd6h6467SnY+UuQKBgGV0/AYT5h+lay7KxL+Su+04Pbi01AAnGgP3SnuF\n/0KtcZsAAJUHewhCQRxWNXKCBqICEAJtDLjqQ8QFbQPCHTtbIVIrH2ilmyxCR5Bv\nVBDT9Lj4e328L2Rcd0KMti5/h6eKb0OnIVTfIS40xE0Dys0bZyfffCl/jIIRyF/k\nsP/NAoGBAIfxtr881cDFrxahrTJ3AtGXxjJjMUW/S6+gKd7Lj9i+Uadb9vjD8Wt8\ngWrUDwXVAhD5Sxv+OCBizPF1CxXTgC3+/ophkUcy5VTcBchgQI7JrItujxUc0EvR\nCwA7/JPyO8DaUtvpodUKO27vr11G/NmXYrOohCP6VxH/Y6p5L9o4\n-----END RSA PRIVATE KEY-----"
GITHUB_TOKEN = "a" * 40
| true | true |
f730be982c887d5e48842dce53d62d19a740a19a | 8,899 | py | Python | tests_gpu/test_multi_gpu/test_core_pytorch_compare/test_ddp/test_mnist_cnn.py | mv1388/AIToolbox | c64ac4810a02d230ce471d86b758e82ea232a7e7 | [
"MIT"
] | 3 | 2019-10-12T12:24:09.000Z | 2020-08-02T02:42:43.000Z | tests_gpu/test_multi_gpu/test_core_pytorch_compare/test_ddp/test_mnist_cnn.py | mv1388/aitoolbox | 1060435e6cbdfd19abcb726c4080b663536b7467 | [
"MIT"
] | 3 | 2020-04-10T14:07:07.000Z | 2020-04-22T19:04:38.000Z | tests_gpu/test_multi_gpu/test_core_pytorch_compare/test_ddp/test_mnist_cnn.py | mv1388/aitoolbox | 1060435e6cbdfd19abcb726c4080b663536b7467 | [
"MIT"
] | null | null | null | import unittest
import os
import shutil
import random
import pickle
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import torch.multiprocessing as mp
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data.distributed import DistributedSampler
from aitoolbox import TrainLoop, TTModel
from tests_gpu.test_multi_gpu.ddp_prediction_saver import DDPPredictionSave
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
class CNNNet(TTModel):
    """MNIST classifier: conv(1->32) -> conv(32->64) -> maxpool -> fc(9216->128) -> fc(128->10).

    ``forward`` returns per-class log-probabilities, to be paired with
    ``nn.NLLLoss``.
    """

    def __init__(self):
        super(CNNNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        self.dropout1 = nn.Dropout2d(0.25)
        self.dropout2 = nn.Dropout2d(0.5)
        self.fc1 = nn.Linear(9216, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        # Convolutional feature extractor.
        features = F.relu(self.conv1(x))
        features = F.relu(self.conv2(features))
        features = self.dropout1(F.max_pool2d(features, 2))
        # Fully connected classifier head on the flattened feature map.
        flat = torch.flatten(features, 1)
        hidden = self.dropout2(F.relu(self.fc1(flat)))
        return F.log_softmax(self.fc2(hidden), dim=1)

    def get_loss(self, batch_data, criterion, device):
        """Move one (data, target) batch to `device`, forward it, return the loss."""
        inputs, targets = batch_data
        inputs, targets = inputs.to(device), targets.to(device)
        return criterion(self(inputs), targets)

    def get_predictions(self, batch_data, device):
        """Return (predicted class ids on CPU, ground-truth labels, metadata dict)."""
        inputs, y_test = batch_data
        log_probs = self(inputs.to(device))
        y_pred = log_probs.argmax(dim=1, keepdim=False)
        return y_pred.cpu(), y_test, {}
class TestMNISTCNN(unittest.TestCase):
    """End-to-end parity test: aitoolbox TrainLoop in DDP mode vs. a
    hand-written PyTorch DistributedDataParallel training loop.

    Both paths train the same CNN on MNIST with identical seeds and must
    produce matching validation losses and predictions.
    """

    def test_trainloop_core_pytorch_compare(self):
        # Scratch directory both training paths dump their predictions into.
        os.mkdir(f'{THIS_DIR}/ddp_cnn_save')
        val_loss_tl, y_pred_tl, y_true_tl = self.train_eval_trainloop(num_epochs=5, use_real_train_data=True)
        val_loss_pt, y_pred_pt, y_true_pt = self.train_eval_core_pytorch(num_epochs=5, use_real_train_data=True)
        # Loss is compared to 8 decimals; predictions/labels must match exactly.
        self.assertAlmostEqual(val_loss_tl, val_loss_pt, places=8)
        self.assertEqual(y_pred_tl, y_pred_pt)
        self.assertEqual(y_true_tl, y_true_pt)
        # Clean up the prediction dumps and the downloaded MNIST data.
        project_path = os.path.join(THIS_DIR, 'ddp_cnn_save')
        if os.path.exists(project_path):
            shutil.rmtree(project_path)
        project_path = os.path.join(THIS_DIR, 'data')
        if os.path.exists(project_path):
            shutil.rmtree(project_path)

    def train_eval_trainloop(self, num_epochs, use_real_train_data=False):
        """Train and evaluate via the aitoolbox TrainLoop in DDP mode.

        Returns the ``(val_loss, y_pred, y_true)`` triple that the
        DDPPredictionSave callback pickled to disk.
        """
        self.set_seeds()
        train_loader = torch.utils.data.DataLoader(
            datasets.MNIST(os.path.join(THIS_DIR, 'data'), train=use_real_train_data, download=True,
                           transform=transforms.Compose([
                               transforms.ToTensor(),
                               transforms.Normalize((0.1307,), (0.3081,))
                           ])),
            batch_size=100, shuffle=True)
        val_loader = torch.utils.data.DataLoader(
            datasets.MNIST(os.path.join(THIS_DIR, 'data'), train=False, transform=transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.1307,), (0.3081,))
            ])),
            batch_size=100)
        model = CNNNet()
        optimizer = optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.999))
        criterion = nn.NLLLoss()
        print('Starting train loop')
        tl = TrainLoop(
            model,
            train_loader, val_loader, None,
            optimizer, criterion,
            gpu_mode='ddp'
        )
        # The test requires CUDA; TrainLoop must have selected a GPU device.
        self.assertEqual(tl.device.type, "cuda")
        tl.fit(num_epochs=num_epochs,
               callbacks=[DDPPredictionSave(dir_path=f'{THIS_DIR}/ddp_cnn_save',
                                            file_name='tl_ddp_predictions.p')])
        with open(f'{THIS_DIR}/ddp_cnn_save/tl_ddp_predictions.p', 'rb') as f:
            val_loss, y_pred, y_true = pickle.load(f)
        return val_loss, y_pred, y_true

    def train_eval_core_pytorch(self, num_epochs, use_real_train_data=False):
        """Train and evaluate with a manually written PyTorch DDP loop.

        Spawns one worker process per visible GPU, then merges the
        per-rank prediction dumps into a single
        ``(val_loss, y_pred, y_true)`` triple.
        """
        self.set_seeds()
        train_loader = torch.utils.data.DataLoader(
            datasets.MNIST(os.path.join(THIS_DIR, 'data'), train=use_real_train_data, download=True,
                           transform=transforms.Compose([
                               transforms.ToTensor(),
                               transforms.Normalize((0.1307,), (0.3081,))
                           ])),
            batch_size=100)
        val_loader = torch.utils.data.DataLoader(
            datasets.MNIST(os.path.join(THIS_DIR, 'data'), train=False, transform=transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.1307,), (0.3081,))
            ])),
            batch_size=100)
        model_pt = CNNNet()
        optimizer_pt = optim.Adam(model_pt.parameters(), lr=0.001, betas=(0.9, 0.999))
        criterion_pt = nn.NLLLoss()
        # Rendezvous endpoint for torch.distributed process-group init.
        os.environ['MASTER_ADDR'] = 'localhost'
        os.environ['MASTER_PORT'] = '8888'
        print('Starting the manual DDP training')
        mp.spawn(
            self.manual_ddp_training,
            args=(num_epochs, model_pt, optimizer_pt, criterion_pt, train_loader, val_loader),
            nprocs=torch.cuda.device_count()
        )
        # Merge the per-GPU prediction files written by each spawned rank.
        val_loss, y_pred, y_true = [], [], []
        for idx in range(torch.cuda.device_count()):
            with open(f'{THIS_DIR}/ddp_cnn_save/pt_ddp_predictions_{idx}.p', 'rb') as f:
                val_loss_f, y_pred_f, y_true_f = pickle.load(f)
                val_loss += val_loss_f
                y_pred += y_pred_f
                y_true += y_true_f
        val_loss = np.mean(val_loss)
        return val_loss, y_pred, y_true

    @staticmethod
    def manual_ddp_training(gpu, num_epochs, model_pt, optimizer_pt, criterion_pt, train_loader, val_loader):
        """Per-process DDP worker: train on one GPU, then dump predictions.

        Runs in a child process created by ``mp.spawn``; ``gpu`` serves as
        both the process rank and the CUDA device index.
        """
        rank = gpu
        dist.init_process_group(backend='nccl', init_method='env://', world_size=torch.cuda.device_count(), rank=rank)
        torch.manual_seed(0)
        torch.cuda.set_device(gpu)
        device = torch.device(f"cuda:{gpu}")
        # Shard both datasets across the participating ranks.
        train_sampler = DistributedSampler(dataset=train_loader.dataset, shuffle=True,
                                           num_replicas=torch.cuda.device_count(), rank=rank)
        val_sampler = DistributedSampler(dataset=val_loader.dataset, shuffle=False,
                                         num_replicas=torch.cuda.device_count(), rank=rank)
        train_loader_ddp = DataLoader(train_loader.dataset, batch_size=100, sampler=train_sampler)
        val_loader_ddp = DataLoader(val_loader.dataset, batch_size=100, sampler=val_sampler)
        model_pt = model_pt.to(device)
        criterion_pt = criterion_pt.to(device)
        model_pt = DistributedDataParallel(model_pt, device_ids=[gpu])
        model_pt.train()
        for epoch in range(num_epochs):
            print(f'Epoch: {epoch}')
            # Reshuffle per-rank shards each epoch, as DistributedSampler requires.
            train_sampler.set_epoch(epoch)
            for i, (input_data, target) in enumerate(train_loader_ddp):
                input_data = input_data.to(device)
                target = target.to(device)
                predicted = model_pt(input_data)
                loss = criterion_pt(predicted, target)
                loss.backward()
                optimizer_pt.step()
                optimizer_pt.zero_grad()
            # Imitate what happens in auto_execute_end_of_epoch() in TrainLoop
            # (presumably to keep RNG consumption in sync -- confirm against
            # the TrainLoop implementation).
            for _ in train_loader:
                pass
            for _ in val_loader:
                pass
        print('Evaluating')
        val_loss, val_pred, val_true = [], [], []
        model_pt.eval()
        with torch.no_grad():
            for input_data, target in val_loader_ddp:
                input_data = input_data.to(device)
                target = target.to(device)
                predicted = model_pt(input_data)
                loss_batch = criterion_pt(predicted, target).cpu().item()
                val_pred += predicted.argmax(dim=1, keepdim=False).cpu().tolist()
                val_true += target.cpu().tolist()
                val_loss.append(loss_batch)
        # Each rank writes its own shard of the evaluation results.
        with open(f'{THIS_DIR}/ddp_cnn_save/pt_ddp_predictions_{gpu}.p', 'wb') as f:
            pickle.dump([val_loss, val_pred, val_true], f)

    @staticmethod
    def set_seeds():
        """Pin all RNG seeds and cuDNN flags so both training paths match."""
        manual_seed = 0
        torch.backends.cudnn.enabled = False
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True
        np.random.seed(manual_seed)
        random.seed(manual_seed)
        torch.manual_seed(manual_seed)
        # if you are using GPU
        torch.cuda.manual_seed(manual_seed)
        torch.cuda.manual_seed_all(manual_seed)
| 37.23431 | 118 | 0.60681 | import unittest
import os
import shutil
import random
import pickle
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import torch.multiprocessing as mp
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data.distributed import DistributedSampler
from aitoolbox import TrainLoop, TTModel
from tests_gpu.test_multi_gpu.ddp_prediction_saver import DDPPredictionSave
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
class CNNNet(TTModel):
def __init__(self):
super(CNNNet, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout2d(0.25)
self.dropout2 = nn.Dropout2d(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def get_loss(self, batch_data, criterion, device):
data, target = batch_data
data, target = data.to(device), target.to(device)
output = self(data)
loss = criterion(output, target)
return loss
def get_predictions(self, batch_data, device):
data, y_test = batch_data
data = data.to(device)
output = self(data)
y_pred = output.argmax(dim=1, keepdim=False)
return y_pred.cpu(), y_test, {}
class TestMNISTCNN(unittest.TestCase):
def test_trainloop_core_pytorch_compare(self):
os.mkdir(f'{THIS_DIR}/ddp_cnn_save')
val_loss_tl, y_pred_tl, y_true_tl = self.train_eval_trainloop(num_epochs=5, use_real_train_data=True)
val_loss_pt, y_pred_pt, y_true_pt = self.train_eval_core_pytorch(num_epochs=5, use_real_train_data=True)
self.assertAlmostEqual(val_loss_tl, val_loss_pt, places=8)
self.assertEqual(y_pred_tl, y_pred_pt)
self.assertEqual(y_true_tl, y_true_pt)
project_path = os.path.join(THIS_DIR, 'ddp_cnn_save')
if os.path.exists(project_path):
shutil.rmtree(project_path)
project_path = os.path.join(THIS_DIR, 'data')
if os.path.exists(project_path):
shutil.rmtree(project_path)
def train_eval_trainloop(self, num_epochs, use_real_train_data=False):
self.set_seeds()
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(os.path.join(THIS_DIR, 'data'), train=use_real_train_data, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=100, shuffle=True)
val_loader = torch.utils.data.DataLoader(
datasets.MNIST(os.path.join(THIS_DIR, 'data'), train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=100)
model = CNNNet()
optimizer = optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.999))
criterion = nn.NLLLoss()
print('Starting train loop')
tl = TrainLoop(
model,
train_loader, val_loader, None,
optimizer, criterion,
gpu_mode='ddp'
)
self.assertEqual(tl.device.type, "cuda")
tl.fit(num_epochs=num_epochs,
callbacks=[DDPPredictionSave(dir_path=f'{THIS_DIR}/ddp_cnn_save',
file_name='tl_ddp_predictions.p')])
with open(f'{THIS_DIR}/ddp_cnn_save/tl_ddp_predictions.p', 'rb') as f:
val_loss, y_pred, y_true = pickle.load(f)
return val_loss, y_pred, y_true
def train_eval_core_pytorch(self, num_epochs, use_real_train_data=False):
self.set_seeds()
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(os.path.join(THIS_DIR, 'data'), train=use_real_train_data, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=100)
val_loader = torch.utils.data.DataLoader(
datasets.MNIST(os.path.join(THIS_DIR, 'data'), train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=100)
model_pt = CNNNet()
optimizer_pt = optim.Adam(model_pt.parameters(), lr=0.001, betas=(0.9, 0.999))
criterion_pt = nn.NLLLoss()
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '8888'
print('Starting the manual DDP training')
mp.spawn(
self.manual_ddp_training,
args=(num_epochs, model_pt, optimizer_pt, criterion_pt, train_loader, val_loader),
nprocs=torch.cuda.device_count()
)
val_loss, y_pred, y_true = [], [], []
for idx in range(torch.cuda.device_count()):
with open(f'{THIS_DIR}/ddp_cnn_save/pt_ddp_predictions_{idx}.p', 'rb') as f:
val_loss_f, y_pred_f, y_true_f = pickle.load(f)
val_loss += val_loss_f
y_pred += y_pred_f
y_true += y_true_f
val_loss = np.mean(val_loss)
return val_loss, y_pred, y_true
@staticmethod
def manual_ddp_training(gpu, num_epochs, model_pt, optimizer_pt, criterion_pt, train_loader, val_loader):
rank = gpu
dist.init_process_group(backend='nccl', init_method='env://', world_size=torch.cuda.device_count(), rank=rank)
torch.manual_seed(0)
torch.cuda.set_device(gpu)
device = torch.device(f"cuda:{gpu}")
train_sampler = DistributedSampler(dataset=train_loader.dataset, shuffle=True,
num_replicas=torch.cuda.device_count(), rank=rank)
val_sampler = DistributedSampler(dataset=val_loader.dataset, shuffle=False,
num_replicas=torch.cuda.device_count(), rank=rank)
train_loader_ddp = DataLoader(train_loader.dataset, batch_size=100, sampler=train_sampler)
val_loader_ddp = DataLoader(val_loader.dataset, batch_size=100, sampler=val_sampler)
model_pt = model_pt.to(device)
criterion_pt = criterion_pt.to(device)
model_pt = DistributedDataParallel(model_pt, device_ids=[gpu])
model_pt.train()
for epoch in range(num_epochs):
print(f'Epoch: {epoch}')
train_sampler.set_epoch(epoch)
for i, (input_data, target) in enumerate(train_loader_ddp):
input_data = input_data.to(device)
target = target.to(device)
predicted = model_pt(input_data)
loss = criterion_pt(predicted, target)
loss.backward()
optimizer_pt.step()
optimizer_pt.zero_grad()
for _ in train_loader:
pass
for _ in val_loader:
pass
print('Evaluating')
val_loss, val_pred, val_true = [], [], []
model_pt.eval()
with torch.no_grad():
for input_data, target in val_loader_ddp:
input_data = input_data.to(device)
target = target.to(device)
predicted = model_pt(input_data)
loss_batch = criterion_pt(predicted, target).cpu().item()
val_pred += predicted.argmax(dim=1, keepdim=False).cpu().tolist()
val_true += target.cpu().tolist()
val_loss.append(loss_batch)
with open(f'{THIS_DIR}/ddp_cnn_save/pt_ddp_predictions_{gpu}.p', 'wb') as f:
pickle.dump([val_loss, val_pred, val_true], f)
@staticmethod
def set_seeds():
manual_seed = 0
torch.backends.cudnn.enabled = False
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
np.random.seed(manual_seed)
random.seed(manual_seed)
torch.manual_seed(manual_seed)
torch.cuda.manual_seed(manual_seed)
torch.cuda.manual_seed_all(manual_seed)
| true | true |
f730bee814b7a46573d0b55a5eaf76c8bd13efe5 | 1,913 | py | Python | 2021/day03.py | sree-cfa/adventOfCode | 0dffcf2b6668a37505afeedd0869f7ae2b5b93cf | [
"BSD-3-Clause"
] | null | null | null | 2021/day03.py | sree-cfa/adventOfCode | 0dffcf2b6668a37505afeedd0869f7ae2b5b93cf | [
"BSD-3-Clause"
] | null | null | null | 2021/day03.py | sree-cfa/adventOfCode | 0dffcf2b6668a37505afeedd0869f7ae2b5b93cf | [
"BSD-3-Clause"
] | null | null | null | from util.inputReader import read_as_strings
LENGTH = 12
def part1(list_of_strings):
    """Compute the power consumption: gamma rate * epsilon rate.

    Gamma takes the most common bit of each column (ties count as '1'),
    epsilon the least common; both are read as binary numbers.

    Generalized: the bit width is derived from the input instead of the
    hard-coded module-level LENGTH, so any uniform-width diagnostic
    report works (identical result for the original 12-bit input).

    :param list_of_strings: equal-length binary strings, e.g. ["010", ...]
    :return: gamma * epsilon as an int; 0 for empty input
    """
    if not list_of_strings:
        return 0
    width = len(list_of_strings[0])
    one_count = [0] * width
    zero_count = [0] * width
    for string in list_of_strings:
        for i, val in enumerate(string):
            if val == '0':
                zero_count[i] += 1
            else:
                one_count[i] += 1
    # The original code had the gamma/epsilon names swapped; the product
    # was unaffected, but the locals are now named per the puzzle spec.
    gamma = ""
    epsilon = ""
    for i in range(width):
        if one_count[i] >= zero_count[i]:
            gamma += '1'
            epsilon += '0'
        else:
            gamma += '0'
            epsilon += '1'
    return int(gamma, 2) * int(epsilon, 2)
def part2(numbers):
ogr_str = ""
co2_str = ""
ogr_bin, co2_bin = 0, 0
for i in range(LENGTH):
ogr_count_0, ogr_count_1 = 0, 0
co2_count_0, co2_count_1 = 0, 0
for number in numbers:
if number.startswith(ogr_str):
ogr_count_0 += 1 if number[i] == '0' else 0
ogr_count_1 += 1 if number[i] == '1' else 0
if number.startswith(co2_str):
co2_count_0 += 1 if number[i] == '0' else 0
co2_count_1 += 1 if number[i] == '1' else 0
if ogr_count_1 + ogr_count_0 == 1: # one number left
ogr_str = next(filter(lambda x: x.startswith(ogr_str), numbers))
ogr_bin = int(ogr_str, 2)
if co2_count_1 + co2_count_0 == 1: # one number left
co2_str = next(filter(lambda x: x.startswith(co2_str), numbers))
co2_bin = int(co2_str, 2)
ogr_str += '1' if ogr_count_1 >= ogr_count_0 else '0'
co2_str += '0' if co2_count_1 >= co2_count_0 else '1'
if ogr_bin == 0:
ogr_bin = int(ogr_str, 2)
if co2_bin == 0:
co2_bin = int(co2_str, 2)
return ogr_bin * co2_bin
# Solve both parts against the puzzle input shipped with the repo.
lines = read_as_strings("../inputs/2021_03.txt")
print("part1:", part1(lines))
print("part2:", part2(lines))
# NOTE(review): an earlier attempt reported part2 = 12723489 as "too high".
| 27.328571 | 76 | 0.543126 | from util.inputReader import read_as_strings
LENGTH = 12
def part1(list_of_strings):
one_count = [0] * LENGTH
zero_count = [0] * LENGTH
for string in list_of_strings:
for i, val in enumerate(string):
if val == '0':
zero_count[i] += 1
else:
one_count[i] += 1
epsilon = ""
gamma = ""
for i in range(LENGTH):
if one_count[i] >= zero_count[i]:
epsilon += '1'
gamma += '0'
else:
epsilon += '0'
gamma += '1'
return int(epsilon, 2) * int(gamma, 2)
def part2(numbers):
ogr_str = ""
co2_str = ""
ogr_bin, co2_bin = 0, 0
for i in range(LENGTH):
ogr_count_0, ogr_count_1 = 0, 0
co2_count_0, co2_count_1 = 0, 0
for number in numbers:
if number.startswith(ogr_str):
ogr_count_0 += 1 if number[i] == '0' else 0
ogr_count_1 += 1 if number[i] == '1' else 0
if number.startswith(co2_str):
co2_count_0 += 1 if number[i] == '0' else 0
co2_count_1 += 1 if number[i] == '1' else 0
if ogr_count_1 + ogr_count_0 == 1:
ogr_str = next(filter(lambda x: x.startswith(ogr_str), numbers))
ogr_bin = int(ogr_str, 2)
if co2_count_1 + co2_count_0 == 1:
co2_str = next(filter(lambda x: x.startswith(co2_str), numbers))
co2_bin = int(co2_str, 2)
ogr_str += '1' if ogr_count_1 >= ogr_count_0 else '0'
co2_str += '0' if co2_count_1 >= co2_count_0 else '1'
if ogr_bin == 0:
ogr_bin = int(ogr_str, 2)
if co2_bin == 0:
co2_bin = int(co2_str, 2)
return ogr_bin * co2_bin
lines = read_as_strings("../inputs/2021_03.txt")
print("part1:", part1(lines))
print("part2:", part2(lines))
| true | true |
f730bf6ba9f601b648b12146b752965863bf095b | 7,902 | py | Python | benchmarks/benchmarks/bench_function_base.py | sankalpdayal5/numpy | 9713e86cc65ebed96464f4d81bb2637857b84f44 | [
"BSD-3-Clause"
] | 1 | 2019-11-15T16:44:36.000Z | 2019-11-15T16:44:36.000Z | benchmarks/benchmarks/bench_function_base.py | sankalpdayal5/numpy | 9713e86cc65ebed96464f4d81bb2637857b84f44 | [
"BSD-3-Clause"
] | null | null | null | benchmarks/benchmarks/bench_function_base.py | sankalpdayal5/numpy | 9713e86cc65ebed96464f4d81bb2637857b84f44 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import, division, print_function
from .common import Benchmark
import numpy as np
class Histogram1D(Benchmark):
    """Benchmarks for np.histogram over a 1-D evenly spaced sample."""

    def setup(self):
        # 100k evenly spaced samples on [0, 100].
        self.samples = np.linspace(0, 100, 100000)

    def time_full_coverage(self):
        # Bins spanning the whole data range.
        bins, value_range = 200, (0, 100)
        np.histogram(self.samples, bins, value_range)

    def time_small_coverage(self):
        # Bins covering only a thin slice of the data.
        bins, value_range = 200, (50, 51)
        np.histogram(self.samples, bins, value_range)

    def time_fine_binning(self):
        # Many narrow bins over the full range.
        bins, value_range = 10000, (0, 100)
        np.histogram(self.samples, bins, value_range)
class Histogram2D(Benchmark):
    """Benchmarks for np.histogramdd over 2-D points."""

    def setup(self):
        # 100k (x, y) pairs built from an evenly spaced 1-D sample.
        self.points = np.linspace(0, 100, 200000).reshape((-1, 2))

    def time_full_coverage(self):
        bins, ranges = (200, 200), ((0, 100), (0, 100))
        np.histogramdd(self.points, bins, ranges)

    def time_small_coverage(self):
        bins, ranges = (200, 200), ((50, 51), (50, 51))
        np.histogramdd(self.points, bins, ranges)

    def time_fine_binning(self):
        bins, ranges = (10000, 10000), ((0, 100), (0, 100))
        np.histogramdd(self.points, bins, ranges)
class Bincount(Benchmark):
    """Benchmarks for np.bincount with and without weights."""

    def setup(self):
        # Dense integer values plus matching float64 weights.
        self.values = np.arange(80000, dtype=np.intp)
        self.weights = self.values.astype(np.float64)

    def time_bincount(self):
        np.bincount(self.values)

    def time_weights(self):
        np.bincount(self.values, weights=self.weights)
class Median(Benchmark):
    """Benchmarks for np.median on even- and odd-length arrays.

    Even and odd lengths exercise the two midpoint code paths; the
    *_inplace variants allow np.median to destroy its input.
    """

    def setup(self):
        self.even_values = np.arange(10000, dtype=np.float32)
        self.odd_values = np.arange(10001, dtype=np.float32)

    def time_even(self):
        np.median(self.even_values)

    def time_odd(self):
        np.median(self.odd_values)

    def time_even_inplace(self):
        np.median(self.even_values, overwrite_input=True)

    def time_odd_inplace(self):
        np.median(self.odd_values, overwrite_input=True)

    def time_even_small(self):
        np.median(self.even_values[:500], overwrite_input=True)

    def time_odd_small(self):
        np.median(self.odd_values[:500], overwrite_input=True)
class Percentile(Benchmark):
    """Benchmarks for np.percentile with few vs. several percentiles."""

    def setup(self):
        self.even_values = np.arange(10000, dtype=np.float32)
        self.odd_values = np.arange(10001, dtype=np.float32)

    def time_quartile(self):
        np.percentile(self.even_values, [25, 75])

    def time_percentile(self):
        np.percentile(self.even_values, [25, 35, 55, 65, 75])
class Select(Benchmark):
    """Benchmarks for np.select with short and long condition lists."""

    def setup(self):
        self.values = np.arange(20000)
        self.other = self.values.copy()
        self.conditions = [(self.values > 4), (self.values < 2)]
        # Fresh comparison arrays, repeated to stress many conditions.
        self.conditions_large = [(self.values > 4), (self.values < 2)] * 10

    def time_select(self):
        np.select(self.conditions, [self.values, self.other])

    def time_select_larger(self):
        np.select(self.conditions_large, ([self.values, self.other] * 10))
def memoize(f):
    """Cache results of ``f`` per positional-argument tuple.

    A ``.copy()`` of the cached value is returned on every call so that
    callers (e.g. in-place sorting benchmarks) can mutate the result
    without corrupting the cache. Cached values must therefore support
    ``.copy()`` (numpy arrays do).

    Bug fix: the original returned ``f`` itself instead of ``wrapped``,
    so the cache and the defensive copy were dead code.
    """
    _memoized = {}

    def wrapped(*args):
        if args not in _memoized:
            _memoized[args] = f(*args)
        return _memoized[args].copy()

    return wrapped
class SortGenerator(object):
    """Factory methods producing arrays with particular kinds of
    (un)sortedness for the Sort benchmarks.
    """

    # Size of each shuffled region in the "random unsorted area" inputs.
    AREA_SIZE = 100
    # Size of each "partially ordered" sub-array ("bubble").
    BUBBLE_SIZE = 100

    @staticmethod
    @memoize
    def random(size, dtype):
        """Return 0..size-1 shuffled into random order."""
        values = np.arange(size, dtype=dtype)
        np.random.shuffle(values)
        return values

    @staticmethod
    @memoize
    def ordered(size, dtype):
        """Return an already-sorted array."""
        return np.arange(size, dtype=dtype)

    @staticmethod
    @memoize
    def reversed(size, dtype):
        """Return an array sorted in descending order."""
        return np.arange(size-1, -1, -1, dtype=dtype)

    @staticmethod
    @memoize
    def uniform(size, dtype):
        """Return an array whose elements are all equal."""
        return np.ones(size, dtype=dtype)

    @staticmethod
    @memoize
    def swapped_pair(size, dtype, swap_frac):
        """Return a sorted array with ``swap_frac * size`` random pair swaps."""
        values = np.arange(size, dtype=dtype)
        for _ in range(int(size * swap_frac)):
            i, j = np.random.randint(0, size, 2)
            values[i], values[j] = values[j], values[i]
        return values

    @staticmethod
    @memoize
    def sorted_block(size, dtype, block_size):
        """Return an array made of interleaved sorted blocks."""
        values = np.arange(size, dtype=dtype)
        if size < block_size:
            return values
        block_num = size // block_size
        interleaved = []
        for offset in range(block_num):
            interleaved.extend(values[offset::block_num])
        return np.array(interleaved)

    @classmethod
    @memoize
    def random_unsorted_area(cls, size, dtype, frac, area_size=None):
        """Return a sorted array whose fraction ``frac`` is covered by
        randomly shuffled regions of ``area_size`` elements each.
        """
        if area_size is None:
            area_size = cls.AREA_SIZE
        area_num = int(size * frac / area_size)
        values = np.arange(size, dtype=dtype)
        for _ in range(area_num):
            start = np.random.randint(size-area_size)
            np.random.shuffle(values[start:start + area_size])
        return values

    @classmethod
    @memoize
    def random_bubble(cls, size, dtype, bubble_num, bubble_size=None):
        """Return a sorted array containing ``bubble_num`` shuffled regions."""
        if bubble_size is None:
            bubble_size = cls.BUBBLE_SIZE
        frac = bubble_size * bubble_num / size
        return cls.random_unsorted_area(size, dtype, frac, bubble_size)
class Sort(Benchmark):
    """Sorting benchmarks over array layouts that show up in real
    workloads (random, ordered, reversed, constant, block-sorted).
    """

    params = [
        # Since NumPy 1.17, 'merge' can map to one of several stable
        # sorts; it is not necessarily merge sort.
        ['quick', 'merge', 'heap'],
        ['float64', 'int64', 'int16'],
        [
            ('random',),
            ('ordered',),
            ('reversed',),
            ('uniform',),
            ('sorted_block', 10),
            ('sorted_block', 100),
            ('sorted_block', 1000),
            # ('swapped_pair', 0.01),
            # ('swapped_pair', 0.1),
            # ('swapped_pair', 0.5),
            # ('random_unsorted_area', 0.5),
            # ('random_unsorted_area', 0.1),
            # ('random_unsorted_area', 0.01),
            # ('random_bubble', 1),
            # ('random_bubble', 5),
            # ('random_bubble', 10),
        ],
    ]
    param_names = ['kind', 'dtype', 'array_type']

    # Length of every generated benchmark array.
    ARRAY_SIZE = 10000

    def setup(self, kind, dtype, array_type):
        np.random.seed(1234)
        generator_name, *generator_args = array_type
        generator = getattr(SortGenerator, generator_name)
        self.arr = generator(self.ARRAY_SIZE, dtype, *generator_args)

    def time_sort(self, kind, dtype, array_type):
        # np.sort copies its input; the fixture is prepared once per
        # benchmark but reused across runs, so it must stay untouched.
        np.sort(self.arr, kind=kind)

    def time_argsort(self, kind, dtype, array_type):
        np.argsort(self.arr, kind=kind)
class SortWorst(Benchmark):
    """Benchmark quicksort on a median-of-3 killer sequence."""

    def setup(self):
        # Build the adversarial input by repeatedly swapping the midpoint
        # towards the tail of a shrinking view.
        self.worst = np.arange(1000000)
        view = self.worst
        while view.size > 3:
            mid = view.size // 2
            view[mid], view[-2] = view[-2], view[mid]
            view = view[:-2]

    def time_sort_worst(self):
        np.sort(self.worst)

    # Retain the historical benchmark name for backward compatibility.
    time_sort_worst.benchmark_name = "bench_function_base.Sort.time_sort_worst"
class Where(Benchmark):
    """Benchmarks for np.where: condition-only, two-array, and broadcast forms."""

    def setup(self):
        self.values = np.arange(20000)
        self.alternative = self.values.copy()
        self.mask = (self.values > 5000)

    def time_1(self):
        np.where(self.mask)

    def time_2(self):
        np.where(self.mask, self.values, self.alternative)

    def time_2_broadcast(self):
        np.where(self.mask, self.values, 0)
| 27.248276 | 95 | 0.578208 | from __future__ import absolute_import, division, print_function
from .common import Benchmark
import numpy as np
class Histogram1D(Benchmark):
def setup(self):
self.d = np.linspace(0, 100, 100000)
def time_full_coverage(self):
np.histogram(self.d, 200, (0, 100))
def time_small_coverage(self):
np.histogram(self.d, 200, (50, 51))
def time_fine_binning(self):
np.histogram(self.d, 10000, (0, 100))
class Histogram2D(Benchmark):
def setup(self):
self.d = np.linspace(0, 100, 200000).reshape((-1,2))
def time_full_coverage(self):
np.histogramdd(self.d, (200, 200), ((0, 100), (0, 100)))
def time_small_coverage(self):
np.histogramdd(self.d, (200, 200), ((50, 51), (50, 51)))
def time_fine_binning(self):
np.histogramdd(self.d, (10000, 10000), ((0, 100), (0, 100)))
class Bincount(Benchmark):
def setup(self):
self.d = np.arange(80000, dtype=np.intp)
self.e = self.d.astype(np.float64)
def time_bincount(self):
np.bincount(self.d)
def time_weights(self):
np.bincount(self.d, weights=self.e)
class Median(Benchmark):
def setup(self):
self.e = np.arange(10000, dtype=np.float32)
self.o = np.arange(10001, dtype=np.float32)
def time_even(self):
np.median(self.e)
def time_odd(self):
np.median(self.o)
def time_even_inplace(self):
np.median(self.e, overwrite_input=True)
def time_odd_inplace(self):
np.median(self.o, overwrite_input=True)
def time_even_small(self):
np.median(self.e[:500], overwrite_input=True)
def time_odd_small(self):
np.median(self.o[:500], overwrite_input=True)
class Percentile(Benchmark):
def setup(self):
self.e = np.arange(10000, dtype=np.float32)
self.o = np.arange(10001, dtype=np.float32)
def time_quartile(self):
np.percentile(self.e, [25, 75])
def time_percentile(self):
np.percentile(self.e, [25, 35, 55, 65, 75])
class Select(Benchmark):
def setup(self):
self.d = np.arange(20000)
self.e = self.d.copy()
self.cond = [(self.d > 4), (self.d < 2)]
self.cond_large = [(self.d > 4), (self.d < 2)] * 10
def time_select(self):
np.select(self.cond, [self.d, self.e])
def time_select_larger(self):
np.select(self.cond_large, ([self.d, self.e] * 10))
def memoize(f):
_memoized = {}
def wrapped(*args):
if args not in _memoized:
_memoized[args] = f(*args)
return _memoized[args].copy()
return f
class SortGenerator(object):
AREA_SIZE = 100
BUBBLE_SIZE = 100
@staticmethod
@memoize
def random(size, dtype):
arr = np.arange(size, dtype=dtype)
np.random.shuffle(arr)
return arr
@staticmethod
@memoize
def ordered(size, dtype):
return np.arange(size, dtype=dtype)
@staticmethod
@memoize
def reversed(size, dtype):
return np.arange(size-1, -1, -1, dtype=dtype)
@staticmethod
@memoize
def uniform(size, dtype):
return np.ones(size, dtype=dtype)
@staticmethod
@memoize
def swapped_pair(size, dtype, swap_frac):
a = np.arange(size, dtype=dtype)
for _ in range(int(size * swap_frac)):
x, y = np.random.randint(0, size, 2)
a[x], a[y] = a[y], a[x]
return a
@staticmethod
@memoize
def sorted_block(size, dtype, block_size):
a = np.arange(size, dtype=dtype)
b = []
if size < block_size:
return a
block_num = size // block_size
for i in range(block_num):
b.extend(a[i::block_num])
return np.array(b)
@classmethod
@memoize
def random_unsorted_area(cls, size, dtype, frac, area_size=None):
if area_size is None:
area_size = cls.AREA_SIZE
area_num = int(size * frac / area_size)
a = np.arange(size, dtype=dtype)
for _ in range(area_num):
start = np.random.randint(size-area_size)
end = start + area_size
np.random.shuffle(a[start:end])
return a
@classmethod
@memoize
def random_bubble(cls, size, dtype, bubble_num, bubble_size=None):
if bubble_size is None:
bubble_size = cls.BUBBLE_SIZE
frac = bubble_size * bubble_num / size
return cls.random_unsorted_area(size, dtype, frac, bubble_size)
class Sort(Benchmark):
params = [
['quick', 'merge', 'heap'],
['float64', 'int64', 'int16'],
[
('random',),
('ordered',),
('reversed',),
('uniform',),
('sorted_block', 10),
('sorted_block', 100),
('sorted_block', 1000),
# ('swapped_pair', 0.01),
# ('swapped_pair', 0.1),
# ('swapped_pair', 0.5),
# ('random_unsorted_area', 0.5),
# ('random_unsorted_area', 0.1),
# ('random_unsorted_area', 0.01),
# ('random_bubble', 1),
# ('random_bubble', 5),
# ('random_bubble', 10),
],
]
param_names = ['kind', 'dtype', 'array_type']
# The size of the benchmarked arrays.
ARRAY_SIZE = 10000
def setup(self, kind, dtype, array_type):
np.random.seed(1234)
array_class = array_type[0]
self.arr = getattr(SortGenerator, array_class)(self.ARRAY_SIZE, dtype, *array_type[1:])
def time_sort(self, kind, dtype, array_type):
# Using np.sort(...) instead of arr.sort(...) because it makes a copy.
# This is important because the data is prepared once per benchmark, but
# used across multiple runs.
np.sort(self.arr, kind=kind)
def time_argsort(self, kind, dtype, array_type):
np.argsort(self.arr, kind=kind)
class SortWorst(Benchmark):
def setup(self):
# quicksort median of 3 worst case
self.worst = np.arange(1000000)
x = self.worst
while x.size > 3:
mid = x.size // 2
x[mid], x[-2] = x[-2], x[mid]
x = x[:-2]
def time_sort_worst(self):
np.sort(self.worst)
# Retain old benchmark name for backward compatability
time_sort_worst.benchmark_name = "bench_function_base.Sort.time_sort_worst"
class Where(Benchmark):
def setup(self):
self.d = np.arange(20000)
self.e = self.d.copy()
self.cond = (self.d > 5000)
def time_1(self):
np.where(self.cond)
def time_2(self):
np.where(self.cond, self.d, self.e)
def time_2_broadcast(self):
np.where(self.cond, self.d, 0)
| true | true |
f730c073a0c4b6559a2cfcc7ed69427ce1242339 | 836 | py | Python | tests/test_exception.py | Clayful/clayful-python | ddd5f1f986fb0079d5128e17f4b0fdce83b4cec1 | [
"MIT"
] | null | null | null | tests/test_exception.py | Clayful/clayful-python | ddd5f1f986fb0079d5128e17f4b0fdce83b4cec1 | [
"MIT"
] | 3 | 2020-04-17T05:24:06.000Z | 2022-02-10T09:00:22.000Z | tests/test_exception.py | Clayful/clayful-python | ddd5f1f986fb0079d5128e17f4b0fdce83b4cec1 | [
"MIT"
] | null | null | null | import unittest
from clayful.exception import ClayfulException
class ClayfulExceptionTest(unittest.TestCase):
def test_clayful_error_constructor(self):
error = ClayfulException(
'Brand',
'get',
400,
{},
'g-no-model',
'my message',
{}
)
self.assertEqual(error.is_clayful, True)
self.assertEqual(error.model, 'Brand')
self.assertEqual(error.method, 'get')
self.assertEqual(error.status, 400)
self.assertEqual(error.headers, {})
self.assertEqual(error.code, 'g-no-model')
self.assertEqual(error.message, 'my message')
self.assertEqual(error.validation, {})
def test_throw_clayful_error(self):
try:
raise ClayfulException(
'Brand',
'get',
400,
{},
'g-no-model',
'my message',
{}
)
except ClayfulException as e:
self.assertEqual(e.is_clayful, True) | 19.44186 | 47 | 0.679426 | import unittest
from clayful.exception import ClayfulException
class ClayfulExceptionTest(unittest.TestCase):
def test_clayful_error_constructor(self):
error = ClayfulException(
'Brand',
'get',
400,
{},
'g-no-model',
'my message',
{}
)
self.assertEqual(error.is_clayful, True)
self.assertEqual(error.model, 'Brand')
self.assertEqual(error.method, 'get')
self.assertEqual(error.status, 400)
self.assertEqual(error.headers, {})
self.assertEqual(error.code, 'g-no-model')
self.assertEqual(error.message, 'my message')
self.assertEqual(error.validation, {})
def test_throw_clayful_error(self):
try:
raise ClayfulException(
'Brand',
'get',
400,
{},
'g-no-model',
'my message',
{}
)
except ClayfulException as e:
self.assertEqual(e.is_clayful, True) | true | true |
f730c0c1c337facbe3bca9654deb29a5694579fe | 618 | py | Python | models/recommendation/tensorflow/wide_deep_large_ds/inference/__init__.py | yangw1234/models-1 | 7e7f484f4f22c760f9a5af836f57a3602b4fa7a6 | [
"Apache-2.0"
] | 357 | 2019-01-23T23:54:30.000Z | 2022-03-31T05:32:25.000Z | models/recommendation/tensorflow/wide_deep_large_ds/inference/__init__.py | yangw1234/models-1 | 7e7f484f4f22c760f9a5af836f57a3602b4fa7a6 | [
"Apache-2.0"
] | 65 | 2019-02-06T15:35:35.000Z | 2022-03-25T09:56:48.000Z | models/recommendation/tensorflow/wide_deep_large_ds/inference/__init__.py | yangw1234/models-1 | 7e7f484f4f22c760f9a5af836f57a3602b4fa7a6 | [
"Apache-2.0"
] | 164 | 2019-02-06T15:05:57.000Z | 2022-03-31T11:48:14.000Z | #
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
| 29.428571 | 74 | 0.737864 | true | true | |
f730c12f66e371e778dfd8bfc1d9044399d1afe0 | 74,951 | py | Python | src/azure-cli/azure/cli/command_modules/acs/decorator.py | charliedmcb/azure-cli | 6bc9519c91e3c241d476d1351b6e9b7543190f47 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/acs/decorator.py | charliedmcb/azure-cli | 6bc9519c91e3c241d476d1351b6e9b7543190f47 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/acs/decorator.py | charliedmcb/azure-cli | 6bc9519c91e3c241d476d1351b6e9b7543190f47 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.prompting import NoTTYException, prompt, prompt_pass
from knack.log import get_logger
from typing import Any, List, Dict, Tuple, Union
from azure.cli.core import AzCommandsLoader
from azure.cli.core.azclierror import (
CLIInternalError,
MutuallyExclusiveArgumentError,
RequiredArgumentMissingError,
InvalidArgumentValueError,
NoTTYError,
)
from azure.cli.core.commands import AzCliCommand
from azure.cli.core.profiles import ResourceType
from .custom import (
_get_rg_location,
_validate_ssh_key,
_get_default_dns_prefix,
_set_vm_set_type,
set_load_balancer_sku,
get_subscription_id,
_ensure_aks_service_principal,
)
logger = get_logger(__name__)
def safe_list_get(li: List, idx: int, default: Any = None):
    """Return ``li[idx]`` when possible, falling back gracefully.

    For a list input, an out-of-range index yields *default*; any non-list
    input yields None (note: not *default*).
    """
    if not isinstance(li, list):
        return None
    try:
        return li[idx]
    except IndexError:
        return default
# pylint: disable=too-many-instance-attributes,too-few-public-methods
class AKSCreateModels:
    """Resolve and store the model classes used during the creation process.

    Each model class (i.e. the class corresponding to the api version
    specified by `resource_type`) is resolved through the command loader and
    exposed as an instance attribute named after the model, e.g.
    ``self.ManagedCluster``.
    """

    # Names of all the models to resolve; all belong to the
    # "managed_clusters" operation group. Resolving them in a loop removes
    # ~100 lines of copy-pasted `get_models` calls while keeping the
    # attribute surface identical to the hand-written original.
    MODEL_NAMES = (
        "ManagedClusterWindowsProfile",
        "ManagedClusterSKU",
        "ContainerServiceNetworkProfile",
        "ContainerServiceLinuxProfile",
        "ManagedClusterServicePrincipalProfile",
        "ContainerServiceSshConfiguration",
        "ContainerServiceSshPublicKey",
        "ManagedClusterAADProfile",
        "ManagedClusterAutoUpgradeProfile",
        "ManagedClusterAgentPoolProfile",
        "ManagedClusterIdentity",
        "UserAssignedIdentity",
        "ManagedCluster",
        "ManagedServiceIdentityUserAssignedIdentitiesValue",
        "ExtendedLocation",
        "ExtendedLocationTypes",
        # not directly used
        "ManagedClusterAPIServerAccessProfile",
    )

    def __init__(
        self,
        cmd: AzCommandsLoader,
        resource_type: ResourceType = ResourceType.MGMT_CONTAINERSERVICE,
    ):
        """Resolve every model listed in MODEL_NAMES into an attribute.

        :param cmd: the command loader used to look up versioned models.
        :param resource_type: the resource type (which determines the api
            version) the models are resolved against.
        """
        self.__cmd = cmd
        self.resource_type = resource_type
        for model_name in self.MODEL_NAMES:
            setattr(
                self,
                model_name,
                self.__cmd.get_models(
                    model_name,
                    resource_type=self.resource_type,
                    operation_group="managed_clusters",
                ),
            )
# pylint: disable=too-many-public-methods
class AKSCreateContext:
# Used to store intermediate variables (usually this stores the dynamically completed value of the parameter,
# which has not been decorated into the `mc` object, and some pure intermediate variables (such as the
# subscription ID)) and a copy of the original function parameters, and provide "getter" methods for all
# parameters.
# To dynamically complete a parameter or check the validity of a parameter, please provide a "getter" function
# named `get_xxx`, where `xxx` is the parameter name. In this function, the process of obtaining parameter
# values, dynamic completion (optional), and validation (optional) should be followed. The obtaining of
# parameter values should further follow the order of obtaining from the `mc` object, from the intermediates,
# or from the original value.
# Note: Dynamic completion will also perform some operations that regulate parameter values, such as
# converting int 0 to None.
# Attention: In case of checking the validity of parameters, be sure not to set the `enable_validation` to
# `True` to avoid loop calls, when using the getter function to obtain the value of other parameters.
# Attention: After the parameter is dynamically completed, it must be added to the intermediates; and after
# the parameter is decorated into the `mc` object, the corresponding intermediate should be deleted.
# Attention: One of the most basic principles is that when the parameter/profile is decorated into the `mc`
# object, it should never be modified, only read-only operations (e.g. validation) can be performed.
    def __init__(self, cmd: AzCliCommand, raw_parameters: Dict):
        """Initialize the context with the command object and raw parameters.

        :param cmd: the AzCliCommand object of the current command.
        :param raw_parameters: dictionary holding a copy of the original
            function parameters, keyed by parameter name.
        :raises CLIInternalError: if raw_parameters is not a dict.
        """
        self.cmd = cmd
        if not isinstance(raw_parameters, dict):
            raise CLIInternalError(
                "Unexpected raw_parameters object with type '{}'.".format(
                    type(raw_parameters)
                )
            )
        self.raw_param = raw_parameters
        # storage for dynamically completed values and pure intermediate
        # variables (e.g. the subscription id)
        self.intermediates = dict()
        # the ManagedCluster object, attached later via attach_mc
        self.mc = None
def attach_mc(self, mc):
if self.mc is None:
self.mc = mc
else:
msg = "the same" if self.mc == mc else "different"
raise CLIInternalError(
"Attempting to attach the `mc` object again, the two objects are {}.".format(
msg
)
)
def get_intermediate(self, variable_name: str, default_value: Any = None):
if variable_name not in self.intermediates:
msg = "The intermediate '{}' does not exist, return default value '{}'.".format(
variable_name, default_value
)
logger.debug(msg)
return self.intermediates.get(variable_name, default_value)
def set_intermediate(
self, variable_name: str, value: Any, overwrite_exists: bool = False
):
if variable_name in self.intermediates:
if overwrite_exists:
msg = "The intermediate '{}' is overwritten. Original value: '{}', new value: '{}'.".format(
variable_name, self.intermediates.get(variable_name), value
)
logger.debug(msg)
self.intermediates[variable_name] = value
elif self.intermediates.get(variable_name) != value:
msg = "The intermediate '{}' already exists, but overwrite is not enabled." \
"Original value: '{}', candidate value: '{}'.".format(
variable_name,
self.intermediates.get(variable_name),
value,
)
# warning level log will be output to the console, which may cause confusion to users
logger.warning(msg)
else:
self.intermediates[variable_name] = value
def remove_intermediate(self, variable_name: str):
self.intermediates.pop(variable_name, None)
# pylint: disable=unused-argument
def get_resource_group_name(self, **kwargs) -> str:
"""Obtain the value of resource_group_name.
Note: resource_group_name will not be decorated into the `mc` object.
The value of this parameter should be provided by user explicitly.
:return: string
"""
# read the original value passed by the command
resource_group_name = self.raw_param.get("resource_group_name")
# this parameter does not need dynamic completion
# this parameter does not need validation
return resource_group_name
# pylint: disable=unused-argument
def get_name(self, **kwargs) -> str:
"""Obtain the value of name.
Note: name will not be decorated into the `mc` object.
The value of this parameter should be provided by user explicitly.
:return: string
"""
# read the original value passed by the command
name = self.raw_param.get("name")
# this parameter does not need dynamic completion
# this parameter does not need validation
return name
# pylint: disable=unused-argument
def get_ssh_key_value(
self, enable_validation: bool = False, **kwargs
) -> str:
"""Obtain the value of ssh_key_value.
If the user does not specify this parameter, the validator function "validate_ssh_key" checks the default file
location "~/.ssh/id_rsa.pub", if the file exists, read its content and return; otherise, create a key pair at
"~/.ssh/id_rsa.pub" and return the public key.
If the user provides a string-like input, the validator function "validate_ssh_key" checks whether it is a file
path, if so, read its content and return; if it is a valid public key, return it; otherwise, create a key pair
there and return the public key.
This function supports the option of enable_validation. When enabled, it will call "_validate_ssh_key" to
verify the validity of ssh_key_value. If parameter no_ssh_key is set to True, verification will be skipped;
otherwise, a CLIError will be raised when the value of ssh_key_value is invalid.
:return: string
"""
# read the original value passed by the command
raw_value = self.raw_param.get("ssh_key_value")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if (
self.mc and
self.mc.linux_profile and
self.mc.linux_profile.ssh and
self.mc.linux_profile.ssh.public_keys
):
public_key_obj = safe_list_get(
self.mc.linux_profile.ssh.public_keys, 0, None
)
if public_key_obj:
value_obtained_from_mc = public_key_obj.key_data
# set default value
if value_obtained_from_mc is not None:
ssh_key_value = value_obtained_from_mc
else:
ssh_key_value = raw_value
# this parameter does not need dynamic completion
# validation
if enable_validation:
_validate_ssh_key(
no_ssh_key=self.get_no_ssh_key(), ssh_key_value=ssh_key_value
)
return ssh_key_value
# pylint: disable=unused-argument
def get_dns_name_prefix(
self, enable_validation: bool = False, **kwargs
) -> Union[str, None]:
"""Dynamically obtain the value of ssh_key_value according to the context.
When both dns_name_prefix and fqdn_subdomain are not assigned, dynamic completion will be triggerd. Function
"_get_default_dns_prefix" will be called to create a default dns_name_prefix composed of name(cluster),
resource_group_name, and subscription_id.
This function supports the option of enable_validation. When enabled, it will check if both dns_name_prefix and
fqdn_subdomain are assigend, if so, raise the MutuallyExclusiveArgumentError.
This function supports the option of read_only. When enabled, it will skip dynamic completion and validation.
:return: string or None
"""
parameter_name = "dns_name_prefix"
# read the original value passed by the command
raw_value = self.raw_param.get(parameter_name)
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc:
value_obtained_from_mc = self.mc.dns_prefix
# set default value
read_from_mc = False
if value_obtained_from_mc is not None:
dns_name_prefix = value_obtained_from_mc
read_from_mc = True
else:
dns_name_prefix = raw_value
# skip dynamic completion & validation if option read_only is specified
if kwargs.get("read_only"):
return dns_name_prefix
dynamic_completion = False
# check whether the parameter meet the conditions of dynamic completion
if not dns_name_prefix and not self.get_fqdn_subdomain():
dynamic_completion = True
# disable dynamic completion if the value is read from `mc`
dynamic_completion = dynamic_completion and not read_from_mc
# In case the user does not specify the parameter and it meets the conditions of automatic completion,
# necessary information is dynamically completed.
if dynamic_completion:
dns_name_prefix = _get_default_dns_prefix(
name=self.get_name(),
resource_group_name=self.get_resource_group_name(),
subscription_id=self.get_intermediate("subscription_id"),
)
# validation
if enable_validation:
if dns_name_prefix and self.get_fqdn_subdomain():
raise MutuallyExclusiveArgumentError(
"--dns-name-prefix and --fqdn-subdomain cannot be used at same time"
)
return dns_name_prefix
# pylint: disable=unused-argument
def get_location(self, **kwargs) -> str:
"""Dynamically obtain the value of location according to the context.
When location is not assigned, dynamic completion will be triggerd. Function "_get_rg_location" will be called
to get the location of the provided resource group, which internally used ResourceManagementClient to send
the request.
:return: string
"""
parameter_name = "location"
# read the original value passed by the command
raw_value = self.raw_param.get(parameter_name)
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc:
value_obtained_from_mc = self.mc.location
# set default value
read_from_mc = False
if value_obtained_from_mc is not None:
location = value_obtained_from_mc
read_from_mc = True
else:
location = raw_value
dynamic_completion = False
# check whether the parameter meet the conditions of dynamic completion
if location is None:
dynamic_completion = True
# disable dynamic completion if the value is read from `mc`
dynamic_completion = dynamic_completion and not read_from_mc
if dynamic_completion:
location = _get_rg_location(
self.cmd.cli_ctx, self.get_resource_group_name()
)
# this parameter does not need validation
return location
# pylint: disable=unused-argument
def get_kubernetes_version(self, **kwargs) -> str:
"""Obtain the value of kubernetes_version.
:return: string
"""
# read the original value passed by the command
raw_value = self.raw_param.get("kubernetes_version")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc:
value_obtained_from_mc = self.mc.kubernetes_version
# set default value
if value_obtained_from_mc is not None:
kubernetes_version = value_obtained_from_mc
else:
kubernetes_version = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return kubernetes_version
    # pylint: disable=unused-argument
    def get_no_ssh_key(self, enable_validation: bool = False, **kwargs) -> bool:
        """Obtain the value of no_ssh_key.

        Note: no_ssh_key will not be decorated into the `mc` object.

        This function supports the option of enable_validation. When enabled, it will call "_validate_ssh_key" to
        verify the validity of ssh_key_value. If parameter no_ssh_key is set to True, verification will be skipped;
        otherwise, a CLIError will be raised when the value of ssh_key_value is invalid.

        :return: bool
        """
        # read the original value passed by the command
        no_ssh_key = self.raw_param.get("no_ssh_key")
        # this parameter does not need dynamic completion
        # validation
        if enable_validation:
            _validate_ssh_key(
                no_ssh_key=no_ssh_key, ssh_key_value=self.get_ssh_key_value()
            )
        return no_ssh_key
# pylint: disable=unused-argument
def get_vm_set_type(self, **kwargs) -> str:
"""Dynamically obtain the value of vm_set_type according to the context.
Dynamic completion will be triggerd by default. Function "_set_vm_set_type" will be called and the
corresponding vm set type will be returned according to the value of kubernetes_version. It will also
normalize the value as server validation is case-sensitive.
This function supports the option of read_only. When enabled, it will skip dynamic completion and validation.
:return: string
"""
parameter_name = "vm_set_type"
# read the original value passed by the command
raw_value = self.raw_param.get(parameter_name)
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.type
# set default value
read_from_mc = False
if value_obtained_from_mc is not None:
vm_set_type = value_obtained_from_mc
read_from_mc = True
else:
vm_set_type = raw_value
# skip dynamic completion & validation if option read_only is specified
if kwargs.get("read_only"):
return vm_set_type
# the value verified by the validator may have case problems, and the
# "_set_vm_set_type" function will adjust it
dynamic_completion = True
# disable dynamic completion if the value is read from `mc`
dynamic_completion = dynamic_completion and not read_from_mc
if dynamic_completion:
vm_set_type = _set_vm_set_type(
vm_set_type=vm_set_type,
kubernetes_version=self.get_kubernetes_version(),
)
# this parameter does not need validation
return vm_set_type
# pylint: disable=unused-argument
def get_load_balancer_sku(
self, enable_validation: bool = False, **kwargs
) -> str:
"""Dynamically obtain the value of load_balancer_sku according to the context.
When load_balancer_sku is not assigned, dynamic completion will be triggerd. Function "set_load_balancer_sku"
will be called and the corresponding load balancer sku will be returned according to the value of
kubernetes_version.
This function supports the option of enable_validation. When enabled, it will check if load_balancer_sku equals
to "basic" when api_server_authorized_ip_ranges is assigned, if so, raise the MutuallyExclusiveArgumentError.
This function supports the option of read_only. When enabled, it will skip dynamic completion and validation.
:return: string
"""
parameter_name = "load_balancer_sku"
# read the original value passed by the command
raw_value = self.raw_param.get(parameter_name)
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.network_profile:
value_obtained_from_mc = self.mc.network_profile.load_balancer_sku
# set default value
read_from_mc = False
if value_obtained_from_mc is not None:
load_balancer_sku = value_obtained_from_mc
read_from_mc = True
else:
load_balancer_sku = raw_value
# skip dynamic completion & validation if option read_only is specified
if kwargs.get("read_only"):
return load_balancer_sku
dynamic_completion = False
# check whether the parameter meet the conditions of dynamic completion
if not load_balancer_sku:
dynamic_completion = True
# disable dynamic completion if the value is read from `mc`
dynamic_completion = dynamic_completion and not read_from_mc
if dynamic_completion:
load_balancer_sku = set_load_balancer_sku(
sku=load_balancer_sku,
kubernetes_version=self.get_kubernetes_version(),
)
# validation
if enable_validation:
if (
load_balancer_sku == "basic" and
self.get_api_server_authorized_ip_ranges()
):
raise MutuallyExclusiveArgumentError(
"--api-server-authorized-ip-ranges can only be used with standard load balancer"
)
return load_balancer_sku
# pylint: disable=unused-argument
def get_api_server_authorized_ip_ranges(
self, enable_validation: bool = False, **kwargs
) -> Union[str, List[str], None]:
"""Obtain the value of api_server_authorized_ip_ranges.
This function supports the option of enable_validation. When enabled, it will check if load_balancer_sku equals
to "basic" when api_server_authorized_ip_ranges is assigned, if so, raise the MutuallyExclusiveArgumentError.
:return: string, empty list or list of strings, or None
"""
parameter_name = "api_server_authorized_ip_ranges"
# read the original value passed by the command
raw_value = self.raw_param.get(parameter_name)
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.api_server_access_profile:
value_obtained_from_mc = (
self.mc.api_server_access_profile.authorized_ip_ranges
)
# set default value
if value_obtained_from_mc is not None:
api_server_authorized_ip_ranges = value_obtained_from_mc
else:
api_server_authorized_ip_ranges = raw_value
# this parameter does not need dynamic completion
# validation
if enable_validation:
if (
api_server_authorized_ip_ranges and
self.get_load_balancer_sku() == "basic"
):
raise MutuallyExclusiveArgumentError(
"--api-server-authorized-ip-ranges can only be used with standard load balancer"
)
return api_server_authorized_ip_ranges
# pylint: disable=unused-argument
def get_fqdn_subdomain(
self, enable_validation: bool = False, **kwargs
) -> Union[str, None]:
"""Obtain the value of fqdn_subdomain.
This function supports the option of enable_validation. When enabled, it will check if both dns_name_prefix and
fqdn_subdomain are assigend, if so, raise the MutuallyExclusiveArgumentError.
:return: string or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("fqdn_subdomain")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc:
value_obtained_from_mc = self.mc.fqdn_subdomain
# set default value
if value_obtained_from_mc is not None:
fqdn_subdomain = value_obtained_from_mc
else:
fqdn_subdomain = raw_value
# this parameter does not need dynamic completion
# validation
if enable_validation:
if fqdn_subdomain and self.get_dns_name_prefix(read_only=True):
raise MutuallyExclusiveArgumentError(
"--dns-name-prefix and --fqdn-subdomain cannot be used at same time"
)
return fqdn_subdomain
# pylint: disable=unused-argument
def get_nodepool_name(self, **kwargs) -> str:
"""Dynamically obtain the value of nodepool_name according to the context.
When additional option enable_trim is enabled, dynamic completion will be triggerd.
This function supports the option of enable_trim. When enabled, it will normalize the value of nodepool_name.
If no value is assigned, the default value "nodepool1" is set, and if the string length is greater than 12,
it is truncated.
:return: string
"""
# read the original value passed by the command
raw_value = self.raw_param.get("nodepool_name")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.name
# set default value
read_from_mc = False
if value_obtained_from_mc is not None:
nodepool_name = value_obtained_from_mc
read_from_mc = True
else:
nodepool_name = raw_value
dynamic_completion = False
# check whether the parameter meet the conditions of dynamic completion
if kwargs.get("enable_trim", False):
dynamic_completion = True
# disable dynamic completion if the value is read from `mc`
dynamic_completion = dynamic_completion and not read_from_mc
if dynamic_completion:
if not nodepool_name:
nodepool_name = "nodepool1"
else:
nodepool_name = nodepool_name[:12]
# this parameter does not need validation
return nodepool_name
# pylint: disable=unused-argument
def get_nodepool_tags(self, **kwargs) -> Union[Dict[str, str], None]:
"""Obtain the value of nodepool_tags.
:return: Dictionary or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("nodepool_tags")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.tags
# set default value
if value_obtained_from_mc is not None:
nodepool_tags = value_obtained_from_mc
else:
nodepool_tags = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return nodepool_tags
# pylint: disable=unused-argument
def get_nodepool_labels(self, **kwargs) -> Union[Dict[str, str], None]:
"""Obtain the value of nodepool_labels.
:return: Dictionary or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("nodepool_labels")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.node_labels
# set default value
if value_obtained_from_mc is not None:
nodepool_labels = value_obtained_from_mc
else:
nodepool_labels = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return nodepool_labels
# pylint: disable=unused-argument
def get_node_count(self, enable_validation: bool = False, **kwargs) -> int:
"""Obtain the value of node_count.
This function supports the option of enable_validation. When enabled, on the premise that
enable_cluster_autoscaler is enabled, it will check whether both min_count and max_count are assigned, if not,
raise the RequiredArgumentMissingError; if will also check whether node_count is between min_count and
max_count, if not, raise the InvalidArgumentValueError.
:return: int
"""
# read the original value passed by the command
raw_value = self.raw_param.get("node_count")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.count
# set default value
if value_obtained_from_mc is not None:
node_count = value_obtained_from_mc
else:
node_count = raw_value
# this parameter does not need dynamic completion
# validation
if enable_validation:
enable_cluster_autoscaler = self.get_enable_cluster_autoscaler()
min_count = self.get_min_count()
max_count = self.get_max_count()
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
raise RequiredArgumentMissingError(
"Please specify both min-count and max-count when --enable-cluster-autoscaler enabled"
)
if node_count < min_count or node_count > max_count:
raise InvalidArgumentValueError(
"node-count is not in the range of min-count and max-count"
)
return int(node_count)
# pylint: disable=unused-argument
def get_node_vm_size(self, **kwargs) -> str:
"""Obtain the value of node_vm_size.
:return: string
"""
# read the original value passed by the command
raw_value = self.raw_param.get("node_vm_size")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.vm_size
# set default value
if value_obtained_from_mc is not None:
node_vm_size = value_obtained_from_mc
else:
node_vm_size = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return node_vm_size
# pylint: disable=unused-argument
def get_vnet_subnet_id(self, **kwargs) -> Union[str, None]:
"""Obtain the value of vnet_subnet_id.
:return: string or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("vnet_subnet_id")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.vnet_subnet_id
# set default value
if value_obtained_from_mc is not None:
vnet_subnet_id = value_obtained_from_mc
else:
vnet_subnet_id = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return vnet_subnet_id
# pylint: disable=unused-argument
def get_ppg(self, **kwargs) -> Union[str, None]:
"""Obtain the value of ppg(proximity_placement_group_id).
:return: string or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("ppg")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = (
agent_pool_profile.proximity_placement_group_id
)
# set default value
if value_obtained_from_mc is not None:
ppg = value_obtained_from_mc
else:
ppg = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return ppg
# pylint: disable=unused-argument
def get_zones(self, **kwargs) -> Union[List[str], None]:
"""Obtain the value of zones.
:return: list of strings or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("zones")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.availability_zones
# set default value
if value_obtained_from_mc is not None:
zones = value_obtained_from_mc
else:
zones = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return zones
# pylint: disable=unused-argument
def get_enable_node_public_ip(self, **kwargs) -> bool:
"""Obtain the value of enable_node_public_ip.
:return: bool
"""
# read the original value passed by the command
raw_value = self.raw_param.get("enable_node_public_ip")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = (
agent_pool_profile.enable_node_public_ip
)
# set default value
if value_obtained_from_mc is not None:
enable_node_public_ip = value_obtained_from_mc
else:
enable_node_public_ip = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return enable_node_public_ip
# pylint: disable=unused-argument
def get_node_public_ip_prefix_id(self, **kwargs) -> Union[str, None]:
"""Obtain the value of node_public_ip_prefix_id.
:return: string or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("node_public_ip_prefix_id")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = (
agent_pool_profile.node_public_ip_prefix_id
)
# set default value
if value_obtained_from_mc is not None:
node_public_ip_prefix_id = value_obtained_from_mc
else:
node_public_ip_prefix_id = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return node_public_ip_prefix_id
# pylint: disable=unused-argument
def get_enable_encryption_at_host(self, **kwargs) -> bool:
"""Obtain the value of enable_encryption_at_host.
:return: bool
"""
# read the original value passed by the command
raw_value = self.raw_param.get("enable_encryption_at_host")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = (
agent_pool_profile.enable_encryption_at_host
)
# set default value
if value_obtained_from_mc is not None:
enable_encryption_at_host = value_obtained_from_mc
else:
enable_encryption_at_host = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return enable_encryption_at_host
# pylint: disable=unused-argument
def get_enable_ultra_ssd(self, **kwargs) -> bool:
"""Obtain the value of enable_ultra_ssd.
:return: bool
"""
# read the original value passed by the command
raw_value = self.raw_param.get("enable_ultra_ssd")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.enable_ultra_ssd
# set default value
if value_obtained_from_mc is not None:
enable_ultra_ssd = value_obtained_from_mc
else:
enable_ultra_ssd = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return enable_ultra_ssd
# pylint: disable=unused-argument
def get_max_pods(self, **kwargs) -> Union[int, None]:
"""Obtain the value of max_pods.
Note: int 0 is converted to None.
:return: int or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("max_pods")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.max_pods
# set default value
if value_obtained_from_mc is not None:
max_pods = value_obtained_from_mc
else:
max_pods = raw_value
# Note: int 0 is converted to None
if max_pods:
max_pods = int(max_pods)
else:
max_pods = None
# this parameter does not need validation
return max_pods
# pylint: disable=unused-argument
def get_node_osdisk_size(self, **kwargs) -> Union[int, None]:
"""Obtain the value of node_osdisk_size.
Note: int 0 is converted to None.
:return: int or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("node_osdisk_size")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.os_disk_size_gb
# set default value
if value_obtained_from_mc is not None:
node_osdisk_size = value_obtained_from_mc
else:
node_osdisk_size = raw_value
# Note: 0 is converted to None
if node_osdisk_size:
node_osdisk_size = int(node_osdisk_size)
else:
node_osdisk_size = None
# this parameter does not need dynamic completion
# this parameter does not need validation
return node_osdisk_size
# pylint: disable=unused-argument
def get_node_osdisk_type(self, **kwargs) -> Union[str, None]:
"""Obtain the value of node_osdisk_size.
:return: string or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("node_osdisk_type")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.os_disk_type
# set default value
if value_obtained_from_mc is not None:
node_osdisk_type = value_obtained_from_mc
else:
node_osdisk_type = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return node_osdisk_type
# pylint: disable=unused-argument
def get_enable_cluster_autoscaler(
self, enable_validation: bool = False, **kwargs
) -> bool:
"""Obtain the value of enable_cluster_autoscaler.
This function supports the option of enable_validation. When enabled, on the premise that
enable_cluster_autoscaler is enabled, it will check whether both min_count and max_count are assigned, if not,
raise the RequiredArgumentMissingError; if will also check whether min_count is less than max_count and
node_count is between min_count and max_count, if not, raise the InvalidArgumentValueError. If
enable_cluster_autoscaler is not enabled, it will check whether any of min_count or max_count is assigned,
if so, raise the RequiredArgumentMissingError.
:return: bool
"""
# read the original value passed by the command
raw_value = self.raw_param.get("enable_cluster_autoscaler")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.enable_auto_scaling
# set default value
if value_obtained_from_mc is not None:
enable_cluster_autoscaler = value_obtained_from_mc
else:
enable_cluster_autoscaler = raw_value
# this parameter does not need dynamic completion
# validation
if enable_validation:
min_count = self.get_min_count()
max_count = self.get_max_count()
node_count = self.get_node_count()
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
raise RequiredArgumentMissingError(
"Please specify both min-count and max-count when --enable-cluster-autoscaler enabled"
)
if min_count > max_count:
raise InvalidArgumentValueError(
"Value of min-count should be less than or equal to value of max-count"
)
if node_count < min_count or node_count > max_count:
raise InvalidArgumentValueError(
"node-count is not in the range of min-count and max-count"
)
else:
if min_count is not None or max_count is not None:
raise RequiredArgumentMissingError(
"min-count and max-count are required for --enable-cluster-autoscaler, please use the flag"
)
return enable_cluster_autoscaler
# pylint: disable=unused-argument
def get_min_count(
self, enable_validation: bool = False, **kwargs
) -> Union[int, None]:
"""Obtain the value of min_count.
This function supports the option of enable_validation. When enabled, on the premise that
enable_cluster_autoscaler is enabled, it will check whether both min_count and max_count are assigned, if not,
raise the RequiredArgumentMissingError; if will also check whether min_count is less than max_count and
node_count is between min_count and max_count, if not, raise the InvalidArgumentValueError. If
enable_cluster_autoscaler is not enabled, it will check whether any of min_count or max_count is assigned,
if so, raise the RequiredArgumentMissingError.
:return: int or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("min_count")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.min_count
# set default value
if value_obtained_from_mc is not None:
min_count = value_obtained_from_mc
else:
min_count = raw_value
# this parameter does not need dynamic completion
# validation
if enable_validation:
enable_cluster_autoscaler = self.get_enable_cluster_autoscaler()
max_count = self.get_max_count()
node_count = self.get_node_count()
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
raise RequiredArgumentMissingError(
"Please specify both min-count and max-count when --enable-cluster-autoscaler enabled"
)
if min_count > max_count:
raise InvalidArgumentValueError(
"Value of min-count should be less than or equal to value of max-count"
)
if node_count < min_count or node_count > max_count:
raise InvalidArgumentValueError(
"node-count is not in the range of min-count and max-count"
)
else:
if min_count is not None or max_count is not None:
raise RequiredArgumentMissingError(
"min-count and max-count are required for --enable-cluster-autoscaler, please use the flag"
)
return min_count
# pylint: disable=unused-argument
def get_max_count(
self, enable_validation: bool = False, **kwargs
) -> Union[int, None]:
"""Obtain the value of max_count.
This function supports the option of enable_validation. When enabled, on the premise that
enable_cluster_autoscaler is enabled, it will check whether both min_count and max_count are assigned, if not,
raise the RequiredArgumentMissingError; if will also check whether min_count is less than max_count and
node_count is between min_count and max_count, if not, raise the InvalidArgumentValueError. If
enable_cluster_autoscaler is not enabled, it will check whether any of min_count or max_count is assigned,
if so, raise the RequiredArgumentMissingError.
:return: int or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("max_count")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.max_count
# set default value
if value_obtained_from_mc is not None:
max_count = value_obtained_from_mc
else:
max_count = raw_value
# this parameter does not need dynamic completion
# validation
if enable_validation:
enable_cluster_autoscaler = self.get_enable_cluster_autoscaler()
min_count = self.get_min_count()
node_count = self.get_node_count()
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
raise RequiredArgumentMissingError(
"Please specify both min-count and max-count when --enable-cluster-autoscaler enabled"
)
if min_count > max_count:
raise InvalidArgumentValueError(
"Value of min-count should be less than or equal to value of max-count"
)
if node_count < min_count or node_count > max_count:
raise InvalidArgumentValueError(
"node-count is not in the range of min-count and max-count"
)
else:
if min_count is not None or max_count is not None:
raise RequiredArgumentMissingError(
"min-count and max-count are required for --enable-cluster-autoscaler, please use the flag"
)
return max_count
# pylint: disable=unused-argument
def get_admin_username(self, **kwargs) -> str:
"""Obtain the value of admin_username.
:return: str
"""
# read the original value passed by the command
raw_value = self.raw_param.get("admin_username")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.linux_profile:
value_obtained_from_mc = self.mc.linux_profile.admin_username
# set default value
if value_obtained_from_mc is not None:
admin_username = value_obtained_from_mc
else:
admin_username = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return admin_username
# pylint: disable=unused-argument
    def get_windows_admin_username_and_password(
        self, **kwargs
    ) -> Tuple[Union[str, None], Union[str, None]]:
        """Dynamically obtain the value of windows_admin_username and windows_admin_password according to the context.

        When one of windows_admin_username and windows_admin_password is not assigned, dynamic completion will be
        triggered. The user will be prompted to enter the missing windows_admin_username or windows_admin_password in
        tty (pseudo terminal). If the program is running in a non-interactive environment, a NoTTYError error will be
        raised.

        This function supports the option of read_only. When enabled, it will skip dynamic completion and validation.

        :return: a tuple containing two elements of string or None
        """
        # windows_admin_username
        # read the original value passed by the command
        username_raw_value = self.raw_param.get("windows_admin_username")
        # try to read the property value corresponding to the parameter from the `mc` object
        username_value_obtained_from_mc = None
        if self.mc and self.mc.windows_profile:
            username_value_obtained_from_mc = (
                self.mc.windows_profile.admin_username
            )
        # set default value; the value from the `mc` object takes precedence
        username_read_from_mc = False
        if username_value_obtained_from_mc is not None:
            windows_admin_username = username_value_obtained_from_mc
            username_read_from_mc = True
        else:
            windows_admin_username = username_raw_value
        # windows_admin_password
        # read the original value passed by the command
        password_raw_value = self.raw_param.get("windows_admin_password")
        # try to read the property value corresponding to the parameter from the `mc` object
        password_value_obtained_from_mc = None
        if self.mc and self.mc.windows_profile:
            password_value_obtained_from_mc = (
                self.mc.windows_profile.admin_password
            )
        # set default value; the value from the `mc` object takes precedence
        password_read_from_mc = False
        if password_value_obtained_from_mc is not None:
            windows_admin_password = password_value_obtained_from_mc
            password_read_from_mc = True
        else:
            windows_admin_password = password_raw_value
        # consistency check: username and password must both come from the same source
        if username_read_from_mc != password_read_from_mc:
            raise CLIInternalError(
                "Inconsistent state detected, one of windows admin name and password is read from the `mc` object."
            )
        # skip dynamic completion & validation if option read_only is specified
        if kwargs.get("read_only"):
            return windows_admin_username, windows_admin_password
        username_dynamic_completion = False
        # check whether the parameter meets the conditions of dynamic completion,
        # to avoid that windows_admin_password is set but windows_admin_username is not
        if windows_admin_username is None and windows_admin_password:
            username_dynamic_completion = True
        # disable dynamic completion if the value is read from `mc`
        username_dynamic_completion = (
            username_dynamic_completion and not username_read_from_mc
        )
        if username_dynamic_completion:
            try:
                windows_admin_username = prompt("windows_admin_username: ")
                # The validation for admin_username in ManagedClusterWindowsProfile will fail even if
                # users still set windows_admin_username to empty here
            except NoTTYException:
                raise NoTTYError(
                    "Please specify username for Windows in non-interactive mode."
                )
        password_dynamic_completion = False
        # check whether the parameter meets the conditions of dynamic completion,
        # to avoid that windows_admin_username is set but windows_admin_password is not
        if windows_admin_password is None and windows_admin_username:
            password_dynamic_completion = True
        # disable dynamic completion if the value is read from `mc`
        password_dynamic_completion = (
            password_dynamic_completion and not password_read_from_mc
        )
        if password_dynamic_completion:
            try:
                windows_admin_password = prompt_pass(
                    msg="windows-admin-password: ", confirm=True
                )
            except NoTTYException:
                raise NoTTYError(
                    "Please specify both username and password in non-interactive mode."
                )
        # these parameters do not need validation
        return windows_admin_username, windows_admin_password
# pylint: disable=unused-argument
def get_enable_ahub(self, **kwargs) -> bool:
"""Obtain the value of enable_ahub.
Note: This parameter will not be directly decorated into the `mc` object.
:return: bool
"""
# read the original value passed by the command
enable_ahub = self.raw_param.get("enable_ahub")
# read the original value passed by the command
raw_value = self.raw_param.get("enable_ahub")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.windows_profile:
value_obtained_from_mc = self.mc.windows_profile.license_type == "Windows_Server"
# set default value
if value_obtained_from_mc is not None:
enable_ahub = value_obtained_from_mc
else:
enable_ahub = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return enable_ahub
# pylint: disable=unused-argument,too-many-statements
    def get_service_principal_and_client_secret(
        self, **kwargs
    ) -> Tuple[Union[str, None], Union[str, None]]:
        """Dynamically obtain the values of service_principal and client_secret according to the context.

        When service_principal and client_secret are not assigned and enable_managed_identity is True, dynamic
        completion will not be triggered. For other cases, dynamic completion will be triggered.

        When client_secret is given but service_principal is not, dns_name_prefix or fqdn_subdomain will be used to
        create a service principal. The parameters subscription_id, location and name (cluster) are also required when
        calling function "_ensure_aks_service_principal".

        When service_principal is given but client_secret is not, function "_ensure_aks_service_principal" would raise
        CLIError.

        This function supports the option of read_only. When enabled, it will skip dynamic completion and validation.

        :return: a tuple containing two elements of string or None
        """
        # service_principal
        sp_parameter_name = "service_principal"
        sp_property_name_in_mc = "client_id"
        # read the original value passed by the command
        sp_raw_value = self.raw_param.get(sp_parameter_name)
        # try to read the property value corresponding to the parameter from the `mc` object
        sp_value_obtained_from_mc = None
        if self.mc and self.mc.service_principal_profile:
            sp_value_obtained_from_mc = getattr(
                self.mc.service_principal_profile, sp_property_name_in_mc
            )
        # set default value; the value from the `mc` object takes precedence
        sp_read_from_mc = False
        if sp_value_obtained_from_mc is not None:
            service_principal = sp_value_obtained_from_mc
            sp_read_from_mc = True
        else:
            service_principal = sp_raw_value
        # client_secret
        secret_parameter_name = "client_secret"
        secret_property_name_in_mc = "secret"
        # read the original value passed by the command
        secret_raw_value = self.raw_param.get(secret_parameter_name)
        # try to read the property value corresponding to the parameter from the `mc` object
        secret_value_obtained_from_mc = None
        if self.mc and self.mc.service_principal_profile:
            secret_value_obtained_from_mc = getattr(
                self.mc.service_principal_profile, secret_property_name_in_mc
            )
        # set default value; the value from the `mc` object takes precedence
        secret_read_from_mc = False
        if secret_value_obtained_from_mc is not None:
            client_secret = secret_value_obtained_from_mc
            secret_read_from_mc = True
        else:
            client_secret = secret_raw_value
        # consistency check: sp and secret must both come from the same source
        if sp_read_from_mc != secret_read_from_mc:
            raise CLIInternalError(
                "Inconsistent state detected, one of sp and secret is read from the `mc` object."
            )
        # skip dynamic completion & validation if option read_only is specified
        if kwargs.get("read_only"):
            return service_principal, client_secret
        # dynamic completion for service_principal and client_secret
        dynamic_completion = False
        # check whether the parameters meet the conditions of dynamic completion:
        # skip only when managed identity is requested and neither sp nor secret was given
        enable_managed_identity = self.get_enable_managed_identity(read_only=True)
        if not (
            enable_managed_identity and
            not service_principal and
            not client_secret
        ):
            dynamic_completion = True
        # disable dynamic completion if the value is read from `mc`
        dynamic_completion = (
            dynamic_completion and
            not sp_read_from_mc and
            not secret_read_from_mc
        )
        if dynamic_completion:
            # creates (or validates) a service principal; may raise CLIError when only one of
            # service_principal / client_secret is supplied
            principal_obj = _ensure_aks_service_principal(
                cli_ctx=self.cmd.cli_ctx,
                service_principal=service_principal,
                client_secret=client_secret,
                subscription_id=self.get_intermediate(
                    "subscription_id", None
                ),
                dns_name_prefix=self.get_dns_name_prefix(),
                fqdn_subdomain=self.get_fqdn_subdomain(),
                location=self.get_location(),
                name=self.get_name(),
            )
            service_principal = principal_obj.get("service_principal")
            client_secret = principal_obj.get("client_secret")
        # these parameters do not need validation
        return service_principal, client_secret
def get_enable_managed_identity(
self, enable_validation=False, **kwargs
) -> bool:
"""Dynamically obtain the values of service_principal and client_secret according to the context.
Note: This parameter will not be directly decorated into the `mc` object.
When both service_principal and client_secret are assigned and enable_managed_identity is True, dynamic
completion will be triggered. The value of enable_managed_identity will be set to False.
:return: bool
"""
# Note: This parameter will not be decorated into the `mc` object.
# read the original value passed by the command
raw_value = self.raw_param.get("enable_managed_identity")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.identity:
value_obtained_from_mc = self.mc.identity.type is not None
# set default value
read_from_mc = False
if value_obtained_from_mc is not None:
enable_managed_identity = value_obtained_from_mc
read_from_mc = True
else:
enable_managed_identity = raw_value
# skip dynamic completion & validation if option read_only is specified
if kwargs.get("read_only"):
return enable_managed_identity
dynamic_completion = False
# check whether the parameter meet the conditions of dynamic completion
(
service_principal,
client_secret,
) = self.get_service_principal_and_client_secret(read_only=True)
if service_principal and client_secret:
dynamic_completion = True
# disable dynamic completion if the value is read from `mc`
dynamic_completion = dynamic_completion and not read_from_mc
if dynamic_completion:
enable_managed_identity = False
# validation
if enable_validation:
# TODO: add validation
pass
return enable_managed_identity
class AKSCreateDecorator:
    def __init__(
        self,
        cmd: AzCliCommand,
        client,
        models: AKSCreateModels,
        raw_parameters: Dict,
        resource_type: ResourceType = ResourceType.MGMT_CONTAINERSERVICE,
    ):
        """Store the command, client, models and raw parameters, and build the creation context.

        :param cmd: the AzCliCommand being executed
        :param client: the container service client used to send the create request
        :param models: the bundle of SDK model classes used to assemble the ManagedCluster
        :param raw_parameters: the raw parameter dictionary passed by the command
        :param resource_type: the SDK profile (api version) to resolve models against
        """
        self.cmd = cmd
        self.client = client
        self.models = models
        # store the context in the process of assembling the ManagedCluster object
        self.context = AKSCreateContext(cmd, raw_parameters)
        # `resource_type` is used to dynamically find the model (of a specific api version) provided by the
        # containerservice SDK; most models have been passed through the `models` parameter (instantiated
        # from `AKSCreateModels` (or `PreviewAKSCreateModels` in aks-preview), where resource_type (i.e.,
        # api version) has been specified). A very small number of models are instantiated through internal
        # functions, one use case is that `api_server_access_profile` is initialized by function
        # `_populate_api_server_access_profile` defined in `_helpers.py`
        self.resource_type = resource_type
def init_mc(self):
# get subscription id and store as intermediate
subscription_id = get_subscription_id(self.cmd.cli_ctx)
self.context.set_intermediate(
"subscription_id", subscription_id, overwrite_exists=True
)
# initialize the `ManagedCluster` object with mandatory parameters (i.e. location)
mc = self.models.ManagedCluster(location=self.context.get_location())
return mc
def set_up_agent_pool_profiles(self, mc):
if not isinstance(mc, self.models.ManagedCluster):
raise CLIInternalError(
"Unexpected mc object with type '{}'.".format(type(mc))
)
agent_pool_profile = self.models.ManagedClusterAgentPoolProfile(
# Must be 12 chars or less before ACS RP adds to it
name=self.context.get_nodepool_name(enable_trim=True),
tags=self.context.get_nodepool_tags(),
node_labels=self.context.get_nodepool_labels(),
count=self.context.get_node_count(enable_validation=True),
vm_size=self.context.get_node_vm_size(),
os_type="Linux",
vnet_subnet_id=self.context.get_vnet_subnet_id(),
proximity_placement_group_id=self.context.get_ppg(),
availability_zones=self.context.get_zones(),
enable_node_public_ip=self.context.get_enable_node_public_ip(),
node_public_ip_prefix_id=self.context.get_node_public_ip_prefix_id(),
enable_encryption_at_host=self.context.get_enable_encryption_at_host(),
enable_ultra_ssd=self.context.get_enable_ultra_ssd(),
max_pods=self.context.get_max_pods(),
type=self.context.get_vm_set_type(),
mode="System",
os_disk_size_gb=self.context.get_node_osdisk_size(),
os_disk_type=self.context.get_node_osdisk_type(),
min_count=self.context.get_min_count(enable_validation=True),
max_count=self.context.get_max_count(enable_validation=True),
enable_auto_scaling=self.context.get_enable_cluster_autoscaler(
enable_validation=True
),
)
mc.agent_pool_profiles = [agent_pool_profile]
return mc
def set_up_linux_profile(self, mc):
if not isinstance(mc, self.models.ManagedCluster):
raise CLIInternalError(
"Unexpected mc object with type '{}'.".format(type(mc))
)
# LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
if not self.context.get_no_ssh_key(enable_validation=True):
ssh_config = self.models.ContainerServiceSshConfiguration(
public_keys=[
self.models.ContainerServiceSshPublicKey(
key_data=self.context.get_ssh_key_value(
enable_validation=True
)
)
]
)
linux_profile = self.models.ContainerServiceLinuxProfile(
admin_username=self.context.get_admin_username(), ssh=ssh_config
)
mc.linux_profile = linux_profile
return mc
def set_up_windows_profile(self, mc):
if not isinstance(mc, self.models.ManagedCluster):
raise CLIInternalError(
"Unexpected mc object with type '{}'.".format(type(mc))
)
(
windows_admin_username,
windows_admin_password,
) = self.context.get_windows_admin_username_and_password()
if windows_admin_username or windows_admin_password:
windows_license_type = None
if self.context.get_enable_ahub():
windows_license_type = "Windows_Server"
# this would throw an error if windows_admin_username is empty (the user enters an empty
# string after being prompted), since admin_username is a required parameter
windows_profile = self.models.ManagedClusterWindowsProfile(
admin_username=windows_admin_username,
admin_password=windows_admin_password,
license_type=windows_license_type,
)
mc.windows_profile = windows_profile
# clean up intermediate after `mc` is decorated
self.context.remove_intermediate("windows_admin_username")
self.context.remove_intermediate("windows_admin_password")
return mc
def set_up_service_principal_profile(self, mc):
if not isinstance(mc, self.models.ManagedCluster):
raise CLIInternalError(
"Unexpected mc object with type '{}'.".format(type(mc))
)
# If customer explicitly provide a service principal, disable managed identity.
(
service_principal,
client_secret,
) = self.context.get_service_principal_and_client_secret()
enable_managed_identity = self.context.get_enable_managed_identity()
# Skip create service principal profile for the cluster if the cluster enables managed identity
# and customer doesn't explicitly provide a service principal.
if not (
enable_managed_identity and
not service_principal and
not client_secret
):
service_principal_profile = (
self.models.ManagedClusterServicePrincipalProfile(
client_id=service_principal, secret=client_secret
)
)
mc.service_principal_profile = service_principal_profile
# clean up intermediates after `mc` is decorated
self.context.remove_intermediate("service_principal")
self.context.remove_intermediate("client_secret")
return mc
def construct_default_mc(self):
# An all-in-one function used to create the complete `ManagedCluster` object, which will later be
# passed as a parameter to the underlying SDK (mgmt-containerservice) to send the actual request.
# Note: to reduce the risk of regression introduced by refactoring, this function is not complete
# and is being implemented gradually.
# initialize the `ManagedCluster` object, also set up the intermediate named "subscription_id"
mc = self.init_mc()
# set up agent pool profile(s)
mc = self.set_up_agent_pool_profiles(mc)
# set up linux profile (for ssh access)
mc = self.set_up_linux_profile(mc)
# set up windows profile
mc = self.set_up_windows_profile(mc)
# set up service principal profile
mc = self.set_up_service_principal_profile(mc)
return mc
| 42.878146 | 119 | 0.649611 |
from knack.prompting import NoTTYException, prompt, prompt_pass
from knack.log import get_logger
from typing import Any, List, Dict, Tuple, Union
from azure.cli.core import AzCommandsLoader
from azure.cli.core.azclierror import (
CLIInternalError,
MutuallyExclusiveArgumentError,
RequiredArgumentMissingError,
InvalidArgumentValueError,
NoTTYError,
)
from azure.cli.core.commands import AzCliCommand
from azure.cli.core.profiles import ResourceType
from .custom import (
_get_rg_location,
_validate_ssh_key,
_get_default_dns_prefix,
_set_vm_set_type,
set_load_balancer_sku,
get_subscription_id,
_ensure_aks_service_principal,
)
logger = get_logger(__name__)
def safe_list_get(li: List, idx: int, default: Any = None):
    """Return ``li[idx]`` when available, ``default`` when out of range.

    A non-list input yields ``None`` (not ``default``), mirroring the
    original contract of this helper.
    """
    if not isinstance(li, list):
        return None
    try:
        return li[idx]
    except IndexError:
        return default
class AKSCreateModels:
    """Store the models used in `aks create`.

    Each attribute is the (api-version specific) model class of the same
    name, resolved through the command loader from the "managed_clusters"
    operation group. The repetitive per-model `get_models` calls of the
    original implementation are collapsed into a single data-driven loop;
    the resulting attribute set is identical.
    """
    # names of every model class exposed as an attribute of this container
    _MODEL_NAMES = (
        "ManagedClusterWindowsProfile",
        "ManagedClusterSKU",
        "ContainerServiceNetworkProfile",
        "ContainerServiceLinuxProfile",
        "ManagedClusterServicePrincipalProfile",
        "ContainerServiceSshConfiguration",
        "ContainerServiceSshPublicKey",
        "ManagedClusterAADProfile",
        "ManagedClusterAutoUpgradeProfile",
        "ManagedClusterAgentPoolProfile",
        "ManagedClusterIdentity",
        "UserAssignedIdentity",
        "ManagedCluster",
        "ManagedServiceIdentityUserAssignedIdentitiesValue",
        "ExtendedLocation",
        "ExtendedLocationTypes",
        "ManagedClusterAPIServerAccessProfile",
    )

    def __init__(
        self,
        cmd: AzCommandsLoader,
        resource_type: ResourceType = ResourceType.MGMT_CONTAINERSERVICE,
    ):
        """Resolve all model classes for the given resource (api-version) type.

        :param cmd: command loader used to resolve versioned model classes
        :param resource_type: selects the api version of the containerservice SDK
        """
        self.__cmd = cmd
        self.resource_type = resource_type
        for model_name in self._MODEL_NAMES:
            # attribute name deliberately equals the model class name
            setattr(
                self,
                model_name,
                self.__cmd.get_models(
                    model_name,
                    resource_type=self.resource_type,
                    operation_group="managed_clusters",
                ),
            )
class AKSCreateContext:
def __init__(self, cmd: AzCliCommand, raw_parameters: Dict):
self.cmd = cmd
if not isinstance(raw_parameters, dict):
raise CLIInternalError(
"Unexpected raw_parameters object with type '{}'.".format(
type(raw_parameters)
)
)
self.raw_param = raw_parameters
self.intermediates = dict()
self.mc = None
def attach_mc(self, mc):
if self.mc is None:
self.mc = mc
else:
msg = "the same" if self.mc == mc else "different"
raise CLIInternalError(
"Attempting to attach the `mc` object again, the two objects are {}.".format(
msg
)
)
def get_intermediate(self, variable_name: str, default_value: Any = None):
if variable_name not in self.intermediates:
msg = "The intermediate '{}' does not exist, return default value '{}'.".format(
variable_name, default_value
)
logger.debug(msg)
return self.intermediates.get(variable_name, default_value)
def set_intermediate(
self, variable_name: str, value: Any, overwrite_exists: bool = False
):
if variable_name in self.intermediates:
if overwrite_exists:
msg = "The intermediate '{}' is overwritten. Original value: '{}', new value: '{}'.".format(
variable_name, self.intermediates.get(variable_name), value
)
logger.debug(msg)
self.intermediates[variable_name] = value
elif self.intermediates.get(variable_name) != value:
msg = "The intermediate '{}' already exists, but overwrite is not enabled." \
"Original value: '{}', candidate value: '{}'.".format(
variable_name,
self.intermediates.get(variable_name),
value,
)
logger.warning(msg)
else:
self.intermediates[variable_name] = value
    def remove_intermediate(self, variable_name: str):
        """Drop an intermediate value if present; silently ignores missing names."""
        self.intermediates.pop(variable_name, None)
def get_resource_group_name(self, **kwargs) -> str:
resource_group_name = self.raw_param.get("resource_group_name")
return resource_group_name
def get_name(self, **kwargs) -> str:
name = self.raw_param.get("name")
return name
def get_ssh_key_value(
self, enable_validation: bool = False, **kwargs
) -> str:
raw_value = self.raw_param.get("ssh_key_value")
value_obtained_from_mc = None
if (
self.mc and
self.mc.linux_profile and
self.mc.linux_profile.ssh and
self.mc.linux_profile.ssh.public_keys
):
public_key_obj = safe_list_get(
self.mc.linux_profile.ssh.public_keys, 0, None
)
if public_key_obj:
value_obtained_from_mc = public_key_obj.key_data
if value_obtained_from_mc is not None:
ssh_key_value = value_obtained_from_mc
else:
ssh_key_value = raw_value
if enable_validation:
_validate_ssh_key(
no_ssh_key=self.get_no_ssh_key(), ssh_key_value=ssh_key_value
)
return ssh_key_value
    def get_dns_name_prefix(
        self, enable_validation: bool = False, **kwargs
    ) -> Union[str, None]:
        """Obtain dns_name_prefix with dynamic completion.

        Preference order: value on the attached `mc` object, then the raw CLI
        parameter, then (when neither it nor fqdn_subdomain is set) a default
        derived from name/resource group/subscription. Pass read_only=True in
        kwargs to skip completion and validation — used by get_fqdn_subdomain
        to avoid mutual recursion.

        :raises MutuallyExclusiveArgumentError: when validation is enabled and
            both dns_name_prefix and fqdn_subdomain are set.
        """
        parameter_name = "dns_name_prefix"
        raw_value = self.raw_param.get(parameter_name)
        # try to read the property value corresponding to the parameter from the `mc` object
        value_obtained_from_mc = None
        if self.mc:
            value_obtained_from_mc = self.mc.dns_prefix
        read_from_mc = False
        if value_obtained_from_mc is not None:
            dns_name_prefix = value_obtained_from_mc
            read_from_mc = True
        else:
            dns_name_prefix = raw_value
        # skip dynamic completion & validation if option read_only is specified
        if kwargs.get("read_only"):
            return dns_name_prefix
        dynamic_completion = False
        # check whether the parameter meets the conditions of dynamic completion
        if not dns_name_prefix and not self.get_fqdn_subdomain():
            dynamic_completion = True
        # disable dynamic completion if the value is read from `mc`
        dynamic_completion = dynamic_completion and not read_from_mc
        if dynamic_completion:
            dns_name_prefix = _get_default_dns_prefix(
                name=self.get_name(),
                resource_group_name=self.get_resource_group_name(),
                subscription_id=self.get_intermediate("subscription_id"),
            )
        # validation
        if enable_validation:
            if dns_name_prefix and self.get_fqdn_subdomain():
                raise MutuallyExclusiveArgumentError(
                    "--dns-name-prefix and --fqdn-subdomain cannot be used at same time"
                )
        return dns_name_prefix
    def get_location(self, **kwargs) -> str:
        """Obtain location with dynamic completion.

        Preference order: value on the attached `mc` object, then the raw CLI
        parameter; when neither is set, the location is completed from the
        resource group via `_get_rg_location`.
        """
        parameter_name = "location"
        raw_value = self.raw_param.get(parameter_name)
        # try to read the property value corresponding to the parameter from the `mc` object
        value_obtained_from_mc = None
        if self.mc:
            value_obtained_from_mc = self.mc.location
        read_from_mc = False
        if value_obtained_from_mc is not None:
            location = value_obtained_from_mc
            read_from_mc = True
        else:
            location = raw_value
        # check whether the parameter meets the conditions of dynamic completion
        dynamic_completion = False
        if location is None:
            dynamic_completion = True
        # disable dynamic completion if the value is read from `mc`
        dynamic_completion = dynamic_completion and not read_from_mc
        if dynamic_completion:
            location = _get_rg_location(
                self.cmd.cli_ctx, self.get_resource_group_name()
            )
        return location
def get_kubernetes_version(self, **kwargs) -> str:
raw_value = self.raw_param.get("kubernetes_version")
value_obtained_from_mc = None
if self.mc:
value_obtained_from_mc = self.mc.kubernetes_version
if value_obtained_from_mc is not None:
kubernetes_version = value_obtained_from_mc
else:
kubernetes_version = raw_value
return kubernetes_version
def get_no_ssh_key(self, enable_validation: bool = False, **kwargs) -> bool:
no_ssh_key = self.raw_param.get("no_ssh_key")
if enable_validation:
_validate_ssh_key(
no_ssh_key=no_ssh_key, ssh_key_value=self.get_ssh_key_value()
)
return no_ssh_key
    def get_vm_set_type(self, **kwargs) -> str:
        """Obtain vm_set_type with dynamic completion.

        Preference order: type of the first agent pool on the attached `mc`
        object, then the raw CLI parameter. Unless read from `mc`, the value
        is always normalized/completed via `_set_vm_set_type` (which also
        accounts for the kubernetes version). Pass read_only=True in kwargs
        to skip completion.
        """
        parameter_name = "vm_set_type"
        raw_value = self.raw_param.get(parameter_name)
        # try to read the property value corresponding to the parameter from the `mc` object
        value_obtained_from_mc = None
        if self.mc and self.mc.agent_pool_profiles:
            agent_pool_profile = safe_list_get(
                self.mc.agent_pool_profiles, 0, None
            )
            if agent_pool_profile:
                value_obtained_from_mc = agent_pool_profile.type
        read_from_mc = False
        if value_obtained_from_mc is not None:
            vm_set_type = value_obtained_from_mc
            read_from_mc = True
        else:
            vm_set_type = raw_value
        # skip dynamic completion if option read_only is specified
        if kwargs.get("read_only"):
            return vm_set_type
        # completion is unconditional here (unlike other getters), except when
        # the value was read from `mc`
        dynamic_completion = True
        # disable dynamic completion if the value is read from `mc`
        dynamic_completion = dynamic_completion and not read_from_mc
        if dynamic_completion:
            vm_set_type = _set_vm_set_type(
                vm_set_type=vm_set_type,
                kubernetes_version=self.get_kubernetes_version(),
            )
        return vm_set_type
    def get_load_balancer_sku(
        self, enable_validation: bool = False, **kwargs
    ) -> str:
        """Obtain load_balancer_sku with dynamic completion.

        Preference order: value on the attached `mc` network profile, then
        the raw CLI parameter; an empty value is completed via
        `set_load_balancer_sku` based on the kubernetes version. Pass
        read_only=True in kwargs to skip completion and validation.

        :raises MutuallyExclusiveArgumentError: when validation is enabled,
            the sku is "basic" and api-server authorized ip ranges are set.
        """
        parameter_name = "load_balancer_sku"
        raw_value = self.raw_param.get(parameter_name)
        # try to read the property value corresponding to the parameter from the `mc` object
        value_obtained_from_mc = None
        if self.mc and self.mc.network_profile:
            value_obtained_from_mc = self.mc.network_profile.load_balancer_sku
        read_from_mc = False
        if value_obtained_from_mc is not None:
            load_balancer_sku = value_obtained_from_mc
            read_from_mc = True
        else:
            load_balancer_sku = raw_value
        # skip dynamic completion & validation if option read_only is specified
        if kwargs.get("read_only"):
            return load_balancer_sku
        # check whether the parameter meets the conditions of dynamic completion
        dynamic_completion = False
        if not load_balancer_sku:
            dynamic_completion = True
        # disable dynamic completion if the value is read from `mc`
        dynamic_completion = dynamic_completion and not read_from_mc
        if dynamic_completion:
            load_balancer_sku = set_load_balancer_sku(
                sku=load_balancer_sku,
                kubernetes_version=self.get_kubernetes_version(),
            )
        # validation
        if enable_validation:
            if (
                load_balancer_sku == "basic" and
                self.get_api_server_authorized_ip_ranges()
            ):
                raise MutuallyExclusiveArgumentError(
                    "--api-server-authorized-ip-ranges can only be used with standard load balancer"
                )
        return load_balancer_sku
def get_api_server_authorized_ip_ranges(
self, enable_validation: bool = False, **kwargs
) -> Union[str, List[str], None]:
parameter_name = "api_server_authorized_ip_ranges"
raw_value = self.raw_param.get(parameter_name)
value_obtained_from_mc = None
if self.mc and self.mc.api_server_access_profile:
value_obtained_from_mc = (
self.mc.api_server_access_profile.authorized_ip_ranges
)
if value_obtained_from_mc is not None:
api_server_authorized_ip_ranges = value_obtained_from_mc
else:
api_server_authorized_ip_ranges = raw_value
if enable_validation:
if (
api_server_authorized_ip_ranges and
self.get_load_balancer_sku() == "basic"
):
raise MutuallyExclusiveArgumentError(
"--api-server-authorized-ip-ranges can only be used with standard load balancer"
)
return api_server_authorized_ip_ranges
def get_fqdn_subdomain(
self, enable_validation: bool = False, **kwargs
) -> Union[str, None]:
raw_value = self.raw_param.get("fqdn_subdomain")
value_obtained_from_mc = None
if self.mc:
value_obtained_from_mc = self.mc.fqdn_subdomain
if value_obtained_from_mc is not None:
fqdn_subdomain = value_obtained_from_mc
else:
fqdn_subdomain = raw_value
if enable_validation:
if fqdn_subdomain and self.get_dns_name_prefix(read_only=True):
raise MutuallyExclusiveArgumentError(
"--dns-name-prefix and --fqdn-subdomain cannot be used at same time"
)
return fqdn_subdomain
    def get_nodepool_name(self, **kwargs) -> str:
        """Obtain nodepool_name with optional trimming.

        Preference order: name of the first agent pool on the attached `mc`
        object, then the raw CLI parameter. When enable_trim=True is passed in
        kwargs (and the value was not read from `mc`), an empty name defaults
        to "nodepool1" and longer names are truncated to 12 characters — the
        limit imposed before the ACS RP appends to it.
        """
        raw_value = self.raw_param.get("nodepool_name")
        # try to read the property value corresponding to the parameter from the `mc` object
        value_obtained_from_mc = None
        if self.mc and self.mc.agent_pool_profiles:
            agent_pool_profile = safe_list_get(
                self.mc.agent_pool_profiles, 0, None
            )
            if agent_pool_profile:
                value_obtained_from_mc = agent_pool_profile.name
        read_from_mc = False
        if value_obtained_from_mc is not None:
            nodepool_name = value_obtained_from_mc
            read_from_mc = True
        else:
            nodepool_name = raw_value
        # check whether the parameter meets the conditions of dynamic completion (trimming)
        dynamic_completion = False
        if kwargs.get("enable_trim", False):
            dynamic_completion = True
        # disable dynamic completion if the value is read from `mc`
        dynamic_completion = dynamic_completion and not read_from_mc
        if dynamic_completion:
            if not nodepool_name:
                nodepool_name = "nodepool1"
            else:
                nodepool_name = nodepool_name[:12]
        return nodepool_name
def get_nodepool_tags(self, **kwargs) -> Union[Dict[str, str], None]:
raw_value = self.raw_param.get("nodepool_tags")
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.tags
if value_obtained_from_mc is not None:
nodepool_tags = value_obtained_from_mc
else:
nodepool_tags = raw_value
return nodepool_tags
def get_nodepool_labels(self, **kwargs) -> Union[Dict[str, str], None]:
raw_value = self.raw_param.get("nodepool_labels")
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.node_labels
if value_obtained_from_mc is not None:
nodepool_labels = value_obtained_from_mc
else:
nodepool_labels = raw_value
return nodepool_labels
    def get_node_count(self, enable_validation: bool = False, **kwargs) -> int:
        """Obtain node_count, preferring the first agent pool on `mc`.

        When validation is enabled and the cluster autoscaler is on, both
        min_count and max_count must be present and node_count must fall
        inside [min_count, max_count].

        :raises RequiredArgumentMissingError: autoscaler on but min/max absent.
        :raises InvalidArgumentValueError: node_count outside [min, max].
        :return: int (the raw value is coerced on return)
        """
        raw_value = self.raw_param.get("node_count")
        # try to read the property value corresponding to the parameter from the `mc` object
        value_obtained_from_mc = None
        if self.mc and self.mc.agent_pool_profiles:
            agent_pool_profile = safe_list_get(
                self.mc.agent_pool_profiles, 0, None
            )
            if agent_pool_profile:
                value_obtained_from_mc = agent_pool_profile.count
        if value_obtained_from_mc is not None:
            node_count = value_obtained_from_mc
        else:
            node_count = raw_value
        # validation
        if enable_validation:
            enable_cluster_autoscaler = self.get_enable_cluster_autoscaler()
            min_count = self.get_min_count()
            max_count = self.get_max_count()
            if enable_cluster_autoscaler:
                if min_count is None or max_count is None:
                    raise RequiredArgumentMissingError(
                        "Please specify both min-count and max-count when --enable-cluster-autoscaler enabled"
                    )
                if node_count < min_count or node_count > max_count:
                    raise InvalidArgumentValueError(
                        "node-count is not in the range of min-count and max-count"
                    )
        return int(node_count)
def get_node_vm_size(self, **kwargs) -> str:
raw_value = self.raw_param.get("node_vm_size")
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.vm_size
if value_obtained_from_mc is not None:
node_vm_size = value_obtained_from_mc
else:
node_vm_size = raw_value
return node_vm_size
def get_vnet_subnet_id(self, **kwargs) -> Union[str, None]:
raw_value = self.raw_param.get("vnet_subnet_id")
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.vnet_subnet_id
if value_obtained_from_mc is not None:
vnet_subnet_id = value_obtained_from_mc
else:
vnet_subnet_id = raw_value
return vnet_subnet_id
def get_ppg(self, **kwargs) -> Union[str, None]:
raw_value = self.raw_param.get("ppg")
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = (
agent_pool_profile.proximity_placement_group_id
)
if value_obtained_from_mc is not None:
ppg = value_obtained_from_mc
else:
ppg = raw_value
return ppg
def get_zones(self, **kwargs) -> Union[List[str], None]:
raw_value = self.raw_param.get("zones")
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.availability_zones
if value_obtained_from_mc is not None:
zones = value_obtained_from_mc
else:
zones = raw_value
return zones
def get_enable_node_public_ip(self, **kwargs) -> bool:
raw_value = self.raw_param.get("enable_node_public_ip")
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = (
agent_pool_profile.enable_node_public_ip
)
if value_obtained_from_mc is not None:
enable_node_public_ip = value_obtained_from_mc
else:
enable_node_public_ip = raw_value
return enable_node_public_ip
def get_node_public_ip_prefix_id(self, **kwargs) -> Union[str, None]:
raw_value = self.raw_param.get("node_public_ip_prefix_id")
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = (
agent_pool_profile.node_public_ip_prefix_id
)
if value_obtained_from_mc is not None:
node_public_ip_prefix_id = value_obtained_from_mc
else:
node_public_ip_prefix_id = raw_value
return node_public_ip_prefix_id
def get_enable_encryption_at_host(self, **kwargs) -> bool:
raw_value = self.raw_param.get("enable_encryption_at_host")
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = (
agent_pool_profile.enable_encryption_at_host
)
if value_obtained_from_mc is not None:
enable_encryption_at_host = value_obtained_from_mc
else:
enable_encryption_at_host = raw_value
return enable_encryption_at_host
def get_enable_ultra_ssd(self, **kwargs) -> bool:
raw_value = self.raw_param.get("enable_ultra_ssd")
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.enable_ultra_ssd
if value_obtained_from_mc is not None:
enable_ultra_ssd = value_obtained_from_mc
else:
enable_ultra_ssd = raw_value
return enable_ultra_ssd
def get_max_pods(self, **kwargs) -> Union[int, None]:
raw_value = self.raw_param.get("max_pods")
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.max_pods
if value_obtained_from_mc is not None:
max_pods = value_obtained_from_mc
else:
max_pods = raw_value
if max_pods:
max_pods = int(max_pods)
else:
max_pods = None
return max_pods
def get_node_osdisk_size(self, **kwargs) -> Union[int, None]:
raw_value = self.raw_param.get("node_osdisk_size")
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.os_disk_size_gb
if value_obtained_from_mc is not None:
node_osdisk_size = value_obtained_from_mc
else:
node_osdisk_size = raw_value
if node_osdisk_size:
node_osdisk_size = int(node_osdisk_size)
else:
node_osdisk_size = None
return node_osdisk_size
def get_node_osdisk_type(self, **kwargs) -> Union[str, None]:
raw_value = self.raw_param.get("node_osdisk_type")
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.os_disk_type
if value_obtained_from_mc is not None:
node_osdisk_type = value_obtained_from_mc
else:
node_osdisk_type = raw_value
return node_osdisk_type
    def get_enable_cluster_autoscaler(
        self, enable_validation: bool = False, **kwargs
    ) -> bool:
        """Obtain enable_cluster_autoscaler, preferring the first agent pool on `mc`.

        When validation is enabled, the flag is cross-checked against
        min_count/max_count/node_count (mirrors the validation in
        get_min_count / get_max_count / get_node_count).

        :raises RequiredArgumentMissingError: flag on but min/max absent, or
            min/max given without the flag.
        :raises InvalidArgumentValueError: min > max, or node_count outside
            [min, max].
        """
        raw_value = self.raw_param.get("enable_cluster_autoscaler")
        # try to read the property value corresponding to the parameter from the `mc` object
        value_obtained_from_mc = None
        if self.mc and self.mc.agent_pool_profiles:
            agent_pool_profile = safe_list_get(
                self.mc.agent_pool_profiles, 0, None
            )
            if agent_pool_profile:
                value_obtained_from_mc = agent_pool_profile.enable_auto_scaling
        if value_obtained_from_mc is not None:
            enable_cluster_autoscaler = value_obtained_from_mc
        else:
            enable_cluster_autoscaler = raw_value
        # validation
        if enable_validation:
            min_count = self.get_min_count()
            max_count = self.get_max_count()
            node_count = self.get_node_count()
            if enable_cluster_autoscaler:
                if min_count is None or max_count is None:
                    raise RequiredArgumentMissingError(
                        "Please specify both min-count and max-count when --enable-cluster-autoscaler enabled"
                    )
                if min_count > max_count:
                    raise InvalidArgumentValueError(
                        "Value of min-count should be less than or equal to value of max-count"
                    )
                if node_count < min_count or node_count > max_count:
                    raise InvalidArgumentValueError(
                        "node-count is not in the range of min-count and max-count"
                    )
            else:
                if min_count is not None or max_count is not None:
                    raise RequiredArgumentMissingError(
                        "min-count and max-count are required for --enable-cluster-autoscaler, please use the flag"
                    )
        return enable_cluster_autoscaler
    def get_min_count(
        self, enable_validation: bool = False, **kwargs
    ) -> Union[int, None]:
        """Obtain min_count, preferring the first agent pool on `mc`.

        When validation is enabled, the value is cross-checked against the
        autoscaler flag, max_count and node_count (same rules as in
        get_enable_cluster_autoscaler).

        :raises RequiredArgumentMissingError: autoscaler on but min/max absent,
            or min/max given without the flag.
        :raises InvalidArgumentValueError: min > max, or node_count outside
            [min, max].
        """
        raw_value = self.raw_param.get("min_count")
        # try to read the property value corresponding to the parameter from the `mc` object
        value_obtained_from_mc = None
        if self.mc and self.mc.agent_pool_profiles:
            agent_pool_profile = safe_list_get(
                self.mc.agent_pool_profiles, 0, None
            )
            if agent_pool_profile:
                value_obtained_from_mc = agent_pool_profile.min_count
        if value_obtained_from_mc is not None:
            min_count = value_obtained_from_mc
        else:
            min_count = raw_value
        # validation
        if enable_validation:
            enable_cluster_autoscaler = self.get_enable_cluster_autoscaler()
            max_count = self.get_max_count()
            node_count = self.get_node_count()
            if enable_cluster_autoscaler:
                if min_count is None or max_count is None:
                    raise RequiredArgumentMissingError(
                        "Please specify both min-count and max-count when --enable-cluster-autoscaler enabled"
                    )
                if min_count > max_count:
                    raise InvalidArgumentValueError(
                        "Value of min-count should be less than or equal to value of max-count"
                    )
                if node_count < min_count or node_count > max_count:
                    raise InvalidArgumentValueError(
                        "node-count is not in the range of min-count and max-count"
                    )
            else:
                if min_count is not None or max_count is not None:
                    raise RequiredArgumentMissingError(
                        "min-count and max-count are required for --enable-cluster-autoscaler, please use the flag"
                    )
        return min_count
    def get_max_count(
        self, enable_validation: bool = False, **kwargs
    ) -> Union[int, None]:
        """Obtain max_count, preferring the first agent pool on `mc`.

        When validation is enabled, the value is cross-checked against the
        autoscaler flag, min_count and node_count (same rules as in
        get_enable_cluster_autoscaler).

        :raises RequiredArgumentMissingError: autoscaler on but min/max absent,
            or min/max given without the flag.
        :raises InvalidArgumentValueError: min > max, or node_count outside
            [min, max].
        """
        raw_value = self.raw_param.get("max_count")
        # try to read the property value corresponding to the parameter from the `mc` object
        value_obtained_from_mc = None
        if self.mc and self.mc.agent_pool_profiles:
            agent_pool_profile = safe_list_get(
                self.mc.agent_pool_profiles, 0, None
            )
            if agent_pool_profile:
                value_obtained_from_mc = agent_pool_profile.max_count
        if value_obtained_from_mc is not None:
            max_count = value_obtained_from_mc
        else:
            max_count = raw_value
        # validation
        if enable_validation:
            enable_cluster_autoscaler = self.get_enable_cluster_autoscaler()
            min_count = self.get_min_count()
            node_count = self.get_node_count()
            if enable_cluster_autoscaler:
                if min_count is None or max_count is None:
                    raise RequiredArgumentMissingError(
                        "Please specify both min-count and max-count when --enable-cluster-autoscaler enabled"
                    )
                if min_count > max_count:
                    raise InvalidArgumentValueError(
                        "Value of min-count should be less than or equal to value of max-count"
                    )
                if node_count < min_count or node_count > max_count:
                    raise InvalidArgumentValueError(
                        "node-count is not in the range of min-count and max-count"
                    )
            else:
                if min_count is not None or max_count is not None:
                    raise RequiredArgumentMissingError(
                        "min-count and max-count are required for --enable-cluster-autoscaler, please use the flag"
                    )
        return max_count
def get_admin_username(self, **kwargs) -> str:
raw_value = self.raw_param.get("admin_username")
value_obtained_from_mc = None
if self.mc and self.mc.linux_profile:
value_obtained_from_mc = self.mc.linux_profile.admin_username
if value_obtained_from_mc is not None:
admin_username = value_obtained_from_mc
else:
admin_username = raw_value
return admin_username
    def get_windows_admin_username_and_password(
        self, **kwargs
    ) -> Tuple[Union[str, None], Union[str, None]]:
        """Obtain windows_admin_username and windows_admin_password as a pair.

        Both halves prefer the attached `mc` windows profile, falling back to
        raw CLI parameters. When exactly one half is provided (and nothing was
        read from `mc`), the other half is dynamically completed by prompting
        the user. Pass read_only=True in kwargs to skip prompting.

        :raises CLIInternalError: when only one half was read from `mc`.
        :raises NoTTYError: when prompting is required but no TTY is available.
        """
        username_raw_value = self.raw_param.get("windows_admin_username")
        # try to read the username from the `mc` object
        username_value_obtained_from_mc = None
        if self.mc and self.mc.windows_profile:
            username_value_obtained_from_mc = (
                self.mc.windows_profile.admin_username
            )
        username_read_from_mc = False
        if username_value_obtained_from_mc is not None:
            windows_admin_username = username_value_obtained_from_mc
            username_read_from_mc = True
        else:
            windows_admin_username = username_raw_value
        password_raw_value = self.raw_param.get("windows_admin_password")
        # try to read the password from the `mc` object
        password_value_obtained_from_mc = None
        if self.mc and self.mc.windows_profile:
            password_value_obtained_from_mc = (
                self.mc.windows_profile.admin_password
            )
        password_read_from_mc = False
        if password_value_obtained_from_mc is not None:
            windows_admin_password = password_value_obtained_from_mc
            password_read_from_mc = True
        else:
            windows_admin_password = password_raw_value
        # the two values must come from the same place
        if username_read_from_mc != password_read_from_mc:
            raise CLIInternalError(
                "Inconsistent state detected, one of windows admin name and password is read from the `mc` object."
            )
        # skip dynamic completion if option read_only is specified
        if kwargs.get("read_only"):
            return windows_admin_username, windows_admin_password
        # complete a missing username by prompting, when a password was given
        username_dynamic_completion = False
        if windows_admin_username is None and windows_admin_password:
            username_dynamic_completion = True
        username_dynamic_completion = (
            username_dynamic_completion and not username_read_from_mc
        )
        if username_dynamic_completion:
            try:
                windows_admin_username = prompt("windows_admin_username: ")
            except NoTTYException:
                raise NoTTYError(
                    "Please specify username for Windows in non-interactive mode."
                )
        # complete a missing password by prompting, when a username was given
        password_dynamic_completion = False
        if windows_admin_password is None and windows_admin_username:
            password_dynamic_completion = True
        password_dynamic_completion = (
            password_dynamic_completion and not password_read_from_mc
        )
        if password_dynamic_completion:
            try:
                windows_admin_password = prompt_pass(
                    msg="windows-admin-password: ", confirm=True
                )
            except NoTTYException:
                raise NoTTYError(
                    "Please specify both username and password in non-interactive mode."
                )
        return windows_admin_username, windows_admin_password
def get_enable_ahub(self, **kwargs) -> bool:
enable_ahub = self.raw_param.get("enable_ahub")
raw_value = self.raw_param.get("enable_ahub")
value_obtained_from_mc = None
if self.mc and self.mc.windows_profile:
value_obtained_from_mc = self.mc.windows_profile.license_type == "Windows_Server"
if value_obtained_from_mc is not None:
enable_ahub = value_obtained_from_mc
else:
enable_ahub = raw_value
return enable_ahub
    def get_service_principal_and_client_secret(
        self, **kwargs
    ) -> Tuple[Union[str, None], Union[str, None]]:
        """Obtain the values of service_principal and client_secret.

        The two values are resolved as a pair because they must stay
        consistent: each prefers the value already stored on the `mc`
        object and falls back to the raw user input, and it is an
        internal error for only one of them to come from `mc`.

        Unless called with read_only=True, dynamic completion may call
        `_ensure_aks_service_principal` (defined elsewhere), which
        presumably creates or completes the service principal — callers
        that only need the currently known values should pass
        read_only=True to avoid that side effect.

        :return: tuple of (service_principal, client_secret), each a
            string or None
        """
        # Resolve the service principal (stored as `client_id` on the
        # mc's service principal profile): prefer the mc value.
        sp_parameter_name = "service_principal"
        sp_property_name_in_mc = "client_id"
        sp_raw_value = self.raw_param.get(sp_parameter_name)
        sp_value_obtained_from_mc = None
        if self.mc and self.mc.service_principal_profile:
            sp_value_obtained_from_mc = getattr(
                self.mc.service_principal_profile, sp_property_name_in_mc
            )
        sp_read_from_mc = False
        if sp_value_obtained_from_mc is not None:
            service_principal = sp_value_obtained_from_mc
            sp_read_from_mc = True
        else:
            service_principal = sp_raw_value
        # Resolve the client secret (stored as `secret`) the same way.
        secret_parameter_name = "client_secret"
        secret_property_name_in_mc = "secret"
        secret_raw_value = self.raw_param.get(secret_parameter_name)
        secret_value_obtained_from_mc = None
        if self.mc and self.mc.service_principal_profile:
            secret_value_obtained_from_mc = getattr(
                self.mc.service_principal_profile, secret_property_name_in_mc
            )
        secret_read_from_mc = False
        if secret_value_obtained_from_mc is not None:
            client_secret = secret_value_obtained_from_mc
            secret_read_from_mc = True
        else:
            client_secret = secret_raw_value
        # Both halves of the pair must come from the same source.
        if sp_read_from_mc != secret_read_from_mc:
            raise CLIInternalError(
                "Inconsistent state detected, one of sp and secret is read from the `mc` object."
            )
        if kwargs.get("read_only"):
            return service_principal, client_secret
        # Dynamic completion triggers unless managed identity is enabled
        # with neither sp nor secret supplied, and never for values that
        # were read back from the `mc` object.
        dynamic_completion = False
        enable_managed_identity = self.get_enable_managed_identity(read_only=True)
        if not (
            enable_managed_identity and
            not service_principal and
            not client_secret
        ):
            dynamic_completion = True
        dynamic_completion = (
            dynamic_completion and
            not sp_read_from_mc and
            not secret_read_from_mc
        )
        if dynamic_completion:
            # Requires the "subscription_id" intermediate recorded during
            # init_mc, plus dns prefix / fqdn subdomain / location / name.
            principal_obj = _ensure_aks_service_principal(
                cli_ctx=self.cmd.cli_ctx,
                service_principal=service_principal,
                client_secret=client_secret,
                subscription_id=self.get_intermediate(
                    "subscription_id", None
                ),
                dns_name_prefix=self.get_dns_name_prefix(),
                fqdn_subdomain=self.get_fqdn_subdomain(),
                location=self.get_location(),
                name=self.get_name(),
            )
            service_principal = principal_obj.get("service_principal")
            client_secret = principal_obj.get("client_secret")
        return service_principal, client_secret
def get_enable_managed_identity(
self, enable_validation=False, **kwargs
) -> bool:
raw_value = self.raw_param.get("enable_managed_identity")
value_obtained_from_mc = None
if self.mc and self.mc.identity:
value_obtained_from_mc = self.mc.identity.type is not None
read_from_mc = False
if value_obtained_from_mc is not None:
enable_managed_identity = value_obtained_from_mc
read_from_mc = True
else:
enable_managed_identity = raw_value
if kwargs.get("read_only"):
return enable_managed_identity
dynamic_completion = False
(
service_principal,
client_secret,
) = self.get_service_principal_and_client_secret(read_only=True)
if service_principal and client_secret:
dynamic_completion = True
dynamic_completion = dynamic_completion and not read_from_mc
if dynamic_completion:
enable_managed_identity = False
if enable_validation:
pass
return enable_managed_identity
class AKSCreateDecorator:
    """Driver for `az aks create`.

    Progressively decorates a `ManagedCluster` model object with
    profiles derived from the raw command parameters, which are
    resolved through an `AKSCreateContext`.
    """
    def __init__(
        self,
        cmd: AzCliCommand,
        client,
        models: AKSCreateModels,
        raw_parameters: Dict,
        resource_type: ResourceType = ResourceType.MGMT_CONTAINERSERVICE,
    ):
        # `client` is the SDK client used to send the final request;
        # `models` supplies the SDK model classes instantiated below.
        self.cmd = cmd
        self.client = client
        self.models = models
        # All parameter resolution is delegated to the context object.
        self.context = AKSCreateContext(cmd, raw_parameters)
        self.resource_type = resource_type
    def init_mc(self):
        """Create a bare `ManagedCluster` (location only) and record the
        subscription id as the "subscription_id" intermediate on the
        context for later steps.
        """
        subscription_id = get_subscription_id(self.cmd.cli_ctx)
        self.context.set_intermediate(
            "subscription_id", subscription_id, overwrite_exists=True
        )
        mc = self.models.ManagedCluster(location=self.context.get_location())
        return mc
    def set_up_agent_pool_profiles(self, mc):
        """Attach the single system-mode agent pool profile built from
        the nodepool-related parameters; returns the decorated `mc`.
        """
        if not isinstance(mc, self.models.ManagedCluster):
            raise CLIInternalError(
                "Unexpected mc object with type '{}'.".format(type(mc))
            )
        agent_pool_profile = self.models.ManagedClusterAgentPoolProfile(
            # The nodepool name is trimmed by the context (enable_trim).
            name=self.context.get_nodepool_name(enable_trim=True),
            tags=self.context.get_nodepool_tags(),
            node_labels=self.context.get_nodepool_labels(),
            count=self.context.get_node_count(enable_validation=True),
            vm_size=self.context.get_node_vm_size(),
            os_type="Linux",
            vnet_subnet_id=self.context.get_vnet_subnet_id(),
            proximity_placement_group_id=self.context.get_ppg(),
            availability_zones=self.context.get_zones(),
            enable_node_public_ip=self.context.get_enable_node_public_ip(),
            node_public_ip_prefix_id=self.context.get_node_public_ip_prefix_id(),
            enable_encryption_at_host=self.context.get_enable_encryption_at_host(),
            enable_ultra_ssd=self.context.get_enable_ultra_ssd(),
            max_pods=self.context.get_max_pods(),
            type=self.context.get_vm_set_type(),
            mode="System",
            os_disk_size_gb=self.context.get_node_osdisk_size(),
            os_disk_type=self.context.get_node_osdisk_type(),
            min_count=self.context.get_min_count(enable_validation=True),
            max_count=self.context.get_max_count(enable_validation=True),
            enable_auto_scaling=self.context.get_enable_cluster_autoscaler(
                enable_validation=True
            ),
        )
        mc.agent_pool_profiles = [agent_pool_profile]
        return mc
    def set_up_linux_profile(self, mc):
        """Attach the linux profile (admin username + SSH public key)
        unless --no-ssh-key was requested; returns the decorated `mc`.
        """
        if not isinstance(mc, self.models.ManagedCluster):
            raise CLIInternalError(
                "Unexpected mc object with type '{}'.".format(type(mc))
            )
        if not self.context.get_no_ssh_key(enable_validation=True):
            ssh_config = self.models.ContainerServiceSshConfiguration(
                public_keys=[
                    self.models.ContainerServiceSshPublicKey(
                        key_data=self.context.get_ssh_key_value(
                            enable_validation=True
                        )
                    )
                ]
            )
            linux_profile = self.models.ContainerServiceLinuxProfile(
                admin_username=self.context.get_admin_username(), ssh=ssh_config
            )
            mc.linux_profile = linux_profile
        return mc
    def set_up_windows_profile(self, mc):
        """Attach the windows profile when windows admin credentials
        were provided; returns the decorated `mc`.
        """
        if not isinstance(mc, self.models.ManagedCluster):
            raise CLIInternalError(
                "Unexpected mc object with type '{}'.".format(type(mc))
            )
        (
            windows_admin_username,
            windows_admin_password,
        ) = self.context.get_windows_admin_username_and_password()
        if windows_admin_username or windows_admin_password:
            windows_license_type = None
            if self.context.get_enable_ahub():
                # Azure Hybrid User Benefit maps to this license type.
                windows_license_type = "Windows_Server"
            windows_profile = self.models.ManagedClusterWindowsProfile(
                admin_username=windows_admin_username,
                admin_password=windows_admin_password,
                license_type=windows_license_type,
            )
            mc.windows_profile = windows_profile
        # clean up intermediates after `mc` is decorated
        self.context.remove_intermediate("windows_admin_username")
        self.context.remove_intermediate("windows_admin_password")
        return mc
    def set_up_service_principal_profile(self, mc):
        """Attach the service principal profile unless managed identity
        is enabled with no sp/secret given; returns the decorated `mc`.
        """
        if not isinstance(mc, self.models.ManagedCluster):
            raise CLIInternalError(
                "Unexpected mc object with type '{}'.".format(type(mc))
            )
        (
            service_principal,
            client_secret,
        ) = self.context.get_service_principal_and_client_secret()
        enable_managed_identity = self.context.get_enable_managed_identity()
        if not (
            enable_managed_identity and
            not service_principal and
            not client_secret
        ):
            service_principal_profile = (
                self.models.ManagedClusterServicePrincipalProfile(
                    client_id=service_principal, secret=client_secret
                )
            )
            mc.service_principal_profile = service_principal_profile
        # clean up intermediates after `mc` is decorated
        self.context.remove_intermediate("service_principal")
        self.context.remove_intermediate("client_secret")
        return mc
    def construct_default_mc(self):
        # An all-in-one function used to create the complete `ManagedCluster` object, which will later be
        # passed as a parameter to the underlying SDK (mgmt-containerservice) to send the actual request.
        # Note: to reduce the risk of regression introduced by refactoring, this function is not complete
        # and is being implemented gradually.
        # initialize the `ManagedCluster` object, also set up the intermediate named "subscription_id"
        mc = self.init_mc()
        # set up agent pool profile(s)
        mc = self.set_up_agent_pool_profiles(mc)
        # set up linux profile (for ssh access)
        mc = self.set_up_linux_profile(mc)
        # set up windows profile
        mc = self.set_up_windows_profile(mc)
        # set up service principal profile
        mc = self.set_up_service_principal_profile(mc)
        return mc
| true | true |
f730c134a28f77e3c847ee825ab9c72a1458d0e4 | 3,102 | py | Python | src/stk/molecular/topology_graphs/topology_graph/optimizers/collapser.py | andrewtarzia/stk | 1ac2ecbb5c9940fe49ce04cbf5603fd7538c475a | [
"MIT"
] | 21 | 2018-04-12T16:25:24.000Z | 2022-02-14T23:05:43.000Z | src/stk/molecular/topology_graphs/topology_graph/optimizers/collapser.py | JelfsMaterialsGroup/stk | 0d3e1b0207aa6fa4d4d5ee8dfe3a29561abb08a2 | [
"MIT"
] | 8 | 2019-03-19T12:36:36.000Z | 2020-11-11T12:46:00.000Z | src/stk/molecular/topology_graphs/topology_graph/optimizers/collapser.py | supramolecular-toolkit/stk | 0d3e1b0207aa6fa4d4d5ee8dfe3a29561abb08a2 | [
"MIT"
] | 5 | 2018-08-07T13:00:16.000Z | 2021-11-01T00:55:10.000Z | """
Collapser
=========
"""
from .optimizer import Optimizer
from .utilities import get_mch_bonds, get_long_bond_ids, get_subunits
import mchammer as mch
class Collapser(Optimizer):
    """
    Performs rigid-body collapse of molecules [1]_.
    Examples
    --------
    *Structure Optimization*
    Using :class:`.Collapser` will lead to
    :class:`.ConstructedMolecule` structures without long bonds.
    .. testcode:: structure-optimization
        import stk
        bb1 = stk.BuildingBlock('NCCN', [stk.PrimaryAminoFactory()])
        bb2 = stk.BuildingBlock('O=CCC=O', [stk.AldehydeFactory()])
        polymer = stk.ConstructedMolecule(
            topology_graph=stk.polymer.Linear(
                building_blocks=(bb1, bb2),
                repeating_unit='AB',
                num_repeating_units=2,
                optimizer=stk.Collapser(),
            ),
        )
    Optimisation with :mod:`stk` simply collects the final position
    matrix. The optimisation's trajectory can be output using the
    :mod:`MCHammer` implementation if required by the user [1]_.
    The open-source optimization code :mod:`MCHammer` specializes in
    the `collapsing` of molecules with long bonds like those
    constructed by :mod:`stk`. This code is entirely nonphysical and
    is, therefore, completely general to any chemistry.
    References
    ----------
    .. [1] https://github.com/andrewtarzia/MCHammer
    """
    def __init__(
        self,
        step_size=0.1,
        distance_threshold=1.5,
        scale_steps=True,
    ):
        """
        Initialize an instance of :class:`.Collapser`.
        Parameters
        ----------
        step_size : :class:`float`, optional
            The relative size of the step to take during collapse in
            Angstrom.
        distance_threshold : :class:`float`, optional
            Distance between distinct building blocks to use as
            threshold for halting collapse in Angstrom.
        scale_steps : :class:`bool`, optional
            Whether to scale the step of each distinct building block
            by its relative distance from the molecules centroid.
        """
        # The actual collapse algorithm lives in the third-party
        # MCHammer (`mch`) package; this class only adapts stk state.
        self._optimizer = mch.Collapser(
            step_size=step_size,
            distance_threshold=distance_threshold,
            scale_steps=scale_steps,
        )
    def optimize(self, state):
        """Collapse `state` and return it with the optimized position
        matrix; only atom positions change, not connectivity.
        """
        # Define MCHammer molecule to optimize.
        mch_mol = mch.Molecule(
            atoms=(
                mch.Atom(
                    id=atom.get_id(),
                    # Element string taken from the stk atom's class name.
                    element_string=atom.__class__.__name__,
                ) for atom in state.get_atoms()
            ),
            bonds=get_mch_bonds(state),
            position_matrix=state.get_position_matrix(),
        )
        # Run optimization.
        # The second return value of get_result is unused here; stk only
        # needs the final geometry.
        mch_mol, result = self._optimizer.get_result(
            mol=mch_mol,
            bond_pair_ids=tuple(get_long_bond_ids(state)),
            subunits=get_subunits(state),
        )
        return state.with_position_matrix(
            position_matrix=mch_mol.get_position_matrix()
        )
| 28.722222 | 69 | 0.604449 |
from .optimizer import Optimizer
from .utilities import get_mch_bonds, get_long_bond_ids, get_subunits
import mchammer as mch
class Collapser(Optimizer):
    # Rigid-body collapse of molecules, delegating the algorithm to the
    # third-party MCHammer (`mch`) package:
    # https://github.com/andrewtarzia/MCHammer
    def __init__(
        self,
        step_size=0.1,
        distance_threshold=1.5,
        scale_steps=True,
    ):
        """Initialize a :class:`.Collapser`.

        :param step_size: relative collapse step size, in Angstrom
        :param distance_threshold: distance between distinct building
            blocks (Angstrom) at which the collapse halts
        :param scale_steps: whether each building block's step is scaled
            by its relative distance from the molecule's centroid
        """
        self._optimizer = mch.Collapser(
            step_size=step_size,
            distance_threshold=distance_threshold,
            scale_steps=scale_steps,
        )
    def optimize(self, state):
        """Collapse ``state`` and return it with the optimized position
        matrix; only atom positions change, not connectivity.
        """
        # Mirror the construction state as an MCHammer molecule; the
        # element string is taken from the stk atom's class name.
        mch_mol = mch.Molecule(
            atoms=(
                mch.Atom(
                    id=atom.get_id(),
                    element_string=atom.__class__.__name__,
                ) for atom in state.get_atoms()
            ),
            bonds=get_mch_bonds(state),
            position_matrix=state.get_position_matrix(),
        )
        # The second return value of get_result is unused here.
        mch_mol, result = self._optimizer.get_result(
            mol=mch_mol,
            bond_pair_ids=tuple(get_long_bond_ids(state)),
            subunits=get_subunits(state),
        )
        return state.with_position_matrix(
            position_matrix=mch_mol.get_position_matrix()
        )
| true | true |
f730c1eb989c9c84f7652a96a8ab7e841c0ef149 | 339 | py | Python | BERKE/OPENCV/OpenCv4cam.py | vektorelpython24proje/temelbilgiler | bced2723d247dbb8b10cf86e25ee209635f82921 | [
"MIT"
] | null | null | null | BERKE/OPENCV/OpenCv4cam.py | vektorelpython24proje/temelbilgiler | bced2723d247dbb8b10cf86e25ee209635f82921 | [
"MIT"
] | null | null | null | BERKE/OPENCV/OpenCv4cam.py | vektorelpython24proje/temelbilgiler | bced2723d247dbb8b10cf86e25ee209635f82921 | [
"MIT"
] | 3 | 2020-10-24T14:36:14.000Z | 2020-10-24T14:41:13.000Z | import cv2
# Webcam preview demo: copies a patch of each frame elsewhere and blanks
# the source patch, until the user presses "q".
cap = cv2.VideoCapture(0,cv2.CAP_DSHOW)  # camera 0 via DirectShow (Windows backend)
while True:
    ret,frame = cap.read()  # `ret` is the read-success flag (unused here)
    # Copy the square at rows/cols 100:150 onto rows/cols 200:250 ...
    frame[200:250,200:250] = frame[100:150,100:150]
    # ... then paint the source square white ([255,255,255] in BGR).
    frame[100:150,100:150] = [255,255,255]
    cv2.imshow("ilkresim",frame)
    # Poll the keyboard once per frame; quit on "q".
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
cap.release()
cv2.destroyAllWindows() | 18.833333 | 51 | 0.637168 | import cv2
cap = cv2.VideoCapture(0,cv2.CAP_DSHOW)  # camera 0 via DirectShow (Windows backend)
while True:
    ret,frame = cap.read()  # `ret` is the read-success flag (unused here)
    # Copy the square at rows/cols 100:150 onto rows/cols 200:250,
    frame[200:250,200:250] = frame[100:150,100:150]
    # then paint the source square white ([255,255,255] in BGR).
    frame[100:150,100:150] = [255,255,255]
    cv2.imshow("ilkresim",frame)
    # Quit the preview loop when "q" is pressed.
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
cap.release()
cv2.destroyAllWindows() | true | true |
f730c2199ac56dcd1e8d9c7c1237e1e4ccc3fe0f | 6,446 | py | Python | faker/providers/lorem/ru_RU/__init__.py | bdclauser/Faker | b676668214f5f4cf2849eea16d50c835ffba5be9 | [
"MIT"
] | 1 | 2021-01-21T03:44:59.000Z | 2021-01-21T03:44:59.000Z | faker/providers/lorem/ru_RU/__init__.py | bdclauser/Faker | b676668214f5f4cf2849eea16d50c835ffba5be9 | [
"MIT"
] | null | null | null | faker/providers/lorem/ru_RU/__init__.py | bdclauser/Faker | b676668214f5f4cf2849eea16d50c835ffba5be9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .. import Provider as LoremProvider
class Provider(LoremProvider):
word_list = (
'войти', 'монета', 'вскинуть', 'желание', 'экзамен', 'налоговый',
'вытаскивать', 'приятель', 'вздрагивать', 'куча', 'порт', 'точно',
'заплакать', 'изба', 'правление', 'художественный', 'мучительно',
'изображать', 'фонарик', 'миф', 'грустный', 'опасность', 'мера',
'пастух', 'факультет', 'мелькнуть', 'полевой', 'другой', 'выраженный',
'забирать', 'рот', 'народ', 'соответствие', 'тута', 'коммунизм',
'решение', 'плод', 'собеседник', 'возмутиться', 'достоинство',
'господь', 'болото', 'инфекция', 'голубчик', 'сынок', 'пространство',
'прощение', 'прежде', 'хотеть', 'ленинград', 'даль', 'развитый',
'близко', 'более', 'спорт', 'эпоха', 'ответить', 'освободить', 'совет',
'проход', 'палец', 'вчера', 'приличный', 'ярко', 'белье', 'кузнец',
'неожиданно', 'вперед', 'зато', 'кольцо', 'передо', 'мгновение',
'плавно', 'табак', 'число', 'изучить', 'тяжелый', 'рассуждение',
'салон', 'идея', 'что', 'светило', 'порода', 'сомнительный', 'бок',
'очко', 'неудобно', 'советовать', 'отдел', 'помолчать', 'поздравлять',
'пробовать', 'дошлый', 'смеяться', 'упорно', 'вздрогнуть', 'затянуться',
'танцевать', 'песенка', 'выбирать', 'правильный', 'намерение', 'издали',
'запустить', 'наслаждение', 'крыса', 'лететь', 'космос', 'радость',
'поезд', 'находить', 'гулять', 'горький', 'бочок', 'ночь', 'счастье',
'уничтожение', 'дьявол', 'коробка', 'спасть', 'кожа', 'провинция',
'прелесть', 'тюрьма', 'низкий', 'сверкать', 'темнеть', 'солнце',
'дружно', 'настать', 'блин', 'степь', 'самостоятельно', 'крутой',
'картинка', 'зачем', 'рабочий', 'необычный', 'армейский', 'труп',
'ягода', 'около', 'монета', 'естественный', 'юный', 'район', 'скрытый',
'поймать', 'строительство', 'палата', 'миг', 'триста', 'штаб', 'ломать',
'возможно', 'полюбить', 'человечек', 'легко', 'чувство', 'ручей',
'карман', 'деньги', 'неправда', 'сравнение', 'грудь', 'отъезд',
'возникновение', 'степь', 'возбуждение', 'деловой', 'следовательно',
'жидкий', 'сынок', 'художественный', 'поколение', 'расстегнуть', 'пища',
'ученый', 'секунда', 'успокоиться', 'вряд', 'аж', 'вскакивать', 'мимо',
'падать', 'потянуться', 'угроза', 'растеряться', 'бегать', 'стакан',
'о', 'кпсс', 'ныне', 'пол', 'реклама', 'при', 'школьный', 'премьера',
'дальний', 'потрясти', 'освобождение', 'покидать', 'наступать', 'жить',
'какой', 'обида', 'командование', 'девка', 'выражаться', 'головной',
'второй', 'князь', 'социалистический', 'головка', 'привлекать', 'через',
'господь', 'результат', 'отметить', 'ведь', 'падаль', 'покидать',
'художественный', 'правый', 'висеть', 'лапа', 'каюта', 'слишком',
'нервно', 'серьезный', 'зима', 'заработать', 'эффект', 'пропасть',
'плод', 'что', 'висеть', 'холодно', 'единый', 'выкинуть', 'мрачно',
'выгнать', 'умирать', 'иной', 'космос', 'природа', 'функция',
'поставить', 'оборот', 'услать', 'очередной', 'медицина', 'функция',
'зарплата', 'выдержать', 'расстройство', 'адвокат', 'задержать',
'появление', 'инвалид', 'интеллектуальный', 'исследование', 'господь',
'смертельный', 'спичка', 'вариант', 'рай', 'одиннадцать', 'чем',
'манера', 'магазин', 'поговорить', 'полоска', 'помимо', 'построить',
'домашний', 'механический', 'сохранять', 'отражение', 'научить',
'тесно', 'аллея', 'прежний', 'посидеть', 'славный', 'очутиться',
'лететь', 'невозможно', 'порядок', 'выразить', 'спешить', 'сынок',
'ребятишки', 'угроза', 'оставить', 'цвет', 'налево', 'парень',
'миллиард', 'горький', 'трубка', 'подробность', 'пасть', 'непривычный',
'угодный', 'засунуть', 'цель', 'запретить', 'дремать', 'разуметься',
'приходить', 'совещание', 'постоянный', 'анализ', 'терапия', 'приятель',
'процесс', 'академик', 'металл', 'развернуться', 'жестокий', 'интернет',
'банда', 'изменение', 'коллектив', 'похороны', 'устройство',
'торопливый', 'разводить', 'промолчать', 'подземный', 'пламя',
'редактор', 'теория', 'карандаш', 'упор', 'означать', 'бабочка',
'четыре', 'столетие', 'разнообразный', 'витрина', 'нож', 'команда',
'шлем', 'недостаток', 'протягивать', 'за', 'металл', 'добиться',
'сутки', 'четко', 'предоставить', 'тысяча', 'запеть', 'бригада',
'мелочь', 'выраженный', 'пересечь', 'сходить', 'вообще', 'рис', 'банк',
'бак', 'передо', 'назначить', 'важный', 'правление', 'палка', 'трясти',
'уронить', 'витрина', 'основание', 'да', 'перебивать', 'дыхание',
'применяться', 'июнь', 'бетонный', 'избегать', 'умолять', 'мягкий',
'заявление', 'конференция', 'встать', 'свежий', 'сопровождаться',
'цепочка', 'выражение', 'угол', 'ботинок', 'ложиться', 'инструкция',
'присесть', 'решетка', 'еврейский', 'порог', 'зеленый', 'граница',
'ставить', 'смелый', 'сустав', 'роса', 'демократия', 'вывести',
'конструкция', 'задрать', 'багровый', 'военный', 'направо', 'житель',
'товар', 'неправда', 'материя', 'командующий', 'кидать', 'заложить',
'лиловый', 'слать', 'горький', 'пространство', 'провал', 'мусор',
'наткнуться', 'торговля', 'монета', 'место', 'спалить', 'бровь',
'левый', 'хлеб', 'коричневый', 'потом', 'страсть', 'виднеться',
'роскошный', 'способ', 'костер', 'заведение', 'пропадать', 'слишком',
'пятеро', 'мальчишка', 'тусклый', 'неожиданный', 'плясать', 'дурацкий',
'дрогнуть', 'сбросить', 'прошептать', 'беспомощный', 'рота', 'песня',
'тревога', 'некоторый', 'термин', 'нажать', 'видимо', 'валюта', 'набор',
'боец', 'райком', 'новый', 'скользить', 'руководитель', 'волк',
'изредка', 'понятный', 'пропаганда', 'остановить', 'исполнять', 'ход',
'госпожа', 'печатать', 'командир', 'снимать', 'казнь', 'невыносимый',
'спорт', 'тревога', 'уточнить', 'актриса', 'полностью', 'покинуть',
'сверкающий', 'мотоцикл', 'дорогой', 'указанный', 'ремень', 'посвятить',
'один', 'а', 'доставать', 'хозяйка', 'носок', 'написать', 'еврейский',
'призыв', 'увеличиваться', 'равнодушный',
) | 70.835165 | 80 | 0.579429 |
from __future__ import unicode_literals
from .. import Provider as LoremProvider
class Provider(LoremProvider):
word_list = (
'войти', 'монета', 'вскинуть', 'желание', 'экзамен', 'налоговый',
'вытаскивать', 'приятель', 'вздрагивать', 'куча', 'порт', 'точно',
'заплакать', 'изба', 'правление', 'художественный', 'мучительно',
'изображать', 'фонарик', 'миф', 'грустный', 'опасность', 'мера',
'пастух', 'факультет', 'мелькнуть', 'полевой', 'другой', 'выраженный',
'забирать', 'рот', 'народ', 'соответствие', 'тута', 'коммунизм',
'решение', 'плод', 'собеседник', 'возмутиться', 'достоинство',
'господь', 'болото', 'инфекция', 'голубчик', 'сынок', 'пространство',
'прощение', 'прежде', 'хотеть', 'ленинград', 'даль', 'развитый',
'близко', 'более', 'спорт', 'эпоха', 'ответить', 'освободить', 'совет',
'проход', 'палец', 'вчера', 'приличный', 'ярко', 'белье', 'кузнец',
'неожиданно', 'вперед', 'зато', 'кольцо', 'передо', 'мгновение',
'плавно', 'табак', 'число', 'изучить', 'тяжелый', 'рассуждение',
'салон', 'идея', 'что', 'светило', 'порода', 'сомнительный', 'бок',
'очко', 'неудобно', 'советовать', 'отдел', 'помолчать', 'поздравлять',
'пробовать', 'дошлый', 'смеяться', 'упорно', 'вздрогнуть', 'затянуться',
'танцевать', 'песенка', 'выбирать', 'правильный', 'намерение', 'издали',
'запустить', 'наслаждение', 'крыса', 'лететь', 'космос', 'радость',
'поезд', 'находить', 'гулять', 'горький', 'бочок', 'ночь', 'счастье',
'уничтожение', 'дьявол', 'коробка', 'спасть', 'кожа', 'провинция',
'прелесть', 'тюрьма', 'низкий', 'сверкать', 'темнеть', 'солнце',
'дружно', 'настать', 'блин', 'степь', 'самостоятельно', 'крутой',
'картинка', 'зачем', 'рабочий', 'необычный', 'армейский', 'труп',
'ягода', 'около', 'монета', 'естественный', 'юный', 'район', 'скрытый',
'поймать', 'строительство', 'палата', 'миг', 'триста', 'штаб', 'ломать',
'возможно', 'полюбить', 'человечек', 'легко', 'чувство', 'ручей',
'карман', 'деньги', 'неправда', 'сравнение', 'грудь', 'отъезд',
'возникновение', 'степь', 'возбуждение', 'деловой', 'следовательно',
'жидкий', 'сынок', 'художественный', 'поколение', 'расстегнуть', 'пища',
'ученый', 'секунда', 'успокоиться', 'вряд', 'аж', 'вскакивать', 'мимо',
'падать', 'потянуться', 'угроза', 'растеряться', 'бегать', 'стакан',
'о', 'кпсс', 'ныне', 'пол', 'реклама', 'при', 'школьный', 'премьера',
'дальний', 'потрясти', 'освобождение', 'покидать', 'наступать', 'жить',
'какой', 'обида', 'командование', 'девка', 'выражаться', 'головной',
'второй', 'князь', 'социалистический', 'головка', 'привлекать', 'через',
'господь', 'результат', 'отметить', 'ведь', 'падаль', 'покидать',
'художественный', 'правый', 'висеть', 'лапа', 'каюта', 'слишком',
'нервно', 'серьезный', 'зима', 'заработать', 'эффект', 'пропасть',
'плод', 'что', 'висеть', 'холодно', 'единый', 'выкинуть', 'мрачно',
'выгнать', 'умирать', 'иной', 'космос', 'природа', 'функция',
'поставить', 'оборот', 'услать', 'очередной', 'медицина', 'функция',
'зарплата', 'выдержать', 'расстройство', 'адвокат', 'задержать',
'появление', 'инвалид', 'интеллектуальный', 'исследование', 'господь',
'смертельный', 'спичка', 'вариант', 'рай', 'одиннадцать', 'чем',
'манера', 'магазин', 'поговорить', 'полоска', 'помимо', 'построить',
'домашний', 'механический', 'сохранять', 'отражение', 'научить',
'тесно', 'аллея', 'прежний', 'посидеть', 'славный', 'очутиться',
'лететь', 'невозможно', 'порядок', 'выразить', 'спешить', 'сынок',
'ребятишки', 'угроза', 'оставить', 'цвет', 'налево', 'парень',
'миллиард', 'горький', 'трубка', 'подробность', 'пасть', 'непривычный',
'угодный', 'засунуть', 'цель', 'запретить', 'дремать', 'разуметься',
'приходить', 'совещание', 'постоянный', 'анализ', 'терапия', 'приятель',
'процесс', 'академик', 'металл', 'развернуться', 'жестокий', 'интернет',
'банда', 'изменение', 'коллектив', 'похороны', 'устройство',
'торопливый', 'разводить', 'промолчать', 'подземный', 'пламя',
'редактор', 'теория', 'карандаш', 'упор', 'означать', 'бабочка',
'четыре', 'столетие', 'разнообразный', 'витрина', 'нож', 'команда',
'шлем', 'недостаток', 'протягивать', 'за', 'металл', 'добиться',
'сутки', 'четко', 'предоставить', 'тысяча', 'запеть', 'бригада',
'мелочь', 'выраженный', 'пересечь', 'сходить', 'вообще', 'рис', 'банк',
'бак', 'передо', 'назначить', 'важный', 'правление', 'палка', 'трясти',
'уронить', 'витрина', 'основание', 'да', 'перебивать', 'дыхание',
'применяться', 'июнь', 'бетонный', 'избегать', 'умолять', 'мягкий',
'заявление', 'конференция', 'встать', 'свежий', 'сопровождаться',
'цепочка', 'выражение', 'угол', 'ботинок', 'ложиться', 'инструкция',
'присесть', 'решетка', 'еврейский', 'порог', 'зеленый', 'граница',
'ставить', 'смелый', 'сустав', 'роса', 'демократия', 'вывести',
'конструкция', 'задрать', 'багровый', 'военный', 'направо', 'житель',
'товар', 'неправда', 'материя', 'командующий', 'кидать', 'заложить',
'лиловый', 'слать', 'горький', 'пространство', 'провал', 'мусор',
'наткнуться', 'торговля', 'монета', 'место', 'спалить', 'бровь',
'левый', 'хлеб', 'коричневый', 'потом', 'страсть', 'виднеться',
'роскошный', 'способ', 'костер', 'заведение', 'пропадать', 'слишком',
'пятеро', 'мальчишка', 'тусклый', 'неожиданный', 'плясать', 'дурацкий',
'дрогнуть', 'сбросить', 'прошептать', 'беспомощный', 'рота', 'песня',
'тревога', 'некоторый', 'термин', 'нажать', 'видимо', 'валюта', 'набор',
'боец', 'райком', 'новый', 'скользить', 'руководитель', 'волк',
'изредка', 'понятный', 'пропаганда', 'остановить', 'исполнять', 'ход',
'госпожа', 'печатать', 'командир', 'снимать', 'казнь', 'невыносимый',
'спорт', 'тревога', 'уточнить', 'актриса', 'полностью', 'покинуть',
'сверкающий', 'мотоцикл', 'дорогой', 'указанный', 'ремень', 'посвятить',
'один', 'а', 'доставать', 'хозяйка', 'носок', 'написать', 'еврейский',
'призыв', 'увеличиваться', 'равнодушный',
) | true | true |
f730c23f0da51300b98e1b1ee705c0aa5cefff70 | 1,034 | py | Python | tiki/tiki/spiders/tiki.py | Necrophote/telecrawl | 8512e0ae9f6b44bb64cba29a13c382024f265ca5 | [
"MIT"
] | null | null | null | tiki/tiki/spiders/tiki.py | Necrophote/telecrawl | 8512e0ae9f6b44bb64cba29a13c382024f265ca5 | [
"MIT"
] | null | null | null | tiki/tiki/spiders/tiki.py | Necrophote/telecrawl | 8512e0ae9f6b44bb64cba29a13c382024f265ca5 | [
"MIT"
] | null | null | null | import scrapy
from scrapy.loader import ItemLoader
from tiki.items import TiviItem
class TikiSpider(scrapy.Spider):
    """Scrape TV product listings from tiki.vn into TiviItem objects."""
    name = "tiki"
    allowed_domains = ["tiki.vn"]
    start_urls = {"https://tiki.vn/tivi/c5015"}
    def parse(self, response):
        """Yield one loaded item per product card, then follow pagination."""
        tks = response.css('div.product-item')
        for tk in tks:
            loader = ItemLoader(item=TiviItem(), selector=tk)
            # crawl product name and code from title attribute
            loader.add_css('product_name', 'a::attr(title)')
            loader.add_css('product_code', 'a::attr(title)')
            # crawl official final price only
            loader.add_css('price', '.final-price::text')
            yield loader.load_item()
        # yield next page
        for a in response.css('li a.next'):
            yield response.follow(a, callback=self.parse)
    # NOTE(review): a commented-out duplicate of this parse logic (plain
    # dict yields instead of ItemLoader) was removed from the class body.
# yield response.follow(a, callback=self.parse) | 27.210526 | 53 | 0.679884 | import scrapy
from scrapy.loader import ItemLoader
from tiki.items import TiviItem
class TikiSpider(scrapy.Spider):
    """Scrape TV product listings from tiki.vn into TiviItem objects."""
    name = "tiki"
    allowed_domains = ["tiki.vn"]
    start_urls = {"https://tiki.vn/tivi/c5015"}
    def parse(self, response):
        """Yield one loaded item per product card, then follow pagination."""
        tks = response.css('div.product-item')
        for tk in tks:
            loader = ItemLoader(item=TiviItem(), selector=tk)
            # Both name and code come from the anchor's title attribute.
            loader.add_css('product_name', 'a::attr(title)')
            loader.add_css('product_code', 'a::attr(title)')
            # Only the official final price is collected.
            loader.add_css('price', '.final-price::text')
            yield loader.load_item()
        # Recurse into the next results page(s), if any.
        for a in response.css('li a.next'):
            yield response.follow(a, callback=self.parse)
| true | true |
f730c43933f5c965f1163cbe92ea2e00c357ef48 | 115 | py | Python | glance/contrib/plugins/artifacts_sample/__init__.py | wkoathp/glance | eb0c47047ddc28371f546437118986ed904f41d3 | [
"Apache-2.0"
] | 3 | 2015-12-22T09:04:44.000Z | 2017-10-18T15:26:03.000Z | glance/contrib/plugins/artifacts_sample/__init__.py | wkoathp/glance | eb0c47047ddc28371f546437118986ed904f41d3 | [
"Apache-2.0"
] | null | null | null | glance/contrib/plugins/artifacts_sample/__init__.py | wkoathp/glance | eb0c47047ddc28371f546437118986ed904f41d3 | [
"Apache-2.0"
] | null | null | null | from v1 import artifact as art1
from v2 import artifact as art2
MY_ARTIFACT = [art1.MyArtifact, art2.MyArtifact]
| 19.166667 | 48 | 0.791304 | from v1 import artifact as art1
from v2 import artifact as art2
MY_ARTIFACT = [art1.MyArtifact, art2.MyArtifact]
| true | true |
f730c5d7592773f3e022c3b161473f2b1d4a7b40 | 9,591 | py | Python | v6.0.6/system/fortios_system_arp_table.py | fortinet-solutions-cse/ansible_fgt_modules | c45fba49258d7c9705e7a8fd9c2a09ea4c8a4719 | [
"Apache-2.0"
] | 14 | 2018-09-25T20:35:25.000Z | 2021-07-14T04:30:54.000Z | v6.0.6/system/fortios_system_arp_table.py | fortinet-solutions-cse/ansible_fgt_modules | c45fba49258d7c9705e7a8fd9c2a09ea4c8a4719 | [
"Apache-2.0"
] | 32 | 2018-10-09T04:13:42.000Z | 2020-05-11T07:20:28.000Z | v6.0.6/system/fortios_system_arp_table.py | fortinet-solutions-cse/ansible_fgt_modules | c45fba49258d7c9705e7a8fd9c2a09ea4c8a4719 | [
"Apache-2.0"
] | 11 | 2018-10-09T00:14:53.000Z | 2021-11-03T10:54:09.000Z | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_arp_table
short_description: Configure ARP table in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify system feature and arp_table category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.6
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
system_arp_table:
description:
- Configure ARP table.
default: null
type: dict
suboptions:
id:
description:
- Unique integer ID of the entry.
required: true
type: int
interface:
description:
- Interface name. Source system.interface.name.
type: str
ip:
description:
- IP address.
type: str
mac:
description:
- MAC address.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure ARP table.
fortios_system_arp_table:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
system_arp_table:
id: "3"
interface: "<your_own_value> (source system.interface.name)"
ip: "<your_own_value>"
mac: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Configure transport options and sign in to the FortiGate.

    :param data: module parameters containing 'host', 'username', 'password',
        'ssl_verify' and optionally 'https'
    :param fos: FortiOSAPI-style handler exposing debug/https/login
    """
    host = data['host']
    username = data['username']
    password = data['password']
    ssl_verify = data['ssl_verify']

    fos.debug('on')
    # HTTPS is the default transport; only switch it off when the caller
    # explicitly disabled it.
    if data.get('https', True):
        fos.https('on')
    else:
        fos.https('off')
    fos.login(host, username, password, verify=ssl_verify)
def filter_system_arp_table_data(json):
    """Return a copy of *json* restricted to the keys the arp-table endpoint accepts.

    Keys that are absent, or present with an explicit None value, are dropped
    so they are not sent to the device.

    :param json: the 'system_arp_table' parameter dict from the playbook
    :return: a new dict containing only supported, non-None attributes
    """
    option_list = ['id', 'interface', 'ip', 'mac']
    return {attribute: json[attribute]
            for attribute in option_list
            if attribute in json and json[attribute] is not None}
def underscore_to_hyphen(data):
    """Recursively replace underscores with hyphens in all dictionary keys.

    FortiOS REST attribute names use hyphens while Ansible argument names use
    underscores; this converts a (possibly nested) argument structure into the
    form the API expects.

    :param data: arbitrary structure of dicts/lists/scalars
    :return: the converted structure (dicts are rebuilt, lists mutated in place)
    """
    if isinstance(data, list):
        # BUG FIX: write the converted element back into the list. Dicts are
        # rebuilt by the recursive call, so rebinding the loop variable alone
        # silently dropped the conversion for dicts nested inside lists.
        for index, elem in enumerate(data):
            data[index] = underscore_to_hyphen(elem)
    elif isinstance(data, dict):
        new_data = {}
        for k, v in data.items():
            new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
        data = new_data

    return data
def system_arp_table(data, fos):
    """Apply the desired state of one ARP table entry on the device.

    :param data: module parameters ('vdom', 'state', 'system_arp_table')
    :param fos: connection handler exposing set/delete
    :return: the raw API response dict
    """
    vdom = data['vdom']
    state = data['state']
    payload = underscore_to_hyphen(filter_system_arp_table_data(data['system_arp_table']))

    if state == "present":
        return fos.set('system',
                       'arp-table',
                       data=payload,
                       vdom=vdom)

    if state == "absent":
        return fos.delete('system',
                          'arp-table',
                          mkey=payload['id'],
                          vdom=vdom)
def is_successful_status(status):
    """Return True when the API reply indicates success.

    A 404 answer to a DELETE is also treated as success: deleting an object
    that is already absent keeps the module idempotent.
    """
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_system(data, fos):
    """Route the request to the arp-table handler and summarise the outcome.

    :return: tuple (is_error, changed, raw_response) consumed by main()
    """
    # NOTE(review): 'resp' is only bound when 'system_arp_table' was provided;
    # the argument spec marks it optional, so a missing value would raise here.
    if data['system_arp_table']:
        resp = system_arp_table(data, fos)

    is_error = not is_successful_status(resp)
    changed = resp['status'] == "success"
    return is_error, changed, resp
def main():
    """Module entry point: parse arguments, connect to FortiOS and apply the change."""
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "state": {"required": True, "type": "str",
                  "choices": ["present", "absent"]},
        "system_arp_table": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "id": {"required": True, "type": "int"},
                "interface": {"required": False, "type": "str"},
                "ip": {"required": False, "type": "str"},
                "mac": {"required": False, "type": "str"}
            }
        }
    }
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # legacy_mode refers to using fortiosapi instead of HTTPAPI: all three
    # credentials must be supplied explicitly for a direct connection.
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
        'username' in module.params and module.params['username'] is not None and \
        'password' in module.params and module.params['password'] is not None
    if not legacy_mode:
        # Preferred path: reuse the persistent HTTPAPI connection plugin socket.
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)
            is_error, has_changed, result = fortios_system(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy path: open a dedicated session via the optional fortiosapi package.
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")
        fos = FortiOSAPI()
        login(module.params, fos)
        is_error, has_changed, result = fortios_system(module.params, fos)
        fos.logout()
    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
    main()
| 29.151976 | 97 | 0.607966 |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_arp_table
short_description: Configure ARP table in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify system feature and arp_table category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.6
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
system_arp_table:
description:
- Configure ARP table.
default: null
type: dict
suboptions:
id:
description:
- Unique integer ID of the entry.
required: true
type: int
interface:
description:
- Interface name. Source system.interface.name.
type: str
ip:
description:
- IP address.
type: str
mac:
description:
- MAC address.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure ARP table.
fortios_system_arp_table:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
system_arp_table:
id: "3"
interface: "<your_own_value> (source system.interface.name)"
ip: "<your_own_value>"
mac: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_system_arp_table_data(json):
option_list = ['id', 'interface', 'ip',
'mac']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
    """Recursively replace underscores with hyphens in all dictionary keys.

    FortiOS REST attribute names use hyphens while Ansible argument names use
    underscores; this converts a (possibly nested) argument structure into the
    form the API expects.

    :param data: arbitrary structure of dicts/lists/scalars
    :return: the converted structure (dicts are rebuilt, lists mutated in place)
    """
    if isinstance(data, list):
        # BUG FIX: write the converted element back into the list. Dicts are
        # rebuilt by the recursive call, so rebinding the loop variable alone
        # silently dropped the conversion for dicts nested inside lists.
        for index, elem in enumerate(data):
            data[index] = underscore_to_hyphen(elem)
    elif isinstance(data, dict):
        new_data = {}
        for k, v in data.items():
            new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
        data = new_data

    return data
def system_arp_table(data, fos):
vdom = data['vdom']
state = data['state']
system_arp_table_data = data['system_arp_table']
filtered_data = underscore_to_hyphen(filter_system_arp_table_data(system_arp_table_data))
if state == "present":
return fos.set('system',
'arp-table',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('system',
'arp-table',
mkey=filtered_data['id'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_system(data, fos):
if data['system_arp_table']:
resp = system_arp_table(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"system_arp_table": {
"required": False, "type": "dict", "default": None,
"options": {
"id": {"required": True, "type": "int"},
"interface": {"required": False, "type": "str"},
"ip": {"required": False, "type": "str"},
"mac": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_system(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_system(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| true | true |
f730c69d6e5ae3c5d5a6129b26bdaf4232dd5224 | 161 | py | Python | coffee_small.py | meik99/CoffeeMakerApp | aa6a3785812f41138f66e58c195ed021ef2d4cc3 | [
"CC0-1.0"
] | null | null | null | coffee_small.py | meik99/CoffeeMakerApp | aa6a3785812f41138f66e58c195ed021ef2d4cc3 | [
"CC0-1.0"
] | null | null | null | coffee_small.py | meik99/CoffeeMakerApp | aa6a3785812f41138f66e58c195ed021ef2d4cc3 | [
"CC0-1.0"
] | null | null | null | import time
import RPi.GPIO as IO
PIN = 4
IO.setmode(IO.BCM)
IO.setup(PIN, IO.OUT)
IO.output(PIN, IO.HIGH)
time.sleep(15)
IO.output(PIN, IO.LOW)
IO.cleanup() | 12.384615 | 23 | 0.701863 | import time
import RPi.GPIO as IO
PIN = 4
IO.setmode(IO.BCM)
IO.setup(PIN, IO.OUT)
IO.output(PIN, IO.HIGH)
time.sleep(15)
IO.output(PIN, IO.LOW)
IO.cleanup() | true | true |
f730c70d8ffa1539142454cd5cf5157d8a5a5d00 | 226 | py | Python | historia/utils/__init__.py | eranimo/historia | 5e0b047d4bcdd534f48f8b9bf19d425b0b31a3fd | [
"MIT"
] | 6 | 2016-04-26T18:39:36.000Z | 2021-09-01T09:13:38.000Z | historia/utils/__init__.py | eranimo/historia | 5e0b047d4bcdd534f48f8b9bf19d425b0b31a3fd | [
"MIT"
] | null | null | null | historia/utils/__init__.py | eranimo/historia | 5e0b047d4bcdd534f48f8b9bf19d425b0b31a3fd | [
"MIT"
] | 4 | 2016-04-10T23:47:23.000Z | 2021-08-15T11:40:28.000Z | from historia.utils.id import unique_id
from historia.utils.color import random_country_colors
from historia.utils.store import Store
from historia.utils.timer import Timer
from historia.utils.trading import position_in_range
| 37.666667 | 54 | 0.867257 | from historia.utils.id import unique_id
from historia.utils.color import random_country_colors
from historia.utils.store import Store
from historia.utils.timer import Timer
from historia.utils.trading import position_in_range
| true | true |
f730c75227bfe93ee337638f1e0109a02d1051eb | 5,810 | py | Python | examples/python/01-list-properties.py | lkucalaba/tiscamera | e1fa7b21bb4dd777ae8039dfa072cfa2daa88244 | [
"Apache-2.0"
] | 241 | 2015-02-20T09:10:41.000Z | 2022-03-18T08:53:26.000Z | examples/python/01-list-properties.py | lkucalaba/tiscamera | e1fa7b21bb4dd777ae8039dfa072cfa2daa88244 | [
"Apache-2.0"
] | 435 | 2015-01-19T10:18:01.000Z | 2022-03-28T08:03:08.000Z | examples/python/01-list-properties.py | lkucalaba/tiscamera | e1fa7b21bb4dd777ae8039dfa072cfa2daa88244 | [
"Apache-2.0"
] | 141 | 2015-01-03T17:54:08.000Z | 2022-02-09T09:55:15.000Z | #!/usr/bin/env python3
# Copyright 2017 The Imaging Source Europe GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This example will show you how to list available properties
#
import sys
import gi
gi.require_version("Tcam", "0.1")
gi.require_version("Gst", "1.0")
from gi.repository import Tcam, Gst
def list_properties(camera):
    """Print name, type, value, default, limits and grouping of every camera property.

    :param camera: element implementing the Tcam property interface
        (e.g. the 'tcambin' GStreamer element)
    """
    property_names = camera.get_tcam_property_names()
    for name in property_names:
        # get_tcam_property returns a flat tuple; ret is False when the
        # property could not be read.
        (ret, value,
         min_value, max_value,
         default_value, step_size,
         value_type, flags,
         category, group) = camera.get_tcam_property(name)
        if not ret:
            print("could not receive value {}".format(name))
            continue
        # Formatting depends on the property type reported by the camera.
        if value_type == "integer" or value_type == "double":
            print("{}({}) value: {} default: {} min: {} max: {} grouping: {} - {}".format(name,
                                                                                          value_type,
                                                                                          value, default_value,
                                                                                          min_value, max_value,
                                                                                          category, group))
        elif value_type == "string":
            print("{}(string) value: {} default: {} grouping: {} - {}".format(name,
                                                                              value,
                                                                              default_value,
                                                                              category,
                                                                              group))
        elif value_type == "button":
            print("{}(button) grouping is {} - {}".format(name,
                                                          category,
                                                          group))
        elif value_type == "boolean":
            print("{}(boolean) value: {} default: {} grouping: {} - {}".format(name,
                                                                               value,
                                                                               default_value,
                                                                               category,
                                                                               group))
        elif value_type == "enum":
            # Enums additionally expose their list of valid entries.
            enum_entries = camera.get_tcam_menu_entries(name)
            print("{}(enum) value: {} default: {} grouping {} - {}".format(name,
                                                                           value,
                                                                           default_value,
                                                                           category,
                                                                           group))
            print("Entries: ")
            for entry in enum_entries:
                print("\t {}".format(entry))
        else:
            print("This should not happen.")
def block_until_playing(pipeline):
    """Block until *pipeline* has finished its pending state change.

    Polls Gst.Element.get_state with a short timeout so a failed state change
    is reported promptly instead of blocking forever.

    :param pipeline: the Gst pipeline to wait on
    :return: True once the state change succeeded, False if it failed
    """
    while True:
        # wait 0.1 seconds for something to happen (timeout is in nanoseconds);
        # an ASYNC return simply loops and polls again
        change_return, state, pending = pipeline.get_state(100000000)
        if change_return == Gst.StateChangeReturn.SUCCESS:
            return True
        elif change_return == Gst.StateChangeReturn.FAILURE:
            print("Failed to change state {} {} {}".format(change_return,
                                                           state,
                                                           pending))
            return False
def main():
    """Build a tcambin pipeline and list camera properties before and during PLAYING.

    :return: 0 on success, 1 when the pipeline could not be created
    """
    Gst.init(sys.argv)  # init gstreamer
    # this line sets the gstreamer default logging level
    # it can be removed in normal applications
    # gstreamer logging can contain very useful information
    # when debugging your application
    # see https://gstreamer.freedesktop.org/documentation/tutorials/basic/debugging-tools.html
    # for further details
    Gst.debug_set_default_threshold(Gst.DebugLevel.WARNING)
    pipeline = Gst.parse_launch("tcambin name=source ! fakesink")
    if not pipeline:
        print("Unable to create pipeline")
        return 1
    # set this to a specific camera serial if you
    # do not want to use the default camera
    serial = None
    # get the tcambin to retrieve a property list through it
    source = pipeline.get_by_name("source")
    # serial is defined, thus make the source open that device
    if serial is not None:
        source.set_property("serial", serial)
    print("Properties before state PLAYING:")
    list_properties(source)
    # in the READY state the camera will always be initialized
    # in the PLAYING state additional properties may appear from gstreamer elements
    pipeline.set_state(Gst.State.PLAYING)
    # helper function to ensure we have the right state
    # alternatively wait for the first image
    if not block_until_playing(pipeline):
        print("Unable to start pipeline")
    print("Properties during state PLAYING:")
    list_properties(source)
    pipeline.set_state(Gst.State.NULL)
    return 0
if __name__ == "__main__":
    sys.exit(main())
import sys
import gi
gi.require_version("Tcam", "0.1")
gi.require_version("Gst", "1.0")
from gi.repository import Tcam, Gst
def list_properties(camera):
property_names = camera.get_tcam_property_names()
for name in property_names:
(ret, value,
min_value, max_value,
default_value, step_size,
value_type, flags,
category, group) = camera.get_tcam_property(name)
if not ret:
print("could not receive value {}".format(name))
continue
if value_type == "integer" or value_type == "double":
print("{}({}) value: {} default: {} min: {} max: {} grouping: {} - {}".format(name,
value_type,
value, default_value,
min_value, max_value,
category, group))
elif value_type == "string":
print("{}(string) value: {} default: {} grouping: {} - {}".format(name,
value,
default_value,
category,
group))
elif value_type == "button":
print("{}(button) grouping is {} - {}".format(name,
category,
group))
elif value_type == "boolean":
print("{}(boolean) value: {} default: {} grouping: {} - {}".format(name,
value,
default_value,
category,
group))
elif value_type == "enum":
enum_entries = camera.get_tcam_menu_entries(name)
print("{}(enum) value: {} default: {} grouping {} - {}".format(name,
value,
default_value,
category,
group))
print("Entries: ")
for entry in enum_entries:
print("\t {}".format(entry))
else:
print("This should not happen.")
def block_until_playing(pipeline):
while True:
change_return, state, pending = pipeline.get_state(100000000)
if change_return == Gst.StateChangeReturn.SUCCESS:
return True
elif change_return == Gst.StateChangeReturn.FAILURE:
print("Failed to change state {} {} {}".format(change_return,
state,
pending))
return False
def main():
Gst.init(sys.argv)
Gst.debug_set_default_threshold(Gst.DebugLevel.WARNING)
pipeline = Gst.parse_launch("tcambin name=source ! fakesink")
if not pipeline:
print("Unable to create pipeline")
return 1
serial = None
source = pipeline.get_by_name("source")
if serial is not None:
source.set_property("serial", serial)
print("Properties before state PLAYING:")
list_properties(source)
pipeline.set_state(Gst.State.PLAYING)
if not block_until_playing(pipeline):
print("Unable to start pipeline")
print("Properties during state PLAYING:")
list_properties(source)
pipeline.set_state(Gst.State.NULL)
return 0
if __name__ == "__main__":
sys.exit(main())
| true | true |
f730c798c4b285ab0b5e4e74ccc09a60d81e0f7c | 999 | py | Python | xknx/io/__init__.py | Trance-Paradox/xknx | d5603361080f96aafd19c14d17fb1ff391064b3f | [
"MIT"
] | null | null | null | xknx/io/__init__.py | Trance-Paradox/xknx | d5603361080f96aafd19c14d17fb1ff391064b3f | [
"MIT"
] | null | null | null | xknx/io/__init__.py | Trance-Paradox/xknx | d5603361080f96aafd19c14d17fb1ff391064b3f | [
"MIT"
] | null | null | null | """
This package contains all objects managing Tunneling and Routing Connections..
- KNXIPInterface is the overall managing class.
- GatewayScanner searches for available KNX/IP devices in the local network.
- Routing uses UDP/Multicast to communicate with KNX/IP device.
- Tunnel uses UDP packets and builds a static tunnel with KNX/IP device.
"""
# flake8: noqa
from .connection import ConnectionConfig, ConnectionType
from .const import DEFAULT_MCAST_GRP, DEFAULT_MCAST_PORT
from .gateway_scanner import GatewayDescriptor, GatewayScanFilter, GatewayScanner
from .knxip_interface import KNXIPInterface, knx_interface_factory
from .routing import Routing
from .self_description import DescriptionQuery
from .tunnel import TCPTunnel, UDPTunnel
__all__ = [
"DEFAULT_MCAST_GRP",
"DEFAULT_MCAST_PORT",
"DescriptionQuery",
"GatewayScanFilter",
"GatewayScanner",
"ConnectionConfig",
"ConnectionType",
"KNXIPInterface",
"Routing",
"TCPTunnel",
"UDPTunnel",
]
| 32.225806 | 81 | 0.778779 |
from .connection import ConnectionConfig, ConnectionType
from .const import DEFAULT_MCAST_GRP, DEFAULT_MCAST_PORT
from .gateway_scanner import GatewayDescriptor, GatewayScanFilter, GatewayScanner
from .knxip_interface import KNXIPInterface, knx_interface_factory
from .routing import Routing
from .self_description import DescriptionQuery
from .tunnel import TCPTunnel, UDPTunnel
__all__ = [
"DEFAULT_MCAST_GRP",
"DEFAULT_MCAST_PORT",
"DescriptionQuery",
"GatewayScanFilter",
"GatewayScanner",
"ConnectionConfig",
"ConnectionType",
"KNXIPInterface",
"Routing",
"TCPTunnel",
"UDPTunnel",
]
| true | true |
f730c7e66e7b5401646dde6b3811d9692fd237b4 | 19,430 | py | Python | aiida/common/utils.py | azadoks/aiida-core | b806b7fef8fc79090deccfe2019b77cb922e0581 | [
"MIT",
"BSD-3-Clause"
] | 180 | 2019-07-12T07:45:26.000Z | 2022-03-22T13:16:57.000Z | aiida/common/utils.py | azadoks/aiida-core | b806b7fef8fc79090deccfe2019b77cb922e0581 | [
"MIT",
"BSD-3-Clause"
] | 2,325 | 2019-07-04T13:41:44.000Z | 2022-03-31T12:17:10.000Z | aiida/common/utils.py | azadoks/aiida-core | b806b7fef8fc79090deccfe2019b77cb922e0581 | [
"MIT",
"BSD-3-Clause"
] | 88 | 2019-07-06T01:42:39.000Z | 2022-03-18T14:20:09.000Z | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Miscellaneous generic utility functions and classes."""
from datetime import datetime
import filecmp
import inspect
import io
import os
import re
import sys
from typing import Any, Dict
from uuid import UUID
from .lang import classproperty
def get_new_uuid():
    """Return a new random UUID (version 4) as a string.

    Typically used to assign the UUID of new nodes.
    """
    import uuid
    return str(uuid.uuid4())
def validate_uuid(given_uuid: str) -> bool:
    """Return whether *given_uuid* is a canonical version-4 UUID string."""
    try:
        parsed = UUID(given_uuid, version=4)
    except ValueError:
        # Not parseable as a UUID at all
        return False

    # The UUID constructor tolerates variations (braces, missing dashes) and
    # forces the version/variant bits; only accept inputs whose canonical
    # form round-trips to the original string unchanged.
    return str(parsed) == given_uuid
def validate_list_of_string_tuples(val, tuple_length):
    """Validate that *val* is a list/tuple of fixed-length string tuples.

    Checks that ``val`` is a list or tuple whose elements are themselves
    lists or tuples of exactly ``tuple_length`` strings.

    :return: True when valid
    :raises ValidationError: when any check fails
    """
    from aiida.common.exceptions import ValidationError

    err_msg = (
        'the value must be a list (or tuple) '
        'of length-N list (or tuples), whose elements are strings; '
        'N={}'.format(tuple_length)
    )

    if not isinstance(val, (list, tuple)):
        raise ValidationError(err_msg)

    for element in val:
        shape_ok = isinstance(element, (list, tuple)) and len(element) == tuple_length
        if not shape_ok or not all(isinstance(entry, str) for entry in element):
            raise ValidationError(err_msg)

    return True
def get_unique_filename(filename, list_of_filenames):
    """Return *filename*, or a variant of it that is not in *list_of_filenames*.

    When the name collides, an increasing integer is inserted between the base
    name and the extension ('a.txt' -> 'a-1.txt', 'a-2.txt', ...) until the
    result is unique.

    :param filename: the filename to add
    :param list_of_filenames: existing filenames that must not be duplicated
    :returns: either filename itself or a numbered variant of it
    """
    if filename not in list_of_filenames:
        return filename

    basename, ext = os.path.splitext(filename)

    # Linear probing; collision lists are expected to be short.
    counter = 1
    candidate = f'{basename}-{counter}{ext}'
    while candidate in list_of_filenames:
        counter += 1
        candidate = f'{basename}-{counter}{ext}'
    return candidate
def str_timedelta(dt, max_num_fields=3, short=False, negative_to_zero=False):  # pylint: disable=invalid-name
    """
    Given a dt in seconds, return it in a HH:MM:SS format.

    :param dt: a TimeDelta object
    :param max_num_fields: maximum number of non-zero fields to show
        (for instance if the number of days is non-zero, shows only
        days, hours and minutes, but not seconds)
    :param short: if False, print always ``max_num_fields`` fields, even
        if they are zero. If True, do not print the first fields, if they
        are zero.
    :param negative_to_zero: if True, set dt = 0 if dt < 0.
    :return: a human-readable string such as '2h:03m:05s ago' or
        '10m:00s in the future'
    """
    if max_num_fields <= 0:
        raise ValueError('max_num_fields must be > 0')

    # Use total_seconds() rather than dt.seconds: it is correct for deltas
    # longer than one day and for negative values, see
    # http://docs.python.org/2/library/datetime.html
    s_tot = dt.total_seconds()
    s_tot = int(s_tot)

    if negative_to_zero:
        s_tot = max(s_tot, 0)

    negative = (s_tot < 0)
    s_tot = abs(s_tot)

    negative_string = ' in the future' if negative else ' ago'
    # For the moment stay away from months and years, difficult to get
    days, remainder = divmod(s_tot, 3600 * 24)
    hours, remainder = divmod(remainder, 3600)
    minutes, seconds = divmod(remainder, 60)

    all_fields = [(days, 'D'), (hours, 'h'), (minutes, 'm'), (seconds, 's')]
    fields = []
    start_insert = False
    counter = 0
    for idx, field in enumerate(all_fields):
        # Start emitting at the first non-zero field, or early enough that
        # max_num_fields trailing fields can still be shown.
        if field[0] != 0:
            start_insert = True
        if (len(all_fields) - idx) <= max_num_fields:
            start_insert = True
        if start_insert:
            if counter >= max_num_fields:
                break
            fields.append(field)
            counter += 1

    if short:
        # Drop leading zero-valued fields (e.g. '00h') from the output.
        while len(fields) > 1:  # at least one element has to remain
            if fields[0][0] != 0:
                break
            fields.pop(0)  # remove first element

    # Join the fields
    raw_string = ':'.join(['{:02d}{}'.format(*f) for f in fields])

    # Strip a single leading zero ('02h' -> '2h') for readability.
    if raw_string.startswith('0'):
        raw_string = raw_string[1:]

    # Return the resulting string, appending a suitable string if the time
    # is negative
    return f'{raw_string}{negative_string}'
def get_class_string(obj):
    """Return the fully qualified dotted name ('module.ClassName') of *obj*.

    Accepts either a class (returns its own qualified name) or an instance
    (returns the qualified name of its type).
    """
    target = obj if inspect.isclass(obj) else type(obj)
    return f'{target.__module__}.{target.__name__}'
def get_object_from_string(class_string):
    """Load and return the object identified by *class_string*.

    The string has the form produced by :func:`get_class_string`: a dotted
    module path followed by the attribute name.
    """
    import importlib

    module_name, _, attribute_name = class_string.rpartition('.')
    module = importlib.import_module(module_name)
    return getattr(module, attribute_name)
def grouper(n, iterable):  # pylint: disable=invalid-name
    """Yield the elements of *iterable* in tuples of length *n*.

    The final tuple is shorter when the iterable is exhausted mid-chunk;
    no fill value is applied.

    :param n: length of each tuple (except the last one, that will have
        length <= n)
    :param iterable: the iterable to divide in groups
    """
    import itertools

    iterator = iter(iterable)
    # iter(callable, sentinel) keeps calling until an empty chunk appears.
    yield from iter(lambda: tuple(itertools.islice(iterator, n)), ())
class ArrayCounter:
    """
    A counter & a method that increments it and returns its value.
    It is used in various tests.
    """
    # Current counter value; starts at -1 so the first call returns 0.
    seq = None

    def __init__(self):
        self.seq = -1

    def array_counter(self):
        """Increment the counter and return the new value (0 on the first call)."""
        self.seq += 1
        return self.seq
def are_dir_trees_equal(dir1, dir2):
    """
    Compare two directories recursively. Files in each directory are
    assumed to be equal if their names and contents are equal.

    @param dir1: First directory path
    @param dir2: Second directory path

    @return: a (bool, message) tuple: (True, <summary>) if the directory
        trees are the same and there were no errors while accessing the
        directories or files, (False, <reason>) otherwise.
    """
    # Directory comparison: detects files present on only one side and
    # entries that cannot be compared ('funny' files).
    dirs_cmp = filecmp.dircmp(dir1, dir2)
    if dirs_cmp.left_only or dirs_cmp.right_only or dirs_cmp.funny_files:
        return (
            False, 'Left directory: {}, right directory: {}, files only '
            'in left directory: {}, files only in right directory: '
            '{}, not comparable files: {}'.format(
                dir1, dir2, dirs_cmp.left_only, dirs_cmp.right_only, dirs_cmp.funny_files
            )
        )

    # If the directories contain the same files, compare the common files
    # (shallow=False compares actual contents, not just os.stat signatures)
    (_, mismatch, errors) = filecmp.cmpfiles(dir1, dir2, dirs_cmp.common_files, shallow=False)
    if mismatch:
        return (False, f"The following files in the directories {dir1} and {dir2} don't match: {mismatch}")
    if errors:
        return (False, f"The following files in the directories {dir1} and {dir2} aren't regular: {errors}")

    # Recurse into subdirectories present on both sides.
    for common_dir in dirs_cmp.common_dirs:
        new_dir1 = os.path.join(dir1, common_dir)
        new_dir2 = os.path.join(dir2, common_dir)
        res, msg = are_dir_trees_equal(new_dir1, new_dir2)
        if not res:
            return False, msg

    return True, f'The given directories ({dir1} and {dir2}) are equal'
class Prettifier:
"""
Class to manage prettifiers (typically for labels of kpoints
in band plots)
"""
@classmethod
def _prettify_label_pass(cls, label):
"""
No-op prettifier, simply returns the same label
:param label: a string to prettify
"""
return label
@classmethod
def _prettify_label_agr(cls, label):
"""
Prettifier for XMGrace
:param label: a string to prettify
"""
label = (
label
.replace('GAMMA', r'\xG\f{}')
.replace('DELTA', r'\xD\f{}')
.replace('LAMBDA', r'\xL\f{}')
.replace('SIGMA', r'\xS\f{}')
) # yapf:disable
return re.sub(r'_(.?)', r'\\s\1\\N', label)
@classmethod
def _prettify_label_agr_simple(cls, label):
"""
Prettifier for XMGrace (for old label names)
:param label: a string to prettify
"""
if label == 'G':
return r'\xG'
return re.sub(r'(\d+)', r'\\s\1\\N', label)
@classmethod
def _prettify_label_gnuplot(cls, label):
"""
Prettifier for Gnuplot
:note: uses unicode, returns unicode strings (potentially, if needed)
:param label: a string to prettify
"""
label = (
label
.replace('GAMMA', 'Γ')
.replace('DELTA', 'Δ')
.replace('LAMBDA', 'Λ')
.replace('SIGMA', 'Σ')
) # yapf:disable
return re.sub(r'_(.?)', r'_{\1}', label)
@classmethod
def _prettify_label_gnuplot_simple(cls, label):
"""
Prettifier for Gnuplot (for old label names)
:note: uses unicode, returns unicode strings (potentially, if needed)
:param label: a string to prettify
"""
if label == 'G':
return 'Γ'
return re.sub(r'(\d+)', r'_{\1}', label)
@classmethod
def _prettify_label_latex(cls, label):
"""
Prettifier for matplotlib, using LaTeX syntax
:param label: a string to prettify
"""
label = (
label
.replace('GAMMA', r'$\Gamma$')
.replace('DELTA', r'$\Delta$')
.replace('LAMBDA', r'$\Lambda$')
.replace('SIGMA', r'$\Sigma$')
) # yapf:disable
label = re.sub(r'_(.?)', r'$_{\1}$', label)
# label += r"$_{\vphantom{0}}$"
return label
@classmethod
def _prettify_label_latex_simple(cls, label):
"""
Prettifier for matplotlib, using LaTeX syntax (for old label names)
:param label: a string to prettify
"""
if label == 'G':
return r'$\Gamma$'
return re.sub(r'(\d+)', r'$_{\1}$', label)
@classproperty
def prettifiers(cls) -> Dict[str, Any]: # pylint: disable=no-self-argument
"""
Property that returns a dictionary that for each string associates
the function to prettify a label
:return: a dictionary where keys are strings and values are functions
"""
return {
'agr_seekpath': cls._prettify_label_agr,
'agr_simple': cls._prettify_label_agr_simple,
'latex_simple': cls._prettify_label_latex_simple,
'latex_seekpath': cls._prettify_label_latex,
'gnuplot_simple': cls._prettify_label_gnuplot_simple,
'gnuplot_seekpath': cls._prettify_label_gnuplot,
'pass': cls._prettify_label_pass,
}
    @classmethod
    def get_prettifiers(cls):
        """
        Return a list of valid prettifier strings

        These are exactly the format names accepted by the ``Prettifier``
        constructor, sorted alphabetically.

        :return: a list of strings
        """
        return sorted(cls.prettifiers.keys())
    def __init__(self, format):  # pylint: disable=redefined-builtin
        """
        Create a class to prettify strings of a given format

        :param format: a string with the format to use to prettify.
            Valid formats are obtained from self.prettifiers.
            ``None`` is treated as 'pass' (no prettification).
        :raise ValueError: if ``format`` is not a known prettifier name
        """
        if format is None:
            format = 'pass'
        try:
            # Look up the prettifier function once; ``prettify`` just calls it.
            self._prettifier_f = self.prettifiers[format]  # pylint: disable=unsubscriptable-object
        except KeyError:
            raise ValueError(f"Unknown prettifier format {format}; valid formats: {', '.join(self.get_prettifiers())}")
    def prettify(self, label):
        """
        Prettify a label using the format passed in the initializer

        :param label: the string to prettify
        :return: a prettified string
        """
        # Delegate to the function selected in ``__init__``.
        return self._prettifier_f(label)
def prettify_labels(labels, format=None):  # pylint: disable=redefined-builtin
    """
    Prettify label for typesetting in various formats

    :param labels: a list of length-2 tuples, in the format(position, label)
    :param format: a string with the format for the prettifier (e.g. 'agr',
        'matplotlib', ...)
    :return: the same list as labels, but with the second value possibly replaced
        with a prettified version that typesets nicely in the selected format
    """
    # Build one prettifier and reuse it for every label.
    prettify = Prettifier(format).prettify
    return [(position, prettify(text)) for position, text in labels]
def join_labels(labels, join_symbol='|', threshold=1.e-6):
    """
    Join labels with a joining symbol when they are very close

    :param labels: a list of length-2 tuples, in the format(position, label)
    :param join_symbol: the string to use to join different paths. By default, a pipe
    :param threshold: the threshold to decide if two float values are the same and should
        be joined
    :return: the same list as labels, but with the second value possibly replaced
        with strings joined when close enough
    """
    if not labels:
        return []
    # Each entry becomes a mutable [position, text] pair so that joined
    # labels can be appended in place.
    joined = [list(labels[0])]
    # Compare every label with its predecessor in the *input* list: if the
    # positions coincide (within threshold), glue the text onto the last
    # kept entry instead of starting a new one.
    for previous, current in zip(labels, labels[1:]):
        if abs(current[0] - previous[0]) < threshold:
            joined[-1][1] += join_symbol + current[1]
        else:
            joined.append(list(current))
    return joined
def strip_prefix(full_string, prefix):
    """
    Strip the prefix from the given string and return it. If the prefix is not present
    the original string will be returned unaltered

    :param full_string: the string from which to remove the prefix
    :param prefix: the prefix to remove
    :return: the string with prefix removed
    """
    if full_string.startswith(prefix):
        # Slice rather than split: the previous ``rsplit(prefix)[1]``
        # returned the wrong result whenever the prefix occurred more than
        # once in the string (e.g. ('abcabc', 'abc') gave '' instead of 'abc').
        return full_string[len(prefix):]
    return full_string
class Capturing:
    """
    This class captures stdout and returns it
    (as a list, split by lines).

    Note: if you raise a SystemExit, you have to catch it outside.
    E.g., in our tests, this works::

        import sys
        with self.assertRaises(SystemExit):
            with Capturing() as output:
                sys.exit()

    But out of the testing environment, the code instead just exits.

    To use it, access the obj.stdout_lines, or just iterate over the object

    :param capture_stderr: if True, also captures sys.stderr. To access the
        lines, use obj.stderr_lines. If False, obj.stderr_lines is None.
    """
    # pylint: disable=attribute-defined-outside-init

    def __init__(self, capture_stderr=False):
        super().__init__()
        # Lines are extended on every __exit__, so the same instance can be
        # reused across several ``with`` blocks.
        self.stdout_lines = []
        self._capture_stderr = capture_stderr
        self.stderr_lines = [] if capture_stderr else None

    def __enter__(self):
        """Swap ``sys.stdout`` (and optionally ``sys.stderr``) for in-memory buffers."""
        self._orig_stdout = sys.stdout
        self._out_buffer = io.StringIO()
        sys.stdout = self._out_buffer
        if self._capture_stderr:
            self._orig_stderr = sys.stderr
            self._err_buffer = io.StringIO()
            sys.stderr = self._err_buffer
        return self

    def __exit__(self, *args):
        """Restore the original streams and store the captured output, split by lines."""
        self.stdout_lines.extend(self._out_buffer.getvalue().splitlines())
        sys.stdout = self._orig_stdout
        del self._out_buffer  # free up some memory
        if self._capture_stderr:
            self.stderr_lines.extend(self._err_buffer.getvalue().splitlines())
            sys.stderr = self._orig_stderr
            del self._err_buffer  # free up some memory

    def __str__(self):
        return str(self.stdout_lines)

    def __iter__(self):
        return iter(self.stdout_lines)
class ErrorAccumulator:
    """
    Allows to run a number of functions and collect all the errors they raise

    This allows to validate multiple things and tell the user about all the
    errors encountered at once. Works best if the individual functions do not depend on each other.

    Does not allow to trace the stack of each error, therefore do not use for debugging, but for
    semantical checking with user friendly error messages.
    """

    def __init__(self, *error_cls):
        # ``error_cls`` is the tuple of exception classes that ``run`` will
        # catch; any other exception propagates normally.
        self.error_cls = error_cls
        self.errors = {cls: [] for cls in error_cls}

    def run(self, function, *args, **kwargs):
        """Invoke ``function`` and record, rather than propagate, the tracked errors."""
        try:
            function(*args, **kwargs)
        except self.error_cls as caught:
            self.errors[caught.__class__].append(caught)

    def success(self):
        """Return ``True`` while no tracked error has been collected."""
        return bool(not any(self.errors.values()))

    def result(self, raise_error=Exception):
        """
        Return a ``(success, errors)`` tuple.

        :param raise_error: when truthy, first raise this exception class if
            any error was collected (pass a falsy value to only inspect).
        """
        if raise_error:
            self.raise_errors(raise_error)
        return self.success(), self.errors

    def raise_errors(self, raise_cls):
        """Raise ``raise_cls`` summarising every collected error, if there are any."""
        if not self.success():
            raise raise_cls(f'The following errors were encountered: {self.errors}')
class DatetimePrecision:
    """
    A simple class which stores a datetime object with its precision. No
    internal check is done (cause it is not possible).

    precision: 1 (only full date)
    2 (date plus hour)
    3 (date + hour + minute)
    4 (date + hour + minute + second)
    """
    def __init__(self, dtobj, precision):
        """ Constructor to check valid datetime object and precision """
        # Only type validation is possible here: whether ``precision``
        # matches the information actually carried by ``dtobj`` cannot be
        # verified (see the class docstring).
        if not isinstance(dtobj, datetime):
            raise TypeError('dtobj argument has to be a datetime object')
        if not isinstance(precision, int):
            raise TypeError('precision argument has to be an integer')
        self.dtobj = dtobj
        self.precision = precision
| 31.491086 | 119 | 0.616212 |
BDA', 'Λ')
.replace('SIGMA', 'Σ')
)
return re.sub(r'_(.?)', r'_{\1}', label)
@classmethod
def _prettify_label_gnuplot_simple(cls, label):
if label == 'G':
return 'Γ'
return re.sub(r'(\d+)', r'_{\1}', label)
@classmethod
def _prettify_label_latex(cls, label):
label = (
label
.replace('GAMMA', r'$\Gamma$')
.replace('DELTA', r'$\Delta$')
.replace('LAMBDA', r'$\Lambda$')
.replace('SIGMA', r'$\Sigma$')
)
label = re.sub(r'_(.?)', r'$_{\1}$', label)
return label
@classmethod
def _prettify_label_latex_simple(cls, label):
if label == 'G':
return r'$\Gamma$'
return re.sub(r'(\d+)', r'$_{\1}$', label)
@classproperty
def prettifiers(cls) -> Dict[str, Any]:
return {
'agr_seekpath': cls._prettify_label_agr,
'agr_simple': cls._prettify_label_agr_simple,
'latex_simple': cls._prettify_label_latex_simple,
'latex_seekpath': cls._prettify_label_latex,
'gnuplot_simple': cls._prettify_label_gnuplot_simple,
'gnuplot_seekpath': cls._prettify_label_gnuplot,
'pass': cls._prettify_label_pass,
}
@classmethod
def get_prettifiers(cls):
return sorted(cls.prettifiers.keys())
def __init__(self, format):
if format is None:
format = 'pass'
try:
self._prettifier_f = self.prettifiers[format]
except KeyError:
raise ValueError(f"Unknown prettifier format {format}; valid formats: {', '.join(self.get_prettifiers())}")
def prettify(self, label):
return self._prettifier_f(label)
def prettify_labels(labels, format=None):
prettifier = Prettifier(format)
return [(pos, prettifier.prettify(label)) for pos, label in labels]
def join_labels(labels, join_symbol='|', threshold=1.e-6):
if labels:
new_labels = [list(labels[0])]
j = 0
for i in range(1, len(labels)):
if abs(labels[i][0] - labels[i - 1][0]) < threshold:
new_labels[j][1] += join_symbol + labels[i][1]
else:
new_labels.append(list(labels[i]))
j += 1
else:
new_labels = []
return new_labels
def strip_prefix(full_string, prefix):
if full_string.startswith(prefix):
return full_string.rsplit(prefix)[1]
return full_string
class Capturing:
def __init__(self, capture_stderr=False):
self.stdout_lines = []
super().__init__()
self._capture_stderr = capture_stderr
if self._capture_stderr:
self.stderr_lines = []
else:
self.stderr_lines = None
def __enter__(self):
self._stdout = sys.stdout
self._stringioout = io.StringIO()
sys.stdout = self._stringioout
if self._capture_stderr:
self._stderr = sys.stderr
self._stringioerr = io.StringIO()
sys.stderr = self._stringioerr
return self
def __exit__(self, *args):
self.stdout_lines.extend(self._stringioout.getvalue().splitlines())
sys.stdout = self._stdout
del self._stringioout
if self._capture_stderr:
self.stderr_lines.extend(self._stringioerr.getvalue().splitlines())
sys.stderr = self._stderr
del self._stringioerr
def __str__(self):
return str(self.stdout_lines)
def __iter__(self):
return iter(self.stdout_lines)
class ErrorAccumulator:
def __init__(self, *error_cls):
self.error_cls = error_cls
self.errors = {k: [] for k in self.error_cls}
def run(self, function, *args, **kwargs):
try:
function(*args, **kwargs)
except self.error_cls as err:
self.errors[err.__class__].append(err)
def success(self):
return bool(not any(self.errors.values()))
def result(self, raise_error=Exception):
if raise_error:
self.raise_errors(raise_error)
return self.success(), self.errors
def raise_errors(self, raise_cls):
if not self.success():
raise raise_cls(f'The following errors were encountered: {self.errors}')
class DatetimePrecision:
def __init__(self, dtobj, precision):
if not isinstance(dtobj, datetime):
raise TypeError('dtobj argument has to be a datetime object')
if not isinstance(precision, int):
raise TypeError('precision argument has to be an integer')
self.dtobj = dtobj
self.precision = precision
| true | true |
f730c80708b6ee28cd92b114db169a45bb83104e | 119 | py | Python | upload/urls.py | caggri/FOFviz | 776ab387d832a86eea1a1b9064040d9b012494a7 | [
"MIT"
] | 2 | 2020-05-24T22:28:53.000Z | 2020-05-25T21:58:24.000Z | upload/urls.py | caggri/FOFviz | 776ab387d832a86eea1a1b9064040d9b012494a7 | [
"MIT"
] | null | null | null | upload/urls.py | caggri/FOFviz | 776ab387d832a86eea1a1b9064040d9b012494a7 | [
"MIT"
] | 1 | 2021-10-16T12:26:29.000Z | 2021-10-16T12:26:29.000Z | from django.urls import path
from . import views
urlpatterns = [
path('', views.upload_page, name='upload_page')
] | 19.833333 | 51 | 0.714286 | from django.urls import path
from . import views
urlpatterns = [
path('', views.upload_page, name='upload_page')
] | true | true |
f730c8765556ac16d2af1dc5795cdc3cd2145c3c | 1,321 | py | Python | test/test_settings_mapping_extended.py | Atomicology/isilon_sdk_python | 91039da803ae37ed4abf8d2a3f59c333f3ef1866 | [
"MIT"
] | null | null | null | test/test_settings_mapping_extended.py | Atomicology/isilon_sdk_python | 91039da803ae37ed4abf8d2a3f59c333f3ef1866 | [
"MIT"
] | null | null | null | test/test_settings_mapping_extended.py | Atomicology/isilon_sdk_python | 91039da803ae37ed4abf8d2a3f59c333f3ef1866 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
ref: https://github.com/swagger-api/swagger-codegen
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.settings_mapping_extended import SettingsMappingExtended
class TestSettingsMappingExtended(unittest.TestCase):
    """ SettingsMappingExtended unit test stubs """
    def setUp(self):
        # No fixtures are needed for this auto-generated stub.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def testSettingsMappingExtended(self):
        """
        Test SettingsMappingExtended

        Smoke test: constructing the generated model must not raise.
        """
        model = swagger_client.models.settings_mapping_extended.SettingsMappingExtended()
if __name__ == '__main__':
    unittest.main()
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.settings_mapping_extended import SettingsMappingExtended
class TestSettingsMappingExtended(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testSettingsMappingExtended(self):
model = swagger_client.models.settings_mapping_extended.SettingsMappingExtended()
if __name__ == '__main__':
unittest.main() | true | true |
f730c98fb8903a540d71dd9033f8551f5e81af7c | 1,316 | py | Python | garden_scorer/src/tracing.py | emilybache/BeeFriendly | 7582d8f7140f2d0088404d9cb1d47b6231606c49 | [
"MIT"
] | 4 | 2020-06-14T13:42:39.000Z | 2022-01-29T14:36:53.000Z | garden_scorer/src/tracing.py | emilybache/BeeFriendly | 7582d8f7140f2d0088404d9cb1d47b6231606c49 | [
"MIT"
] | 2 | 2022-02-13T15:19:44.000Z | 2022-02-25T12:32:16.000Z | garden_scorer/src/tracing.py | emilybache/BeeFriendly | 7582d8f7140f2d0088404d9cb1d47b6231606c49 | [
"MIT"
] | 4 | 2020-06-11T09:31:45.000Z | 2021-08-17T14:00:41.000Z | import logging
import opentracing
from jaeger_client import Config
def init_tracer(service):
    """
    Configure root logging and initialise a Jaeger tracer for *service*.

    Note: ``Config.initialize_tracer`` installs the tracer as the global
    ``opentracing.tracer``, which is why nothing is returned here.

    :param service: the service name reported to Jaeger
    """
    # Drop any previously installed handlers so basicConfig takes effect.
    root_logger = logging.getLogger('')
    root_logger.handlers = []
    logging.basicConfig(format='%(message)s', level=logging.DEBUG)

    tracer_settings = {
        # Constant sampler with param=1: sample every trace.
        'sampler': {
            'type': 'const',
            'param': 1,
        },
        'logging': True,
        'reporter_batch_size': 1,
    }
    # This call sets the global variable opentracing.tracer.
    Config(config=tracer_settings, service_name=service).initialize_tracer()
def flask_to_scope(flask_tracer, request):
    """
    Activate the span that the flask tracer opened for *request* on the
    global ``opentracing`` scope manager and return the resulting scope.

    :param flask_tracer: the tracer that owns the per-request spans
    :param request: the current flask request
    """
    request_span = flask_tracer.get_span(request)
    # finish_on_close=False: the span's lifetime stays owned by flask_tracer.
    return opentracing.tracer.scope_manager.activate(request_span, False)
def parse_baggage(headers, scope):
    """
    Copy tracing baggage items from the request headers onto the active span.

    Reads the ``jaeger-baggage`` header (a comma-separated list of
    ``key=value`` pairs) and tags the span with the ``session`` and
    ``request`` entries, if present.

    :param headers: a mapping supporting ``get`` (e.g. flask request headers)
    :param scope: an opentracing scope whose span receives the tags
    """
    baggage = headers.get("jaeger-baggage")
    print(f"found baggage: {baggage}")
    if not baggage:
        return
    # Split on the *first* '=' only and skip malformed fields: the previous
    # ``dict([f.split("=") ...])`` raised ValueError for fields without '='
    # or with '=' inside the value, crashing the request.
    fields_as_dict = {}
    for field in baggage.split(","):
        key, sep, value = field.partition("=")
        if sep:
            fields_as_dict[key] = value
    if "session" in fields_as_dict:
        scope.span.set_tag("garden-session", fields_as_dict["session"])
    if "request" in fields_as_dict:
        scope.span.set_tag("quizz-request", fields_as_dict["request"])
| 26.857143 | 71 | 0.617781 | import logging
import opentracing
from jaeger_client import Config
def init_tracer(service):
logging.getLogger('').handlers = []
logging.basicConfig(format='%(message)s', level=logging.DEBUG)
config = Config(
config={
'sampler': {
'type': 'const',
'param': 1,
},
'logging': True,
'reporter_batch_size': 1,
},
service_name=service,
)
config.initialize_tracer()
def flask_to_scope(flask_tracer, request):
return opentracing.tracer.scope_manager.activate(
flask_tracer.get_span(request),
False,
)
def parse_baggage(headers, scope):
baggage = headers.get("jaeger-baggage")
print(f"found baggage: {baggage}")
if not baggage:
return
fields_as_dict = dict([f.split("=") for f in (baggage.split(","))])
if "session" in fields_as_dict.keys():
sessionId = fields_as_dict.get("session")
scope.span.set_tag("garden-session", sessionId)
if "request" in fields_as_dict.keys():
requestId = fields_as_dict.get("request")
scope.span.set_tag("quizz-request", requestId)
| true | true |
f730c9ef609a4b060b7dbaf45e19609d9f71c5b5 | 2,359 | py | Python | nei/physics/shocks.py | EigenDev/NEI-Research | 5d6f110426f57668156665a69fcf9c83575ccdb8 | [
"BSD-2-Clause-Patent",
"BSD-2-Clause",
"BSD-3-Clause"
] | 3 | 2018-02-09T00:19:06.000Z | 2019-03-11T12:32:25.000Z | nei/physics/shocks.py | EigenDev/NEI-Research | 5d6f110426f57668156665a69fcf9c83575ccdb8 | [
"BSD-2-Clause-Patent",
"BSD-2-Clause",
"BSD-3-Clause"
] | 34 | 2018-02-07T19:15:27.000Z | 2018-08-09T19:01:40.000Z | nei/physics/shocks.py | EigenDev/NEI-Research | 5d6f110426f57668156665a69fcf9c83575ccdb8 | [
"BSD-2-Clause-Patent",
"BSD-2-Clause",
"BSD-3-Clause"
] | 3 | 2018-02-07T19:19:27.000Z | 2018-06-04T19:49:44.000Z | """
Shocks is a module that consists of canonical hydrodynamical
equations used for modeling astrophysical plasmas
Classes:
Shocks -- A class that emcompasses a multitude of hydrodynamical
shock equations relevant for plasma calculations.
Functions:
rh_density -- Returns the density jump relation derived from the
Rankine-Hugoniot jump relations
rh_temp -- Returns the temperature jump relations derived from
the Rankine-Hugoniot jump relations
"""
class MHD:
    """
    Stores results of NEI simulation involving shock dynamics
    """

    def __init__(self):
        # No state is needed: the jump-condition methods are pure functions.
        pass

    def rh_density(self, init_dens, gamma, mach):
        """
        Return the post-shock density according to the Rankine-Hugoniot
        jump conditions.

        Parameters
        ------
        init_dens: ~astropy.units.Quantity,
            The initial density of the plasma pre-shock
        gamma: float,
            The specific heats ratio of the system
        mach: int,
            The mach number of the system

        Returns
        ------
        final_dens: array-like
            The pre-shock density scaled by the Rankine-Hugoniot
            compression ratio.
        """
        mach_sq = mach**2
        compression = ((gamma + 1) * mach_sq) / (2 + (gamma - 1) * mach_sq)
        return compression * init_dens

    def rh_temp(self, init_temp, gamma, mach):
        """
        Return the post-shock temperature according to the Rankine-Hugoniot
        jump conditions.

        Parameters
        ------
        init_temp: ~astropy.units.Quantity,
            The initial temperature of the plasma pre-shock
        gamma: float,
            The specific heats ratio of the system
        mach: int,
            The mach number of the system

        Returns
        ------
        final_temp: array-like
            The pre-shock temperature scaled by the Rankine-Hugoniot
            temperature ratio.
        """
        mach_sq = mach**2
        numerator = (((gamma + 1) + 2 * gamma * (mach_sq - 1)) *
                     ((gamma + 1) + (gamma - 1) * (mach_sq - 1)))
        denominator = (gamma + 1)**2 * mach_sq
        return (numerator / denominator) * init_temp
class MHD:
def __init__(self):
pass
def rh_density(self, init_dens, gamma, mach):
dens_ratio = ((gamma+1)*mach**2)/(2+(gamma-1)*mach**2)
final_dens = dens_ratio*init_dens
return final_dens
def rh_temp(self, init_temp, gamma, mach):
temp_ratio = ( ( ( (gamma + 1)+ 2 * gamma * (mach**2 - 1) ) *
( (gamma + 1) + (gamma-1)*(mach**2 - 1)) ) /
((gamma + 1)**2 * mach**2 ) )
final_temp = temp_ratio * init_temp
return final_temp | true | true |
f730ca0f79f5f46c3e0c9a27ff65e95253be08cd | 2,053 | py | Python | smsgateway/south_migrations/0001_initial.py | vikingco/django-smsgateway | 91675e599a147f4d7e64ff4c4455dbf75ed753d3 | [
"BSD-3-Clause"
] | 13 | 2015-03-11T06:55:50.000Z | 2022-02-08T16:50:16.000Z | smsgateway/south_migrations/0001_initial.py | vikingco/django-smsgateway | 91675e599a147f4d7e64ff4c4455dbf75ed753d3 | [
"BSD-3-Clause"
] | 17 | 2015-03-19T12:27:41.000Z | 2019-12-09T14:21:21.000Z | smsgateway/south_migrations/0001_initial.py | vikingco/django-smsgateway | 91675e599a147f4d7e64ff4c4455dbf75ed753d3 | [
"BSD-3-Clause"
] | 7 | 2015-05-15T00:14:49.000Z | 2019-06-27T02:46:09.000Z |
from __future__ import absolute_import
from south.db import db
from django.db import models
from smsgateway.models import *
class Migration:
    """South migration creating the ``smsgateway_sms`` table for the SMS model."""

    def forwards(self, orm):
        # Adding model 'SMS'
        db.create_table('smsgateway_sms', (
            ('id', orm['smsgateway.SMS:id']),
            ('sent', orm['smsgateway.SMS:sent']),
            ('content', orm['smsgateway.SMS:content']),
            ('sender', orm['smsgateway.SMS:sender']),
            ('to', orm['smsgateway.SMS:to']),
            ('operator', orm['smsgateway.SMS:operator']),
            ('gateway', orm['smsgateway.SMS:gateway']),
            ('backend', orm['smsgateway.SMS:backend']),
            ('gateway_ref', orm['smsgateway.SMS:gateway_ref']),
            ('direction', orm['smsgateway.SMS:direction']),
        ))
        db.send_create_signal('smsgateway', ['SMS'])

    def backwards(self, orm):
        # Deleting model 'SMS'
        db.delete_table('smsgateway_sms')

    # Frozen ORM description of the model at the time of this migration.
    models = {
        'smsgateway.sms': {
            'backend': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '32', 'db_index': 'True'}),
            'content': ('django.db.models.fields.TextField', [], {}),
            'direction': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'gateway': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'gateway_ref': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'operator': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'sender': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'sent': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'to': ('django.db.models.fields.CharField', [], {'max_length': '32'})
        }
    }
    complete_apps = ['smsgateway']
| 40.254902 | 131 | 0.543595 |
from __future__ import absolute_import
from south.db import db
from django.db import models
from smsgateway.models import *
class Migration:
def forwards(self, orm):
db.create_table('smsgateway_sms', (
('id', orm['smsgateway.SMS:id']),
('sent', orm['smsgateway.SMS:sent']),
('content', orm['smsgateway.SMS:content']),
('sender', orm['smsgateway.SMS:sender']),
('to', orm['smsgateway.SMS:to']),
('operator', orm['smsgateway.SMS:operator']),
('gateway', orm['smsgateway.SMS:gateway']),
('backend', orm['smsgateway.SMS:backend']),
('gateway_ref', orm['smsgateway.SMS:gateway_ref']),
('direction', orm['smsgateway.SMS:direction']),
))
db.send_create_signal('smsgateway', ['SMS'])
def backwards(self, orm):
db.delete_table('smsgateway_sms')
models = {
'smsgateway.sms': {
'backend': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '32', 'db_index': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'direction': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'gateway': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'gateway_ref': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'operator': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sender': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'sent': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'to': ('django.db.models.fields.CharField', [], {'max_length': '32'})
}
}
complete_apps = ['smsgateway']
| true | true |
f730ca8c7c983ea347a4f2157407b637efc8c630 | 1,495 | py | Python | scripts/archive_gs.py | wsjeon/softlearning | 8ceba916e5b3b2df66a1aa51bed3ff703e394a56 | [
"MIT"
] | 1 | 2019-04-02T03:07:03.000Z | 2019-04-02T03:07:03.000Z | scripts/archive_gs.py | wsjeon/softlearning | 8ceba916e5b3b2df66a1aa51bed3ff703e394a56 | [
"MIT"
] | 11 | 2020-01-28T22:32:20.000Z | 2022-03-11T23:37:57.000Z | scripts/archive_gs.py | wsjeon/softlearning | 8ceba916e5b3b2df66a1aa51bed3ff703e394a56 | [
"MIT"
] | 1 | 2019-12-27T19:00:57.000Z | 2019-12-27T19:00:57.000Z | #!/usr/bin/python
import argparse
import os
import subprocess
def parse_args():
    """Parse command line arguments.

    ``archive_path`` is the experiment path (relative to the results root)
    to move; ``--dry`` only prints the gsutil command; ``--unarchive``
    reverses the direction (archive -> fresh).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'archive_path', type=str, default=None, nargs='?')
    parser.add_argument(
        '--dry', action='store_true', default=False)
    # ``sync_gs`` reads ``args.unarchive``; without this flag the script
    # crashed with AttributeError.
    parser.add_argument(
        '--unarchive', action='store_true', default=False)
    args = parser.parse_args()
    return args
def sync_gs(args):
    """Move result files between the fresh and archive locations in GCS.

    By default moves ``<bucket>/ray/results/<path>`` to
    ``<bucket>/archive/ray/results/<path>``; with ``args.unarchive`` set,
    the direction is reversed.

    TODO(hartikainen): Refactor this to use project config instead of
    environment variables (e.g. `SAC_GS_BUCKET`).
    """
    if 'SAC_GS_BUCKET' not in os.environ:
        raise ValueError(
            "'SAC_GS_BUCKET' environment variable needs to be set.")

    bucket = os.environ['SAC_GS_BUCKET']
    fresh_results_path = os.path.join(bucket, 'ray', 'results')
    archive_results_path = os.path.join(bucket, 'archive', 'ray', 'results')

    fresh_url = os.path.join(fresh_results_path, args.archive_path)
    archive_url = os.path.join(archive_results_path, args.archive_path)

    # The previous conditional expression built a 3-tuple due to operator
    # precedence and always failed to unpack; spell both directions out.
    # ``getattr`` keeps this working even for an ``args`` namespace that
    # predates the ``--unarchive`` flag.
    if getattr(args, 'unarchive', False):
        src_url, dst_url = archive_url, fresh_url
    else:
        src_url, dst_url = fresh_url, archive_url

    command_parts = ['gsutil', '-m', 'mv', src_url, dst_url]
    command = " ".join(command_parts)

    if args.dry:
        print(command)
        return

    subprocess.call(command, shell=True)
def main():
    """Script entry point: parse CLI arguments and perform the GCS move."""
    args = parse_args()
    sync_gs(args)
if __name__ == '__main__':
    main()
| 24.508197 | 78 | 0.660201 |
import argparse
import os
import subprocess
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'archive_path', type=str, default=None, nargs='?')
parser.add_argument(
'--dry', action='store_true', default=False)
args = parser.parse_args()
return args
def sync_gs(args):
if 'SAC_GS_BUCKET' not in os.environ:
raise ValueError(
"'SAC_GS_BUCKET' environment variable needs to be set.")
bucket = os.environ['SAC_GS_BUCKET']
fresh_results_path = os.path.join(bucket, 'ray', 'results')
archive_results_path = os.path.join(bucket, 'archive', 'ray', 'results')
fresh_url = os.path.join(fresh_results_path, args.archive_path)
archive_url = os.path.join(archive_results_path, args.archive_path)
src_url, dst_url = (
fresh_url, archive_url
if args.unarchive
else archive_url, fresh_url)
command_parts = ['gsutil', '-m', 'mv', src_url, dst_url]
command = " ".join(command_parts)
if args.dry:
print(command)
return
subprocess.call(command, shell=True)
def main():
args = parse_args()
sync_gs(args)
if __name__ == '__main__':
main()
| true | true |
f730cc80fc1c397a68a520a6bdf3278598eb7a4b | 1,100 | py | Python | opennem/spiders/nem/price.py | tourdownunder/opennem | deec3e2079db9d9d84171010fd0c239170d1e7ce | [
"MIT"
] | null | null | null | opennem/spiders/nem/price.py | tourdownunder/opennem | deec3e2079db9d9d84171010fd0c239170d1e7ce | [
"MIT"
] | 1 | 2020-09-06T04:17:59.000Z | 2020-09-06T04:17:59.000Z | opennem/spiders/nem/price.py | tourdownunder/opennem | deec3e2079db9d9d84171010fd0c239170d1e7ce | [
"MIT"
] | null | null | null | from opennem.pipelines.nem.opennem import NemwebUnitScadaOpenNEMStorePipeline
from opennem.spiders.nemweb import NemwebSpider
class NemwebLatestPriceSpider(NemwebSpider):
    """Fetch only the newest pre-AP dispatch price file from the NEMWEB CURRENT reports."""
    name = "au.nem.latest.price"
    start_url = (
        "http://www.nemweb.com.au/Reports/CURRENT/Dispatchprices_PRE_AP/"
    )
    # Only process the single most recent file in the listing.
    limit = 1
    pipelines_extra = set([NemwebUnitScadaOpenNEMStorePipeline,])
class NemwebCurrentPriceSpider(NemwebSpider):
    """Fetch all pre-AP dispatch price files from the NEMWEB CURRENT reports."""
    name = "au.nem.current.price"
    start_url = (
        "http://www.nemweb.com.au/Reports/CURRENT/Dispatchprices_PRE_AP/"
    )
    # limit of 0 presumably means "no limit" — confirm in NemwebSpider.
    limit = 0
    pipelines_extra = set([NemwebUnitScadaOpenNEMStorePipeline,])
class NemwebArchivePriceSpider(NemwebSpider):
    """Fetch historical pre-AP dispatch price files from the NEMWEB ARCHIVE reports."""
    name = "au.nem.archive.price"
    start_url = (
        "http://www.nemweb.com.au/Reports/ARCHIVE/Dispatchprices_PRE_AP/"
    )
    # limit of 0 presumably means "no limit" — confirm in NemwebSpider.
    limit = 0
    pipelines_extra = set([NemwebUnitScadaOpenNEMStorePipeline,])
    # Archives tend to contain large zips of embedded zips so throttle
    # to limit memory use
    custom_settings = {
        "CONCURRENT_REQUESTS": 1,
        "CONCURRENT_ITEMS": 1,
    }
| 27.5 | 77 | 0.710909 | from opennem.pipelines.nem.opennem import NemwebUnitScadaOpenNEMStorePipeline
from opennem.spiders.nemweb import NemwebSpider
class NemwebLatestPriceSpider(NemwebSpider):
name = "au.nem.latest.price"
start_url = (
"http://www.nemweb.com.au/Reports/CURRENT/Dispatchprices_PRE_AP/"
)
limit = 1
pipelines_extra = set([NemwebUnitScadaOpenNEMStorePipeline,])
class NemwebCurrentPriceSpider(NemwebSpider):
name = "au.nem.current.price"
start_url = (
"http://www.nemweb.com.au/Reports/CURRENT/Dispatchprices_PRE_AP/"
)
limit = 0
pipelines_extra = set([NemwebUnitScadaOpenNEMStorePipeline,])
class NemwebArchivePriceSpider(NemwebSpider):
name = "au.nem.archive.price"
start_url = (
"http://www.nemweb.com.au/Reports/ARCHIVE/Dispatchprices_PRE_AP/"
)
limit = 0
pipelines_extra = set([NemwebUnitScadaOpenNEMStorePipeline,])
custom_settings = {
"CONCURRENT_REQUESTS": 1,
"CONCURRENT_ITEMS": 1,
}
| true | true |
f730cd205556860fc0fce04a73efaf2df2cf84e7 | 11,087 | py | Python | h1st/tests/core/test_schemas_validator.py | Mou-Ikkai/h1st | da47a8f1ad6af532c549e075fba19e3b3692de89 | [
"Apache-2.0"
] | 2 | 2020-08-21T07:49:08.000Z | 2020-08-21T07:49:13.000Z | h1st/tests/core/test_schemas_validator.py | Mou-Ikkai/h1st | da47a8f1ad6af532c549e075fba19e3b3692de89 | [
"Apache-2.0"
] | 3 | 2020-11-13T19:06:07.000Z | 2022-02-10T02:06:03.000Z | h1st/tests/core/test_schemas_validator.py | Mou-Ikkai/h1st | da47a8f1ad6af532c549e075fba19e3b3692de89 | [
"Apache-2.0"
] | null | null | null | from typing import Union, Optional, List
from unittest import TestCase
import pyarrow as pa
import pandas as pd
import numpy as np
from h1st.schema.schema_validator import SchemaValidator
def dummy():
    """Placeholder no-op callable (PEP 8 E731: prefer ``def`` over a lambda assignment)."""
    return None
class SchemaTestCase(TestCase):
    def test_validate_schema(self):
        """Pyarrow-schema vs pyarrow-schema validation: empty/None schemas pass,
        missing or type-mismatched fields are reported, field subsets are accepted."""
        # empty schema
        self.assertTrue(SchemaValidator().validate_downstream_schema(pa.schema([]), pa.schema([])) == [])
        self.assertTrue(SchemaValidator().validate_downstream_schema({}, {}) == [])
        # a None downstream schema places no constraints
        self.assertTrue(SchemaValidator().validate_downstream_schema(pa.schema([
            ('f1', pa.int16())
        ]), None) == [])
        self.assertEqual(
            SchemaValidator().validate_downstream_schema(None, pa.schema([])),
            [],
        )
        # self.assertEqual(
        #     SchemaValidator().validate_downstream_schema({}, pa.schema([])).errors,
        #     ['Expects schema, receives {}'],
        # )
        # field is not available
        self.assertEqual(
            SchemaValidator().validate_downstream_schema(
                pa.schema([('f1', pa.int32())]),
                pa.schema([('f2', pa.int32())]),
            ).errors,
            ['Field "f2" is missing'],
        )
        # field is not compatible
        self.assertEqual(
            SchemaValidator().validate_downstream_schema(
                pa.schema([('f1', pa.string())]),
                pa.schema([('f1', pa.int32())]),
            ),
            ['Field "f1": Expects int32, receives string'],
        )
        # same schema
        self.assertEqual(
            SchemaValidator().validate_downstream_schema(
                pa.schema([('f1', pa.int32())]),
                pa.schema([('f1', pa.int32())]),
            ),
            [],
        )
        # subset of schema
        # NOTE(review): the field spec below is a 3-tuple — presumably meant
        # pa.schema([('f1', pa.int32()), ('f2', pa.int32())]); confirm upstream.
        self.assertEqual(
            SchemaValidator().validate_downstream_schema(
                pa.schema([('f1', pa.int32(), ('f2', pa.int32()))]),
                pa.schema([('f1', pa.int32())]),
            ),
            [],
        )
    def test_dataframe(self):
        """DataFrame-typed dict schemas: type mismatches and per-field (column)
        mismatches are reported; Optional fields and field subsets are accepted."""
        # identical DataFrame schemas validate cleanly
        self.assertEqual(SchemaValidator().validate_downstream_schema(
            {'type': pd.DataFrame},
            {'type': pd.DataFrame}
        ).errors, [])
        # top-level type mismatch
        self.assertEqual(SchemaValidator().validate_downstream_schema(
            {'type': pd.DataFrame},
            {'type': list}
        ).errors, ['Expects list, receives DataFrame'])
        # per-column type mismatches are reported individually;
        # {'type': str} is equivalent to the bare type str
        self.assertEqual(SchemaValidator().validate_downstream_schema(
            {'type': pd.DataFrame, 'fields': {
                'abc': str,
                'def': str,
                'myfield': int,
            }},
            {'type': pd.DataFrame, 'fields': {
                'abc': int,
                'def': {'type': str},
                'myfield': float,
            }},
        ).errors, ['Field abc: Expects int, receives str', 'Field myfield: Expects float, receives int'])
        # a bare DataFrame downstream accepts any DataFrame with fields
        self.assertEqual(SchemaValidator().validate_downstream_schema(
            {'type': pd.DataFrame, 'fields': {
                'abc': str,
            }},
            pd.DataFrame,
        ).errors, [])
        self.assertEqual(SchemaValidator().validate_downstream_schema(
            {'type': pd.DataFrame, 'fields': {
                'abc': str,
            }},
            {'type': pd.DataFrame, 'fields': {
                'abc': str,
                'def': Optional[str],  # optional allows missing column
            }},
        ).errors, [])
        # pyarrow field types upstream are compatible with python types downstream
        self.assertEqual(
            SchemaValidator().validate_downstream_schema({
                'type': pd.DataFrame,
                'fields': {
                    'Timestamp': pa.float64(),
                    'CarSpeed': pa.float64(),
                    'Gx': pa.float64(),
                    'Gy': pa.float64(),
                    'Label': pa.string(),
                }
            }, {
                'type': pd.DataFrame,
                'fields': {
                    'Timestamp': float,
                    'Label': str,
                }
            }).errors,
            []
        )
    def test_dict_schema(self):
        """Dict-typed schemas: type mismatches, missing fields, and per-field
        type mismatches are reported."""
        # two empty schemas are trivially compatible
        self.assertEqual(SchemaValidator().validate_downstream_schema({}, {}), [])
        # top-level type mismatches
        self.assertEqual(SchemaValidator().validate_downstream_schema(
            {'type': list},
            {'type': dict}
        ).errors, ['Expects dict, receives list'])
        self.assertEqual(SchemaValidator().validate_downstream_schema(
            {'type': pd.DataFrame},
            {'type': dict}
        ).errors, ['Expects dict, receives DataFrame'])
        # downstream requires a field the upstream dict does not declare
        self.assertEqual(SchemaValidator().validate_downstream_schema(
            {'type': dict},
            {
                'type': dict,
                'fields': {
                    'abc': str,
                }
            }
        ).errors, ['Field abc is missing'])
        # field exists but has the wrong type
        self.assertEqual(SchemaValidator().validate_downstream_schema(
            {'type': dict, 'fields': {'abc': int}},
            {
                'type': dict,
                'fields': {
                    'abc': str,
                }
            }
        ).errors, ['Field abc: Expects str, receives int'])
        # self.assertEqual(SchemaValidator().validate_downstream_schema({
        # }, {
        #     'df': pa.schema([('f1', pa.int32())]),
        # }), [])
        # self.assertEqual(SchemaValidator().validate_downstream_schema({
        #     'df': pa.schema([]),
        # }, {
        #     'df': pa.schema([('f1', pa.int32())]),
        # }).errors, ['Key "df": Field "f1" is missing'])
def test_list_schema(self):
self.assertEqual(
SchemaValidator().validate_downstream_schema(
float,
{'type': list},
).errors,
['Expects list, receives float']
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(
pa.list_(pa.int64()),
pa.list_(pa.float64()),
).errors,
['List type mismatch, Expects double, receives int64']
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(
pa.list_(pa.int64()),
List[int],
).errors,
[]
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(List[int], List[int]).errors,
[]
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(List[float], List[float]).errors,
[]
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(List[str], List[int]).errors,
['List type mismatch, Expects int, receives str']
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(
{'type': list, 'item': float},
List[str],
).errors,
['List type mismatch, Expects str, receives float']
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(
List[str],
{'type': list, 'item': float},
).errors,
['List type mismatch, Expects float, receives str']
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(
{'type': list, 'item': str},
{'type': list, 'item': float},
).errors,
['List type mismatch, Expects float, receives str']
)
def test_python_type(self):
self.assertEqual(
SchemaValidator().validate_downstream_schema(str, int).errors,
["Expects int, receives str"]
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(str, Optional[str]).errors,
[]
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(str, pa.string()).errors,
[]
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(int, pa.string()).errors,
["Expects string, receives int"]
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(str, Union[str, int]).errors,
[]
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(float, Union[str, int]).errors,
["Expects typing.Union[str, int], receives <class 'float'>"]
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(Union[float, bytes], Union[str, int]).errors,
["Expects typing.Union[str, int], receives typing.Union[float, bytes]"]
)
def test_tensor(self):
self.assertEqual(
SchemaValidator().validate_downstream_schema(
float,
{'type': np.ndarray, 'item': pa.int32()},
).errors,
['Expects ndarray, receives float']
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(
{'type': np.ndarray, 'item': pa.int32()},
{'type': np.ndarray, 'item': pa.int32()},
),
[]
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(
{'type': np.ndarray, 'item': pa.int32()},
{'type': np.ndarray, 'item': pa.float64()},
).errors,
['Item type mismatch, Expects double, receives int32']
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(
{'type': np.ndarray, 'item': pa.int32(), 'shape': (2, 2)},
{'type': np.ndarray, 'item': pa.int32(), 'shape': (2, 2)},
),
[]
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(
{'type': np.ndarray, 'item': pa.int32(), 'shape': (None, 2)},
{'type': np.ndarray, 'item': pa.int32(), 'shape': (2, 2)},
),
[]
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(
{'type': np.ndarray, 'item': pa.int32(), 'shape': (None, None, 4)},
{'type': np.ndarray, 'item': pa.int32(), 'shape': (None, None, 8)},
),
['Expects shape (None, None, 8), receives shape (None, None, 4)']
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(
{'type': np.ndarray, 'item': pa.int32(), 'shape': (5, 2)},
{'type': np.ndarray, 'item': pa.int32(), 'shape': (2, 2)},
),
['Expects shape (2, 2), receives shape (5, 2)']
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(
{'type': np.ndarray, 'item': pa.int32(), 'shape': (2, 2, 2)},
{'type': np.ndarray, 'item': pa.int32(), 'shape': (2, 2)},
),
['Expects shape (2, 2), receives shape (2, 2, 2)']
)
| 32.89911 | 105 | 0.495355 | from typing import Union, Optional, List
from unittest import TestCase
import pyarrow as pa
import pandas as pd
import numpy as np
from h1st.schema.schema_validator import SchemaValidator
dummy = lambda: None
class SchemaTestCase(TestCase):
def test_validate_schema(self):
self.assertTrue(SchemaValidator().validate_downstream_schema(pa.schema([]), pa.schema([])) == [])
self.assertTrue(SchemaValidator().validate_downstream_schema({}, {}) == [])
self.assertTrue(SchemaValidator().validate_downstream_schema(pa.schema([
('f1', pa.int16())
]), None) == [])
self.assertEqual(
SchemaValidator().validate_downstream_schema(None, pa.schema([])),
[],
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(
pa.schema([('f1', pa.int32())]),
pa.schema([('f2', pa.int32())]),
).errors,
['Field "f2" is missing'],
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(
pa.schema([('f1', pa.string())]),
pa.schema([('f1', pa.int32())]),
),
['Field "f1": Expects int32, receives string'],
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(
pa.schema([('f1', pa.int32())]),
pa.schema([('f1', pa.int32())]),
),
[],
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(
pa.schema([('f1', pa.int32(), ('f2', pa.int32()))]),
pa.schema([('f1', pa.int32())]),
),
[],
)
def test_dataframe(self):
self.assertEqual(SchemaValidator().validate_downstream_schema(
{'type': pd.DataFrame},
{'type': pd.DataFrame}
).errors, [])
self.assertEqual(SchemaValidator().validate_downstream_schema(
{'type': pd.DataFrame},
{'type': list}
).errors, ['Expects list, receives DataFrame'])
self.assertEqual(SchemaValidator().validate_downstream_schema(
{'type': pd.DataFrame, 'fields': {
'abc': str,
'def': str,
'myfield': int,
}},
{'type': pd.DataFrame, 'fields': {
'abc': int,
'def': {'type': str},
'myfield': float,
}},
).errors, ['Field abc: Expects int, receives str', 'Field myfield: Expects float, receives int'])
self.assertEqual(SchemaValidator().validate_downstream_schema(
{'type': pd.DataFrame, 'fields': {
'abc': str,
}},
pd.DataFrame,
).errors, [])
self.assertEqual(SchemaValidator().validate_downstream_schema(
{'type': pd.DataFrame, 'fields': {
'abc': str,
}},
{'type': pd.DataFrame, 'fields': {
'abc': str,
'def': Optional[str],
}},
).errors, [])
self.assertEqual(
SchemaValidator().validate_downstream_schema({
'type': pd.DataFrame,
'fields': {
'Timestamp': pa.float64(),
'CarSpeed': pa.float64(),
'Gx': pa.float64(),
'Gy': pa.float64(),
'Label': pa.string(),
}
}, {
'type': pd.DataFrame,
'fields': {
'Timestamp': float,
'Label': str,
}
}).errors,
[]
)
def test_dict_schema(self):
self.assertEqual(SchemaValidator().validate_downstream_schema({}, {}), [])
self.assertEqual(SchemaValidator().validate_downstream_schema(
{'type': list},
{'type': dict}
).errors, ['Expects dict, receives list'])
self.assertEqual(SchemaValidator().validate_downstream_schema(
{'type': pd.DataFrame},
{'type': dict}
).errors, ['Expects dict, receives DataFrame'])
self.assertEqual(SchemaValidator().validate_downstream_schema(
{'type': dict},
{
'type': dict,
'fields': {
'abc': str,
}
}
).errors, ['Field abc is missing'])
self.assertEqual(SchemaValidator().validate_downstream_schema(
{'type': dict, 'fields': {'abc': int}},
{
'type': dict,
'fields': {
'abc': str,
}
}
).errors, ['Field abc: Expects str, receives int'])
def test_list_schema(self):
self.assertEqual(
SchemaValidator().validate_downstream_schema(
float,
{'type': list},
).errors,
['Expects list, receives float']
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(
pa.list_(pa.int64()),
pa.list_(pa.float64()),
).errors,
['List type mismatch, Expects double, receives int64']
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(
pa.list_(pa.int64()),
List[int],
).errors,
[]
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(List[int], List[int]).errors,
[]
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(List[float], List[float]).errors,
[]
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(List[str], List[int]).errors,
['List type mismatch, Expects int, receives str']
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(
{'type': list, 'item': float},
List[str],
).errors,
['List type mismatch, Expects str, receives float']
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(
List[str],
{'type': list, 'item': float},
).errors,
['List type mismatch, Expects float, receives str']
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(
{'type': list, 'item': str},
{'type': list, 'item': float},
).errors,
['List type mismatch, Expects float, receives str']
)
def test_python_type(self):
self.assertEqual(
SchemaValidator().validate_downstream_schema(str, int).errors,
["Expects int, receives str"]
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(str, Optional[str]).errors,
[]
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(str, pa.string()).errors,
[]
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(int, pa.string()).errors,
["Expects string, receives int"]
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(str, Union[str, int]).errors,
[]
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(float, Union[str, int]).errors,
["Expects typing.Union[str, int], receives <class 'float'>"]
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(Union[float, bytes], Union[str, int]).errors,
["Expects typing.Union[str, int], receives typing.Union[float, bytes]"]
)
def test_tensor(self):
self.assertEqual(
SchemaValidator().validate_downstream_schema(
float,
{'type': np.ndarray, 'item': pa.int32()},
).errors,
['Expects ndarray, receives float']
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(
{'type': np.ndarray, 'item': pa.int32()},
{'type': np.ndarray, 'item': pa.int32()},
),
[]
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(
{'type': np.ndarray, 'item': pa.int32()},
{'type': np.ndarray, 'item': pa.float64()},
).errors,
['Item type mismatch, Expects double, receives int32']
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(
{'type': np.ndarray, 'item': pa.int32(), 'shape': (2, 2)},
{'type': np.ndarray, 'item': pa.int32(), 'shape': (2, 2)},
),
[]
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(
{'type': np.ndarray, 'item': pa.int32(), 'shape': (None, 2)},
{'type': np.ndarray, 'item': pa.int32(), 'shape': (2, 2)},
),
[]
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(
{'type': np.ndarray, 'item': pa.int32(), 'shape': (None, None, 4)},
{'type': np.ndarray, 'item': pa.int32(), 'shape': (None, None, 8)},
),
['Expects shape (None, None, 8), receives shape (None, None, 4)']
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(
{'type': np.ndarray, 'item': pa.int32(), 'shape': (5, 2)},
{'type': np.ndarray, 'item': pa.int32(), 'shape': (2, 2)},
),
['Expects shape (2, 2), receives shape (5, 2)']
)
self.assertEqual(
SchemaValidator().validate_downstream_schema(
{'type': np.ndarray, 'item': pa.int32(), 'shape': (2, 2, 2)},
{'type': np.ndarray, 'item': pa.int32(), 'shape': (2, 2)},
),
['Expects shape (2, 2), receives shape (2, 2, 2)']
)
| true | true |
f730cdbd78f4b2942915fd52265954f7c4985bfb | 20,098 | py | Python | synapse/app/generic_worker.py | jklippel/synapse | 451f25172afc0ce46e416c73fa703c5edf279d54 | [
"Apache-2.0"
] | null | null | null | synapse/app/generic_worker.py | jklippel/synapse | 451f25172afc0ce46e416c73fa703c5edf279d54 | [
"Apache-2.0"
] | null | null | null | synapse/app/generic_worker.py | jklippel/synapse | 451f25172afc0ce46e416c73fa703c5edf279d54 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2016 OpenMarket Ltd
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from typing import Dict, Optional
from twisted.internet import address
from twisted.web.resource import IResource
from twisted.web.server import Request
import synapse
import synapse.events
from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
from synapse.api.urls import (
CLIENT_API_PREFIX,
FEDERATION_PREFIX,
LEGACY_MEDIA_PREFIX,
MEDIA_PREFIX,
SERVER_KEY_V2_PREFIX,
)
from synapse.app import _base
from synapse.app._base import max_request_body_size, register_start
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.config.server import ListenerConfig
from synapse.federation.transport.server import TransportLayerServer
from synapse.http.server import JsonResource, OptionsResource
from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.filtering import SlavedFilteringStore
from synapse.replication.slave.storage.groups import SlavedGroupServerStore
from synapse.replication.slave.storage.keys import SlavedKeyStore
from synapse.replication.slave.storage.profile import SlavedProfileStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.rest.admin import register_servlets_for_media_repo
from synapse.rest.client.v1 import events, login, presence, room
from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
from synapse.rest.client.v1.profile import (
ProfileAvatarURLRestServlet,
ProfileDisplaynameRestServlet,
ProfileRestServlet,
)
from synapse.rest.client.v1.push_rule import PushRuleRestServlet
from synapse.rest.client.v1.voip import VoipRestServlet
from synapse.rest.client.v2_alpha import (
account_data,
groups,
read_marker,
receipts,
room_keys,
sync,
tags,
user_directory,
)
from synapse.rest.client.v2_alpha._base import client_patterns
from synapse.rest.client.v2_alpha.account import ThreepidRestServlet
from synapse.rest.client.v2_alpha.account_data import (
AccountDataServlet,
RoomAccountDataServlet,
)
from synapse.rest.client.v2_alpha.devices import DevicesRestServlet
from synapse.rest.client.v2_alpha.keys import (
KeyChangesServlet,
KeyQueryServlet,
OneTimeKeyServlet,
)
from synapse.rest.client.v2_alpha.register import RegisterRestServlet
from synapse.rest.client.v2_alpha.sendtodevice import SendToDeviceRestServlet
from synapse.rest.client.versions import VersionsRestServlet
from synapse.rest.health import HealthResource
from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.rest.synapse.client import build_synapse_client_resource_tree
from synapse.server import HomeServer
from synapse.storage.databases.main.censor_events import CensorEventsStore
from synapse.storage.databases.main.client_ips import ClientIpWorkerStore
from synapse.storage.databases.main.e2e_room_keys import EndToEndRoomKeyStore
from synapse.storage.databases.main.media_repository import MediaRepositoryStore
from synapse.storage.databases.main.metrics import ServerMetricsStore
from synapse.storage.databases.main.monthly_active_users import (
MonthlyActiveUsersWorkerStore,
)
from synapse.storage.databases.main.presence import PresenceStore
from synapse.storage.databases.main.search import SearchWorkerStore
from synapse.storage.databases.main.stats import StatsStore
from synapse.storage.databases.main.transactions import TransactionWorkerStore
from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore
from synapse.storage.databases.main.user_directory import UserDirectoryStore
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.versionstring import get_version_string
logger = logging.getLogger("synapse.app.generic_worker")
class KeyUploadServlet(RestServlet):
"""An implementation of the `KeyUploadServlet` that responds to read only
requests, but otherwise proxies through to the master instance.
"""
PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")
def __init__(self, hs):
"""
Args:
hs (synapse.server.HomeServer): server
"""
super().__init__()
self.auth = hs.get_auth()
self.store = hs.get_datastore()
self.http_client = hs.get_simple_http_client()
self.main_uri = hs.config.worker_main_http_uri
async def on_POST(self, request: Request, device_id: Optional[str]):
requester = await self.auth.get_user_by_req(request, allow_guest=True)
user_id = requester.user.to_string()
body = parse_json_object_from_request(request)
if device_id is not None:
# passing the device_id here is deprecated; however, we allow it
# for now for compatibility with older clients.
if requester.device_id is not None and device_id != requester.device_id:
logger.warning(
"Client uploading keys for a different device "
"(logged in as %s, uploading for %s)",
requester.device_id,
device_id,
)
else:
device_id = requester.device_id
if device_id is None:
raise SynapseError(
400, "To upload keys, you must pass device_id when authenticating"
)
if body:
# They're actually trying to upload something, proxy to main synapse.
# Proxy headers from the original request, such as the auth headers
# (in case the access token is there) and the original IP /
# User-Agent of the request.
headers = {
header: request.requestHeaders.getRawHeaders(header, [])
for header in (b"Authorization", b"User-Agent")
}
# Add the previous hop to the X-Forwarded-For header.
x_forwarded_for = request.requestHeaders.getRawHeaders(
b"X-Forwarded-For", []
)
# we use request.client here, since we want the previous hop, not the
# original client (as returned by request.getClientAddress()).
if isinstance(request.client, (address.IPv4Address, address.IPv6Address)):
previous_host = request.client.host.encode("ascii")
# If the header exists, add to the comma-separated list of the first
# instance of the header. Otherwise, generate a new header.
if x_forwarded_for:
x_forwarded_for = [
x_forwarded_for[0] + b", " + previous_host
] + x_forwarded_for[1:]
else:
x_forwarded_for = [previous_host]
headers[b"X-Forwarded-For"] = x_forwarded_for
# Replicate the original X-Forwarded-Proto header. Note that
# XForwardedForRequest overrides isSecure() to give us the original protocol
# used by the client, as opposed to the protocol used by our upstream proxy
# - which is what we want here.
headers[b"X-Forwarded-Proto"] = [
b"https" if request.isSecure() else b"http"
]
try:
result = await self.http_client.post_json_get_json(
self.main_uri + request.uri.decode("ascii"), body, headers=headers
)
except HttpResponseException as e:
raise e.to_synapse_error() from e
except RequestSendFailed as e:
raise SynapseError(502, "Failed to talk to master") from e
return 200, result
else:
# Just interested in counts.
result = await self.store.count_e2e_one_time_keys(user_id, device_id)
return 200, {"one_time_key_counts": result}
class GenericWorkerSlavedStore(
# FIXME(#3714): We need to add UserDirectoryStore as we write directly
# rather than going via the correct worker.
UserDirectoryStore,
StatsStore,
UIAuthWorkerStore,
EndToEndRoomKeyStore,
PresenceStore,
SlavedDeviceInboxStore,
SlavedDeviceStore,
SlavedReceiptsStore,
SlavedPushRuleStore,
SlavedGroupServerStore,
SlavedAccountDataStore,
SlavedPusherStore,
CensorEventsStore,
ClientIpWorkerStore,
SlavedEventStore,
SlavedKeyStore,
RoomStore,
DirectoryStore,
SlavedApplicationServiceStore,
SlavedRegistrationStore,
SlavedTransactionStore,
SlavedProfileStore,
SlavedClientIpStore,
SlavedFilteringStore,
MonthlyActiveUsersWorkerStore,
MediaRepositoryStore,
ServerMetricsStore,
SearchWorkerStore,
TransactionWorkerStore,
BaseSlavedStore,
):
pass
class GenericWorkerServer(HomeServer):
DATASTORE_CLASS = GenericWorkerSlavedStore
def _listen_http(self, listener_config: ListenerConfig):
port = listener_config.port
bind_addresses = listener_config.bind_addresses
assert listener_config.http_options is not None
site_tag = listener_config.http_options.tag
if site_tag is None:
site_tag = port
# We always include a health resource.
resources = {"/health": HealthResource()} # type: Dict[str, IResource]
for res in listener_config.http_options.resources:
for name in res.names:
if name == "metrics":
resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
elif name == "client":
resource = JsonResource(self, canonical_json=False)
RegisterRestServlet(self).register(resource)
login.register_servlets(self, resource)
ThreepidRestServlet(self).register(resource)
DevicesRestServlet(self).register(resource)
KeyQueryServlet(self).register(resource)
OneTimeKeyServlet(self).register(resource)
KeyChangesServlet(self).register(resource)
VoipRestServlet(self).register(resource)
PushRuleRestServlet(self).register(resource)
VersionsRestServlet(self).register(resource)
ProfileAvatarURLRestServlet(self).register(resource)
ProfileDisplaynameRestServlet(self).register(resource)
ProfileRestServlet(self).register(resource)
KeyUploadServlet(self).register(resource)
AccountDataServlet(self).register(resource)
RoomAccountDataServlet(self).register(resource)
sync.register_servlets(self, resource)
events.register_servlets(self, resource)
room.register_servlets(self, resource, True)
room.register_deprecated_servlets(self, resource)
InitialSyncRestServlet(self).register(resource)
room_keys.register_servlets(self, resource)
tags.register_servlets(self, resource)
account_data.register_servlets(self, resource)
receipts.register_servlets(self, resource)
read_marker.register_servlets(self, resource)
SendToDeviceRestServlet(self).register(resource)
user_directory.register_servlets(self, resource)
presence.register_servlets(self, resource)
groups.register_servlets(self, resource)
resources.update({CLIENT_API_PREFIX: resource})
resources.update(build_synapse_client_resource_tree(self))
elif name == "federation":
resources.update({FEDERATION_PREFIX: TransportLayerServer(self)})
elif name == "media":
if self.config.can_load_media_repo:
media_repo = self.get_media_repository_resource()
# We need to serve the admin servlets for media on the
# worker.
admin_resource = JsonResource(self, canonical_json=False)
register_servlets_for_media_repo(self, admin_resource)
resources.update(
{
MEDIA_PREFIX: media_repo,
LEGACY_MEDIA_PREFIX: media_repo,
"/_synapse/admin": admin_resource,
}
)
else:
logger.warning(
"A 'media' listener is configured but the media"
" repository is disabled. Ignoring."
)
if name == "openid" and "federation" not in res.names:
# Only load the openid resource separately if federation resource
# is not specified since federation resource includes openid
# resource.
resources.update(
{
FEDERATION_PREFIX: TransportLayerServer(
self, servlet_groups=["openid"]
)
}
)
if name in ["keys", "federation"]:
resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)
if name == "replication":
resources[REPLICATION_PREFIX] = ReplicationRestResource(self)
root_resource = create_resource_tree(resources, OptionsResource())
_base.listen_tcp(
bind_addresses,
port,
SynapseSite(
"synapse.access.http.%s" % (site_tag,),
site_tag,
listener_config,
root_resource,
self.version_string,
max_request_body_size=max_request_body_size(self.config),
reactor=self.get_reactor(),
),
reactor=self.get_reactor(),
)
logger.info("Synapse worker now listening on port %d", port)
def start_listening(self):
for listener in self.config.worker_listeners:
if listener.type == "http":
self._listen_http(listener)
elif listener.type == "manhole":
_base.listen_manhole(
listener.bind_addresses, listener.port, manhole_globals={"hs": self}
)
elif listener.type == "metrics":
if not self.config.enable_metrics:
logger.warning(
(
"Metrics listener configured, but "
"enable_metrics is not True!"
)
)
else:
_base.listen_metrics(listener.bind_addresses, listener.port)
else:
logger.warning("Unsupported listener type: %s", listener.type)
self.get_tcp_replication().start_replication(self)
def start(config_options):
try:
config = HomeServerConfig.load_config("Synapse worker", config_options)
except ConfigError as e:
sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
# For backwards compatibility let any of the old app names.
assert config.worker_app in (
"synapse.app.appservice",
"synapse.app.client_reader",
"synapse.app.event_creator",
"synapse.app.federation_reader",
"synapse.app.federation_sender",
"synapse.app.frontend_proxy",
"synapse.app.generic_worker",
"synapse.app.media_repository",
"synapse.app.pusher",
"synapse.app.synchrotron",
"synapse.app.user_dir",
)
if config.worker_app == "synapse.app.appservice":
if config.appservice.notify_appservices:
sys.stderr.write(
"\nThe appservices must be disabled in the main synapse process"
"\nbefore they can be run in a separate worker."
"\nPlease add ``notify_appservices: false`` to the main config"
"\n"
)
sys.exit(1)
# Force the appservice to start since they will be disabled in the main config
config.appservice.notify_appservices = True
else:
# For other worker types we force this to off.
config.appservice.notify_appservices = False
if config.worker_app == "synapse.app.user_dir":
if config.server.update_user_directory:
sys.stderr.write(
"\nThe update_user_directory must be disabled in the main synapse process"
"\nbefore they can be run in a separate worker."
"\nPlease add ``update_user_directory: false`` to the main config"
"\n"
)
sys.exit(1)
# Force the pushers to start since they will be disabled in the main config
config.server.update_user_directory = True
else:
# For other worker types we force this to off.
config.server.update_user_directory = False
synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage
if config.server.gc_seconds:
synapse.metrics.MIN_TIME_BETWEEN_GCS = config.server.gc_seconds
hs = GenericWorkerServer(
config.server_name,
config=config,
version_string="Synapse/" + get_version_string(synapse),
)
setup_logging(hs, config, use_worker_options=True)
hs.setup()
# Ensure the replication streamer is always started in case we write to any
# streams. Will no-op if no streams can be written to by this worker.
hs.get_replication_streamer()
register_start(_base.start, hs)
_base.start_worker_reactor("synapse-generic-worker", config)
if __name__ == "__main__":
with LoggingContext("main"):
start(sys.argv[1:])
| 41.524793 | 90 | 0.661359 |
import logging
import sys
from typing import Dict, Optional
from twisted.internet import address
from twisted.web.resource import IResource
from twisted.web.server import Request
import synapse
import synapse.events
from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
from synapse.api.urls import (
CLIENT_API_PREFIX,
FEDERATION_PREFIX,
LEGACY_MEDIA_PREFIX,
MEDIA_PREFIX,
SERVER_KEY_V2_PREFIX,
)
from synapse.app import _base
from synapse.app._base import max_request_body_size, register_start
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.config.server import ListenerConfig
from synapse.federation.transport.server import TransportLayerServer
from synapse.http.server import JsonResource, OptionsResource
from synapse.http.servlet import RestServlet, parse_json_object_from_request
from synapse.http.site import SynapseSite
from synapse.logging.context import LoggingContext
from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
from synapse.replication.slave.storage.deviceinbox import SlavedDeviceInboxStore
from synapse.replication.slave.storage.devices import SlavedDeviceStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.filtering import SlavedFilteringStore
from synapse.replication.slave.storage.groups import SlavedGroupServerStore
from synapse.replication.slave.storage.keys import SlavedKeyStore
from synapse.replication.slave.storage.profile import SlavedProfileStore
from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore
from synapse.replication.slave.storage.pushers import SlavedPusherStore
from synapse.replication.slave.storage.receipts import SlavedReceiptsStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.transactions import SlavedTransactionStore
from synapse.rest.admin import register_servlets_for_media_repo
from synapse.rest.client.v1 import events, login, presence, room
from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet
from synapse.rest.client.v1.profile import (
ProfileAvatarURLRestServlet,
ProfileDisplaynameRestServlet,
ProfileRestServlet,
)
from synapse.rest.client.v1.push_rule import PushRuleRestServlet
from synapse.rest.client.v1.voip import VoipRestServlet
from synapse.rest.client.v2_alpha import (
account_data,
groups,
read_marker,
receipts,
room_keys,
sync,
tags,
user_directory,
)
from synapse.rest.client.v2_alpha._base import client_patterns
from synapse.rest.client.v2_alpha.account import ThreepidRestServlet
from synapse.rest.client.v2_alpha.account_data import (
AccountDataServlet,
RoomAccountDataServlet,
)
from synapse.rest.client.v2_alpha.devices import DevicesRestServlet
from synapse.rest.client.v2_alpha.keys import (
KeyChangesServlet,
KeyQueryServlet,
OneTimeKeyServlet,
)
from synapse.rest.client.v2_alpha.register import RegisterRestServlet
from synapse.rest.client.v2_alpha.sendtodevice import SendToDeviceRestServlet
from synapse.rest.client.versions import VersionsRestServlet
from synapse.rest.health import HealthResource
from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.rest.synapse.client import build_synapse_client_resource_tree
from synapse.server import HomeServer
from synapse.storage.databases.main.censor_events import CensorEventsStore
from synapse.storage.databases.main.client_ips import ClientIpWorkerStore
from synapse.storage.databases.main.e2e_room_keys import EndToEndRoomKeyStore
from synapse.storage.databases.main.media_repository import MediaRepositoryStore
from synapse.storage.databases.main.metrics import ServerMetricsStore
from synapse.storage.databases.main.monthly_active_users import (
MonthlyActiveUsersWorkerStore,
)
from synapse.storage.databases.main.presence import PresenceStore
from synapse.storage.databases.main.search import SearchWorkerStore
from synapse.storage.databases.main.stats import StatsStore
from synapse.storage.databases.main.transactions import TransactionWorkerStore
from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore
from synapse.storage.databases.main.user_directory import UserDirectoryStore
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.versionstring import get_version_string
# Module-level logger for the generic worker entry point.
logger = logging.getLogger("synapse.app.generic_worker")
class KeyUploadServlet(RestServlet):
    """Worker-side implementation of the `/keys/upload` client endpoint.

    A request with a non-empty body (an actual key upload) is proxied to the
    main process at ``worker_main_http_uri``, since only the main process
    writes E2E keys; a request with an empty body is just a query for the
    current one-time key counts, which the worker can answer locally.
    """

    PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")

    def __init__(self, hs):
        # hs: the worker's HomeServer instance.
        super().__init__()
        self.auth = hs.get_auth()
        self.store = hs.get_datastore()
        self.http_client = hs.get_simple_http_client()
        # Base URI of the main process; used when proxying uploads.
        self.main_uri = hs.config.worker_main_http_uri

    async def on_POST(self, request: Request, device_id: Optional[str]):
        """Handle a key upload or key-count query.

        Returns a ``(200, json_body)`` tuple per RestServlet convention.
        Raises SynapseError(400) if no device_id can be determined, or
        SynapseError(502) if the main process cannot be reached.
        """
        requester = await self.auth.get_user_by_req(request, allow_guest=True)
        user_id = requester.user.to_string()
        body = parse_json_object_from_request(request)

        if device_id is not None:
            # An explicit device_id in the path is allowed but should normally
            # match the device the requester is logged in as; warn otherwise.
            if requester.device_id is not None and device_id != requester.device_id:
                logger.warning(
                    "Client uploading keys for a different device "
                    "(logged in as %s, uploading for %s)",
                    requester.device_id,
                    device_id,
                )
        else:
            device_id = requester.device_id

        if device_id is None:
            raise SynapseError(
                400, "To upload keys, you must pass device_id when authenticating"
            )

        if body:
            # Proxy headers from the original request, such as the auth headers
            # (in case the access token is there) and the original IP /
            # User-Agent of the request.
            headers = {
                header: request.requestHeaders.getRawHeaders(header, [])
                for header in (b"Authorization", b"User-Agent")
            }
            # Add the previous hop to the X-Forwarded-For header.
            x_forwarded_for = request.requestHeaders.getRawHeaders(
                b"X-Forwarded-For", []
            )
            # we use request.client here, since we want the previous hop, not the
            # original client (as returned by request.getClientAddress()).
            if isinstance(request.client, (address.IPv4Address, address.IPv6Address)):
                previous_host = request.client.host.encode("ascii")
                # If the header exists, add to the comma-separated list of the first
                # instance of the header. Otherwise, generate a new header.
                if x_forwarded_for:
                    x_forwarded_for = [
                        x_forwarded_for[0] + b", " + previous_host
                    ] + x_forwarded_for[1:]
                else:
                    x_forwarded_for = [previous_host]
            headers[b"X-Forwarded-For"] = x_forwarded_for

            # Replicate the original X-Forwarded-Proto header. Note that
            # XForwardedForRequest overrides isSecure() to give us the original protocol
            # used by the client, as opposed to the protocol used by our upstream proxy
            # - which is what we want here.
            headers[b"X-Forwarded-Proto"] = [
                b"https" if request.isSecure() else b"http"
            ]

            try:
                result = await self.http_client.post_json_get_json(
                    self.main_uri + request.uri.decode("ascii"), body, headers=headers
                )
            except HttpResponseException as e:
                raise e.to_synapse_error() from e
            except RequestSendFailed as e:
                # Main process unreachable; surface as a 502 to the client.
                raise SynapseError(502, "Failed to talk to master") from e

            return 200, result
        else:
            # Just interested in counts.
            result = await self.store.count_e2e_one_time_keys(user_id, device_id)
            return 200, {"one_time_key_counts": result}
class GenericWorkerSlavedStore(
    # FIXME(#3714): We need to add UserDirectoryStore as we write directly
    # rather than going via the correct worker.
    UserDirectoryStore,
    StatsStore,
    UIAuthWorkerStore,
    EndToEndRoomKeyStore,
    PresenceStore,
    SlavedDeviceInboxStore,
    SlavedDeviceStore,
    SlavedReceiptsStore,
    SlavedPushRuleStore,
    SlavedGroupServerStore,
    SlavedAccountDataStore,
    SlavedPusherStore,
    CensorEventsStore,
    ClientIpWorkerStore,
    SlavedEventStore,
    SlavedKeyStore,
    RoomStore,
    DirectoryStore,
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    SlavedTransactionStore,
    SlavedProfileStore,
    SlavedClientIpStore,
    SlavedFilteringStore,
    MonthlyActiveUsersWorkerStore,
    MediaRepositoryStore,
    ServerMetricsStore,
    SearchWorkerStore,
    TransactionWorkerStore,
    BaseSlavedStore,
):
    """Composite datastore used by all generic workers.

    Mixes in every slaved/worker store a generic worker might need; which
    subset is exercised depends on the worker's configured role.
    NOTE(review): the base-class list defines the MRO, so the order is
    significant — do not reorder casually.
    """

    pass
class GenericWorkerServer(HomeServer):
    """A HomeServer subclass for generic workers, backed by the slaved store."""

    DATASTORE_CLASS = GenericWorkerSlavedStore

    def _listen_http(self, listener_config: ListenerConfig):
        """Set up an HTTP listener, registering the resources named in the
        listener's config (client / federation / media / metrics / keys /
        openid / replication) and binding the resulting site.
        """
        port = listener_config.port
        bind_addresses = listener_config.bind_addresses

        assert listener_config.http_options is not None

        # Use the configured tag for access-log naming, falling back to port.
        site_tag = listener_config.http_options.tag
        if site_tag is None:
            site_tag = port

        # We always include a health resource.
        resources = {"/health": HealthResource()}  # type: Dict[str, IResource]

        for res in listener_config.http_options.resources:
            for name in res.names:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
                elif name == "client":
                    # Register the subset of client-server API servlets a
                    # worker can serve.
                    resource = JsonResource(self, canonical_json=False)

                    RegisterRestServlet(self).register(resource)
                    login.register_servlets(self, resource)
                    ThreepidRestServlet(self).register(resource)
                    DevicesRestServlet(self).register(resource)
                    KeyQueryServlet(self).register(resource)
                    OneTimeKeyServlet(self).register(resource)
                    KeyChangesServlet(self).register(resource)
                    VoipRestServlet(self).register(resource)
                    PushRuleRestServlet(self).register(resource)
                    VersionsRestServlet(self).register(resource)

                    ProfileAvatarURLRestServlet(self).register(resource)
                    ProfileDisplaynameRestServlet(self).register(resource)
                    ProfileRestServlet(self).register(resource)
                    KeyUploadServlet(self).register(resource)
                    AccountDataServlet(self).register(resource)
                    RoomAccountDataServlet(self).register(resource)

                    sync.register_servlets(self, resource)
                    events.register_servlets(self, resource)
                    room.register_servlets(self, resource, True)
                    room.register_deprecated_servlets(self, resource)
                    InitialSyncRestServlet(self).register(resource)
                    room_keys.register_servlets(self, resource)
                    tags.register_servlets(self, resource)
                    account_data.register_servlets(self, resource)
                    receipts.register_servlets(self, resource)
                    read_marker.register_servlets(self, resource)

                    SendToDeviceRestServlet(self).register(resource)

                    user_directory.register_servlets(self, resource)

                    presence.register_servlets(self, resource)

                    groups.register_servlets(self, resource)

                    resources.update({CLIENT_API_PREFIX: resource})

                    resources.update(build_synapse_client_resource_tree(self))
                elif name == "federation":
                    resources.update({FEDERATION_PREFIX: TransportLayerServer(self)})
                elif name == "media":
                    if self.config.can_load_media_repo:
                        media_repo = self.get_media_repository_resource()

                        # We need to serve the admin servlets for media on the
                        # worker.
                        admin_resource = JsonResource(self, canonical_json=False)
                        register_servlets_for_media_repo(self, admin_resource)

                        resources.update(
                            {
                                MEDIA_PREFIX: media_repo,
                                LEGACY_MEDIA_PREFIX: media_repo,
                                "/_synapse/admin": admin_resource,
                            }
                        )
                    else:
                        logger.warning(
                            "A 'media' listener is configured but the media"
                            " repository is disabled. Ignoring."
                        )

                if name == "openid" and "federation" not in res.names:
                    # Only load the openid resource separately if federation resource
                    # is not specified since federation resource includes openid
                    # resource.
                    resources.update(
                        {
                            FEDERATION_PREFIX: TransportLayerServer(
                                self, servlet_groups=["openid"]
                            )
                        }
                    )

                if name in ["keys", "federation"]:
                    resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)

                if name == "replication":
                    resources[REPLICATION_PREFIX] = ReplicationRestResource(self)

        root_resource = create_resource_tree(resources, OptionsResource())

        _base.listen_tcp(
            bind_addresses,
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
                self.version_string,
                max_request_body_size=max_request_body_size(self.config),
                reactor=self.get_reactor(),
            ),
            reactor=self.get_reactor(),
        )

        logger.info("Synapse worker now listening on port %d", port)

    def start_listening(self):
        """Bind every configured worker listener (http / manhole / metrics)
        and start the replication connection to the main process.
        """
        for listener in self.config.worker_listeners:
            if listener.type == "http":
                self._listen_http(listener)
            elif listener.type == "manhole":
                _base.listen_manhole(
                    listener.bind_addresses, listener.port, manhole_globals={"hs": self}
                )
            elif listener.type == "metrics":
                if not self.config.enable_metrics:
                    logger.warning(
                        (
                            "Metrics listener configured, but "
                            "enable_metrics is not True!"
                        )
                    )
                else:
                    _base.listen_metrics(listener.bind_addresses, listener.port)
            else:
                logger.warning("Unsupported listener type: %s", listener.type)

        self.get_tcp_replication().start_replication(self)
def start(config_options):
    """Entry point: parse the worker config and run a generic worker.

    config_options: command-line arguments (typically sys.argv[1:]).
    Exits the process with status 1 on configuration errors.
    """
    try:
        config = HomeServerConfig.load_config("Synapse worker", config_options)
    except ConfigError as e:
        sys.stderr.write("\n" + str(e) + "\n")
        sys.exit(1)

    # For backwards compatibility let any of the old app names.
    assert config.worker_app in (
        "synapse.app.appservice",
        "synapse.app.client_reader",
        "synapse.app.event_creator",
        "synapse.app.federation_reader",
        "synapse.app.federation_sender",
        "synapse.app.frontend_proxy",
        "synapse.app.generic_worker",
        "synapse.app.media_repository",
        "synapse.app.pusher",
        "synapse.app.synchrotron",
        "synapse.app.user_dir",
    )

    if config.worker_app == "synapse.app.appservice":
        # Appservice traffic must be handled by exactly one process: refuse to
        # start if the main process still has it enabled.
        if config.appservice.notify_appservices:
            sys.stderr.write(
                "\nThe appservices must be disabled in the main synapse process"
                "\nbefore they can be run in a separate worker."
                "\nPlease add ``notify_appservices: false`` to the main config"
                "\n"
            )
            sys.exit(1)

        # Force the appservice to start since they will be disabled in the main config
        config.appservice.notify_appservices = True
    else:
        # For other worker types we force this to off.
        config.appservice.notify_appservices = False

    if config.worker_app == "synapse.app.user_dir":
        # Same single-owner rule for user-directory updates.
        if config.server.update_user_directory:
            sys.stderr.write(
                "\nThe update_user_directory must be disabled in the main synapse process"
                "\nbefore they can be run in a separate worker."
                "\nPlease add ``update_user_directory: false`` to the main config"
                "\n"
            )
            sys.exit(1)

        # Force the pushers to start since they will be disabled in the main config
        config.server.update_user_directory = True
    else:
        # For other worker types we force this to off.
        config.server.update_user_directory = False

    # Apply process-wide tunables before constructing the server.
    synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts
    synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage

    if config.server.gc_seconds:
        synapse.metrics.MIN_TIME_BETWEEN_GCS = config.server.gc_seconds

    hs = GenericWorkerServer(
        config.server_name,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
    )

    setup_logging(hs, config, use_worker_options=True)

    hs.setup()

    # Ensure the replication streamer is always started in case we write to any
    # streams. Will no-op if no streams can be written to by this worker.
    hs.get_replication_streamer()

    register_start(_base.start, hs)

    _base.start_worker_reactor("synapse-generic-worker", config)
if __name__ == "__main__":
    # Establish a root logging context for the main entry point.
    with LoggingContext("main"):
        start(sys.argv[1:])
| true | true |
f730cf032970e255c2e4811c7121b858b9e1fb44 | 35,119 | py | Python | mrcnn/utils.py | mohamedelbeih/Hairloss-Areas-Semantic-Segmentation | 49d8a07d600c538e3b706a531af4029823b70236 | [
"MIT"
] | null | null | null | mrcnn/utils.py | mohamedelbeih/Hairloss-Areas-Semantic-Segmentation | 49d8a07d600c538e3b706a531af4029823b70236 | [
"MIT"
] | null | null | null | mrcnn/utils.py | mohamedelbeih/Hairloss-Areas-Semantic-Segmentation | 49d8a07d600c538e3b706a531af4029823b70236 | [
"MIT"
] | 1 | 2021-04-22T01:39:25.000Z | 2021-04-22T01:39:25.000Z | """
Mask R-CNN
Common utility functions and classes.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Waleed Abdulla
"""
import sys
import os
import logging
import math
import random
import numpy as np
import tensorflow as tf
import scipy
import skimage.color
import skimage.io
import skimage.transform
import urllib.request
import shutil
import warnings
from distutils.version import LooseVersion
# URL from which to download the latest COCO trained weights
COCO_MODEL_URL = "https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5"
############################################################
# Bounding Boxes
############################################################
def extract_bboxes(mask):
    """Compute bounding boxes from masks.

    mask: [height, width, num_instances]. Mask pixels are either 1 or 0.

    Returns: bbox array [num_instances, (y1, x1, y2, x2)].
    """
    num_instances = mask.shape[-1]
    boxes = np.zeros([num_instances, 4], dtype=np.int32)
    for idx in range(num_instances):
        instance = mask[:, :, idx]
        # Columns / rows that contain at least one mask pixel.
        cols = np.where(instance.any(axis=0))[0]
        rows = np.where(instance.any(axis=1))[0]
        if cols.shape[0]:
            # (y2, x2) are exclusive, so extend the max index by one.
            x1, x2 = cols[0], cols[-1] + 1
            y1, y2 = rows[0], rows[-1] + 1
        else:
            # Empty mask (can happen after resizing/cropping): zero box.
            y1 = x1 = y2 = x2 = 0
        boxes[idx] = [y1, x1, y2, x2]
    return boxes.astype(np.int32)
def compute_iou(box, boxes, box_area, boxes_area):
    """Calculates IoU of the given box with the array of the given boxes.

    box: 1D vector [y1, x1, y2, x2]
    boxes: [boxes_count, (y1, x1, y2, x2)]
    box_area: float. the area of 'box'
    boxes_area: array of length boxes_count.

    Note: the areas are passed in rather than calculated here for
    efficiency. Calculate once in the caller to avoid duplicate work.
    """
    # Clipped coordinates of the intersection rectangles.
    inter_y1 = np.maximum(box[0], boxes[:, 0])
    inter_y2 = np.minimum(box[2], boxes[:, 2])
    inter_x1 = np.maximum(box[1], boxes[:, 1])
    inter_x2 = np.minimum(box[3], boxes[:, 3])
    # Negative extents mean no overlap, hence the clamp to zero.
    intersection = np.maximum(inter_x2 - inter_x1, 0) * np.maximum(inter_y2 - inter_y1, 0)
    return intersection / (box_area + boxes_area[:] - intersection)
def compute_overlaps(boxes1, boxes2):
    """Computes IoU overlaps between two sets of boxes.

    boxes1, boxes2: [N, (y1, x1, y2, x2)].

    For better performance, pass the largest set first and the smaller second.
    """
    # Areas of anchors and GT boxes.
    area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
    area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])

    # One IoU column per box in boxes2 (compute_iou inlined here so the
    # function is self-contained).
    overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))
    for col in range(overlaps.shape[1]):
        box2 = boxes2[col]
        y1 = np.maximum(box2[0], boxes1[:, 0])
        y2 = np.minimum(box2[2], boxes1[:, 2])
        x1 = np.maximum(box2[1], boxes1[:, 1])
        x2 = np.minimum(box2[3], boxes1[:, 3])
        intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)
        overlaps[:, col] = intersection / (area2[col] + area1 - intersection)
    return overlaps
def compute_overlaps_masks(masks1, masks2):
    """Computes IoU overlaps between two sets of masks.

    masks1, masks2: [Height, Width, instances]
    """
    n1, n2 = masks1.shape[-1], masks2.shape[-1]
    # If either set of masks is empty return empty result.
    if n1 == 0 or n2 == 0:
        return np.zeros((n1, n2))

    # Binarize at 0.5 and flatten each mask into a column vector.
    flat1 = (masks1 > .5).reshape(-1, n1).astype(np.float32)
    flat2 = (masks2 > .5).reshape(-1, n2).astype(np.float32)
    areas1 = flat1.sum(axis=0)
    areas2 = flat2.sum(axis=0)

    # Pairwise intersections via a single matrix product; union follows from
    # inclusion-exclusion.
    intersections = flat1.T.dot(flat2)
    unions = areas1[:, None] + areas2[None, :] - intersections
    return intersections / unions
def non_max_suppression(boxes, scores, threshold):
    """Performs non-maximum suppression and returns indices of kept boxes.

    boxes: [N, (y1, x1, y2, x2)]. Notice that (y2, x2) lays outside the box.
    scores: 1-D array of box scores.
    threshold: Float. IoU threshold to use for filtering.
    """
    assert boxes.shape[0] > 0
    if boxes.dtype.kind != "f":
        boxes = boxes.astype(np.float32)

    # Per-box areas, computed once up front.
    area = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])

    # Candidate indices, best score first.
    order = scores.argsort()[::-1]

    keep = []
    while order.size > 0:
        # Keep the best-scoring remaining box.
        top = order[0]
        keep.append(top)
        rest = order[1:]

        # IoU of the picked box against every remaining candidate
        # (compute_iou inlined so the function is self-contained).
        iy1 = np.maximum(boxes[top, 0], boxes[rest, 0])
        iy2 = np.minimum(boxes[top, 2], boxes[rest, 2])
        ix1 = np.maximum(boxes[top, 1], boxes[rest, 1])
        ix2 = np.minimum(boxes[top, 3], boxes[rest, 3])
        intersection = np.maximum(ix2 - ix1, 0) * np.maximum(iy2 - iy1, 0)
        iou = intersection / (area[top] + area[rest] - intersection)

        # Discard candidates overlapping the pick beyond the threshold.
        order = rest[iou <= threshold]

    return np.array(keep, dtype=np.int32)
def apply_box_deltas(boxes, deltas):
    """Applies the given deltas to the given boxes.

    boxes: [N, (y1, x1, y2, x2)]. Note that (y2, x2) is outside the box.
    deltas: [N, (dy, dx, log(dh), log(dw))]
    """
    boxes = boxes.astype(np.float32)
    # Re-parameterise each box as (centre, size).
    heights = boxes[:, 2] - boxes[:, 0]
    widths = boxes[:, 3] - boxes[:, 1]
    ctr_y = boxes[:, 0] + 0.5 * heights
    ctr_x = boxes[:, 1] + 0.5 * widths
    # Shift centres (scaled by box size) and rescale sizes exponentially.
    ctr_y = ctr_y + deltas[:, 0] * heights
    ctr_x = ctr_x + deltas[:, 1] * widths
    heights = heights * np.exp(deltas[:, 2])
    widths = widths * np.exp(deltas[:, 3])
    # Back to corner coordinates.
    top = ctr_y - 0.5 * heights
    left = ctr_x - 0.5 * widths
    return np.stack([top, left, top + heights, left + widths], axis=1)
def box_refinement_graph(box, gt_box):
    """Compute refinement needed to transform box to gt_box.

    box and gt_box are [N, (y1, x1, y2, x2)]

    TensorFlow graph-mode counterpart of box_refinement().
    """
    box = tf.cast(box, tf.float32)
    gt_box = tf.cast(gt_box, tf.float32)

    # (centre, size) form of the source boxes.
    h = box[:, 2] - box[:, 0]
    w = box[:, 3] - box[:, 1]
    cy = box[:, 0] + 0.5 * h
    cx = box[:, 1] + 0.5 * w

    # (centre, size) form of the ground-truth boxes.
    gt_h = gt_box[:, 2] - gt_box[:, 0]
    gt_w = gt_box[:, 3] - gt_box[:, 1]
    gt_cy = gt_box[:, 0] + 0.5 * gt_h
    gt_cx = gt_box[:, 1] + 0.5 * gt_w

    # Deltas: normalised centre shift plus log size ratios.
    return tf.stack(
        [
            (gt_cy - cy) / h,
            (gt_cx - cx) / w,
            tf.math.log(gt_h / h),
            tf.math.log(gt_w / w),
        ],
        axis=1,
    )
def box_refinement(box, gt_box):
    """Compute refinement needed to transform box to gt_box.

    box and gt_box are [N, (y1, x1, y2, x2)]. (y2, x2) is
    assumed to be outside the box.
    """
    box = box.astype(np.float32)
    gt_box = gt_box.astype(np.float32)

    # (centre, size) form of the source boxes.
    h = box[:, 2] - box[:, 0]
    w = box[:, 3] - box[:, 1]
    cy = box[:, 0] + 0.5 * h
    cx = box[:, 1] + 0.5 * w

    # (centre, size) form of the ground-truth boxes.
    gt_h = gt_box[:, 2] - gt_box[:, 0]
    gt_w = gt_box[:, 3] - gt_box[:, 1]
    gt_cy = gt_box[:, 0] + 0.5 * gt_h
    gt_cx = gt_box[:, 1] + 0.5 * gt_w

    # Deltas: normalised centre shift plus log size ratios.
    return np.stack(
        [(gt_cy - cy) / h, (gt_cx - cx) / w, np.log(gt_h / h), np.log(gt_w / w)],
        axis=1,
    )
############################################################
# Dataset
############################################################
class Dataset(object):
    """The base class for dataset classes.

    To use it, create a new class that adds functions specific to the dataset
    you want to use. For example:

    class CatsAndDogsDataset(Dataset):
        def load_cats_and_dogs(self):
            ...
        def load_mask(self, image_id):
            ...
        def image_reference(self, image_id):
            ...

    See COCODataset and ShapesDataset as examples.
    """

    def __init__(self, class_map=None):
        # NOTE(review): class_map is accepted but unused here (see prepare()).
        self._image_ids = []
        self.image_info = []
        # Background is always the first class
        self.class_info = [{"source": "", "id": 0, "name": "BG"}]
        self.source_class_ids = {}

    def add_class(self, source, class_id, class_name):
        """Register a class; duplicate (source, class_id) pairs are ignored."""
        assert "." not in source, "Source name cannot contain a dot"
        # Does the class exist already?
        for info in self.class_info:
            if info['source'] == source and info["id"] == class_id:
                # source.class_id combination already available, skip
                return
        # Add the class
        self.class_info.append({
            "source": source,
            "id": class_id,
            "name": class_name,
        })

    def add_image(self, source, image_id, path, **kwargs):
        """Register an image; extra keyword args are stored on its info dict."""
        image_info = {
            "id": image_id,
            "source": source,
            "path": path,
        }
        image_info.update(kwargs)
        self.image_info.append(image_info)

    def image_reference(self, image_id):
        """Return a link to the image in its source Website or details about
        the image that help looking it up or debugging it.

        Override for your dataset, but pass to this function
        if you encounter images not in your dataset.
        """
        return ""

    def prepare(self, class_map=None):
        """Prepares the Dataset class for use.

        Must be called after all add_class()/add_image() calls and before the
        dataset is consumed: it builds the ID arrays and lookup maps below.

        TODO: class map is not supported yet. When done, it should handle mapping
              classes from different datasets to the same class ID.
        """

        def clean_name(name):
            """Returns a shorter version of object names for cleaner display."""
            return ",".join(name.split(",")[:1])

        # Build (or rebuild) everything else from the info dicts.
        self.num_classes = len(self.class_info)
        self.class_ids = np.arange(self.num_classes)
        self.class_names = [clean_name(c["name"]) for c in self.class_info]
        self.num_images = len(self.image_info)
        self._image_ids = np.arange(self.num_images)

        # Mapping from source class and image IDs to internal IDs
        self.class_from_source_map = {"{}.{}".format(info['source'], info['id']): id
                                      for info, id in zip(self.class_info, self.class_ids)}
        self.image_from_source_map = {"{}.{}".format(info['source'], info['id']): id
                                      for info, id in zip(self.image_info, self.image_ids)}

        # Map sources to class_ids they support
        self.sources = list(set([i['source'] for i in self.class_info]))
        self.source_class_ids = {}
        # Loop over datasets
        for source in self.sources:
            self.source_class_ids[source] = []
            # Find classes that belong to this dataset
            for i, info in enumerate(self.class_info):
                # Include BG class in all datasets
                if i == 0 or source == info['source']:
                    self.source_class_ids[source].append(i)

    def map_source_class_id(self, source_class_id):
        """Takes a source class ID and returns the int class ID assigned to it.

        For example:
        dataset.map_source_class_id("coco.12") -> 23
        """
        return self.class_from_source_map[source_class_id]

    def get_source_class_id(self, class_id, source):
        """Map an internal class ID to the corresponding class ID in the source dataset."""
        info = self.class_info[class_id]
        assert info['source'] == source
        return info['id']

    @property
    def image_ids(self):
        # Contiguous internal image IDs; populated by prepare().
        return self._image_ids

    def source_image_link(self, image_id):
        """Returns the path or URL to the image.

        Override this to return a URL to the image if it's available online for easy
        debugging.
        """
        return self.image_info[image_id]["path"]

    def load_image(self, image_id):
        """Load the specified image and return a [H,W,3] Numpy array.
        """
        # Load image
        image = skimage.io.imread(self.image_info[image_id]['path'])
        # If grayscale. Convert to RGB for consistency.
        if image.ndim != 3:
            image = skimage.color.gray2rgb(image)
        # If has an alpha channel, remove it for consistency
        if image.shape[-1] == 4:
            image = image[..., :3]
        return image

    def load_mask(self, image_id):
        """Load instance masks for the given image.

        Different datasets use different ways to store masks. Override this
        method to load instance masks and return them in the form of am
        array of binary masks of shape [height, width, instances].

        Returns:
            masks: A bool array of shape [height, width, instance count] with
                a binary mask per instance.
            class_ids: a 1D array of class IDs of the instance masks.
        """
        # Override this function to load a mask from your dataset.
        # Otherwise, it returns an empty mask.
        logging.warning("You are using the default load_mask(), maybe you need to define your own one.")
        mask = np.empty([0, 0, 0])
        class_ids = np.empty([0], np.int32)
        return mask, class_ids
def resize_image(image, min_dim=None, max_dim=None, min_scale=None, mode="square"):
    """Resizes an image keeping the aspect ratio unchanged.

    min_dim: if provided, resizes the image such that it's smaller
        dimension == min_dim
    max_dim: if provided, ensures that the image longest side doesn't
        exceed this value.
    min_scale: if provided, ensure that the image is scaled up by at least
        this percent even if min_dim doesn't require it.
    mode: Resizing mode.
        none: No resizing. Return the image unchanged.
        square: Resize and pad with zeros to get a square image
            of size [max_dim, max_dim].
        pad64: Pads width and height with zeros to make them multiples of 64.
            If min_dim or min_scale are provided, it scales the image up
            before padding. max_dim is ignored in this mode.
            The multiple of 64 is needed to ensure smooth scaling of feature
            maps up and down the 6 levels of the FPN pyramid (2**6=64).
        crop: Picks random crops from the image. First, scales the image based
            on min_dim and min_scale, then picks a random crop of
            size min_dim x min_dim. Can be used in training only.
            max_dim is not used in this mode.

    Returns:
    image: the resized image
    window: (y1, x1, y2, x2). If max_dim is provided, padding might
        be inserted in the returned image. If so, this window is the
        coordinates of the image part of the full image (excluding
        the padding). The x2, y2 pixels are not included.
    scale: The scale factor used to resize the image
    padding: Padding added to the image [(top, bottom), (left, right), (0, 0)]
    """
    # Keep track of image dtype and return results in the same dtype
    image_dtype = image.dtype
    # Default window (y1, x1, y2, x2) and default scale == 1.
    h, w = image.shape[:2]
    window = (0, 0, h, w)
    scale = 1
    padding = [(0, 0), (0, 0), (0, 0)]
    crop = None
    if mode == "none":
        return image, window, scale, padding, crop
    # Scale?
    if min_dim:
        # Scale up but not down
        scale = max(1, min_dim / min(h, w))
    if min_scale and scale < min_scale:
        scale = min_scale
    # Does it exceed max dim?
    if max_dim and mode == "square":
        image_max = max(h, w)
        if round(image_max * scale) > max_dim:
            scale = max_dim / image_max
    # Resize image using bilinear interpolation
    # (`resize` is the module-local skimage wrapper defined elsewhere in
    # this file).
    if scale != 1:
        image = resize(image, (round(h * scale), round(w * scale)),
                       preserve_range=True)
    # Need padding or cropping?
    if mode == "square":
        # Get new height and width
        h, w = image.shape[:2]
        # Centre the scaled image inside a max_dim x max_dim canvas.
        top_pad = (max_dim - h) // 2
        bottom_pad = max_dim - h - top_pad
        left_pad = (max_dim - w) // 2
        right_pad = max_dim - w - left_pad
        padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]
        image = np.pad(image, padding, mode='constant', constant_values=0)
        window = (top_pad, left_pad, h + top_pad, w + left_pad)
    elif mode == "pad64":
        h, w = image.shape[:2]
        # Both sides must be divisible by 64
        assert min_dim % 64 == 0, "Minimum dimension must be a multiple of 64"
        # Height
        if h % 64 > 0:
            max_h = h - (h % 64) + 64
            top_pad = (max_h - h) // 2
            bottom_pad = max_h - h - top_pad
        else:
            top_pad = bottom_pad = 0
        # Width
        if w % 64 > 0:
            max_w = w - (w % 64) + 64
            left_pad = (max_w - w) // 2
            right_pad = max_w - w - left_pad
        else:
            left_pad = right_pad = 0
        padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]
        image = np.pad(image, padding, mode='constant', constant_values=0)
        window = (top_pad, left_pad, h + top_pad, w + left_pad)
    elif mode == "crop":
        # Pick a random crop (non-deterministic: uses the `random` module)
        h, w = image.shape[:2]
        y = random.randint(0, (h - min_dim))
        x = random.randint(0, (w - min_dim))
        crop = (y, x, min_dim, min_dim)
        image = image[y:y + min_dim, x:x + min_dim]
        window = (0, 0, min_dim, min_dim)
    else:
        raise Exception("Mode {} not supported".format(mode))
    return image.astype(image_dtype), window, scale, padding, crop
def resize_mask(mask, scale, padding, crop=None):
    """Resizes a mask using the given scale and padding.

    Typically, you get the scale and padding from resize_image() to
    ensure both, the image and the mask, are resized consistently.

    scale: mask scaling factor
    padding: Padding to add to the mask in the form
            [(top, bottom), (left, right), (0, 0)]
    """
    # scipy 0.13.0 computes zoom()'s output shape with round() rather than
    # int() and warns about it; suppress the warning here.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        mask = scipy.ndimage.zoom(mask, zoom=[scale, scale, 1], order=0)
    if crop is None:
        mask = np.pad(mask, padding, mode='constant', constant_values=0)
    else:
        y, x, h, w = crop
        mask = mask[y:y + h, x:x + w]
    return mask
def minimize_mask(bbox, mask, mini_shape):
    """Resize masks to a smaller version to reduce memory load.

    bbox: [num_instances, (y1, x1, y2, x2)] box bounding each instance.
    mask: [height, width, num_instances] full-size instance masks.
    mini_shape: (height, width) of the mini-masks to produce.

    Raises Exception if any bounding box has zero area.

    Mini-masks can be resized back to image scale using expand_masks()

    See inspect_data.ipynb notebook for more details.
    """
    # Fix: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin `bool` is the documented, behaviour-identical replacement.
    mini_mask = np.zeros(mini_shape + (mask.shape[-1],), dtype=bool)
    for i in range(mask.shape[-1]):
        # Pick slice and cast to bool in case load_mask() returned wrong dtype
        m = mask[:, :, i].astype(bool)
        y1, x1, y2, x2 = bbox[i][:4]
        m = m[y1:y2, x1:x2]
        if m.size == 0:
            raise Exception("Invalid bounding box with area of zero")
        # Resize with bilinear interpolation
        m = resize(m, mini_shape)
        mini_mask[:, :, i] = np.around(m).astype(bool)
    return mini_mask
def expand_mask(bbox, mini_mask, image_shape):
    """Resizes mini masks back to image size. Reverses the change
    of minimize_mask().

    bbox: [num_instances, (y1, x1, y2, x2)] box bounding each instance.
    mini_mask: [mini_height, mini_width, num_instances] mini-masks.
    image_shape: (height, width, ...) of the target full-size image.

    See inspect_data.ipynb notebook for more details.
    """
    # Fix: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin `bool` is the documented, behaviour-identical replacement.
    mask = np.zeros(image_shape[:2] + (mini_mask.shape[-1],), dtype=bool)
    for i in range(mask.shape[-1]):
        m = mini_mask[:, :, i]
        y1, x1, y2, x2 = bbox[i][:4]
        h = y2 - y1
        w = x2 - x1
        # Resize with bilinear interpolation
        m = resize(m, (h, w))
        mask[y1:y2, x1:x2, i] = np.around(m).astype(bool)
    return mask
# TODO: Build and use this function to reduce code duplication
def mold_mask(mask, config):
    """Placeholder for molding a mask for the network; not implemented."""
    pass
def unmold_mask(mask, bbox, image_shape):
    """Converts a mask generated by the neural network to a format similar
    to its original shape.

    mask: [height, width] of type float. A small, typically 28x28 mask.
    bbox: [y1, x1, y2, x2]. The box to fit the mask in.
    image_shape: (height, width, ...) of the original image.

    Returns a binary mask with the same size as the original image.
    """
    threshold = 0.5
    y1, x1, y2, x2 = bbox
    mask = resize(mask, (y2 - y1, x2 - x1))
    # Fix: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin `bool` is the documented, behaviour-identical replacement.
    mask = np.where(mask >= threshold, 1, 0).astype(bool)

    # Put the mask in the right location.
    full_mask = np.zeros(image_shape[:2], dtype=bool)
    full_mask[y1:y2, x1:x2] = mask
    return full_mask
############################################################
# Anchors
############################################################
def generate_anchors(scales, ratios, shape, feature_stride, anchor_stride):
    """Generate anchor boxes for a single feature-map level.

    scales: 1D array of anchor sizes in pixels. Example: [32, 64, 128]
    ratios: 1D array of anchor ratios of width/height. Example: [0.5, 1, 2]
    shape: [height, width] spatial shape of the feature map over which
           to generate anchors.
    feature_stride: Stride of the feature map relative to the image in pixels.
    anchor_stride: Stride of anchors on the feature map. For example, if the
        value is 2 then generate anchors for every other feature map pixel.
    """
    # Cartesian product of scales and ratios.
    scales, ratios = np.meshgrid(np.array(scales), np.array(ratios))
    scales = scales.flatten()
    ratios = ratios.flatten()

    # Anchor heights/widths for every (scale, ratio) pair: width/height ratio
    # is preserved while the area scales with `scales`.
    heights = scales / np.sqrt(ratios)
    widths = scales * np.sqrt(ratios)

    # Anchor centre positions in image coordinates.
    shifts_y = np.arange(0, shape[0], anchor_stride) * feature_stride
    shifts_x = np.arange(0, shape[1], anchor_stride) * feature_stride
    shifts_x, shifts_y = np.meshgrid(shifts_x, shifts_y)

    # Every (position, size) combination.
    anchor_widths, centers_x = np.meshgrid(widths, shifts_x)
    anchor_heights, centers_y = np.meshgrid(heights, shifts_y)

    # Flatten into a list of (y, x) centres and (h, w) sizes.
    centers = np.stack([centers_y, centers_x], axis=2).reshape([-1, 2])
    sizes = np.stack([anchor_heights, anchor_widths], axis=2).reshape([-1, 2])

    # Corner coordinates (y1, x1, y2, x2).
    return np.concatenate([centers - 0.5 * sizes, centers + 0.5 * sizes], axis=1)
def generate_pyramid_anchors(scales, ratios, feature_shapes, feature_strides,
                             anchor_stride):
    """Generate anchors at different levels of a feature pyramid. Each scale
    is associated with a level of the pyramid, but each ratio is used in
    all levels of the pyramid.

    Returns:
    anchors: [N, (y1, x1, y2, x2)]. All generated anchors in one array. Sorted
        with the same order of the given scales. So, anchors of scale[0] come
        first, then anchors of scale[1], and so on.
    """
    # One anchor array per pyramid level, in scale order, then concatenated.
    per_level = [
        generate_anchors(scale, ratios, shape, stride, anchor_stride)
        for scale, shape, stride in zip(scales, feature_shapes, feature_strides)
    ]
    return np.concatenate(per_level, axis=0)
############################################################
# Miscellaneous
############################################################
def trim_zeros(x):
    """It's common to have tensors larger than the available data and
    pad with zeros. This function removes rows that are all zeros.

    x: [rows, columns].
    """
    assert len(x.shape) == 2
    # Keep any row containing at least one non-zero entry.
    keep_rows = np.any(x != 0, axis=1)
    return x[keep_rows]
def compute_matches(gt_boxes, gt_class_ids, gt_masks,
                    pred_boxes, pred_class_ids, pred_scores, pred_masks,
                    iou_threshold=0.5, score_threshold=0.0):
    """Finds matches between prediction and ground truth instances.

    Matching is greedy: predictions are visited in descending score order
    and each claims the unmatched GT with the highest mask IoU, provided
    the classes agree and the IoU clears iou_threshold.

    Returns:
        gt_match: 1-D array. For each GT box it has the index of the matched
            predicted box (-1 if unmatched).
        pred_match: 1-D array. For each predicted box, it has the index of
            the matched ground truth box (-1 if unmatched).
        overlaps: [pred_boxes, gt_boxes] IoU overlaps.
    """
    # Trim zero padding
    # TODO: cleaner to do zero unpadding upstream
    gt_boxes = trim_zeros(gt_boxes)
    gt_masks = gt_masks[..., :gt_boxes.shape[0]]
    pred_boxes = trim_zeros(pred_boxes)
    pred_scores = pred_scores[:pred_boxes.shape[0]]
    # Sort predictions by score from high to low
    indices = np.argsort(pred_scores)[::-1]
    pred_boxes = pred_boxes[indices]
    pred_class_ids = pred_class_ids[indices]
    pred_scores = pred_scores[indices]
    pred_masks = pred_masks[..., indices]
    # Compute IoU overlaps [pred_masks, gt_masks]
    overlaps = compute_overlaps_masks(pred_masks, gt_masks)
    # Loop through predictions and find matching ground truth boxes
    match_count = 0
    # -1 marks "no match yet" in both directions.
    pred_match = -1 * np.ones([pred_boxes.shape[0]])
    gt_match = -1 * np.ones([gt_boxes.shape[0]])
    for i in range(len(pred_boxes)):
        # Find best matching ground truth box
        # 1. Sort matches by score
        sorted_ixs = np.argsort(overlaps[i])[::-1]
        # 2. Remove low scores
        low_score_idx = np.where(overlaps[i, sorted_ixs] < score_threshold)[0]
        if low_score_idx.size > 0:
            sorted_ixs = sorted_ixs[:low_score_idx[0]]
        # 3. Find the match
        for j in sorted_ixs:
            # If ground truth box is already matched, go to next one
            if gt_match[j] > -1:
                continue
            # If we reach IoU smaller than the threshold, end the loop.
            # GTs are visited in descending IoU order, so no later GT can
            # clear the threshold either.
            iou = overlaps[i, j]
            if iou < iou_threshold:
                break
            # Do we have a match? (class must agree, not just overlap)
            if pred_class_ids[i] == gt_class_ids[j]:
                match_count += 1
                gt_match[j] = i
                pred_match[i] = j
                break
    return gt_match, pred_match, overlaps
def compute_ap(gt_boxes, gt_class_ids, gt_masks,
               pred_boxes, pred_class_ids, pred_scores, pred_masks,
               iou_threshold=0.5):
    """Compute Average Precision at a set IoU threshold (default 0.5).

    Returns:
        mAP: Mean Average Precision
        precisions: List of precisions at different class score thresholds.
        recalls: List of recall values at different class score thresholds.
        overlaps: [pred_boxes, gt_boxes] IoU overlaps.
    """
    # Match predictions to ground truth instances.
    gt_match, pred_match, overlaps = compute_matches(
        gt_boxes, gt_class_ids, gt_masks,
        pred_boxes, pred_class_ids, pred_scores, pred_masks,
        iou_threshold)

    # Running precision/recall after each prediction (score-sorted).
    cum_matches = np.cumsum(pred_match > -1)
    precisions = cum_matches / (np.arange(len(pred_match)) + 1)
    recalls = cum_matches.astype(np.float32) / len(gt_match)

    # Sentinel values at both ends simplify the interpolation below.
    precisions = np.concatenate([[0], precisions, [0]])
    recalls = np.concatenate([[0], recalls, [1]])

    # VOC-style interpolation: make precision monotonically non-increasing
    # by replacing each value with the max of itself and all values to its
    # right (right-to-left running maximum).
    precisions = np.maximum.accumulate(precisions[::-1])[::-1]

    # Sum precision over the points where recall changes.
    step_ixs = np.where(recalls[:-1] != recalls[1:])[0] + 1
    mAP = np.sum((recalls[step_ixs] - recalls[step_ixs - 1]) *
                 precisions[step_ixs])
    return mAP, precisions, recalls, overlaps
def compute_ap_range(gt_box, gt_class_id, gt_mask,
                     pred_box, pred_class_id, pred_score, pred_mask,
                     iou_thresholds=None, verbose=1):
    """Compute AP averaged over a range of IoU thresholds.

    Default range is 0.5-0.95 with increments of 0.05 (COCO-style metric).

    Returns:
        AP: mean of the per-threshold average precisions.
    """
    # BUGFIX: the previous `iou_thresholds or np.arange(...)` raised
    # "truth value of an array is ambiguous" whenever a numpy array was
    # passed in. Test explicitly for None/empty instead; an empty sequence
    # still falls back to the default, matching the old `or` semantics.
    if iou_thresholds is None or len(iou_thresholds) == 0:
        iou_thresholds = np.arange(0.5, 1.0, 0.05)

    # Compute AP at each threshold and average the results.
    AP = []
    for iou_threshold in iou_thresholds:
        ap, precisions, recalls, overlaps =\
            compute_ap(gt_box, gt_class_id, gt_mask,
                       pred_box, pred_class_id, pred_score, pred_mask,
                       iou_threshold=iou_threshold)
        if verbose:
            print("AP @{:.2f}:\t {:.3f}".format(iou_threshold, ap))
        AP.append(ap)
    AP = np.array(AP).mean()
    if verbose:
        print("AP @{:.2f}-{:.2f}:\t {:.3f}".format(
            iou_thresholds[0], iou_thresholds[-1], AP))
    return AP
def compute_recall(pred_boxes, gt_boxes, iou):
    """Compute the recall at the given IoU threshold.

    Indicates how many GT boxes are found by the given prediction boxes.

    pred_boxes: [N, (y1, x1, y2, x2)] in image coordinates
    gt_boxes: [N, (y1, x1, y2, x2)] in image coordinates
    """
    # Pairwise IoU between every prediction and every GT box.
    overlaps = compute_overlaps(pred_boxes, gt_boxes)
    best_iou = np.max(overlaps, axis=1)
    best_gt = np.argmax(overlaps, axis=1)
    # Predictions whose best overlap clears the threshold.
    positive_ids = np.where(best_iou >= iou)[0]
    matched_gt_boxes = best_gt[positive_ids]
    # Distinct GT boxes hit, over the total number of GT boxes.
    recall = len(set(matched_gt_boxes)) / gt_boxes.shape[0]
    return recall, positive_ids
# ## Batch Slicing
# Some custom layers support a batch size of 1 only, and require a lot of work
# to support batches greater than 1. This function slices an input tensor
# across the batch dimension and feeds batches of size 1. Effectively,
# an easy way to support batches > 1 quickly with little code modification.
# In the long run, it's more efficient to modify the code to support large
# batches and getting rid of this function. Consider this a temporary solution
def batch_slice(inputs, graph_fn, batch_size, names=None):
    """Split inputs along the batch dimension, apply graph_fn to each
    slice, and stack the per-slice results back together.

    Lets a graph written for a single instance run on a batch of inputs.

    inputs: list of tensors. All must have the same first dimension length
    graph_fn: A function that returns a TF tensor that's part of a graph.
    batch_size: number of slices to divide the data into.
    names: If provided, assigns names to the resulting tensors.
    """
    if not isinstance(inputs, list):
        inputs = [inputs]

    # Run the graph once per batch item, normalizing each result to a list.
    per_slice_outputs = []
    for idx in range(batch_size):
        slice_args = [tensor[idx] for tensor in inputs]
        result_slice = graph_fn(*slice_args)
        if not isinstance(result_slice, (tuple, list)):
            result_slice = [result_slice]
        per_slice_outputs.append(result_slice)

    # Transpose: list of per-slice output-lists -> per-output list of slices.
    grouped = list(zip(*per_slice_outputs))
    if names is None:
        names = [None] * len(grouped)

    # Re-stack each output along a new batch dimension.
    stacked = [tf.stack(slices, axis=0, name=name)
               for slices, name in zip(grouped, names)]
    if len(stacked) == 1:
        stacked = stacked[0]
    return stacked
def download_trained_weights(coco_model_path, verbose=1):
    """Download COCO trained weights from Releases.

    coco_model_path: local path of COCO trained weights
    """
    if verbose > 0:
        print("Downloading pretrained model to " + coco_model_path + " ...")
    # Stream the release asset straight to disk; both handles are closed
    # by their context managers even on error.
    with urllib.request.urlopen(COCO_MODEL_URL) as resp:
        with open(coco_model_path, 'wb') as out:
            shutil.copyfileobj(resp, out)
    if verbose > 0:
        print("... done downloading pretrained model!")
def norm_boxes(boxes, shape):
    """Converts boxes from pixel coordinates to normalized coordinates.

    boxes: [N, (y1, x1, y2, x2)] in pixel coordinates
    shape: [..., (height, width)] in pixels

    Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
    coordinates it's inside the box.

    Returns:
        [N, (y1, x1, y2, x2)] in normalized coordinates
    """
    h, w = shape
    # (y2, x2) shift by 1 to move from exclusive pixel coordinates to
    # inclusive normalized coordinates.
    scale = np.array([h - 1, w - 1, h - 1, w - 1])
    shift = np.array([0, 0, 1, 1])
    return ((boxes - shift) / scale).astype(np.float32)
def denorm_boxes(boxes, shape):
    """Converts boxes from normalized coordinates to pixel coordinates.

    boxes: [N, (y1, x1, y2, x2)] in normalized coordinates
    shape: [..., (height, width)] in pixels

    Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
    coordinates it's inside the box.

    Returns:
        [N, (y1, x1, y2, x2)] in pixel coordinates
    """
    h, w = shape
    # Inverse of norm_boxes: scale up, then shift (y2, x2) back out by 1.
    scale = np.array([h - 1, w - 1, h - 1, w - 1])
    shift = np.array([0, 0, 1, 1])
    return np.around(boxes * scale + shift).astype(np.int32)
def resize(image, output_shape, order=1, mode='constant', cval=0, clip=True,
           preserve_range=False, anti_aliasing=False, anti_aliasing_sigma=None):
    """A wrapper for Scikit-Image resize().

    Scikit-Image generates warnings on every call to resize() if it doesn't
    receive the right parameters. The right parameters depend on the version
    of skimage. This solves the problem by using different parameters per
    version. And it provides a central place to control resizing defaults.
    """
    kwargs = dict(order=order, mode=mode, cval=cval, clip=clip,
                  preserve_range=preserve_range)
    # anti_aliasing was added in skimage 0.14; defaulting it to False keeps
    # backward compatibility with skimage 0.13.
    if LooseVersion(skimage.__version__) >= LooseVersion("0.14"):
        kwargs.update(anti_aliasing=anti_aliasing,
                      anti_aliasing_sigma=anti_aliasing_sigma)
    return skimage.transform.resize(image, output_shape, **kwargs)
import sys
import os
import logging
import math
import random
import numpy as np
import tensorflow as tf
import scipy
import skimage.color
import skimage.io
import skimage.transform
import urllib.request
import shutil
import warnings
from distutils.version import LooseVersion
COCO_MODEL_URL = "https://github.com/matterport/Mask_RCNN/releases/download/v2.0/mask_rcnn_coco.h5"
2] - gt_box[:, 0]
gt_width = gt_box[:, 3] - gt_box[:, 1]
gt_center_y = gt_box[:, 0] + 0.5 * gt_height
gt_center_x = gt_box[:, 1] + 0.5 * gt_width
dy = (gt_center_y - center_y) / height
dx = (gt_center_x - center_x) / width
dh = tf.math.log(gt_height / height)
dw = tf.math.log(gt_width / width)
result = tf.stack([dy, dx, dh, dw], axis=1)
return result
def box_refinement(box, gt_box):
box = box.astype(np.float32)
gt_box = gt_box.astype(np.float32)
height = box[:, 2] - box[:, 0]
width = box[:, 3] - box[:, 1]
center_y = box[:, 0] + 0.5 * height
center_x = box[:, 1] + 0.5 * width
gt_height = gt_box[:, 2] - gt_box[:, 0]
gt_width = gt_box[:, 3] - gt_box[:, 1]
gt_center_y = gt_box[:, 0] + 0.5 * gt_height
gt_center_x = gt_box[:, 1] + 0.5 * gt_width
dy = (gt_center_y - center_y) / height
dx = (gt_center_x - center_x) / width
dh = np.log(gt_height / height)
dw = np.log(gt_width / width)
return np.stack([dy, dx, dh, dw], axis=1)
if min_scale and scale < min_scale:
scale = min_scale
if max_dim and mode == "square":
image_max = max(h, w)
if round(image_max * scale) > max_dim:
scale = max_dim / image_max
if scale != 1:
image = resize(image, (round(h * scale), round(w * scale)),
preserve_range=True)
if mode == "square":
h, w = image.shape[:2]
top_pad = (max_dim - h) // 2
bottom_pad = max_dim - h - top_pad
left_pad = (max_dim - w) // 2
right_pad = max_dim - w - left_pad
padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]
image = np.pad(image, padding, mode='constant', constant_values=0)
window = (top_pad, left_pad, h + top_pad, w + left_pad)
elif mode == "pad64":
h, w = image.shape[:2]
assert min_dim % 64 == 0, "Minimum dimension must be a multiple of 64"
if h % 64 > 0:
max_h = h - (h % 64) + 64
top_pad = (max_h - h) // 2
bottom_pad = max_h - h - top_pad
else:
top_pad = bottom_pad = 0
if w % 64 > 0:
max_w = w - (w % 64) + 64
left_pad = (max_w - w) // 2
right_pad = max_w - w - left_pad
else:
left_pad = right_pad = 0
padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]
image = np.pad(image, padding, mode='constant', constant_values=0)
window = (top_pad, left_pad, h + top_pad, w + left_pad)
elif mode == "crop":
h, w = image.shape[:2]
y = random.randint(0, (h - min_dim))
x = random.randint(0, (w - min_dim))
crop = (y, x, min_dim, min_dim)
image = image[y:y + min_dim, x:x + min_dim]
window = (0, 0, min_dim, min_dim)
else:
raise Exception("Mode {} not supported".format(mode))
return image.astype(image_dtype), window, scale, padding, crop
def resize_mask(mask, scale, padding, crop=None):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mask = scipy.ndimage.zoom(mask, zoom=[scale, scale, 1], order=0)
if crop is not None:
y, x, h, w = crop
mask = mask[y:y + h, x:x + w]
else:
mask = np.pad(mask, padding, mode='constant', constant_values=0)
return mask
def minimize_mask(bbox, mask, mini_shape):
mini_mask = np.zeros(mini_shape + (mask.shape[-1],), dtype=bool)
for i in range(mask.shape[-1]):
m = mask[:, :, i].astype(bool)
y1, x1, y2, x2 = bbox[i][:4]
m = m[y1:y2, x1:x2]
if m.size == 0:
raise Exception("Invalid bounding box with area of zero")
m = resize(m, mini_shape)
mini_mask[:, :, i] = np.around(m).astype(np.bool)
return mini_mask
def expand_mask(bbox, mini_mask, image_shape):
mask = np.zeros(image_shape[:2] + (mini_mask.shape[-1],), dtype=bool)
for i in range(mask.shape[-1]):
m = mini_mask[:, :, i]
y1, x1, y2, x2 = bbox[i][:4]
h = y2 - y1
w = x2 - x1
m = resize(m, (h, w))
mask[y1:y2, x1:x2, i] = np.around(m).astype(np.bool)
return mask
def mold_mask(mask, config):
pass
def unmold_mask(mask, bbox, image_shape):
threshold = 0.5
y1, x1, y2, x2 = bbox
mask = resize(mask, (y2 - y1, x2 - x1))
mask = np.where(mask >= threshold, 1, 0).astype(np.bool)
full_mask = np.zeros(image_shape[:2], dtype=np.bool)
full_mask[y1:y2, x1:x2] = mask
return full_mask
1, 1])
return np.around(np.multiply(boxes, scale) + shift).astype(np.int32)
def resize(image, output_shape, order=1, mode='constant', cval=0, clip=True,
preserve_range=False, anti_aliasing=False, anti_aliasing_sigma=None):
if LooseVersion(skimage.__version__) >= LooseVersion("0.14"):
return skimage.transform.resize(
image, output_shape,
order=order, mode=mode, cval=cval, clip=clip,
preserve_range=preserve_range, anti_aliasing=anti_aliasing,
anti_aliasing_sigma=anti_aliasing_sigma)
else:
return skimage.transform.resize(
image, output_shape,
order=order, mode=mode, cval=cval, clip=clip,
preserve_range=preserve_range) | true | true |
f730cf4ecdd22af444f24b4e7185bfbd91d7f33c | 1,937 | py | Python | PyUE4Builder/actions/copy.py | rfsheffer/PyUE4Builder | 2b2a978431ba5426238a03ecaae5a73836a0598c | [
"MIT"
] | 12 | 2018-04-15T01:46:01.000Z | 2022-01-06T01:34:34.000Z | PyUE4Builder/actions/copy.py | rfsheffer/PyUE4Builder | 2b2a978431ba5426238a03ecaae5a73836a0598c | [
"MIT"
] | null | null | null | PyUE4Builder/actions/copy.py | rfsheffer/PyUE4Builder | 2b2a978431ba5426238a03ecaae5a73836a0598c | [
"MIT"
] | 2 | 2017-12-04T18:30:12.000Z | 2018-04-15T01:46:02.000Z | #!/usr/bin/env python
from actions.action import Action
import os
import contextlib
import re
import shutil
__author__ = "Ryan Sheffer"
__copyright__ = "Copyright 2020, Sheffer Online Services"
__credits__ = ["Ryan Sheffer", "VREAL"]
class Copy(Action):
    """
    Copy Action

    An action designed to copy file/s as part of a build process.
    Each copy item is a two-element list: [source_path, destination_path].
    TODO: Setup wildcard like copying? Take advantage of a copy module with a lot of options.
    TODO: Have many copying options, like many files in a folder to another folder. Whole dir trees, etc.
    """
    def __init__(self, config, **kwargs):
        super().__init__(config, **kwargs)
        # [source, destination] pairs from the 'copy' kwarg; empty if absent.
        self.copy_items = kwargs['copy'] if 'copy' in kwargs else []
    def verify(self):
        # Returns an error string describing the first problem found,
        # or '' when every copy item is well-formed and its source exists.
        if not len(self.copy_items):
            return 'No items to copy!'
        for item in self.copy_items:
            if type(item) is not list or len(item) != 2:
                return 'Invalid copy item found in copy list!'
            # Expand build tags in place so run() operates on resolved paths.
            # NOTE(review): replace_tags presumably comes from Action — confirm.
            item[0] = self.replace_tags(item[0])
            item[1] = self.replace_tags(item[1])
            if not os.path.isfile(item[0]):
                return 'Copy item ({}) does not exist!'.format(item[0])
        return ''
    def run(self):
        # Performs the copies; returns True (errors would raise).
        for item in self.copy_items:
            # Remove any stale destination file first; ignore if it is absent.
            with contextlib.suppress(FileNotFoundError):
                os.unlink(item[1])
            os.makedirs(os.path.dirname(item[1]), exist_ok=True)
            print('Copying {} to {}'.format(item[0], item[1]))
            # copy2 also preserves file metadata (timestamps, permissions).
            shutil.copy2(item[0], item[1])
        return True
if __name__ == "__main__":
    # Ad-hoc manual smoke test for path-tag replacement.
    class VarClassTest(object):
        def __init__(self):
            self.HI2_there = "some\\cool\\path"
            self.three = "another\\cool\\path"
    # NOTE(review): replace_path_sections is not defined in this file;
    # presumably provided by Action or another module — confirm it exists.
    print(Copy.replace_path_sections('hello\\{HI2_there}\\then\\there\\were\\{three}\\bla.exe', VarClassTest()))
    print(Copy.replace_path_sections('hello\\then\\there\\{not_found}\\three.exe', VarClassTest()))
| 34.589286 | 112 | 0.621064 |
from actions.action import Action
import os
import contextlib
import re
import shutil
__author__ = "Ryan Sheffer"
__copyright__ = "Copyright 2020, Sheffer Online Services"
__credits__ = ["Ryan Sheffer", "VREAL"]
class Copy(Action):
def __init__(self, config, **kwargs):
super().__init__(config, **kwargs)
self.copy_items = kwargs['copy'] if 'copy' in kwargs else []
def verify(self):
if not len(self.copy_items):
return 'No items to copy!'
for item in self.copy_items:
if type(item) is not list or len(item) != 2:
return 'Invalid copy item found in copy list!'
item[0] = self.replace_tags(item[0])
item[1] = self.replace_tags(item[1])
if not os.path.isfile(item[0]):
return 'Copy item ({}) does not exist!'.format(item[0])
return ''
def run(self):
for item in self.copy_items:
with contextlib.suppress(FileNotFoundError):
os.unlink(item[1])
os.makedirs(os.path.dirname(item[1]), exist_ok=True)
print('Copying {} to {}'.format(item[0], item[1]))
shutil.copy2(item[0], item[1])
return True
if __name__ == "__main__":
class VarClassTest(object):
def __init__(self):
self.HI2_there = "some\\cool\\path"
self.three = "another\\cool\\path"
print(Copy.replace_path_sections('hello\\{HI2_there}\\then\\there\\were\\{three}\\bla.exe', VarClassTest()))
print(Copy.replace_path_sections('hello\\then\\there\\{not_found}\\three.exe', VarClassTest()))
| true | true |
f730cf881812347d409f1942f37890aa47f0c756 | 493 | py | Python | objectModel/Python/cdm/enums/__init__.py | aaron-emde/CDM | 9472e9c7694821ac4a9bbe608557d2e65aabc73e | [
"CC-BY-4.0",
"MIT"
] | null | null | null | objectModel/Python/cdm/enums/__init__.py | aaron-emde/CDM | 9472e9c7694821ac4a9bbe608557d2e65aabc73e | [
"CC-BY-4.0",
"MIT"
] | 3 | 2021-05-11T23:57:12.000Z | 2021-08-04T05:03:05.000Z | objectModel/Python/cdm/enums/__init__.py | aaron-emde/CDM | 9472e9c7694821ac4a9bbe608557d2e65aabc73e | [
"CC-BY-4.0",
"MIT"
] | null | null | null | from .cdm_attribute_context_type import CdmAttributeContextType
from .cdm_data_format import CdmDataFormat
from .cdm_object_type import CdmObjectType
from .cdm_relationship_discovery_style import CdmRelationshipDiscoveryStyle
from .cdm_status_level import CdmStatusLevel
from .cdm_validation_step import CdmValidationStep
__all__ = [
'CdmAttributeContextType',
'CdmDataFormat',
'CdmObjectType',
'CdmRelationshipDiscoveryStyle',
'CdmStatusLevel',
'CdmValidationStep'
]
| 30.8125 | 75 | 0.827586 | from .cdm_attribute_context_type import CdmAttributeContextType
from .cdm_data_format import CdmDataFormat
from .cdm_object_type import CdmObjectType
from .cdm_relationship_discovery_style import CdmRelationshipDiscoveryStyle
from .cdm_status_level import CdmStatusLevel
from .cdm_validation_step import CdmValidationStep
__all__ = [
'CdmAttributeContextType',
'CdmDataFormat',
'CdmObjectType',
'CdmRelationshipDiscoveryStyle',
'CdmStatusLevel',
'CdmValidationStep'
]
| true | true |
f730cf98d59bd05918ee808444ad2fd2d6821f0d | 577 | py | Python | app/user/migrations/0026_auto_20170819_0854.py | Sovol2018/sovolo | 54250e42b4af3391d2f99690f45b93ab240563c2 | [
"MIT"
] | 2 | 2017-06-06T11:34:49.000Z | 2017-10-24T13:09:50.000Z | app/user/migrations/0026_auto_20170819_0854.py | Sovol2018/sovolo | 54250e42b4af3391d2f99690f45b93ab240563c2 | [
"MIT"
] | 346 | 2016-08-09T20:50:57.000Z | 2018-08-28T06:52:17.000Z | app/user/migrations/0026_auto_20170819_0854.py | hejob/sovolo | 8b73253d7bf0427c7ae0ebb6d8e3d70e118e8427 | [
"MIT"
] | 3 | 2017-11-27T14:07:57.000Z | 2018-08-13T15:51:01.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-18 23:54
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename two relation fields on the Skill model (auto-generated)."""
    dependencies = [
        ('user', '0025_remove_user_user_todo'),
    ]
    operations = [
        # user_todo -> skilltodo
        migrations.RenameField(
            model_name='skill',
            old_name='user_todo',
            new_name='skilltodo',
        ),
        # frame_user -> userskill
        migrations.RenameField(
            model_name='skill',
            old_name='frame_user',
            new_name='userskill',
        ),
    ]
| 22.192308 | 48 | 0.580589 |
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('user', '0025_remove_user_user_todo'),
]
operations = [
migrations.RenameField(
model_name='skill',
old_name='user_todo',
new_name='skilltodo',
),
migrations.RenameField(
model_name='skill',
old_name='frame_user',
new_name='userskill',
),
]
| true | true |
f730cfc2579f214ca1f28d1781de2677a358113d | 2,377 | py | Python | queue_util/serializers.py | EDITD/queue_util | 383267388bcfeb1c8bc0cf749f28d5073b067c1b | [
"MIT"
] | null | null | null | queue_util/serializers.py | EDITD/queue_util | 383267388bcfeb1c8bc0cf749f28d5073b067c1b | [
"MIT"
] | 21 | 2015-11-20T14:37:38.000Z | 2020-10-05T11:09:22.000Z | queue_util/serializers.py | EDITD/queue_util | 383267388bcfeb1c8bc0cf749f28d5073b067c1b | [
"MIT"
] | null | null | null | # kombu v4 will come out with the following commit in:
# https://github.com/celery/kombu/commit/010aae8ccf16ad2fa5a9c3d6f3b84b21e1c1677a
# which does the same thing, but this also allows us to not have to enable
# insecure serializers
from datetime import datetime
import msgpack
import six
from kombu.serialization import register
DATE_FORMAT = '%Y%m%dT%H:%M:%S.%f'
MESSAGE_CONTENT_TYPE = 'application/x-unicode-msgpack-with-dates'
MESSAGE_CONTENT_ENCODING = 'binary'
def decode_datetime(obj):
    """msgpack object_hook: restore datetimes encoded by encode_datetime().

    Handles both byte keys (messages packed by python2) and unicode keys.
    Any dict without the marker key is returned unchanged.
    """
    if b'__datetime__' in obj:
        # Byte keys: this message was produced by python2.
        raw = obj[b'as_str'].decode('utf-8')
        return datetime.strptime(raw, DATE_FORMAT)
    if '__datetime__' in obj:
        raw = obj['as_str']
        # Unicode keys do not guarantee unicode values — the value may
        # still arrive as bytes and need decoding.
        if six.PY3 and isinstance(raw, six.binary_type):
            raw = raw.decode('utf-8')
        return datetime.strptime(raw, DATE_FORMAT)
    return obj
def encode_datetime(obj):
    """msgpack default hook: represent datetimes as a marker dict.

    The dict always carries a unicode 'as_str' value so python2 and
    python3 producers emit the same payload. Non-datetimes pass through.
    """
    if not isinstance(obj, datetime):
        return obj
    if six.PY3:
        as_str = obj.strftime(DATE_FORMAT)
    else:
        # python2: strftime returns bytes, so decode to force unicode.
        as_str = obj.strftime(DATE_FORMAT).decode('utf-8')
    return {'__datetime__': True, 'as_str': as_str}
def pack(s):
    """Serialize ``s`` into msgpack bytes.

    Datetimes are converted by encode_datetime; use_bin_type keeps
    str/bytes distinct on the wire.
    """
    options = {
        'use_bin_type': True,
        'unicode_errors': 'ignore',
        'default': encode_datetime,
    }
    return msgpack.packb(s, **options)
def unpack(s):
    """Deserialize msgpack bytes, restoring datetimes via decode_datetime."""
    options = {
        'encoding': 'utf-8',
        'unicode_errors': 'ignore',
        'object_hook': decode_datetime,
    }
    return msgpack.unpackb(s, **options)
# Register the date-aware serializer with kombu under its content type.
register(
    'unicode-msgpack-with-dates',
    pack,
    unpack,
    content_type=MESSAGE_CONTENT_TYPE,
    content_encoding=MESSAGE_CONTENT_ENCODING,
)
# This is around for compatibility reasons (so that we're able to decode any messages
# that are already in queues, with the old/non-date-aware content_type).
register(
    'unicode-msgpack',
    pack,
    unpack,
    content_type='application/x-unicode-msgpack',
    content_encoding='binary',
)
| 27.964706 | 94 | 0.671435 |
from datetime import datetime
import msgpack
import six
from kombu.serialization import register
DATE_FORMAT = '%Y%m%dT%H:%M:%S.%f'
MESSAGE_CONTENT_TYPE = 'application/x-unicode-msgpack-with-dates'
MESSAGE_CONTENT_ENCODING = 'binary'
def decode_datetime(obj):
if b'__datetime__' in obj:
obj = datetime.strptime(obj[b'as_str'].decode('utf-8'), DATE_FORMAT)
elif '__datetime__' in obj:
as_str = obj['as_str']
# values are!!
if six.PY3 and isinstance(as_str, six.binary_type):
as_str = as_str.decode('utf-8')
obj = datetime.strptime(as_str, DATE_FORMAT)
return obj
def encode_datetime(obj):
if isinstance(obj, datetime):
# We want to return a dict that can be parsed later.
# The dict should __always__ have unicode output.
if six.PY3:
as_str = obj.strftime(DATE_FORMAT)
elif six.PY2:
# We are in python2! But we want to output unicode.unicode_literals will take care
# of the keys, but strftime returns a bytestring in python2.
as_bytes = obj.strftime(DATE_FORMAT)
as_str = as_bytes.decode('utf-8')
return {'__datetime__': True, 'as_str': as_str}
return obj
def pack(s):
return msgpack.packb(
s,
use_bin_type=True,
unicode_errors='ignore',
default=encode_datetime,
)
def unpack(s):
return msgpack.unpackb(
s,
encoding='utf-8',
unicode_errors='ignore',
object_hook=decode_datetime,
)
register(
'unicode-msgpack-with-dates',
pack,
unpack,
content_type=MESSAGE_CONTENT_TYPE,
content_encoding=MESSAGE_CONTENT_ENCODING,
)
# This is around for compatibility reasons (so that we're able to decode any messages
register(
'unicode-msgpack',
pack,
unpack,
content_type='application/x-unicode-msgpack',
content_encoding='binary',
)
| true | true |
f730cffb583fb3b775af4106c24d4268b0e55606 | 3,050 | py | Python | scripts/logs_analyzer.py | onzehub/onzecurrency | 08f82126be349a639a6421463a3d9038761b661f | [
"MIT"
] | null | null | null | scripts/logs_analyzer.py | onzehub/onzecurrency | 08f82126be349a639a6421463a3d9038761b661f | [
"MIT"
] | null | null | null | scripts/logs_analyzer.py | onzehub/onzecurrency | 08f82126be349a639a6421463a3d9038761b661f | [
"MIT"
] | null | null | null | # Follow every step every server's log has gone through
# Formulate a conclusion
# Also count blocks
# a search function, a 1 file assembler of all log files
import os, glob, sys
import argparse
from datetime import datetime
def main():
    """Parse command-line options and run every requested action."""
    # Initialize parser
    parser = argparse.ArgumentParser()
    # Adding optional arguments
    parser.add_argument("-a", "--assemble", help = "Assemble 1 file of all log files", action="store_true")
    parser.add_argument("-s", "--search", help = "Search in log files", type=str, action="store")
    parser.add_argument("-c", "--show-blocks-count", help = "Show blocks count", action="store_true")
    # Read arguments from command line
    args = parser.parse_args()
    # Do not use ELIF, combining options doesn't work then
    if args.assemble:
        assemble()
    if args.search:
        search(args.search)
    if args.show_blocks_count:
        show_blocks_count()
def _iter_log_lines():
    """Yield [date, time, filename, severity, function_name, message] for
    every line of every file in log/, skipping the generated one_loggi
    file and the *_blocks_count counters.
    """
    folder_path = 'log'
    for filename in glob.glob(os.path.join(folder_path, '*')):
        if filename == 'log/one_loggi' or filename.endswith('_blocks_count'):
            continue
        with open(filename, 'r') as f:
            for line in f:
                date, time, severity, function_name, message = line.split(' ', maxsplit=4)
                yield [date, time, filename, severity, function_name, message]


# Create one_loggi file from all log files
def assemble():
    """Merge all log files into log/one_loggi, newest entries first."""
    # Previously this duplicated the directory walk and parsing that
    # search() also needs; both now share _iter_log_lines().
    messages = [' '.join(fields) for fields in _iter_log_lines()]
    messages.sort(key=sort_key)
    messages.reverse()
    with open('log/one_loggi', 'w') as f:
        f.writelines(messages)


# Search for a string in all log files
def search(search_term):
    """Print every log line whose message contains search_term, newest first."""
    messages = [' '.join(fields) for fields in _iter_log_lines()
                if search_term in fields[5]]
    messages.sort(key=sort_key)
    messages.reverse()
    for message in messages:
        print(message)
def show_blocks_count():
    """Print each '*_blocks_count' file in log/ together with its content."""
    pattern = os.path.join('log', '*_blocks_count')
    for filename in sorted(glob.glob(pattern)):
        # The context manager closes the file; no explicit close needed.
        with open(filename, 'r') as f:
            print(filename, f.read())
def sort_key(line):
    """Sort key for log lines: the timestamp in the second whitespace field."""
    timestamp = line.split(' ')[1]
    return datetime.strptime(timestamp, '%H:%M:%S.%f')
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C: report and exit cleanly, without a traceback.
        print('Interrupted')
        try:
            sys.exit(0)
        except SystemExit:
            # Fall back to an immediate, no-cleanup exit.
            os._exit(0)
# Formulate a conclusion
# Also count blocks
# a search function, a 1 file assembler of all log files
import os, glob, sys
import argparse
from datetime import datetime
def main():
# Initialize parser
parser = argparse.ArgumentParser()
# Adding optional arguments
parser.add_argument("-a", "--assemble", help = "Assemble 1 file of all log files", action="store_true")
parser.add_argument("-s", "--search", help = "Search in log files", type=str, action="store")
parser.add_argument("-c", "--show-blocks-count", help = "Show blocks count", action="store_true")
# Read arguments from command line
args = parser.parse_args()
# Do not use ELIF, combining options doesn't work then
if args.assemble:
assemble()
if args.search:
search(args.search)
if args.show_blocks_count:
show_blocks_count()
def assemble():
messages = []
folder_path = 'log'
for filename in glob.glob(os.path.join(folder_path, '*')):
if filename == 'log/one_loggi' or filename.endswith('_blocks_count'):
continue
with open(filename, 'r') as f:
lines = f.readlines()
for line in lines:
date, time, severity, function_name, message = line.split(' ', maxsplit=4)
updated_line = [date, time, filename , severity, function_name, message]
messages.append(' '.join(updated_line))
f.close()
messages.sort(key=lambda l: sort_key(l))
messages.reverse()
with open('log/one_loggi', 'w') as f:
for message in messages:
f.writelines(message)
def search(search_term):
messages = []
folder_path = 'log'
for filename in glob.glob(os.path.join(folder_path, '*')):
if filename == 'log/one_loggi' or filename.endswith('_blocks_count'):
continue
with open(filename, 'r') as f:
lines = f.readlines()
for line in lines:
date, time, severity, function_name, message = line.split(' ', maxsplit=4)
updated_line = [date, time, filename , severity, function_name, message]
if search_term in message:
messages.append(' '.join(updated_line))
f.close()
messages.sort(key=lambda l: sort_key(l))
messages.reverse()
for message in messages:
print(message)
def show_blocks_count():
folder_path = 'log'
for filename in sorted(glob.glob(os.path.join(folder_path, '*_blocks_count'))):
with open(filename, 'r') as f:
print(filename, f.read())
f.close()
def sort_key(line):
return datetime.strptime(line.split(' ')[1], '%H:%M:%S.%f')
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('Interrupted')
try:
sys.exit(0)
except SystemExit:
os._exit(0) | true | true |
f730d0ca0994a465860202a1c7adb5a31237673d | 894 | py | Python | sentry_sdk/integrations/argv.py | Siecje/sentry-python | d8405491c60c5b7c3d2ec3ed97ab4bea104f4e51 | [
"BSD-2-Clause"
] | 1 | 2020-11-02T11:31:01.000Z | 2020-11-02T11:31:01.000Z | sentry_sdk/integrations/argv.py | Siecje/sentry-python | d8405491c60c5b7c3d2ec3ed97ab4bea104f4e51 | [
"BSD-2-Clause"
] | null | null | null | sentry_sdk/integrations/argv.py | Siecje/sentry-python | d8405491c60c5b7c3d2ec3ed97ab4bea104f4e51 | [
"BSD-2-Clause"
] | null | null | null | from __future__ import absolute_import
import sys
from sentry_sdk.hub import Hub
from sentry_sdk.integrations import Integration
from sentry_sdk.scope import add_global_event_processor
if False:
from typing import Any
from typing import Dict
class ArgvIntegration(Integration):
    # Sentry integration that attaches sys.argv to every outgoing event.
    identifier = "argv"
    @staticmethod
    def setup_once():
        # type: () -> None
        # Registered once globally; runs for every event on every hub.
        @add_global_event_processor
        def processor(event, hint):
            # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
            # Only act when this integration is enabled on the current hub.
            if Hub.current.get_integration(ArgvIntegration) is not None:
                extra = event.setdefault("extra", {})
                # If some event processor decided to set extra to e.g. an
                # `int`, don't crash. Not here.
                if isinstance(extra, dict):
                    extra["sys.argv"] = sys.argv
            return event
| 28.83871 | 73 | 0.624161 | from __future__ import absolute_import
import sys
from sentry_sdk.hub import Hub
from sentry_sdk.integrations import Integration
from sentry_sdk.scope import add_global_event_processor
if False:
from typing import Any
from typing import Dict
class ArgvIntegration(Integration):
identifier = "argv"
@staticmethod
def setup_once():
@add_global_event_processor
def processor(event, hint):
if Hub.current.get_integration(ArgvIntegration) is not None:
extra = event.setdefault("extra", {})
if isinstance(extra, dict):
extra["sys.argv"] = sys.argv
return event
| true | true |
f730d1918786544a8b85c5af03acfaf23f6d5b25 | 13,005 | py | Python | packit/specfile.py | FilipSchad/packit | d57da48c649ed9e0212b6a7ca5fc6428c217bc71 | [
"MIT"
] | 23 | 2020-09-03T01:21:57.000Z | 2022-03-16T17:06:28.000Z | packit/specfile.py | FilipSchad/packit | d57da48c649ed9e0212b6a7ca5fc6428c217bc71 | [
"MIT"
] | 642 | 2020-08-03T06:52:35.000Z | 2022-03-31T19:35:38.000Z | packit/specfile.py | FilipSchad/packit | d57da48c649ed9e0212b6a7ca5fc6428c217bc71 | [
"MIT"
] | 19 | 2020-08-13T09:55:42.000Z | 2022-01-19T20:53:53.000Z | # Copyright Contributors to the Packit project.
# SPDX-License-Identifier: MIT
import inspect
import re
from logging import getLogger
from pathlib import Path
from typing import Union, List, Optional, Dict
from packit.patches import PatchMetadata
from rebasehelper.helpers.macro_helper import MacroHelper
from rebasehelper.specfile import SpecFile, RebaseHelperError, saves, PatchObject
from rebasehelper.tags import Tag, Tags
try:
from rebasehelper.plugins.plugin_manager import plugin_manager
except ImportError:
from rebasehelper.versioneer import versioneers_runner
from packit.exceptions import PackitException
logger = getLogger(__name__)
class Specfile(SpecFile):
def __init__(self, path: Union[str, Path], sources_dir: Union[str, Path] = ""):
s = inspect.signature(SpecFile)
if "changelog_entry" in s.parameters:
super().__init__(
path=str(path), sources_location=str(sources_dir), changelog_entry=""
)
else:
super().__init__(path=str(path), sources_location=str(sources_dir))
self._patch_id_digits: Optional[int] = None
self._uses_autosetup: Optional[bool] = None
def update_spec(self):
if hasattr(self, "update"):
# new rebase-helper
self.update()
else:
# old rebase-helper
self._update_data()
def update_changelog_in_spec(self, changelog_entry):
if hasattr(self, "update_changelog"):
# new rebase-helper
self.update_changelog(changelog_entry)
else:
# old rebase-helper
self.changelog_entry = changelog_entry
new_log = self.get_new_log()
new_log.extend(self.spec_content.sections["%changelog"])
self.spec_content.sections["%changelog"] = new_log
self.save()
def set_spec_version(
self, version: str = None, release: str = None, changelog_entry: str = None
):
"""
Set version in spec, release and add a changelog_entry (if they are presented).
:param version: new version
:param release: new release
:param changelog_entry: accompanying changelog entry
"""
try:
if version:
# also this code adds 3 rpmbuild dirs into the upstream repo,
# we should ask rebase-helper not to do that
# using set_tag instead of set_version to turn off preserving macros
self.set_tag("Version", version, preserve_macros=False)
if release:
# using set_tag instead of set_release to turn off preserving macros
self.set_tag(
"Release", "{}%{{?dist}}".format(release), preserve_macros=False
)
if not changelog_entry:
return
self.update_changelog_in_spec(changelog_entry)
except RebaseHelperError as ex:
logger.error(f"Rebase-helper failed to change the spec file: {ex}")
raise PackitException("Rebase-helper didn't do the job.")
def write_spec_content(self):
if hasattr(self, "_write_spec_content"):
# new rebase-helper
self._write_spec_content()
else:
# old rebase-helper
self._write_spec_file_to_disc()
@staticmethod
def get_upstream_version(versioneer, package_name, category):
"""
Call the method of rebase-helper (due to the version of rebase-helper)
to get the latest upstream version of a package.
:param versioneer:
:param package_name: str
:param category:
:return: str version
"""
try:
get_version = plugin_manager.versioneers.run
except NameError:
get_version = versioneers_runner.run
return get_version(versioneer, package_name, category)
def get_release_number(self) -> str:
"""
Removed in rebasehelper=0.20.0
"""
release = self.header.release
dist = MacroHelper.expand("%{dist}")
if dist:
release = release.replace(dist, "")
return re.sub(r"([0-9.]*[0-9]+).*", r"\1", release)
@saves
def set_patches(
self, patch_list: List[PatchMetadata], patch_id_digits: int = 4
) -> None:
"""
Set given patches in the spec file
:param patch_list: [PatchMetadata]
:param patch_id_digits: Number of digits of the generated patch ID.
This is used to control whether to have 'Patch' or 'Patch1' or 'Patch0001'.
"""
if not patch_list:
return
if all(p.present_in_specfile for p in patch_list):
logger.debug(
"All patches are present in the spec file, nothing to do here 🚀"
)
return
# we could have generated patches before (via git-format-patch)
# so let's reload the spec
self.reload()
applied_patches: Dict[str, PatchObject] = {
p.get_patch_name(): p for p in self.get_applied_patches()
}
for patch_metadata in patch_list:
if patch_metadata.present_in_specfile:
logger.debug(
f"Patch {patch_metadata.name} is already present in the spec file."
)
continue
if patch_metadata.name in applied_patches:
logger.debug(
f"Patch {patch_metadata.name} is already defined in the spec file."
)
continue
self.add_patch(patch_metadata, patch_id_digits)
def add_patch(
self, patch_metadata: PatchMetadata, patch_id_digits: Optional[int] = 4
):
"""
Add provided patch to the spec file:
* Set Patch index to be +1 than the highest index of an existing specfile patch
* The Patch placement logic works like this:
* If there already are patches, then the patch is added after them
* If there are no existing patches, the patch is added after Source definitions
Args:
patch_metadata: Metadata of the patch to be added.
patch_id_digits: Number of digits of the generated patch ID. This is used to
control whether to have 'Patch' or 'Patch1' or 'Patch0001'.
"""
try:
patch_number_offset = max(x.index for x in self.get_applied_patches())
except ValueError:
logger.debug("There are no patches in the spec.")
# 0 is a valid patch index
patch_number_offset = -1
if patch_metadata.patch_id is not None:
if patch_metadata.patch_id <= patch_number_offset:
raise PackitException(
f"The 'patch_id' requested ({patch_metadata.patch_id}) for patch "
f"{patch_metadata.name} is less than or equal to the last used patch ID "
f"({patch_number_offset}). Re-ordering the patches using 'patch_id' is "
"not allowed - if you want to change the order of those patches, "
"please reorder the commits in your source-git repository."
)
patch_id = patch_metadata.patch_id
else:
# 0 is a valid patch index, but let's start with 1 which is more common, e.g.
# https://src.fedoraproject.org/rpms/glibc/blob/f6682c9bac5872385b3caae0cd51fe3dbfcbb88f/f/glibc.spec#_158
# https://src.fedoraproject.org/rpms/python3.10/blob/ac9a5093cb9f534ef2f65cbd1f50684c88b91eec/f/python3.10.spec#_267
patch_id = max(patch_number_offset + 1, 1)
new_content = "\n"
new_content += "\n".join(
line if line.startswith("#") else f"# {line}".strip()
for line in patch_metadata.specfile_comment.splitlines()
)
patch_id_str = f"{patch_id:0{patch_id_digits}d}" if patch_id_digits > 0 else ""
new_content += f"\nPatch{patch_id_str}: {patch_metadata.name}"
if self.get_applied_patches():
last_source_tag_line = [
t.line for t in self.tags.filter(name="Patch*", valid=None)
][-1]
else:
last_source_tag_line = [
t.line for t in self.tags.filter(name="Source*", valid=None)
][-1]
# Find first empty line after last_source_tag_line
for index, line in enumerate(
self.spec_content.section("%package")[last_source_tag_line:],
start=last_source_tag_line,
):
if not line:
where = index
break
else:
where = len(self.spec_content.section("%package"))
logger.debug(f"Adding patch {patch_metadata.name} to the spec file.")
self.spec_content.section("%package")[where:where] = new_content.splitlines()
self.save()
def get_source(self, source_name: str) -> Optional[Tag]:
"""
get specific Source from spec
:param source_name: precise name of the Source, e.g. Source1, or Source
:return: corresponding Source Tag
"""
# sanitize the name, this will also add index if there isn't one
source_name, *_ = Tags._sanitize_tag(source_name, 0, 0)
return next(self.tags.filter(name=source_name, valid=None), None)
def read_patch_comments(self) -> dict:
"""Read the spec again, detect comment lines right above a patch-line
and save it as an attribute to the patch for later retrieval.
Match patch-lines with the patch-data from rebase-helper on the name of
the patches.
Returns:
A dict where each patch name (the basename of the value of the
patch-line) has 0 or more comment lines associated with it.
"""
comment: List[str] = []
patch_comments = {}
for line in self.spec_content.section("%package"):
# An empty line clears the comment lines collected so far.
if not line.strip():
comment = []
# Remember a comment line.
if line.startswith("#"):
comment.append(line[1:].strip())
# Associate comments with patches and clear the comments
# collected.
if line.lower().startswith("patch"):
patch_name = Path(line.split(":", 1)[1].strip()).name
patch_comments[patch_name] = comment
comment = []
return patch_comments
@property
def patch_id_digits(self) -> int:
"""Detect and return the number of digits used in patch IDs (indices).
Look for the first patch-line, and use that as a reference.
0 - no patch ID at all, just a bare "Patch"
1 - no leading zeros for patch IDs
2 or more - the minimum number of digits to be used for patch IDs.
Returns:
Number of digits used on the first patch-line, or 0 if there is
no patch-line found.
"""
if self._patch_id_digits is not None:
return self._patch_id_digits
self._patch_id_digits = 1
for line in self.spec_content.section("%package"):
if line.lower().startswith("patch"):
match = re.match(r"^patch(\d*)\s*:.+", line, flags=re.IGNORECASE)
if not match[1]:
self._patch_id_digits = 0
elif match[1].startswith("0"):
self._patch_id_digits = len(match[1])
break
return self._patch_id_digits
@property
def uses_autosetup(self) -> bool:
"""Tell if the specfile uses %autosetup
Returns:
True if the file uses %autosetup, otherwise False.
"""
if self._uses_autosetup is not None:
return self._uses_autosetup
self._uses_autosetup = False
for line in self.spec_content.section("%prep"):
if line.startswith("%autosetup"):
self._uses_autosetup = True
break
return self._uses_autosetup
def remove_patches(self):
"""Remove all patch-lines from the spec file"""
content = []
stretch = []
for line in self.spec_content.section("%package"):
stretch.append(line)
# Empty lines save the current stretch into content.
if not line.strip():
content += stretch
stretch = []
# Patch-lines throw away the current stretch.
if line.lower().startswith("patch"):
stretch = []
# If there is an empty line at the end of content
# throw it away, to avoid duplicate lines.
if not content[-1].strip():
content.pop()
self.spec_content.replace_section("%package", content)
self.save()
| 38.026316 | 128 | 0.596463 |
import inspect
import re
from logging import getLogger
from pathlib import Path
from typing import Union, List, Optional, Dict
from packit.patches import PatchMetadata
from rebasehelper.helpers.macro_helper import MacroHelper
from rebasehelper.specfile import SpecFile, RebaseHelperError, saves, PatchObject
from rebasehelper.tags import Tag, Tags
try:
from rebasehelper.plugins.plugin_manager import plugin_manager
except ImportError:
from rebasehelper.versioneer import versioneers_runner
from packit.exceptions import PackitException
logger = getLogger(__name__)
class Specfile(SpecFile):
def __init__(self, path: Union[str, Path], sources_dir: Union[str, Path] = ""):
s = inspect.signature(SpecFile)
if "changelog_entry" in s.parameters:
super().__init__(
path=str(path), sources_location=str(sources_dir), changelog_entry=""
)
else:
super().__init__(path=str(path), sources_location=str(sources_dir))
self._patch_id_digits: Optional[int] = None
self._uses_autosetup: Optional[bool] = None
def update_spec(self):
if hasattr(self, "update"):
self.update()
else:
self._update_data()
def update_changelog_in_spec(self, changelog_entry):
if hasattr(self, "update_changelog"):
self.update_changelog(changelog_entry)
else:
self.changelog_entry = changelog_entry
new_log = self.get_new_log()
new_log.extend(self.spec_content.sections["%changelog"])
self.spec_content.sections["%changelog"] = new_log
self.save()
def set_spec_version(
self, version: str = None, release: str = None, changelog_entry: str = None
):
try:
if version:
self.set_tag("Version", version, preserve_macros=False)
if release:
self.set_tag(
"Release", "{}%{{?dist}}".format(release), preserve_macros=False
)
if not changelog_entry:
return
self.update_changelog_in_spec(changelog_entry)
except RebaseHelperError as ex:
logger.error(f"Rebase-helper failed to change the spec file: {ex}")
raise PackitException("Rebase-helper didn't do the job.")
def write_spec_content(self):
if hasattr(self, "_write_spec_content"):
# new rebase-helper
self._write_spec_content()
else:
# old rebase-helper
self._write_spec_file_to_disc()
@staticmethod
def get_upstream_version(versioneer, package_name, category):
try:
get_version = plugin_manager.versioneers.run
except NameError:
get_version = versioneers_runner.run
return get_version(versioneer, package_name, category)
def get_release_number(self) -> str:
release = self.header.release
dist = MacroHelper.expand("%{dist}")
if dist:
release = release.replace(dist, "")
return re.sub(r"([0-9.]*[0-9]+).*", r"\1", release)
@saves
def set_patches(
self, patch_list: List[PatchMetadata], patch_id_digits: int = 4
) -> None:
if not patch_list:
return
if all(p.present_in_specfile for p in patch_list):
logger.debug(
"All patches are present in the spec file, nothing to do here 🚀"
)
return
# we could have generated patches before (via git-format-patch)
# so let's reload the spec
self.reload()
applied_patches: Dict[str, PatchObject] = {
p.get_patch_name(): p for p in self.get_applied_patches()
}
for patch_metadata in patch_list:
if patch_metadata.present_in_specfile:
logger.debug(
f"Patch {patch_metadata.name} is already present in the spec file."
)
continue
if patch_metadata.name in applied_patches:
logger.debug(
f"Patch {patch_metadata.name} is already defined in the spec file."
)
continue
self.add_patch(patch_metadata, patch_id_digits)
def add_patch(
self, patch_metadata: PatchMetadata, patch_id_digits: Optional[int] = 4
):
try:
patch_number_offset = max(x.index for x in self.get_applied_patches())
except ValueError:
logger.debug("There are no patches in the spec.")
patch_number_offset = -1
if patch_metadata.patch_id is not None:
if patch_metadata.patch_id <= patch_number_offset:
raise PackitException(
f"The 'patch_id' requested ({patch_metadata.patch_id}) for patch "
f"{patch_metadata.name} is less than or equal to the last used patch ID "
f"({patch_number_offset}). Re-ordering the patches using 'patch_id' is "
"not allowed - if you want to change the order of those patches, "
"please reorder the commits in your source-git repository."
)
patch_id = patch_metadata.patch_id
else:
# https://src.fedoraproject.org/rpms/glibc/blob/f6682c9bac5872385b3caae0cd51fe3dbfcbb88f/f/glibc.spec#_158
# https://src.fedoraproject.org/rpms/python3.10/blob/ac9a5093cb9f534ef2f65cbd1f50684c88b91eec/f/python3.10.spec#_267
patch_id = max(patch_number_offset + 1, 1)
new_content = "\n"
new_content += "\n".join(
line if line.startswith("#") else f"# {line}".strip()
for line in patch_metadata.specfile_comment.splitlines()
)
patch_id_str = f"{patch_id:0{patch_id_digits}d}" if patch_id_digits > 0 else ""
new_content += f"\nPatch{patch_id_str}: {patch_metadata.name}"
if self.get_applied_patches():
last_source_tag_line = [
t.line for t in self.tags.filter(name="Patch*", valid=None)
][-1]
else:
last_source_tag_line = [
t.line for t in self.tags.filter(name="Source*", valid=None)
][-1]
# Find first empty line after last_source_tag_line
for index, line in enumerate(
self.spec_content.section("%package")[last_source_tag_line:],
start=last_source_tag_line,
):
if not line:
where = index
break
else:
where = len(self.spec_content.section("%package"))
logger.debug(f"Adding patch {patch_metadata.name} to the spec file.")
self.spec_content.section("%package")[where:where] = new_content.splitlines()
self.save()
def get_source(self, source_name: str) -> Optional[Tag]:
# sanitize the name, this will also add index if there isn't one
source_name, *_ = Tags._sanitize_tag(source_name, 0, 0)
return next(self.tags.filter(name=source_name, valid=None), None)
def read_patch_comments(self) -> dict:
comment: List[str] = []
patch_comments = {}
for line in self.spec_content.section("%package"):
if not line.strip():
comment = []
if line.startswith("#"):
comment.append(line[1:].strip())
if line.lower().startswith("patch"):
patch_name = Path(line.split(":", 1)[1].strip()).name
patch_comments[patch_name] = comment
comment = []
return patch_comments
@property
def patch_id_digits(self) -> int:
if self._patch_id_digits is not None:
return self._patch_id_digits
self._patch_id_digits = 1
for line in self.spec_content.section("%package"):
if line.lower().startswith("patch"):
match = re.match(r"^patch(\d*)\s*:.+", line, flags=re.IGNORECASE)
if not match[1]:
self._patch_id_digits = 0
elif match[1].startswith("0"):
self._patch_id_digits = len(match[1])
break
return self._patch_id_digits
@property
def uses_autosetup(self) -> bool:
if self._uses_autosetup is not None:
return self._uses_autosetup
self._uses_autosetup = False
for line in self.spec_content.section("%prep"):
if line.startswith("%autosetup"):
self._uses_autosetup = True
break
return self._uses_autosetup
def remove_patches(self):
content = []
stretch = []
for line in self.spec_content.section("%package"):
stretch.append(line)
if not line.strip():
content += stretch
stretch = []
if line.lower().startswith("patch"):
stretch = []
if not content[-1].strip():
content.pop()
self.spec_content.replace_section("%package", content)
self.save()
| true | true |
f730d1cc76d0f5de69d6d7933ece019dfff3c3cf | 598 | py | Python | setup.py | guate/division-politica | 98ecd566dfc5a0417240756e0b085b423f23181b | [
"MIT"
] | null | null | null | setup.py | guate/division-politica | 98ecd566dfc5a0417240756e0b085b423f23181b | [
"MIT"
] | 2 | 2015-12-14T20:21:05.000Z | 2015-12-16T23:01:21.000Z | setup.py | guate/division-politica | 98ecd566dfc5a0417240756e0b085b423f23181b | [
"MIT"
] | 2 | 2015-12-14T20:31:47.000Z | 2020-06-03T16:50:28.000Z | from setuptools import setup
with open('README.md', 'r') as ld:
long_description = ld.read()
setup_args = dict(
name='guate.division-politica',
use_scm_version=True,
packages=['guate.division_politica'],
include_package_data=True,
author='Darwin Monroy',
author_email='contact@darwinmonroy.com',
description='División Política de Guatemala',
long_description=long_description,
setup_requires=[
'setuptools_scm',
],
install_requires=[
'attrdict',
'chilero>=0.3.8'
]
)
if __name__ == '__main__':
setup(**setup_args)
| 22.148148 | 49 | 0.66388 | from setuptools import setup
with open('README.md', 'r') as ld:
long_description = ld.read()
setup_args = dict(
name='guate.division-politica',
use_scm_version=True,
packages=['guate.division_politica'],
include_package_data=True,
author='Darwin Monroy',
author_email='contact@darwinmonroy.com',
description='División Política de Guatemala',
long_description=long_description,
setup_requires=[
'setuptools_scm',
],
install_requires=[
'attrdict',
'chilero>=0.3.8'
]
)
if __name__ == '__main__':
setup(**setup_args)
| true | true |
f730d1d7c471e58cd2224140890fda9894003216 | 4,612 | py | Python | azure-mgmt-network/azure/mgmt/network/v2018_01_01/models/inbound_nat_rule_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | azure-mgmt-network/azure/mgmt/network/v2018_01_01/models/inbound_nat_rule_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-mgmt-network/azure/mgmt/network/v2018_01_01/models/inbound_nat_rule_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2018-08-28T14:36:47.000Z | 2018-08-28T14:36:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource_py3 import SubResource
class InboundNatRule(SubResource):
"""Inbound NAT rule of the load balancer.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:param frontend_ip_configuration: A reference to frontend IP addresses.
:type frontend_ip_configuration:
~azure.mgmt.network.v2018_01_01.models.SubResource
:ivar backend_ip_configuration: A reference to a private IP address
defined on a network interface of a VM. Traffic sent to the frontend port
of each of the frontend IP configurations is forwarded to the backend IP.
:vartype backend_ip_configuration:
~azure.mgmt.network.v2018_01_01.models.NetworkInterfaceIPConfiguration
:param protocol: Possible values include: 'Udp', 'Tcp', 'All'
:type protocol: str or
~azure.mgmt.network.v2018_01_01.models.TransportProtocol
:param frontend_port: The port for the external endpoint. Port numbers for
each rule must be unique within the Load Balancer. Acceptable values range
from 1 to 65534.
:type frontend_port: int
:param backend_port: The port used for the internal endpoint. Acceptable
values range from 1 to 65535.
:type backend_port: int
:param idle_timeout_in_minutes: The timeout for the TCP idle connection.
The value can be set between 4 and 30 minutes. The default value is 4
minutes. This element is only used when the protocol is set to TCP.
:type idle_timeout_in_minutes: int
:param enable_floating_ip: Configures a virtual machine's endpoint for the
floating IP capability required to configure a SQL AlwaysOn Availability
Group. This setting is required when using the SQL AlwaysOn Availability
Groups in SQL server. This setting can't be changed after you create the
endpoint.
:type enable_floating_ip: bool
:param provisioning_state: Gets the provisioning state of the public IP
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: Gets name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_validation = {
'backend_ip_configuration': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'frontend_ip_configuration': {'key': 'properties.frontendIPConfiguration', 'type': 'SubResource'},
'backend_ip_configuration': {'key': 'properties.backendIPConfiguration', 'type': 'NetworkInterfaceIPConfiguration'},
'protocol': {'key': 'properties.protocol', 'type': 'str'},
'frontend_port': {'key': 'properties.frontendPort', 'type': 'int'},
'backend_port': {'key': 'properties.backendPort', 'type': 'int'},
'idle_timeout_in_minutes': {'key': 'properties.idleTimeoutInMinutes', 'type': 'int'},
'enable_floating_ip': {'key': 'properties.enableFloatingIP', 'type': 'bool'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, id: str=None, frontend_ip_configuration=None, protocol=None, frontend_port: int=None, backend_port: int=None, idle_timeout_in_minutes: int=None, enable_floating_ip: bool=None, provisioning_state: str=None, name: str=None, etag: str=None, **kwargs) -> None:
super(InboundNatRule, self).__init__(id=id, **kwargs)
self.frontend_ip_configuration = frontend_ip_configuration
self.backend_ip_configuration = None
self.protocol = protocol
self.frontend_port = frontend_port
self.backend_port = backend_port
self.idle_timeout_in_minutes = idle_timeout_in_minutes
self.enable_floating_ip = enable_floating_ip
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
| 50.130435 | 282 | 0.686904 |
from .sub_resource_py3 import SubResource
class InboundNatRule(SubResource):
_validation = {
'backend_ip_configuration': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'frontend_ip_configuration': {'key': 'properties.frontendIPConfiguration', 'type': 'SubResource'},
'backend_ip_configuration': {'key': 'properties.backendIPConfiguration', 'type': 'NetworkInterfaceIPConfiguration'},
'protocol': {'key': 'properties.protocol', 'type': 'str'},
'frontend_port': {'key': 'properties.frontendPort', 'type': 'int'},
'backend_port': {'key': 'properties.backendPort', 'type': 'int'},
'idle_timeout_in_minutes': {'key': 'properties.idleTimeoutInMinutes', 'type': 'int'},
'enable_floating_ip': {'key': 'properties.enableFloatingIP', 'type': 'bool'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, id: str=None, frontend_ip_configuration=None, protocol=None, frontend_port: int=None, backend_port: int=None, idle_timeout_in_minutes: int=None, enable_floating_ip: bool=None, provisioning_state: str=None, name: str=None, etag: str=None, **kwargs) -> None:
super(InboundNatRule, self).__init__(id=id, **kwargs)
self.frontend_ip_configuration = frontend_ip_configuration
self.backend_ip_configuration = None
self.protocol = protocol
self.frontend_port = frontend_port
self.backend_port = backend_port
self.idle_timeout_in_minutes = idle_timeout_in_minutes
self.enable_floating_ip = enable_floating_ip
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
| true | true |
f730d290273fed913ab812d544850db1a348c0e6 | 8,101 | py | Python | python/pyspark/accumulators.py | yqtaowhu/Spark | a381bce7285ec30f58f28f523dfcfe0c13221bbf | [
"Apache-2.0"
] | 25 | 2020-02-21T20:53:38.000Z | 2022-03-03T03:16:31.000Z | python/pyspark/accumulators.py | yqtaowhu/Spark | a381bce7285ec30f58f28f523dfcfe0c13221bbf | [
"Apache-2.0"
] | 7 | 2021-01-21T01:36:00.000Z | 2021-04-26T21:01:16.000Z | python/pyspark/accumulators.py | yqtaowhu/Spark | a381bce7285ec30f58f28f523dfcfe0c13221bbf | [
"Apache-2.0"
] | 5 | 2020-03-25T15:55:10.000Z | 2021-06-22T01:20:15.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> a = sc.accumulator(1)
>>> a.value
1
>>> a.value = 2
>>> a.value
2
>>> a += 5
>>> a.value
7
>>> sc.accumulator(1.0).value
1.0
>>> sc.accumulator(1j).value
1j
>>> rdd = sc.parallelize([1,2,3])
>>> def f(x):
... global a
... a += x
>>> rdd.foreach(f)
>>> a.value
13
>>> b = sc.accumulator(0)
>>> def g(x):
... b.add(x)
>>> rdd.foreach(g)
>>> b.value
6
>>> from pyspark.accumulators import AccumulatorParam
>>> class VectorAccumulatorParam(AccumulatorParam):
... def zero(self, value):
... return [0.0] * len(value)
... def addInPlace(self, val1, val2):
... for i in range(len(val1)):
... val1[i] += val2[i]
... return val1
>>> va = sc.accumulator([1.0, 2.0, 3.0], VectorAccumulatorParam())
>>> va.value
[1.0, 2.0, 3.0]
>>> def g(x):
... global va
... va += [x] * 3
>>> rdd.foreach(g)
>>> va.value
[7.0, 8.0, 9.0]
>>> rdd.map(lambda x: a.value).collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError:...
>>> def h(x):
... global a
... a.value = 7
>>> rdd.foreach(h) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError:...
>>> sc.accumulator([1.0, 2.0, 3.0]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError:...
"""
import sys
import select
import struct
if sys.version < '3':
import SocketServer
else:
import socketserver as SocketServer
import threading
from pyspark.serializers import read_int, PickleSerializer
__all__ = ['Accumulator', 'AccumulatorParam']
pickleSer = PickleSerializer()
# Holds accumulators registered on the current machine, keyed by ID. This is then used to send
# the local accumulator updates back to the driver program at the end of a task.
_accumulatorRegistry = {}
def _deserialize_accumulator(aid, zero_value, accum_param):
from pyspark.accumulators import _accumulatorRegistry
accum = Accumulator(aid, zero_value, accum_param)
accum._deserialized = True
_accumulatorRegistry[aid] = accum
return accum
class Accumulator(object):
"""
A shared variable that can be accumulated, i.e., has a commutative and associative "add"
operation. Worker tasks on a Spark cluster can add values to an Accumulator with the C{+=}
operator, but only the driver program is allowed to access its value, using C{value}.
Updates from the workers get propagated automatically to the driver program.
While C{SparkContext} supports accumulators for primitive data types like C{int} and
C{float}, users can also define accumulators for custom types by providing a custom
L{AccumulatorParam} object. Refer to the doctest of this module for an example.
"""
def __init__(self, aid, value, accum_param):
"""Create a new Accumulator with a given initial value and AccumulatorParam object"""
from pyspark.accumulators import _accumulatorRegistry
self.aid = aid
self.accum_param = accum_param
self._value = value
self._deserialized = False
_accumulatorRegistry[aid] = self
def __reduce__(self):
"""Custom serialization; saves the zero value from our AccumulatorParam"""
param = self.accum_param
return (_deserialize_accumulator, (self.aid, param.zero(self._value), param))
@property
def value(self):
"""Get the accumulator's value; only usable in driver program"""
if self._deserialized:
raise Exception("Accumulator.value cannot be accessed inside tasks")
return self._value
@value.setter
def value(self, value):
"""Sets the accumulator's value; only usable in driver program"""
if self._deserialized:
raise Exception("Accumulator.value cannot be accessed inside tasks")
self._value = value
def add(self, term):
"""Adds a term to this accumulator's value"""
self._value = self.accum_param.addInPlace(self._value, term)
def __iadd__(self, term):
"""The += operator; adds a term to this accumulator's value"""
self.add(term)
return self
def __str__(self):
return str(self._value)
def __repr__(self):
return "Accumulator<id=%i, value=%s>" % (self.aid, self._value)
class AccumulatorParam(object):
"""
Helper object that defines how to accumulate values of a given type.
"""
def zero(self, value):
"""
Provide a "zero value" for the type, compatible in dimensions with the
provided C{value} (e.g., a zero vector)
"""
raise NotImplementedError
def addInPlace(self, value1, value2):
"""
Add two values of the accumulator's data type, returning a new value;
for efficiency, can also update C{value1} in place and return it.
"""
raise NotImplementedError
class AddingAccumulatorParam(AccumulatorParam):
"""
An AccumulatorParam that uses the + operators to add values. Designed for simple types
such as integers, floats, and lists. Requires the zero value for the underlying type
as a parameter.
"""
def __init__(self, zero_value):
self.zero_value = zero_value
def zero(self, value):
return self.zero_value
def addInPlace(self, value1, value2):
value1 += value2
return value1
# Singleton accumulator params for some standard types
INT_ACCUMULATOR_PARAM = AddingAccumulatorParam(0)
FLOAT_ACCUMULATOR_PARAM = AddingAccumulatorParam(0.0)
COMPLEX_ACCUMULATOR_PARAM = AddingAccumulatorParam(0.0j)
class _UpdateRequestHandler(SocketServer.StreamRequestHandler):
    """
    Request handler that keeps polling accumulator updates from one socket
    until the owning server is shut down.
    """

    def handle(self):
        from pyspark.accumulators import _accumulatorRegistry
        while not self.server.server_shutdown:
            # Wait at most one second so a server shutdown is noticed promptly.
            ready, _, _ = select.select([self.rfile], [], [], 1)
            if self.rfile not in ready:
                continue
            count = read_int(self.rfile)
            for _ in range(count):
                aid, update = pickleSer._read_with_length(self.rfile)
                _accumulatorRegistry[aid] += update
            # Acknowledge the whole batch with a single byte.
            self.wfile.write(struct.pack("!b", 1))
class AccumulatorServer(SocketServer.TCPServer):
    """
    TCP server whose shutdown() additionally flags the handlers to stop
    their polling loop and closes the listening socket.
    """

    # Flipped to True by shutdown(); handlers poll this flag.
    server_shutdown = False

    def shutdown(self):
        """Signal handlers, stop serving, and release the socket."""
        self.server_shutdown = True
        SocketServer.TCPServer.shutdown(self)
        self.server_close()
def _start_update_server():
    """Launch an AccumulatorServer in a background daemon thread and return it."""
    update_server = AccumulatorServer(("localhost", 0), _UpdateRequestHandler)
    serving_thread = threading.Thread(target=update_server.serve_forever)
    serving_thread.daemon = True
    serving_thread.start()
    return update_server
if __name__ == "__main__":
    # Run the module's doctests; exit non-zero when any of them fail.
    import doctest

    failure_count, _ = doctest.testmod()
    if failure_count:
        sys.exit(-1)
| 30.115242 | 94 | 0.670658 |
import sys
import select
import struct
if sys.version < '3':
import SocketServer
else:
import socketserver as SocketServer
import threading
from pyspark.serializers import read_int, PickleSerializer
# Names exported by ``from pyspark.accumulators import *``.
__all__ = ['Accumulator', 'AccumulatorParam']
# Serializer used to decode (aid, update) pairs received from workers.
pickleSer = PickleSerializer()
# Maps accumulator id -> Accumulator instance registered in this process.
_accumulatorRegistry = {}
def _deserialize_accumulator(aid, zero_value, accum_param):
    """Rebuild a worker-side Accumulator and register it locally."""
    from pyspark.accumulators import _accumulatorRegistry
    # Worker-side copies start from the zero value and are flagged so that
    # reading .value on them raises.
    restored = Accumulator(aid, zero_value, accum_param)
    restored._deserialized = True
    _accumulatorRegistry[aid] = restored
    return restored
class Accumulator(object):
    """
    A shared variable that can be accumulated; tasks may only add to it
    with ``+=``, while the driver program may also read or set ``value``.
    """

    def __init__(self, aid, value, accum_param):
        from pyspark.accumulators import _accumulatorRegistry
        self._deserialized = False
        self.aid = aid
        self._value = value
        self.accum_param = accum_param
        _accumulatorRegistry[aid] = self

    def __reduce__(self):
        # Ship the zero value to workers; updates flow back via the registry.
        helper = self.accum_param
        return (_deserialize_accumulator, (self.aid, helper.zero(self._value), helper))

    @property
    def value(self):
        """Get the accumulator's value; only usable in the driver program."""
        if self._deserialized:
            raise Exception("Accumulator.value cannot be accessed inside tasks")
        return self._value

    @value.setter
    def value(self, value):
        """Set the accumulator's value; only usable in the driver program."""
        if self._deserialized:
            raise Exception("Accumulator.value cannot be accessed inside tasks")
        self._value = value

    def add(self, term):
        """Fold ``term`` into the current value via the AccumulatorParam."""
        self._value = self.accum_param.addInPlace(self._value, term)

    def __iadd__(self, term):
        self.add(term)
        return self

    def __str__(self):
        return str(self._value)

    def __repr__(self):
        return "Accumulator<id=%i, value=%s>" % (self.aid, self._value)
class AccumulatorParam(object):
    """Interface describing how values of a given type are accumulated."""

    def zero(self, value):
        """Return a zero element dimensionally compatible with ``value``."""
        raise NotImplementedError

    def addInPlace(self, value1, value2):
        """Combine two values, optionally updating ``value1`` in place."""
        raise NotImplementedError
class AddingAccumulatorParam(AccumulatorParam):
    """AccumulatorParam that accumulates with ``+``; caller supplies the zero."""

    def __init__(self, zero_value):
        self.zero_value = zero_value

    def zero(self, value):
        # The same zero is returned regardless of the sample value's shape.
        return self.zero_value

    def addInPlace(self, value1, value2):
        value1 += value2
        return value1
# Ready-made accumulator params for the common numeric types.
INT_ACCUMULATOR_PARAM = AddingAccumulatorParam(0)
FLOAT_ACCUMULATOR_PARAM = AddingAccumulatorParam(0.0)
COMPLEX_ACCUMULATOR_PARAM = AddingAccumulatorParam(0.0j)
class _UpdateRequestHandler(SocketServer.StreamRequestHandler):
    """Handler that polls accumulator updates until the server shuts down."""

    def handle(self):
        from pyspark.accumulators import _accumulatorRegistry
        while not self.server.server_shutdown:
            # Wait at most one second so a shutdown is noticed promptly.
            ready, _, _ = select.select([self.rfile], [], [], 1)
            if self.rfile not in ready:
                continue
            count = read_int(self.rfile)
            for _ in range(count):
                aid, update = pickleSer._read_with_length(self.rfile)
                _accumulatorRegistry[aid] += update
            # Write a byte in acknowledgement
            self.wfile.write(struct.pack("!b", 1))
class AccumulatorServer(SocketServer.TCPServer):
    """TCP server whose shutdown() also interrupts the handlers' polling."""

    # Flipped to True by shutdown(); handlers poll this flag.
    server_shutdown = False

    def shutdown(self):
        """Signal handlers, stop serving, and release the listening socket."""
        self.server_shutdown = True
        SocketServer.TCPServer.shutdown(self)
        self.server_close()
def _start_update_server():
    """Start the accumulator-update server in a daemon thread and return it."""
    update_server = AccumulatorServer(("localhost", 0), _UpdateRequestHandler)
    serving_thread = threading.Thread(target=update_server.serve_forever)
    serving_thread.daemon = True
    serving_thread.start()
    return update_server
if __name__ == "__main__":
    # Run the module's doctests; exit non-zero when any of them fail.
    import doctest

    failure_count, _ = doctest.testmod()
    if failure_count:
        sys.exit(-1)
| true | true |
f730d557ff0c45b46669b134b1aeeb556ec7ffa6 | 2,556 | py | Python | src/main.py | WillGITCode/raspberry-robot | b81814be34837f12074774df40cf86b9111be0a6 | [
"MIT"
] | null | null | null | src/main.py | WillGITCode/raspberry-robot | b81814be34837f12074774df40cf86b9111be0a6 | [
"MIT"
] | null | null | null | src/main.py | WillGITCode/raspberry-robot | b81814be34837f12074774df40cf86b9111be0a6 | [
"MIT"
] | null | null | null | import RPi.GPIO as GPIO
from time import sleep
from modules import ping_sensor
from modules import motor_board
from modules import control_surfaces
# Gamepad used to drive the robot manually.
gamePad = control_surfaces.XboxController()
# GPIO pins wired to the motor driver board.
motor_board.init_motor_pins([7, 11, 13, 15])
# Ultrasonic distance sensor on GPIO pin 16.
ping1 = ping_sensor.PingSensor(16)
# GPIO pin reserved for servo 1 (not referenced elsewhere in this file).
servo1 = 18
def avoid_obstacles():
    """Drive forward while clear; back up and turn when an obstacle is near."""
    distance = ping1.get_distance()
    if distance <= 7:
        # Too close: retreat for a second, then rotate away from the obstacle.
        motor_board.drive_backwards()
        sleep(1)
        motor_board.spin_left()
        sleep(.5)
    else:
        motor_board.drive_forwards()
# Entry point: manual drive loop with an optional autonomous mode.
def main():
    """Poll the gamepad and map sticks/triggers to motor commands.

    Start or Back stops the motors and exits; pressing A enters an
    autonomous obstacle-avoidance loop until A is pressed again.
    GPIO pins are always released on exit.
    """
    try:
        while True:
            try:
                if gamePad.get_property('Start') == 1 or gamePad.get_property('Back'):
                    # Either stop button: halt the motors and leave the program.
                    motor_board.drive_stop()
                    raise SystemExit(101)
                if gamePad.get_property('A') == 1:
                    sleep(.001)
                    # Autonomous mode until A is pressed again.
                    while gamePad.get_property('A') != 1:
                        avoid_obstacles()
                elif gamePad.get_property('LeftJoystickY') >= 0.7:
                    # Stick Y positive: drive_backwards (axis polarity per hardware).
                    while gamePad.get_property('LeftJoystickY') >= 0.7:
                        motor_board.drive_backwards()
                elif gamePad.get_property('LeftJoystickY') <= -0.7:
                    # Stick Y negative: drive_forwards.
                    while gamePad.get_property('LeftJoystickY') <= -0.7:
                        motor_board.drive_forwards()
                elif gamePad.get_property('LeftJoystickX') >= 0.7:
                    # Stick X positive: drive_left.
                    while gamePad.get_property('LeftJoystickX') >= 0.7:
                        motor_board.drive_left()
                elif gamePad.get_property('LeftJoystickX') <= -0.7:
                    # Stick X negative: drive_right.
                    while gamePad.get_property('LeftJoystickX') <= -0.7:
                        motor_board.drive_right()
                elif gamePad.get_property('LeftTrigger') >= 0.7:
                    # Left trigger held: spin_right in place.
                    while gamePad.get_property('LeftTrigger') >= 0.7:
                        motor_board.spin_right()
                elif gamePad.get_property('RightTrigger') >= 0.7:
                    # Right trigger held: spin_left in place.
                    while gamePad.get_property('RightTrigger') >= 0.7:
                        motor_board.spin_left()
                else:
                    # No input: stop so the robot doesn't keep the last command.
                    motor_board.drive_stop()
            finally:
                # Tiny pause between polls.
                sleep(0.0001)
    finally:
        GPIO.cleanup()
if __name__ == '__main__':
    main()
| 35.5 | 86 | 0.527387 | import RPi.GPIO as GPIO
from time import sleep
from modules import ping_sensor
from modules import motor_board
from modules import control_surfaces
# Gamepad used to drive the robot manually.
gamePad = control_surfaces.XboxController()
# GPIO pins wired to the motor driver board.
motor_board.init_motor_pins([7, 11, 13, 15])
# Ultrasonic distance sensor on GPIO pin 16.
ping1 = ping_sensor.PingSensor(16)
# GPIO pin reserved for servo 1 (not referenced elsewhere in this file).
servo1 = 18
def avoid_obstacles():
    """Drive forward while clear; back up and turn when an obstacle is near."""
    distance = ping1.get_distance()
    if distance <= 7:
        # Too close: retreat for a second, then rotate away from the obstacle.
        motor_board.drive_backwards()
        sleep(1)
        motor_board.spin_left()
        sleep(.5)
    else:
        motor_board.drive_forwards()
def main():
    """Poll the gamepad and map sticks/triggers to motor commands.

    Start or Back stops the motors and exits; pressing A enters an
    autonomous obstacle-avoidance loop until A is pressed again.
    GPIO pins are always released on exit.
    """
    try:
        while True:
            try:
                if gamePad.get_property('Start') == 1 or gamePad.get_property('Back'):
                    # Either stop button: halt the motors and leave the program.
                    motor_board.drive_stop()
                    raise SystemExit(101)
                if gamePad.get_property('A') == 1:
                    sleep(.001)
                    # Autonomous mode until A is pressed again.
                    while gamePad.get_property('A') != 1:
                        avoid_obstacles()
                elif gamePad.get_property('LeftJoystickY') >= 0.7:
                    while gamePad.get_property('LeftJoystickY') >= 0.7:
                        motor_board.drive_backwards()
                elif gamePad.get_property('LeftJoystickY') <= -0.7:
                    while gamePad.get_property('LeftJoystickY') <= -0.7:
                        motor_board.drive_forwards()
                elif gamePad.get_property('LeftJoystickX') >= 0.7:
                    while gamePad.get_property('LeftJoystickX') >= 0.7:
                        motor_board.drive_left()
                elif gamePad.get_property('LeftJoystickX') <= -0.7:
                    while gamePad.get_property('LeftJoystickX') <= -0.7:
                        motor_board.drive_right()
                elif gamePad.get_property('LeftTrigger') >= 0.7:
                    while gamePad.get_property('LeftTrigger') >= 0.7:
                        motor_board.spin_right()
                elif gamePad.get_property('RightTrigger') >= 0.7:
                    while gamePad.get_property('RightTrigger') >= 0.7:
                        motor_board.spin_left()
                else:
                    # No input: stop so the robot doesn't keep the last command.
                    motor_board.drive_stop()
            finally:
                # Tiny pause between polls.
                sleep(0.0001)
    finally:
        GPIO.cleanup()
if __name__ == '__main__':
    main()
| true | true |
f730d5a0e8a7dfd1b3a7e4a2eaba92d308f2bc8f | 625 | py | Python | test/data/outofdate/Makefile.py | ActiveState/mk | 2d0afd81ce4e8a3f39885cae5a50ded7bece7f76 | [
"MIT"
] | 2 | 2015-12-21T22:35:16.000Z | 2017-08-29T14:47:38.000Z | test/data/outofdate/Makefile.py | ActiveState/mk | 2d0afd81ce4e8a3f39885cae5a50ded7bece7f76 | [
"MIT"
] | 1 | 2020-05-14T12:59:01.000Z | 2020-12-11T18:34:07.000Z | test/data/outofdate/Makefile.py | ActiveState/mk | 2d0afd81ce4e8a3f39885cae5a50ded7bece7f76 | [
"MIT"
] | null | null | null | import os
import shutil
import time
from mklib import Task
from mklib.common import relpath
class foo(Task):
    """Default task: copy the generated bar.txt to foo.txt, logging the copy."""
    default = True
    results = ["foo.txt"]
    deps = ["bar.txt"]

    def make(self):
        source = self.deps[0].path
        destination = self.results[0].path
        self.log.info("cp %s %s", relpath(source), relpath(destination))
        shutil.copy(source, destination)
class bar(Task):
    """Task that writes the current Unix timestamp into bar.txt."""

    def make(self):
        # 'with' guarantees the file handle is closed even if write() raises;
        # the original leaked the handle on error.
        with open("bar.txt", 'w') as f:
            f.write(str(time.time()))
class clean(Task):
    """Task that removes the generated files, tolerating missing ones."""

    def make(self):
        for artifact in ("foo.txt", "bar.txt"):
            if os.path.exists(artifact):
                os.remove(artifact)
| 21.551724 | 61 | 0.552 | import os
import shutil
import time
from mklib import Task
from mklib.common import relpath
class foo(Task):
    """Default task: copy the generated bar.txt to foo.txt, logging the copy."""
    default = True
    results = ["foo.txt"]
    deps = ["bar.txt"]

    def make(self):
        source = self.deps[0].path
        destination = self.results[0].path
        self.log.info("cp %s %s", relpath(source), relpath(destination))
        shutil.copy(source, destination)
class bar(Task):
    """Task that writes the current Unix timestamp into bar.txt."""

    def make(self):
        # 'with' guarantees the file handle is closed even if write() raises;
        # the original leaked the handle on error.
        with open("bar.txt", 'w') as f:
            f.write(str(time.time()))
class clean(Task):
    """Task that removes the generated files, tolerating missing ones."""

    def make(self):
        for artifact in ("foo.txt", "bar.txt"):
            if os.path.exists(artifact):
                os.remove(artifact)
| true | true |
f730d5bf2e956011187143d53e9143c85a16e283 | 72 | py | Python | QPublic/__init__.py | Quantamatics/QPublic | 4591125626755ae3f0b6d71c8e275adb79c21d10 | [
"MIT"
] | null | null | null | QPublic/__init__.py | Quantamatics/QPublic | 4591125626755ae3f0b6d71c8e275adb79c21d10 | [
"MIT"
] | null | null | null | QPublic/__init__.py | Quantamatics/QPublic | 4591125626755ae3f0b6d71c8e275adb79c21d10 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from QPublic import (MarketData)
| 24 | 38 | 0.861111 | from __future__ import absolute_import
from QPublic import (MarketData)
| true | true |
f730d81cd96a012f361da653f7b9e5d5f24304f6 | 1,281 | py | Python | mlearning2.py | tobiasaditya/datascience_beginner | fa6868073951259e0a5f8a702de0bcc17c13d295 | [
"MIT"
] | null | null | null | mlearning2.py | tobiasaditya/datascience_beginner | fa6868073951259e0a5f8a702de0bcc17c13d295 | [
"MIT"
] | null | null | null | mlearning2.py | tobiasaditya/datascience_beginner | fa6868073951259e0a5f8a702de0bcc17c13d295 | [
"MIT"
] | null | null | null | import seaborn as sb
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
# Toy dataset: three interview scores per candidate plus a binary outcome.
person = {'finances':[1,2,3,4,5,6,7,8,7,6,5,4,3,2,1,7,4,1,8,5,2,9,6,3,9,8,7,6,5,4,3,2,1,1,9,7,3,8,2,7],
          'management':[1,2,3,4,5,6,7,8,7,6,5,4,3,2,1,7,4,1,8,5,2,9,6,3,9,8,7,6,5,4,3,2,1,1,9,7,3,8,2,7],
          'logistic':[1,2,3,4,5,6,7,8,7,6,5,4,3,2,1,7,4,1,8,5,2,9,6,3,9,8,7,6,5,4,3,2,1,1,9,7,3,8,2,7],
          'get_work':[0,0,1,0,0,1,0,0,1,1,1,1,1,1,1,0,0,0,1,0,0,1,0,0,1,1,0,1,0,0,0,0,1,1,1,0,1,0,0,1]
          }
database = pd.DataFrame(person, columns=['finances','management','logistic','get_work'])
print(database[['finances','management','logistic']])
# Features (the three scores) and target (whether the candidate got the job).
x = database[['finances','management','logistic']]
y = database['get_work']
# 30% of the rows for testing, 70% for training (fixed seed for repeatability).
x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.3,random_state=0)
lr=LogisticRegression()
lr.fit(x_train,y_train)
y_predict=lr.predict(x_test)
# Confusion matrix of true vs predicted labels, also rendered as a heatmap.
confusion_mat = pd.crosstab(y_test,y_predict,rownames=["true"],colnames=["prediction"])
sb.heatmap(confusion_mat,annot=True)
print("Accuracy = ", metrics.accuracy_score(y_test,y_predict))
print(confusion_mat)
plt.show()
| 34.621622 | 106 | 0.673692 | import seaborn as sb
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
# Toy dataset: three interview scores per candidate plus a binary outcome.
person = {'finances':[1,2,3,4,5,6,7,8,7,6,5,4,3,2,1,7,4,1,8,5,2,9,6,3,9,8,7,6,5,4,3,2,1,1,9,7,3,8,2,7],
          'management':[1,2,3,4,5,6,7,8,7,6,5,4,3,2,1,7,4,1,8,5,2,9,6,3,9,8,7,6,5,4,3,2,1,1,9,7,3,8,2,7],
          'logistic':[1,2,3,4,5,6,7,8,7,6,5,4,3,2,1,7,4,1,8,5,2,9,6,3,9,8,7,6,5,4,3,2,1,1,9,7,3,8,2,7],
          'get_work':[0,0,1,0,0,1,0,0,1,1,1,1,1,1,1,0,0,0,1,0,0,1,0,0,1,1,0,1,0,0,0,0,1,1,1,0,1,0,0,1]
          }
database = pd.DataFrame(person, columns=['finances','management','logistic','get_work'])
print(database[['finances','management','logistic']])
# Features (the three scores) and target (whether the candidate got the job).
x = database[['finances','management','logistic']]
y = database['get_work']
# 30% of the rows for testing, 70% for training (fixed seed for repeatability).
x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.3,random_state=0)
lr=LogisticRegression()
lr.fit(x_train,y_train)
y_predict=lr.predict(x_test)
# Confusion matrix of true vs predicted labels, also rendered as a heatmap.
confusion_mat = pd.crosstab(y_test,y_predict,rownames=["true"],colnames=["prediction"])
sb.heatmap(confusion_mat,annot=True)
print("Accuracy = ", metrics.accuracy_score(y_test,y_predict))
print(confusion_mat)
plt.show()
| true | true |
f730d894d50aaa291f54e9162a0aec5485653323 | 5,761 | py | Python | main.py | evlko/ICTonBot | f0bd4436c11f8907281671aa15969f13781c3d33 | [
"MIT"
] | 1 | 2020-11-22T09:41:20.000Z | 2020-11-22T09:41:20.000Z | main.py | evlko/ICTonBot | f0bd4436c11f8907281671aa15969f13781c3d33 | [
"MIT"
] | null | null | null | main.py | evlko/ICTonBot | f0bd4436c11f8907281671aa15969f13781c3d33 | [
"MIT"
] | 1 | 2020-11-24T17:44:59.000Z | 2020-11-24T17:44:59.000Z | import time
import telebot
import components.config as config
import components.dialogs as dialogs
from components.config import UserState
from components.core import bot, logger
from components.database.dbworker import DatabaseWorker
from components.dialogs import DialogEvent
from data.subject_list import subject_list
@bot.message_handler(commands=["start", "help"])
def start_messaging(message):
    """Greet the user on /start or /help, resetting state for known users."""
    if DatabaseWorker.contain_user(message.chat.id):
        # Returning user: put the conversation back at the START state first.
        DatabaseWorker.set_state(message.chat.id, config.UserState.START)
    dialogs.welcome_message(message)
@bot.callback_query_handler(func=lambda call: str.isnumeric(call.data))
def on_dialog_event(call):
    """Route numeric callback payloads (DialogEvent values) to dialog screens."""
    print(call)
    dialog_event = DialogEvent(int(call.data))
    # bot.delete_message(call.message.chat.id, call.message.message_id)
    if dialog_event == DialogEvent.ASK_FOR_NAME:
        dialogs.ask_for_name(call.message)
    elif dialog_event == DialogEvent.ABOUT:
        DatabaseWorker.set_state(call.message.chat.id, config.UserState.ABOUT)
        dialogs.about(call.message)
    elif dialog_event in [DialogEvent.BACK_FROM_ASK_NAME, DialogEvent.BACK_FROM_ABOUT]:
        # "Back" from the first screens: remove the old message, show welcome.
        bot.delete_message(call.message.chat.id, call.message.message_id)
        DatabaseWorker.set_state(call.message.chat.id, config.UserState.START)
        dialogs.welcome_message(call.message)
    elif dialog_event == DialogEvent.NEED_SUBJECTS_READY:
        dialogs.ask_for_give_subjects(call.message)
    elif dialog_event == DialogEvent.BACK_FROM_NEEDED_SUBJECTS:
        bot.delete_message(call.message.chat.id, call.message.message_id)
        dialogs.ask_for_faculty(call.message)
    elif dialog_event == DialogEvent.BACK_FROM_FACULTY:
        dialogs.ask_for_name(call.message)
    elif dialog_event == DialogEvent.GIVE_SUBJECTS_READY:
        dialogs.registered(call.message)
    elif dialog_event == DialogEvent.BACK_FROM_GIVE_SUBJECTS:
        bot.delete_message(call.message.chat.id, call.message.message_id)
        dialogs.ask_for_needed_subjects(call.message)
    elif dialog_event == DialogEvent.START_SEARCH:
        DatabaseWorker.set_state(call.message.chat.id, config.UserState.SEARCHING)
        dialogs.search_page(call.message)
@bot.callback_query_handler(func=lambda call: not str.isnumeric(call.data))
def on_string_callback(call):
    """Handle string callbacks (subject names): toggle the clicked subject in
    the user's current list and redraw the subject keyboard.

    The two states (needed vs. give subjects) share identical logic; the
    original duplicated ~20 lines per branch, now factored into a helper.
    """
    current_state = DatabaseWorker.get_current_state(call.message.chat.id)
    clicked_subject = str(call.data)
    print("String callback was passed: " + call.data)
    if current_state == config.UserState.NEEDED_SUBJECT_LIST.value[0]:
        _toggle_subject_and_refresh(call, clicked_subject,
                                    DatabaseWorker.get_needed_subject_list,
                                    DatabaseWorker.set_needed_subject_list,
                                    DialogEvent.NEED_SUBJECTS_READY,
                                    DialogEvent.BACK_FROM_NEEDED_SUBJECTS,
                                    "text_messages/ask_for_need_subjects.txt")
    elif current_state == config.UserState.GIVE_SUBJECT_LIST.value[0]:
        _toggle_subject_and_refresh(call, clicked_subject,
                                    DatabaseWorker.get_give_subject_list,
                                    DatabaseWorker.set_give_subject_list,
                                    DialogEvent.GIVE_SUBJECTS_READY,
                                    DialogEvent.BACK_FROM_GIVE_SUBJECTS,
                                    "text_messages/ask_for_give_subjects.txt")


def _toggle_subject_and_refresh(call, clicked_subject, get_list, set_list,
                                ready_event, back_event, text_path):
    """Toggle membership of the subject, persist it, and rebuild the keyboard."""
    selected = get_list(call.message.chat.id)
    if clicked_subject in selected:
        selected.remove(clicked_subject)
    else:
        selected.append(clicked_subject)
    set_list(call.message.chat.id, selected)
    markup = telebot.types.InlineKeyboardMarkup(row_width=1)
    for subject in subject_list.keys():
        # Selected subjects get a check-mark suffix in their button label.
        label = subject + " ✅" if subject in selected else subject
        markup.add(telebot.types.InlineKeyboardButton(text=label, callback_data=subject))
    markup.add(telebot.types.InlineKeyboardButton(text="Далее", callback_data=ready_event))
    markup.add(telebot.types.InlineKeyboardButton(text="Назад", callback_data=back_event))
    with open(text_path, "rt", encoding="utf-8") as f:
        message_text = f.read()
    bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id,
                          text=message_text, reply_markup=markup)
# Script entry: start long-polling and retry after network failures.
if __name__ == "__main__":
    print(DialogEvent.ASK_FOR_NAME, type(DialogEvent.ASK_FOR_NAME))
    # DatabaseWorker.set_state("192767028", UserState.START)
    while True:
        try:
            bot.polling(none_stop=True)
        except Exception as err:
            # Log, back off for a few seconds, then resume polling.
            logger.error(err)
            time.sleep(5)
            print("Internet error!")
| 41.746377 | 115 | 0.720361 | import time
import telebot
import components.config as config
import components.dialogs as dialogs
from components.config import UserState
from components.core import bot, logger
from components.database.dbworker import DatabaseWorker
from components.dialogs import DialogEvent
from data.subject_list import subject_list
@bot.message_handler(commands=["start", "help"])
def start_messaging(message):
    """Greet the user on /start or /help, resetting state for known users."""
    if DatabaseWorker.contain_user(message.chat.id):
        # Returning user: put the conversation back at the START state first.
        DatabaseWorker.set_state(message.chat.id, config.UserState.START)
    dialogs.welcome_message(message)
@bot.callback_query_handler(func=lambda call: str.isnumeric(call.data))
def on_dialog_event(call):
    """Route numeric callback payloads (DialogEvent values) to dialog screens."""
    print(call)
    dialog_event = DialogEvent(int(call.data))
    if dialog_event == DialogEvent.ASK_FOR_NAME:
        dialogs.ask_for_name(call.message)
    elif dialog_event == DialogEvent.ABOUT:
        dialogs.about(call.message)
    elif dialog_event in [DialogEvent.BACK_FROM_ASK_NAME, DialogEvent.BACK_FROM_ABOUT]:
        # "Back" from the first screens: remove the old message, show welcome.
        bot.delete_message(call.message.chat.id, call.message.message_id)
        DatabaseWorker.set_state(call.message.chat.id, config.UserState.START)
        dialogs.welcome_message(call.message)
    elif dialog_event == DialogEvent.NEED_SUBJECTS_READY:
        dialogs.ask_for_give_subjects(call.message)
    elif dialog_event == DialogEvent.BACK_FROM_NEEDED_SUBJECTS:
        bot.delete_message(call.message.chat.id, call.message.message_id)
        dialogs.ask_for_faculty(call.message)
    elif dialog_event == DialogEvent.BACK_FROM_FACULTY:
        dialogs.ask_for_name(call.message)
    elif dialog_event == DialogEvent.GIVE_SUBJECTS_READY:
        dialogs.registered(call.message)
    elif dialog_event == DialogEvent.BACK_FROM_GIVE_SUBJECTS:
        bot.delete_message(call.message.chat.id, call.message.message_id)
        dialogs.ask_for_needed_subjects(call.message)
    elif dialog_event == DialogEvent.START_SEARCH:
        DatabaseWorker.set_state(call.message.chat.id, config.UserState.SEARCHING)
        dialogs.search_page(call.message)
@bot.callback_query_handler(func=lambda call: not str.isnumeric(call.data))
def on_string_callback(call):
    """Handle string callbacks (subject names): toggle the clicked subject in
    the user's current list and redraw the subject keyboard.

    The two states (needed vs. give subjects) share identical logic; the
    original duplicated ~20 lines per branch, now factored into a helper.
    """
    current_state = DatabaseWorker.get_current_state(call.message.chat.id)
    clicked_subject = str(call.data)
    print("String callback was passed: " + call.data)
    if current_state == config.UserState.NEEDED_SUBJECT_LIST.value[0]:
        _toggle_subject_and_refresh(call, clicked_subject,
                                    DatabaseWorker.get_needed_subject_list,
                                    DatabaseWorker.set_needed_subject_list,
                                    DialogEvent.NEED_SUBJECTS_READY,
                                    DialogEvent.BACK_FROM_NEEDED_SUBJECTS,
                                    "text_messages/ask_for_need_subjects.txt")
    elif current_state == config.UserState.GIVE_SUBJECT_LIST.value[0]:
        _toggle_subject_and_refresh(call, clicked_subject,
                                    DatabaseWorker.get_give_subject_list,
                                    DatabaseWorker.set_give_subject_list,
                                    DialogEvent.GIVE_SUBJECTS_READY,
                                    DialogEvent.BACK_FROM_GIVE_SUBJECTS,
                                    "text_messages/ask_for_give_subjects.txt")


def _toggle_subject_and_refresh(call, clicked_subject, get_list, set_list,
                                ready_event, back_event, text_path):
    """Toggle membership of the subject, persist it, and rebuild the keyboard."""
    selected = get_list(call.message.chat.id)
    if clicked_subject in selected:
        selected.remove(clicked_subject)
    else:
        selected.append(clicked_subject)
    set_list(call.message.chat.id, selected)
    markup = telebot.types.InlineKeyboardMarkup(row_width=1)
    for subject in subject_list.keys():
        # Selected subjects get a check-mark suffix in their button label.
        label = subject + " ✅" if subject in selected else subject
        markup.add(telebot.types.InlineKeyboardButton(text=label, callback_data=subject))
    markup.add(telebot.types.InlineKeyboardButton(text="Далее", callback_data=ready_event))
    markup.add(telebot.types.InlineKeyboardButton(text="Назад", callback_data=back_event))
    with open(text_path, "rt", encoding="utf-8") as f:
        message_text = f.read()
    bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id,
                          text=message_text, reply_markup=markup)
# Script entry: start long-polling and retry after network failures.
if __name__ == "__main__":
    print(DialogEvent.ASK_FOR_NAME, type(DialogEvent.ASK_FOR_NAME))
    while True:
        try:
            bot.polling(none_stop=True)
        except Exception as err:
            # Log, back off for a few seconds, then resume polling.
            logger.error(err)
            time.sleep(5)
            print("Internet error!")
| true | true |
f730d915401357d800d6479065f4d905aeb50b81 | 1,048 | py | Python | examples/pybullet/gym/pybullet_envs/baselines/train_pybullet_cartpole.py | frk2/bullet3 | 225d823e4dc3f952c6c39920c3f87390383e0602 | [
"Zlib"
] | 51 | 2018-11-11T12:47:38.000Z | 2022-03-06T08:39:43.000Z | examples/pybullet/gym/pybullet_envs/baselines/train_pybullet_cartpole.py | frk2/bullet3 | 225d823e4dc3f952c6c39920c3f87390383e0602 | [
"Zlib"
] | 2 | 2019-11-15T03:21:45.000Z | 2020-09-10T11:53:58.000Z | examples/pybullet/gym/pybullet_envs/baselines/train_pybullet_cartpole.py | frk2/bullet3 | 225d823e4dc3f952c6c39920c3f87390383e0602 | [
"Zlib"
] | 14 | 2018-12-12T09:12:14.000Z | 2021-10-17T14:30:25.000Z | #add parent dir to find package. Only needed for source code build, pip install doesn't need it.
import os, inspect
# Resolve this file's directory and add its grandparent to sys.path so the
# package can be imported from a source checkout (pip installs don't need it).
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0,parentdir)
import gym
from pybullet_envs.bullet.cartpole_bullet import CartPoleBulletEnv
from baselines import deepq
def callback(lcl, glb):
    """Stop training once the mean reward of the last 100 episodes reaches 199."""
    # Ignore the first 100 timesteps; average the 100 most recently completed
    # episodes (the trailing list element is the episode still in progress).
    recent_rewards = lcl['episode_rewards'][-101:-1]
    return lcl['t'] > 100 and sum(recent_rewards) / 100 >= 199
def main():
    """Train DQN on the bullet cart-pole task and save the learned policy."""
    env = CartPoleBulletEnv(renders=False)
    q_network = deepq.models.mlp([64])
    policy = deepq.learn(
        env,
        q_func=q_network,
        lr=1e-3,
        max_timesteps=100000,
        buffer_size=50000,
        exploration_fraction=0.1,
        exploration_final_eps=0.02,
        print_freq=10,
        callback=callback
    )
    print("Saving model to cartpole_model.pkl")
    policy.save("cartpole_model.pkl")
# Run the training loop when executed as a script.
if __name__ == '__main__':
    main()
| 26.2 | 96 | 0.68416 |
import os, inspect
# Resolve this file's directory and add its grandparent to sys.path so the
# package can be imported from a source checkout (pip installs don't need it).
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0,parentdir)
import gym
from pybullet_envs.bullet.cartpole_bullet import CartPoleBulletEnv
from baselines import deepq
def callback(lcl, glb):
    """Stop training once the mean reward of the last 100 episodes reaches 199."""
    # Ignore the first 100 timesteps; average the 100 most recently completed
    # episodes (the trailing list element is the episode still in progress).
    recent_rewards = lcl['episode_rewards'][-101:-1]
    return lcl['t'] > 100 and sum(recent_rewards) / 100 >= 199
def main():
    """Train DQN on the bullet cart-pole task and save the learned policy."""
    env = CartPoleBulletEnv(renders=False)
    q_network = deepq.models.mlp([64])
    policy = deepq.learn(
        env,
        q_func=q_network,
        lr=1e-3,
        max_timesteps=100000,
        buffer_size=50000,
        exploration_fraction=0.1,
        exploration_final_eps=0.02,
        print_freq=10,
        callback=callback
    )
    print("Saving model to cartpole_model.pkl")
    policy.save("cartpole_model.pkl")
# Run the training loop when executed as a script.
if __name__ == '__main__':
    main()
| true | true |
f730d987678190ed5d27c075040ca4013481b942 | 7,805 | py | Python | dev/enas/search_space/enas_search_space.py | dapatil211/deep_architect | feadfb545d166216e27532ea47e8efa178e0d142 | [
"MIT"
] | null | null | null | dev/enas/search_space/enas_search_space.py | dapatil211/deep_architect | feadfb545d166216e27532ea47e8efa178e0d142 | [
"MIT"
] | null | null | null | dev/enas/search_space/enas_search_space.py | dapatil211/deep_architect | feadfb545d166216e27532ea47e8efa178e0d142 | [
"MIT"
] | null | null | null | """
Search space from Efficient Neural Architecture Search (Pham'17)
"""
from __future__ import print_function
from builtins import str
from builtins import range
from builtins import object
from collections import OrderedDict
import tensorflow as tf
import numpy as np
from deep_architect.helpers import tensorflow_eager_support as htfe
from deep_architect.hyperparameters import D
from dev.enas.search_space.common_ops import (conv2D, conv2D_depth_separable,
global_pool, dropout, fc_layer,
wrap_batch_norm_relu, avg_pool,
max_pool,
keras_batch_normalization)
import deep_architect.modules as mo
TFEM = htfe.TensorflowEagerModule
class WeightSharer(object):
    """Registry of weights shared between sampled architectures (ENAS-style)."""

    def __init__(self, isSharing):
        # name -> live (framework) weight object
        self.name_to_weight = {}
        # name -> function converting the live weight to a numpy value
        self.name_to_np_fn = {}
        # name -> numpy value restored from disk by load()
        self.weight_dict = {}
        self.isSharing = isSharing

    def get(self, name, construct_fn, np_fn):
        """Return the shared weight for ``name``, creating it on first use.

        When sharing is disabled, a fresh weight is constructed every call
        and nothing is recorded.
        """
        if not self.isSharing:
            return construct_fn()
        if name not in self.name_to_weight:
            # New shared weights are constructed on the GPU.
            with tf.device('/gpu:0'):
                self.name_to_weight[name] = construct_fn()
                self.name_to_np_fn[name] = np_fn
            print(name)
        return self.name_to_weight[name]

    def load_weights(self, name):
        """Return the restored numpy weights for ``name``, or None if absent."""
        return self.weight_dict.get(name)

    def save(self, filename):
        """Serialize all shared weights (converted to numpy) to ``filename``."""
        weight_dict = self.weight_dict
        for name in self.name_to_weight:
            weight_dict[name] = self.name_to_np_fn[name](
                self.name_to_weight[name])
        np.save(filename, weight_dict)

    def load(self, filename):
        """Restore weights saved by :meth:`save`.

        ``allow_pickle=True`` is required on NumPy >= 1.16.3 because the file
        stores a pickled dict (object array); without it np.load raises.
        """
        self.weight_dict = np.load(filename, allow_pickle=True).item()
# Sum the outputs of previously built layers that were selected by Boolean
# hyperparameters, forming the skip connections of an ENAS layer.
def concatenate_skip_layers(h_connects, weight_sharer):
    """Module adding the selected skip inputs to the current layer's output.

    h_connects: list of Boolean hyperparameters; input 'in<i>' participates
    in the sum iff 'select_<i>' is True. The extra final input (the current
    layer) always participates. weight_sharer is unused but kept so all
    module constructors share the same signature.
    """

    def compile_fn(di, dh):

        def fn(di, is_training=True):
            inputs = [
                di['in' + str(i)]
                for i in range(len(dh))
                if dh['select_' + str(i)]
            ]
            inputs.append(di['in' + str(len(dh))])
            with tf.device('/gpu:0'):
                # Build the add_n op once; the original constructed it twice
                # (one result was discarded).
                out = tf.add_n(inputs)
                return {'out': out}

        return fn

    return TFEM(
        'SkipConcat',
        {'select_' + str(i): h_connects[i] for i in range(len(h_connects))},
        compile_fn, ['in' + str(i) for i in range(len(h_connects) + 1)],
        ['out']).get_io()
def enas_conv(out_filters, filter_size, separable, weight_sharer, name):
    """Build an ENAS convolution block as an (inputs, outputs) module pair.

    A 1x1 conv (wrapped in batch-norm + ReLU) is followed by the main
    filter_size x filter_size conv, which is depthwise-separable when
    ``separable`` is True. Weights are fetched via ``weight_sharer``,
    keyed by ``name``.
    """
    # Pick the main convolution flavour; both share weights under `name`.
    io_pair = (conv2D_depth_separable(filter_size, name, weight_sharer)
               if separable else conv2D(filter_size, name, weight_sharer))
    return mo.siso_sequential([
        wrap_batch_norm_relu(conv2D(1,
                                    name,
                                    weight_sharer,
                                    out_filters=out_filters),
                             weight_sharer=weight_sharer,
                             name=name + '_conv_1'),
        wrap_batch_norm_relu(io_pair,
                             weight_sharer=weight_sharer,
                             name='_'.join(
                                 [name, str(filter_size),
                                  str(separable)]))
    ])
def enas_op(h_op_name, out_filters, name, weight_sharer):
    """One ENAS layer: a hyperparameter-driven choice among six operations.

    ``h_op_name`` selects which branch (conv, separable conv, or pooling)
    is actually constructed.
    """
    return mo.siso_or(
        {
            'conv3':
            lambda: enas_conv(out_filters, 3, False, weight_sharer, name),
            'conv5':
            lambda: enas_conv(out_filters, 5, False, weight_sharer, name),
            'dsep_conv3':
            lambda: enas_conv(out_filters, 3, True, weight_sharer, name),
            'dsep_conv5':
            lambda: enas_conv(out_filters, 5, True, weight_sharer, name),
            'avg_pool':
            lambda: avg_pool(D([3]), D([1])),
            'max_pool':
            lambda: max_pool(D([3]), D([1]))
        }, h_op_name)
def enas_repeat_fn(inputs, outputs, layer_id, out_filters, weight_sharer):
    """Append one ENAS layer (op choice + skip connections + batch norm).

    Connects the new op to the most recent output, adds one Boolean skip
    hyperparameter per earlier layer, and registers the batch-normed sum
    as the next output in ``outputs``.
    """
    h_enas_op = D(
        ['conv3', 'conv5', 'dsep_conv3', 'dsep_conv5', 'avg_pool', 'max_pool'],
        name='op_' + str(layer_id))
    #h_enas_op = D(['max_pool'], name='op_' + str(layer_id))
    op_inputs, op_outputs = enas_op(h_enas_op, out_filters,
                                    'op_' + str(layer_id), weight_sharer)
    outputs[list(outputs.keys())[-1]].connect(op_inputs['in'])
    # Skip connections: one Boolean hyperparameter per previous layer.
    h_connects = [
        D([True, False], name='skip_' + str(idx) + '_' + str(layer_id))
        for idx in range(layer_id - 1)
    ]
    skip_inputs, skip_outputs = concatenate_skip_layers(h_connects,
                                                        weight_sharer)
    for i in range(len(h_connects)):
        outputs[list(outputs.keys())[i]].connect(skip_inputs['in' + str(i)])
    op_outputs['out'].connect(skip_inputs['in' + str(len(h_connects))])
    # Batch norm applied to the skip-connection sum.
    bn_inputs, bn_outputs = keras_batch_normalization(
        name='skip_bn_' + str(len(h_connects)), weight_sharer=weight_sharer)
    skip_outputs['out'].connect(bn_inputs['in'])
    outputs['out' + str(len(outputs))] = bn_outputs['out']
    return inputs, outputs
def enas_space(h_num_layers,
               out_filters,
               fn_first,
               fn_repeats,
               input_names,
               output_names,
               weight_sharer,
               scope=None):
    """Substitution module expanding into ``num_layers`` stacked ENAS layers.

    ``fn_first`` builds the stem; ``fn_repeats`` is applied once per layer
    (see ``enas_repeat_fn``). Only the final layer's output is exposed as
    'out'.
    """
    def substitution_fn(dh):
        assert dh["num_layers"] > 0
        inputs, outputs = fn_first()
        temp_outputs = OrderedDict(outputs)
        for i in range(1, dh["num_layers"] + 1):
            inputs, temp_outputs = fn_repeats(inputs, temp_outputs, i,
                                              out_filters, weight_sharer)
        return inputs, OrderedDict(
            {'out': temp_outputs['out' + str(len(temp_outputs) - 1)]})
    return mo.substitution_module('ENASModule', substitution_fn,
                                  {'num_layers': h_num_layers}, input_names,
                                  output_names, scope)
def get_enas_search_space(num_classes, num_layers, out_filters, weight_sharer):
    """Full ENAS classifier space: stem conv -> ENAS layers -> pool/dropout/FC."""
    # num_layers is fixed (single-valued hyperparameter).
    h_N = D([num_layers], name='num_layers')
    return mo.siso_sequential([
        enas_space(
            h_N,
            out_filters,
            #mo.empty,
            lambda: wrap_batch_norm_relu(conv2D(
                3, 'stem', weight_sharer, out_filters=out_filters),
                                         add_relu=False,
                                         weight_sharer=weight_sharer,
                                         name='stem'),
            enas_repeat_fn,
            ['in'],
            ['out'],
            weight_sharer),
        global_pool(),
        dropout(keep_prob=.9),
        fc_layer(num_classes, 'softmax', weight_sharer),
    ])
class SSFEnasnet(mo.SearchSpaceFactory):
    """Factory producing the ENAS search space with optional weight sharing."""

    def __init__(self, num_classes, num_layers, out_filters, isSharing=True):
        mo.SearchSpaceFactory.__init__(self, self._get_search_space)
        self.num_classes = num_classes
        self.weight_sharer = WeightSharer(isSharing)
        self.num_layers = num_layers
        self.out_filters = out_filters

    def _get_search_space(self):
        # SearchSpaceFactory expects (inputs, outputs, hyperparameter dict).
        inputs, outputs = get_enas_search_space(
            self.num_classes, self.num_layers, self.out_filters,
            self.weight_sharer)
        return inputs, outputs, {}
| 36.471963 | 80 | 0.556054 | from __future__ import print_function
from builtins import str
from builtins import range
from builtins import object
from collections import OrderedDict
import tensorflow as tf
import numpy as np
from deep_architect.helpers import tensorflow_eager_support as htfe
from deep_architect.hyperparameters import D
from dev.enas.search_space.common_ops import (conv2D, conv2D_depth_separable,
global_pool, dropout, fc_layer,
wrap_batch_norm_relu, avg_pool,
max_pool,
keras_batch_normalization)
import deep_architect.modules as mo
TFEM = htfe.TensorflowEagerModule
class WeightSharer(object):
def __init__(self, isSharing):
self.name_to_weight = {}
self.name_to_np_fn = {}
self.weight_dict = {}
self.isSharing = isSharing
def get(self, name, construct_fn, np_fn):
if self.isSharing:
if name not in self.name_to_weight:
with tf.device('/gpu:0'):
self.name_to_weight[name] = construct_fn()
self.name_to_np_fn[name] = np_fn
print(name)
return self.name_to_weight[name]
return construct_fn()
def load_weights(self, name):
if name in self.weight_dict:
return self.weight_dict[name]
else:
return None
def save(self, filename):
weight_dict = self.weight_dict
for name in self.name_to_weight:
weight_dict[name] = self.name_to_np_fn[name](
self.name_to_weight[name])
np.save(filename, weight_dict)
def load(self, filename):
self.weight_dict = np.load(filename).item()
def concatenate_skip_layers(h_connects, weight_sharer):
def compile_fn(di, dh):
def fn(di, is_training=True):
inputs = [
di['in' + str(i)]
for i in range(len(dh))
if dh['select_' + str(i)]
]
inputs.append(di['in' + str(len(dh))])
with tf.device('/gpu:0'):
out = tf.add_n(inputs)
return {'out': tf.add_n(inputs)}
return fn
return TFEM(
'SkipConcat',
{'select_' + str(i): h_connects[i] for i in range(len(h_connects))},
compile_fn, ['in' + str(i) for i in range(len(h_connects) + 1)],
['out']).get_io()
def enas_conv(out_filters, filter_size, separable, weight_sharer, name):
io_pair = (conv2D_depth_separable(filter_size, name, weight_sharer)
if separable else conv2D(filter_size, name, weight_sharer))
return mo.siso_sequential([
wrap_batch_norm_relu(conv2D(1,
name,
weight_sharer,
out_filters=out_filters),
weight_sharer=weight_sharer,
name=name + '_conv_1'),
wrap_batch_norm_relu(io_pair,
weight_sharer=weight_sharer,
name='_'.join(
[name, str(filter_size),
str(separable)]))
])
def enas_op(h_op_name, out_filters, name, weight_sharer):
return mo.siso_or(
{
'conv3':
lambda: enas_conv(out_filters, 3, False, weight_sharer, name),
'conv5':
lambda: enas_conv(out_filters, 5, False, weight_sharer, name),
'dsep_conv3':
lambda: enas_conv(out_filters, 3, True, weight_sharer, name),
'dsep_conv5':
lambda: enas_conv(out_filters, 5, True, weight_sharer, name),
'avg_pool':
lambda: avg_pool(D([3]), D([1])),
'max_pool':
lambda: max_pool(D([3]), D([1]))
}, h_op_name)
def enas_repeat_fn(inputs, outputs, layer_id, out_filters, weight_sharer):
h_enas_op = D(
['conv3', 'conv5', 'dsep_conv3', 'dsep_conv5', 'avg_pool', 'max_pool'],
name='op_' + str(layer_id))
op_inputs, op_outputs = enas_op(h_enas_op, out_filters,
'op_' + str(layer_id), weight_sharer)
outputs[list(outputs.keys())[-1]].connect(op_inputs['in'])
h_connects = [
D([True, False], name='skip_' + str(idx) + '_' + str(layer_id))
for idx in range(layer_id - 1)
]
skip_inputs, skip_outputs = concatenate_skip_layers(h_connects,
weight_sharer)
for i in range(len(h_connects)):
outputs[list(outputs.keys())[i]].connect(skip_inputs['in' + str(i)])
op_outputs['out'].connect(skip_inputs['in' + str(len(h_connects))])
bn_inputs, bn_outputs = keras_batch_normalization(
name='skip_bn_' + str(len(h_connects)), weight_sharer=weight_sharer)
skip_outputs['out'].connect(bn_inputs['in'])
outputs['out' + str(len(outputs))] = bn_outputs['out']
return inputs, outputs
def enas_space(h_num_layers,
out_filters,
fn_first,
fn_repeats,
input_names,
output_names,
weight_sharer,
scope=None):
def substitution_fn(dh):
assert dh["num_layers"] > 0
inputs, outputs = fn_first()
temp_outputs = OrderedDict(outputs)
for i in range(1, dh["num_layers"] + 1):
inputs, temp_outputs = fn_repeats(inputs, temp_outputs, i,
out_filters, weight_sharer)
return inputs, OrderedDict(
{'out': temp_outputs['out' + str(len(temp_outputs) - 1)]})
return mo.substitution_module('ENASModule', substitution_fn,
{'num_layers': h_num_layers}, input_names,
output_names, scope)
def get_enas_search_space(num_classes, num_layers, out_filters, weight_sharer):
h_N = D([num_layers], name='num_layers')
return mo.siso_sequential([
enas_space(
h_N,
out_filters,
lambda: wrap_batch_norm_relu(conv2D(
3, 'stem', weight_sharer, out_filters=out_filters),
add_relu=False,
weight_sharer=weight_sharer,
name='stem'),
enas_repeat_fn,
['in'],
['out'],
weight_sharer),
global_pool(),
dropout(keep_prob=.9),
fc_layer(num_classes, 'softmax', weight_sharer),
])
class SSFEnasnet(mo.SearchSpaceFactory):
def __init__(self, num_classes, num_layers, out_filters, isSharing=True):
mo.SearchSpaceFactory.__init__(self, self._get_search_space)
self.num_classes = num_classes
self.weight_sharer = WeightSharer(isSharing)
self.num_layers = num_layers
self.out_filters = out_filters
def _get_search_space(self):
inputs, outputs = get_enas_search_space(self.num_classes,
self.num_layers,
self.out_filters,
self.weight_sharer)
return inputs, outputs, {}
| true | true |
f730da02becc00411ae503923f2d15f5e8664061 | 15,428 | py | Python | api/models/taco_models/fatchord_version.py | elainevoice/backend | 9b5fef59001fd6c2040affc80cd5cb9690c73795 | [
"Apache-2.0"
] | 3 | 2020-12-28T16:45:56.000Z | 2021-12-18T08:38:29.000Z | api/models/taco_models/fatchord_version.py | elainevoice/backend | 9b5fef59001fd6c2040affc80cd5cb9690c73795 | [
"Apache-2.0"
] | 1 | 2020-12-14T13:09:42.000Z | 2020-12-14T13:09:42.000Z | api/models/taco_models/fatchord_version.py | elainevoice/backend | 9b5fef59001fd6c2040affc80cd5cb9690c73795 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from api.models.utils.distribution import sample_from_discretized_mix_logistic
from api.models.utils.display import *
from api.models.utils.dsp import *
import os
import numpy as np
from pathlib import Path
from typing import Union
class ResBlock(nn.Module):
def __init__(self, dims):
super().__init__()
self.conv1 = nn.Conv1d(dims, dims, kernel_size=1, bias=False)
self.conv2 = nn.Conv1d(dims, dims, kernel_size=1, bias=False)
self.batch_norm1 = nn.BatchNorm1d(dims)
self.batch_norm2 = nn.BatchNorm1d(dims)
def forward(self, x):
residual = x
x = self.conv1(x)
x = self.batch_norm1(x)
x = F.relu(x)
x = self.conv2(x)
x = self.batch_norm2(x)
return x + residual
class MelResNet(nn.Module):
def __init__(self, res_blocks, in_dims, compute_dims, res_out_dims, pad):
super().__init__()
k_size = pad * 2 + 1
self.conv_in = nn.Conv1d(in_dims, compute_dims, kernel_size=k_size, bias=False)
self.batch_norm = nn.BatchNorm1d(compute_dims)
self.layers = nn.ModuleList()
for i in range(res_blocks):
self.layers.append(ResBlock(compute_dims))
self.conv_out = nn.Conv1d(compute_dims, res_out_dims, kernel_size=1)
def forward(self, x):
x = self.conv_in(x)
x = self.batch_norm(x)
x = F.relu(x)
for f in self.layers: x = f(x)
x = self.conv_out(x)
return x
class Stretch2d(nn.Module):
def __init__(self, x_scale, y_scale):
super().__init__()
self.x_scale = x_scale
self.y_scale = y_scale
def forward(self, x):
b, c, h, w = x.size()
x = x.unsqueeze(-1).unsqueeze(3)
x = x.repeat(1, 1, 1, self.y_scale, 1, self.x_scale)
return x.view(b, c, h * self.y_scale, w * self.x_scale)
class UpsampleNetwork(nn.Module):
def __init__(self, feat_dims, upsample_scales, compute_dims,
res_blocks, res_out_dims, pad):
super().__init__()
total_scale = np.cumproduct(upsample_scales)[-1]
self.indent = pad * total_scale
self.resnet = MelResNet(res_blocks, feat_dims, compute_dims, res_out_dims, pad)
self.resnet_stretch = Stretch2d(total_scale, 1)
self.up_layers = nn.ModuleList()
for scale in upsample_scales:
k_size = (1, scale * 2 + 1)
padding = (0, scale)
stretch = Stretch2d(scale, 1)
conv = nn.Conv2d(1, 1, kernel_size=k_size, padding=padding, bias=False)
conv.weight.data.fill_(1. / k_size[1])
self.up_layers.append(stretch)
self.up_layers.append(conv)
def forward(self, m):
aux = self.resnet(m).unsqueeze(1)
aux = self.resnet_stretch(aux)
aux = aux.squeeze(1)
m = m.unsqueeze(1)
for f in self.up_layers: m = f(m)
m = m.squeeze(1)[:, :, self.indent:-self.indent]
return m.transpose(1, 2), aux.transpose(1, 2)
class WaveRNN(nn.Module):
def __init__(self, rnn_dims, fc_dims, bits, pad, upsample_factors,
feat_dims, compute_dims, res_out_dims, res_blocks,
hop_length, sample_rate, mode='RAW'):
super().__init__()
self.mode = mode
self.pad = pad
if self.mode == 'RAW':
self.n_classes = 2 ** bits
elif self.mode == 'MOL':
self.n_classes = 30
else:
RuntimeError("Unknown model mode value - ", self.mode)
# List of rnns to call `flatten_parameters()` on
self._to_flatten = []
self.rnn_dims = rnn_dims
self.aux_dims = res_out_dims // 4
self.hop_length = hop_length
self.sample_rate = sample_rate
self.upsample = UpsampleNetwork(feat_dims, upsample_factors, compute_dims, res_blocks, res_out_dims, pad)
self.I = nn.Linear(feat_dims + self.aux_dims + 1, rnn_dims)
self.rnn1 = nn.GRU(rnn_dims, rnn_dims, batch_first=True)
self.rnn2 = nn.GRU(rnn_dims + self.aux_dims, rnn_dims, batch_first=True)
self._to_flatten += [self.rnn1, self.rnn2]
self.fc1 = nn.Linear(rnn_dims + self.aux_dims, fc_dims)
self.fc2 = nn.Linear(fc_dims + self.aux_dims, fc_dims)
self.fc3 = nn.Linear(fc_dims, self.n_classes)
self.register_buffer('step', torch.zeros(1, dtype=torch.long))
self.num_params()
# Avoid fragmentation of RNN parameters and associated warning
self._flatten_parameters()
def forward(self, x, mels):
device = next(self.parameters()).device # use same device as parameters
# Although we `_flatten_parameters()` on init, when using DataParallel
# the model gets replicated, making it no longer guaranteed that the
# weights are contiguous in GPU memory. Hence, we must call it again
self._flatten_parameters()
if self.training:
self.step += 1
bsize = x.size(0)
h1 = torch.zeros(1, bsize, self.rnn_dims, device=device)
h2 = torch.zeros(1, bsize, self.rnn_dims, device=device)
mels, aux = self.upsample(mels)
aux_idx = [self.aux_dims * i for i in range(5)]
a1 = aux[:, :, aux_idx[0]:aux_idx[1]]
a2 = aux[:, :, aux_idx[1]:aux_idx[2]]
a3 = aux[:, :, aux_idx[2]:aux_idx[3]]
a4 = aux[:, :, aux_idx[3]:aux_idx[4]]
x = torch.cat([x.unsqueeze(-1), mels, a1], dim=2)
x = self.I(x)
res = x
x, _ = self.rnn1(x, h1)
x = x + res
res = x
x = torch.cat([x, a2], dim=2)
x, _ = self.rnn2(x, h2)
x = x + res
x = torch.cat([x, a3], dim=2)
x = F.relu(self.fc1(x))
x = torch.cat([x, a4], dim=2)
x = F.relu(self.fc2(x))
return self.fc3(x)
def generate(self, mels, save_path: Union[str, Path, None], batched, target, overlap, mu_law, silent=False):
self.eval()
device = next(self.parameters()).device # use same device as parameters
mu_law = mu_law if self.mode == 'RAW' else False
output = []
start = time.time()
rnn1 = self.get_gru_cell(self.rnn1)
rnn2 = self.get_gru_cell(self.rnn2)
with torch.no_grad():
mels = torch.as_tensor(mels, device=device)
wave_len = (mels.size(-1) - 1) * self.hop_length
mels = self.pad_tensor(mels.transpose(1, 2), pad=self.pad, side='both')
mels, aux = self.upsample(mels.transpose(1, 2))
if batched:
mels = self.fold_with_overlap(mels, target, overlap)
aux = self.fold_with_overlap(aux, target, overlap)
b_size, seq_len, _ = mels.size()
h1 = torch.zeros(b_size, self.rnn_dims, device=device)
h2 = torch.zeros(b_size, self.rnn_dims, device=device)
x = torch.zeros(b_size, 1, device=device)
d = self.aux_dims
aux_split = [aux[:, :, d * i:d * (i + 1)] for i in range(4)]
for i in range(seq_len):
m_t = mels[:, i, :]
a1_t, a2_t, a3_t, a4_t = \
(a[:, i, :] for a in aux_split)
x = torch.cat([x, m_t, a1_t], dim=1)
x = self.I(x)
h1 = rnn1(x, h1)
x = x + h1
inp = torch.cat([x, a2_t], dim=1)
h2 = rnn2(inp, h2)
x = x + h2
x = torch.cat([x, a3_t], dim=1)
x = F.relu(self.fc1(x))
x = torch.cat([x, a4_t], dim=1)
x = F.relu(self.fc2(x))
logits = self.fc3(x)
if self.mode == 'MOL':
sample = sample_from_discretized_mix_logistic(logits.unsqueeze(0).transpose(1, 2))
output.append(sample.view(-1))
# x = torch.FloatTensor([[sample]]).cuda()
x = sample.transpose(0, 1)
elif self.mode == 'RAW':
posterior = F.softmax(logits, dim=1)
distrib = torch.distributions.Categorical(posterior)
sample = 2 * distrib.sample().float() / (self.n_classes - 1.) - 1.
output.append(sample)
x = sample.unsqueeze(-1)
else:
raise RuntimeError("Unknown model mode value - ", self.mode)
if not silent and i % 100 == 0:
self.gen_display(i, seq_len, b_size, start)
output = torch.stack(output).transpose(0, 1)
output = output.cpu().numpy()
output = output.astype(np.float64)
if mu_law:
output = decode_mu_law(output, self.n_classes, False)
if batched:
output = self.xfade_and_unfold(output, target, overlap)
else:
output = output[0]
# Fade-out at the end to avoid signal cutting out suddenly
fade_out = np.linspace(1, 0, 20 * self.hop_length)
output = output[:wave_len]
output[-20 * self.hop_length:] *= fade_out
if save_path is not None:
save_wav(output, save_path)
self.train()
return output
def gen_display(self, i, seq_len, b_size, start):
gen_rate = (i + 1) / (time.time() - start) * b_size / 1000
pbar = progbar(i, seq_len)
msg = f'| {pbar} {i*b_size}/{seq_len*b_size} | Batch Size: {b_size} | Gen Rate: {gen_rate:.1f}kHz | '
stream(msg)
def get_gru_cell(self, gru):
gru_cell = nn.GRUCell(gru.input_size, gru.hidden_size)
gru_cell.weight_hh.data = gru.weight_hh_l0.data
gru_cell.weight_ih.data = gru.weight_ih_l0.data
gru_cell.bias_hh.data = gru.bias_hh_l0.data
gru_cell.bias_ih.data = gru.bias_ih_l0.data
return gru_cell
def pad_tensor(self, x, pad, side='both'):
# NB - this is just a quick method i need right now
# i.e., it won't generalise to other shapes/dims
b, t, c = x.size()
total = t + 2 * pad if side == 'both' else t + pad
padded = torch.zeros(b, total, c, device=x.device)
if side == 'before' or side == 'both':
padded[:, pad:pad + t, :] = x
elif side == 'after':
padded[:, :t, :] = x
return padded
def fold_with_overlap(self, x, target, overlap):
''' Fold the tensor with overlap for quick batched inference.
Overlap will be used for crossfading in xfade_and_unfold()
Args:
x (tensor) : Upsampled conditioning features.
shape=(1, timesteps, features)
target (int) : Target timesteps for each index of batch
overlap (int) : Timesteps for both xfade and rnn warmup
Return:
(tensor) : shape=(num_folds, target + 2 * overlap, features)
Details:
x = [[h1, h2, ... hn]]
Where each h is a vector of conditioning features
Eg: target=2, overlap=1 with x.size(1)=10
folded = [[h1, h2, h3, h4],
[h4, h5, h6, h7],
[h7, h8, h9, h10]]
'''
_, total_len, features = x.size()
# Calculate variables needed
num_folds = (total_len - overlap) // (target + overlap)
extended_len = num_folds * (overlap + target) + overlap
remaining = total_len - extended_len
# Pad if some time steps poking out
if remaining != 0:
num_folds += 1
padding = target + 2 * overlap - remaining
x = self.pad_tensor(x, padding, side='after')
folded = torch.zeros(num_folds, target + 2 * overlap, features, device=x.device)
# Get the values for the folded tensor
for i in range(num_folds):
start = i * (target + overlap)
end = start + target + 2 * overlap
folded[i] = x[:, start:end, :]
return folded
def xfade_and_unfold(self, y, target, overlap):
''' Applies a crossfade and unfolds into a 1d array.
Args:
y (ndarry) : Batched sequences of audio samples
shape=(num_folds, target + 2 * overlap)
dtype=np.float64
overlap (int) : Timesteps for both xfade and rnn warmup
Return:
(ndarry) : audio samples in a 1d array
shape=(total_len)
dtype=np.float64
Details:
y = [[seq1],
[seq2],
[seq3]]
Apply a gain envelope at both ends of the sequences
y = [[seq1_in, seq1_target, seq1_out],
[seq2_in, seq2_target, seq2_out],
[seq3_in, seq3_target, seq3_out]]
Stagger and add up the groups of samples:
[seq1_in, seq1_target, (seq1_out + seq2_in), seq2_target, ...]
'''
num_folds, length = y.shape
target = length - 2 * overlap
total_len = num_folds * (target + overlap) + overlap
# Need some silence for the rnn warmup
silence_len = overlap // 2
fade_len = overlap - silence_len
silence = np.zeros((silence_len), dtype=np.float64)
linear = np.ones((silence_len), dtype=np.float64)
# Equal power crossfade
t = np.linspace(-1, 1, fade_len, dtype=np.float64)
fade_in = np.sqrt(0.5 * (1 + t))
fade_out = np.sqrt(0.5 * (1 - t))
# Concat the silence to the fades
fade_in = np.concatenate([silence, fade_in])
fade_out = np.concatenate([linear, fade_out])
# Apply the gain to the overlap samples
y[:, :overlap] *= fade_in
y[:, -overlap:] *= fade_out
unfolded = np.zeros((total_len), dtype=np.float64)
# Loop to add up all the samples
for i in range(num_folds):
start = i * (target + overlap)
end = start + target + 2 * overlap
unfolded[start:end] += y[i]
return unfolded
def get_step(self):
return self.step.data.item()
def log(self, path, msg):
with open(path, 'a') as f:
print(msg, file=f)
def load(self, path: Union[str, Path]):
# Use device of model params as location for loaded state
device = next(self.parameters()).device
self.load_state_dict(torch.load(path, map_location=device), strict=False)
def save(self, path: Union[str, Path]):
# No optimizer argument because saving a model should not include data
# only relevant in the training process - it should only be properties
# of the model itself. Let caller take care of saving optimzier state.
torch.save(self.state_dict(), path)
def num_params(self, print_out=False):
parameters = filter(lambda p: p.requires_grad, self.parameters())
parameters = sum([np.prod(p.size()) for p in parameters]) / 1_000_000
if print_out:
print('Trainable Parameters: %.3fM' % parameters)
return parameters
def _flatten_parameters(self):
"""Calls `flatten_parameters` on all the rnns used by the WaveRNN. Used
to improve efficiency and avoid PyTorch yelling at us."""
[m.flatten_parameters() for m in self._to_flatten]
| 35.143508 | 113 | 0.565401 | import torch
import torch.nn as nn
import torch.nn.functional as F
from api.models.utils.distribution import sample_from_discretized_mix_logistic
from api.models.utils.display import *
from api.models.utils.dsp import *
import os
import numpy as np
from pathlib import Path
from typing import Union
class ResBlock(nn.Module):
def __init__(self, dims):
super().__init__()
self.conv1 = nn.Conv1d(dims, dims, kernel_size=1, bias=False)
self.conv2 = nn.Conv1d(dims, dims, kernel_size=1, bias=False)
self.batch_norm1 = nn.BatchNorm1d(dims)
self.batch_norm2 = nn.BatchNorm1d(dims)
def forward(self, x):
residual = x
x = self.conv1(x)
x = self.batch_norm1(x)
x = F.relu(x)
x = self.conv2(x)
x = self.batch_norm2(x)
return x + residual
class MelResNet(nn.Module):
def __init__(self, res_blocks, in_dims, compute_dims, res_out_dims, pad):
super().__init__()
k_size = pad * 2 + 1
self.conv_in = nn.Conv1d(in_dims, compute_dims, kernel_size=k_size, bias=False)
self.batch_norm = nn.BatchNorm1d(compute_dims)
self.layers = nn.ModuleList()
for i in range(res_blocks):
self.layers.append(ResBlock(compute_dims))
self.conv_out = nn.Conv1d(compute_dims, res_out_dims, kernel_size=1)
def forward(self, x):
x = self.conv_in(x)
x = self.batch_norm(x)
x = F.relu(x)
for f in self.layers: x = f(x)
x = self.conv_out(x)
return x
class Stretch2d(nn.Module):
def __init__(self, x_scale, y_scale):
super().__init__()
self.x_scale = x_scale
self.y_scale = y_scale
def forward(self, x):
b, c, h, w = x.size()
x = x.unsqueeze(-1).unsqueeze(3)
x = x.repeat(1, 1, 1, self.y_scale, 1, self.x_scale)
return x.view(b, c, h * self.y_scale, w * self.x_scale)
class UpsampleNetwork(nn.Module):
def __init__(self, feat_dims, upsample_scales, compute_dims,
res_blocks, res_out_dims, pad):
super().__init__()
total_scale = np.cumproduct(upsample_scales)[-1]
self.indent = pad * total_scale
self.resnet = MelResNet(res_blocks, feat_dims, compute_dims, res_out_dims, pad)
self.resnet_stretch = Stretch2d(total_scale, 1)
self.up_layers = nn.ModuleList()
for scale in upsample_scales:
k_size = (1, scale * 2 + 1)
padding = (0, scale)
stretch = Stretch2d(scale, 1)
conv = nn.Conv2d(1, 1, kernel_size=k_size, padding=padding, bias=False)
conv.weight.data.fill_(1. / k_size[1])
self.up_layers.append(stretch)
self.up_layers.append(conv)
def forward(self, m):
aux = self.resnet(m).unsqueeze(1)
aux = self.resnet_stretch(aux)
aux = aux.squeeze(1)
m = m.unsqueeze(1)
for f in self.up_layers: m = f(m)
m = m.squeeze(1)[:, :, self.indent:-self.indent]
return m.transpose(1, 2), aux.transpose(1, 2)
class WaveRNN(nn.Module):
def __init__(self, rnn_dims, fc_dims, bits, pad, upsample_factors,
feat_dims, compute_dims, res_out_dims, res_blocks,
hop_length, sample_rate, mode='RAW'):
super().__init__()
self.mode = mode
self.pad = pad
if self.mode == 'RAW':
self.n_classes = 2 ** bits
elif self.mode == 'MOL':
self.n_classes = 30
else:
RuntimeError("Unknown model mode value - ", self.mode)
self._to_flatten = []
self.rnn_dims = rnn_dims
self.aux_dims = res_out_dims // 4
self.hop_length = hop_length
self.sample_rate = sample_rate
self.upsample = UpsampleNetwork(feat_dims, upsample_factors, compute_dims, res_blocks, res_out_dims, pad)
self.I = nn.Linear(feat_dims + self.aux_dims + 1, rnn_dims)
self.rnn1 = nn.GRU(rnn_dims, rnn_dims, batch_first=True)
self.rnn2 = nn.GRU(rnn_dims + self.aux_dims, rnn_dims, batch_first=True)
self._to_flatten += [self.rnn1, self.rnn2]
self.fc1 = nn.Linear(rnn_dims + self.aux_dims, fc_dims)
self.fc2 = nn.Linear(fc_dims + self.aux_dims, fc_dims)
self.fc3 = nn.Linear(fc_dims, self.n_classes)
self.register_buffer('step', torch.zeros(1, dtype=torch.long))
self.num_params()
self._flatten_parameters()
def forward(self, x, mels):
device = next(self.parameters()).device
self._flatten_parameters()
if self.training:
self.step += 1
bsize = x.size(0)
h1 = torch.zeros(1, bsize, self.rnn_dims, device=device)
h2 = torch.zeros(1, bsize, self.rnn_dims, device=device)
mels, aux = self.upsample(mels)
aux_idx = [self.aux_dims * i for i in range(5)]
a1 = aux[:, :, aux_idx[0]:aux_idx[1]]
a2 = aux[:, :, aux_idx[1]:aux_idx[2]]
a3 = aux[:, :, aux_idx[2]:aux_idx[3]]
a4 = aux[:, :, aux_idx[3]:aux_idx[4]]
x = torch.cat([x.unsqueeze(-1), mels, a1], dim=2)
x = self.I(x)
res = x
x, _ = self.rnn1(x, h1)
x = x + res
res = x
x = torch.cat([x, a2], dim=2)
x, _ = self.rnn2(x, h2)
x = x + res
x = torch.cat([x, a3], dim=2)
x = F.relu(self.fc1(x))
x = torch.cat([x, a4], dim=2)
x = F.relu(self.fc2(x))
return self.fc3(x)
def generate(self, mels, save_path: Union[str, Path, None], batched, target, overlap, mu_law, silent=False):
self.eval()
device = next(self.parameters()).device
mu_law = mu_law if self.mode == 'RAW' else False
output = []
start = time.time()
rnn1 = self.get_gru_cell(self.rnn1)
rnn2 = self.get_gru_cell(self.rnn2)
with torch.no_grad():
mels = torch.as_tensor(mels, device=device)
wave_len = (mels.size(-1) - 1) * self.hop_length
mels = self.pad_tensor(mels.transpose(1, 2), pad=self.pad, side='both')
mels, aux = self.upsample(mels.transpose(1, 2))
if batched:
mels = self.fold_with_overlap(mels, target, overlap)
aux = self.fold_with_overlap(aux, target, overlap)
b_size, seq_len, _ = mels.size()
h1 = torch.zeros(b_size, self.rnn_dims, device=device)
h2 = torch.zeros(b_size, self.rnn_dims, device=device)
x = torch.zeros(b_size, 1, device=device)
d = self.aux_dims
aux_split = [aux[:, :, d * i:d * (i + 1)] for i in range(4)]
for i in range(seq_len):
m_t = mels[:, i, :]
a1_t, a2_t, a3_t, a4_t = \
(a[:, i, :] for a in aux_split)
x = torch.cat([x, m_t, a1_t], dim=1)
x = self.I(x)
h1 = rnn1(x, h1)
x = x + h1
inp = torch.cat([x, a2_t], dim=1)
h2 = rnn2(inp, h2)
x = x + h2
x = torch.cat([x, a3_t], dim=1)
x = F.relu(self.fc1(x))
x = torch.cat([x, a4_t], dim=1)
x = F.relu(self.fc2(x))
logits = self.fc3(x)
if self.mode == 'MOL':
sample = sample_from_discretized_mix_logistic(logits.unsqueeze(0).transpose(1, 2))
output.append(sample.view(-1))
x = sample.transpose(0, 1)
elif self.mode == 'RAW':
posterior = F.softmax(logits, dim=1)
distrib = torch.distributions.Categorical(posterior)
sample = 2 * distrib.sample().float() / (self.n_classes - 1.) - 1.
output.append(sample)
x = sample.unsqueeze(-1)
else:
raise RuntimeError("Unknown model mode value - ", self.mode)
if not silent and i % 100 == 0:
self.gen_display(i, seq_len, b_size, start)
output = torch.stack(output).transpose(0, 1)
output = output.cpu().numpy()
output = output.astype(np.float64)
if mu_law:
output = decode_mu_law(output, self.n_classes, False)
if batched:
output = self.xfade_and_unfold(output, target, overlap)
else:
output = output[0]
fade_out = np.linspace(1, 0, 20 * self.hop_length)
output = output[:wave_len]
output[-20 * self.hop_length:] *= fade_out
if save_path is not None:
save_wav(output, save_path)
self.train()
return output
def gen_display(self, i, seq_len, b_size, start):
gen_rate = (i + 1) / (time.time() - start) * b_size / 1000
pbar = progbar(i, seq_len)
msg = f'| {pbar} {i*b_size}/{seq_len*b_size} | Batch Size: {b_size} | Gen Rate: {gen_rate:.1f}kHz | '
stream(msg)
def get_gru_cell(self, gru):
gru_cell = nn.GRUCell(gru.input_size, gru.hidden_size)
gru_cell.weight_hh.data = gru.weight_hh_l0.data
gru_cell.weight_ih.data = gru.weight_ih_l0.data
gru_cell.bias_hh.data = gru.bias_hh_l0.data
gru_cell.bias_ih.data = gru.bias_ih_l0.data
return gru_cell
def pad_tensor(self, x, pad, side='both'):
b, t, c = x.size()
total = t + 2 * pad if side == 'both' else t + pad
padded = torch.zeros(b, total, c, device=x.device)
if side == 'before' or side == 'both':
padded[:, pad:pad + t, :] = x
elif side == 'after':
padded[:, :t, :] = x
return padded
def fold_with_overlap(self, x, target, overlap):
_, total_len, features = x.size()
# Calculate variables needed
num_folds = (total_len - overlap) // (target + overlap)
extended_len = num_folds * (overlap + target) + overlap
remaining = total_len - extended_len
# Pad if some time steps poking out
if remaining != 0:
num_folds += 1
padding = target + 2 * overlap - remaining
x = self.pad_tensor(x, padding, side='after')
folded = torch.zeros(num_folds, target + 2 * overlap, features, device=x.device)
# Get the values for the folded tensor
for i in range(num_folds):
start = i * (target + overlap)
end = start + target + 2 * overlap
folded[i] = x[:, start:end, :]
return folded
def xfade_and_unfold(self, y, target, overlap):
num_folds, length = y.shape
target = length - 2 * overlap
total_len = num_folds * (target + overlap) + overlap
# Need some silence for the rnn warmup
silence_len = overlap // 2
fade_len = overlap - silence_len
silence = np.zeros((silence_len), dtype=np.float64)
linear = np.ones((silence_len), dtype=np.float64)
# Equal power crossfade
t = np.linspace(-1, 1, fade_len, dtype=np.float64)
fade_in = np.sqrt(0.5 * (1 + t))
fade_out = np.sqrt(0.5 * (1 - t))
# Concat the silence to the fades
fade_in = np.concatenate([silence, fade_in])
fade_out = np.concatenate([linear, fade_out])
# Apply the gain to the overlap samples
y[:, :overlap] *= fade_in
y[:, -overlap:] *= fade_out
unfolded = np.zeros((total_len), dtype=np.float64)
# Loop to add up all the samples
for i in range(num_folds):
start = i * (target + overlap)
end = start + target + 2 * overlap
unfolded[start:end] += y[i]
return unfolded
def get_step(self):
return self.step.data.item()
def log(self, path, msg):
with open(path, 'a') as f:
print(msg, file=f)
def load(self, path: Union[str, Path]):
# Use device of model params as location for loaded state
device = next(self.parameters()).device
self.load_state_dict(torch.load(path, map_location=device), strict=False)
def save(self, path: Union[str, Path]):
# No optimizer argument because saving a model should not include data
# only relevant in the training process - it should only be properties
# of the model itself. Let caller take care of saving optimzier state.
torch.save(self.state_dict(), path)
def num_params(self, print_out=False):
parameters = filter(lambda p: p.requires_grad, self.parameters())
parameters = sum([np.prod(p.size()) for p in parameters]) / 1_000_000
if print_out:
print('Trainable Parameters: %.3fM' % parameters)
return parameters
def _flatten_parameters(self):
[m.flatten_parameters() for m in self._to_flatten]
| true | true |
f730daa97bd0bc4461d40eeb73adb796aceb822a | 6,036 | py | Python | micropython/neolamp/neolamp/api.py | hydronics2/esp8266 | cdd72145ce94bdcbbabb52429c5c160f73ce4207 | [
"BSD-2-Clause"
] | 68 | 2016-11-27T18:10:35.000Z | 2021-12-19T16:41:43.000Z | micropython/neolamp/neolamp/api.py | hydronics2/esp8266 | cdd72145ce94bdcbbabb52429c5c160f73ce4207 | [
"BSD-2-Clause"
] | 21 | 2017-01-03T12:23:22.000Z | 2022-01-06T16:19:25.000Z | micropython/neolamp/neolamp/api.py | hydronics2/esp8266 | cdd72145ce94bdcbbabb52429c5c160f73ce4207 | [
"BSD-2-Clause"
] | 34 | 2017-01-06T16:54:45.000Z | 2021-12-17T10:11:13.000Z | #
# Copyright (c) dushin.net All Rights Reserved
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of dushin.net nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY dushin.net ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL dushin.net BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import logging
import uhttpd
class Handler:
    """uhttpd request handler exposing a light controller as a small REST API.

    The first component of the request context selects an operator; the
    remaining components and the query parameters address state on the
    wrapped controller object.
    """

    def __init__(self, controller, verbose=False):
        """Store the controller to delegate to; `verbose` logs POST requests."""
        self.controller = controller
        self.verbose = verbose

    def get(self, api_request):
        """Handle a GET request.

        Operators (first context component):
          - 'config': walk the controller config along the remaining context
            path; '?all=true' returns the raw subtree instead of a listing.
          - 'stats': return controller statistics.
          - 'color': return the current color.

        Raises:
            uhttpd.BadRequestException: empty context or unknown operator.
            uhttpd.NotFoundException: invalid config path (via get_path).
        """
        context = api_request['context']
        if len(context) == 0:
            raise uhttpd.BadRequestException("Bad get request: Missing operator in context")
        operator = context[0]
        if operator == 'config':
            query_params = api_request['query_params']
            # '?all=true' returns the whole config subtree; otherwise the
            # node is serialized (dicts collapse to a listing of their keys).
            return Handler.get_path(
                self.controller.config,
                context[1:],
                'all' in query_params and query_params['all'] == 'true',
            )
        elif operator == 'stats':
            return self.controller.get_stats()
        elif operator == 'color':
            return self.controller.get_color()
        # BUGFIX: an unrecognized operator previously fell through the bare
        # `if` chain and silently returned None; raise instead, consistent
        # with post() and delete().
        raise uhttpd.BadRequestException("Bad get request: Unknown operator: {}".format(operator))

    @staticmethod
    def get_path(tree, path, all=False):
        """Walk `tree` (nested dicts) along `path` and return that node.

        When `all` is true the raw subtree is returned; otherwise the node
        is serialized (dicts become key listings, leaves pass through).
        NOTE: the parameter name `all` shadows the builtin; it is kept for
        caller compatibility.

        Raises:
            uhttpd.NotFoundException: a path component is missing.
        """
        for component in path:
            if component in tree:
                tree = tree[component]
            else:
                raise uhttpd.NotFoundException("Invalid path: {}; '{}' not found.".format(path, component))
        return Handler.serialize(tree) if not all else tree

    @staticmethod
    def serialize(node):
        """Return a response-friendly view: dicts collapse to their key list."""
        if type(node) is dict:
            return Handler.list_keys(node)
        return node

    @staticmethod
    def list_keys(node):
        """Return the keys of dict `node` as a list."""
        return list(node.keys())

    def post(self, api_request):
        """Handle a POST request that mutates controller state.

        Operators: mode, np, lamp, schedule, colorspec, color, reboot, reset.

        Raises:
            uhttpd.BadRequestException: missing/unknown operator, or a
                required query parameter is absent.
        """
        if self.verbose:
            # NOTE(review): the '{}' placeholder assumes a str.format-style
            # logger; stdlib logging uses %-style -- confirm against the
            # logging module actually deployed with uhttpd.
            logging.info('post: api_request={}', api_request)
        context = api_request['context']
        if len(context) == 0:
            raise uhttpd.BadRequestException("Bad post request: Missing operator in context")
        query_params = api_request['query_params']
        operator = context[0]
        if operator == 'mode':
            self.controller.set_mode(query_params['mode'])
        elif operator == 'np':
            # Both parameters are optional; unspecified values are passed as
            # None so the controller can apply its own defaults.
            pin = None
            num_pixels = None
            if 'pin' in query_params:
                pin = query_params['pin']
            if 'num_pixels' in query_params:
                num_pixels = query_params['num_pixels']
            self.controller.set_np(pin=pin, num_pixels=num_pixels)
        elif operator == 'lamp':
            if 'color_name' not in query_params:
                raise uhttpd.BadRequestException("Expected name in query_params")
            self.controller.set_color_name(query_params['color_name'])
        elif operator == 'schedule':
            if 'name' not in query_params:
                raise uhttpd.BadRequestException("Expected name in query_params")
            self.controller.update_schedule(query_params['name'], api_request['body'])
        elif operator == 'colorspec':
            if 'name' not in query_params:
                raise uhttpd.BadRequestException("Expected name in query_params")
            self.controller.set_colorspec(query_params['name'], api_request['body'])
        elif operator == 'color':
            self.controller.set_color((
                int(query_params['r']),
                int(query_params['g']),
                int(query_params['b'])
            ))
        elif operator == 'reboot':
            self.controller.reboot()
        elif operator == 'reset':
            self.controller.reset()
        else:
            raise uhttpd.BadRequestException("Bad post request: Unknown operator: {}".format(operator))

    def delete(self, api_request):
        """Handle a DELETE request removing a named 'schedule' or 'colorspec'.

        Raises:
            uhttpd.BadRequestException: missing/unknown operator, or the
                'name' query parameter is absent.
        """
        context = api_request['context']
        if len(context) == 0:
            raise uhttpd.BadRequestException("Bad delete request: Missing operator in context")
        query_params = api_request['query_params']
        operator = context[0]
        if operator == 'schedule':
            if 'name' not in query_params:
                raise uhttpd.BadRequestException("Expected name in query_params")
            self.controller.delete_schedule(query_params['name'])
        elif operator == 'colorspec':
            if 'name' not in query_params:
                raise uhttpd.BadRequestException("Expected name in query_params")
            self.controller.delete_colorspec(query_params['name'])
        else:
            raise uhttpd.BadRequestException("Bad delete request: Unknown operator: {}".format(operator))
| 45.383459 | 133 | 0.610007 |
import logging
import uhttpd
class Handler :
    """uhttpd request handler exposing a light controller as a small REST API.

    The first component of the request context selects an operator; the
    remaining components and the query parameters address state on the
    wrapped controller object.
    """
    def __init__(self, controller, verbose=False):
        """Store the controller to delegate to; `verbose` logs POST requests."""
        self.controller = controller
        self.verbose = verbose
    def get(self, api_request):
        """Handle GET for operators 'config', 'stats' and 'color'."""
        context = api_request['context']
        if len(context) > 0 :
            if context[0] == 'config' :
                query_params = api_request['query_params']
                # '?all=true' returns the raw config subtree; otherwise the
                # node is serialized (dicts collapse to a key listing).
                return Handler.get_path(self.controller.config, context[1:], 'all' in query_params and query_params['all'] == 'true')
            if context[0] == 'stats' :
                return self.controller.get_stats()
            if context[0] == 'color' :
                return self.controller.get_color()
            # NOTE(review): an unknown operator falls through here and the
            # method returns None -- the `else` below binds to the outer
            # `if len(context) > 0`, unlike post()/delete() which raise an
            # "Unknown operator" error. Confirm whether this is intended.
        else :
            raise uhttpd.BadRequestException("Bad get request: Missing operator in context")
    @staticmethod
    def get_path(tree, path, all=False) :
        """Walk `tree` (nested dicts) along `path` and return that node.

        When `all` is true the raw subtree is returned; otherwise the node
        is serialized. Raises uhttpd.NotFoundException when a component of
        `path` is missing. (Parameter name `all` shadows the builtin.)
        """
        for c in path :
            if c in tree :
                tree = tree[c]
            else :
                raise uhttpd.NotFoundException("Invalid path: {}; '{}' not found.".format(path, c))
        return Handler.serialize(tree) if not all else tree
    @staticmethod
    def serialize(node) :
        """Return a response-friendly view: dicts collapse to their key list."""
        node_type = type(node)
        if node_type is dict :
            return Handler.list_keys(node)
        else :
            return node
    @staticmethod
    def list_keys(node) :
        """Return the keys of dict `node` as a list."""
        ret = []
        for key in node.keys() :
            ret.append(key)
        return ret
    def post(self, api_request):
        """Handle POST mutations: mode, np, lamp, schedule, colorspec,
        color, reboot, reset. Raises uhttpd.BadRequestException on a
        missing/unknown operator or missing required query parameter."""
        if self.verbose :
            # NOTE(review): '{}' placeholder assumes a str.format-style
            # logger; stdlib logging uses %-style -- confirm which logging
            # module ships with this firmware.
            logging.info('post: api_request={}', api_request)
        context = api_request['context']
        if len(context) > 0 :
            query_params = api_request['query_params']
            operator = context[0]
            if operator == 'mode' :
                self.controller.set_mode(query_params['mode'])
            elif operator == 'np' :
                # pin and num_pixels are optional; None lets the controller
                # keep its current/default values.
                pin = None
                num_pixels = None
                if 'pin' in query_params :
                    pin = query_params['pin']
                if 'num_pixels' in query_params :
                    num_pixels = query_params['num_pixels']
                self.controller.set_np(pin=pin, num_pixels=num_pixels)
            elif operator == 'lamp' :
                if 'color_name' not in query_params :
                    raise uhttpd.BadRequestException("Expected name in query_params")
                self.controller.set_color_name(query_params['color_name'])
            elif operator == 'schedule' :
                if 'name' not in query_params :
                    raise uhttpd.BadRequestException("Expected name in query_params")
                self.controller.update_schedule(query_params['name'], api_request['body'])
            elif operator == 'colorspec' :
                if 'name' not in query_params :
                    raise uhttpd.BadRequestException("Expected name in query_params")
                self.controller.set_colorspec(query_params['name'], api_request['body'])
            elif operator == 'color' :
                self.controller.set_color((
                    int(query_params['r']),
                    int(query_params['g']),
                    int(query_params['b'])
                ))
            elif operator == 'reboot' :
                self.controller.reboot()
            elif operator == 'reset' :
                self.controller.reset()
            else :
                raise uhttpd.BadRequestException("Bad post request: Unknown operator: {}".format(operator))
        else :
            raise uhttpd.BadRequestException("Bad post request: Missing operator in context")
    def delete(self, api_request):
        """Handle DELETE of a named 'schedule' or 'colorspec'. Raises
        uhttpd.BadRequestException on a missing/unknown operator or a
        missing 'name' query parameter."""
        context = api_request['context']
        if len(context) > 0 :
            query_params = api_request['query_params']
            operator = context[0]
            if operator == 'schedule' :
                if 'name' not in query_params :
                    raise uhttpd.BadRequestException("Expected name in query_params")
                self.controller.delete_schedule(query_params['name'])
            elif operator == 'colorspec' :
                if 'name' not in query_params :
                    raise uhttpd.BadRequestException("Expected name in query_params")
                self.controller.delete_colorspec(query_params['name'])
            else :
                raise uhttpd.BadRequestException("Bad delete request: Unknown operator: {}".format(operator))
        else :
            raise uhttpd.BadRequestException("Bad delete request: Missing operator in context")
| true | true |
f730dae3cf13445bce8f10c0034724f7175109a5 | 32,617 | py | Python | tests/components/automation/test_sun.py | squirrel289/core | 6c5bcbfc3ee40927458e9188d6b79bf63933d3f9 | [
"Apache-2.0"
] | 6 | 2020-07-18T16:33:25.000Z | 2021-09-26T09:52:04.000Z | tests/components/sun/test_trigger.py | SicAriuSx83/core | 162c39258e68ae42fe4e1560ae91ed54f5662409 | [
"Apache-2.0"
] | 47 | 2020-07-23T07:14:33.000Z | 2022-03-31T06:01:46.000Z | tests/components/sun/test_trigger.py | SicAriuSx83/core | 162c39258e68ae42fe4e1560ae91ed54f5662409 | [
"Apache-2.0"
] | 3 | 2020-10-18T07:08:40.000Z | 2021-06-21T02:26:00.000Z | """The tests for the sun automation."""
from datetime import datetime
import pytest
from homeassistant.components import sun
import homeassistant.components.automation as automation
from homeassistant.const import SUN_EVENT_SUNRISE, SUN_EVENT_SUNSET
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.async_mock import patch
from tests.common import async_fire_time_changed, async_mock_service, mock_component
from tests.components.automation import common
ORIG_TIME_ZONE = dt_util.DEFAULT_TIME_ZONE
@pytest.fixture
def calls(hass):
    """Track calls to a mock service.

    Registers a mocked ``test.automation`` service; the returned list grows
    by one entry per invocation, so tests assert on ``len(calls)``.
    """
    return async_mock_service(hass, "test", "automation")
@pytest.fixture(autouse=True)
def setup_comp(hass):
    """Initialize components.

    Runs for every test: stubs out 'group', aligns dt_util's default
    timezone with the test hass instance (so local sunrise/sunset math is
    deterministic), and sets up the sun integration at elevation 0.
    """
    mock_component(hass, "group")
    dt_util.set_default_time_zone(hass.config.time_zone)
    hass.loop.run_until_complete(
        async_setup_component(hass, sun.DOMAIN, {sun.DOMAIN: {sun.CONF_ELEVATION: 0}})
    )
def teardown():
    """Restore.

    Module-level pytest teardown hook: undoes the default-timezone change
    made by setup_comp (and by the Kotzebue tests).
    """
    dt_util.set_default_time_zone(ORIG_TIME_ZONE)
async def test_sunset_trigger(hass, calls, legacy_patchable_time):
    """Test the sunset trigger.

    Verifies the action is suppressed while the automation is turned off
    and fires exactly once after it is turned back on.
    """
    now = datetime(2015, 9, 15, 23, tzinfo=dt_util.UTC)
    trigger_time = datetime(2015, 9, 16, 2, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        await async_setup_component(
            hass,
            automation.DOMAIN,
            {
                automation.DOMAIN: {
                    "trigger": {"platform": "sun", "event": SUN_EVENT_SUNSET},
                    "action": {"service": "test.automation"},
                }
            },
        )
        # Turned off: advancing time past sunset must not run the action.
        await common.async_turn_off(hass)
        await hass.async_block_till_done()
        async_fire_time_changed(hass, trigger_time)
        await hass.async_block_till_done()
        assert len(calls) == 0
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        # Turned back on: the same sunset event now fires the action once.
        await common.async_turn_on(hass)
        await hass.async_block_till_done()
        async_fire_time_changed(hass, trigger_time)
        await hass.async_block_till_done()
        assert len(calls) == 1
async def test_sunrise_trigger(hass, calls, legacy_patchable_time):
    """Test the sunrise trigger.

    Advancing the clock past sunrise must fire the action exactly once.
    """
    now = datetime(2015, 9, 13, 23, tzinfo=dt_util.UTC)
    trigger_time = datetime(2015, 9, 16, 14, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        await async_setup_component(
            hass,
            automation.DOMAIN,
            {
                automation.DOMAIN: {
                    "trigger": {"platform": "sun", "event": SUN_EVENT_SUNRISE},
                    "action": {"service": "test.automation"},
                }
            },
        )
    async_fire_time_changed(hass, trigger_time)
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_sunset_trigger_with_offset(hass, calls, legacy_patchable_time):
    """Test the sunset trigger with offset.

    The trigger fires at sunset + 30 min and the action template records
    the trigger context (platform/event/offset) for verification.
    """
    now = datetime(2015, 9, 15, 23, tzinfo=dt_util.UTC)
    trigger_time = datetime(2015, 9, 16, 2, 30, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        await async_setup_component(
            hass,
            automation.DOMAIN,
            {
                automation.DOMAIN: {
                    "trigger": {
                        "platform": "sun",
                        "event": SUN_EVENT_SUNSET,
                        "offset": "0:30:00",
                    },
                    "action": {
                        "service": "test.automation",
                        "data_template": {
                            # Builds "{{ trigger.platform }} - {{ trigger.event }} - {{ trigger.offset }}"
                            "some": "{{ trigger.%s }}"
                            % "}} - {{ trigger.".join(("platform", "event", "offset"))
                        },
                    },
                }
            },
        )
    async_fire_time_changed(hass, trigger_time)
    await hass.async_block_till_done()
    assert len(calls) == 1
    assert calls[0].data["some"] == "sun - sunset - 0:30:00"
async def test_sunrise_trigger_with_offset(hass, calls, legacy_patchable_time):
    """Test the sunrise trigger with offset.

    With a -30 min offset the trigger fires half an hour before sunrise;
    advancing the clock past that point must fire the action once.
    """
    now = datetime(2015, 9, 13, 23, tzinfo=dt_util.UTC)
    trigger_time = datetime(2015, 9, 16, 13, 30, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        await async_setup_component(
            hass,
            automation.DOMAIN,
            {
                automation.DOMAIN: {
                    "trigger": {
                        "platform": "sun",
                        "event": SUN_EVENT_SUNRISE,
                        "offset": "-0:30:00",
                    },
                    "action": {"service": "test.automation"},
                }
            },
        )
    async_fire_time_changed(hass, trigger_time)
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_if_action_before_sunrise_no_offset(hass, calls):
    """
    Test if action was before sunrise.
    Before sunrise is true from midnight until sunrise, local time.
    """
    await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {"platform": "event", "event_type": "test_event"},
                "condition": {"condition": "sun", "before": SUN_EVENT_SUNRISE},
                "action": {"service": "test.automation"},
            }
        },
    )
    # sunrise: 2015-09-16 06:32:43 local, sunset: 2015-09-16 18:55:24 local
    # sunrise: 2015-09-16 13:32:43 UTC, sunset: 2015-09-17 01:55:24 UTC
    # (test timezone is UTC-7 here, so local midnight == 07:00 UTC)
    # now = sunrise + 1s -> 'before sunrise' not true
    now = datetime(2015, 9, 16, 13, 32, 44, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 0
    # now = sunrise -> 'before sunrise' true (boundary is inclusive)
    now = datetime(2015, 9, 16, 13, 32, 43, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1
    # now = local midnight -> 'before sunrise' true
    now = datetime(2015, 9, 16, 7, 0, 0, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 2
    # now = local midnight - 1s -> 'before sunrise' not true
    now = datetime(2015, 9, 17, 6, 59, 59, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 2
async def test_if_action_after_sunrise_no_offset(hass, calls):
    """
    Test if action was after sunrise.
    After sunrise is true from sunrise until midnight, local time.
    """
    await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {"platform": "event", "event_type": "test_event"},
                "condition": {"condition": "sun", "after": SUN_EVENT_SUNRISE},
                "action": {"service": "test.automation"},
            }
        },
    )
    # sunrise: 2015-09-16 06:32:43 local, sunset: 2015-09-16 18:55:24 local
    # sunrise: 2015-09-16 13:32:43 UTC, sunset: 2015-09-17 01:55:24 UTC
    # now = sunrise - 1s -> 'after sunrise' not true
    now = datetime(2015, 9, 16, 13, 32, 42, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 0
    # now = sunrise -> 'after sunrise' true (boundary is inclusive)
    now = datetime(2015, 9, 16, 13, 32, 43, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1
    # now = local midnight -> 'after sunrise' not true
    now = datetime(2015, 9, 16, 7, 0, 0, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1
    # now = local midnight - 1s -> 'after sunrise' true
    now = datetime(2015, 9, 17, 6, 59, 59, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 2
async def test_if_action_before_sunrise_with_offset(hass, calls):
    """
    Test if action was before sunrise with offset.
    'Before sunrise' with a +1h offset is true from midnight until
    sunrise + 1h, local time.
    """
    await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {"platform": "event", "event_type": "test_event"},
                "condition": {
                    "condition": "sun",
                    "before": SUN_EVENT_SUNRISE,
                    "before_offset": "+1:00:00",
                },
                "action": {"service": "test.automation"},
            }
        },
    )
    # sunrise: 2015-09-16 06:32:43 local, sunset: 2015-09-16 18:55:24 local
    # sunrise: 2015-09-16 13:32:43 UTC, sunset: 2015-09-17 01:55:24 UTC
    # now = sunrise + 1s + 1h -> 'before sunrise' with offset +1h not true
    now = datetime(2015, 9, 16, 14, 32, 44, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 0
    # now = sunrise + 1h -> 'before sunrise' with offset +1h true
    now = datetime(2015, 9, 16, 14, 32, 43, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1
    # now = UTC midnight (17:00 local, past sunrise+1h) -> not true
    now = datetime(2015, 9, 17, 0, 0, 0, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1
    # now = UTC midnight - 1s -> 'before sunrise' with offset +1h not true
    now = datetime(2015, 9, 16, 23, 59, 59, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1
    # now = local midnight -> 'before sunrise' with offset +1h true
    now = datetime(2015, 9, 16, 7, 0, 0, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 2
    # now = local midnight - 1s -> 'before sunrise' with offset +1h not true
    now = datetime(2015, 9, 17, 6, 59, 59, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 2
    # now = ~sunset -> 'before sunrise' with offset +1h not true
    now = datetime(2015, 9, 17, 1, 56, 48, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 2
    # now = ~sunset - 1s -> 'before sunrise' with offset +1h not true
    now = datetime(2015, 9, 17, 1, 56, 45, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 2
async def test_if_action_before_sunset_with_offset(hass, calls):
    """
    Test if action was before sunset with offset.
    'Before sunset' with a +1h offset is true from midnight until
    sunset + 1h, local time.
    """
    await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {"platform": "event", "event_type": "test_event"},
                "condition": {
                    "condition": "sun",
                    "before": "sunset",
                    "before_offset": "+1:00:00",
                },
                "action": {"service": "test.automation"},
            }
        },
    )
    # sunrise: 2015-09-16 06:32:43 local, sunset: 2015-09-16 18:55:24 local
    # sunrise: 2015-09-16 13:32:43 UTC, sunset: 2015-09-17 01:55:24 UTC
    # now = local midnight -> 'before sunset' with offset +1h true
    now = datetime(2015, 9, 16, 7, 0, 0, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1
    # now = sunset + 1s + 1h -> 'before sunset' with offset +1h not true
    now = datetime(2015, 9, 17, 2, 55, 25, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1
    # now = sunset + 1h -> 'before sunset' with offset +1h true
    now = datetime(2015, 9, 17, 2, 55, 24, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 2
    # now = UTC midnight -> 'before sunset' with offset +1h true
    now = datetime(2015, 9, 17, 0, 0, 0, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 3
    # now = UTC midnight - 1s -> 'before sunset' with offset +1h true
    now = datetime(2015, 9, 16, 23, 59, 59, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 4
    # now = sunrise -> 'before sunset' with offset +1h true
    now = datetime(2015, 9, 16, 13, 32, 43, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 5
    # now = sunrise -1s -> 'before sunset' with offset +1h true
    now = datetime(2015, 9, 16, 13, 32, 42, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 6
    # now = local midnight-1s -> 'before sunset' with offset +1h not true
    now = datetime(2015, 9, 17, 6, 59, 59, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 6
async def test_if_action_after_sunrise_with_offset(hass, calls):
    """
    Test if action was after sunrise with offset.
    'After sunrise' with a +1h offset is true from sunrise + 1h until
    midnight, local time.
    """
    await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {"platform": "event", "event_type": "test_event"},
                "condition": {
                    "condition": "sun",
                    "after": SUN_EVENT_SUNRISE,
                    "after_offset": "+1:00:00",
                },
                "action": {"service": "test.automation"},
            }
        },
    )
    # sunrise: 2015-09-16 06:32:43 local, sunset: 2015-09-16 18:55:24 local
    # sunrise: 2015-09-16 13:32:43 UTC, sunset: 2015-09-17 01:55:24 UTC
    # now = sunrise - 1s + 1h -> 'after sunrise' with offset +1h not true
    now = datetime(2015, 9, 16, 14, 32, 42, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 0
    # now = sunrise + 1h -> 'after sunrise' with offset +1h true
    now = datetime(2015, 9, 16, 14, 32, 43, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1
    # now = UTC noon (05:00 local, before sunrise) -> not true
    now = datetime(2015, 9, 16, 12, 0, 0, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1
    # now = UTC noon - 1s -> 'after sunrise' with offset +1h not true
    now = datetime(2015, 9, 16, 11, 59, 59, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1
    # now = local noon -> 'after sunrise' with offset +1h true
    now = datetime(2015, 9, 16, 19, 1, 0, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 2
    # now = local noon - 1s -> 'after sunrise' with offset +1h true
    now = datetime(2015, 9, 16, 18, 59, 59, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 3
    # now = sunset -> 'after sunrise' with offset +1h true
    now = datetime(2015, 9, 17, 1, 55, 24, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 4
    # now = sunset + 1s -> 'after sunrise' with offset +1h true
    now = datetime(2015, 9, 17, 1, 55, 25, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 5
    # now = local midnight-1s -> 'after sunrise' with offset +1h true
    now = datetime(2015, 9, 17, 6, 59, 59, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 6
    # now = local midnight -> 'after sunrise' with offset +1h not true
    now = datetime(2015, 9, 17, 7, 0, 0, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 6
async def test_if_action_after_sunset_with_offset(hass, calls):
    """
    Test if action was after sunset with offset.
    'After sunset' with a +1h offset is true from sunset + 1h until
    midnight, local time.
    """
    await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {"platform": "event", "event_type": "test_event"},
                "condition": {
                    "condition": "sun",
                    "after": "sunset",
                    "after_offset": "+1:00:00",
                },
                "action": {"service": "test.automation"},
            }
        },
    )
    # sunrise: 2015-09-15 06:32:05 local, sunset: 2015-09-15 18:56:46 local
    # sunrise: 2015-09-15 13:32:05 UTC, sunset: 2015-09-16 01:56:46 UTC
    # now = sunset - 1s + 1h -> 'after sunset' with offset +1h not true
    now = datetime(2015, 9, 16, 2, 56, 45, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 0
    # now = sunset + 1h -> 'after sunset' with offset +1h true
    now = datetime(2015, 9, 16, 2, 56, 46, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1
    # now = local midnight-1s -> 'after sunset' with offset +1h true
    now = datetime(2015, 9, 16, 6, 59, 59, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 2
    # now = local midnight -> 'after sunset' with offset +1h not true
    now = datetime(2015, 9, 16, 7, 0, 0, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 2
async def test_if_action_before_and_after_during(hass, calls):
    """
    Test if action was after sunrise and before sunset.
    This is true from sunrise until sunset.
    """
    await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {"platform": "event", "event_type": "test_event"},
                "condition": {
                    "condition": "sun",
                    "after": SUN_EVENT_SUNRISE,
                    "before": SUN_EVENT_SUNSET,
                },
                "action": {"service": "test.automation"},
            }
        },
    )
    # sunrise: 2015-09-16 06:32:43 local, sunset: 2015-09-16 18:55:24 local
    # sunrise: 2015-09-16 13:32:43 UTC, sunset: 2015-09-17 01:55:24 UTC
    # now = sunrise - 1s -> 'after sunrise' + 'before sunset' not true
    now = datetime(2015, 9, 16, 13, 32, 42, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 0
    # now = sunset + 1s -> 'after sunrise' + 'before sunset' not true
    now = datetime(2015, 9, 17, 1, 55, 25, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 0
    # now = sunrise -> 'after sunrise' + 'before sunset' true (inclusive)
    now = datetime(2015, 9, 16, 13, 32, 43, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1
    # now = sunset -> 'after sunrise' + 'before sunset' true (inclusive)
    now = datetime(2015, 9, 17, 1, 55, 24, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 2
    # now = 9AM local -> 'after sunrise' + 'before sunset' true
    now = datetime(2015, 9, 16, 16, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 3
async def test_if_action_before_sunrise_no_offset_kotzebue(hass, calls):
    """
    Test if action was before sunrise.
    Local timezone: Alaska time
    Location: Kotzebue, which has a very skewed local timezone with sunrise
    at 7 AM and sunset at 3AM during summer
    Before sunrise is true from midnight until sunrise, local time.
    """
    tz = dt_util.get_time_zone("America/Anchorage")
    dt_util.set_default_time_zone(tz)
    hass.config.latitude = 66.5
    hass.config.longitude = 162.4
    await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {"platform": "event", "event_type": "test_event"},
                "condition": {"condition": "sun", "before": SUN_EVENT_SUNRISE},
                "action": {"service": "test.automation"},
            }
        },
    )
    # sunrise: 2015-07-24 07:17:24 local, sunset: 2015-07-25 03:16:27 local
    # sunrise: 2015-07-24 15:17:24 UTC, sunset: 2015-07-25 11:16:27 UTC
    # (AKDT is UTC-8, so local midnight == 08:00 UTC)
    # now = sunrise + 1s -> 'before sunrise' not true
    now = datetime(2015, 7, 24, 15, 17, 25, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 0
    # now = sunrise -> 'before sunrise' true (boundary is inclusive)
    now = datetime(2015, 7, 24, 15, 17, 24, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1
    # now = local midnight -> 'before sunrise' true
    now = datetime(2015, 7, 24, 8, 0, 0, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 2
    # now = local midnight - 1s -> 'before sunrise' not true
    now = datetime(2015, 7, 24, 7, 59, 59, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 2
async def test_if_action_after_sunrise_no_offset_kotzebue(hass, calls):
    """
    Test if action was after sunrise.
    Local timezone: Alaska time
    Location: Kotzebue, which has a very skewed local timezone with sunrise
    at 7 AM and sunset at 3AM during summer
    After sunrise is true from sunrise until midnight, local time.
    """
    tz = dt_util.get_time_zone("America/Anchorage")
    dt_util.set_default_time_zone(tz)
    hass.config.latitude = 66.5
    hass.config.longitude = 162.4
    await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {"platform": "event", "event_type": "test_event"},
                "condition": {"condition": "sun", "after": SUN_EVENT_SUNRISE},
                "action": {"service": "test.automation"},
            }
        },
    )
    # sunrise: 2015-07-24 07:17:24 local, sunset: 2015-07-25 03:16:27 local
    # sunrise: 2015-07-24 15:17:24 UTC, sunset: 2015-07-25 11:16:27 UTC
    # now = sunrise -> 'after sunrise' true (boundary is inclusive)
    now = datetime(2015, 7, 24, 15, 17, 24, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1
    # now = sunrise - 1s -> 'after sunrise' not true
    now = datetime(2015, 7, 24, 15, 17, 23, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1
    # now = local midnight + 1s -> 'after sunrise' not true
    now = datetime(2015, 7, 24, 8, 0, 1, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1
    # now = local midnight - 1s -> 'after sunrise' true
    now = datetime(2015, 7, 24, 7, 59, 59, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 2
async def test_if_action_before_sunset_no_offset_kotzebue(hass, calls):
    """
    Test if action was before sunset.
    Local timezone: Alaska time
    Location: Kotzebue, which has a very skewed local timezone with sunrise
    at 7 AM and sunset at 3AM during summer
    Before sunset is true from midnight until sunset, local time.
    """
    tz = dt_util.get_time_zone("America/Anchorage")
    dt_util.set_default_time_zone(tz)
    hass.config.latitude = 66.5
    hass.config.longitude = 162.4
    await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: {
                "trigger": {"platform": "event", "event_type": "test_event"},
                "condition": {"condition": "sun", "before": SUN_EVENT_SUNSET},
                "action": {"service": "test.automation"},
            }
        },
    )
    # sunrise: 2015-07-24 07:17:24 local, sunset: 2015-07-25 03:16:27 local
    # sunrise: 2015-07-24 15:17:24 UTC, sunset: 2015-07-25 11:16:27 UTC
    # now = sunset + 1s -> 'before sunset' not true
    now = datetime(2015, 7, 25, 11, 16, 28, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 0
    # now = sunset -> 'before sunset' true (boundary is inclusive)
    now = datetime(2015, 7, 25, 11, 16, 27, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 1
    # now = local midnight -> 'before sunset' true
    now = datetime(2015, 7, 24, 8, 0, 0, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 2
    # now = local midnight - 1s -> 'before sunset' not true
    now = datetime(2015, 7, 24, 7, 59, 59, tzinfo=dt_util.UTC)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.bus.async_fire("test_event")
        await hass.async_block_till_done()
        assert len(calls) == 2
async def test_if_action_after_sunset_no_offset_kotzebue(hass, calls):
"""
Test if action was after sunrise.
Local timezone: Alaska time
Location: Kotzebue, which has a very skewed local timezone with sunrise
at 7 AM and sunset at 3AM during summer
After sunset is true from sunset until midnight, local time.
"""
tz = dt_util.get_time_zone("America/Anchorage")
dt_util.set_default_time_zone(tz)
hass.config.latitude = 66.5
hass.config.longitude = 162.4
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"condition": {"condition": "sun", "after": SUN_EVENT_SUNSET},
"action": {"service": "test.automation"},
}
},
)
# sunrise: 2015-07-24 07:17:24 local, sunset: 2015-07-25 03:16:27 local
# sunrise: 2015-07-24 15:17:24 UTC, sunset: 2015-07-25 11:16:27 UTC
# now = sunset -> 'after sunset' true
now = datetime(2015, 7, 25, 11, 16, 27, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
# now = sunset - 1s -> 'after sunset' not true
now = datetime(2015, 7, 25, 11, 16, 26, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
# now = local midnight -> 'after sunset' not true
now = datetime(2015, 7, 24, 8, 0, 1, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
# now = local midnight - 1s -> 'after sunset' true
now = datetime(2015, 7, 24, 7, 59, 59, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
| 38.645735 | 86 | 0.616366 | from datetime import datetime
import pytest
from homeassistant.components import sun
import homeassistant.components.automation as automation
from homeassistant.const import SUN_EVENT_SUNRISE, SUN_EVENT_SUNSET
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.async_mock import patch
from tests.common import async_fire_time_changed, async_mock_service, mock_component
from tests.components.automation import common
ORIG_TIME_ZONE = dt_util.DEFAULT_TIME_ZONE
@pytest.fixture
def calls(hass):
return async_mock_service(hass, "test", "automation")
@pytest.fixture(autouse=True)
def setup_comp(hass):
mock_component(hass, "group")
dt_util.set_default_time_zone(hass.config.time_zone)
hass.loop.run_until_complete(
async_setup_component(hass, sun.DOMAIN, {sun.DOMAIN: {sun.CONF_ELEVATION: 0}})
)
def teardown():
dt_util.set_default_time_zone(ORIG_TIME_ZONE)
async def test_sunset_trigger(hass, calls, legacy_patchable_time):
now = datetime(2015, 9, 15, 23, tzinfo=dt_util.UTC)
trigger_time = datetime(2015, 9, 16, 2, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "sun", "event": SUN_EVENT_SUNSET},
"action": {"service": "test.automation"},
}
},
)
await common.async_turn_off(hass)
await hass.async_block_till_done()
async_fire_time_changed(hass, trigger_time)
await hass.async_block_till_done()
assert len(calls) == 0
with patch("homeassistant.util.dt.utcnow", return_value=now):
await common.async_turn_on(hass)
await hass.async_block_till_done()
async_fire_time_changed(hass, trigger_time)
await hass.async_block_till_done()
assert len(calls) == 1
async def test_sunrise_trigger(hass, calls, legacy_patchable_time):
now = datetime(2015, 9, 13, 23, tzinfo=dt_util.UTC)
trigger_time = datetime(2015, 9, 16, 14, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "sun", "event": SUN_EVENT_SUNRISE},
"action": {"service": "test.automation"},
}
},
)
async_fire_time_changed(hass, trigger_time)
await hass.async_block_till_done()
assert len(calls) == 1
async def test_sunset_trigger_with_offset(hass, calls, legacy_patchable_time):
now = datetime(2015, 9, 15, 23, tzinfo=dt_util.UTC)
trigger_time = datetime(2015, 9, 16, 2, 30, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "sun",
"event": SUN_EVENT_SUNSET,
"offset": "0:30:00",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.%s }}"
% "}} - {{ trigger.".join(("platform", "event", "offset"))
},
},
}
},
)
async_fire_time_changed(hass, trigger_time)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "sun - sunset - 0:30:00"
async def test_sunrise_trigger_with_offset(hass, calls, legacy_patchable_time):
now = datetime(2015, 9, 13, 23, tzinfo=dt_util.UTC)
trigger_time = datetime(2015, 9, 16, 13, 30, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "sun",
"event": SUN_EVENT_SUNRISE,
"offset": "-0:30:00",
},
"action": {"service": "test.automation"},
}
},
)
async_fire_time_changed(hass, trigger_time)
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_action_before_sunrise_no_offset(hass, calls):
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"condition": {"condition": "sun", "before": SUN_EVENT_SUNRISE},
"action": {"service": "test.automation"},
}
},
)
now = datetime(2015, 9, 16, 13, 32, 44, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 0
now = datetime(2015, 9, 16, 13, 32, 43, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
now = datetime(2015, 9, 16, 7, 0, 0, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
now = datetime(2015, 9, 17, 6, 59, 59, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
async def test_if_action_after_sunrise_no_offset(hass, calls):
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"condition": {"condition": "sun", "after": SUN_EVENT_SUNRISE},
"action": {"service": "test.automation"},
}
},
)
now = datetime(2015, 9, 16, 13, 32, 42, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 0
now = datetime(2015, 9, 16, 13, 32, 43, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
now = datetime(2015, 9, 16, 7, 0, 0, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
now = datetime(2015, 9, 17, 6, 59, 59, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
async def test_if_action_before_sunrise_with_offset(hass, calls):
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"condition": {
"condition": "sun",
"before": SUN_EVENT_SUNRISE,
"before_offset": "+1:00:00",
},
"action": {"service": "test.automation"},
}
},
)
now = datetime(2015, 9, 16, 14, 32, 44, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 0
now = datetime(2015, 9, 16, 14, 32, 43, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
now = datetime(2015, 9, 17, 0, 0, 0, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
now = datetime(2015, 9, 16, 23, 59, 59, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
now = datetime(2015, 9, 16, 7, 0, 0, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
now = datetime(2015, 9, 17, 6, 59, 59, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
now = datetime(2015, 9, 17, 1, 56, 48, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
now = datetime(2015, 9, 17, 1, 56, 45, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
async def test_if_action_before_sunset_with_offset(hass, calls):
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"condition": {
"condition": "sun",
"before": "sunset",
"before_offset": "+1:00:00",
},
"action": {"service": "test.automation"},
}
},
)
now = datetime(2015, 9, 16, 7, 0, 0, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
now = datetime(2015, 9, 17, 2, 55, 25, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
now = datetime(2015, 9, 17, 2, 55, 24, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
now = datetime(2015, 9, 17, 0, 0, 0, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 3
now = datetime(2015, 9, 16, 23, 59, 59, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 4
now = datetime(2015, 9, 16, 13, 32, 43, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 5
now = datetime(2015, 9, 16, 13, 32, 42, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 6
now = datetime(2015, 9, 17, 6, 59, 59, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 6
async def test_if_action_after_sunrise_with_offset(hass, calls):
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"condition": {
"condition": "sun",
"after": SUN_EVENT_SUNRISE,
"after_offset": "+1:00:00",
},
"action": {"service": "test.automation"},
}
},
)
now = datetime(2015, 9, 16, 14, 32, 42, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 0
now = datetime(2015, 9, 16, 14, 32, 43, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
now = datetime(2015, 9, 16, 12, 0, 0, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
now = datetime(2015, 9, 16, 11, 59, 59, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
now = datetime(2015, 9, 16, 19, 1, 0, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
now = datetime(2015, 9, 16, 18, 59, 59, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 3
now = datetime(2015, 9, 17, 1, 55, 24, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 4
now = datetime(2015, 9, 17, 1, 55, 25, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 5
now = datetime(2015, 9, 17, 6, 59, 59, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 6
now = datetime(2015, 9, 17, 7, 0, 0, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 6
async def test_if_action_after_sunset_with_offset(hass, calls):
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"condition": {
"condition": "sun",
"after": "sunset",
"after_offset": "+1:00:00",
},
"action": {"service": "test.automation"},
}
},
)
now = datetime(2015, 9, 16, 2, 56, 45, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 0
now = datetime(2015, 9, 16, 2, 56, 46, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
now = datetime(2015, 9, 16, 6, 59, 59, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
now = datetime(2015, 9, 16, 7, 0, 0, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
async def test_if_action_before_and_after_during(hass, calls):
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"condition": {
"condition": "sun",
"after": SUN_EVENT_SUNRISE,
"before": SUN_EVENT_SUNSET,
},
"action": {"service": "test.automation"},
}
},
)
now = datetime(2015, 9, 16, 13, 32, 42, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 0
now = datetime(2015, 9, 17, 1, 55, 25, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 0
now = datetime(2015, 9, 16, 13, 32, 43, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
now = datetime(2015, 9, 17, 1, 55, 24, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
now = datetime(2015, 9, 16, 16, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 3
async def test_if_action_before_sunrise_no_offset_kotzebue(hass, calls):
tz = dt_util.get_time_zone("America/Anchorage")
dt_util.set_default_time_zone(tz)
hass.config.latitude = 66.5
hass.config.longitude = 162.4
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"condition": {"condition": "sun", "before": SUN_EVENT_SUNRISE},
"action": {"service": "test.automation"},
}
},
)
now = datetime(2015, 7, 24, 15, 17, 25, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 0
now = datetime(2015, 7, 24, 15, 17, 24, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
now = datetime(2015, 7, 24, 8, 0, 0, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
now = datetime(2015, 7, 24, 7, 59, 59, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
async def test_if_action_after_sunrise_no_offset_kotzebue(hass, calls):
tz = dt_util.get_time_zone("America/Anchorage")
dt_util.set_default_time_zone(tz)
hass.config.latitude = 66.5
hass.config.longitude = 162.4
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"condition": {"condition": "sun", "after": SUN_EVENT_SUNRISE},
"action": {"service": "test.automation"},
}
},
)
now = datetime(2015, 7, 24, 15, 17, 24, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
now = datetime(2015, 7, 24, 15, 17, 23, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
now = datetime(2015, 7, 24, 8, 0, 1, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
now = datetime(2015, 7, 24, 7, 59, 59, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
async def test_if_action_before_sunset_no_offset_kotzebue(hass, calls):
tz = dt_util.get_time_zone("America/Anchorage")
dt_util.set_default_time_zone(tz)
hass.config.latitude = 66.5
hass.config.longitude = 162.4
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"condition": {"condition": "sun", "before": SUN_EVENT_SUNSET},
"action": {"service": "test.automation"},
}
},
)
now = datetime(2015, 7, 25, 11, 16, 28, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 0
now = datetime(2015, 7, 25, 11, 16, 27, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
now = datetime(2015, 7, 24, 8, 0, 0, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
now = datetime(2015, 7, 24, 7, 59, 59, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
async def test_if_action_after_sunset_no_offset_kotzebue(hass, calls):
tz = dt_util.get_time_zone("America/Anchorage")
dt_util.set_default_time_zone(tz)
hass.config.latitude = 66.5
hass.config.longitude = 162.4
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"condition": {"condition": "sun", "after": SUN_EVENT_SUNSET},
"action": {"service": "test.automation"},
}
},
)
now = datetime(2015, 7, 25, 11, 16, 27, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
now = datetime(2015, 7, 25, 11, 16, 26, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
now = datetime(2015, 7, 24, 8, 0, 1, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
now = datetime(2015, 7, 24, 7, 59, 59, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=now):
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 2
| true | true |
f730db018b5a100d3b9690cd2c3518425836dcfb | 2,353 | py | Python | setup.py | wdv4758h/rsglob | 342f950c240b5d84c629ecf4fec348401975d2ba | [
"BSD-2-Clause"
] | null | null | null | setup.py | wdv4758h/rsglob | 342f950c240b5d84c629ecf4fec348401975d2ba | [
"BSD-2-Clause"
] | null | null | null | setup.py | wdv4758h/rsglob | 342f950c240b5d84c629ecf4fec348401975d2ba | [
"BSD-2-Clause"
] | null | null | null | import os
import sys
from setuptools import find_packages, setup, Extension
try: # for pip >= 10
from pip._internal.req import parse_requirements
except ImportError:
from pip.req import parse_requirements
try:
from setuptools_rust import RustExtension
except ImportError:
import subprocess
errno = subprocess.call(
[sys.executable, '-m', 'pip', 'install', 'setuptools-rust'])
if errno:
print("Please install setuptools-rust package")
raise SystemExit(errno)
else:
from setuptools_rust import RustExtension
def get_requirements(filename):
# parse_requirements() returns generator of pip.req.InstallRequirement instance
install_requires = parse_requirements(
os.path.join(ROOT_DIR, filename),
session=False,
)
# requirements is a list of requirement
requirements = list(map(lambda x: str(x).split()[0], install_requires))
return requirements
ROOT_DIR = os.path.dirname(os.path.realpath(__file__))
version = __import__('rsglob').VERSION
setup_requires = ['setuptools-rust>=0.6.0']
install_requires = get_requirements('requirements.txt')
test_requires = get_requirements('requirements-test.txt')
rust_extensions = [RustExtension('rsglob._rsglob', 'Cargo.toml')]
setup(
name='rsglob',
version=version,
url='https://github.com/wdv4758h/rsglob',
author='Chiu-Hsiang Hsu',
author_email='wdv4758h@gmail.com',
description=('Python glob in Rust'),
long_description=open("README.rst").read(),
download_url="https://github.com/wdv4758h/rsglob/archive/v{}.zip".format(
version
),
license='BSD',
tests_require=test_requires,
install_requires=install_requires,
packages=find_packages(),
rust_extensions=rust_extensions,
zip_safe=False,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
# 'Programming Language :: Python :: 2',
# 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
# 'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
| 30.960526 | 83 | 0.677008 | import os
import sys
from setuptools import find_packages, setup, Extension
try:
from pip._internal.req import parse_requirements
except ImportError:
from pip.req import parse_requirements
try:
from setuptools_rust import RustExtension
except ImportError:
import subprocess
errno = subprocess.call(
[sys.executable, '-m', 'pip', 'install', 'setuptools-rust'])
if errno:
print("Please install setuptools-rust package")
raise SystemExit(errno)
else:
from setuptools_rust import RustExtension
def get_requirements(filename):
install_requires = parse_requirements(
os.path.join(ROOT_DIR, filename),
session=False,
)
requirements = list(map(lambda x: str(x).split()[0], install_requires))
return requirements
ROOT_DIR = os.path.dirname(os.path.realpath(__file__))
version = __import__('rsglob').VERSION
setup_requires = ['setuptools-rust>=0.6.0']
install_requires = get_requirements('requirements.txt')
test_requires = get_requirements('requirements-test.txt')
rust_extensions = [RustExtension('rsglob._rsglob', 'Cargo.toml')]
setup(
name='rsglob',
version=version,
url='https://github.com/wdv4758h/rsglob',
author='Chiu-Hsiang Hsu',
author_email='wdv4758h@gmail.com',
description=('Python glob in Rust'),
long_description=open("README.rst").read(),
download_url="https://github.com/wdv4758h/rsglob/archive/v{}.zip".format(
version
),
license='BSD',
tests_require=test_requires,
install_requires=install_requires,
packages=find_packages(),
rust_extensions=rust_extensions,
zip_safe=False,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
| true | true |
f730db449a2b60d8fb643fdf8902b444b885c8dd | 8,163 | py | Python | tests/sentry/models/test_group.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | 1 | 2019-10-17T17:46:16.000Z | 2019-10-17T17:46:16.000Z | tests/sentry/models/test_group.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | null | null | null | tests/sentry/models/test_group.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from datetime import timedelta
import pytest
from django.db.models import ProtectedError
from django.utils import timezone
from sentry.models import (
Group,
GroupRedirect,
GroupSnooze,
GroupStatus,
Release,
get_group_with_redirect,
)
from sentry.testutils import SnubaTestCase, TestCase
from sentry.testutils.helpers.datetime import iso_format, before_now
class GroupTest(TestCase, SnubaTestCase):
def setUp(self):
super(GroupTest, self).setUp()
self.min_ago = iso_format(before_now(minutes=1))
self.two_min_ago = iso_format(before_now(minutes=2))
self.just_over_one_min_ago = iso_format(before_now(seconds=61))
def test_is_resolved(self):
group = self.create_group(status=GroupStatus.RESOLVED)
assert group.is_resolved()
group.status = GroupStatus.IGNORED
assert not group.is_resolved()
group.status = GroupStatus.UNRESOLVED
assert not group.is_resolved()
group.last_seen = timezone.now() - timedelta(hours=12)
group.project.update_option("sentry:resolve_age", 24)
assert not group.is_resolved()
group.project.update_option("sentry:resolve_age", 1)
assert group.is_resolved()
def test_get_latest_event_no_events(self):
project = self.create_project()
group = self.create_group(project=project)
assert group.get_latest_event() is None
def test_get_latest_event(self):
self.store_event(
data={"event_id": "a" * 32, "fingerprint": ["group-1"], "timestamp": self.two_min_ago},
project_id=self.project.id,
)
self.store_event(
data={"event_id": "b" * 32, "fingerprint": ["group-1"], "timestamp": self.min_ago},
project_id=self.project.id,
)
group = Group.objects.first()
assert group.get_latest_event().event_id == "b" * 32
def test_get_latest_almost_identical_timestamps(self):
self.store_event(
data={
"event_id": "a" * 32,
"fingerprint": ["group-1"],
"timestamp": self.just_over_one_min_ago,
},
project_id=self.project.id,
)
self.store_event(
data={"event_id": "b" * 32, "fingerprint": ["group-1"], "timestamp": self.min_ago},
project_id=self.project.id,
)
group = Group.objects.first()
assert group.get_latest_event().event_id == "b" * 32
def test_is_ignored_with_expired_snooze(self):
group = self.create_group(status=GroupStatus.IGNORED)
GroupSnooze.objects.create(group=group, until=timezone.now() - timedelta(minutes=1))
assert not group.is_ignored()
def test_status_with_expired_snooze(self):
group = self.create_group(status=GroupStatus.IGNORED)
GroupSnooze.objects.create(group=group, until=timezone.now() - timedelta(minutes=1))
assert group.get_status() == GroupStatus.UNRESOLVED
def test_deleting_release_does_not_delete_group(self):
project = self.create_project()
release = Release.objects.create(version="a", organization_id=project.organization_id)
release.add_project(project)
group = self.create_group(project=project, first_release=release)
with pytest.raises(ProtectedError):
release.delete()
group = Group.objects.get(id=group.id)
assert group.first_release == release
def test_save_truncate_message(self):
assert len(self.create_group(message="x" * 300).message) == 255
assert self.create_group(message="\nfoo\n ").message == "foo"
assert self.create_group(message="foo").message == "foo"
assert self.create_group(message="").message == ""
def test_get_group_with_redirect(self):
group = self.create_group()
assert get_group_with_redirect(group.id) == (group, False)
duplicate_id = self.create_group().id
Group.objects.filter(id=duplicate_id).delete()
GroupRedirect.objects.create(group_id=group.id, previous_group_id=duplicate_id)
assert get_group_with_redirect(duplicate_id) == (group, True)
# We shouldn't end up in a case where the redirect points to a bad
# reference, but testing this path for completeness.
group.delete()
with pytest.raises(Group.DoesNotExist):
get_group_with_redirect(duplicate_id)
def test_get_group_with_redirect_from_qualified_short_id(self):
group = self.create_group()
assert group.qualified_short_id
assert get_group_with_redirect(
group.qualified_short_id, organization=group.project.organization
) == (group, False)
duplicate_group = self.create_group()
duplicate_id = duplicate_group.id
GroupRedirect.create_for_group(duplicate_group, group)
Group.objects.filter(id=duplicate_id).delete()
assert get_group_with_redirect(
duplicate_group.qualified_short_id, organization=group.project.organization
) == (group, True)
# We shouldn't end up in a case where the redirect points to a bad
# reference, but testing this path for completeness.
group.delete()
with pytest.raises(Group.DoesNotExist):
get_group_with_redirect(
duplicate_group.qualified_short_id, organization=group.project.organization
)
def test_invalid_shared_id(self):
with pytest.raises(Group.DoesNotExist):
Group.from_share_id("adc7a5b902184ce3818046302e94f8ec")
def test_qualified_share_id(self):
    """Qualified short ids are prefixed with the upper-cased project slug."""
    project = self.create_project(name="foo bar")
    group = self.create_group(project=project, short_id=project.next_short_id())
    qid = group.qualified_short_id
    assert qid.startswith("FOO-BAR-")
    # The qualified id round-trips back to the same group.
    looked_up = Group.objects.by_qualified_short_id(group.organization.id, qid)
    assert looked_up == group
    # Groups pending deletion are excluded from short-id lookups.
    group.update(status=GroupStatus.PENDING_DELETION)
    with self.assertRaises(Group.DoesNotExist):
        Group.objects.by_qualified_short_id(group.organization.id, qid)
def test_first_last_release(self):
    """Storing a release-tagged event wires up the group's release fields."""
    project = self.create_project()
    Release.objects.create(version="a", organization_id=project.organization_id)
    event = self.store_event(
        data={"release": "a", "timestamp": self.min_ago}, project_id=project.id
    )
    group = event.group
    # Re-fetch the release row that event ingestion associated.
    release = Release.objects.get(version="a")
    assert group.first_release == release
    assert group.get_first_release() == release.version
    assert group.get_last_release() == release.version
def test_first_release_from_tag(self):
    """The release tag on the only event is both first and last release."""
    project = self.create_project()
    event = self.store_event(
        data={"release": "a", "timestamp": self.min_ago}, project_id=project.id
    )
    assert event.group.get_first_release() == "a"
    assert event.group.get_last_release() == "a"
def test_first_last_release_miss(self):
    """A group with no release-tagged events reports no first/last release."""
    project = self.create_project()
    # A release exists on the project, but no event ever referenced it.
    release = Release.objects.create(version="a", organization_id=project.organization_id)
    release.add_project(project)
    orphan = self.create_group(project=project)
    assert orphan.first_release is None
    assert orphan.get_first_release() is None
    assert orphan.get_last_release() is None
def test_get_email_subject(self):
    """Email subjects take the form '<SHORT-ID> - <title>'."""
    group = self.create_group(project=self.create_project())
    expected = "{} - {}".format(group.qualified_short_id, group.title)
    assert group.get_email_subject() == expected
def test_get_absolute_url(self):
    """Query params (including non-ASCII values) are percent-encoded."""
    group = self.create_group(project=self.create_project(name="pumped-quagga"))
    url = group.get_absolute_url({"environment": u"d\u00E9v"})
    # The accented environment value must come back UTF-8 percent-encoded.
    expected = u"http://testserver/organizations/baz/issues/{}/?environment=d%C3%A9v".format(
        group.id
    )
    assert url == expected
| 36.119469 | 99 | 0.665932 | from __future__ import absolute_import
from datetime import timedelta
import pytest
from django.db.models import ProtectedError
from django.utils import timezone
from sentry.models import (
Group,
GroupRedirect,
GroupSnooze,
GroupStatus,
Release,
get_group_with_redirect,
)
from sentry.testutils import SnubaTestCase, TestCase
from sentry.testutils.helpers.datetime import iso_format, before_now
class GroupTest(TestCase, SnubaTestCase):
def setUp(self):
    """Pre-compute ISO-8601 timestamps used by the event-storing tests."""
    super(GroupTest, self).setUp()
    # Reference times: one / two minutes ago, plus "just over a minute"
    # so near-identical timestamps still sort unambiguously.
    self.min_ago = iso_format(before_now(minutes=1))
    self.two_min_ago = iso_format(before_now(minutes=2))
    self.just_over_one_min_ago = iso_format(before_now(seconds=61))
def test_is_resolved(self):
    """is_resolved() honors explicit status and the project auto-resolve age."""
    group = self.create_group(status=GroupStatus.RESOLVED)
    assert group.is_resolved()
    # Neither ignored nor unresolved counts as resolved by itself.
    for status in (GroupStatus.IGNORED, GroupStatus.UNRESOLVED):
        group.status = status
        assert not group.is_resolved()
    # With auto-resolve configured, a group unseen for longer than
    # "sentry:resolve_age" (hours) is considered resolved.
    group.last_seen = timezone.now() - timedelta(hours=12)
    group.project.update_option("sentry:resolve_age", 24)
    assert not group.is_resolved()
    group.project.update_option("sentry:resolve_age", 1)
    assert group.is_resolved()
def test_get_latest_event_no_events(self):
    """A group without stored events has no latest event."""
    group = self.create_group(project=self.create_project())
    assert group.get_latest_event() is None
def test_get_latest_event(self):
    """The newest event by timestamp wins, regardless of insertion order."""
    # Store an older then a newer event under the same fingerprint.
    for event_id, ts in (("a" * 32, self.two_min_ago), ("b" * 32, self.min_ago)):
        self.store_event(
            data={"event_id": event_id, "fingerprint": ["group-1"], "timestamp": ts},
            project_id=self.project.id,
        )
    group = Group.objects.first()
    assert group.get_latest_event().event_id == "b" * 32
def test_get_latest_almost_identical_timestamps(self):
    """Events only a second apart still resolve to the strictly newer one."""
    for event_id, ts in (
        ("a" * 32, self.just_over_one_min_ago),
        ("b" * 32, self.min_ago),
    ):
        self.store_event(
            data={"event_id": event_id, "fingerprint": ["group-1"], "timestamp": ts},
            project_id=self.project.id,
        )
    group = Group.objects.first()
    assert group.get_latest_event().event_id == "b" * 32
def test_is_ignored_with_expired_snooze(self):
    """An ignored group whose snooze window has passed is no longer ignored."""
    group = self.create_group(status=GroupStatus.IGNORED)
    # Snooze that expired a minute ago.
    GroupSnooze.objects.create(group=group, until=timezone.now() - timedelta(minutes=1))
    assert not group.is_ignored()
def test_status_with_expired_snooze(self):
    """get_status() falls back to UNRESOLVED once a snooze has expired."""
    group = self.create_group(status=GroupStatus.IGNORED)
    GroupSnooze.objects.create(group=group, until=timezone.now() - timedelta(minutes=1))
    assert group.get_status() == GroupStatus.UNRESOLVED
def test_deleting_release_does_not_delete_group(self):
    """Releases referenced as a group's first_release are delete-protected."""
    project = self.create_project()
    release = Release.objects.create(version="a", organization_id=project.organization_id)
    release.add_project(project)
    group = self.create_group(project=project, first_release=release)
    # The FK is PROTECT, so deleting the release must fail...
    with pytest.raises(ProtectedError):
        release.delete()
    # ...and the group keeps pointing at the release.
    group = Group.objects.get(id=group.id)
    assert group.first_release == release
def test_save_truncate_message(self):
assert len(self.create_group(message="x" * 300).message) == 255
assert self.create_group(message="\nfoo\n ").message == "foo"
assert self.create_group(message="foo").message == "foo"
assert self.create_group(message="").message == ""
def test_get_group_with_redirect(self):
group = self.create_group()
assert get_group_with_redirect(group.id) == (group, False)
duplicate_id = self.create_group().id
Group.objects.filter(id=duplicate_id).delete()
GroupRedirect.objects.create(group_id=group.id, previous_group_id=duplicate_id)
assert get_group_with_redirect(duplicate_id) == (group, True)
# reference, but testing this path for completeness.
group.delete()
with pytest.raises(Group.DoesNotExist):
get_group_with_redirect(duplicate_id)
def test_get_group_with_redirect_from_qualified_short_id(self):
group = self.create_group()
assert group.qualified_short_id
assert get_group_with_redirect(
group.qualified_short_id, organization=group.project.organization
) == (group, False)
duplicate_group = self.create_group()
duplicate_id = duplicate_group.id
GroupRedirect.create_for_group(duplicate_group, group)
Group.objects.filter(id=duplicate_id).delete()
assert get_group_with_redirect(
duplicate_group.qualified_short_id, organization=group.project.organization
) == (group, True)
# We shouldn't end up in a case where the redirect points to a bad
group.delete()
with pytest.raises(Group.DoesNotExist):
get_group_with_redirect(
duplicate_group.qualified_short_id, organization=group.project.organization
)
def test_invalid_shared_id(self):
with pytest.raises(Group.DoesNotExist):
Group.from_share_id("adc7a5b902184ce3818046302e94f8ec")
def test_qualified_share_id(self):
project = self.create_project(name="foo bar")
group = self.create_group(project=project, short_id=project.next_short_id())
short_id = group.qualified_short_id
assert short_id.startswith("FOO-BAR-")
group2 = Group.objects.by_qualified_short_id(group.organization.id, short_id)
assert group2 == group
group.update(status=GroupStatus.PENDING_DELETION)
with self.assertRaises(Group.DoesNotExist):
Group.objects.by_qualified_short_id(group.organization.id, short_id)
def test_first_last_release(self):
project = self.create_project()
release = Release.objects.create(version="a", organization_id=project.organization_id)
event = self.store_event(
data={"release": "a", "timestamp": self.min_ago}, project_id=project.id
)
group = event.group
release = Release.objects.get(version="a")
assert group.first_release == release
assert group.get_first_release() == release.version
assert group.get_last_release() == release.version
def test_first_release_from_tag(self):
project = self.create_project()
event = self.store_event(
data={"release": "a", "timestamp": self.min_ago}, project_id=project.id
)
group = event.group
assert group.get_first_release() == "a"
assert group.get_last_release() == "a"
def test_first_last_release_miss(self):
project = self.create_project()
release = Release.objects.create(version="a", organization_id=project.organization_id)
release.add_project(project)
group = self.create_group(project=project)
assert group.first_release is None
assert group.get_first_release() is None
assert group.get_last_release() is None
def test_get_email_subject(self):
project = self.create_project()
group = self.create_group(project=project)
assert group.get_email_subject() == "%s - %s" % (group.qualified_short_id, group.title)
def test_get_absolute_url(self):
project = self.create_project(name="pumped-quagga")
group = self.create_group(project=project)
result = group.get_absolute_url({"environment": u"d\u00E9v"})
assert (
result
== u"http://testserver/organizations/baz/issues/{}/?environment=d%C3%A9v".format(
group.id
)
)
| true | true |
f730dc094d5bd9f4ac6d2d6e6904d61429656ce2 | 944 | py | Python | fuzzer/handling_module.py | junorouse/dynamic-fuzzer | 7cb82c56da776211458338771854ecf7071ca1e7 | [
"MIT"
] | 17 | 2017-02-20T02:06:05.000Z | 2022-02-23T03:07:22.000Z | fuzzer/handling_module.py | junorouse/dynamic-fuzzer | 7cb82c56da776211458338771854ecf7071ca1e7 | [
"MIT"
] | 6 | 2017-02-09T06:15:34.000Z | 2020-10-02T05:42:25.000Z | fuzzer/handling_module.py | junorouse/dynamic-fuzzer | 7cb82c56da776211458338771854ecf7071ca1e7 | [
"MIT"
] | 2 | 2019-09-22T01:19:32.000Z | 2020-05-20T01:56:53.000Z | import importlib
from json import loads
from base64 import b64decode
import sys
import gevent.monkey
gevent.monkey.patch_all()
import gevent
import config
from os import getpid
from requests import post
def fetch(module):
    """Dynamically import fuzz module ``modules.<module>`` and run its go()."""
    global message
    mod = importlib.import_module('modules.' + module)
    # Every fuzz module exposes go(message) as its entry point.
    mod.go(message)
def asynchronous():
    """Spawn one greenlet per configured fuzz module and wait for all."""
    print("\033[91m>--------------------------------------------------------------------------------------<\033[37m")
    workers = []
    # MODULE_LIST entries are tuples; index 1 is the module name.
    for entry in config.MODULE_LIST:
        print("\033[36m" + entry[1] + "\033[37m")
        workers.append(gevent.spawn(fetch, entry[1]))
    gevent.joinall(workers)
# --- script entry point --------------------------------------------------
# argv[1]: base64-encoded JSON message describing the fuzz target
# argv[2]: base64-encoded host identifier
x = sys.argv[1]
x = b64decode(x).decode("utf-8")
host = sys.argv[2]
host = b64decode(host).decode("utf-8")
# Register this worker with the local coordinator by pid, run all fuzz
# modules, then deregister.
pid = getpid()
data = {
    'host': host,
    'pid': str(pid),
}
post('http://localhost:8787/add', data=data)
message = loads(x)
asynchronous()
post('http://localhost:8787/delete', data=data)
| 20.085106 | 117 | 0.616525 | import importlib
from json import loads
from base64 import b64decode
import sys
import gevent.monkey
gevent.monkey.patch_all()
import gevent
import config
from os import getpid
from requests import post
def fetch(module):
global message
m = importlib.import_module('modules.'+module)
m.go(message)
def asynchronous():
threads = []
print("\033[91m>--------------------------------------------------------------------------------------<\033[37m")
for module in config.MODULE_LIST:
print("\033[36m"+module[1]+"\033[37m")
threads.append(gevent.spawn(fetch, module[1]))
gevent.joinall(threads)
x = sys.argv[1]
x = b64decode(x).decode("utf-8")
host = sys.argv[2]
host = b64decode(host).decode("utf-8")
pid = getpid()
data = {
'host': host,
'pid': str(pid),
}
post('http://localhost:8787/add', data=data)
message = loads(x)
asynchronous()
post('http://localhost:8787/delete', data=data)
| true | true |
f730dded7d098ef55bd8cd83fe02c10e14f1e36a | 16,100 | py | Python | dynamic_selection/trainer/archive/svd_classifier.py | Kthyeon/FINE | ae8a24a4a2514feafd9a9ed394af87f397708ccf | [
"MIT"
] | 2 | 2021-12-22T02:25:00.000Z | 2022-01-06T09:33:11.000Z | dynamic_selection/trainer/archive/svd_classifier.py | Kthyeon/FINE | ae8a24a4a2514feafd9a9ed394af87f397708ccf | [
"MIT"
] | null | null | null | dynamic_selection/trainer/archive/svd_classifier.py | Kthyeon/FINE | ae8a24a4a2514feafd9a9ed394af87f397708ccf | [
"MIT"
] | 2 | 2021-10-01T14:39:06.000Z | 2022-01-06T09:33:12.000Z | import torch
import numpy as np
from tqdm import tqdm
from sklearn import cluster
#bol_norm True -> Divide by norm of feature
def same_score(v_ortho_dict, features, labels, bol_norm=False):
    """Score each sample by |<first singular vector of its class, feature>|.

    With ``bol_norm=True`` the dot product is divided by the feature's L2
    norm, making the score a cosine-style alignment.
    """
    feats = torch.from_numpy(features).cuda()
    out = torch.zeros(feats.shape[0])
    for i, vec in enumerate(feats):
        alignment = torch.dot(v_ortho_dict[labels[i]][0], vec).abs()
        out[i] = alignment / torch.norm(vec, p=2) if bol_norm else alignment
    return out
def same_topk(label_list, scores, p):
    """Per-class filtering: drop the lowest-scoring ``int(p * class_size)``
    samples of every class.

    Returns the surviving indices as a long tensor, ordered class by class
    and, within a class, by ascending score.
    """
    kept = []
    for cls in range(len(np.unique(label_list))):
        mask = label_list == cls
        n_drop = int(p * np.sum(mask))
        cls_indices = torch.tensor(range(len(label_list)))[mask]
        _, order = torch.sort(scores[mask], descending=False)
        # Everything past the first n_drop (lowest) scores survives.
        kept += cls_indices[order[n_drop:]].numpy().tolist()
    return torch.tensor(kept).long()
#Classswise kmenas
def same_kmeans(label_list, scores, p=None):
    """Per-class 2-means split of the scores; keep the higher-mean cluster.

    ``p`` is accepted only for interface parity with ``same_topk``; unused.
    """
    kept = []
    for cls in range(len(np.unique(label_list))):
        cls_indices = torch.tensor(range(len(scores)))[label_list == cls]
        km = cluster.KMeans(n_clusters=2, random_state=0).fit(
            scores[cls_indices].reshape(-1, 1)
        )
        # Canonicalize labels so cluster 0 is the higher-mean (kept) one.
        if torch.mean(scores[cls_indices][km.labels_ == 0]) < torch.mean(
            scores[cls_indices][km.labels_ == 1]
        ):
            km.labels_ = 1 - km.labels_
        kept += cls_indices[km.labels_ == 0].numpy().tolist()
    return torch.tensor(kept).long()
#Total Kmeans
def same_kmeans_total(scores, p=None):
    """Single global 2-means split over all scores.

    Returns ``(kept_indices, None)``, where kept indices belong to the
    higher-mean cluster. ``p`` is unused (interface parity).
    """
    km = cluster.KMeans(n_clusters=2, random_state=0).fit(scores.reshape(-1, 1))
    # Canonicalize so cluster 0 is the higher-mean cluster.
    if torch.mean(scores[km.labels_ == 0]) < torch.mean(scores[km.labels_ == 1]):
        km.labels_ = 1 - km.labels_
    kept = [i for i, lbl in enumerate(km.labels_) if lbl == 0]
    return torch.tensor(kept).long(), None
def same_topk_index(orig_label_list, orig_out_list, prev_label_list, prev_out_list, p=None):
    """Fit class singular vectors on the previous (filtered) split, score the
    original data against them, and keep the per-class top fraction.
    Returns a numpy array of selected sample indices.
    """
    _, v_ortho_dict = get_singular_value_vector(prev_label_list, prev_out_list)
    for cls in v_ortho_dict:
        v_ortho_dict[cls] = v_ortho_dict[cls].cuda()
    scores = same_score(v_ortho_dict, orig_out_list, orig_label_list)
    return same_topk(orig_label_list, scores, p).numpy()
def same_kmeans_index(orig_label_list, orig_out_list, prev_label_list, prev_out_list, p=None):
    """Like ``same_topk_index`` but selects via per-class 2-means on the
    alignment scores instead of a fixed top-k fraction.
    Returns a numpy array of selected sample indices.
    """
    _, v_ortho_dict = get_singular_value_vector(prev_label_list, prev_out_list)
    for cls in v_ortho_dict:
        v_ortho_dict[cls] = v_ortho_dict[cls].cuda()
    scores = same_score(v_ortho_dict, orig_out_list, orig_label_list)
    return same_kmeans(orig_label_list, scores, p).numpy()
def compute_noisy_ratio(data_loader):
    """Print label purity: the fraction of samples whose (noisy) label equals
    the ground-truth label.

    NOTE(review): functionally identical to ``isNoisy_ratio`` in this module;
    consider keeping only one.
    """
    clean_flags = np.empty((0,))
    with tqdm(data_loader) as progress:
        for _, (_, label, index, label_gt) in enumerate(progress):
            clean_flags = np.concatenate((clean_flags, (label == label_gt).cpu()))
    print('#############################')
    print(clean_flags.sum(), clean_flags.shape)
    print('purity in this dataset: {}'.format(clean_flags.sum() / clean_flags.shape))
def get_loss_list(model, data_loader):
    """Collect per-sample cross-entropy losses, 2-means them, and return the
    indices falling in the lower-loss ("clean") cluster as a Python list."""
    losses = np.empty((0,))
    with tqdm(data_loader) as progress:
        for batch_idx, (data, label, index, label_gt) in enumerate(progress):
            data = data.cuda()
            label, label_gt = label.long().cuda(), label_gt.long().cuda()
            _, logits = model(data)
            batch_loss = torch.nn.CrossEntropyLoss(reduction='none')(logits, label)
            losses = np.concatenate((losses, batch_loss.detach().cpu()))
    km = cluster.KMeans(n_clusters=2, random_state=0).fit(losses.reshape(-1, 1))
    # The cluster with the smaller mean loss is treated as clean.
    if np.mean(losses[km.labels_ == 0]) > np.mean(losses[km.labels_ == 1]):
        clean_cluster = 1
    else:
        clean_cluster = 0
    return [i for i, lbl in enumerate(km.labels_) if lbl == clean_cluster]
def iterative_eigen(number, label_list, out_list, teacher_idx=None):
    """Iteratively refine a clean/noisy split via class-wise eigen filtering.

    Runs up to ``number`` rounds: fit per-class singular vectors on the
    currently-kept samples, relabel every sample against them, and stop
    early once the assignment is stable across two rounds.

    Parameters
    ----------
    number : int
        Maximum number of refinement rounds.
    label_list : np.ndarray
        (Noisy) integer label per sample.
    out_list : np.ndarray
        Feature/representation matrix, one row per sample.
    teacher_idx : list[int] | None
        Optional whitelist of indices to start from; everything else is
        excluded in round 0.

    Returns
    -------
    list[int]
        Indices judged clean.
    """
    n_samples = len(label_list)  # was hard-coded to 50000 (CIFAR-sized)
    sin_lbls = {}
    for i in range(number):
        # tmp_lbl: 0 = currently kept, >0 = excluded (teacher or prior round).
        tmp_lbl = torch.zeros(n_samples)
        if teacher_idx is not None:  # identity check, not "!= None"
            for num in set(range(n_samples)) - set(teacher_idx):
                tmp_lbl[num] += 1
        print(tmp_lbl.sum().item())
        for k in range(i):
            tmp_lbl += sin_lbls[k]
        keep_mask = (tmp_lbl == 0).numpy()
        singular_dict, v_ortho_dict = get_singular_value_vector(
            label_list[keep_mask], out_list[keep_mask]
        )
        for key in v_ortho_dict.keys():
            v_ortho_dict[key] = v_ortho_dict[key].cuda()
        sing_lbl, sin_score_lbl = singular_label(v_ortho_dict, out_list, label_list)
        sin_lbls[i] = sing_lbl
        if i > 0 and torch.all(torch.eq(sin_lbls[i], sin_lbls[i - 1])):
            print(i)
            break
    if number == 1:
        # Single round: keep everything not flagged noisy.
        output = [idx for idx, value in enumerate(sing_lbl) if value == 0]
    else:
        # BUG FIX: this branch previously clustered an undefined ``loss_list``
        # (NameError); cluster the eigen scores that were actually computed.
        scores = np.asarray(sin_score_lbl)
        kmeans = cluster.KMeans(n_clusters=2, random_state=0).fit(scores.reshape(-1, 1))
        # The cluster with the higher mean alignment score is clean.
        if np.mean(scores[kmeans.labels_ == 0]) > np.mean(scores[kmeans.labels_ == 1]):
            clean_label = 0
        else:
            clean_label = 1
        output = [idx for idx, value in enumerate(kmeans.labels_) if value == clean_label]
    return output
def get_out_list(model, data_loader):
    """Run ``model`` over ``data_loader`` and collect (labels, features).

    Returns ``(label_list, out_list)`` where ``out_list[i]`` is the model's
    first output for sample ``i`` (as a CPU array) and ``label_list`` the
    corresponding labels.
    """
    labels = np.empty((0,))
    model.eval()
    model.cuda()
    with tqdm(data_loader) as progress:
        for batch_idx, (data, label, index, _) in enumerate(progress):
            data = data.cuda()
            label = label.long()
            output, _ = model(data)
            labels = np.concatenate((labels, label.cpu()))
            if batch_idx == 0:
                feats = output.detach().cpu()
            else:
                feats = np.concatenate((feats, output.detach().cpu()), axis=0)
    return labels, feats
def get_singular_value_vector(label_list, out_list):
    """Per-class SVD summary of the feature matrix.

    Returns two dicts keyed by class label:
      * the ratio of the top two singular values (spectral dominance), and
      * the top two right-singular vectors as a (2, d) torch tensor.
    """
    ratio_by_class = {}
    basis_by_class = {}
    for cls in np.unique(label_list):
        _, sing_vals, v = np.linalg.svd(out_list[label_list == cls])
        ratio_by_class[cls] = sing_vals[0] / sing_vals[1]
        basis_by_class[cls] = torch.from_numpy(v[:2])
    return ratio_by_class, basis_by_class
def singular_label(v_ortho_dict, model_represents, label):
    """Flag samples whose feature aligns more with the class's second
    singular vector than its first (flag 1 = suspected noisy).

    Returns ``(sing_lbl, gap)``: the 0/1 flags and the per-sample
    first-minus-second alignment gap.
    """
    feats = torch.from_numpy(model_represents).cuda()
    sing_lbl = torch.zeros(feats.shape[0])
    gap = torch.zeros(feats.shape[0])
    for i, vec in enumerate(feats):
        first = torch.dot(v_ortho_dict[label[i]][0], vec).abs()
        second = torch.dot(v_ortho_dict[label[i]][1], vec).abs()
        gap[i] = first - second
        if first < second:
            sing_lbl[i] = 1
    return sing_lbl, gap
def kmean_singular_label(v_ortho_dict, model_represents, label):
    """2-means over the (first minus second singular-vector alignment) gap;
    return indices in the higher-gap ("clean") cluster as a list."""
    feats = torch.from_numpy(model_represents).cuda()
    gap = torch.zeros(feats.shape[0])
    for i, vec in enumerate(feats):
        gap[i] = (
            torch.dot(v_ortho_dict[label[i]][0], vec).abs()
            - torch.dot(v_ortho_dict[label[i]][1], vec).abs()
        )
    km = cluster.KMeans(n_clusters=2, random_state=0).fit(gap.reshape(-1, 1))
    # Canonicalize: cluster 0 is the higher-mean (clean) cluster.
    if torch.mean(gap[km.labels_ == 0]) < torch.mean(gap[km.labels_ == 1]):
        km.labels_ = 1 - km.labels_
    return [i for i, lbl in enumerate(km.labels_) if lbl == 0]
def kmean_singular_label2(v_ortho_dict, model_represents, label):
    """2-means over the norm-scaled first-singular-vector alignment; return
    indices in the higher-alignment ("clean") cluster as a list."""
    feats = torch.from_numpy(model_represents).cuda()
    score = torch.zeros(feats.shape[0])
    for i, vec in enumerate(feats):
        score[i] = torch.dot(v_ortho_dict[label[i]][0], vec).abs() / torch.norm(vec, p=2)
    km = cluster.KMeans(n_clusters=2, random_state=0).fit(score.reshape(-1, 1))
    # Canonicalize: cluster 0 is the higher-mean (clean) cluster.
    if torch.mean(score[km.labels_ == 0]) < torch.mean(score[km.labels_ == 1]):
        km.labels_ = 1 - km.labels_
    return [i for i, lbl in enumerate(km.labels_) if lbl == 0]
def kmean_eigen_out(label_list, out_list, teacher_idx=None):
    """Fit class eigenvectors on all samples, then k-means the alignment gap.

    ``teacher_idx`` is accepted for interface parity but unused here.
    """
    _, v_ortho_dict = get_singular_value_vector(label_list, out_list)
    for cls in v_ortho_dict:
        v_ortho_dict[cls] = v_ortho_dict[cls].cuda()
    return kmean_singular_label(v_ortho_dict, out_list, label_list)
def topk_eigen_kmean(label_list, out_list, teacher_idx=None):
    """Fit class eigenvectors on all samples, then k-means the norm-scaled
    alignment scores.

    ``teacher_idx`` is accepted for interface parity but unused here.
    """
    _, v_ortho_dict = get_singular_value_vector(label_list, out_list)
    for cls in v_ortho_dict:
        v_ortho_dict[cls] = v_ortho_dict[cls].cuda()
    return kmean_singular_label2(v_ortho_dict, out_list, label_list)
def get_anchor(label_list, out_list, teacher_idx=None):
    """Two-stage eigen selection via per-class "anchor" samples.

    Stage 1: fit per-class singular vectors on all samples and score each
    sample's alignment with its class's first vector.  Stage 2: refit a
    single direction per class on the 50 best-aligned anchors only, then
    k-means the norm-scaled alignment scores to pick clean indices.

    NOTE(review): k=50 anchors per class is hard-coded — this assumes every
    class has at least 50 samples; confirm for small datasets.
    ``teacher_idx`` is accepted but unused.
    """
    label_list = torch.from_numpy(label_list).long().numpy()
    singular_dict, v_ortho_dict = get_singular_value_vector(label_list, out_list)
    for key in v_ortho_dict.keys():
        v_ortho_dict[key] = v_ortho_dict[key].cuda()
    model_represents = torch.from_numpy(out_list).cuda()
    # Per-class lists of |<v1, feature>| alignment scores.
    sin_score_lbl = [[] for _ in range(len(np.unique(label_list)))]
    for i, data in enumerate(model_represents):
        sin_score_lbl[label_list[i]].append(torch.dot(v_ortho_dict[label_list[i]][0], data).abs())
    # classwise topk: refit each class direction on its top-aligned anchors.
    v_ortho_dict_ = {}
    for index in np.unique(label_list):
        cls_score_lbl = sin_score_lbl[index]
        topk_v, topk_i = torch.topk(torch.tensor(cls_score_lbl), k=50)
        u, s, v = np.linalg.svd(model_represents[label_list==index][topk_i].cpu().numpy())
        v_ortho_dict_[index] = torch.from_numpy(v[0]).unsqueeze(0).cuda()
    output = kmean_singular_label2(v_ortho_dict_, model_represents.cpu().numpy(), label_list)
    return output
def isNoisy_ratio(data_loader):
    """Report label purity of the loader (noisy label == ground-truth label).

    NOTE(review): functionally identical to ``compute_noisy_ratio`` above.
    """
    matches = np.empty((0,))
    with tqdm(data_loader) as progress:
        for _, (_, label, index, label_gt) in enumerate(progress):
            isNoisy = label == label_gt
            matches = np.concatenate((matches, isNoisy.cpu()))
    print('#############################')
    print(matches.sum(), matches.shape)
    print('purity in this dataset: {}'.format(matches.sum() / matches.shape))
def extract_teacherIdx(teacher, data_loader, parse):
    """Load up to three teacher checkpoints and intersect their clean-sample
    index selections.

    Arguments (as used here):
      teacher     -- model whose weights are overwritten from each checkpoint
      data_loader -- loader handed to the per-mode selection routine
      parse       -- namespace with load_name / second_load_name /
                     third_load_name, distill_mode, reinit

    Returns the list of indices judged clean by every loaded teacher.

    NOTE(review): ``model`` below is not defined anywhere in this module, so
    the ``if not parse.reinit`` branches would raise NameError if taken —
    confirm against the original training script before relying on them.
    """
    teacher.load_state_dict(torch.load('./checkpoint/' + parse.load_name)['state_dict'])
    teacher = teacher.cuda()
    if not parse.reinit:
        model.load_state_dict(torch.load('./checkpoint/' + parse.load_name)['state_dict'])
    # Teachers are used for selection only; freeze their parameters.
    for params in teacher.parameters():
        params.requires_grad = False
    # First teacher: pick the selection strategy from distill_mode.
    if parse.distill_mode == 'eigen':
        tea_label_list, tea_out_list = get_out_list(teacher, data_loader)
        teacher_idx = iterative_eigen(1,tea_label_list,tea_out_list)
    elif parse.distill_mode == 'fulleigen':
        tea_label_list, tea_out_list = get_out_list(teacher, data_loader)
        teacher_idx = iterative_eigen(100,tea_label_list,tea_out_list)
    elif parse.distill_mode == 'kmean_eigen':
        tea_label_list, tea_out_list = get_out_list(teacher, data_loader)
        teacher_idx = kmean_eigen_out(tea_label_list, tea_out_list)
    elif parse.distill_mode == 'topk_eigen_kmean':
        tea_label_list, tea_out_list = get_out_list(teacher, data_loader)
        teacher_idx = topk_eigen_kmean(tea_label_list, tea_out_list)
    else:
        # Fallback: small-loss (loss-clustering) selection.
        teacher_idx = get_loss_list(teacher, data_loader)
    print('||||||original||||||')
    isNoisy_ratio(data_loader)
    # Optional second teacher: intersect its selection with the first.
    if parse.second_load_name !=None:
        teacher.load_state_dict(torch.load('./checkpoint/' + parse.second_load_name)['state_dict'])
        teacher = teacher.cuda()
        if not parse.reinit:
            model.load_state_dict(torch.load('./checkpoint/' + parse.second_load_name)['state_dict'])
        for params in teacher.parameters():
            params.requires_grad = False
        if parse.distill_mode == 'eigen':
            tea_label_list, tea_out_list = get_out_list(teacher, data_loader)
            teacher_idx2 = iterative_eigen(1,tea_label_list,tea_out_list,teacher_idx)
        elif parse.distill_mode == 'fulleigen':
            tea_label_list, tea_out_list = get_out_list(teacher, data_loader)
            teacher_idx2 = iterative_eigen(100,tea_label_list,tea_out_list)
        else:
            teacher_idx2 = get_loss_list(teacher, data_loader)
        teacher_idx = list(set(teacher_idx) & set(teacher_idx2))
        print('second_distillation')
    # Optional third teacher: intersect again.
    if parse.third_load_name !=None:
        teacher.load_state_dict(torch.load('./checkpoint/' + parse.third_load_name)['state_dict'])
        teacher = teacher.cuda()
        if not parse.reinit:
            model.load_state_dict(torch.load('./checkpoint/' + parse.third_load_name)['state_dict'])
        for params in teacher.parameters():
            params.requires_grad = False
        if parse.distill_mode == 'eigen':
            tea_label_list, tea_out_list = get_out_list(teacher, data_loader)
            teacher_idx3 = iterative_eigen(1,tea_label_list,tea_out_list, teacher_idx)
        elif parse.distill_mode == 'fulleigen':
            tea_label_list, tea_out_list = get_out_list(teacher, data_loader)
            teacher_idx3 = iterative_eigen(100,tea_label_list,tea_out_list)
        else:
            teacher_idx3 = get_loss_list(teacher, data_loader)
        teacher_idx = list(set(teacher_idx) & set(teacher_idx3))
        print('third_ distillation')
    return teacher_idx
# def get_loss_list_2d(model, data_loader, n_clusters=2, c_clusters=1):
# loss_list = np.empty((0, 2))
# model.cuda()
# with tqdm(data_loader) as progress:
# for batch_idx, (data, label, index, label_gt) in enumerate(progress):
# data = data.cuda()
# label, label_gt = label.long().cuda(), label_gt.long().cuda()
# _, pred = model(data)
# loss = torch.nn.CrossEntropyLoss(reduction='none')(pred, label)
# prob = torch.softmax(pred, dim=-1)
# top2_log_pred, top2_ind = torch.topk(torch.log(prob), k=n_clusters, dim=-1)
# is_pred_wrong = (top2_ind[:, 0] != label).bool()
# is_pred_correct = (top2_ind[:, 0] == label).bool()
# label_top1 = torch.stack([loss, -top2_log_pred[:, 0]], dim=1) # for pred wrong
# top2_log_pred = -top2_log_pred
# top2_log_pred[is_pred_wrong] = label_top1[is_pred_wrong]
# loss_list = np.concatenate((loss_list, top2_log_pred.detach().cpu().numpy()), axis=0)
# kmeans = cluster.KMeans(n_clusters=n_clusters, random_state=0).fit(loss_list.reshape(50000,2))
# mean_losses = []
# for itr in range(n_clusters):
# mean_losses.append(np.mean(loss_list[kmeans.labels_==itr][:, 0]))
# _, clean_labels = torch.topk(-torch.tensor(mean_losses), k=c_clusters)
# output=[]
# for idx, value in enumerate(kmeans.labels_):
# if value in clean_labels:
# output.append(idx)
# return output
| 39.655172 | 126 | 0.646149 | import torch
import numpy as np
from tqdm import tqdm
from sklearn import cluster
def same_score(v_ortho_dict, features, labels, bol_norm=False):
features = torch.from_numpy(features).cuda()
scores = torch.zeros(features.shape[0])
for indx, feat in enumerate(features):
tmp_scores = torch.dot(v_ortho_dict[labels[indx]][0], feat).abs()
scores[indx] = (tmp_scores / torch.norm(feat, p=2)) if bol_norm else tmp_scores
return scores
def same_topk(label_list, scores, p):
output = []
for idx in range(len(np.unique(label_list))):
num_inst = int(p * np.sum(label_list==idx))
indexs = torch.tensor(range(len(label_list)))[label_list==idx]
tmp_sort, tmp_idx = torch.sort(scores[label_list==idx], descending=False)
output += indexs[tmp_idx[num_inst:]].numpy().tolist()
return torch.tensor(output).long()
def same_kmeans(label_list, scores, p=None):
output = []
for idx in range(len(np.unique(label_list))):
indexs = torch.tensor(range(len(scores)))[label_list==idx]
kmeans = cluster.KMeans(n_clusters=2, random_state=0).fit(scores[indexs].reshape(-1, 1))
if torch.mean(scores[indexs][kmeans.labels_==0]) < torch.mean(scores[indexs][kmeans.labels_==1]):
kmeans.labels_ = 1 - kmeans.labels_
output += indexs[kmeans.labels_ == 0].numpy().tolist()
return torch.tensor(output).long()
def same_kmeans_total(scores, p=None):
output = []
indexs = torch.tensor(range(len(scores)))
kmeans = cluster.KMeans(n_clusters=2, random_state=0).fit(scores.reshape(-1, 1))
if torch.mean(scores[kmeans.labels_==0]) < torch.mean(scores[kmeans.labels_==1]):
kmeans.labels_ = 1 - kmeans.labels_
for idx, value in enumerate(kmeans.labels_):
if value == 0:
output.append(idx)
return torch.tensor(output).long(), None
def same_topk_index(orig_label_list, orig_out_list, prev_label_list, prev_out_list, p=None):
singular_dict, v_ortho_dict = get_singular_value_vector(prev_label_list, prev_out_list)
for key in v_ortho_dict.keys():
v_ortho_dict[key] = v_ortho_dict[key].cuda()
scores = same_score(v_ortho_dict, orig_out_list, orig_label_list)
output = same_topk(orig_label_list, scores, p)
return output.numpy()
def same_kmeans_index(orig_label_list, orig_out_list, prev_label_list, prev_out_list, p=None):
singular_dict, v_ortho_dict = get_singular_value_vector(prev_label_list, prev_out_list)
for key in v_ortho_dict.keys():
v_ortho_dict[key] = v_ortho_dict[key].cuda()
scores = same_score(v_ortho_dict, orig_out_list, orig_label_list)
output = same_kmeans(orig_label_list, scores, p)
return output.numpy()
def compute_noisy_ratio(data_loader):
isNoisy_list = np.empty((0,))
with tqdm(data_loader) as progress:
for _, (_, label, index, label_gt) in enumerate(progress):
isNoisy = label == label_gt
isNoisy_list = np.concatenate((isNoisy_list, isNoisy.cpu()))
print ('#############################')
print (isNoisy_list.sum(), isNoisy_list.shape)
print('purity in this dataset: {}'.format(isNoisy_list.sum() / isNoisy_list.shape))
def get_loss_list(model, data_loader):
loss_list = np.empty((0,))
with tqdm(data_loader) as progress:
for batch_idx, (data, label, index, label_gt) in enumerate(progress):
data = data.cuda()
label, label_gt = label.long().cuda(), label_gt.long().cuda()
_, prediction = model(data)
loss = torch.nn.CrossEntropyLoss(reduction='none')(prediction, label)
loss_list = np.concatenate((loss_list, loss.detach().cpu()))
kmeans = cluster.KMeans(n_clusters=2, random_state=0).fit(loss_list.reshape(-1,1))
if np.mean(loss_list[kmeans.labels_==0]) > np.mean(loss_list[kmeans.labels_==1]):
clean_label = 1
else:
clean_label = 0
output=[]
for idx, value in enumerate(kmeans.labels_):
if value==clean_label:
output.append(idx)
return output
def iterative_eigen(number, label_list, out_list, teacher_idx=None):
sin_lbls = {}
for i in range(number):
tmp_lbl = torch.zeros(50000)
if teacher_idx !=None:
for num in (set(range(0,50000)) - set(teacher_idx)):
tmp_lbl[num] += 1
print(tmp_lbl.sum().item())
for k in range(i):
tmp_lbl += sin_lbls[k]
singular_dict, v_ortho_dict = get_singular_value_vector(label_list[tmp_lbl==0], out_list[tmp_lbl==0])
for key in v_ortho_dict.keys():
v_ortho_dict[key] = v_ortho_dict[key].cuda()
sing_lbl, sin_score_lbl = singular_label(v_ortho_dict, out_list, label_list)
sin_lbls[i]=sing_lbl
if i>0 and torch.all(torch.eq(sin_lbls[i], sin_lbls[i-1])):
print(i)
break
if number ==1:
output=[]
for idx, value in enumerate(sing_lbl):
if value==0:
output.append(idx)
else:
kmeans = cluster.KMeans(n_clusters=2, random_state=0).fit(loss_list.reshape(-1,1))
if np.mean(sin_score_lbl[kmeans.labels_==0]) > np.mean(sin_score_lbl[kmeans.labels_==1]):
clean_label = 0
else:
clean_label = 1
output=[]
for idx, value in enumerate(kmeans.labels_):
if value==clean_label:
output.append(idx)
return output
def get_out_list(model, data_loader):
label_list = np.empty((0,))
model.eval()
model.cuda()
with tqdm(data_loader) as progress:
for batch_idx, (data, label, index, _) in enumerate(progress):
data = data.cuda()
label = label.long()
output, _ = model(data)
label_list = np.concatenate((label_list, label.cpu()))
if batch_idx == 0:
out_list = output.detach().cpu()
else:
out_list = np.concatenate((out_list, output.detach().cpu()), axis=0)
return label_list, out_list
def get_singular_value_vector(label_list, out_list):
singular_dict = {}
v_ortho_dict = {}
for index in np.unique(label_list):
u, s, v = np.linalg.svd(out_list[label_list==index])
singular_dict[index] = s[0] / s[1]
v_ortho_dict[index] = torch.from_numpy(v[:2])
return singular_dict, v_ortho_dict
def singular_label(v_ortho_dict, model_represents, label):
model_represents = torch.from_numpy(model_represents).cuda()
sing_lbl = torch.zeros(model_represents.shape[0])
sin_score_lbl = torch.zeros(model_represents.shape[0])
for i, data in enumerate(model_represents):
sin_score_lbl[i] = torch.dot(v_ortho_dict[label[i]][0], data).abs() - torch.dot(v_ortho_dict[label[i]][1], data).abs()
if torch.dot(v_ortho_dict[label[i]][0], data).abs() < torch.dot(v_ortho_dict[label[i]][1], data).abs():
sing_lbl[i] = 1
return sing_lbl, sin_score_lbl
def kmean_singular_label(v_ortho_dict, model_represents, label):
model_represents = torch.from_numpy(model_represents).cuda()
sing_lbl = torch.zeros(model_represents.shape[0])
sin_score_lbl = torch.zeros(model_represents.shape[0])
for i, data in enumerate(model_represents):
sin_score_lbl[i] = torch.dot(v_ortho_dict[label[i]][0], data).abs() - torch.dot(v_ortho_dict[label[i]][1], data).abs()
kmeans = cluster.KMeans(n_clusters=2, random_state=0).fit(sin_score_lbl.reshape(-1, 1))
if torch.mean(sin_score_lbl[kmeans.labels_==0]) < torch.mean(sin_score_lbl[kmeans.labels_==1]):
kmeans.labels_ = 1 - kmeans.labels_
output = []
for idx, value in enumerate(kmeans.labels_):
if value == 0:
output.append(idx)
return output
def kmean_singular_label2(v_ortho_dict, model_represents, label):
    """Variant of kmean_singular_label using a normalised score.

    Score per sample is |<v1, x>| / ||x||_2 (alignment with the class's
    leading singular direction, scale-invariant).  Returns the indices of
    the higher-score 2-means cluster.
    """
    model_represents = torch.from_numpy(model_represents).cuda()
    sing_lbl = torch.zeros(model_represents.shape[0])  # unused; kept for parity with singular_label
    sin_score_lbl = torch.zeros(model_represents.shape[0])
    for i, data in enumerate(model_represents):
        sin_score_lbl[i] = torch.dot(v_ortho_dict[label[i]][0], data).abs() / torch.norm(data, p=2)
    kmeans = cluster.KMeans(n_clusters=2, random_state=0).fit(sin_score_lbl.reshape(-1, 1))
    # Ensure cluster 0 is the one with the higher mean score.
    if torch.mean(sin_score_lbl[kmeans.labels_==0]) < torch.mean(sin_score_lbl[kmeans.labels_==1]):
        kmeans.labels_ = 1 - kmeans.labels_
    output = []
    for idx, value in enumerate(kmeans.labels_):
        if value == 0:
            output.append(idx)
    return output
def kmean_eigen_out(label_list, out_list, teacher_idx=None):
    """Select indices judged clean via k-means over per-class singular scores."""
    _, v_ortho_dict = get_singular_value_vector(label_list, out_list)
    for cls in v_ortho_dict:
        v_ortho_dict[cls] = v_ortho_dict[cls].cuda()
    return kmean_singular_label(v_ortho_dict, out_list, label_list)
def topk_eigen_kmean(label_list, out_list, teacher_idx=None):
    """Like kmean_eigen_out, but scores with the normalised projection
    (kmean_singular_label2) instead of the raw score difference."""
    _, v_ortho_dict = get_singular_value_vector(label_list, out_list)
    for cls in v_ortho_dict:
        v_ortho_dict[cls] = v_ortho_dict[cls].cuda()
    return kmean_singular_label2(v_ortho_dict, out_list, label_list)
def get_anchor(label_list, out_list, teacher_idx=None):
    """Build a per-class "anchor" direction from the top-50 best-aligned
    samples, then reselect clean indices against those anchors.

    NOTE(review): torch.topk with k=50 assumes every class has at least 50
    samples - confirm for small group sizes.
    """
    label_list = torch.from_numpy(label_list).long().numpy()
    singular_dict, v_ortho_dict = get_singular_value_vector(label_list, out_list)
    for key in v_ortho_dict.keys():
        v_ortho_dict[key] = v_ortho_dict[key].cuda()
    model_represents = torch.from_numpy(out_list).cuda()
    # Per-class list of |<v1, x>| alignment scores.
    sin_score_lbl = [[] for _ in range(len(np.unique(label_list)))]
    for i, data in enumerate(model_represents):
        sin_score_lbl[label_list[i]].append(torch.dot(v_ortho_dict[label_list[i]][0], data).abs())
    # Re-estimate each class's leading direction from its 50 best-aligned samples.
    v_ortho_dict_ = {}
    for index in np.unique(label_list):
        cls_score_lbl = sin_score_lbl[index]
        topk_v, topk_i = torch.topk(torch.tensor(cls_score_lbl), k=50)
        u, s, v = np.linalg.svd(model_represents[label_list==index][topk_i].cpu().numpy())
        v_ortho_dict_[index] = torch.from_numpy(v[0]).unsqueeze(0).cuda()
    output = kmean_singular_label2(v_ortho_dict_, model_represents.cpu().numpy(), label_list)
    return output
def isNoisy_ratio(data_loader):
    """Print the fraction of samples whose (possibly noisy) label matches
    the ground-truth label (dataset "purity")."""
    isNoisy_list = np.empty((0,))
    with tqdm(data_loader) as progress:
        for _, (_, label, index, label_gt) in enumerate(progress):
            isNoisy = label == label_gt
            isNoisy_list = np.concatenate((isNoisy_list, isNoisy.cpu()))
    print ('#############################')
    print (isNoisy_list.sum(), isNoisy_list.shape)
    # Divide by shape[0] (an int), not shape (a tuple): dividing by the tuple
    # broadcast the sum into a 1-element array instead of a scalar purity.
    print('purity in this dataset: {}'.format(isNoisy_list.sum() / isNoisy_list.shape[0]))
def extract_teacherIdx(teacher, data_loader, parse):
    """Select indices of presumably-clean samples with up to three teacher
    checkpoints, intersecting the selections when several are given.

    parse.distill_mode chooses the strategy: 'eigen' / 'fulleigen' (via
    iterative_eigen, defined elsewhere), 'kmean_eigen', 'topk_eigen_kmean',
    or a loss-based fallback (get_loss_list, defined elsewhere).
    """
    teacher.load_state_dict(torch.load('./checkpoint/' + parse.load_name)['state_dict'])
    teacher = teacher.cuda()
    if not parse.reinit:
        # NOTE(review): `model` is not defined in this function nor visible at
        # module scope here - presumably a global set by the caller; confirm.
        model.load_state_dict(torch.load('./checkpoint/' + parse.load_name)['state_dict'])
    # Teachers are frozen: only used for inference.
    for params in teacher.parameters():
        params.requires_grad = False
    if parse.distill_mode == 'eigen':
        tea_label_list, tea_out_list = get_out_list(teacher, data_loader)
        teacher_idx = iterative_eigen(1,tea_label_list,tea_out_list)
    elif parse.distill_mode == 'fulleigen':
        tea_label_list, tea_out_list = get_out_list(teacher, data_loader)
        teacher_idx = iterative_eigen(100,tea_label_list,tea_out_list)
    elif parse.distill_mode == 'kmean_eigen':
        tea_label_list, tea_out_list = get_out_list(teacher, data_loader)
        teacher_idx = kmean_eigen_out(tea_label_list, tea_out_list)
    elif parse.distill_mode == 'topk_eigen_kmean':
        tea_label_list, tea_out_list = get_out_list(teacher, data_loader)
        teacher_idx = topk_eigen_kmean(tea_label_list, tea_out_list)
    else:
        teacher_idx = get_loss_list(teacher, data_loader)
    print('||||||original||||||')
    isNoisy_ratio(data_loader)
    # Optional second teacher: intersect its selection with the first.
    if parse.second_load_name !=None:
        teacher.load_state_dict(torch.load('./checkpoint/' + parse.second_load_name)['state_dict'])
        teacher = teacher.cuda()
        if not parse.reinit:
            model.load_state_dict(torch.load('./checkpoint/' + parse.second_load_name)['state_dict'])
        for params in teacher.parameters():
            params.requires_grad = False
        if parse.distill_mode == 'eigen':
            tea_label_list, tea_out_list = get_out_list(teacher, data_loader)
            teacher_idx2 = iterative_eigen(1,tea_label_list,tea_out_list,teacher_idx)
        elif parse.distill_mode == 'fulleigen':
            tea_label_list, tea_out_list = get_out_list(teacher, data_loader)
            teacher_idx2 = iterative_eigen(100,tea_label_list,tea_out_list)
        else:
            teacher_idx2 = get_loss_list(teacher, data_loader)
        teacher_idx = list(set(teacher_idx) & set(teacher_idx2))
        print('second_distillation')
        # Optional third teacher: intersect again (only reachable when a
        # second teacher was given).
        if parse.third_load_name !=None:
            teacher.load_state_dict(torch.load('./checkpoint/' + parse.third_load_name)['state_dict'])
            teacher = teacher.cuda()
            if not parse.reinit:
                model.load_state_dict(torch.load('./checkpoint/' + parse.third_load_name)['state_dict'])
            for params in teacher.parameters():
                params.requires_grad = False
            if parse.distill_mode == 'eigen':
                tea_label_list, tea_out_list = get_out_list(teacher, data_loader)
                teacher_idx3 = iterative_eigen(1,tea_label_list,tea_out_list, teacher_idx)
            elif parse.distill_mode == 'fulleigen':
                tea_label_list, tea_out_list = get_out_list(teacher, data_loader)
                teacher_idx3 = iterative_eigen(100,tea_label_list,tea_out_list)
            else:
                teacher_idx3 = get_loss_list(teacher, data_loader)
            teacher_idx = list(set(teacher_idx) & set(teacher_idx3))
            print('third_ distillation')
    return teacher_idx
| true | true |
f730de9b8bc194c6a46cc963b869fd564ac8a7d0 | 752 | py | Python | doc_finder/urls.py | sandipan1/document_finder | e867868e8d2ec96c8034e0a23bf28d75b38eca6e | [
"Apache-2.0"
] | null | null | null | doc_finder/urls.py | sandipan1/document_finder | e867868e8d2ec96c8034e0a23bf28d75b38eca6e | [
"Apache-2.0"
] | null | null | null | doc_finder/urls.py | sandipan1/document_finder | e867868e8d2ec96c8034e0a23bf28d75b38eca6e | [
"Apache-2.0"
] | null | null | null | """doc_finder URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
    # Only the Django admin is routed; the project defines no other URLs here.
    path('admin/', admin.site.urls),
]
| 34.181818 | 77 | 0.710106 | from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| true | true |
f730ded6c31e174a53ff3335e49d999b7bc074f2 | 718 | py | Python | src/commands/__init__.py | darklab8/darklab_darkbot | 2a6bb2f1a423989f3fce18371eb07f56c9e98bcd | [
"MIT"
] | 1 | 2022-03-17T05:55:56.000Z | 2022-03-17T05:55:56.000Z | src/commands/__init__.py | darklab8/darklab_darkbot | 2a6bb2f1a423989f3fce18371eb07f56c9e98bcd | [
"MIT"
] | 1 | 2022-02-23T11:31:43.000Z | 2022-02-23T11:31:43.000Z | src/commands/__init__.py | darklab8/darklab_darkbot | 2a6bb2f1a423989f3fce18371eb07f56c9e98bcd | [
"MIT"
] | null | null | null | from discord.ext import commands
from .root import attach_root
from .base import attach_base
from .system import attach_system
from .region import attach_region
from .friend import attach_friend
from .enemy import attach_enemy
from .unrecognized import attach_unrecognized
from .forum import attach_forum
def attach_commands(bot, storage, chanell_controller) -> commands.Bot:
    """Wire every command group onto the bot and return it.

    attach_root additionally needs the channel controller; the remaining
    groups only need the bot and storage.
    """
    bot = attach_root(bot, storage, chanell_controller)
    for attach in (attach_base, attach_system, attach_region,
                   attach_friend, attach_enemy, attach_unrecognized,
                   attach_forum):
        bot = attach(bot, storage)
    return bot
| 31.217391 | 70 | 0.768802 | from discord.ext import commands
from .root import attach_root
from .base import attach_base
from .system import attach_system
from .region import attach_region
from .friend import attach_friend
from .enemy import attach_enemy
from .unrecognized import attach_unrecognized
from .forum import attach_forum
def attach_commands(bot, storage, chanell_controller) -> commands.Bot:
bot = attach_root(bot, storage, chanell_controller)
bot = attach_base(bot, storage)
bot = attach_system(bot, storage)
bot = attach_region(bot, storage)
bot = attach_friend(bot, storage)
bot = attach_enemy(bot, storage)
bot = attach_unrecognized(bot, storage)
bot = attach_forum(bot, storage)
return bot
| true | true |
f730def843346a0a824d97dd5d8478aa29af6a02 | 114 | py | Python | OnlySnarf/src/elements/__init__.py | sec-js/onlysnarf | c8c32abb5d6b22c08fc7e29b41211530fb583b85 | [
"MIT"
] | null | null | null | OnlySnarf/src/elements/__init__.py | sec-js/onlysnarf | c8c32abb5d6b22c08fc7e29b41211530fb583b85 | [
"MIT"
] | null | null | null | OnlySnarf/src/elements/__init__.py | sec-js/onlysnarf | c8c32abb5d6b22c08fc7e29b41211530fb583b85 | [
"MIT"
] | null | null | null | from .driver import ELEMENTS as driverElements
from .profile import ELEMENTS as profileElements
# from . import * | 28.5 | 48 | 0.807018 | from .driver import ELEMENTS as driverElements
from .profile import ELEMENTS as profileElements
| true | true |
f730df351880e20d36330b8671d36979568d9b3b | 1,894 | py | Python | src/cicd/utils/config.py | szn/snowflake-cicd | 63350c2e0a3dda77969371c07189cc00073799a8 | [
"MIT"
] | 1 | 2022-01-17T20:31:40.000Z | 2022-01-17T20:31:40.000Z | src/cicd/utils/config.py | szn/snowflake-cicd | 63350c2e0a3dda77969371c07189cc00073799a8 | [
"MIT"
] | null | null | null | src/cicd/utils/config.py | szn/snowflake-cicd | 63350c2e0a3dda77969371c07189cc00073799a8 | [
"MIT"
] | null | null | null | import configparser
import functools
from os import path
from pathlib import Path
class Config():
    """Config wrapper that reads global config and user config."""
    # Bundled global config, shipped next to this package.
    PROJECT_ROOT = path.join(path.dirname(path.realpath(__file__)), '..')
    CONFIG_INI = path.join(PROJECT_ROOT, 'config.ini')
    # Per-user connection settings in the home directory.
    HOME_DIR = Path.home()
    CONN_INI = path.join(HOME_DIR, '.snowflake-cicd.ini')
    def __init__(self):
        pass
    def __lazy_init(func):
        """Reads and parses global config file and user's conn file."""
        # Decorator applied to accessor methods below: the two ini files are
        # parsed on first access and cached on the instance (_config / _conn).
        @functools.wraps(func)
        def wrap(self, *args, **kwargs):
            if not hasattr(self, '_config'):
                assert path.exists(Config.CONFIG_INI), f"Missing config file at path {Config.CONFIG_INI}"
                self._config = configparser.ConfigParser()
                self._config.read(Config.CONFIG_INI)
            if not hasattr(self, '_conn'):
                assert path.exists(Config.CONN_INI), f"Missing connection settings file at path {Config.CONN_INI}"
                self._conn = configparser.ConfigParser()
                self._conn.read(Config.CONN_INI)
            return func(self, *args, **kwargs)
        return wrap
    @__lazy_init
    def read_config(self, key, section='default', default=None) -> str:
        """Reads [section] key from user's conn file or use global file
        if the key is missing."""
        return self._conn[section].get(key,
                                       self._config[section].get(key, default))
    @__lazy_init
    def read_user_config(self, key, section='default', default=None) -> str:
        """Reads [section] from user .snowflake-cicd.ini file."""
        return self._conn[section].get(key, default)
    @__lazy_init
    def sql(self, query_id) -> str:
        """Returns value from config section 'queries'."""
        return self._config['queries'].get(query_id)
# Module-level singleton used by importers of this module.
config = Config()
| 35.735849 | 114 | 0.6283 | import configparser
import functools
from os import path
from pathlib import Path
class Config():
PROJECT_ROOT = path.join(path.dirname(path.realpath(__file__)), '..')
CONFIG_INI = path.join(PROJECT_ROOT, 'config.ini')
HOME_DIR = Path.home()
CONN_INI = path.join(HOME_DIR, '.snowflake-cicd.ini')
def __init__(self):
pass
def __lazy_init(func):
@functools.wraps(func)
def wrap(self, *args, **kwargs):
if not hasattr(self, '_config'):
assert path.exists(Config.CONFIG_INI), f"Missing config file at path {Config.CONFIG_INI}"
self._config = configparser.ConfigParser()
self._config.read(Config.CONFIG_INI)
if not hasattr(self, '_conn'):
assert path.exists(Config.CONN_INI), f"Missing connection settings file at path {Config.CONN_INI}"
self._conn = configparser.ConfigParser()
self._conn.read(Config.CONN_INI)
return func(self, *args, **kwargs)
return wrap
@__lazy_init
def read_config(self, key, section='default', default=None) -> str:
return self._conn[section].get(key,
self._config[section].get(key, default))
@__lazy_init
def read_user_config(self, key, section='default', default=None) -> str:
return self._conn[section].get(key, default)
@__lazy_init
def sql(self, query_id) -> str:
return self._config['queries'].get(query_id)
config = Config()
| true | true |
f730dfaef937c0ed1212f93f8a13aa36e4deb679 | 2,312 | py | Python | app/views/users/messages/views.py | dandye/DjanGoat | 72beb30afe3ddd5b31ce74a5d3b9da61d2c5df1d | [
"MIT"
] | 65 | 2017-08-18T15:12:03.000Z | 2021-08-14T16:50:07.000Z | app/views/users/messages/views.py | dandye/DjanGoat | 72beb30afe3ddd5b31ce74a5d3b9da61d2c5df1d | [
"MIT"
] | 83 | 2017-11-28T21:45:20.000Z | 2021-11-02T18:52:52.000Z | app/views/users/messages/views.py | dandye/DjanGoat | 72beb30afe3ddd5b31ce74a5d3b9da61d2c5df1d | [
"MIT"
] | 71 | 2017-08-17T14:58:01.000Z | 2022-02-02T17:09:49.000Z | from __future__ import unicode_literals
from django.http import HttpResponse
from django.contrib import messages
from django.views.decorators.http import require_http_methods
from django.shortcuts import render, redirect
from django.utils import timezone
from app.decorators import user_is_authenticated
from app.models import User, Message
from app.views import utils
@require_http_methods(["GET", "POST"])
@user_is_authenticated
def user_messages(request, user_id):  # pylint: disable=unused-argument
    """List the current user's messages (GET) or create one (POST).

    A failed POST re-renders the index with the error pushed through the
    Django messages framework.
    """
    current_user = utils.current_user(request)
    if request.method == "GET":
        return render(request, "users/messages/index.html", {
            'current_user': current_user,
            'available_recipients': User.objects.all()
        })
    else:
        try:
            cid = int(request.POST['creator_id'])
            creator = User.objects.get(user_id=cid)
            rid = int(request.POST['receiver_id'])
            receiver = User.objects.get(user_id=rid)
            msg = request.POST['message']
            red = int(request.POST['read'])
            now = timezone.now()
            Message.objects.create(creator=creator, receiver=receiver,
                                   message=msg, read=red,
                                   created_at=now, updated_at=now)
            return redirect("/users/" + str(current_user.id) + "/messages")
        except Exception as e:  # broad on purpose: any bad input re-renders the form
            messages.add_message(request, messages.INFO, str(e))
            return render(request, "users/messages/index.html", {
                'current_user': current_user,
                # Fixed: was misspelled 'available_receipients', so the template
                # never received the recipient list on the error path.
                'available_recipients': User.objects.all()
            })
# W0613 = unused-argument
@require_http_methods(["GET", "DELETE"])
@user_is_authenticated
def user_message(request, user_id, message_id):  # pylint: disable=W0613
    """Show a single message (GET) or delete it (DELETE).

    Any failure (missing message, render/delete error) falls back to a
    redirect to the user's message index.
    """
    current_user = utils.current_user(request)
    try:
        msg = Message.objects.get(pk=message_id)
        if request.method != "GET":
            msg.delete()
            return HttpResponse("Success!")
        return render(request, "users/messages/show.html", {
            'current_user': current_user,
            'message': msg
        })
    except Exception:
        return redirect("/users/" + str(current_user.id) + "/messages")
| 37.290323 | 75 | 0.62846 | from __future__ import unicode_literals
from django.http import HttpResponse
from django.contrib import messages
from django.views.decorators.http import require_http_methods
from django.shortcuts import render, redirect
from django.utils import timezone
from app.decorators import user_is_authenticated
from app.models import User, Message
from app.views import utils
@require_http_methods(["GET", "POST"])
@user_is_authenticated
def user_messages(request, user_id):
current_user = utils.current_user(request)
if request.method == "GET":
return render(request, "users/messages/index.html", {
'current_user': current_user,
'available_recipients': User.objects.all()
})
else:
try:
cid = int(request.POST['creator_id'])
creator = User.objects.get(user_id=cid)
rid = int(request.POST['receiver_id'])
receiver = User.objects.get(user_id=rid)
msg = request.POST['message']
red = int(request.POST['read'])
now = timezone.now()
Message.objects.create(creator=creator, receiver=receiver,
message=msg, read=red,
created_at=now, updated_at=now)
return redirect("/users/" + str(current_user.id) + "/messages")
except Exception as e:
messages.add_message(request, messages.INFO, str(e))
return render(request, "users/messages/index.html", {
'current_user': current_user,
'available_receipients': User.objects.all()
})
@require_http_methods(["GET", "DELETE"])
@user_is_authenticated
def user_message(request, user_id, message_id):
current_user = utils.current_user(request)
try:
message = Message.objects.get(pk=message_id)
if request.method == "GET":
return render(request, "users/messages/show.html", {
'current_user': current_user,
'message': message
})
else:
message.delete()
return HttpResponse("Success!")
except Exception:
return redirect("/users/" + str(current_user.id) + "/messages")
| true | true |
f730e068ae999111bb773778e3014d2a1af34e42 | 5,643 | py | Python | stats_csv_latency.py | maria-kuruvilla/temp_collective_new | c45b72cee7c17072507eb67790d1699f5684098a | [
"MIT"
] | null | null | null | stats_csv_latency.py | maria-kuruvilla/temp_collective_new | c45b72cee7c17072507eb67790d1699f5684098a | [
"MIT"
] | null | null | null | stats_csv_latency.py | maria-kuruvilla/temp_collective_new | c45b72cee7c17072507eb67790d1699f5684098a | [
"MIT"
] | null | null | null | """
Goal - to produce a csv file with temp, gs rep, loom and latency
"""
import os
import pathlib
from pprint import pprint
import numpy as np
from scipy import stats
from scipy.spatial import distance
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import trajectorytools as tt
import trajectorytools.plot as ttplot
import trajectorytools.socialcontext as ttsocial
from trajectorytools.constants import dir_of_data
import csv
import pickle
import argparse
def position(tr):
    """Positions of all individuals over time (alias for tr.s)."""
    return tr.s
def speed(tr):
    """Per-frame speed (position units per second) via central differences
    over positions sampled at 60 fps."""
    displacement = (tr.s[2:] - tr.s[:-2]) / 2
    return np.linalg.norm(displacement, axis=-1) * 60
def acceleration(tr):
    """Per-frame acceleration magnitude (position units / s^2) from the
    second central difference of positions sampled at 60 fps."""
    second_diff = tr.s[2:] - 2 * tr.s[1:-1] + tr.s[:-2]
    return np.linalg.norm(second_diff, axis=-1) * 3600
def filter(tr, roi=5):
    """Return (x, y) position arrays with frames masked whenever the fish is
    outside `roi` (distance from origin) in this frame or either neighbour.

    Note: intentionally shadows the builtin `filter` (existing public name).
    """
    dist = tr.distance_to_origin
    outside = (dist[1:-1] > roi) | (dist[0:-2] > roi) | (dist[2:] > roi)
    xs = np.ma.masked_where(outside, tr.s[1:-1, :, 0], copy=False)
    ys = np.ma.masked_where(outside, tr.s[1:-1, :, 1], copy=False)
    return (xs, ys)
def filter_speed(tr, roi=5):
    """Speed time series with frames outside the region of interest masked
    (same neighbourhood rule as filter())."""
    dist = tr.distance_to_origin
    outside = (dist[1:-1] > roi) | (dist[0:-2] > roi) | (dist[2:] > roi)
    return np.ma.masked_where(outside, speed(tr), copy=False)
def filter_acc(tr, roi=5):
    """Acceleration time series with frames outside the region of interest
    masked (same neighbourhood rule as filter())."""
    dist = tr.distance_to_origin
    outside = (dist[1:-1] > roi) | (dist[0:-2] > roi) | (dist[2:] > roi)
    return np.ma.masked_where(outside, acceleration(tr), copy=False)
def spikes_position_new(tr): #uses filter_speed
    """Frame indices where a speed spike (startle) begins, pooled over fish.

    A spike frame is any frame with filtered speed > 10; only the first
    frame of each consecutive run is kept.
    """
    list1 = []
    for j in range(tr.number_of_individuals):
        # All frames where individual j exceeds the speed threshold.
        list2 = [i for i, value in enumerate(filter_speed(tr,5)[:,j]) if value > 10]
        # Sentinel so the first real index never matches "previous index + 1".
        list2.insert(0,100000000)
        # Keep only run starts: indices that do not directly follow the previous one.
        list1 = list1 + [value for i,value in enumerate(list2[1:]) if (value != (list2[i]+1))]
    return(list1)
# Loom metadata table: one csv row per trial (camera id, first-loom frame,
# group size, replicate).  Consumed by loom_frame() below.
rows = []
with open('../../data/temp_collective/looms_roi.csv', 'r') as csvfile:
    looms = csv.reader(csvfile)
    for row in looms:
        rows.append(row)
def loom_frame(temp, groupsize, rep):
    """Return the start frames of the 5 loom stimuli for one trial.

    Maps `temp` to the camera that filmed that temperature, then scans the
    module-level `rows` (looms_roi.csv) for the matching group-size /
    replicate row.  Looms are spaced 11403 frames apart from the frame
    recorded in the csv.
    """
    temp_to_cam = {
        29: 'Cam 7', 25: 'Cam 8', 17: 'Cam 9',
        13: 'Cam 10', 21: 'Cam 11', 9: 'Cam 12',
    }
    try:
        cam = temp_to_cam[temp]
    except KeyError:
        # Previously an unknown temperature left `cam` unbound and crashed
        # later with NameError; fail fast with a clear message instead.
        raise ValueError('Unknown temperature: {}'.format(temp))
    g = str(groupsize)
    r = str(rep)
    loom = np.zeros([5,1])
    for row in rows:
        # row layout: [.., camera, first loom frame, group size, replicate, ..]
        if row[1] == cam and row[3] == g and row[4] == r:
            for j in range(5):
                loom[j] = int(row[2]) + j*11403
    return(loom)
def accurate_startles_frame(tr, temp, groupsize, rep,i): #i starts from 0 #uses filtered data
    """Startle-onset frames falling in the response window of loom i.

    The window is (loom onset + 500, loom onset + 700) frames.
    """
    list1 = spikes_position_new(tr)
    loom = loom_frame(temp, groupsize, rep)
    list2 = [value for value in list1 if (value < (loom[i] + 700) and value > (loom[i]+500)) ]
    return(list2)
def first_startle(tr, temp, groupsize, rep, i):
    """Frame of the earliest startle in loom i's response window, or NaN
    when no startle was detected in that window (i starts from 0)."""
    startles = accurate_startles_frame(tr, temp, groupsize, rep, i)
    return min(startles) if startles else np.nan
def latency(tr, temp, groupsize, rep):
    """Mean startle latency (frames) over the five looms, ignoring looms
    with no detected startle (NaN)."""
    onsets = loom_frame(temp, groupsize, rep)
    lags = np.full([5, 1], np.nan)
    for loom_idx in range(5):
        lags[loom_idx] = first_startle(tr, temp, groupsize, rep, loom_idx) - onsets[loom_idx]
    return np.nanmean(lags)
def latency_loom(tr, temp, groupsize, rep, loom):
    """Latency (frames, length-1 array) between loom onset and the first
    startle for a single loom (loom starts from 0)."""
    onsets = loom_frame(temp, groupsize, rep)
    return first_startle(tr, temp, groupsize, rep, loom) - onsets[loom]
# Experimental design: temperatures (C), group sizes, replicates per treatment.
temperature = range(9,30,4)
group = [1,2,4,8,16,32]
replication = range(10) # number of replicates per treatment
#output parent directory
parent_dir = '../../output/temp_collective/roi'
# Write one csv row per (treatment, replicate, loom) with the measured
# startle latency; looms with no detected startle (NaN) are skipped.
with open('../../data/temp_collective/roi/stats_loom_latency_nan.csv', mode='w') as stats_speed:
    writer = csv.writer(stats_speed, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    writer.writerow(['Temperature', 'Groupsize', 'Replicate', 'loom','latency'])
    for i in temperature:
        print(i)
        jj = 0 # to keep count of groups
        for j in group:
            for k in replication:
                # Single fish were tracked without gap interpolation.
                if j == 1:
                    trajectories_file_path = '../../data/temp_collective/roi/'+str(i)+'/' +str(j)+'/GS_'+str(j)+'_T_'+str(i)+'_roi_'+str(k+1)+'/trajectories.npy'
                else:
                    trajectories_file_path = '../../data/temp_collective/roi/'+str(i)+'/' +str(j)+'/GS_'+str(j)+'_T_'+str(i)+'_roi_'+str(k+1)+'/trajectories_wo_gaps.npy'
                try:
                    # Positions are normalised to body lengths; time unit in seconds.
                    tr = tt.Trajectories.from_idtrackerai(trajectories_file_path, center=True).normalise_by('body_length')
                    tr.new_time_unit(tr.params['frame_rate'], 'seconds')
                except FileNotFoundError:
                    print(i,j,k)
                    print('File not found')
                    continue
                #perc_speed = np.percentile(filter_speed(tr,5).compressed(),90)
                #perc_acc = np.percentile(filter_acc(tr,5).compressed(),90)
                for loom in range(5):
                    lat = latency_loom(tr,i,j,k+1, loom)[0]
                    if np.isnan(lat) != True:
                        writer.writerow([i, j, k+1, loom+1,lat])
| 32.618497 | 181 | 0.606238 |
import os
import pathlib
from pprint import pprint
import numpy as np
from scipy import stats
from scipy.spatial import distance
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import trajectorytools as tt
import trajectorytools.plot as ttplot
import trajectorytools.socialcontext as ttsocial
from trajectorytools.constants import dir_of_data
import csv
import pickle
import argparse
def position(tr):
return(tr.s)
def speed(tr):
v = (position(tr)[2:] - position(tr)[:-2]) / 2
b = np.linalg.norm(v, axis=-1)
return(b*60)
def acceleration(tr):
a = position(tr)[2:] - 2 * position(tr)[1:-1] + position(tr)[:-2]
aa = np.linalg.norm(a, axis=-1)
return(aa*3600)
def filter(tr, roi = 5):
position_mask0 = np.ma.masked_where((tr.distance_to_origin[1:-1] > roi)|(tr.distance_to_origin[0:-2] > roi)|(tr.distance_to_origin[2:] > roi), position(tr)[1:-1,:,0],copy=False)
position_mask1 = np.ma.masked_where((tr.distance_to_origin[1:-1] > roi)|(tr.distance_to_origin[0:-2] > roi)|(tr.distance_to_origin[2:] > roi), position(tr)[1:-1,:,1],copy=False)
return(position_mask0,position_mask1)
def filter_speed(tr, roi = 5):
speed_mask = np.ma.masked_where((tr.distance_to_origin[1:-1] > roi)|(tr.distance_to_origin[0:-2] > roi)|(tr.distance_to_origin[2:] > roi), speed(tr),copy=False)
return(speed_mask)
def filter_acc(tr, roi = 5):
acc_mask = np.ma.masked_where((tr.distance_to_origin[1:-1] > roi)|(tr.distance_to_origin[0:-2] > roi)|(tr.distance_to_origin[2:] > roi), acceleration(tr),copy=False)
return(acc_mask)
def spikes_position_new(tr):
list1 = []
for j in range(tr.number_of_individuals):
list2 = [i for i, value in enumerate(filter_speed(tr,5)[:,j]) if value > 10]
list2.insert(0,100000000)
list1 = list1 + [value for i,value in enumerate(list2[1:]) if (value != (list2[i]+1))]
return(list1)
rows = []
with open('../../data/temp_collective/looms_roi.csv', 'r') as csvfile:
looms = csv.reader(csvfile)
for row in looms:
rows.append(row)
def loom_frame(temp, groupsize, rep):
if temp == 29:
cam = 'Cam 7'
elif temp == 25:
cam = 'Cam 8'
elif temp == 17:
cam = 'Cam 9'
elif temp == 13:
cam = 'Cam 10'
elif temp == 21:
cam = 'Cam 11'
elif temp == 9:
cam = 'Cam 12'
g = str(groupsize)
r = str(rep)
loom = np.zeros([5,1])
for i in range(len(rows)):
if rows[i][1]==cam and rows[i][3]==g and rows[i][4]==r:
for j in range(5):
loom[j] = int(rows[i][2]) + j*11403
return(loom)
def accurate_startles_frame(tr, temp, groupsize, rep,i): _position_new(tr)
loom = loom_frame(temp, groupsize, rep)
list2 = [value for value in list1 if (value < (loom[i] + 700) and value > (loom[i]+500)) ]
return(list2)
def first_startle(tr, temp, groupsize, rep,i):
a = accurate_startles_frame(tr, temp, groupsize, rep,i)
if not a:
return(np.nan)
else:
return(min(a))
def latency(tr, temp, groupsize, rep):
a = np.empty([5,1])
a.fill(np.nan)
b = loom_frame(temp, groupsize, rep)
for i in range(5):
a[i] = first_startle(tr, temp, groupsize, rep,i) - b[i]
return(np.nanmean(a))
def latency_loom(tr, temp, groupsize, rep, loom): frame(temp, groupsize, rep)
lat_loom = first_startle(tr, temp, groupsize, rep,loom) - b[loom]
return(lat_loom)
temperature = range(9,30,4)
group = [1,2,4,8,16,32]
replication = range(10)
parent_dir = '../../output/temp_collective/roi'
with open('../../data/temp_collective/roi/stats_loom_latency_nan.csv', mode='w') as stats_speed:
writer = csv.writer(stats_speed, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow(['Temperature', 'Groupsize', 'Replicate', 'loom','latency'])
for i in temperature:
print(i)
jj = 0 # to keep count of groups
for j in group:
for k in replication:
if j == 1:
trajectories_file_path = '../../data/temp_collective/roi/'+str(i)+'/' +str(j)+'/GS_'+str(j)+'_T_'+str(i)+'_roi_'+str(k+1)+'/trajectories.npy'
else:
trajectories_file_path = '../../data/temp_collective/roi/'+str(i)+'/' +str(j)+'/GS_'+str(j)+'_T_'+str(i)+'_roi_'+str(k+1)+'/trajectories_wo_gaps.npy'
try:
tr = tt.Trajectories.from_idtrackerai(trajectories_file_path, center=True).normalise_by('body_length')
tr.new_time_unit(tr.params['frame_rate'], 'seconds')
except FileNotFoundError:
print(i,j,k)
print('File not found')
continue
#perc_speed = np.percentile(filter_speed(tr,5).compressed(),90)
#perc_acc = np.percentile(filter_acc(tr,5).compressed(),90)
for loom in range(5):
lat = latency_loom(tr,i,j,k+1, loom)[0]
if np.isnan(lat) != True:
writer.writerow([i, j, k+1, loom+1,lat])
| true | true |
f730e06a010416203fc8fedf3c5013230e453033 | 300 | py | Python | 0-notes/job-search/Cracking_the_Coding_Interview/C03StacksQueues/questions/3.5-question.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | 0-notes/job-search/Cracking_the_Coding_Interview/C03StacksQueues/questions/3.5-question.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | 0-notes/job-search/Cracking_the_Coding_Interview/C03StacksQueues/questions/3.5-question.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | # 3.5 Sort Stack
# Write a program to sort a stack such that the smallest items are on top.
# You can use an additional temporary stack, but you may not copy the elements into
# any other data structure such as an array.
# The stack supports the following operations: push, pop, peek, and isEmpty.
| 42.857143 | 83 | 0.756667 | true | true | |
f730e09591819247a3e35ea00055913be97dcb22 | 44,059 | py | Python | prometheus_client/core.py | canonical-ols/client_python | 3088bc14d419d4f54efd2a4ea12469eedbfa0a37 | [
"Apache-2.0"
] | null | null | null | prometheus_client/core.py | canonical-ols/client_python | 3088bc14d419d4f54efd2a4ea12469eedbfa0a37 | [
"Apache-2.0"
] | null | null | null | prometheus_client/core.py | canonical-ols/client_python | 3088bc14d419d4f54efd2a4ea12469eedbfa0a37 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
from __future__ import unicode_literals
from collections import namedtuple
import copy
import json
import math
import mmap
import os
import re
import struct
import sys
from threading import Lock
import time
from timeit import default_timer
import types
from .decorator import decorate
if sys.version_info > (3,):
    unicode = str  # Python 3 has no `unicode`; alias so 2/3 code shares one name
# Legal metric and label names for the Prometheus exposition format.
_METRIC_NAME_RE = re.compile(r'^[a-zA-Z_:][a-zA-Z0-9_:]*$')
_METRIC_LABEL_NAME_RE = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')
# Label names starting with "__" are reserved for internal use.
_RESERVED_METRIC_LABEL_NAME_RE = re.compile(r'^__.*$')
_INF = float("inf")
_MINUS_INF = float("-inf")
_INITIAL_MMAP_SIZE = 1 << 20  # 1 MiB; initial size for mmap-backed storage (per name; used later in module)
# Pre-compiled struct (un)packers for 4-byte ints and 8-byte doubles.
_pack_integer_func = struct.Struct(b'i').pack
_pack_double_func = struct.Struct(b'd').pack
_unpack_integer = struct.Struct(b'i').unpack_from
_unpack_double = struct.Struct(b'd').unpack_from
# Timestamp and exemplar are optional.
# Value can be an int or a float.
# Timestamp can be a float containing a unixtime in seconds,
# a Timestamp object, or None.
# Exemplar can be an Exemplar object, or None.
Sample = namedtuple('Sample', ['name', 'labels', 'value', 'timestamp', 'exemplar'])
Sample.__new__.__defaults__ = (None, None)
class Timestamp(object):
    '''A nanosecond-resolution timestamp.

    Stored as integer seconds plus integer nanoseconds; a negative timestamp
    carries the sign on both fields.
    '''

    def __init__(self, sec, nsec):
        if nsec < 0 or nsec >= 1e9:
            raise ValueError("Invalid value for nanoseconds in Timestamp: {}".format(nsec))
        if sec < 0:
            nsec = -nsec  # keep the sign consistent across both components
        self.sec = int(sec)
        self.nsec = int(nsec)

    def __str__(self):
        return "{0}.{1:09d}".format(self.sec, self.nsec)

    def __repr__(self):
        return "Timestamp({0}, {1})".format(self.sec, self.nsec)

    def __float__(self):
        return float(self.sec) + float(self.nsec) / 1e9

    def __eq__(self, other):
        return type(self) == type(other) and self.sec == other.sec and self.nsec == other.nsec

    def __ne__(self, other):
        return not self == other

    def __gt__(self, other):
        # Compare seconds first; nanoseconds break ties only when the seconds
        # are equal.  The previous `or` of the two raw field comparisons
        # wrongly ranked e.g. Timestamp(1, 5) above Timestamp(2, 1).
        return self.sec > other.sec or (self.sec == other.sec and self.nsec > other.nsec)
Exemplar = namedtuple('Exemplar', ['labels', 'value', 'timestamp'])
Exemplar.__new__.__defaults__ = (None,)
class CollectorRegistry(object):
    '''Metric collector registry.

    Collectors must have a no-argument method 'collect' that returns a list of
    Metric objects. The returned metrics should be consistent with the Prometheus
    exposition formats.
    '''
    def __init__(self, auto_describe=False):
        # Bidirectional maps between collectors and the timeseries names they
        # claim; both are guarded by _lock so (un)registration is thread-safe.
        self._collector_to_names = {}
        self._names_to_collectors = {}
        self._auto_describe = auto_describe
        self._lock = Lock()

    def register(self, collector):
        '''Add a collector to the registry.'''
        with self._lock:
            names = self._get_names(collector)
            # Refuse collectors whose timeseries collide with existing ones.
            duplicates = set(self._names_to_collectors).intersection(names)
            if duplicates:
                raise ValueError(
                    'Duplicated timeseries in CollectorRegistry: {0}'.format(
                        duplicates))
            for name in names:
                self._names_to_collectors[name] = collector
            self._collector_to_names[collector] = names

    def unregister(self, collector):
        '''Remove a collector from the registry.'''
        with self._lock:
            for name in self._collector_to_names[collector]:
                del self._names_to_collectors[name]
            del self._collector_to_names[collector]

    def _get_names(self, collector):
        '''Get names of timeseries the collector produces.'''
        desc_func = None
        # If there's a describe function, use it.
        try:
            desc_func = collector.describe
        except AttributeError:
            pass
        # Otherwise, if auto describe is enabled use the collect function.
        if not desc_func and self._auto_describe:
            desc_func = collector.collect
        if not desc_func:
            return []
        result = []
        # Each metric type expands to a fixed set of sample-name suffixes.
        type_suffixes = {
            'counter': ['_total', '_created'],
            'summary': ['', '_sum', '_count', '_created'],
            'histogram': ['_bucket', '_sum', '_count', '_created'],
            'gaugehistogram': ['_bucket', '_gsum', '_gcount'],
            'info': ['_info'],
        }
        for metric in desc_func():
            for suffix in type_suffixes.get(metric.type, ['']):
                result.append(metric.name + suffix)
        return result

    def collect(self):
        '''Yields metrics from the collectors in the registry.'''
        collectors = None
        # Snapshot the collector set under the lock, then collect outside it
        # so a slow collector cannot block (un)registration.
        with self._lock:
            collectors = copy.copy(self._collector_to_names)
        for collector in collectors:
            for metric in collector.collect():
                yield metric

    def restricted_registry(self, names):
        '''Returns object that only collects some metrics.

        Returns an object which upon collect() will return
        only samples with the given names.

        Intended usage is:
            generate_latest(REGISTRY.restricted_registry(['a_timeseries']))

        Experimental.'''
        names = set(names)
        collectors = set()
        with self._lock:
            for name in names:
                if name in self._names_to_collectors:
                    collectors.add(self._names_to_collectors[name])
        # Collect eagerly (outside the lock) and filter samples by name
        # (s[0] is the sample name); the returned object replays the result.
        metrics = []
        for collector in collectors:
            for metric in collector.collect():
                samples = [s for s in metric.samples if s[0] in names]
                if samples:
                    m = Metric(metric.name, metric.documentation, metric.type)
                    m.samples = samples
                    metrics.append(m)
        class RestrictedRegistry(object):
            def collect(self):
                return metrics
        return RestrictedRegistry()

    def get_sample_value(self, name, labels=None):
        '''Returns the sample value, or None if not found.

        This is inefficient, and intended only for use in unittests.
        '''
        if labels is None:
            labels = {}
        for metric in self.collect():
            for s in metric.samples:
                if s.name == name and s.labels == labels:
                    return s.value
        return None
# Process-wide default registry; metrics register here unless told otherwise.
REGISTRY = CollectorRegistry(auto_describe=True)
'''The default registry.'''

# The set of metric types accepted by Metric (OpenMetrics type names).
_METRIC_TYPES = (
    'counter', 'gauge', 'summary', 'histogram',
    'gaugehistogram', 'unknown', 'info', 'stateset',
)
class Metric(object):
    '''A single metric family and its samples.

    This is intended only for internal use by the instrumentation client.
    Custom collectors should use GaugeMetricFamily, CounterMetricFamily
    and SummaryMetricFamily instead.
    '''

    def __init__(self, name, documentation, typ, unit=''):
        # Append the unit as a name suffix when not already present.
        suffix = "_" + unit if unit else ""
        if suffix and not name.endswith(suffix):
            name += suffix
        if not _METRIC_NAME_RE.match(name):
            raise ValueError('Invalid metric name: ' + name)
        # 'untyped' is the legacy spelling of 'unknown'.
        normalized = 'unknown' if typ == 'untyped' else typ
        if normalized not in _METRIC_TYPES:
            raise ValueError('Invalid metric type: ' + normalized)
        self.name = name
        self.documentation = documentation
        self.unit = unit
        self.type = normalized
        self.samples = []

    def add_sample(self, name, labels, value, timestamp=None, exemplar=None):
        '''Add a sample to the metric.

        Internal-only, do not use.'''
        sample = Sample(name, labels, value, timestamp, exemplar)
        self.samples.append(sample)

    def __eq__(self, other):
        if not isinstance(other, Metric):
            return False
        mine = (self.name, self.documentation, self.type, self.unit, self.samples)
        theirs = (other.name, other.documentation, other.type, other.unit, other.samples)
        return mine == theirs

    def __repr__(self):
        return "Metric(%s, %s, %s, %s, %s)" % (
            self.name, self.documentation, self.type, self.unit, self.samples)
class UnknownMetricFamily(Metric):
    '''A single unknown metric and its samples.

    For use by custom collectors.
    '''
    # (Fixes the "unknwon" typo in the original docstring.)

    def __init__(self, name, documentation, value=None, labels=None, unit=''):
        Metric.__init__(self, name, documentation, 'unknown', unit)
        if labels is not None and value is not None:
            raise ValueError('Can only specify at most one of value and labels.')
        if labels is None:
            labels = []
        self._labelnames = tuple(labels)
        if value is not None:
            self.add_metric([], value)

    def add_metric(self, labels, value, timestamp=None):
        '''Add a metric to the metric family.

        Args:
          labels: A list of label values
          value: The value of the metric.
        '''
        self.samples.append(Sample(self.name, dict(zip(self._labelnames, labels)), value, timestamp))


# For backward compatibility.
UntypedMetricFamily = UnknownMetricFamily
class CounterMetricFamily(Metric):
    '''A single counter and its samples.

    For use by custom collectors.
    '''

    def __init__(self, name, documentation, value=None, labels=None, created=None, unit=''):
        # Glue code for pre-OpenMetrics metrics: strip a '_total' suffix.
        if name.endswith('_total'):
            name = name[:-6]
        Metric.__init__(self, name, documentation, 'counter', unit)
        if value is not None and labels is not None:
            raise ValueError('Can only specify at most one of value and labels.')
        self._labelnames = tuple(labels) if labels is not None else ()
        if value is not None:
            self.add_metric([], value, created)

    def add_metric(self, labels, value, created=None, timestamp=None):
        '''Add a metric to the metric family.

        Args:
          labels: A list of label values
          value: The value of the metric
          created: Optional unix timestamp the child was created at.
        '''
        label_dict = dict(zip(self._labelnames, labels))
        self.samples.append(Sample(self.name + '_total', label_dict, value, timestamp))
        if created is not None:
            self.samples.append(
                Sample(self.name + '_created', dict(label_dict), created, timestamp))
class GaugeMetricFamily(Metric):
    '''A single gauge and its samples.

    For use by custom collectors.
    '''

    def __init__(self, name, documentation, value=None, labels=None, unit=''):
        Metric.__init__(self, name, documentation, 'gauge', unit)
        if value is not None and labels is not None:
            raise ValueError('Can only specify at most one of value and labels.')
        self._labelnames = tuple(labels) if labels is not None else ()
        if value is not None:
            self.add_metric([], value)

    def add_metric(self, labels, value, timestamp=None):
        '''Add a metric to the metric family.

        Args:
          labels: A list of label values
          value: A float
        '''
        self.samples.append(
            Sample(self.name, dict(zip(self._labelnames, labels)), value, timestamp))
class SummaryMetricFamily(Metric):
    '''A single summary and its samples.

    For use by custom collectors.
    '''

    def __init__(self, name, documentation, count_value=None, sum_value=None, labels=None, unit=''):
        Metric.__init__(self, name, documentation, 'summary', unit)
        if (sum_value is None) != (count_value is None):
            raise ValueError('count_value and sum_value must be provided together.')
        if labels is not None and count_value is not None:
            raise ValueError('Can only specify at most one of value and labels.')
        self._labelnames = tuple(labels) if labels is not None else ()
        if count_value is not None:
            self.add_metric([], count_value, sum_value)

    def add_metric(self, labels, count_value, sum_value, timestamp=None):
        '''Add a metric to the metric family.

        Args:
          labels: A list of label values
          count_value: The count value of the metric.
          sum_value: The sum value of the metric.
        '''
        label_dict = dict(zip(self._labelnames, labels))
        self.samples.append(Sample(self.name + '_count', label_dict, count_value, timestamp))
        self.samples.append(Sample(self.name + '_sum', dict(label_dict), sum_value, timestamp))
class HistogramMetricFamily(Metric):
    '''A single histogram and its samples.

    For use by custom collectors.
    '''

    def __init__(self, name, documentation, buckets=None, sum_value=None, labels=None, unit=''):
        Metric.__init__(self, name, documentation, 'histogram', unit)
        if (sum_value is None) != (buckets is None):
            raise ValueError('buckets and sum_value must be provided together.')
        if labels is not None and buckets is not None:
            raise ValueError('Can only specify at most one of buckets and labels.')
        self._labelnames = tuple(labels) if labels is not None else ()
        if buckets is not None:
            self.add_metric([], buckets, sum_value)

    def add_metric(self, labels, buckets, sum_value, timestamp=None):
        '''Add a metric to the metric family.

        Args:
          labels: A list of label values
          buckets: A list of lists.
              Each inner list can be a pair of bucket name and value,
              or a triple of bucket name, value, and exemplar.
              The buckets must be sorted, and +Inf present.
          sum_value: The sum value of the metric.
        '''
        base_labels = dict(zip(self._labelnames, labels))
        for entry in buckets:
            bucket_name = entry[0]
            bucket_value = entry[1]
            # A third element, when present, is the exemplar for the bucket.
            exemplar = entry[2] if len(entry) == 3 else None
            bucket_labels = dict(base_labels)
            bucket_labels['le'] = bucket_name
            self.samples.append(Sample(
                self.name + '_bucket', bucket_labels, bucket_value, timestamp, exemplar))
        # The +Inf bucket is last and doubles as the observation count.
        self.samples.append(
            Sample(self.name + '_count', dict(base_labels), buckets[-1][1], timestamp))
        self.samples.append(
            Sample(self.name + '_sum', dict(base_labels), sum_value, timestamp))
class GaugeHistogramMetricFamily(Metric):
    '''A single gauge histogram and its samples.

    For use by custom collectors.
    '''

    def __init__(self, name, documentation, buckets=None, gsum_value=None, labels=None, unit=''):
        Metric.__init__(self, name, documentation, 'gaugehistogram', unit)
        if labels is not None and buckets is not None:
            raise ValueError('Can only specify at most one of buckets and labels.')
        self._labelnames = tuple(labels) if labels is not None else ()
        if buckets is not None:
            self.add_metric([], buckets, gsum_value)

    def add_metric(self, labels, buckets, gsum_value, timestamp=None):
        '''Add a metric to the metric family.

        Args:
          labels: A list of label values
          buckets: A list of pairs of bucket names and values.
              The buckets must be sorted, and +Inf present.
          gsum_value: The sum value of the metric.
        '''
        base_labels = dict(zip(self._labelnames, labels))
        for bucket_name, bucket_value in buckets:
            bucket_labels = dict(base_labels)
            bucket_labels['le'] = bucket_name
            self.samples.append(Sample(
                self.name + '_bucket', bucket_labels, bucket_value, timestamp))
        # The +Inf bucket is last and doubles as the gauge-histogram count.
        self.samples.append(
            Sample(self.name + '_gcount', dict(base_labels), buckets[-1][1], timestamp))
        self.samples.append(
            Sample(self.name + '_gsum', dict(base_labels), gsum_value, timestamp))
class InfoMetricFamily(Metric):
    '''A single info and its samples.

    For use by custom collectors.
    '''

    def __init__(self, name, documentation, value=None, labels=None):
        Metric.__init__(self, name, documentation, 'info')
        if value is not None and labels is not None:
            raise ValueError('Can only specify at most one of value and labels.')
        self._labelnames = tuple(labels) if labels is not None else ()
        if value is not None:
            self.add_metric([], value)

    def add_metric(self, labels, value, timestamp=None):
        '''Add a metric to the metric family.

        Args:
          labels: A list of label values
          value: A dict of labels
        '''
        # The info payload is exposed as extra labels on a constant-1 sample.
        series_labels = dict(zip(self._labelnames, labels))
        self.samples.append(Sample(
            self.name + '_info', dict(series_labels, **value), 1, timestamp))
class StateSetMetricFamily(Metric):
    '''A single stateset and its samples.

    For use by custom collectors.
    '''

    def __init__(self, name, documentation, value=None, labels=None):
        Metric.__init__(self, name, documentation, 'stateset')
        if value is not None and labels is not None:
            raise ValueError('Can only specify at most one of value and labels.')
        self._labelnames = tuple(labels) if labels is not None else ()
        if value is not None:
            self.add_metric([], value)

    def add_metric(self, labels, value, timestamp=None):
        '''Add a metric to the metric family.

        Args:
          labels: A list of label values
          value: A dict of string state names to booleans
        '''
        label_values = tuple(labels)
        # One sample per state, in sorted order; the metric name itself is
        # used as the label key carrying the state name.
        for state in sorted(value):
            enabled = value[state]
            sample_labels = dict(
                zip(self._labelnames + (self.name,), label_values + (state,)))
            self.samples.append(Sample(
                self.name, sample_labels, 1 if enabled else 0, timestamp))
class _MutexValue(object):
    '''A float protected by a mutex.'''

    _multiprocess = False

    def __init__(self, typ, metric_name, name, labelnames, labelvalues, **kwargs):
        # The metric-identity arguments exist only for interface parity with
        # the multiprocess value class; a plain in-memory float is kept here.
        self._value = 0.0
        self._lock = Lock()

    def inc(self, amount):
        with self._lock:
            self._value += amount

    def set(self, value):
        with self._lock:
            self._value = value

    def get(self):
        with self._lock:
            return self._value
# struct.pack_into has atomicity issues because it will temporarily write 0 into
# the mmap, resulting in false reads to 0 when experiencing a lot of writes.
# Using direct assignment solves this issue.


def _pack_double(data, pos, value):
    # Write an 8-byte native-endian double at ``pos`` in one slice assignment.
    data[pos:pos + 8] = _pack_double_func(value)


def _pack_integer(data, pos, value):
    # Write a 4-byte native-endian int at ``pos`` in one slice assignment.
    data[pos:pos + 4] = _pack_integer_func(value)
class _MmapedDict(object):
    """A dict of doubles, backed by an mmapped file.

    The file starts with a 4 byte int, indicating how much of it is used.
    Then 4 bytes of padding.
    There's then a number of entries, consisting of a 4 byte int which is the
    size of the next field, a utf-8 encoded string key, padding to a 8 byte
    alignment, and then a 8 byte float which is the value.

    Not thread safe.
    """

    def __init__(self, filename, read_mode=False):
        self._f = open(filename, 'rb' if read_mode else 'a+b')
        # Grow brand-new files to the initial mmap size before mapping.
        if os.fstat(self._f.fileno()).st_size == 0:
            self._f.truncate(_INITIAL_MMAP_SIZE)
        self._capacity = os.fstat(self._f.fileno()).st_size
        self._m = mmap.mmap(self._f.fileno(), self._capacity, access=mmap.ACCESS_READ if read_mode else mmap.ACCESS_WRITE)

        # key -> byte offset of that key's 8-byte value slot in the mmap.
        self._positions = {}
        self._used = _unpack_integer(self._m, 0)[0]
        if self._used == 0:
            # Fresh file: reserve the 8-byte header (used counter + padding).
            self._used = 8
            _pack_integer(self._m, 0, self._used)
        else:
            if not read_mode:
                # Existing file: index every key's value position up front.
                for key, _, pos in self._read_all_values():
                    self._positions[key] = pos

    def _init_value(self, key):
        """Initialize a value. Lock must be held by caller."""
        encoded = key.encode('utf-8')
        # Pad to be 8-byte aligned.
        padded = encoded + (b' ' * (8 - (len(encoded) + 4) % 8))
        value = struct.pack('i{0}sd'.format(len(padded)).encode(), len(encoded), padded, 0.0)
        while self._used + len(value) > self._capacity:
            # Double the file size and remap until the new entry fits.
            self._capacity *= 2
            self._f.truncate(self._capacity)
            self._m = mmap.mmap(self._f.fileno(), self._capacity)
        self._m[self._used:self._used + len(value)] = value

        # Update how much space we've used.
        self._used += len(value)
        _pack_integer(self._m, 0, self._used)
        # The value slot is the trailing 8 bytes of the entry just written.
        self._positions[key] = self._used - 8

    def _read_all_values(self):
        """Yield (key, value, pos). No locking is performed."""
        pos = 8

        # cache variables to local ones and prevent attributes lookup
        # on every loop iteration
        used = self._used
        data = self._m
        unpack_from = struct.unpack_from

        while pos < used:
            encoded_len = _unpack_integer(data, pos)[0]
            pos += 4
            encoded = unpack_from(('%ss' % encoded_len).encode(), data, pos)[0]
            # Skip the key plus its alignment padding to reach the value.
            padded_len = encoded_len + (8 - (encoded_len + 4) % 8)
            pos += padded_len
            value = _unpack_double(data, pos)[0]
            yield encoded.decode('utf-8'), value, pos
            pos += 8

    def read_all_values(self):
        """Yield (key, value) pairs. No locking is performed."""
        for k, v, _ in self._read_all_values():
            yield k, v

    def read_value(self, key):
        if key not in self._positions:
            self._init_value(key)
        pos = self._positions[key]
        # We assume that reading from an 8 byte aligned value is atomic
        return _unpack_double(self._m, pos)[0]

    def write_value(self, key, value):
        if key not in self._positions:
            self._init_value(key)
        pos = self._positions[key]
        # We assume that writing to an 8 byte aligned value is atomic
        _pack_double(self._m, pos, value)

    def close(self):
        if self._f:
            self._m.close()
            self._m = None
            self._f.close()
            self._f = None
def _mmap_key(metric_name, name, labelnames, labelvalues):
    """Format a key for use in the mmap file."""
    # JSON with sorted keys gives a canonical encoding, so the same
    # labelset always maps to the same on-disk key.
    return json.dumps(
        [metric_name, name, dict(zip(labelnames, labelvalues))],
        sort_keys=True)
def _MultiProcessValue(_pidFunc=os.getpid):
    # Factory returning a value class whose state lives in per-process mmap
    # files.  Closure state shared by all instances: open files keyed by
    # file prefix, every live value (needed for post-fork resets), and the
    # pid those files belong to.
    files = {}
    values = []
    pid = {'value': _pidFunc()}
    # Use a single global lock when in multi-processing mode
    # as we presume this means there is no threading going on.
    # This avoids the need to also have mutexes in __MmapDict.
    lock = Lock()

    class _MmapedValue(object):
        '''A float protected by a mutex backed by a per-process mmaped file.'''

        _multiprocess = True

        def __init__(self, typ, metric_name, name, labelnames, labelvalues, multiprocess_mode='', **kwargs):
            self._params = typ, metric_name, name, labelnames, labelvalues, multiprocess_mode
            with lock:
                self.__check_for_pid_change()
                self.__reset()
                values.append(self)

        def __reset(self):
            typ, metric_name, name, labelnames, labelvalues, multiprocess_mode = self._params
            # Gauges get one file per multiprocess aggregation mode.
            if typ == 'gauge':
                file_prefix = typ + '_' + multiprocess_mode
            else:
                file_prefix = typ
            if file_prefix not in files:
                filename = os.path.join(
                    os.environ['prometheus_multiproc_dir'],
                    '{0}_{1}.db'.format(file_prefix, pid['value']))
                files[file_prefix] = _MmapedDict(filename)
            self._file = files[file_prefix]
            self._key = _mmap_key(metric_name, name, labelnames, labelvalues)
            # Resume from whatever value is already on disk for this key.
            self._value = self._file.read_value(self._key)

        def __check_for_pid_change(self):
            actual_pid = _pidFunc()
            if pid['value'] != actual_pid:
                pid['value'] = actual_pid
                # There has been a fork(), reset all the values.
                for f in files.values():
                    f.close()
                files.clear()
                for value in values:
                    value.__reset()

        def inc(self, amount):
            with lock:
                self.__check_for_pid_change()
                self._value += amount
                self._file.write_value(self._key, self._value)

        def set(self, value):
            with lock:
                self.__check_for_pid_change()
                self._value = value
                self._file.write_value(self._key, self._value)

        def get(self):
            with lock:
                self.__check_for_pid_change()
                return self._value

    return _MmapedValue
# Should we enable multi-process mode?
# This needs to be chosen before the first metric is constructed,
# and as that may be in some arbitrary library the user/admin has
# no control over we use an environment variable.
if 'prometheus_multiproc_dir' in os.environ:
    _ValueClass = _MultiProcessValue()
else:
    # Single-process mode: plain mutex-protected in-memory floats.
    _ValueClass = _MutexValue
class _LabelWrapper(object):
    '''Handles labels for the wrapped metric.'''

    def __init__(self, wrappedClass, name, labelnames, **kwargs):
        self._wrappedClass = wrappedClass
        self._type = wrappedClass._type
        self._name = name
        self._labelnames = labelnames
        self._kwargs = kwargs
        self._lock = Lock()
        # labelvalues tuple -> child metric instance, created lazily.
        self._metrics = {}

        for l in labelnames:
            # Double-underscore label names are reserved for internal use.
            if l.startswith('__'):
                raise ValueError('Invalid label metric name: ' + l)

    def labels(self, *labelvalues, **labelkwargs):
        '''Return the child for the given labelset.

        All metrics can have labels, allowing grouping of related time series.
        Taking a counter as an example:

            from prometheus_client import Counter

            c = Counter('my_requests_total', 'HTTP Failures', ['method', 'endpoint'])
            c.labels('get', '/').inc()
            c.labels('post', '/submit').inc()

        Labels can also be provided as keyword arguments:

            from prometheus_client import Counter

            c = Counter('my_requests_total', 'HTTP Failures', ['method', 'endpoint'])
            c.labels(method='get', endpoint='/').inc()
            c.labels(method='post', endpoint='/submit').inc()

        See the best practices on [naming](http://prometheus.io/docs/practices/naming/)
        and [labels](http://prometheus.io/docs/practices/instrumentation/#use-labels).
        '''
        if labelvalues and labelkwargs:
            raise ValueError("Can't pass both *args and **kwargs")

        if labelkwargs:
            if sorted(labelkwargs) != sorted(self._labelnames):
                raise ValueError('Incorrect label names')
            # Values are coerced to text and ordered by the declared names.
            labelvalues = tuple(unicode(labelkwargs[l]) for l in self._labelnames)
        else:
            if len(labelvalues) != len(self._labelnames):
                raise ValueError('Incorrect label count')
            labelvalues = tuple(unicode(l) for l in labelvalues)
        with self._lock:
            if labelvalues not in self._metrics:
                # Lazily create the child metric on first use of a labelset.
                self._metrics[labelvalues] = self._wrappedClass(
                    self._name,
                    self._labelnames,
                    labelvalues,
                    **self._kwargs
                )
            return self._metrics[labelvalues]

    def remove(self, *labelvalues):
        '''Remove the given labelset from the metric.'''
        if len(labelvalues) != len(self._labelnames):
            raise ValueError('Incorrect label count')
        labelvalues = tuple(unicode(l) for l in labelvalues)
        with self._lock:
            del self._metrics[labelvalues]

    def _samples(self):
        with self._lock:
            # Snapshot so children may be added/removed while we iterate.
            metrics = self._metrics.copy()
        for labels, metric in metrics.items():
            series_labels = list(zip(self._labelnames, labels))
            for suffix, sample_labels, value in metric._samples():
                yield (suffix, dict(series_labels + list(sample_labels.items())), value)
def _MetricWrapper(cls):
    '''Provides common functionality for metrics.'''

    # Replaces the decorated class with a factory that validates the name,
    # wires up describe()/collect(), wraps labelled metrics, and registers
    # the collector.
    def init(name, documentation, labelnames=(), namespace='', subsystem='', unit='', registry=REGISTRY, **kwargs):
        # Build the full metric name: namespace_subsystem_name[_unit].
        full_name = ''
        if namespace:
            full_name += namespace + '_'
        if subsystem:
            full_name += subsystem + '_'
        full_name += name
        if unit and not full_name.endswith("_" + unit):
            full_name += "_" + unit
        if unit and cls._type in ('info', 'stateset'):
            raise ValueError('Metric name is of a type that cannot have a unit: ' + full_name)

        if cls._type == 'counter' and full_name.endswith('_total'):
            full_name = full_name[:-6]  # Munge to OpenMetrics.

        if labelnames:
            labelnames = tuple(labelnames)
            for l in labelnames:
                if not _METRIC_LABEL_NAME_RE.match(l):
                    raise ValueError('Invalid label metric name: ' + l)
                if _RESERVED_METRIC_LABEL_NAME_RE.match(l):
                    raise ValueError('Reserved label metric name: ' + l)
                if l in cls._reserved_labelnames:
                    raise ValueError('Reserved label metric name: ' + l)
            # Labelled metric: children are created on demand via .labels().
            collector = _LabelWrapper(cls, full_name, labelnames, **kwargs)
        else:
            collector = cls(full_name, (), (), **kwargs)

        if not _METRIC_NAME_RE.match(full_name):
            raise ValueError('Invalid metric name: ' + full_name)

        def describe():
            return [Metric(full_name, documentation, cls._type)]
        collector.describe = describe

        def collect():
            metric = Metric(full_name, documentation, cls._type, unit)
            for suffix, labels, value in collector._samples():
                metric.add_sample(full_name + suffix, labels, value)
            return [metric]
        collector.collect = collect

        if registry:
            registry.register(collector)
        return collector

    # Keep the wrapped class reachable for introspection.
    init.__wrapped__ = cls
    return init
@_MetricWrapper
class Counter(object):
    '''A Counter tracks counts of events or running totals.

    Example use cases for Counters:

    - Number of requests processed
    - Number of items that were inserted into a queue
    - Total amount of data that a system has processed

    Counters can only go up (and be reset when the process restarts). If your use case can go down,
    you should use a Gauge instead.

    An example for a Counter:

        from prometheus_client import Counter

        c = Counter('my_failures_total', 'Description of counter')
        c.inc()     # Increment by 1
        c.inc(1.6)  # Increment by given value

    There are utilities to count exceptions raised:

        @c.count_exceptions()
        def f():
            pass

        with c.count_exceptions():
            pass

        # Count only one type of exception
        with c.count_exceptions(ValueError):
            pass
    '''
    _type = 'counter'
    _reserved_labelnames = []

    def __init__(self, name, labelnames, labelvalues):
        # Strip any pre-OpenMetrics '_total' suffix from the family name.
        base_name = name[:-6] if name.endswith('_total') else name
        self._value = _ValueClass(
            self._type, base_name, base_name + '_total', labelnames, labelvalues)
        self._created = time.time()

    def inc(self, amount=1):
        '''Increment counter by the given amount.'''
        if amount < 0:
            raise ValueError('Counters can only be incremented by non-negative amounts.')
        self._value.inc(amount)

    def count_exceptions(self, exception=Exception):
        '''Count exceptions in a block of code or function.

        Can be used as a function decorator or context manager.
        Increments the counter when an exception of the given
        type is raised up out of the code.
        '''
        return _ExceptionCounter(self, exception)

    def _samples(self):
        return (
            ('_total', {}, self._value.get()),
            ('_created', {}, self._created),
        )
@_MetricWrapper
class Gauge(object):
    '''Gauge metric, to report instantaneous values.

    Examples of Gauges include:

    - Inprogress requests
    - Number of items in a queue
    - Free memory
    - Total memory
    - Temperature

    Gauges can go both up and down.

        from prometheus_client import Gauge

        g = Gauge('my_inprogress_requests', 'Description of gauge')
        g.inc()     # Increment by 1
        g.dec(10)   # Decrement by given value
        g.set(4.2)  # Set to a given value

    There are utilities for common use cases:

        g.set_to_current_time()  # Set to current unixtime

        # Increment when entered, decrement when exited.
        @g.track_inprogress()
        def f():
            pass

        with g.track_inprogress():
            pass

    A Gauge can also take its value from a callback:

        d = Gauge('data_objects', 'Number of objects')
        my_dict = {}
        d.set_function(lambda: len(my_dict))
    '''
    _type = 'gauge'
    _reserved_labelnames = []
    _MULTIPROC_MODES = frozenset(('min', 'max', 'livesum', 'liveall', 'all'))

    def __init__(self, name, labelnames, labelvalues, multiprocess_mode='all'):
        # The aggregation mode only matters for multiprocess value storage.
        if _ValueClass._multiprocess and multiprocess_mode not in self._MULTIPROC_MODES:
            raise ValueError('Invalid multiprocess mode: ' + multiprocess_mode)
        self._value = _ValueClass(
            self._type, name, name, labelnames, labelvalues,
            multiprocess_mode=multiprocess_mode)

    def inc(self, amount=1):
        '''Increment gauge by the given amount.'''
        self._value.inc(amount)

    def dec(self, amount=1):
        '''Decrement gauge by the given amount.'''
        self._value.inc(-amount)

    def set(self, value):
        '''Set gauge to the given value.'''
        self._value.set(float(value))

    def set_to_current_time(self):
        '''Set gauge to the current unixtime.'''
        self.set(time.time())

    def track_inprogress(self):
        '''Track inprogress blocks of code or functions.

        Can be used as a function decorator or context manager.
        Increments the gauge when the code is entered,
        and decrements when it is exited.
        '''
        return _InprogressTracker(self)

    def time(self):
        '''Time a block of code or function, and set the duration in seconds.

        Can be used as a function decorator or context manager.
        '''
        return _Timer(self.set)

    def set_function(self, f):
        '''Call the provided function to return the Gauge value.

        The function must return a float, and may be called from
        multiple threads. All other methods of the Gauge become NOOPs.
        '''

        def callback_samples(self):
            return (('', {}, float(f())),)

        # Shadow the class-level _samples with a bound per-instance method.
        self._samples = types.MethodType(callback_samples, self)

    def _samples(self):
        return (('', {}, self._value.get()),)
@_MetricWrapper
class Summary(object):
    '''A Summary tracks the size and number of events.

    Example use cases for Summaries:

    - Response latency
    - Request size

    Example for a Summary:

        from prometheus_client import Summary

        s = Summary('request_size_bytes', 'Request size (bytes)')
        s.observe(512)  # Observe 512 (bytes)

    Example for a Summary using time:

        from prometheus_client import Summary

        REQUEST_TIME = Summary('response_latency_seconds', 'Response latency (seconds)')

        @REQUEST_TIME.time()
        def create_response(request):
            """A dummy function"""
            time.sleep(1)

    Example for using the same Summary object as a context manager:

        with REQUEST_TIME.time():
            pass  # Logic to be timed
    '''
    _type = 'summary'
    _reserved_labelnames = ['quantile']

    def __init__(self, name, labelnames, labelvalues):
        self._count = _ValueClass(self._type, name, name + '_count', labelnames, labelvalues)
        self._sum = _ValueClass(self._type, name, name + '_sum', labelnames, labelvalues)
        self._created = time.time()

    def observe(self, amount):
        '''Observe the given amount.'''
        self._count.inc(1)
        self._sum.inc(amount)

    def time(self):
        '''Time a block of code or function, and observe the duration in seconds.

        Can be used as a function decorator or context manager.
        '''
        return _Timer(self.observe)

    def _samples(self):
        return (
            ('_count', {}, self._count.get()),
            ('_sum', {}, self._sum.get()),
            ('_created', {}, self._created),
        )
def _floatToGoString(d):
    """Render a float the way Go's Prometheus client does (+Inf/-Inf/NaN)."""
    if d == _INF:
        return '+Inf'
    if d == _MINUS_INF:
        return '-Inf'
    if math.isnan(d):
        return 'NaN'
    return repr(float(d))
@_MetricWrapper
class Histogram(object):
    '''A Histogram tracks the size and number of events in buckets.

    You can use Histograms for aggregatable calculation of quantiles.

    Example use cases:

    - Response latency
    - Request size

    Example for a Histogram:

        from prometheus_client import Histogram

        h = Histogram('request_size_bytes', 'Request size (bytes)')
        h.observe(512)  # Observe 512 (bytes)

    Example for a Histogram using time:

        from prometheus_client import Histogram

        REQUEST_TIME = Histogram('response_latency_seconds', 'Response latency (seconds)')

        @REQUEST_TIME.time()
        def create_response(request):
            """A dummy function"""
            time.sleep(1)

    Example of using the same Histogram object as a context manager:

        with REQUEST_TIME.time():
            pass  # Logic to be timed

    The default buckets are intended to cover a typical web/rpc request from milliseconds to seconds.
    They can be overridden by passing `buckets` keyword argument to `Histogram`.
    '''
    _type = 'histogram'
    _reserved_labelnames = ['le']

    def __init__(self, name, labelnames, labelvalues, buckets=(.005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 5.0, 7.5, 10.0, _INF)):
        self._created = time.time()
        self._sum = _ValueClass(self._type, name, name + '_sum', labelnames, labelvalues)
        buckets = [float(b) for b in buckets]
        if buckets != sorted(buckets):
            # This is probably an error on the part of the user,
            # so raise rather than sorting for them.
            raise ValueError('Buckets not in sorted order')
        # A +Inf bucket is mandatory; append one when missing.
        if buckets and buckets[-1] != _INF:
            buckets.append(_INF)
        if len(buckets) < 2:
            raise ValueError('Must have at least two buckets')
        self._upper_bounds = buckets
        # One *non-cumulative* counter per bucket; _samples() accumulates.
        self._buckets = []
        bucket_labelnames = labelnames + ('le',)
        for b in buckets:
            self._buckets.append(_ValueClass(
                self._type,
                name,
                name + '_bucket',
                bucket_labelnames,
                labelvalues + (_floatToGoString(b),),
            ))

    def observe(self, amount):
        '''Observe the given amount.'''
        self._sum.inc(amount)
        for i, bound in enumerate(self._upper_bounds):
            if amount <= bound:
                # Only the first matching bucket is incremented; cumulative
                # counts are reconstructed at sample time in _samples().
                self._buckets[i].inc(1)
                break

    def time(self):
        '''Time a block of code or function, and observe the duration in seconds.

        Can be used as a function decorator or context manager.
        '''
        return _Timer(self.observe)

    def _samples(self):
        samples = []
        # Running total turns per-bucket counts into cumulative '_bucket'
        # samples; the final accumulator is also the '_count' value.
        acc = 0
        for i, bound in enumerate(self._upper_bounds):
            acc += self._buckets[i].get()
            samples.append(('_bucket', {'le': _floatToGoString(bound)}, acc))
        samples.append(('_count', {}, acc))
        samples.append(('_sum', {}, self._sum.get()))
        samples.append(('_created', {}, self._created))
        return tuple(samples)
@_MetricWrapper
class Info(object):
    '''Info metric, key-value pairs.

    Examples of Info include:

    - Build information
    - Version information
    - Potential target metadata

    Example usage:

        from prometheus_client import Info

        i = Info('my_build', 'Description of info')
        i.info({'version': '1.2.3', 'buildhost': 'foo@bar'})

    Info metrics do not work in multiprocess mode.
    '''
    _type = 'info'
    _reserved_labelnames = []

    def __init__(self, name, labelnames, labelvalues):
        self._labelnames = set(labelnames)
        self._lock = Lock()
        self._value = {}

    def info(self, val):
        '''Set info metric.'''
        # The info keys become labels, so they must not clash with the
        # metric's own label names.
        overlap = self._labelnames.intersection(val.keys())
        if overlap:
            raise ValueError('Overlapping labels for Info metric, metric: %s child: %s' % (
                self._labelnames, val))
        with self._lock:
            self._value = dict(val)

    def _samples(self):
        with self._lock:
            return (('_info', self._value, 1.0,),)
@_MetricWrapper
class Enum(object):
    '''Enum metric, which of a set of states is true.

    Example usage:

        from prometheus_client import Enum

        e = Enum('task_state', 'Description of enum',
                 states=['starting', 'running', 'stopped'])
        e.state('running')

    The first listed state will be the default.
    Enum metrics do not work in multiprocess mode.
    '''
    _type = 'stateset'
    _reserved_labelnames = []

    def __init__(self, name, labelnames, labelvalues, states=None):
        if name in labelnames:
            raise ValueError('Overlapping labels for Enum metric: %s' % (name,))
        if not states:
            raise ValueError('No states provided for Enum metric: %s' % (name,))
        self._name = name
        self._states = states
        # Index into self._states of the currently-active state.
        self._value = 0
        self._lock = Lock()

    def state(self, state):
        '''Set enum metric state.'''
        with self._lock:
            # list.index raises ValueError for an unknown state.
            self._value = self._states.index(state)

    def _samples(self):
        with self._lock:
            samples = []
            for index, state in enumerate(self._states):
                samples.append(('', {self._name: state}, 1 if index == self._value else 0))
            return samples
class _ExceptionCounter(object):
    """Context manager / decorator counting raised exceptions of a type."""

    def __init__(self, counter, exception):
        self._counter = counter
        self._exception = exception

    def __enter__(self):
        pass

    def __exit__(self, typ, value, traceback):
        # Count only the tracked exception type; the implicit None return
        # lets the exception keep propagating.
        if isinstance(value, self._exception):
            self._counter.inc()

    def __call__(self, f):
        def _count_and_call(func, *args, **kwargs):
            with self:
                return func(*args, **kwargs)
        return decorate(f, _count_and_call)
class _InprogressTracker(object):
    """Context manager / decorator bumping a gauge while code runs."""

    def __init__(self, gauge):
        self._gauge = gauge

    def __enter__(self):
        self._gauge.inc()

    def __exit__(self, typ, value, traceback):
        self._gauge.dec()

    def __call__(self, f):
        def _track_and_call(func, *args, **kwargs):
            with self:
                return func(*args, **kwargs)
        return decorate(f, _track_and_call)
class _Timer(object):
    """Context manager / decorator reporting elapsed seconds to a callback."""

    def __init__(self, callback):
        self._callback = callback

    def _new_timer(self):
        return self.__class__(self._callback)

    def __enter__(self):
        self._start = default_timer()

    def __exit__(self, typ, value, traceback):
        # Clamp at zero: the clock can move backwards.
        elapsed = default_timer() - self._start
        self._callback(max(elapsed, 0))

    def __call__(self, f):
        def _timed_call(func, *args, **kwargs):
            # A fresh timer per invocation keeps the decorator thread-safe
            # and re-entrant.
            with self._new_timer():
                return func(*args, **kwargs)
        return decorate(f, _timed_call)
| 33.302343 | 142 | 0.599582 |
from __future__ import unicode_literals
from collections import namedtuple
import copy
import json
import math
import mmap
import os
import re
import struct
import sys
from threading import Lock
import time
from timeit import default_timer
import types
from .decorator import decorate
# Python 3 has no ``unicode`` builtin; alias it to ``str`` so the label-value
# coercion below works on both major versions.
if sys.version_info > (3,):
    unicode = str
# Legal metric and label names per the Prometheus data model.
_METRIC_NAME_RE = re.compile(r'^[a-zA-Z_:][a-zA-Z0-9_:]*$')
_METRIC_LABEL_NAME_RE = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')
# Label names beginning with ``__`` are reserved for internal use.
_RESERVED_METRIC_LABEL_NAME_RE = re.compile(r'^__.*$')
_INF = float("inf")
_MINUS_INF = float("-inf")
# Initial size of the multiprocess mmap files (1 MiB); doubled on demand.
_INITIAL_MMAP_SIZE = 1 << 20
# Pre-compiled struct helpers for the mmap value format: int32 length
# headers and float64 values.
_pack_integer_func = struct.Struct(b'i').pack
_pack_double_func = struct.Struct(b'd').pack
_unpack_integer = struct.Struct(b'i').unpack_from
_unpack_double = struct.Struct(b'd').unpack_from
# A single sample within a metric; ``timestamp`` and ``exemplar`` default to
# None via __defaults__ below.
Sample = namedtuple('Sample', ['name', 'labels', 'value', 'timestamp', 'exemplar'])
Sample.__new__.__defaults__ = (None, None)
class Timestamp(object):
    '''A nanosecond-resolution timestamp: whole seconds plus nanoseconds.

    ``nsec`` must be in ``[0, 1e9)``.  For negative timestamps the
    nanosecond component is stored negated, so ``__float__`` stays a
    simple sum.
    '''
    def __init__(self, sec, nsec):
        if nsec < 0 or nsec >= 1e9:
            raise ValueError("Invalid value for nanoseconds in Timestamp: {}".format(nsec))
        if sec < 0:
            nsec = -nsec
        self.sec = int(sec)
        self.nsec = int(nsec)
    def __str__(self):
        return "{0}.{1:09d}".format(self.sec, self.nsec)
    def __repr__(self):
        return "Timestamp({0}, {1})".format(self.sec, self.nsec)
    def __float__(self):
        return float(self.sec) + float(self.nsec) / 1e9
    def __eq__(self, other):
        return type(self) == type(other) and self.sec == other.sec and self.nsec == other.nsec
    def __ne__(self, other):
        return not self == other
    def __gt__(self, other):
        # Bug fix: compare lexicographically on (sec, nsec).  The previous
        # ``self.sec > other.sec or self.nsec > other.nsec`` wrongly returned
        # True for cases like Timestamp(1, 5) > Timestamp(2, 1).
        return self.sec > other.sec or (
            self.sec == other.sec and self.nsec > other.nsec)
# An exemplar attached to a histogram bucket sample; ``timestamp`` defaults
# to None via __defaults__ below.
Exemplar = namedtuple('Exemplar', ['labels', 'value', 'timestamp'])
Exemplar.__new__.__defaults__ = (None,)
class CollectorRegistry(object):
    '''Registry of collectors whose metrics are exposed together.

    Collector objects must provide a ``collect`` method returning an
    iterable of Metric objects; registration enforces that no two
    collectors claim the same timeseries names.
    '''
    def __init__(self, auto_describe=False):
        # Bidirectional maps between collectors and the metric names they
        # claim; both are guarded by ``_lock``.
        self._collector_to_names = {}
        self._names_to_collectors = {}
        self._auto_describe = auto_describe
        self._lock = Lock()
    def register(self, collector):
        '''Add a collector, rejecting duplicated timeseries names.'''
        with self._lock:
            names = self._get_names(collector)
            duplicates = set(self._names_to_collectors).intersection(names)
            if duplicates:
                raise ValueError(
                    'Duplicated timeseries in CollectorRegistry: {0}'.format(
                        duplicates))
            for name in names:
                self._names_to_collectors[name] = collector
            self._collector_to_names[collector] = names
    def unregister(self, collector):
        '''Remove a collector; raises KeyError if it was never registered.'''
        with self._lock:
            for name in self._collector_to_names[collector]:
                del self._names_to_collectors[name]
            del self._collector_to_names[collector]
    def _get_names(self, collector):
        '''Return all timeseries names the collector may expose.

        Prefers the collector's ``describe`` method; falls back to
        ``collect`` when ``auto_describe`` is enabled, else returns [].
        '''
        desc_func = None
        try:
            desc_func = collector.describe
        except AttributeError:
            pass
        # Otherwise, if auto describe is enabled use the collect function.
        if not desc_func and self._auto_describe:
            desc_func = collector.collect
        if not desc_func:
            return []
        result = []
        # Per-type sample-name suffixes defined by the exposition formats.
        type_suffixes = {
            'counter': ['_total', '_created'],
            'summary': ['', '_sum', '_count', '_created'],
            'histogram': ['_bucket', '_sum', '_count', '_created'],
            'gaugehistogram': ['_bucket', '_gsum', '_gcount'],
            'info': ['_info'],
        }
        for metric in desc_func():
            for suffix in type_suffixes.get(metric.type, ['']):
                result.append(metric.name + suffix)
        return result
    def collect(self):
        '''Yield metrics from every registered collector.'''
        collectors = None
        with self._lock:
            # Snapshot under the lock so collectors can (un)register while
            # collection is in progress.
            collectors = copy.copy(self._collector_to_names)
        for collector in collectors:
            for metric in collector.collect():
                yield metric
    def restricted_registry(self, names):
        '''Return a read-only registry view limited to the given sample names.'''
        names = set(names)
        collectors = set()
        with self._lock:
            for name in names:
                if name in self._names_to_collectors:
                    collectors.add(self._names_to_collectors[name])
        metrics = []
        for collector in collectors:
            for metric in collector.collect():
                samples = [s for s in metric.samples if s[0] in names]
                if samples:
                    m = Metric(metric.name, metric.documentation, metric.type)
                    m.samples = samples
                    metrics.append(m)
        # The returned object captures the metrics collected above; it is a
        # static snapshot, not a live view.
        class RestrictedRegistry(object):
            def collect(self):
                return metrics
        return RestrictedRegistry()
    def get_sample_value(self, name, labels=None):
        '''Return the value of a single sample, or None if absent.

        Intended primarily for use in unit tests.
        '''
        if labels is None:
            labels = {}
        for metric in self.collect():
            for s in metric.samples:
                if s.name == name and s.labels == labels:
                    return s.value
        return None
# Default global registry; auto_describe lets collectors without an explicit
# ``describe`` method be registered by calling ``collect`` instead.
REGISTRY = CollectorRegistry(auto_describe=True)
# Metric types accepted by ``Metric`` (OpenMetrics vocabulary).
_METRIC_TYPES = (
    'counter', 'gauge', 'summary', 'histogram',
    'gaugehistogram', 'unknown', 'info', 'stateset',
)
class Metric(object):
    '''A single metric family and its samples.

    Custom collectors should normally use CounterMetricFamily,
    GaugeMetricFamily etc. rather than constructing Metric directly.
    '''
    def __init__(self, name, documentation, typ, unit=''):
        # Units are encoded as a metric-name suffix (OpenMetrics convention).
        if unit and not name.endswith("_" + unit):
            name += "_" + unit
        if not _METRIC_NAME_RE.match(name):
            raise ValueError('Invalid metric name: ' + name)
        self.name = name
        self.documentation = documentation
        self.unit = unit
        # 'untyped' is the legacy Prometheus spelling of 'unknown'.
        if typ == 'untyped':
            typ = 'unknown'
        if typ not in _METRIC_TYPES:
            raise ValueError('Invalid metric type: ' + typ)
        self.type = typ
        self.samples = []
    def add_sample(self, name, labels, value, timestamp=None, exemplar=None):
        '''Add a sample; ``name`` may carry a suffix such as ``_total``.'''
        self.samples.append(Sample(name, labels, value, timestamp, exemplar))
    def __eq__(self, other):
        return (isinstance(other, Metric) and
                self.name == other.name and
                self.documentation == other.documentation and
                self.type == other.type and
                self.unit == other.unit and
                self.samples == other.samples)
    def __repr__(self):
        return "Metric(%s, %s, %s, %s, %s)" % (
            self.name,
            self.documentation,
            self.type,
            self.unit,
            self.samples,
        )
class UnknownMetricFamily(Metric):
    '''A metric of unknown type, for use by custom collectors.

    Pass either a static ``value`` or ``labels`` (then call ``add_metric``
    per label set), but not both.
    '''
    def __init__(self, name, documentation, value=None, labels=None, unit=''):
        Metric.__init__(self, name, documentation, 'unknown', unit)
        if labels is not None and value is not None:
            raise ValueError('Can only specify at most one of value and labels.')
        if labels is None:
            labels = []
        self._labelnames = tuple(labels)
        if value is not None:
            self.add_metric([], value)
    def add_metric(self, labels, value, timestamp=None):
        '''Add a sample whose label values align with ``self._labelnames``.'''
        self.samples.append(Sample(self.name, dict(zip(self._labelnames, labels)), value, timestamp))
# For backward compatibility.
# 'untyped' was renamed 'unknown' in OpenMetrics; keep the old class name.
UntypedMetricFamily = UnknownMetricFamily
class CounterMetricFamily(Metric):
    '''A counter metric family, for use by custom collectors.'''
    def __init__(self, name, documentation, value=None, labels=None, created=None, unit=''):
        # Glue code for pre-OpenMetrics metrics.
        if name.endswith('_total'):
            name = name[:-6]
        Metric.__init__(self, name, documentation, 'counter', unit)
        if labels is not None and value is not None:
            raise ValueError('Can only specify at most one of value and labels.')
        if labels is None:
            labels = []
        self._labelnames = tuple(labels)
        if value is not None:
            self.add_metric([], value, created)
    def add_metric(self, labels, value, created=None, timestamp=None):
        '''Add a ``_total`` sample (plus ``_created`` when given).'''
        self.samples.append(Sample(self.name + '_total', dict(zip(self._labelnames, labels)), value, timestamp))
        if created is not None:
            self.samples.append(Sample(self.name + '_created', dict(zip(self._labelnames, labels)), created, timestamp))
class GaugeMetricFamily(Metric):
    '''A gauge metric family, for use by custom collectors.'''
    def __init__(self, name, documentation, value=None, labels=None, unit=''):
        Metric.__init__(self, name, documentation, 'gauge', unit)
        if labels is not None and value is not None:
            raise ValueError('Can only specify at most one of value and labels.')
        if labels is None:
            labels = []
        self._labelnames = tuple(labels)
        if value is not None:
            self.add_metric([], value)
    def add_metric(self, labels, value, timestamp=None):
        '''Add a sample whose label values align with ``self._labelnames``.'''
        self.samples.append(Sample(self.name, dict(zip(self._labelnames, labels)), value, timestamp))
class SummaryMetricFamily(Metric):
    '''A summary metric family (count and sum only), for custom collectors.'''
    def __init__(self, name, documentation, count_value=None, sum_value=None, labels=None, unit=''):
        Metric.__init__(self, name, documentation, 'summary', unit)
        # The pair must be given together, and is mutually exclusive with labels.
        if (sum_value is None) != (count_value is None):
            raise ValueError('count_value and sum_value must be provided together.')
        if labels is not None and count_value is not None:
            raise ValueError('Can only specify at most one of value and labels.')
        if labels is None:
            labels = []
        self._labelnames = tuple(labels)
        if count_value is not None:
            self.add_metric([], count_value, sum_value)
    def add_metric(self, labels, count_value, sum_value, timestamp=None):
        '''Add ``_count`` and ``_sum`` samples for one label set.'''
        self.samples.append(Sample(self.name + '_count', dict(zip(self._labelnames, labels)), count_value, timestamp))
        self.samples.append(Sample(self.name + '_sum', dict(zip(self._labelnames, labels)), sum_value, timestamp))
class HistogramMetricFamily(Metric):
    '''A histogram metric family, for use by custom collectors.

    ``buckets`` is a list of (upper_bound, cumulative_count) pairs, with an
    optional third exemplar element; the ``+Inf`` bucket must come last.
    '''
    def __init__(self, name, documentation, buckets=None, sum_value=None, labels=None, unit=''):
        Metric.__init__(self, name, documentation, 'histogram', unit)
        if (sum_value is None) != (buckets is None):
            raise ValueError('buckets and sum_value must be provided together.')
        if labels is not None and buckets is not None:
            raise ValueError('Can only specify at most one of buckets and labels.')
        if labels is None:
            labels = []
        self._labelnames = tuple(labels)
        if buckets is not None:
            self.add_metric([], buckets, sum_value)
    def add_metric(self, labels, buckets, sum_value, timestamp=None):
        '''Add ``_bucket``, ``_count`` and ``_sum`` samples for one label set.'''
        for b in buckets:
            bucket, value = b[:2]
            exemplar = None
            # An optional third element is the bucket's exemplar.
            if len(b) == 3:
                exemplar = b[2]
            self.samples.append(Sample(
                self.name + '_bucket',
                dict(list(zip(self._labelnames, labels)) + [('le', bucket)]),
                value,
                timestamp,
                exemplar,
            ))
        # +Inf is last and provides the count value.
        self.samples.extend([
            Sample(self.name + '_count', dict(zip(self._labelnames, labels)), buckets[-1][1], timestamp),
            Sample(self.name + '_sum', dict(zip(self._labelnames, labels)), sum_value, timestamp),
        ])
class GaugeHistogramMetricFamily(Metric):
    '''A gauge-histogram metric family, for use by custom collectors.

    ``buckets`` is a list of (upper_bound, cumulative_count) pairs; the
    ``+Inf`` bucket must come last.
    '''
    def __init__(self, name, documentation, buckets=None, gsum_value=None, labels=None, unit=''):
        Metric.__init__(self, name, documentation, 'gaugehistogram', unit)
        if labels is not None and buckets is not None:
            raise ValueError('Can only specify at most one of buckets and labels.')
        if labels is None:
            labels = []
        self._labelnames = tuple(labels)
        if buckets is not None:
            self.add_metric([], buckets, gsum_value)
    def add_metric(self, labels, buckets, gsum_value, timestamp=None):
        '''Add ``_bucket``, ``_gcount`` and ``_gsum`` samples for one label set.'''
        for bucket, value in buckets:
            self.samples.append(Sample(
                self.name + '_bucket',
                dict(list(zip(self._labelnames, labels)) + [('le', bucket)]),
                value, timestamp))
        # +Inf is last and provides the count value.
        self.samples.extend([
            Sample(self.name + '_gcount', dict(zip(self._labelnames, labels)), buckets[-1][1], timestamp),
            Sample(self.name + '_gsum', dict(zip(self._labelnames, labels)), gsum_value, timestamp),
        ])
class InfoMetricFamily(Metric):
    '''An info metric family: a constant-1 sample carrying key/value metadata.'''
    def __init__(self, name, documentation, value=None, labels=None):
        Metric.__init__(self, name, documentation, 'info')
        if labels is not None and value is not None:
            raise ValueError('Can only specify at most one of value and labels.')
        if labels is None:
            labels = []
        self._labelnames = tuple(labels)
        if value is not None:
            self.add_metric([], value)
    def add_metric(self, labels, value, timestamp=None):
        '''Add an ``_info`` sample merging the label set with ``value``'s pairs.'''
        self.samples.append(Sample(
            self.name + '_info',
            dict(dict(zip(self._labelnames, labels)), **value),
            1,
            timestamp,
        ))
class StateSetMetricFamily(Metric):
    '''A stateset metric family: one 0/1 sample per state.'''
    def __init__(self, name, documentation, value=None, labels=None):
        Metric.__init__(self, name, documentation, 'stateset')
        if labels is not None and value is not None:
            raise ValueError('Can only specify at most one of value and labels.')
        if labels is None:
            labels = []
        self._labelnames = tuple(labels)
        if value is not None:
            self.add_metric([], value)
    def add_metric(self, labels, value, timestamp=None):
        '''Add samples from ``value``, a mapping of state name -> bool.'''
        labels = tuple(labels)
        # Sort for a deterministic sample order; the state name is encoded
        # as an extra label whose name is the metric name itself.
        for state, enabled in sorted(value.items()):
            v = (1 if enabled else 0)
            self.samples.append(Sample(
                self.name,
                dict(zip(self._labelnames + (self.name,), labels + (state,))),
                v,
                timestamp,
            ))
class _MutexValue(object):
_multiprocess = False
def __init__(self, typ, metric_name, name, labelnames, labelvalues, **kwargs):
self._value = 0.0
self._lock = Lock()
def inc(self, amount):
with self._lock:
self._value += amount
def set(self, value):
with self._lock:
self._value = value
def get(self):
with self._lock:
return self._value
# struct.pack_into has atomicity issues because it will temporarily write 0 into
# the mmap, resulting in false reads to 0 when experiencing a lot of writes.
# Using direct assignment solves this issue.
def _pack_double(data, pos, value):
    # One slice assignment writes all 8 bytes of the IEEE-754 double at once,
    # so concurrent readers never observe a half-written value.
    data[pos:pos + 8] = _pack_double_func(value)
def _pack_integer(data, pos, value):
    # Write the 4-byte int with a single slice assignment (see note above).
    data[pos:pos + 4] = _pack_integer_func(value)
class _MmapedDict(object):
    '''A dict of doubles, backed by an mmapped file.

    The file begins with a 4-byte native int recording how many bytes are
    used; the header occupies 8 bytes so entry values stay 8-byte aligned.
    Each entry is a 4-byte encoded-key length, the UTF-8 key padded with
    spaces to keep alignment, then the value as an 8-byte double.
    '''
    def __init__(self, filename, read_mode=False):
        self._f = open(filename, 'rb' if read_mode else 'a+b')
        if os.fstat(self._f.fileno()).st_size == 0:
            self._f.truncate(_INITIAL_MMAP_SIZE)
        self._capacity = os.fstat(self._f.fileno()).st_size
        self._m = mmap.mmap(self._f.fileno(), self._capacity, access=mmap.ACCESS_READ if read_mode else mmap.ACCESS_WRITE)
        self._positions = {}
        self._used = _unpack_integer(self._m, 0)[0]
        if self._used == 0:
            # Fresh file: reserve the 8-byte header region.
            self._used = 8
            _pack_integer(self._m, 0, self._used)
        else:
            if not read_mode:
                # Index existing entries so writes go to the right offsets.
                for key, _, pos in self._read_all_values():
                    self._positions[key] = pos
    def _init_value(self, key):
        '''Allocate and zero-initialize storage for ``key``.'''
        encoded = key.encode('utf-8')
        # Pad to be 8-byte aligned.
        padded = encoded + (b' ' * (8 - (len(encoded) + 4) % 8))
        value = struct.pack('i{0}sd'.format(len(padded)).encode(), len(encoded), padded, 0.0)
        while self._used + len(value) > self._capacity:
            # Grow the file geometrically and re-map it.
            self._capacity *= 2
            self._f.truncate(self._capacity)
            self._m = mmap.mmap(self._f.fileno(), self._capacity)
        self._m[self._used:self._used + len(value)] = value
        # Update how much space we've used.
        self._used += len(value)
        _pack_integer(self._m, 0, self._used)
        self._positions[key] = self._used - 8
    def _read_all_values(self):
        '''Yield (key, value, position-of-value) for every entry.'''
        pos = 8
        # Local aliases keep attribute lookups out of the loop.
        used = self._used
        data = self._m
        unpack_from = struct.unpack_from
        while pos < used:
            encoded_len = _unpack_integer(data, pos)[0]
            pos += 4
            encoded = unpack_from(('%ss' % encoded_len).encode(), data, pos)[0]
            padded_len = encoded_len + (8 - (encoded_len + 4) % 8)
            pos += padded_len
            value = _unpack_double(data, pos)[0]
            yield encoded.decode('utf-8'), value, pos
            pos += 8
    def read_all_values(self):
        '''Yield (key, value) pairs for every entry in the file.'''
        for k, v, _ in self._read_all_values():
            yield k, v
    def read_value(self, key):
        '''Return the value for ``key``, initializing it to 0.0 if new.'''
        if key not in self._positions:
            self._init_value(key)
        pos = self._positions[key]
        return _unpack_double(self._m, pos)[0]
    def write_value(self, key, value):
        '''Write ``value`` for ``key``, initializing the slot if new.'''
        if key not in self._positions:
            self._init_value(key)
        pos = self._positions[key]
        _pack_double(self._m, pos, value)
    def close(self):
        '''Unmap and close the backing file (safe to call twice).'''
        if self._f:
            self._m.close()
            self._m = None
            self._f.close()
            self._f = None
def _mmap_key(metric_name, name, labelnames, labelvalues):
labels = dict(zip(labelnames, labelvalues))
return json.dumps([metric_name, name, labels], sort_keys=True)
def _MultiProcessValue(_pidFunc=os.getpid):
    '''Return a value class whose state lives in per-process mmapped files.

    ``_pidFunc`` is injectable for tests.  All instances of the returned
    class share the ``files``/``values`` caches below, which are rebuilt
    whenever the observed process id changes (i.e. after a fork).
    '''
    files = {}
    values = []
    pid = {'value': _pidFunc()}
    # A single lock serializes all mmap reads/writes in this process.
    lock = Lock()
    class _MmapedValue(object):
        '''A float backed by a per-process mmapped file.'''
        _multiprocess = True
        def __init__(self, typ, metric_name, name, labelnames, labelvalues, multiprocess_mode='', **kwargs):
            # Keep the constructor arguments so the value can re-attach to a
            # new file after a pid change.
            self._params = typ, metric_name, name, labelnames, labelvalues, multiprocess_mode
            with lock:
                self.__check_for_pid_change()
                self.__reset()
                values.append(self)
        def __reset(self):
            typ, metric_name, name, labelnames, labelvalues, multiprocess_mode = self._params
            if typ == 'gauge':
                # Gauge files are segregated by aggregation mode so the
                # reader side can combine them appropriately.
                file_prefix = typ + '_' + multiprocess_mode
            else:
                file_prefix = typ
            if file_prefix not in files:
                filename = os.path.join(
                    os.environ['prometheus_multiproc_dir'],
                    '{0}_{1}.db'.format(file_prefix, pid['value']))
                files[file_prefix] = _MmapedDict(filename)
            self._file = files[file_prefix]
            self._key = _mmap_key(metric_name, name, labelnames, labelvalues)
            self._value = self._file.read_value(self._key)
        def __check_for_pid_change(self):
            # After a fork the child must not keep writing to the parent's
            # files: close everything and re-open under the new pid.
            actual_pid = _pidFunc()
            if pid['value'] != actual_pid:
                pid['value'] = actual_pid
                for f in files.values():
                    f.close()
                files.clear()
                for value in values:
                    value.__reset()
        def inc(self, amount):
            with lock:
                self.__check_for_pid_change()
                self._value += amount
                self._file.write_value(self._key, self._value)
        def set(self, value):
            with lock:
                self.__check_for_pid_change()
                self._value = value
                self._file.write_value(self._key, self._value)
        def get(self):
            with lock:
                self.__check_for_pid_change()
                return self._value
    return _MmapedValue
# Select the value implementation at import time: file-backed when
# multiprocess mode is configured via the environment, else in-memory.
if 'prometheus_multiproc_dir' in os.environ:
    _ValueClass = _MultiProcessValue()
else:
    _ValueClass = _MutexValue
class _LabelWrapper(object):
    '''Handles the life cycle of labeled metric children.

    Created by ``_MetricWrapper`` when a metric is declared with label
    names; ``labels()`` lazily creates one child metric per label-value
    combination.
    '''
    def __init__(self, wrappedClass, name, labelnames, **kwargs):
        self._wrappedClass = wrappedClass
        self._type = wrappedClass._type
        self._name = name
        self._labelnames = labelnames
        self._kwargs = kwargs
        self._lock = Lock()
        self._metrics = {}
        for l in labelnames:
            # '__'-prefixed label names are reserved for internal use.
            if l.startswith('__'):
                raise ValueError('Invalid label metric name: ' + l)
    def labels(self, *labelvalues, **labelkwargs):
        '''Return the child metric for the given label values, creating it
        on first use.  Values may be given positionally or by keyword, but
        not both; all are coerced to strings.
        '''
        if labelvalues and labelkwargs:
            raise ValueError("Can't pass both *args and **kwargs")
        if labelkwargs:
            if sorted(labelkwargs) != sorted(self._labelnames):
                raise ValueError('Incorrect label names')
            labelvalues = tuple(unicode(labelkwargs[l]) for l in self._labelnames)
        else:
            if len(labelvalues) != len(self._labelnames):
                raise ValueError('Incorrect label count')
            labelvalues = tuple(unicode(l) for l in labelvalues)
        with self._lock:
            if labelvalues not in self._metrics:
                self._metrics[labelvalues] = self._wrappedClass(
                    self._name,
                    self._labelnames,
                    labelvalues,
                    **self._kwargs
                )
            return self._metrics[labelvalues]
    def remove(self, *labelvalues):
        '''Remove the child for the given label values; KeyError if absent.'''
        if len(labelvalues) != len(self._labelnames):
            raise ValueError('Incorrect label count')
        labelvalues = tuple(unicode(l) for l in labelvalues)
        with self._lock:
            del self._metrics[labelvalues]
    def _samples(self):
        with self._lock:
            # Copy so children can be added/removed during iteration.
            metrics = self._metrics.copy()
        for labels, metric in metrics.items():
            series_labels = list(zip(self._labelnames, labels))
            for suffix, sample_labels, value in metric._samples():
                yield (suffix, dict(series_labels + list(sample_labels.items())), value)
def _MetricWrapper(cls):
    '''Class decorator used on Counter/Gauge/etc.

    Replaces the class with a factory ``init`` that validates the metric
    name/labels, wraps labeled metrics in ``_LabelWrapper``, attaches
    ``describe``/``collect`` methods, and registers the collector.
    '''
    def init(name, documentation, labelnames=(), namespace='', subsystem='', unit='', registry=REGISTRY, **kwargs):
        full_name = ''
        if namespace:
            full_name += namespace + '_'
        if subsystem:
            full_name += subsystem + '_'
        full_name += name
        # Units become a name suffix; info/stateset metrics cannot have one.
        if unit and not full_name.endswith("_" + unit):
            full_name += "_" + unit
        if unit and cls._type in ('info', 'stateset'):
            raise ValueError('Metric name is of a type that cannot have a unit: ' + full_name)
        if cls._type == 'counter' and full_name.endswith('_total'):
            full_name = full_name[:-6]  # Munge to OpenMetrics.
        if labelnames:
            labelnames = tuple(labelnames)
            for l in labelnames:
                if not _METRIC_LABEL_NAME_RE.match(l):
                    raise ValueError('Invalid label metric name: ' + l)
                if _RESERVED_METRIC_LABEL_NAME_RE.match(l):
                    raise ValueError('Reserved label metric name: ' + l)
                if l in cls._reserved_labelnames:
                    raise ValueError('Reserved label metric name: ' + l)
            collector = _LabelWrapper(cls, full_name, labelnames, **kwargs)
        else:
            collector = cls(full_name, (), (), **kwargs)
        if not _METRIC_NAME_RE.match(full_name):
            raise ValueError('Invalid metric name: ' + full_name)
        def describe():
            return [Metric(full_name, documentation, cls._type)]
        collector.describe = describe
        def collect():
            metric = Metric(full_name, documentation, cls._type, unit)
            for suffix, labels, value in collector._samples():
                metric.add_sample(full_name + suffix, labels, value)
            return [metric]
        collector.collect = collect
        if registry:
            registry.register(collector)
        return collector
    # Expose the wrapped class for introspection, mirroring functools.wraps.
    init.__wrapped__ = cls
    return init
@_MetricWrapper
class Counter(object):
    '''A cumulative metric that only goes up; it resets on process restart.'''
    _type = 'counter'
    _reserved_labelnames = []
    def __init__(self, name, labelnames, labelvalues):
        # The sample is exposed with a ``_total`` suffix; strip the suffix
        # from the base name if the caller already included it.
        if name.endswith('_total'):
            name = name[:-6]
        self._value = _ValueClass(self._type, name, name + '_total', labelnames, labelvalues)
        self._created = time.time()
    def inc(self, amount=1):
        '''Increment counter by the given amount (must be non-negative).'''
        if amount < 0:
            raise ValueError('Counters can only be incremented by non-negative amounts.')
        self._value.inc(amount)
    def count_exceptions(self, exception=Exception):
        '''Count exceptions of the given type raised out of a block of code.

        Usable as a function decorator or a context manager.
        '''
        return _ExceptionCounter(self, exception)
    def _samples(self):
        return (
            ('_total', {}, self._value.get()),
            ('_created', {}, self._created),
        )
@_MetricWrapper
class Gauge(object):
    '''A metric that can arbitrarily go up and down.'''
    _type = 'gauge'
    _reserved_labelnames = []
    # Aggregation modes accepted in multiprocess deployments.
    _MULTIPROC_MODES = frozenset(('min', 'max', 'livesum', 'liveall', 'all'))
    def __init__(self, name, labelnames, labelvalues, multiprocess_mode='all'):
        if (_ValueClass._multiprocess and
                multiprocess_mode not in self._MULTIPROC_MODES):
            raise ValueError('Invalid multiprocess mode: ' + multiprocess_mode)
        self._value = _ValueClass(
            self._type, name, name, labelnames, labelvalues,
            multiprocess_mode=multiprocess_mode)
    def inc(self, amount=1):
        '''Increment gauge by the given amount.'''
        self._value.inc(amount)
    def dec(self, amount=1):
        '''Decrement gauge by the given amount.'''
        self._value.inc(-amount)
    def set(self, value):
        '''Set gauge to the given value.'''
        self._value.set(float(value))
    def set_to_current_time(self):
        '''Set gauge to the current unixtime.'''
        self.set(time.time())
    def track_inprogress(self):
        '''Track in-progress calls in a block of code or function.'''
        return _InprogressTracker(self)
    def time(self):
        '''Time a block of code or function, setting the gauge to its duration.'''
        return _Timer(self.set)
    def set_function(self, f):
        '''Report the return value of ``f`` at collection time instead of a
        stored value.  ``f`` must be thread-safe if used from threads.'''
        def samples(self):
            return (('', {}, float(f())),)
        # Override the bound method on this instance only.
        self._samples = types.MethodType(samples, self)
    def _samples(self):
        return (('', {}, self._value.get()),)
@_MetricWrapper
class Summary(object):
    '''Tracks the count and total of observations (no quantiles here).'''
    _type = 'summary'
    # 'quantile' is reserved because the exposition format uses it.
    _reserved_labelnames = ['quantile']
    def __init__(self, name, labelnames, labelvalues):
        self._count = _ValueClass(self._type, name, name + '_count', labelnames, labelvalues)
        self._sum = _ValueClass(self._type, name, name + '_sum', labelnames, labelvalues)
        self._created = time.time()
    def observe(self, amount):
        '''Record a single observation.'''
        self._count.inc(1)
        self._sum.inc(amount)
    def time(self):
        '''Time a block of code or function, observing the duration.'''
        return _Timer(self.observe)
    def _samples(self):
        return (
            ('_count', {}, self._count.get()),
            ('_sum', {}, self._sum.get()),
            ('_created', {}, self._created))
def _floatToGoString(d):
    '''Render a float the way Go does, matching the exposition formats
    ('+Inf', '-Inf', 'NaN', otherwise the repr of the float).'''
    if d == _INF:
        return '+Inf'
    if d == _MINUS_INF:
        return '-Inf'
    if math.isnan(d):
        return 'NaN'
    # repr() of a float is the shortest string that round-trips exactly.
    return repr(float(d))
@_MetricWrapper
class Histogram(object):
    '''Observations counted into configurable cumulative buckets.

    Each bucket value is stored non-cumulatively; ``_samples`` accumulates
    them in order so the exposed ``le`` buckets are cumulative.
    '''
    _type = 'histogram'
    # 'le' is reserved: it is the bucket upper-bound label.
    _reserved_labelnames = ['le']
    def __init__(self, name, labelnames, labelvalues, buckets=(.005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 5.0, 7.5, 10.0, _INF)):
        self._created = time.time()
        self._sum = _ValueClass(self._type, name, name + '_sum', labelnames, labelvalues)
        buckets = [float(b) for b in buckets]
        if buckets != sorted(buckets):
            # This is probably an error on the part of the user,
            # so raise rather than sorting for them.
            raise ValueError('Buckets not in sorted order')
        # Guarantee a terminal +Inf bucket so every observation lands somewhere.
        if buckets and buckets[-1] != _INF:
            buckets.append(_INF)
        if len(buckets) < 2:
            raise ValueError('Must have at least two buckets')
        self._upper_bounds = buckets
        self._buckets = []
        bucket_labelnames = labelnames + ('le',)
        for b in buckets:
            self._buckets.append(_ValueClass(
                self._type,
                name,
                name + '_bucket',
                bucket_labelnames,
                labelvalues + (_floatToGoString(b),),
            ))
    def observe(self, amount):
        '''Record an observation in the first bucket whose bound fits it.'''
        self._sum.inc(amount)
        for i, bound in enumerate(self._upper_bounds):
            if amount <= bound:
                self._buckets[i].inc(1)
                break
    def time(self):
        '''Time a block of code or function, observing the duration.'''
        return _Timer(self.observe)
    def _samples(self):
        samples = []
        # Accumulate so the exposed buckets are cumulative; the final
        # accumulator doubles as the total observation count.
        acc = 0
        for i, bound in enumerate(self._upper_bounds):
            acc += self._buckets[i].get()
            samples.append(('_bucket', {'le': _floatToGoString(bound)}, acc))
        samples.append(('_count', {}, acc))
        samples.append(('_sum', {}, self._sum.get()))
        samples.append(('_created', {}, self._created))
        return tuple(samples)
@_MetricWrapper
class Info(object):
    '''Key/value metadata exposed as a constant-1 ``_info`` sample.'''
    _type = 'info'
    _reserved_labelnames = []
    def __init__(self, name, labelnames, labelvalues):
        self._labelnames = set(labelnames)
        self._lock = Lock()
        self._value = {}
    def info(self, val):
        '''Replace the info payload; its keys must not collide with the
        metric's own label names.'''
        if self._labelnames.intersection(val.keys()):
            raise ValueError('Overlapping labels for Info metric, metric: %s child: %s' % (
                self._labelnames, val))
        with self._lock:
            # Copy so later caller-side mutation cannot leak in.
            self._value = dict(val)
    def _samples(self):
        with self._lock:
            return (('_info', self._value, 1.0,),)
@_MetricWrapper
class Enum(object):
    '''A metric that is in exactly one of a fixed set of states.'''
    _type = 'stateset'
    _reserved_labelnames = []
    def __init__(self, name, labelnames, labelvalues, states=None):
        # The state is encoded as a label named after the metric itself, so
        # the metric name must not also be a user label.
        if name in labelnames:
            raise ValueError('Overlapping labels for Enum metric: %s' % (name,))
        if not states:
            raise ValueError('No states provided for Enum metric: %s' % (name,))
        self._name = name
        self._states = states
        # Index into ``_states`` of the currently active state.
        self._value = 0
        self._lock = Lock()
    def state(self, state):
        '''Set enum metric state; ValueError for an unknown state.'''
        with self._lock:
            self._value = self._states.index(state)
    def _samples(self):
        # One sample per state: 1 for the active state, 0 for the rest.
        with self._lock:
            return [
                ('', {self._name: s}, 1 if i == self._value else 0,)
                for i, s
                in enumerate(self._states)
            ]
class _ExceptionCounter(object):
def __init__(self, counter, exception):
self._counter = counter
self._exception = exception
def __enter__(self):
pass
def __exit__(self, typ, value, traceback):
if isinstance(value, self._exception):
self._counter.inc()
def __call__(self, f):
def wrapped(func, *args, **kwargs):
with self:
return func(*args, **kwargs)
return decorate(f, wrapped)
class _InprogressTracker(object):
def __init__(self, gauge):
self._gauge = gauge
def __enter__(self):
self._gauge.inc()
def __exit__(self, typ, value, traceback):
self._gauge.dec()
def __call__(self, f):
def wrapped(func, *args, **kwargs):
with self:
return func(*args, **kwargs)
return decorate(f, wrapped)
class _Timer(object):
def __init__(self, callback):
self._callback = callback
def _new_timer(self):
return self.__class__(self._callback)
def __enter__(self):
self._start = default_timer()
def __exit__(self, typ, value, traceback):
# Time can go backwards.
duration = max(default_timer() - self._start, 0)
self._callback(duration)
def __call__(self, f):
def wrapped(func, *args, **kwargs):
# Obtaining new instance of timer every time
# ensures thread safety and reentrancy.
with self._new_timer():
return func(*args, **kwargs)
return decorate(f, wrapped)
| true | true |
f730e0c9b1f31a3eee78a5557358da4420318669 | 4,761 | py | Python | sauronlab/model/audio.py | dmyersturnbull/sauronlab | c458b622efd3cb70c2547ec64ea9d11c293ab8f6 | [
"Apache-2.0"
] | null | null | null | sauronlab/model/audio.py | dmyersturnbull/sauronlab | c458b622efd3cb70c2547ec64ea9d11c293ab8f6 | [
"Apache-2.0"
] | 73 | 2021-02-03T21:32:44.000Z | 2022-03-28T05:14:24.000Z | sauronlab/model/audio.py | dmyersturnbull/sauronlab | c458b622efd3cb70c2547ec64ea9d11c293ab8f6 | [
"Apache-2.0"
] | null | null | null | from __future__ import annotations
from dataclasses import dataclass
import pydub
from sauronlab.core.core_imports import *
class AudioTools:
    """Static helpers for loading and saving audio via pydub."""
    @classmethod
    def save(
        cls, audio_segment: pydub.AudioSegment, path: PathLike, audio_format: str = "flac"
    ) -> None:
        """Exports ``audio_segment`` to ``path`` (parent dirs prepared first).

        Args:
            audio_segment: The audio to write
            path: Destination file path
            audio_format: Any format name pydub/ffmpeg accepts (default flac)
        """
        path = Tools.prepped_file(path)
        audio_segment.export(path, format=audio_format)
    @classmethod
    def load_pydub(cls, path: PathLike) -> pydub.AudioSegment:
        """Reads an audio file into a pydub AudioSegment."""
        path = str(Path(path))
        # TODO sample_width=2, frame_rate=44100, channels=1 ???
        # NOTE(review): pydub infers parameters from the file; confirm whether
        # explicit sample_width/frame_rate/channels are ever needed here.
        return pydub.AudioSegment.from_file(path)
@dataclass(frozen=True)
class Waveform:
    """
    Contains an array representing an audio waveform.

    Also has a sampling rate, a name, an optional description, and optional file path.
    ``start_ms``/``end_ms``, when set by ``slice_ms``, record the slice bounds.
    """

    name: str
    path: Optional[str]
    data: np.array
    sampling_rate: float
    minimum: Optional[float]
    maximum: Optional[float]
    description: Optional[str] = None
    start_ms: Optional[float] = None
    end_ms: Optional[float] = None

    @property
    def n_ms(self) -> float:
        """The duration of this waveform, in milliseconds."""
        return len(self.data) / self.sampling_rate * 1000

    def standardize(
        self, minimum: float = 0, maximum: float = 255, ms_freq: int = 1000
    ) -> Waveform:
        """
        Downsamples to ``ms_freq`` Hz (1000 by default) and normalizes values
        to between ``minimum`` and ``maximum``, rounding to 32-bit integers.

        This is useful for various purposes in Sauronlab, such as embedding into plots.

        Args:
            minimum: Lower bound of the output range; must be >= 0
            maximum: Upper bound of the output range; must be <= 255
            ms_freq: The new sampling rate, in Hz

        Returns:
            A new, standardized Waveform

        Raises:
            OutOfRangeError: If ``minimum`` or ``maximum`` is outside [0, 255]
        """
        if minimum < 0 or maximum > 255:
            raise OutOfRangeError("Must be between 0 and 255")
        y = self.downsample(ms_freq).data
        y = (y - y.min()) * (maximum - minimum) / (y.max() - y.min()) + minimum
        y = y.round().astype(np.int32)
        # Bug fix: record the actual new sampling rate (ms_freq) rather than a
        # hard-coded 1000; the two only coincide for the default argument.
        return Waveform(self.name, self.path, y, ms_freq, minimum, maximum, self.description)

    def normalize(self, minimum: float = -1, maximum: float = 1) -> Waveform:
        """
        Rescales values linearly so they span ``minimum`` to ``maximum``.

        Args:
            minimum: Normally -1
            maximum: Normally 1

        Returns:
            The same Waveform as a copy
        """
        y = (self.data - self.data.min()) * (maximum - minimum) / (
            self.data.max() - self.data.min()
        ) + minimum
        # Bug fix: this is routine progress information, not an error; log at
        # debug level instead of error.
        logger.debug(f"Normalized {self.name}. max={y.max()}, min={y.min()}")
        return Waveform(
            self.name, self.path, y, self.sampling_rate, minimum, maximum, self.description
        )

    def downsample(self, new_sampling_hertz: float) -> Waveform:
        """
        Downsamples to a new rate by averaging fixed-size chunks of samples.

        Args:
            new_sampling_hertz: A float such as 44100; must not exceed the
                current sampling rate

        Returns:
            The same Waveform as a copy

        Raises:
            OutOfRangeError: If ``new_sampling_hertz`` exceeds the current rate
        """
        t0 = time.monotonic()
        if new_sampling_hertz > self.sampling_rate:
            raise OutOfRangeError(
                f"New sampling rate is higher than current of {self.sampling_rate}"
            )
        chunk_size = int(self.sampling_rate / new_sampling_hertz)
        groups = [self.data[x : x + chunk_size] for x in range(0, len(self.data), chunk_size)]
        means = np.array([sum(group) / len(group) for group in groups])
        downsampled = Waveform(
            self.name,
            self.path,
            means,
            new_sampling_hertz,
            self.minimum,
            self.maximum,
            self.description,
        )
        logger.debug(f"Downsampling waveform ({self.name}) took {round(time.monotonic()-t0, 1)} s")
        return downsampled

    def slice_ms(self, start_ms: int, end_ms: int) -> Waveform:
        """
        Gets a section of the waveform between two time points.

        Args:
            start_ms: The start milliseconds
            end_ms: The end milliseconds

        Returns:
            The same Waveform as a copy
        """
        a = int(round(self.sampling_rate * start_ms / 1000))
        b = int(round(self.sampling_rate * end_ms / 1000))
        # NOTE(review): the start_ms/end_ms fields receive the *sample indices*
        # a and b, which equal milliseconds only at 1000 Hz — confirm intent.
        return Waveform(
            self.name,
            self.path,
            self.data[a:b],
            self.sampling_rate,
            self.minimum,
            self.maximum,
            self.description,
            a,
            b,
        )

    def __repr__(self):
        me = self.__class__.__name__
        sec = round(self.n_ms / 1000, 1)
        return (
            f"{me}({self.name} @ {self.sampling_rate}, n={len(self.data)}, {sec}s"
            + f" {self.minimum}-{self.maximum}"
        )

    def __str__(self):
        return repr(self)
__all__ = ["AudioTools", "Waveform"]
| 28.508982 | 99 | 0.568788 | from __future__ import annotations
from dataclasses import dataclass
import pydub
from sauronlab.core.core_imports import *
class AudioTools:
    """Static helpers for loading and saving audio via pydub."""
    @classmethod
    def save(
        cls, audio_segment: pydub.AudioSegment, path: PathLike, audio_format: str = "flac"
    ) -> None:
        """Exports ``audio_segment`` to ``path`` (parent dirs prepared first)."""
        path = Tools.prepped_file(path)
        audio_segment.export(path, format=audio_format)
    @classmethod
    def load_pydub(cls, path: PathLike) -> pydub.AudioSegment:
        """Reads an audio file into a pydub AudioSegment."""
        path = str(Path(path))
        return pydub.AudioSegment.from_file(path)
@dataclass(frozen=True)
class Waveform:
    """
    Contains an array representing an audio waveform.

    Also has a sampling rate, a name, an optional description, and optional file path.
    ``start_ms``/``end_ms``, when set by ``slice_ms``, record the slice bounds.
    """

    name: str
    path: Optional[str]
    data: np.array
    sampling_rate: float
    minimum: Optional[float]
    maximum: Optional[float]
    description: Optional[str] = None
    start_ms: Optional[float] = None
    end_ms: Optional[float] = None

    @property
    def n_ms(self) -> float:
        """The duration of this waveform, in milliseconds."""
        return len(self.data) / self.sampling_rate * 1000

    def standardize(
        self, minimum: float = 0, maximum: float = 255, ms_freq: int = 1000
    ) -> Waveform:
        """
        Downsamples to ``ms_freq`` Hz (1000 by default) and normalizes values
        to between ``minimum`` and ``maximum``, rounding to 32-bit integers.

        Args:
            minimum: Lower bound of the output range; must be >= 0
            maximum: Upper bound of the output range; must be <= 255
            ms_freq: The new sampling rate, in Hz

        Returns:
            A new, standardized Waveform

        Raises:
            OutOfRangeError: If ``minimum`` or ``maximum`` is outside [0, 255]
        """
        if minimum < 0 or maximum > 255:
            raise OutOfRangeError("Must be between 0 and 255")
        y = self.downsample(ms_freq).data
        y = (y - y.min()) * (maximum - minimum) / (y.max() - y.min()) + minimum
        y = y.round().astype(np.int32)
        # Bug fixes: this variant was missing its return statement entirely
        # (it returned None), and the sampling rate should be ms_freq rather
        # than a hard-coded 1000.
        return Waveform(self.name, self.path, y, ms_freq, minimum, maximum, self.description)

    def normalize(self, minimum: float = -1, maximum: float = 1) -> Waveform:
        """
        Rescales values linearly so they span ``minimum`` to ``maximum``.

        Returns:
            The same Waveform as a copy
        """
        y = (self.data - self.data.min()) * (maximum - minimum) / (
            self.data.max() - self.data.min()
        ) + minimum
        # Bug fix: routine progress information, not an error; log at debug.
        logger.debug(f"Normalized {self.name}. max={y.max()}, min={y.min()}")
        return Waveform(
            self.name, self.path, y, self.sampling_rate, minimum, maximum, self.description
        )

    def downsample(self, new_sampling_hertz: float) -> Waveform:
        """
        Downsamples to a new rate by averaging fixed-size chunks of samples.

        Args:
            new_sampling_hertz: A float such as 44100; must not exceed the
                current sampling rate

        Returns:
            The same Waveform as a copy

        Raises:
            OutOfRangeError: If ``new_sampling_hertz`` exceeds the current rate
        """
        t0 = time.monotonic()
        if new_sampling_hertz > self.sampling_rate:
            raise OutOfRangeError(
                f"New sampling rate is higher than current of {self.sampling_rate}"
            )
        chunk_size = int(self.sampling_rate / new_sampling_hertz)
        groups = [self.data[x : x + chunk_size] for x in range(0, len(self.data), chunk_size)]
        means = np.array([sum(group) / len(group) for group in groups])
        downsampled = Waveform(
            self.name,
            self.path,
            means,
            new_sampling_hertz,
            self.minimum,
            self.maximum,
            self.description,
        )
        logger.debug(f"Downsampling waveform ({self.name}) took {round(time.monotonic()-t0, 1)} s")
        return downsampled

    def slice_ms(self, start_ms: int, end_ms: int) -> Waveform:
        """
        Gets a section of the waveform between two time points.

        Args:
            start_ms: The start milliseconds
            end_ms: The end milliseconds

        Returns:
            The same Waveform as a copy
        """
        a = int(round(self.sampling_rate * start_ms / 1000))
        b = int(round(self.sampling_rate * end_ms / 1000))
        # NOTE(review): the start_ms/end_ms fields receive the *sample indices*
        # a and b, which equal milliseconds only at 1000 Hz — confirm intent.
        return Waveform(
            self.name,
            self.path,
            self.data[a:b],
            self.sampling_rate,
            self.minimum,
            self.maximum,
            self.description,
            a,
            b,
        )

    def __repr__(self):
        me = self.__class__.__name__
        sec = round(self.n_ms / 1000, 1)
        return (
            f"{me}({self.name} @ {self.sampling_rate}, n={len(self.data)}, {sec}s"
            + f" {self.minimum}-{self.maximum}"
        )

    def __str__(self):
        return repr(self)
__all__ = ["AudioTools", "Waveform"]
| true | true |
f730e1135c8fea9159d94e87c994c3afdad3a26d | 961 | py | Python | neurokit2/ecg/__init__.py | kassyray/NeuroKit | b84d110a71d5d17c0d1efde0d60d00446fda16cb | [
"MIT"
] | null | null | null | neurokit2/ecg/__init__.py | kassyray/NeuroKit | b84d110a71d5d17c0d1efde0d60d00446fda16cb | [
"MIT"
] | null | null | null | neurokit2/ecg/__init__.py | kassyray/NeuroKit | b84d110a71d5d17c0d1efde0d60d00446fda16cb | [
"MIT"
] | null | null | null | """Submodule for NeuroKit."""
# Aliases
from ..signal import signal_rate as ecg_rate
from .ecg_analyze import ecg_analyze
from .ecg_clean import ecg_clean
from .ecg_delineate import ecg_delineate
from .ecg_eventrelated import ecg_eventrelated
from .ecg_findpeaks import ecg_findpeaks
from .ecg_intervalrelated import ecg_intervalrelated
from .ecg_peaks import ecg_peaks
from .ecg_phase import ecg_phase
from .ecg_plot import ecg_plot
from .ecg_process import ecg_process
from .ecg_quality import ecg_quality
from .ecg_rsa import ecg_rsa
from .ecg_rsp import ecg_rsp
from .ecg_segment import ecg_segment
from .ecg_simulate import ecg_simulate
# Public names re-exported for `from neurokit2.ecg import *`; keep this list
# in sync with the imports above.
__all__ = [
    "ecg_simulate",
    "ecg_clean",
    "ecg_findpeaks",
    "ecg_peaks",
    "ecg_segment",
    "ecg_process",
    "ecg_plot",
    "ecg_delineate",
    "ecg_rsp",
    "ecg_phase",
    "ecg_rsa",
    "ecg_quality",
    "ecg_eventrelated",
    "ecg_intervalrelated",
    "ecg_analyze",
    "ecg_rate",
]
| 24.025 | 52 | 0.756504 |
from ..signal import signal_rate as ecg_rate
from .ecg_analyze import ecg_analyze
from .ecg_clean import ecg_clean
from .ecg_delineate import ecg_delineate
from .ecg_eventrelated import ecg_eventrelated
from .ecg_findpeaks import ecg_findpeaks
from .ecg_intervalrelated import ecg_intervalrelated
from .ecg_peaks import ecg_peaks
from .ecg_phase import ecg_phase
from .ecg_plot import ecg_plot
from .ecg_process import ecg_process
from .ecg_quality import ecg_quality
from .ecg_rsa import ecg_rsa
from .ecg_rsp import ecg_rsp
from .ecg_segment import ecg_segment
from .ecg_simulate import ecg_simulate
__all__ = [
"ecg_simulate",
"ecg_clean",
"ecg_findpeaks",
"ecg_peaks",
"ecg_segment",
"ecg_process",
"ecg_plot",
"ecg_delineate",
"ecg_rsp",
"ecg_phase",
"ecg_rsa",
"ecg_quality",
"ecg_eventrelated",
"ecg_intervalrelated",
"ecg_analyze",
"ecg_rate",
]
| true | true |
f730e11872186a9944e9e1f5e54f6d220b4f6018 | 889 | py | Python | user/admin.py | cxq1/c | 52507017f676b4ebed561581ced0d4edf15cdc70 | [
"MIT"
] | 1 | 2019-03-22T05:54:24.000Z | 2019-03-22T05:54:24.000Z | user/admin.py | cxq1/c | 52507017f676b4ebed561581ced0d4edf15cdc70 | [
"MIT"
] | 4 | 2021-04-08T18:40:39.000Z | 2021-06-10T17:40:34.000Z | user/admin.py | cxq1/c | 52507017f676b4ebed561581ced0d4edf15cdc70 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User
from .models import Profile,OAuthRelationship
class ProfileInline(admin.StackedInline):
    # Edit the related Profile together with its User in the admin.
    model = Profile
    # The profile lives and dies with the user; forbid standalone deletion.
    can_delete = False
class UserAdmin(BaseUserAdmin):
    """User admin extended with the inline Profile editor."""
    inlines = (ProfileInline, )
    # 'nickname' is not a User field; it is supplied by the method below.
    list_display = ('username', 'nickname', 'email', 'is_staff', 'is_active', 'is_superuser')
    def nickname(self, obj):
        # Surface the related profile's nickname as a changelist column.
        return obj.profile.nickname
    nickname.short_description = '昵称'  # Column header ("nickname" in Chinese).
@admin.register(OAuthRelationship)
class OAuthRelationshipAdmin(admin.ModelAdmin):
    """Admin changelist for third-party OAuth account links."""

    # Columns shown on the changelist page.
    list_display = ('user', 'openid', 'oauth_type')
# Re-register UserAdmin
# Replace Django's stock User admin with the Profile-aware version above.
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
@admin.register(Profile)
class ProfileAdmin(admin.ModelAdmin):
list_display = ('user', 'nickname') | 31.75 | 94 | 0.735658 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User
from .models import Profile,OAuthRelationship
class ProfileInline(admin.StackedInline):
model = Profile
can_delete = False
class UserAdmin(BaseUserAdmin):
inlines = (ProfileInline, )
list_display = ('username', 'nickname', 'email', 'is_staff', 'is_active', 'is_superuser')
def nickname(self, obj):
return obj.profile.nickname
nickname.short_description = '昵称'
@admin.register(OAuthRelationship)
class OAuthRelationshipAdmin(admin.ModelAdmin):
list_display=('user','openid','oauth_type')
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
@admin.register(Profile)
class ProfileAdmin(admin.ModelAdmin):
list_display = ('user', 'nickname') | true | true |
f730e24212521f633f0af8ecc5da5663ec75c84c | 215 | py | Python | setup.py | patrickwest/mgx-sim | f1457540dedf72076478f1af8765bb4d1ab197e9 | [
"BSD-3-Clause"
] | null | null | null | setup.py | patrickwest/mgx-sim | f1457540dedf72076478f1af8765bb4d1ab197e9 | [
"BSD-3-Clause"
] | null | null | null | setup.py | patrickwest/mgx-sim | f1457540dedf72076478f1af8765bb4d1ab197e9 | [
"BSD-3-Clause"
] | null | null | null | from setuptools import setup
# Package metadata for the mgxsim distribution.
setup(
    name="mgxsim",
    version="0.0.1",
    description="metagenomics utils.",
    packages=["mgxsim"],
    install_requires=["biopython", "pandas"],
)
| 15.357143 | 38 | 0.572093 | from setuptools import setup
setup(
name="mgxsim",
version="0.0.1",
description="metagenomics utils.",
packages=["mgxsim"],
install_requires = [
"biopython",
"pandas",
],
)
| true | true |
f730e2ab3a729344733257366e77e28fa9f64056 | 321 | py | Python | goetia/__init__.py | camillescott/goetia | 677e3ef028be6b70a2dacbcf7a4e83f4bb9fdf9a | [
"MIT"
] | 3 | 2020-02-27T00:53:02.000Z | 2021-03-09T06:26:48.000Z | goetia/__init__.py | camillescott/goetia | 677e3ef028be6b70a2dacbcf7a4e83f4bb9fdf9a | [
"MIT"
] | 1 | 2022-03-24T22:47:49.000Z | 2022-03-24T22:47:49.000Z | goetia/__init__.py | camillescott/goetia | 677e3ef028be6b70a2dacbcf7a4e83f4bb9fdf9a | [
"MIT"
] | null | null | null | import os
import cppyy
from .initializor import initialize
# Read the version string shipped alongside this module in the VERSION file.
with open(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'VERSION')) as fp:
    __version__ = fp.read().strip()
# Load the compiled goetia bindings into cppyy using the reflection map.
initialize('goetia', 'libgoetiaCppyy.so', 'goetia.map')
# Drop the helper so it does not leak into the package namespace.
del initialize
# This import only works after initialize() has registered the bindings.
from goetia import goetia as libgoetia
from cppyy import nullptr
| 22.928571 | 85 | 0.760125 | import os
import cppyy
from .initializor import initialize
with open(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'VERSION')) as fp:
__version__ = fp.read().strip()
initialize('goetia', 'libgoetiaCppyy.so', 'goetia.map')
del initialize
from goetia import goetia as libgoetia
from cppyy import nullptr
| true | true |
f730e2bdc6d22911b0eead0f336abb6bd7a95a50 | 6,476 | py | Python | app/lib/hydrofunctions/typing.py | edgewize/django-dashboard-dattaable | 1d8d02425ea77c248d2987f29e545bd97c62da77 | [
"MIT"
] | null | null | null | app/lib/hydrofunctions/typing.py | edgewize/django-dashboard-dattaable | 1d8d02425ea77c248d2987f29e545bd97c62da77 | [
"MIT"
] | 4 | 2020-04-11T16:34:58.000Z | 2021-06-02T01:26:35.000Z | app/lib/hydrofunctions/typing.py | edgewize/django-dashboard-dattaable | 1d8d02425ea77c248d2987f29e545bd97c62da77 | [
"MIT"
] | null | null | null | """
hydrofunctions.typing
~~~~~~~~~~~~~~~~~~~~~
This module contains functions for testing that user input is valid.
Why 'pre-check' user inputs, instead of using standard
python duck typing? These functions are meant to enhance an interactive
session for the user, and will check a user's parameters
before requesting data from an online resource. Otherwise, the server will
return a 404 code and the user will have no idea why. Hydrofunctions tries to raise
an exception (usually a TypeError) before a request is made, so that the user
can fix their request. It also tries to provide a helpful error message to an
interactive session user.
Suggested format for these functions:
* first check that the input is a string,
* then do a regular expression to check that the input is more or less valid.
* raise exceptions when user input breaks format.
-----
"""
from __future__ import absolute_import, print_function, division, unicode_literals
import re
def check_parameter_string(candidate, param):
    """Check that a parameter is a non-empty string or a list/tuple of strings.

    Returns ``None`` unchanged, a non-empty string unchanged, or a
    comma-joined string for a list/tuple of strings.  Anything else raises
    ``TypeError`` with a help message specific to *param*.
    """
    parameters = {
        "site": "NWIS station id(s) should be a string or list of strings,"
        + "often in the form of an eight digit number enclosed in quotes.",
        "parameterCd": "NWIS parameter codes are five-digit strings that specify "
        + "the parameter that is being measured at the site. Common "
        # Fixed: the original text had these two codes swapped; per the USGS
        # physical-parameter table, 00065 is stage/gage height (feet) and
        # 00060 is discharge (cubic feet per second).
        + "codes are '00065' for stream stage in feet, '00060' for "
        + "stream discharge in cubic feet per second, and '72019' for "
        + "groundwater levels. Not all sites collect data for all "
        + "parameters. See a complete list of physical parameters here: "
        + "https://help.waterdata.usgs.gov/parameter_cd?group_cd=PHY "
        + "You may request multiple parameters by submitting a comma-"
        + "delimited string of codes with no spaces, or by submitting "
        + "a list of codes, like this: parameterCd = '00065,00060' or "
        + "parameterCd = ['00065', '00060'] ",
        "county": "The NWIS county parameter accepts a five-digit string or "
        + "a list of five-digit strings to select all of the sites "
        + "within a county or list of counties. "
        + "Example: '51059' or ['51059', '51061'] are acceptable.",
        "state": "This parameter uses US two-letter postal codes "
        + "such as 'MD' for Maryland or 'AZ' for Arizona.",
        "default": "This parameter should be a string or a list of strings.",
    }
    # Fall back to the generic help text for unknown parameter names.
    help_text = parameters[param] if param in parameters else parameters["default"]
    msg = help_text + " Actual value: {}".format(candidate)

    if candidate is None:
        return None
    if isinstance(candidate, str) and candidate:
        return candidate
    if isinstance(candidate, (list, tuple)) and candidate:
        for element in candidate:
            if not isinstance(element, str):
                raise TypeError(msg + " bad element: {}".format(element))
        return ",".join(candidate)
    raise TypeError(msg)
def check_NWIS_bBox(input):
    """Validate an NWIS bounding box and normalize it to a comma-joined string.

    Accepts ``None`` (returned unchanged), a "ll_long,ll_lat,ur_long,ur_lat"
    string, or a list/tuple of at least four coordinate values.  Raises
    ``TypeError`` otherwise.
    """
    msg = (
        "NWIS bBox should be a string, list of strings, or tuple "
        + "containing the longitude and latitude of the lower left corner "
        + "of the bounding box, followed by the longitude and latitude "
        + "of the upper right corner of the bounding box. Most often in "
        + 'the form of "ll_long,ll_lat,ur_long,ur_lat" . '
        + "All latitude and longitude values should have less than 8 "
        + "places. "
        + "Actual value: {}".format(input)
    )
    if input is None:
        return None
    # A non-empty string passes through unchanged, provided it has at least
    # four comma-separated fields.
    if isinstance(input, str) and input:
        if len(input.split(",")) < 4:
            raise TypeError(msg)
        return input
    # A non-empty sequence is stringified and joined into the same format.
    if isinstance(input, (list, tuple)) and input:
        if len(input) < 4:
            raise TypeError(msg)
        return ",".join(str(coord) for coord in input)
    raise TypeError(msg)
def check_NWIS_service(input):
    """Validate the NWIS service code: 'iv' (instantaneous) or 'dv' (daily)."""
    if input is None:
        return None
    if input in ("iv", "dv"):
        return input
    raise TypeError(
        "The NWIS service type accepts 'dv' for daily values, "
        "or 'iv' for instantaneous values. Actual value: "
        "{}".format(input)
    )
def check_datestr(input):
    """Check that the start_date or end_date parameter is in yyyy-mm-dd format.
    """
    if input is None:
        return None
    # Year 1000-2999, month 01-12, day 01-31.  The original pattern used
    # [0-1]\d for the month and [0-3]\d for the day, which wrongly accepted
    # values such as month '13'-'19' or '00' and day '32'-'39' or '00'.
    pattern = r"[1-2]\d\d\d-(0[1-9]|1[0-2])-(0[1-9]|[1-2]\d|3[01])\Z"
    datestr = re.compile(pattern)
    if isinstance(input, str) and datestr.match(input):
        return input
    raise TypeError(
        "Dates should be a string in the form of 'YYYY-MM-DD' "
        "enclosed in quotes. Actual value: {}".format(input)
    )
def check_period(input):
    """Validate an NWIS period string of the form 'PxD' (x = days before now)."""
    if input is None:
        return None
    # TODO: check how many days maximum NWIS is willing to respond to.
    # One to three digits, so at most 999 days before today.
    periodstr = re.compile(r"^P\d{1,3}D$")
    if isinstance(input, str) and periodstr.match(input):
        return input
    raise TypeError(
        "Period should be a string in the form of 'PxD', "
        "where x represents the number of days before today, "
        "with a maximum of 999 days. "
        "Example: to request the previous 10 days, "
        "enter 'period=P10D'. Actual value entered: {}".format(input)
    )
| 40.987342 | 114 | 0.61782 | from __future__ import absolute_import, print_function, division, unicode_literals
import re
def check_parameter_string(candidate, param):
parameters = {
"site": "NWIS station id(s) should be a string or list of strings,"
+ "often in the form of an eight digit number enclosed in quotes.",
"parameterCd": "NWIS parameter codes are five-digit strings that specify "
+ "the parameter that is being measured at the site. Common "
+ "codes are '00060' for stream stage in feet, '00065' for "
+ "stream discharge in cubic feet per second, and '72019' for "
+ "groundwater levels. Not all sites collect data for all "
+ "parameters. See a complete list of physical parameters here: "
+ "https://help.waterdata.usgs.gov/parameter_cd?group_cd=PHY "
+ "You may request multiple parameters by submitting a comma-"
+ "delimited string of codes with no spaces, or by submitting "
+ "a list of codes, like this: parameterCd = '00065,00060' or "
+ "parameterCd = ['00065', '00060'] ",
"county": "The NWIS county parameter accepts a five-digit string or "
+ "a list of five-digit strings to select all of the sites "
+ "within a county or list of counties. "
+ "Example: '51059' or ['51059', '51061'] are acceptable.",
"state": "This parameter uses US two-letter postal codes "
+ "such as 'MD' for Maryland or 'AZ' for Arizona.",
"default": "This parameter should be a string or a list of strings.",
}
if param in parameters:
msg = parameters[param] + " Actual value: {}".format(candidate)
else:
msg = (
"This parameter should be a string or a list of strings."
+ " Actual value: {}".format(candidate)
)
if candidate is None:
return None
elif isinstance(candidate, str) and candidate:
return candidate
elif (isinstance(candidate, list) or isinstance(candidate, tuple)) and candidate:
for s in candidate:
if not isinstance(s, str):
raise TypeError(msg + " bad element: {}".format(s))
return ",".join([str(s) for s in candidate])
else:
raise TypeError(msg)
def check_NWIS_bBox(input):
msg = (
"NWIS bBox should be a string, list of strings, or tuple "
+ "containing the longitude and latitude of the lower left corner "
+ "of the bounding box, followed by the longitude and latitude "
+ "of the upper right corner of the bounding box. Most often in "
+ 'the form of "ll_long,ll_lat,ur_long,ur_lat" . '
+ "All latitude and longitude values should have less than 8 "
+ "places. "
+ "Actual value: {}".format(input)
)
if input is None:
return None
# Test for and reject empty strings: empty strings are false.
if isinstance(input, str) and input:
t = input.split(",")
if len(t) < 4:
raise TypeError(msg)
return input
# test for input is a list and it is not empty
elif (isinstance(input, list) or isinstance(input, tuple)) and input:
if len(input) < 4:
raise TypeError(msg)
# format: [-83.000000, 36.500000, -81.000000, 38.500000] ==> '-83.000000,36.500000,-81.000000,38.500000'
return ",".join([str(s) for s in input])
else:
raise TypeError(msg)
def check_NWIS_service(input):
if input is None:
return None
if input == "iv" or input == "dv":
return input
else:
raise TypeError(
"The NWIS service type accepts 'dv' for daily values, "
"or 'iv' for instantaneous values. Actual value: "
"{}".format(input)
)
def check_datestr(input):
# Use a regular expression to ensure in form of yyyy-mm-dd
if input is None:
return None
pattern = r"[1-2]\d\d\d-[0-1]\d-[0-3]\d\Z"
datestr = re.compile(pattern)
if isinstance(input, str) and datestr.match(input):
return input
else:
raise TypeError(
"Dates should be a string in the form of 'YYYY-MM-DD' "
"enclosed in quotes. Actual value: {}".format(input)
)
def check_period(input):
if input is None:
return None
# TODO: check how many days maximum NWIS is willing to respond to.
# This pattern sets a maximum of 999 days (between 1 and 3 digits).
pattern = r"^P\d{1,3}D$"
periodstr = re.compile(pattern)
if isinstance(input, str) and periodstr.match(input):
return input
else:
raise TypeError(
"Period should be a string in the form of 'PxD', "
"where x represents the number of days before today, "
"with a maximum of 999 days. "
"Example: to request the previous 10 days, "
"enter 'period=P10D'. Actual value entered: {}".format(input)
)
| true | true |
f730e4028c13ff210cb1005b2bee0aebae3b584f | 592 | py | Python | scripts/create_model_vs_data_txt.py | phenix-project/phenix_html | a56f221c450590abfd2f0d03737c582c8ac5e563 | [
"BSD-3-Clause"
] | null | null | null | scripts/create_model_vs_data_txt.py | phenix-project/phenix_html | a56f221c450590abfd2f0d03737c582c8ac5e563 | [
"BSD-3-Clause"
] | null | null | null | scripts/create_model_vs_data_txt.py | phenix-project/phenix_html | a56f221c450590abfd2f0d03737c582c8ac5e563 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function
import mmtbx.model_vs_data
import libtbx.load_env
from six.moves import cStringIO as StringIO
import os.path as op
import os
import sys
def run():
  """Render mmtbx.model_vs_data's help text into rst_files/reference/model_vs_data.txt."""
  html_dir = libtbx.env.find_in_repositories(relative_path="phenix_html")
  dest_dir = op.join(html_dir, "rst_files", "reference")
  log = StringIO()
  print(mmtbx.model_vs_data.msg, file=log)
  print("""Default parameters:\n{{phil:mmtbx.model_vs_data}}""", file=log)
  # Context manager guarantees the handle is closed even if the write fails
  # (the original opened and closed it manually).
  with open(op.join(dest_dir, "model_vs_data.txt"), "w") as ofn:
    ofn.write(log.getvalue())
if (__name__ == "__main__"):
run()
| 28.190476 | 74 | 0.738176 | from __future__ import print_function
import mmtbx.model_vs_data
import libtbx.load_env
from six.moves import cStringIO as StringIO
import os.path as op
import os
import sys
def run():
html_dir = libtbx.env.find_in_repositories(relative_path="phenix_html")
dest_dir = op.join(html_dir, "rst_files", "reference")
log = StringIO()
print(mmtbx.model_vs_data.msg, file=log)
print("""Default parameters:\n{{phil:mmtbx.model_vs_data}}""", file=log)
ofn = open(op.join(dest_dir, "model_vs_data.txt"), "w")
ofn.write(log.getvalue())
ofn.close()
if (__name__ == "__main__"):
run()
| true | true |
f730e40e6191fecbd2be8442259456de0521ad96 | 2,298 | py | Python | tests/ui/test_home.py | adele-angel/selenium-test-automation | 23aac2e195883ea72cf0f2668cdb7cedd885ec24 | [
"MIT"
] | null | null | null | tests/ui/test_home.py | adele-angel/selenium-test-automation | 23aac2e195883ea72cf0f2668cdb7cedd885ec24 | [
"MIT"
] | null | null | null | tests/ui/test_home.py | adele-angel/selenium-test-automation | 23aac2e195883ea72cf0f2668cdb7cedd885ec24 | [
"MIT"
] | null | null | null | import pytest
import allure
from config.credentials import Credentials
from framework.pages.HomePage import HomePage
from infra.screenshot_generator import get_screenshot
from infra.shared_steps import SharedSteps
from infra.string_util import identifier_generator
@allure.title('Test navigation into "New Project" page')
@allure.severity(allure.severity_level.CRITICAL)
@pytest.mark.sanity
@pytest.mark.home
@pytest.mark.project
def test_click_create_new_project(setup):
    """UI test: the '+ Project' button on Home must open the New Project page.

    The ``setup`` fixture supplies a configured WebDriver instance; on
    failure the assert message captures a screenshot via get_screenshot().
    """
    with allure.step('Setup driver'):
        driver = setup
        driver.get(Credentials.BASE_URL)
    with allure.step('Login to OpenProject'):
        SharedSteps.login_steps(driver)
    with allure.step('Create a HomePage instance'):
        home_page = HomePage(driver)
    with allure.step('On "Home" page, click "+ Project" green button'):
        home_page.click_new_project_button()
    with allure.step('Verify navigation into "New Project" page'):
        assert driver.title == Credentials.NEW_PROJECT_PAGE_TITLE, get_screenshot(driver, "home", "page_title", Credentials.NEW_PROJECT_PAGE_TITLE)
@allure.title('Test navigation into a selected project page')
@allure.severity(allure.severity_level.CRITICAL)
@pytest.mark.sanity
@pytest.mark.home
@pytest.mark.project
def test_select_project(setup):
    """UI test: choosing a project from the drop-down must open that project.

    Verifies both the URL (slug built by identifier_generator) and the page
    source; the ``setup`` fixture supplies a configured WebDriver instance.
    """
    with allure.step('Setup driver'):
        driver = setup
        driver.get(Credentials.BASE_URL)
    with allure.step('Login to OpenProject'):
        SharedSteps.login_steps(driver)
    with allure.step('Create a HomePage instance'):
        home_page = HomePage(driver)
    with allure.step('Click "Select a project" menu button, and select a project from the drop-down'):
        home_page.select_project(Credentials.HOME_PAGE_SELECTED_PROJECT)
    with allure.step('Verify the value of the "identifier" field'):
        # Note: OpenProject's identifier field doesn't match project requirements for special characters
        expected_identifier = identifier_generator(Credentials.HOME_PAGE_SELECTED_PROJECT)
        assert expected_identifier in driver.current_url, get_screenshot(driver, "home", "identifier", expected_identifier)
        # Another option
        assert f'title="{Credentials.HOME_PAGE_SELECTED_PROJECT}"' in driver.page_source, get_screenshot(driver, "home", "page_source")
| 38.949153 | 147 | 0.755004 | import pytest
import allure
from config.credentials import Credentials
from framework.pages.HomePage import HomePage
from infra.screenshot_generator import get_screenshot
from infra.shared_steps import SharedSteps
from infra.string_util import identifier_generator
@allure.title('Test navigation into "New Project" page')
@allure.severity(allure.severity_level.CRITICAL)
@pytest.mark.sanity
@pytest.mark.home
@pytest.mark.project
def test_click_create_new_project(setup):
with allure.step('Setup driver'):
driver = setup
driver.get(Credentials.BASE_URL)
with allure.step('Login to OpenProject'):
SharedSteps.login_steps(driver)
with allure.step('Create a HomePage instance'):
home_page = HomePage(driver)
with allure.step('On "Home" page, click "+ Project" green button'):
home_page.click_new_project_button()
with allure.step('Verify navigation into "New Project" page'):
assert driver.title == Credentials.NEW_PROJECT_PAGE_TITLE, get_screenshot(driver, "home", "page_title", Credentials.NEW_PROJECT_PAGE_TITLE)
@allure.title('Test navigation into a selected project page')
@allure.severity(allure.severity_level.CRITICAL)
@pytest.mark.sanity
@pytest.mark.home
@pytest.mark.project
def test_select_project(setup):
with allure.step('Setup driver'):
driver = setup
driver.get(Credentials.BASE_URL)
with allure.step('Login to OpenProject'):
SharedSteps.login_steps(driver)
with allure.step('Create a HomePage instance'):
home_page = HomePage(driver)
with allure.step('Click "Select a project" menu button, and select a project from the drop-down'):
home_page.select_project(Credentials.HOME_PAGE_SELECTED_PROJECT)
with allure.step('Verify the value of the "identifier" field'):
expected_identifier = identifier_generator(Credentials.HOME_PAGE_SELECTED_PROJECT)
assert expected_identifier in driver.current_url, get_screenshot(driver, "home", "identifier", expected_identifier)
assert f'title="{Credentials.HOME_PAGE_SELECTED_PROJECT}"' in driver.page_source, get_screenshot(driver, "home", "page_source")
| true | true |
f730e509a5cb334c83d0d7e50b92c0635bdffed4 | 782 | py | Python | dataset/wrapped_dataloader.py | Aquarium1222/Electricity-Forecasting | 9f945d3fd8006e5d77da08ff7861577965109ec8 | [
"MIT"
] | null | null | null | dataset/wrapped_dataloader.py | Aquarium1222/Electricity-Forecasting | 9f945d3fd8006e5d77da08ff7861577965109ec8 | [
"MIT"
] | null | null | null | dataset/wrapped_dataloader.py | Aquarium1222/Electricity-Forecasting | 9f945d3fd8006e5d77da08ff7861577965109ec8 | [
"MIT"
] | null | null | null | from dataset.electric_dataloader import ElectricDataloader
from dataset.preprocessor import Preprocessor
class WrappedDataloader:
    """Wrap a dataloader so every yielded batch is passed through *func*."""

    def __init__(self, dataloader, func):
        self.dataloader = dataloader
        self.func = func

    def __len__(self):
        # Length (number of batches) is delegated to the wrapped loader.
        return len(self.dataloader)

    def __iter__(self):
        # Unpack each batch into positional arguments for the transform.
        for batch in self.dataloader:
            yield self.func(*batch)
def preprocess(x, y):
    """Transpose dimensions 0 and 1 of each element of a (data, target) pair."""
    x_t = x.transpose(0, 1)
    y_t = y.transpose(0, 1)
    return x_t, y_t
# Build the shared preprocessor and dataloaders once at import time, then
# wrap both loaders so every batch is passed through preprocess().
preprocessor = Preprocessor()
dataloader = ElectricDataloader(preprocessor)
train_loader = dataloader.train_loader
val_loader = dataloader.val_loader
train_loader = WrappedDataloader(train_loader, preprocess)
val_loader = WrappedDataloader(val_loader, preprocess)
| 26.066667 | 58 | 0.742967 | from dataset.electric_dataloader import ElectricDataloader
from dataset.preprocessor import Preprocessor
class WrappedDataloader:
def __init__(self, dataloader, func):
self.dataloader = dataloader
self.func = func
def __len__(self):
return len(self.dataloader)
def __iter__(self):
iter_dataloader = iter(self.dataloader)
for batch in iter_dataloader:
yield self.func(*batch)
def preprocess(x, y):
return x.transpose(0, 1), y.transpose(0, 1)
preprocessor = Preprocessor()
dataloader = ElectricDataloader(preprocessor)
train_loader = dataloader.train_loader
val_loader = dataloader.val_loader
train_loader = WrappedDataloader(train_loader, preprocess)
val_loader = WrappedDataloader(val_loader, preprocess)
| true | true |
f730e637b6cdd9ca1951b596c2df73147bceb07c | 13,442 | py | Python | mlir/test/python/integration/dialects/linalg/opsrun.py | mgehre-xlx/sycl | 2086745509ef4bc298d7bbec402a123dae68f25e | [
"Apache-2.0"
] | 61 | 2019-04-12T18:49:57.000Z | 2022-03-19T22:23:16.000Z | mlir/test/python/integration/dialects/linalg/opsrun.py | mgehre-xlx/sycl | 2086745509ef4bc298d7bbec402a123dae68f25e | [
"Apache-2.0"
] | 127 | 2019-04-09T00:55:50.000Z | 2022-03-21T15:35:41.000Z | mlir/test/python/integration/dialects/linalg/opsrun.py | mgehre-xlx/sycl | 2086745509ef4bc298d7bbec402a123dae68f25e | [
"Apache-2.0"
] | 10 | 2019-04-02T18:25:40.000Z | 2022-02-15T07:11:37.000Z | # RUN: %PYTHON %s 2>&1 | FileCheck %s
import ctypes
import sys
from mlir.ir import *
from mlir.dialects import builtin
from mlir.dialects import linalg
from mlir.dialects import std
from mlir.passmanager import *
from mlir.execution_engine import *
# Log everything to stderr and flush so that we have a unified stream to match
# errors/info emitted by MLIR to stderr.
def log(*args):
  """Write *args* to stderr, flushing so output interleaves with MLIR's."""
  print(*args, file=sys.stderr, flush=True)
matmul_boiler = """
func @main() -> f32 attributes {llvm.emit_c_interface} {
%v0 = constant 0.0 : f32
%v1 = constant 1.0 : f32
%v2 = constant 2.0 : f32
%A = memref.alloc() : memref<4x16xf32>
%B = memref.alloc() : memref<16x8xf32>
%C = memref.alloc() : memref<4x8xf32>
linalg.fill(%v1, %A) : f32, memref<4x16xf32>
linalg.fill(%v2, %B) : f32, memref<16x8xf32>
linalg.fill(%v0, %C) : f32, memref<4x8xf32>
call @matmul_on_buffers(%A, %B, %C) :
(memref<4x16xf32>, memref<16x8xf32>, memref<4x8xf32>) -> ()
%c0 = constant 0 : index
%0 = memref.load %C[%c0, %c0] : memref<4x8xf32>
// TODO: FFI-based solution to allow testing and printing with python code.
return %0 : f32
}
"""
fill_boiler = """
func @main() -> i32 attributes {llvm.emit_c_interface} {
%O = memref.alloc() : memref<4x16xi32>
%min = constant -1000.0 : f64
%max = constant 1000.0 : f64
%seed = constant 42 : i32
call @fill_on_buffers(%min, %max, %seed, %O) :
(f64, f64, i32, memref<4x16xi32>) -> ()
%c0 = constant 0 : index
%0 = memref.load %O[%c0, %c0] : memref<4x16xi32>
// TODO: FFI-based solution to allow testing and printing with python code.
return %0 : i32
}
"""
conv_boiler = """
func @main() -> i32 attributes {llvm.emit_c_interface} {
%v0 = constant 0 : i32
%v1 = constant 1.0 : f64
%v2 = constant 2.0 : f64
%input = memref.alloc() : memref<1x4x16x1xf64>
%filter = memref.alloc() : memref<2x2x1xf64>
%output = memref.alloc() : memref<1x2x4x1xi32>
linalg.fill(%v1, %input) : f64, memref<1x4x16x1xf64>
linalg.fill(%v2, %filter) : f64, memref<2x2x1xf64>
linalg.fill(%v0, %output) : i32, memref<1x2x4x1xi32>
call @conv_on_buffers(%input, %filter, %output) :
(memref<1x4x16x1xf64>, memref<2x2x1xf64>, memref<1x2x4x1xi32>) -> ()
%c0 = constant 0 : index
%0 = memref.load %output[%c0, %c0, %c0, %c0] : memref<1x2x4x1xi32>
// TODO: FFI-based solution to allow testing and printing with python code.
return %0 : i32
}
"""
pooling_boiler = """
func @main() -> i32 attributes {llvm.emit_c_interface} {
%v0 = constant 0 : i32
%v42 = constant 42.0 : f64
%v77 = constant 77.0 : f64
%v-13 = constant -13.0 : f64
%v1 = constant 1.0 : f64
%input = memref.alloc() : memref<1x4x16x1xf64>
%shape = memref.alloc() : memref<2x2xf64>
%output = memref.alloc() : memref<1x2x4x1xi32>
linalg.fill(%v1, %input) : f64, memref<1x4x16x1xf64>
linalg.fill(%v1, %shape) : f64, memref<2x2xf64>
linalg.fill(%v0, %output) : i32, memref<1x2x4x1xi32>
%c0 = constant 0 : index
%c1 = constant 1 : index
%c2 = constant 2 : index
memref.store %v42, %input[%c0, %c0, %c0, %c0] : memref<1x4x16x1xf64>
memref.store %v77, %input[%c0, %c0, %c1, %c0] : memref<1x4x16x1xf64>
memref.store %v-13, %input[%c0, %c0, %c2, %c0] : memref<1x4x16x1xf64>
call @pooling_on_buffers(%input, %shape, %output) :
(memref<1x4x16x1xf64>, memref<2x2xf64>, memref<1x2x4x1xi32>) -> ()
%0 = memref.load %output[%c0, %c0, %c0, %c0] : memref<1x2x4x1xi32>
// TODO: FFI-based solution to allow testing and printing with python code.
return %0 : i32
}
"""
def transform(module, boilerplate):
  """Append *boilerplate* after the first function of *module* and lower to LLVM.

  Returns a freshly parsed Module, lowered through the pass pipeline below,
  ready for JIT execution by ExecutionEngine.
  """
  import mlir.conversions
  import mlir.dialects.linalg.passes
  import mlir.transforms
  # TODO: Allow cloning functions from one module to another.
  # Atm we have to resort to string concatenation.
  mod = Module.parse(
      str(module.operation.regions[0].blocks[0].operations[0].operation) +
      boilerplate)
  # Lowering pipeline: linalg -> loops -> std, then vector/memref/std -> LLVM.
  pm = PassManager.parse(
      "builtin.func(convert-linalg-to-loops, lower-affine, " +
      "convert-scf-to-std), convert-vector-to-llvm," +
      "convert-memref-to-llvm,convert-std-to-llvm")
  pm.run(mod)
  return mod
def test_matmul_builtin():
  """Build a named-op matmul, JIT it via matmul_boiler, and print C[0][0]."""
  with Context() as ctx, Location.unknown():
    module = Module.create()
    f32 = F32Type.get()
    with InsertionPoint(module.body):
      @builtin.FuncOp.from_py_func(
          MemRefType.get((4, 16), f32), MemRefType.get((16, 8), f32),
          MemRefType.get((4, 8), f32))
      def matmul_on_buffers(lhs, rhs, out):
        linalg.matmul(lhs, rhs, outs=[out])
    execution_engine = ExecutionEngine(transform(module, matmul_boiler))
    # TODO: FFI-based solution to allow testing and printing with python code.
    # Prepare arguments: one result f32.
    # Arguments must be passed as pointers.
    c_float_p = ctypes.c_float * 1
    res = c_float_p(-1.)
    execution_engine.invoke("main", res)
    log("RESULT: ", res[0])
    # matmul_boiler fills A with 1.0 and B with 2.0, so every C entry is
    # 16 * (1.0 * 2.0) = 32.
    # CHECK: RESULT: 32.0
test_matmul_builtin()
def test_matmul_generic():
  """Same as test_matmul_builtin, but emit linalg.generic instead of the named op."""
  with Context() as ctx, Location.unknown():
    module = Module.create()
    f32 = F32Type.get()
    with InsertionPoint(module.body):
      @builtin.FuncOp.from_py_func(
          MemRefType.get((4, 16), f32), MemRefType.get((16, 8), f32),
          MemRefType.get((4, 8), f32))
      def matmul_on_buffers(lhs, rhs, out):
        linalg.matmul(lhs, rhs, outs=[out], emit_generic=True)
    execution_engine = ExecutionEngine(transform(module, matmul_boiler))
    # TODO: FFI-based solution to allow testing and printing with python code.
    # Prepare arguments: one result f32.
    # Arguments must be passed as pointers.
    c_float_p = ctypes.c_float * 1
    res = c_float_p(-1.)
    execution_engine.invoke("main", res)
    log("RESULT: ", res[0])
    # Must match the named-op path: 16 * (1.0 * 2.0) = 32.
    # CHECK: RESULT: 32.0
test_matmul_generic()
def test_fill_builtin():
  """JIT the named fill_rng_2d op via fill_boiler and print O[0][0]."""
  with Context() as ctx, Location.unknown():
    module = Module.create()
    f64 = F64Type.get()
    i32 = IntegerType.get_signless(32)
    with InsertionPoint(module.body):
      @builtin.FuncOp.from_py_func(f64, f64, i32, MemRefType.get((4, 16), i32))
      def fill_on_buffers(min, max, seed, out):
        linalg.fill_rng_2d(min, max, seed, outs=[out])
    execution_engine = ExecutionEngine(transform(module, fill_boiler))
    # TODO: FFI-based solution to allow testing and printing with python code.
    # Prepare arguments: one result i32.
    # Arguments must be passed as pointers.
    c_int_p = ctypes.c_int * 1
    res = c_int_p(-1)
    execution_engine.invoke("main", res)
    log("RESULT: ", res[0])
    # Deterministic pseudo-random fill in [-1000, 1000] seeded with 42
    # (see fill_boiler).
    # CHECK: RESULT: -480
test_fill_builtin()
def test_fill_generic():
  """Same as test_fill_builtin, but emit linalg.generic instead of the named op."""
  with Context() as ctx, Location.unknown():
    module = Module.create()
    f64 = F64Type.get()
    i32 = IntegerType.get_signless(32)
    with InsertionPoint(module.body):
      @builtin.FuncOp.from_py_func(f64, f64, i32, MemRefType.get((4, 16), i32))
      def fill_on_buffers(min, max, seed, out):
        linalg.fill_rng_2d(min, max, seed, outs=[out], emit_generic=True)
    execution_engine = ExecutionEngine(transform(module, fill_boiler))
    # TODO: FFI-based solution to allow testing and printing with python code.
    # Prepare arguments: one result i32.
    # Arguments must be passed as pointers.
    c_int_p = ctypes.c_int * 1
    res = c_int_p(-1)
    execution_engine.invoke("main", res)
    log("RESULT: ", res[0])
    # Must match the named-op path above.
    # CHECK: RESULT: -480
test_fill_generic()
def test_conv_builtin():
  """JIT the named depthwise conv op via conv_boiler and print output[0][0][0][0]."""
  with Context() as ctx, Location.unknown():
    module = Module.create()
    f64 = F64Type.get()
    i32 = IntegerType.get_signless(32)
    with InsertionPoint(module.body):
      @builtin.FuncOp.from_py_func(
          MemRefType.get((1, 4, 16, 1), f64), MemRefType.get((2, 2, 1), f64),
          MemRefType.get((1, 2, 4, 1), i32))
      def conv_on_buffers(input, filter, output):
        linalg.depthwise_conv_2d_input_nhwc_filter_hwc_poly(
            input, filter, outs=[output], strides=[2, 4], dilations=[1, 2])
    execution_engine = ExecutionEngine(transform(module, conv_boiler))
    # TODO: FFI-based solution to allow testing and printing with python code.
    # Prepare arguments: one result i32.
    # Arguments must be passed as pointers.
    c_int_p = ctypes.c_int * 1
    res = c_int_p(-1)
    execution_engine.invoke("main", res)
    log("RESULT: ", res[0])
    # conv_boiler fills input with 1.0 and the 2x2 filter with 2.0, so each
    # output entry is 4 * (1.0 * 2.0) = 8.
    # CHECK: RESULT: 8
test_conv_builtin()
def test_conv_generic():
  """Same as test_conv_builtin but lowers the conv through linalg.generic."""
  with Context() as ctx, Location.unknown():
    module = Module.create()
    f64 = F64Type.get()
    i32 = IntegerType.get_signless(32)
    with InsertionPoint(module.body):
      # Symbol name must stay `conv_on_buffers`: conv_boiler's main() calls it by name.
      @builtin.FuncOp.from_py_func(
          MemRefType.get((1, 4, 16, 1), f64), MemRefType.get((2, 2, 1), f64),
          MemRefType.get((1, 2, 4, 1), i32))
      def conv_on_buffers(input, filter, output):
        # emit_generic=True forces the linalg.generic form instead of the named op.
        linalg.depthwise_conv_2d_input_nhwc_filter_hwc_poly(
            input,
            filter,
            outs=[output],
            strides=[2, 4],
            dilations=[1, 2],
            emit_generic=True)
    execution_engine = ExecutionEngine(transform(module, conv_boiler))
    # TODO: FFI-based solution to allow testing and printing with python code.
    # Prepare arguments: one result i32.
    # Arguments must be passed as pointers.
    c_int_p = ctypes.c_int * 1
    res = c_int_p(-1)
    execution_engine.invoke("main", res)
    log("RESULT: ", res[0])
    # CHECK: RESULT: 8
test_conv_generic()
def test_max_pooling_builtin():
  """Run the named NHWC max-pooling op; pooling_boiler stores 42/77/-13 into the input."""
  with Context() as ctx, Location.unknown():
    module = Module.create()
    f64 = F64Type.get()
    i32 = IntegerType.get_signless(32)
    with InsertionPoint(module.body):
      # Symbol name must stay `pooling_on_buffers`: pooling_boiler's main() calls it by name.
      @builtin.FuncOp.from_py_func(
          MemRefType.get((1, 4, 16, 1), f64), MemRefType.get((2, 2), f64),
          MemRefType.get((1, 2, 4, 1), i32))
      def pooling_on_buffers(input, shape, output):
        linalg.pooling_nhwc_max(
            input, shape, outs=[output], strides=[2, 4], dilations=[1, 2])
    execution_engine = ExecutionEngine(transform(module, pooling_boiler))
    # TODO: FFI-based solution to allow testing and printing with python code.
    # Prepare arguments: one result i32.
    # Arguments must be passed as pointers.
    c_int_p = ctypes.c_int * 1
    res = c_int_p(-1)
    execution_engine.invoke("main", res)
    log("RESULT: ", res[0])
    # 77 is not selected due to the dilation 2 in the second dimension.
    # CHECK: RESULT: 42
test_max_pooling_builtin()
def test_max_pooling_generic():
  """Same as test_max_pooling_builtin but lowers pooling through linalg.generic."""
  with Context() as ctx, Location.unknown():
    module = Module.create()
    f64 = F64Type.get()
    i32 = IntegerType.get_signless(32)
    with InsertionPoint(module.body):
      # Symbol name must stay `pooling_on_buffers`: pooling_boiler's main() calls it by name.
      @builtin.FuncOp.from_py_func(
          MemRefType.get((1, 4, 16, 1), f64), MemRefType.get((2, 2), f64),
          MemRefType.get((1, 2, 4, 1), i32))
      def pooling_on_buffers(input, shape, output):
        # emit_generic=True forces the linalg.generic form instead of the named op.
        linalg.pooling_nhwc_max(
            input,
            shape,
            outs=[output],
            strides=[2, 4],
            dilations=[1, 2],
            emit_generic=True)
    execution_engine = ExecutionEngine(transform(module, pooling_boiler))
    # TODO: FFI-based solution to allow testing and printing with python code.
    # Prepare arguments: one result i32.
    # Arguments must be passed as pointers.
    c_int_p = ctypes.c_int * 1
    res = c_int_p(-1)
    execution_engine.invoke("main", res)
    log("RESULT: ", res[0])
    # 77 is not selected due to the dilation 2 in the second dimension.
    # CHECK: RESULT: 42
test_max_pooling_generic()
def test_min_pooling_builtin():
  """Run the named NHWC min-pooling op; the -13 stored by pooling_boiler is selected."""
  with Context() as ctx, Location.unknown():
    module = Module.create()
    f64 = F64Type.get()
    i32 = IntegerType.get_signless(32)
    with InsertionPoint(module.body):
      # Symbol name must stay `pooling_on_buffers`: pooling_boiler's main() calls it by name.
      @builtin.FuncOp.from_py_func(
          MemRefType.get((1, 4, 16, 1), f64), MemRefType.get((2, 2), f64),
          MemRefType.get((1, 2, 4, 1), i32))
      def pooling_on_buffers(input, shape, output):
        linalg.pooling_nhwc_min(
            input, shape, outs=[output], strides=[2, 4], dilations=[1, 2])
    execution_engine = ExecutionEngine(transform(module, pooling_boiler))
    # TODO: FFI-based solution to allow testing and printing with python code.
    # Prepare arguments: one result i32.
    # Arguments must be passed as pointers.
    c_int_p = ctypes.c_int * 1
    res = c_int_p(-1)
    execution_engine.invoke("main", res)
    log("RESULT: ", res[0])
    # CHECK: RESULT: -13
test_min_pooling_builtin()
def test_min_pooling_generic():
  """Same as test_min_pooling_builtin but lowers pooling through linalg.generic."""
  with Context() as ctx, Location.unknown():
    module = Module.create()
    f64 = F64Type.get()
    i32 = IntegerType.get_signless(32)
    with InsertionPoint(module.body):
      # Symbol name must stay `pooling_on_buffers`: pooling_boiler's main() calls it by name.
      @builtin.FuncOp.from_py_func(
          MemRefType.get((1, 4, 16, 1), f64), MemRefType.get((2, 2), f64),
          MemRefType.get((1, 2, 4, 1), i32))
      def pooling_on_buffers(input, shape, output):
        # emit_generic=True forces the linalg.generic form instead of the named op.
        linalg.pooling_nhwc_min(
            input,
            shape,
            outs=[output],
            strides=[2, 4],
            dilations=[1, 2],
            emit_generic=True)
    execution_engine = ExecutionEngine(transform(module, pooling_boiler))
    # TODO: FFI-based solution to allow testing and printing with python code.
    # Prepare arguments: one result i32.
    # Arguments must be passed as pointers.
    c_int_p = ctypes.c_int * 1
    res = c_int_p(-1)
    execution_engine.invoke("main", res)
    log("RESULT: ", res[0])
    # CHECK: RESULT: -13
test_min_pooling_generic()
| 30.411765 | 79 | 0.651912 |
import ctypes
import sys
from mlir.ir import *
from mlir.dialects import builtin
from mlir.dialects import linalg
from mlir.dialects import std
from mlir.passmanager import *
from mlir.execution_engine import *
def log(*args):
  """Write *args* to stderr, space-separated and newline-terminated, flushing
  immediately so output ordering is deterministic for the test harness."""
  sys.stderr.write(" ".join(str(arg) for arg in args) + "\n")
  sys.stderr.flush()
matmul_boiler = """
func @main() -> f32 attributes {llvm.emit_c_interface} {
%v0 = constant 0.0 : f32
%v1 = constant 1.0 : f32
%v2 = constant 2.0 : f32
%A = memref.alloc() : memref<4x16xf32>
%B = memref.alloc() : memref<16x8xf32>
%C = memref.alloc() : memref<4x8xf32>
linalg.fill(%v1, %A) : f32, memref<4x16xf32>
linalg.fill(%v2, %B) : f32, memref<16x8xf32>
linalg.fill(%v0, %C) : f32, memref<4x8xf32>
call @matmul_on_buffers(%A, %B, %C) :
(memref<4x16xf32>, memref<16x8xf32>, memref<4x8xf32>) -> ()
%c0 = constant 0 : index
%0 = memref.load %C[%c0, %c0] : memref<4x8xf32>
// TODO: FFI-based solution to allow testing and printing with python code.
return %0 : f32
}
"""
fill_boiler = """
func @main() -> i32 attributes {llvm.emit_c_interface} {
%O = memref.alloc() : memref<4x16xi32>
%min = constant -1000.0 : f64
%max = constant 1000.0 : f64
%seed = constant 42 : i32
call @fill_on_buffers(%min, %max, %seed, %O) :
(f64, f64, i32, memref<4x16xi32>) -> ()
%c0 = constant 0 : index
%0 = memref.load %O[%c0, %c0] : memref<4x16xi32>
// TODO: FFI-based solution to allow testing and printing with python code.
return %0 : i32
}
"""
conv_boiler = """
func @main() -> i32 attributes {llvm.emit_c_interface} {
%v0 = constant 0 : i32
%v1 = constant 1.0 : f64
%v2 = constant 2.0 : f64
%input = memref.alloc() : memref<1x4x16x1xf64>
%filter = memref.alloc() : memref<2x2x1xf64>
%output = memref.alloc() : memref<1x2x4x1xi32>
linalg.fill(%v1, %input) : f64, memref<1x4x16x1xf64>
linalg.fill(%v2, %filter) : f64, memref<2x2x1xf64>
linalg.fill(%v0, %output) : i32, memref<1x2x4x1xi32>
call @conv_on_buffers(%input, %filter, %output) :
(memref<1x4x16x1xf64>, memref<2x2x1xf64>, memref<1x2x4x1xi32>) -> ()
%c0 = constant 0 : index
%0 = memref.load %output[%c0, %c0, %c0, %c0] : memref<1x2x4x1xi32>
// TODO: FFI-based solution to allow testing and printing with python code.
return %0 : i32
}
"""
pooling_boiler = """
func @main() -> i32 attributes {llvm.emit_c_interface} {
%v0 = constant 0 : i32
%v42 = constant 42.0 : f64
%v77 = constant 77.0 : f64
%v-13 = constant -13.0 : f64
%v1 = constant 1.0 : f64
%input = memref.alloc() : memref<1x4x16x1xf64>
%shape = memref.alloc() : memref<2x2xf64>
%output = memref.alloc() : memref<1x2x4x1xi32>
linalg.fill(%v1, %input) : f64, memref<1x4x16x1xf64>
linalg.fill(%v1, %shape) : f64, memref<2x2xf64>
linalg.fill(%v0, %output) : i32, memref<1x2x4x1xi32>
%c0 = constant 0 : index
%c1 = constant 1 : index
%c2 = constant 2 : index
memref.store %v42, %input[%c0, %c0, %c0, %c0] : memref<1x4x16x1xf64>
memref.store %v77, %input[%c0, %c0, %c1, %c0] : memref<1x4x16x1xf64>
memref.store %v-13, %input[%c0, %c0, %c2, %c0] : memref<1x4x16x1xf64>
call @pooling_on_buffers(%input, %shape, %output) :
(memref<1x4x16x1xf64>, memref<2x2xf64>, memref<1x2x4x1xi32>) -> ()
%0 = memref.load %output[%c0, %c0, %c0, %c0] : memref<1x2x4x1xi32>
// TODO: FFI-based solution to allow testing and printing with python code.
return %0 : i32
}
"""
def transform(module, boilerplate):
  """Splice *boilerplate* onto the payload function of *module* and lower to LLVM.

  The first operation of the module (the generated payload func) is printed as
  text, concatenated with the *boilerplate* main(), and re-parsed into a fresh
  module, which is then lowered through loops down to the LLVM dialect so the
  ExecutionEngine can JIT it. Returns the lowered module.
  """
  # These imports register the conversion / linalg / transform passes as a
  # side effect; the names themselves are not used.
  import mlir.conversions
  import mlir.dialects.linalg.passes
  import mlir.transforms
  mod = Module.parse(
      str(module.operation.regions[0].blocks[0].operations[0].operation) +
      boilerplate)
  pm = PassManager.parse(
      "builtin.func(convert-linalg-to-loops, lower-affine, " +
      "convert-scf-to-std), convert-vector-to-llvm," +
      "convert-memref-to-llvm,convert-std-to-llvm")
  pm.run(mod)
  return mod
def test_matmul_builtin():
  """Emit linalg.matmul in its named form, lower it, and execute it via the JIT."""
  with Context(), Location.unknown():
    mlir_module = Module.create()
    elem_type = F32Type.get()
    lhs_type = MemRefType.get((4, 16), elem_type)
    rhs_type = MemRefType.get((16, 8), elem_type)
    acc_type = MemRefType.get((4, 8), elem_type)
    with InsertionPoint(mlir_module.body):
      @builtin.FuncOp.from_py_func(lhs_type, rhs_type, acc_type)
      def matmul_on_buffers(lhs, rhs, out):
        linalg.matmul(lhs, rhs, outs=[out])
    engine = ExecutionEngine(transform(mlir_module, matmul_boiler))
    # One f32 result slot, passed as a pointer through the C interface.
    result = (ctypes.c_float * 1)(-1.)
    engine.invoke("main", result)
    log("RESULT: ", result[0])
test_matmul_builtin()
def test_matmul_generic():
  """Same as test_matmul_builtin but lowers matmul through linalg.generic."""
  with Context(), Location.unknown():
    mlir_module = Module.create()
    elem_type = F32Type.get()
    lhs_type = MemRefType.get((4, 16), elem_type)
    rhs_type = MemRefType.get((16, 8), elem_type)
    acc_type = MemRefType.get((4, 8), elem_type)
    with InsertionPoint(mlir_module.body):
      @builtin.FuncOp.from_py_func(lhs_type, rhs_type, acc_type)
      def matmul_on_buffers(lhs, rhs, out):
        linalg.matmul(lhs, rhs, outs=[out], emit_generic=True)
    engine = ExecutionEngine(transform(mlir_module, matmul_boiler))
    # One f32 result slot, passed as a pointer through the C interface.
    result = (ctypes.c_float * 1)(-1.)
    engine.invoke("main", result)
    log("RESULT: ", result[0])
test_matmul_generic()
def test_fill_builtin():
  """Run linalg.fill_rng_2d in its named form and print the first output element."""
  with Context(), Location.unknown():
    mlir_module = Module.create()
    f64 = F64Type.get()
    i32 = IntegerType.get_signless(32)
    out_type = MemRefType.get((4, 16), i32)
    with InsertionPoint(mlir_module.body):
      @builtin.FuncOp.from_py_func(f64, f64, i32, out_type)
      def fill_on_buffers(min, max, seed, out):
        linalg.fill_rng_2d(min, max, seed, outs=[out])
    engine = ExecutionEngine(transform(mlir_module, fill_boiler))
    # One i32 result slot, passed as a pointer through the C interface.
    result = (ctypes.c_int * 1)(-1)
    engine.invoke("main", result)
    log("RESULT: ", result[0])
test_fill_builtin()
def test_fill_generic():
  """Same as test_fill_builtin but lowers fill_rng_2d through linalg.generic."""
  with Context(), Location.unknown():
    mlir_module = Module.create()
    f64 = F64Type.get()
    i32 = IntegerType.get_signless(32)
    out_type = MemRefType.get((4, 16), i32)
    with InsertionPoint(mlir_module.body):
      @builtin.FuncOp.from_py_func(f64, f64, i32, out_type)
      def fill_on_buffers(min, max, seed, out):
        linalg.fill_rng_2d(min, max, seed, outs=[out], emit_generic=True)
    engine = ExecutionEngine(transform(mlir_module, fill_boiler))
    # One i32 result slot, passed as a pointer through the C interface.
    result = (ctypes.c_int * 1)(-1)
    engine.invoke("main", result)
    log("RESULT: ", result[0])
test_fill_generic()
def test_conv_builtin():
  """Run the named depthwise conv op with strides [2, 4] and dilations [1, 2]."""
  with Context(), Location.unknown():
    mlir_module = Module.create()
    f64 = F64Type.get()
    i32 = IntegerType.get_signless(32)
    input_type = MemRefType.get((1, 4, 16, 1), f64)
    filter_type = MemRefType.get((2, 2, 1), f64)
    output_type = MemRefType.get((1, 2, 4, 1), i32)
    with InsertionPoint(mlir_module.body):
      @builtin.FuncOp.from_py_func(input_type, filter_type, output_type)
      def conv_on_buffers(input, filter, output):
        linalg.depthwise_conv_2d_input_nhwc_filter_hwc_poly(
            input, filter, outs=[output], strides=[2, 4], dilations=[1, 2])
    engine = ExecutionEngine(transform(mlir_module, conv_boiler))
    # One i32 result slot, passed as a pointer through the C interface.
    result = (ctypes.c_int * 1)(-1)
    engine.invoke("main", result)
    log("RESULT: ", result[0])
test_conv_builtin()
def test_conv_generic():
  """Same as test_conv_builtin but lowers the conv through linalg.generic."""
  with Context(), Location.unknown():
    mlir_module = Module.create()
    f64 = F64Type.get()
    i32 = IntegerType.get_signless(32)
    input_type = MemRefType.get((1, 4, 16, 1), f64)
    filter_type = MemRefType.get((2, 2, 1), f64)
    output_type = MemRefType.get((1, 2, 4, 1), i32)
    with InsertionPoint(mlir_module.body):
      @builtin.FuncOp.from_py_func(input_type, filter_type, output_type)
      def conv_on_buffers(input, filter, output):
        linalg.depthwise_conv_2d_input_nhwc_filter_hwc_poly(
            input,
            filter,
            outs=[output],
            strides=[2, 4],
            dilations=[1, 2],
            emit_generic=True)
    engine = ExecutionEngine(transform(mlir_module, conv_boiler))
    # One i32 result slot, passed as a pointer through the C interface.
    result = (ctypes.c_int * 1)(-1)
    engine.invoke("main", result)
    log("RESULT: ", result[0])
test_conv_generic()
def test_max_pooling_builtin():
  """Run the named NHWC max-pooling op over the boilerplate-initialized input."""
  with Context(), Location.unknown():
    mlir_module = Module.create()
    f64 = F64Type.get()
    i32 = IntegerType.get_signless(32)
    input_type = MemRefType.get((1, 4, 16, 1), f64)
    shape_type = MemRefType.get((2, 2), f64)
    output_type = MemRefType.get((1, 2, 4, 1), i32)
    with InsertionPoint(mlir_module.body):
      @builtin.FuncOp.from_py_func(input_type, shape_type, output_type)
      def pooling_on_buffers(input, shape, output):
        linalg.pooling_nhwc_max(
            input, shape, outs=[output], strides=[2, 4], dilations=[1, 2])
    engine = ExecutionEngine(transform(mlir_module, pooling_boiler))
    # One i32 result slot, passed as a pointer through the C interface.
    result = (ctypes.c_int * 1)(-1)
    engine.invoke("main", result)
    log("RESULT: ", result[0])
test_max_pooling_builtin()
def test_max_pooling_generic():
  """Same as test_max_pooling_builtin but lowers pooling through linalg.generic."""
  with Context(), Location.unknown():
    mlir_module = Module.create()
    f64 = F64Type.get()
    i32 = IntegerType.get_signless(32)
    input_type = MemRefType.get((1, 4, 16, 1), f64)
    shape_type = MemRefType.get((2, 2), f64)
    output_type = MemRefType.get((1, 2, 4, 1), i32)
    with InsertionPoint(mlir_module.body):
      @builtin.FuncOp.from_py_func(input_type, shape_type, output_type)
      def pooling_on_buffers(input, shape, output):
        linalg.pooling_nhwc_max(
            input,
            shape,
            outs=[output],
            strides=[2, 4],
            dilations=[1, 2],
            emit_generic=True)
    engine = ExecutionEngine(transform(mlir_module, pooling_boiler))
    # One i32 result slot, passed as a pointer through the C interface.
    result = (ctypes.c_int * 1)(-1)
    engine.invoke("main", result)
    log("RESULT: ", result[0])
test_max_pooling_generic()
def test_min_pooling_builtin():
  """Run the named NHWC min-pooling op over the boilerplate-initialized input."""
  with Context(), Location.unknown():
    mlir_module = Module.create()
    f64 = F64Type.get()
    i32 = IntegerType.get_signless(32)
    input_type = MemRefType.get((1, 4, 16, 1), f64)
    shape_type = MemRefType.get((2, 2), f64)
    output_type = MemRefType.get((1, 2, 4, 1), i32)
    with InsertionPoint(mlir_module.body):
      @builtin.FuncOp.from_py_func(input_type, shape_type, output_type)
      def pooling_on_buffers(input, shape, output):
        linalg.pooling_nhwc_min(
            input, shape, outs=[output], strides=[2, 4], dilations=[1, 2])
    engine = ExecutionEngine(transform(mlir_module, pooling_boiler))
    # One i32 result slot, passed as a pointer through the C interface.
    result = (ctypes.c_int * 1)(-1)
    engine.invoke("main", result)
    log("RESULT: ", result[0])
test_min_pooling_builtin()
def test_min_pooling_generic():
  """Same as test_min_pooling_builtin but lowers pooling through linalg.generic."""
  with Context(), Location.unknown():
    mlir_module = Module.create()
    f64 = F64Type.get()
    i32 = IntegerType.get_signless(32)
    input_type = MemRefType.get((1, 4, 16, 1), f64)
    shape_type = MemRefType.get((2, 2), f64)
    output_type = MemRefType.get((1, 2, 4, 1), i32)
    with InsertionPoint(mlir_module.body):
      @builtin.FuncOp.from_py_func(input_type, shape_type, output_type)
      def pooling_on_buffers(input, shape, output):
        linalg.pooling_nhwc_min(
            input,
            shape,
            outs=[output],
            strides=[2, 4],
            dilations=[1, 2],
            emit_generic=True)
    engine = ExecutionEngine(transform(mlir_module, pooling_boiler))
    # One i32 result slot, passed as a pointer through the C interface.
    result = (ctypes.c_int * 1)(-1)
    engine.invoke("main", result)
    log("RESULT: ", result[0])
test_min_pooling_generic()
| true | true |
f730e7211dcf7013c3950efeeb1f7cb0ef3dae31 | 316 | py | Python | webdemo/api/controllers/root.py | HandingHu/webdemo | b9fdb67b66da8e59097c962971b32a8d7a3fc471 | [
"Apache-2.0"
] | null | null | null | webdemo/api/controllers/root.py | HandingHu/webdemo | b9fdb67b66da8e59097c962971b32a8d7a3fc471 | [
"Apache-2.0"
] | null | null | null | webdemo/api/controllers/root.py | HandingHu/webdemo | b9fdb67b66da8e59097c962971b32a8d7a3fc471 | [
"Apache-2.0"
] | null | null | null |
from pecan import rest
from wsme import types as wtypes
from webdemo.api.controllers.v1 import controller as v1_controller
from webdemo.api import expose
class RootController(rest.RestController):
    """Root resource of the REST API; exposes GET / and nests the v1 API."""

    # Mounted at /v1 — requests under that path are routed to V1Controller.
    v1 = v1_controller.V1Controller()

    @expose.expose(wtypes.text)
    def get(self):
        """Return the service name for GET on the API root."""
        return "webdemo"
| 21.066667 | 66 | 0.756329 |
from pecan import rest
from wsme import types as wtypes
from webdemo.api.controllers.v1 import controller as v1_controller
from webdemo.api import expose
class RootController(rest.RestController):
    """Top-level REST controller: answers GET / and mounts the v1 sub-API."""

    # Sub-controller handling everything under the /v1 path.
    v1 = v1_controller.V1Controller()

    @expose.expose(wtypes.text)
    def get(self):
        """Identify the service on a GET request to the root."""
        service_name = "webdemo"
        return service_name
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.