Dataset schema:

| column | type | range / notes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 996k |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 245 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 245 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 245 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 996k |
| avg_line_length | float64 | 1.33 to 58.2k |
| max_line_length | int64 | 2 to 323k |
| alphanum_fraction | float64 | 0 to 0.97 |
| content_no_comment | string | length 0 to 946k |
| is_comment_constant_removed | bool | 2 classes |
| is_sharp_comment_removed | bool | 1 class |
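Each row below describes one source file against this schema. As a quick orientation, here is a minimal sketch of filtering such rows by a few of the columns above; it assumes the rows have been exported to a local JSON Lines file named rows.jsonl with these column names, which is an assumption and not part of the dump itself.

# Minimal sketch (assumption: rows exported to rows.jsonl, one JSON object per row).
import json

with open("rows.jsonl", "r", encoding="utf-8") as fh:
    rows = [json.loads(line) for line in fh if line.strip()]

# Keep Python files from starred repos with a reasonable alphanumeric fraction.
kept = [
    r for r in rows
    if r.get("ext") == "py"
    and (r.get("max_stars_count") or 0) >= 1
    and (r.get("alphanum_fraction") or 0.0) > 0.5
]

for r in kept:
    print(r["hexsha"], r["max_stars_repo_name"], r["max_stars_repo_path"])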
hexsha: 790291306b8d75507d391fba9b580cfd2e30e82e | size: 240 | ext: py | lang: Python
max_stars: scripts/field/rienArrow.py in G00dBye/YYMS @ 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb, licenses ["MIT"], count 54, events 2019-04-16T23:24:48.000Z to 2021-12-18T11:41:50.000Z
max_issues: scripts/field/rienArrow.py in G00dBye/YYMS @ 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb, licenses ["MIT"], count 3, events 2019-05-19T15:19:41.000Z to 2020-04-27T16:29:16.000Z
max_forks: scripts/field/rienArrow.py in G00dBye/YYMS @ 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb, licenses ["MIT"], count 49, events 2020-11-25T23:29:16.000Z to 2022-03-26T16:20:24.000Z
content:
# Created by MechAviv
# ID :: [140010000]
# Snow Island : Dangerous Forest
if not "o" in sm.getQuestEx(21019, "arr"):
sm.avatarOriented("Effect/OnUserEff.img/guideEffect/aranTutorial/tutorialArrow3")
sm.setQuestEx(21019, "arr", "o")
avg_line_length: 40 | max_line_length: 85 | alphanum_fraction: 0.720833
content_no_comment:
if not "o" in sm.getQuestEx(21019, "arr"):
sm.avatarOriented("Effect/OnUserEff.img/guideEffect/aranTutorial/tutorialArrow3")
sm.setQuestEx(21019, "arr", "o")
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 790292a54e8eb8cdf5d33f844f48868af3da1b12 | size: 11,103 | ext: py | lang: Python
max_stars: plotly_study/graph_objs/streamtube/hoverlabel/__init__.py in lucasiscovici/plotly_py @ 42ab769febb45fbbe0a3c677dc4306a4f59cea36, licenses ["MIT"], count null, events null to null
max_issues: plotly_study/graph_objs/streamtube/hoverlabel/__init__.py in lucasiscovici/plotly_py @ 42ab769febb45fbbe0a3c677dc4306a4f59cea36, licenses ["MIT"], count null, events null to null
max_forks: plotly_study/graph_objs/streamtube/hoverlabel/__init__.py in lucasiscovici/plotly_py @ 42ab769febb45fbbe0a3c677dc4306a4f59cea36, licenses ["MIT"], count null, events null to null
content:
from plotly_study.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly_study.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The plotly service (at https://plot.ly or on-
premise) generates images on a server, where only a select
number of fonts are installed and supported. These include
"Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open
Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New
Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# familysrc
# ---------
@property
def familysrc(self):
"""
Sets the source reference on plot.ly for family .
The 'familysrc' property must be specified as a string or
as a plotly_study.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on plot.ly for size .
The 'sizesrc' property must be specified as a string or
as a plotly_study.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "streamtube.hoverlabel"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
**kwargs
):
"""
Construct a new Font object
Sets the font used in hover labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
plotly_study.graph_objs.streamtube.hoverlabel.Font
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
Returns
-------
Font
"""
super(Font, self).__init__("font")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly_study.graph_objs.streamtube.hoverlabel.Font
constructor must be a dict or
an instance of plotly_study.graph_objs.streamtube.hoverlabel.Font"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly_study.validators.streamtube.hoverlabel import font as v_font
# Initialize validators
# ---------------------
self._validators["color"] = v_font.ColorValidator()
self._validators["colorsrc"] = v_font.ColorsrcValidator()
self._validators["family"] = v_font.FamilyValidator()
self._validators["familysrc"] = v_font.FamilysrcValidator()
self._validators["size"] = v_font.SizeValidator()
self._validators["sizesrc"] = v_font.SizesrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("colorsrc", None)
self["colorsrc"] = colorsrc if colorsrc is not None else _v
_v = arg.pop("family", None)
self["family"] = family if family is not None else _v
_v = arg.pop("familysrc", None)
self["familysrc"] = familysrc if familysrc is not None else _v
_v = arg.pop("size", None)
self["size"] = size if size is not None else _v
_v = arg.pop("sizesrc", None)
self["sizesrc"] = sizesrc if sizesrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
__all__ = ["Font"]
avg_line_length: 34.268519 | max_line_length: 88 | alphanum_fraction: 0.565973
content_no_comment:
from plotly_study.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
@property
def color(self):
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def colorsrc(self):
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def family(self):
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def familysrc(self):
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
@property
def size(self):
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def sizesrc(self):
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
@property
def _parent_path_str(self):
return "streamtube.hoverlabel"
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
**kwargs
):
super(Font, self).__init__("font")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly_study.graph_objs.streamtube.hoverlabel.Font
constructor must be a dict or
an instance of plotly_study.graph_objs.streamtube.hoverlabel.Font"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly_study.validators.streamtube.hoverlabel import font as v_font
# Initialize validators
# ---------------------
self._validators["color"] = v_font.ColorValidator()
self._validators["colorsrc"] = v_font.ColorsrcValidator()
self._validators["family"] = v_font.FamilyValidator()
self._validators["familysrc"] = v_font.FamilysrcValidator()
self._validators["size"] = v_font.SizeValidator()
self._validators["sizesrc"] = v_font.SizesrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("colorsrc", None)
self["colorsrc"] = colorsrc if colorsrc is not None else _v
_v = arg.pop("family", None)
self["family"] = family if family is not None else _v
_v = arg.pop("familysrc", None)
self["familysrc"] = familysrc if familysrc is not None else _v
_v = arg.pop("size", None)
self["size"] = size if size is not None else _v
_v = arg.pop("sizesrc", None)
self["sizesrc"] = sizesrc if sizesrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
__all__ = ["Font"]
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 79029309861f73ab19d622b1122083230ef6e760 | size: 26,947 | ext: py | lang: Python
max_stars: gcp/__main__.py in cleveranjos/Rapid-ML-Gateway @ 10a14abfce3351791331642c47eddfbf622e76d2, licenses ["MIT"], count 3, events 2020-07-15T19:45:31.000Z to 2020-09-30T16:15:48.000Z
max_issues: gcp/__main__.py in cleveranjos/Rapid-ML-Gateway @ 10a14abfce3351791331642c47eddfbf622e76d2, licenses ["MIT"], count 12, events 2020-07-15T17:00:24.000Z to 2021-01-19T21:02:00.000Z
max_forks: gcp/__main__.py in cleveranjos/Rapid-ML-Gateway @ 10a14abfce3351791331642c47eddfbf622e76d2, licenses ["MIT"], count 2, events 2020-07-15T18:59:02.000Z to 2020-10-07T17:22:52.000Z
content:
#! /usr/bin/env python3
from ssedata import FunctionType
from google.protobuf.json_format import MessageToDict
import grpc
import argparse
import json
import logging
import logging.config
import os
import sys
import inspect
import time
from websocket import create_connection
import socket
import re
from concurrent import futures
from datetime import datetime
import requests
import configparser
PARENT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.join(PARENT_DIR, 'generated'))
sys.path.append(os.path.join(PARENT_DIR, 'helper_functions'))
import qlist
import pysize
from ssedata import FunctionType
import ServerSideExtension_pb2 as SSE
# import helper .py files
import qlist
import pysize
import ServerSideExtension_pb2 as SSE
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
config = configparser.ConfigParser()
class ExtensionService(SSE.ConnectorServicer):
"""
A simple SSE-plugin created for the HelloWorld example.
"""
def __init__(self, funcdef_file):
"""
Class initializer.
:param funcdef_file: a function definition JSON file
"""
self._function_definitions = funcdef_file
#self.ScriptEval = ScriptEval()
os.makedirs('logs', exist_ok=True)
log_file = os.path.join(os.path.dirname(
os.path.dirname(os.path.abspath(__file__))), 'logger.config')
print(log_file)
logging.config.fileConfig(log_file)
logging.info(self._function_definitions)
logging.info('Logging enabled')
function_name = "none"
@property
def function_definitions(self):
"""
:return: json file with function definitions
"""
return self._function_definitions
@property
def functions(self):
"""
:return: Mapping of function id and implementation
"""
return {
0: '_rest_single',
1: '_rest_30',
2: '_ws_single',
3: '_ws_batch',
4: '_gcp_bq'
}
@staticmethod
def _get_function_id(context):
"""
Retrieve function id from header.
:param context: context
:return: function id
"""
metadata = dict(context.invocation_metadata())
header = SSE.FunctionRequestHeader()
header.ParseFromString(metadata['qlik-functionrequestheader-bin'])
return header.functionId
@staticmethod
def _rest_single(request, context):
"""
Rest using single variable
"""
logging.info('Entering {} TimeStamp: {}' .format(
function_name, datetime.now().strftime("%H:%M:%S.%f")))
url = config.get(q_function_name, 'url')
logging.debug("Rest Url is set to {}" .format(url))
bCache = config.get(q_function_name, 'cache')
logging.debug("Caching is set to {}" .format(bCache))
if (bCache.lower() == "true"):
logging.info(
"Caching ****Enabled*** for {}" .format(q_function_name))
else:
logging.info(
"Caching ****Disabled**** for {}" .format(q_function_name))
md = (('qlik-cache', 'no-store'),)
context.send_initial_metadata(md)
response_rows = []
request_counter = 1
for request_rows in request:
logging.debug(
'Printing Request Rows - Request Counter {}' .format(request_counter))
request_counter = request_counter + 1
for row in request_rows.rows:
# Retrieve string value of parameter and append to the params variable
# Length of param is 1 since one column is received, the [0] collects the first value in the list
param = [d.strData for d in row.duals][0]
# Join with current timedate stamp
if (len(param) == 0):
logging.info('Exiting {} TimeStamp: {} due to Data being Empty ' .format(
function_name, datetime.now().strftime("%H:%M:%S.%f")))
else:
payload = '{"data":"' + param + '"}'
logging.debug('Showing Payload: {}'.format(payload))
resp = requests.post(url, data=payload)
logging.debug(
'Show Payload Response as Text: {}'.format(resp.text))
result = resp.text
result = result.replace('"', '')
result = result.strip()
logging.debug('Show Result: {}'.format(result))
# Create an iterable of dual with the result
duals = iter([SSE.Dual(strData=result)])
response_rows.append(SSE.Row(duals=duals))
# Yield the row data as bundled rows
yield SSE.BundledRows(rows=response_rows)
logging.info('Exiting {} TimeStamp: {}' .format(
function_name, datetime.now().strftime("%H:%M:%S.%f")))
@staticmethod
def _ws_single(request, context):
"""
Single Row Processing for Websockets
:param request: iterable sequence of bundled rows
:return: the same iterable sequence as received
"""
logging.info('Entering {} TimeStamp: {}' .format(
function_name, datetime.now().strftime("%H:%M:%S.%f")))
# Start by gathering environment variables
host = socket.gethostname()
ip_addr = socket.gethostbyname(host)
ws_url = config.get(q_function_name, 'ws_url')
token = config.get(q_function_name, 'token')
user_name = config.get(q_function_name, 'username')
ws_route = config.get(q_function_name, 'ws_route')
bCache = config.get(q_function_name, 'cache')
logging.debug('Printing Route for WS {}' .format(ws_route))
logging.debug("Caching is set to {}" .format(bCache))
if (bCache.lower() == "true"):
logging.info(
"Caching ****Enabled*** for {}" .format(q_function_name))
else:
logging.info(
"Caching ****Disabled**** for {}" .format(q_function_name))
md = (('qlik-cache', 'no-store'),)
context.send_initial_metadata(md)
# In future we will use the token for licensing and throttling
# Currently we are using a combination of host+ipaddr+username for client identification
ws_url = ws_url + host + '_' + ip_addr+'_' + user_name+'_'
logging.debug('Websocket URL : {}' .format(ws_url))
ws = create_connection(ws_url)
response_rows = []
for request_rows in request:
# Iterate over rows
# Default code
for row in request_rows.rows:
# Retrieve string value of parameter and append to the params variable
# Length of param is 1 since one column is received, the [0] collects the first value in the list
param = [d.strData for d in row.duals][0]
result = ''
if (len(param) == 0):
logging.debug('Parameters are Empty')
result = 'Error'
else:
payload = '{"action":"' + ws_route + \
'","data":"' + param + '"}'
logging.debug('Showing Payload: {}'.format(payload))
ws.send(payload)
#logging.info('Show Payload Response: {}'.format(resp.text))
resp = json.loads(ws.recv())
logging.debug(resp)
result = resp['result']
logging.debug('Show Result: {}'.format(result))
# Create an iterable of dual with the result
duals = iter([SSE.Dual(strData=result)])
response_rows.append(SSE.Row(duals=duals))
# Yield the row data as bundled rows
yield SSE.BundledRows(rows=response_rows)
ws.close()
logging.info('Exiting {} TimeStamp: {}' .format(
function_name, datetime.now().strftime("%H:%M:%S.%f")))
@staticmethod
def _ws_batch(request, context):
"""
Batch Row Processing for Websockets.
:param request: iterable sequence of bundled rows
:return: bundled rows containing the websocket responses
"""
logging.info('Entering {} TimeStamp: {}' .format(
function_name, datetime.now().strftime("%H:%M:%S.%f")))
host = socket.gethostname()
ip_addr = socket.gethostbyname(host)
logging.debug('Calling qrag.ini section "{}"' .format(q_function_name))
ws_url = config.get(q_function_name, 'ws_url')
token = config.get(q_function_name, 'token')
user_name = config.get(q_function_name, 'username')
batch_size = int(config.get(q_function_name, 'batch_size'))
logging.debug('Batch Size {}' .format(batch_size))
ws_route = config.get(q_function_name, 'ws_route')
logging.info('API Route : {}' .format(ws_route))
# setup Caching
bCache = config.get(q_function_name, 'cache')
logging.debug("Caching is set to {}" .format(bCache))
if (bCache.lower() == "true"):
logging.info(
"Caching ****Enabled*** for {}" .format(q_function_name))
else:
logging.info(
"Caching ****Disabled**** for {}" .format(q_function_name))
md = (('qlik-cache', 'no-store'),)
context.send_initial_metadata(md)
ws_url = ws_url + host + '_' + ip_addr+'_' + user_name+'_'
logging.debug('Full url for ws: {} '.format(ws_url))
ws = create_connection(ws_url)
response_rows = []
outer_counter = 1
inner_counter = 1
request_counter = 1
for request_rows in request:
logging.debug(
'Printing Request Rows - Request Counter {}' .format(request_counter))
request_counter += 1
temp = MessageToDict(request_rows)
logging.debug('Temp Message to Dict {}' .format(temp))
test_rows = temp['rows']
logging.debug('Test Rows: {}' .format(test_rows))
request_size = len(test_rows)
logging.debug(
'Bundled Row Number of Rows - {}' .format(request_size))
batches = list(qlist.divide_chunks(test_rows, batch_size))
for i in batches:
payload_t = {"action": ws_route}
logging.debug('Prefix Route Selection {}' .format(payload_t))
logging.debug(len(batches))
payload_t["data"] = i
logging.debug('Size of payload {}' .format(
pysize.get_size(payload_t)))
logging.debug('Showing Payload: {}'.format(payload_t))
logging.debug('batch number {}'.format(outer_counter))
ws.send(json.dumps(payload_t))
logging.debug('message sent WS')
outer_counter += 1
payload_t.clear()
for j in i:
#logging.debug("Priniting i {}" .format(i))
resp = json.loads(ws.recv())
#logging.debug('Response Type : {}' .format(type(resp)))
logging.debug('Counter: {} Payload Size: {} Payload Response: {}'.format(
inner_counter, pysize.get_size(resp), resp))
inner_counter += 1
result = resp['result']
logging.debug('Log Result: {}' .format(result))
duals = iter([SSE.Dual(strData=result)])
# logging.debug(duals)
#logging.debug('Printing Duals {}' .format(duals))
# Yield the row data as bundled rows
response_rows.append(SSE.Row(duals=duals))
logging.debug(
'Exiting Inner Loop: Printing j {}' .format(j))
yield SSE.BundledRows(rows=response_rows)
ws.close()
logging.info('Exiting {} TimeStamp: {}' .format(
function_name, datetime.now().strftime("%H:%M:%S.%f")))
@staticmethod
def _rest_30(request, context):
"""
Aggregates the parameters to a single comma separated string.
"""
logging.info('Entering {} TimeStamp: {}' .format(
function_name, datetime.now().strftime("%H:%M:%S.%f")))
url = config.get(q_function_name, 'url')
bCache = config.get(q_function_name, 'cache')
logging.debug("Caching is set to {}" .format(bCache))
if (bCache.lower() == "true"):
logging.info(
"Caching ****Enabled*** for {}" .format(q_function_name))
else:
logging.info(
"Caching ****Disabled**** for {}" .format(q_function_name))
md = (('qlik-cache', 'no-store'),)
context.send_initial_metadata(md)
# Iterate over bundled rows
response_rows = []
for request_rows in request:
# Iterate over rows
for row in request_rows.rows:
# Retrieve string value of parameter and append to the params variable
# Length of param is 1 since one column is received, the [0] collects the first value in the list
param = [d.strData for d in row.duals]
if (len(param) == 0):
logging.debug('Parameters are Empty')
result = 'Error'
#logging.info('Showing Payload: {}'.format(param))
# Aggregate parameters to a single string
# Join payload via =','.join(param)
else:
payload = '{"data":"' + (','.join(param)) + '"}'
logging.debug('Showing Payload: {}'.format(payload))
resp = requests.post(url, data=payload)
logging.debug(
'Show Payload Response: {}'.format(resp.text))
result = resp.text
result = result.replace('"', '')
result = result.strip()
logging.debug('Show Result: {}'.format(result))
# Create an iterable of dual with the result
duals = iter([SSE.Dual(strData=result)])
response_rows.append(SSE.Row(duals=duals))
# Yield the row data as bundled rows
yield SSE.BundledRows(rows=response_rows)
logging.info('Exiting Predict v2 TimeStamp: {}' .format(
datetime.now().strftime("%H:%M:%S.%f")))
@staticmethod
def _gcp_bq(request, context):
"""
Google Cloud Big Query Client Integration
November 2020
john.park@qlik.com
"""
logging.info('Entering {} TimeStamp: {}' .format(
function_name, datetime.now().strftime("%H:%M:%S.%f")))
url = config.get(q_function_name, 'url')
bCache = config.get(q_function_name, 'cache')
logging.debug("Caching is set to {}" .format(bCache))
if (bCache.lower() == "true"):
logging.info(
"Caching ****Enabled*** for {}" .format(q_function_name))
else:
logging.info(
"Caching ****Disabled**** for {}" .format(q_function_name))
md = (('qlik-cache', 'no-store'),)
context.send_initial_metadata(md)
# Iterate over bundled rows
response_rows = []
for request_rows in request:
# Iterate over rows
for row in request_rows.rows:
# Retrieve string value of parameter and append to the params variable
# Length of param is 1 since one column is received, the [0] collects the first value in the list
param = [d.strData for d in row.duals]
if (len(param) == 0):
logging.debug('Parameters are Empty')
result = 'Error'
#logging.info('Showing Payload: {}'.format(param))
# Aggregate parameters to a single string
# Join payload via =','.join(param)
else:
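# NOTE: 'result' is never assigned on this branch; the BigQuery query that should populate it is not implemented in this stub.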
# Create an iterable of dual with the result
duals = iter([SSE.Dual(strData=result)])
response_rows.append(SSE.Row(duals=duals))
# Yield the row data as bundled rows
yield SSE.BundledRows(rows=response_rows)
logging.info('Exiting gcp_bq TimeStamp: {}' .format(
datetime.now().strftime("%H:%M:%S.%f")))
@staticmethod
def _cache(request, context):
"""
Cache enabled. Add the datetime stamp to the end of each string value.
:param request: iterable sequence of bundled rows
:param context: not used.
:return: string
"""
# Iterate over bundled rows
for request_rows in request:
# Iterate over rows
for row in request_rows.rows:
# Retrieve string value of parameter and append to the params variable
# Length of param is 1 since one column is received, the [0] collects the first value in the list
param = [d.strData for d in row.duals][0]
# Join with current timedate stamp
result = param + ' ' + datetime.now().isoformat()
# Create an iterable of dual with the result
duals = iter([SSE.Dual(strData=result)])
# Yield the row data as bundled rows
yield SSE.BundledRows(rows=[SSE.Row(duals=duals)])
@staticmethod
def _no_cache(request, context):
"""
Cache disabled. Add the datetime stamp to the end of each string value.
:param request:
:param context: used for disabling the cache in the header.
:return: string
"""
# Disable caching.
md = (('qlik-cache', 'no-store'),)
context.send_initial_metadata(md)
# Iterate over bundled rows
for request_rows in request:
# Iterate over rows
for row in request_rows.rows:
# Retrieve string value of parameter and append to the params variable
# Length of param is 1 since one column is received, the [0] collects the first value in the list
param = [d.strData for d in row.duals][0]
# Join with current timedate stamp
result = param + ' ' + datetime.now().isoformat()
# Create an iterable of dual with the result
duals = iter([SSE.Dual(strData=result)])
# Yield the row data as bundled rows
yield SSE.BundledRows(rows=[SSE.Row(duals=duals)])
def _get_call_info(self, context):
"""
Retrieve useful information for the function call.
:param context: context
:return: string containing header info
"""
# Get metadata for the call from the context
metadata = dict(context.invocation_metadata())
# Get the function ID
func_header = SSE.FunctionRequestHeader()
func_header.ParseFromString(metadata['qlik-functionrequestheader-bin'])
func_id = func_header.functionId
# Get the common request header
common_header = SSE.CommonRequestHeader()
common_header.ParseFromString(metadata['qlik-commonrequestheader-bin'])
# Get capabilities
if not hasattr(self, 'capabilities'):
self.capabilities = self.GetCapabilities(None, context)
# Get the name of the capability called in the function
capability = [
function.name for function in self.capabilities.functions if function.functionId == func_id][0]
# Get the user ID using a regular expression
match = re.match(r"UserDirectory=(?P<UserDirectory>\w*)\W+UserId=(?P<UserId>\w*)",
common_header.userId, re.IGNORECASE)
if match:
userId = match.group('UserDirectory') + '/' + match.group('UserId')
else:
userId = common_header.userId
# Get the app ID
appId = common_header.appId
# Get the call's origin
peer = context.peer()
return "{0} - Capability '{1}' called by user {2} from app {3}".format(peer, capability, userId, appId)
@staticmethod
def _echo_table(request, context):
"""
Echo the input table.
:param request:
:param context:
:return:
"""
for request_rows in request:
response_rows = []
for row in request_rows.rows:
response_rows.append(row)
yield SSE.BundledRows(rows=response_rows)
def GetCapabilities(self, request, context):
"""
Get capabilities.
Note that either request or context is used in the implementation of this method, but still added as
parameters. The reason is that gRPC always sends both when making a function call and therefore we must include
them to avoid error messages regarding too many parameters provided from the client.
:param request: the request, not used in this method.
:param context: the context, not used in this method.
:return: the capabilities.
"""
logging.info('GetCapabilities')
# Create an instance of the Capabilities grpc message
# Enable(or disable) script evaluation
# Set values for pluginIdentifier and pluginVersion
capabilities = SSE.Capabilities(allowScript=True,
pluginIdentifier='Qlik Rapid API Gateway - Partner Engineering',
pluginVersion='v0.1.0')
# If user defined functions supported, add the definitions to the message
with open(self.function_definitions) as json_file:
# Iterate over each function definition and add data to the capabilities grpc message
for definition in json.load(json_file)['Functions']:
function = capabilities.functions.add()
function.name = definition['Name']
function.functionId = definition['Id']
function.functionType = definition['Type']
function.returnType = definition['ReturnType']
# Retrieve name and type of each parameter
for param_name, param_type in sorted(definition['Params'].items()):
function.params.add(name=param_name, dataType=param_type)
logging.info('Adding to capabilities: {}({})'.format(function.name,
[p.name for p in function.params]))
return capabilities
def ExecuteFunction(self, request_iterator, context):
"""
Execute function call.
:param request_iterator: an iterable sequence of Row.
:param context: the context.
:return: an iterable sequence of Row.
"""
func_id = self._get_function_id(context)
logging.info(self._get_call_info(context))
# Call corresponding function
logging.info('ExecuteFunctions (functionId: {})' .format(func_id))
# self.functions[func_id]))
current_function_def = (json.load(open(self.function_definitions))[
'Functions'])[func_id]
logging.debug(current_function_def)
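# NOTE: q_function_name and function_name are module-level globals shared across calls; with a multi-threaded gRPC server, concurrent requests can overwrite them.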
global q_function_name
q_function_name = current_function_def["Name"]
logging.debug('Logical Method Called is: {}' .format(q_function_name))
current_qrap_type = current_function_def["QRAP_Type"]
qrag_function_name = '_' + current_qrap_type
logging.debug(
'This is the type of QRAG Method Name: {}' .format(current_qrap_type))
logging.debug(
'Physical Method Called is: {}' .format(qrag_function_name))
# Converts the method name to the physical main function
qrag_id = qlist.find_key(self.functions, qrag_function_name)
logging.debug('QRAG ID: {}' .format(qrag_id))
global function_name
function_name = self.functions[qrag_id]
return getattr(self, self.functions[qrag_id])(request_iterator, context)
def Serve(self, port, pem_dir):
"""
Sets up the gRPC Server with insecure connection on port
:param port: port to listen on.
:param pem_dir: Directory including certificates
:return: None
"""
# Create gRPC server
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
SSE.add_ConnectorServicer_to_server(self, server)
if pem_dir:
# Secure connection
with open(os.path.join(pem_dir, 'sse_server_key.pem'), 'rb') as f:
private_key = f.read()
with open(os.path.join(pem_dir, 'sse_server_cert.pem'), 'rb') as f:
cert_chain = f.read()
with open(os.path.join(pem_dir, 'root_cert.pem'), 'rb') as f:
root_cert = f.read()
credentials = grpc.ssl_server_credentials(
[(private_key, cert_chain)], root_cert, True)
server.add_secure_port('[::]:{}'.format(port), credentials)
logging.info(
'*** Running server in secure mode on port: {} ***'.format(port))
else:
# Insecure connection
server.add_insecure_port('[::]:{}'.format(port))
logging.info(
'*** Running server in insecure mode on port: {} ***'.format(port))
# Start gRPC server
server.start()
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
qrag_file = os.path.join(os.path.dirname(__file__), 'config', 'qrag.ini')
config.read(qrag_file)
print(qrag_file)
print(config.sections())
port = config.get('base', 'port')
parser.add_argument('--port', nargs='?', default=port)
parser.add_argument('--pem_dir', nargs='?')
parser.add_argument('--definition_file', nargs='?',
default='functions.json')
args = parser.parse_args()
# need to locate the file when script is called from outside it's location dir.
def_file = os.path.join(os.path.dirname(
os.path.abspath(__file__)), args.definition_file)
print(def_file)
logging.info('*** Server Configurations Port: {}, Pem_Dir: {}, def_file {} TimeStamp: {} ***'.format(
args.port, args.pem_dir, def_file, datetime.now().isoformat()))
calc = ExtensionService(def_file)
calc.Serve(args.port, args.pem_dir)
avg_line_length: 42.977671 | max_line_length: 119 | alphanum_fraction: 0.575426
content_no_comment:
from ssedata import FunctionType
from google.protobuf.json_format import MessageToDict
import grpc
import argparse
import json
import logging
import logging.config
import os
import sys
import inspect
import time
from websocket import create_connection
import socket
import re
from concurrent import futures
from datetime import datetime
import requests
import configparser
PARENT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.join(PARENT_DIR, 'generated'))
sys.path.append(os.path.join(PARENT_DIR, 'helper_functions'))
import qlist
import pysize
from ssedata import FunctionType
import ServerSideExtension_pb2 as SSE
import qlist
import pysize
import ServerSideExtension_pb2 as SSE
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
config = configparser.ConfigParser()
class ExtensionService(SSE.ConnectorServicer):
"""
A simple SSE-plugin created for the HelloWorld example.
"""
def __init__(self, funcdef_file):
"""
Class initializer.
:param funcdef_file: a function definition JSON file
"""
self._function_definitions = funcdef_file
os.makedirs('logs', exist_ok=True)
log_file = os.path.join(os.path.dirname(
os.path.dirname(os.path.abspath(__file__))), 'logger.config')
print(log_file)
logging.config.fileConfig(log_file)
logging.info(self._function_definitions)
logging.info('Logging enabled')
function_name = "none"
@property
def function_definitions(self):
"""
:return: json file with function definitions
"""
return self._function_definitions
@property
def functions(self):
"""
:return: Mapping of function id and implementation
"""
return {
0: '_rest_single',
1: '_rest_30',
2: '_ws_single',
3: '_ws_batch',
4: '_gcp_bq'
}
@staticmethod
def _get_function_id(context):
"""
Retrieve function id from header.
:param context: context
:return: function id
"""
metadata = dict(context.invocation_metadata())
header = SSE.FunctionRequestHeader()
header.ParseFromString(metadata['qlik-functionrequestheader-bin'])
return header.functionId
@staticmethod
def _rest_single(request, context):
"""
Rest using single variable
"""
logging.info('Entering {} TimeStamp: {}' .format(
function_name, datetime.now().strftime("%H:%M:%S.%f")))
url = config.get(q_function_name, 'url')
logging.debug("Rest Url is set to {}" .format(url))
bCache = config.get(q_function_name, 'cache')
logging.debug("Caching is set to {}" .format(bCache))
if (bCache.lower() == "true"):
logging.info(
"Caching ****Enabled*** for {}" .format(q_function_name))
else:
logging.info(
"Caching ****Disabled**** for {}" .format(q_function_name))
md = (('qlik-cache', 'no-store'),)
context.send_initial_metadata(md)
response_rows = []
request_counter = 1
for request_rows in request:
logging.debug(
'Printing Request Rows - Request Counter {}' .format(request_counter))
request_counter = request_counter + 1
for row in request_rows.rows:
param = [d.strData for d in row.duals][0]
if (len(param) == 0):
logging.info('Exiting {} TimeStamp: {} due to Data being Empty ' .format(
function_name, datetime.now().strftime("%H:%M:%S.%f")))
else:
payload = '{"data":"' + param + '"}'
logging.debug('Showing Payload: {}'.format(payload))
resp = requests.post(url, data=payload)
logging.debug(
'Show Payload Response as Text: {}'.format(resp.text))
result = resp.text
result = result.replace('"', '')
result = result.strip()
logging.debug('Show Result: {}'.format(result))
# Create an iterable of dual with the result
duals = iter([SSE.Dual(strData=result)])
response_rows.append(SSE.Row(duals=duals))
# Yield the row data as bundled rows
yield SSE.BundledRows(rows=response_rows)
logging.info('Exiting {} TimeStamp: {}' .format(
function_name, datetime.now().strftime("%H:%M:%S.%f")))
@staticmethod
def _ws_single(request, context):
"""
Single Row Processing for Websockets
:param request: iterable sequence of bundled rows
:return: the same iterable sequence as received
"""
logging.info('Entering {} TimeStamp: {}' .format(
function_name, datetime.now().strftime("%H:%M:%S.%f")))
# Start by gathering environment variables
host = socket.gethostname()
ip_addr = socket.gethostbyname(host)
ws_url = config.get(q_function_name, 'ws_url')
token = config.get(q_function_name, 'token')
user_name = config.get(q_function_name, 'username')
ws_route = config.get(q_function_name, 'ws_route')
bCache = config.get(q_function_name, 'cache')
logging.debug('Printing Route for WS {}' .format(ws_route))
logging.debug("Caching is set to {}" .format(bCache))
if (bCache.lower() == "true"):
logging.info(
"Caching ****Enabled*** for {}" .format(q_function_name))
else:
logging.info(
"Caching ****Disabled**** for {}" .format(q_function_name))
md = (('qlik-cache', 'no-store'),)
context.send_initial_metadata(md)
# In future we will use the token for licensing and throttling
# Currently we are using a combination of host+ipaddr+username for client identification
ws_url = ws_url + host + '_' + ip_addr+'_' + user_name+'_'
logging.debug('Websocket URL : {}' .format(ws_url))
ws = create_connection(ws_url)
response_rows = []
for request_rows in request:
# Iterate over rows
# Default code
for row in request_rows.rows:
# Retrieve string value of parameter and append to the params variable
# Length of param is 1 since one column is received, the [0] collects the first value in the list
param = [d.strData for d in row.duals][0]
result = ''
if (len(param) == 0):
logging.debug('Parameters are Empty')
result = 'Error'
else:
payload = '{"action":"' + ws_route + \
'","data":"' + param + '"}'
logging.debug('Showing Payload: {}'.format(payload))
ws.send(payload)
#logging.info('Show Payload Response: {}'.format(resp.text))
resp = json.loads(ws.recv())
logging.debug(resp)
result = resp['result']
logging.debug('Show Result: {}'.format(result))
# Create an iterable of dual with the result
duals = iter([SSE.Dual(strData=result)])
response_rows.append(SSE.Row(duals=duals))
# Yield the row data as bundled rows
yield SSE.BundledRows(rows=response_rows)
ws.close()
logging.info('Exiting {} TimeStamp: {}' .format(
function_name, datetime.now().strftime("%H:%M:%S.%f")))
@staticmethod
def _ws_batch(request, context):
"""
Batch Row Processing for Websockets.
:param request: iterable sequence of bundled rows
:return: bundled rows containing the websocket responses
"""
logging.info('Entering {} TimeStamp: {}' .format(
function_name, datetime.now().strftime("%H:%M:%S.%f")))
host = socket.gethostname()
ip_addr = socket.gethostbyname(host)
logging.debug('Calling qrag.ini section "{}"' .format(q_function_name))
ws_url = config.get(q_function_name, 'ws_url')
token = config.get(q_function_name, 'token')
user_name = config.get(q_function_name, 'username')
batch_size = int(config.get(q_function_name, 'batch_size'))
logging.debug('Batch Size {}' .format(batch_size))
ws_route = config.get(q_function_name, 'ws_route')
logging.info('API Route : {}' .format(ws_route))
bCache = config.get(q_function_name, 'cache')
logging.debug("Caching is set to {}" .format(bCache))
if (bCache.lower() == "true"):
logging.info(
"Caching ****Enabled*** for {}" .format(q_function_name))
else:
logging.info(
"Caching ****Disabled**** for {}" .format(q_function_name))
md = (('qlik-cache', 'no-store'),)
context.send_initial_metadata(md)
ws_url = ws_url + host + '_' + ip_addr+'_' + user_name+'_'
logging.debug('Full url for ws: {} '.format(ws_url))
ws = create_connection(ws_url)
response_rows = []
outer_counter = 1
inner_counter = 1
request_counter = 1
for request_rows in request:
logging.debug(
'Printing Request Rows - Request Counter {}' .format(request_counter))
request_counter += 1
temp = MessageToDict(request_rows)
logging.debug('Temp Message to Dict {}' .format(temp))
test_rows = temp['rows']
logging.debug('Test Rows: {}' .format(test_rows))
request_size = len(test_rows)
logging.debug(
'Bundled Row Number of Rows - {}' .format(request_size))
batches = list(qlist.divide_chunks(test_rows, batch_size))
for i in batches:
payload_t = {"action": ws_route}
logging.debug('Prefix Route Selection {}' .format(payload_t))
logging.debug(len(batches))
payload_t["data"] = i
logging.debug('Size of payload {}' .format(
pysize.get_size(payload_t)))
logging.debug('Showing Payload: {}'.format(payload_t))
logging.debug('batch number {}'.format(outer_counter))
ws.send(json.dumps(payload_t))
logging.debug('message sent WS')
outer_counter += 1
payload_t.clear()
for j in i:
resp = json.loads(ws.recv())
logging.debug('Counter: {} Payload Size: {} Payload Response: {}'.format(
inner_counter, pysize.get_size(resp), resp))
inner_counter += 1
result = resp['result']
logging.debug('Log Result: {}' .format(result))
duals = iter([SSE.Dual(strData=result)])
response_rows.append(SSE.Row(duals=duals))
logging.debug(
'Exiting Inner Loop: Printing j {}' .format(j))
yield SSE.BundledRows(rows=response_rows)
ws.close()
logging.info('Exiting {} TimeStamp: {}' .format(
function_name, datetime.now().strftime("%H:%M:%S.%f")))
@staticmethod
def _rest_30(request, context):
"""
Aggregates the parameters to a single comma separated string.
"""
logging.info('Entering {} TimeStamp: {}' .format(
function_name, datetime.now().strftime("%H:%M:%S.%f")))
url = config.get(q_function_name, 'url')
bCache = config.get(q_function_name, 'cache')
logging.debug("Caching is set to {}" .format(bCache))
if (bCache.lower() == "true"):
logging.info(
"Caching ****Enabled*** for {}" .format(q_function_name))
else:
logging.info(
"Caching ****Disabled**** for {}" .format(q_function_name))
md = (('qlik-cache', 'no-store'),)
context.send_initial_metadata(md)
response_rows = []
for request_rows in request:
for row in request_rows.rows:
param = [d.strData for d in row.duals]
if (len(param) == 0):
logging.debug('Parameters are Empty')
result = 'Error'
else:
payload = '{"data":"' + (','.join(param)) + '"}'
logging.debug('Showing Payload: {}'.format(payload))
resp = requests.post(url, data=payload)
logging.debug(
'Show Payload Response: {}'.format(resp.text))
result = resp.text
result = result.replace('"', '')
result = result.strip()
logging.debug('Show Result: {}'.format(result))
# Create an iterable of dual with the result
duals = iter([SSE.Dual(strData=result)])
response_rows.append(SSE.Row(duals=duals))
# Yield the row data as bundled rows
yield SSE.BundledRows(rows=response_rows)
logging.info('Exiting Predict v2 TimeStamp: {}' .format(
datetime.now().strftime("%H:%M:%S.%f")))
@staticmethod
def _gcp_bq(request, context):
"""
Google Cloud Big Query Client Integration
November 2020
john.park@qlik.com
"""
logging.info('Entering {} TimeStamp: {}' .format(
function_name, datetime.now().strftime("%H:%M:%S.%f")))
url = config.get(q_function_name, 'url')
bCache = config.get(q_function_name, 'cache')
logging.debug("Caching is set to {}" .format(bCache))
if (bCache.lower() == "true"):
logging.info(
"Caching ****Enabled*** for {}" .format(q_function_name))
else:
logging.info(
"Caching ****Disabled**** for {}" .format(q_function_name))
md = (('qlik-cache', 'no-store'),)
context.send_initial_metadata(md)
# Iterate over bundled rows
response_rows = []
for request_rows in request:
# Iterate over rows
for row in request_rows.rows:
# Retrieve string value of parameter and append to the params variable
# Length of param is 1 since one column is received, the [0] collects the first value in the list
param = [d.strData for d in row.duals]
if (len(param) == 0):
logging.debug('Parameters are Empty')
result = 'Error'
#logging.info('Showing Payload: {}'.format(param))
# Aggregate parameters to a single string
# Join payload via =','.join(param)
else:
# Create an iterable of dual with the result
duals = iter([SSE.Dual(strData=result)])
response_rows.append(SSE.Row(duals=duals))
# Yield the row data as bundled rows
yield SSE.BundledRows(rows=response_rows)
logging.info('Exiting gcp_bq TimeStamp: {}' .format(
datetime.now().strftime("%H:%M:%S.%f")))
@staticmethod
def _cache(request, context):
"""
Cache enabled. Add the datetime stamp to the end of each string value.
:param request: iterable sequence of bundled rows
:param context: not used.
:return: string
"""
# Iterate over bundled rows
for request_rows in request:
# Iterate over rows
for row in request_rows.rows:
# Retrieve string value of parameter and append to the params variable
# Length of param is 1 since one column is received, the [0] collects the first value in the list
param = [d.strData for d in row.duals][0]
# Join with current timedate stamp
result = param + ' ' + datetime.now().isoformat()
# Create an iterable of dual with the result
duals = iter([SSE.Dual(strData=result)])
# Yield the row data as bundled rows
yield SSE.BundledRows(rows=[SSE.Row(duals=duals)])
@staticmethod
def _no_cache(request, context):
"""
Cache disabled. Add the datetime stamp to the end of each string value.
:param request:
:param context: used for disabling the cache in the header.
:return: string
"""
# Disable caching.
md = (('qlik-cache', 'no-store'),)
context.send_initial_metadata(md)
# Iterate over bundled rows
for request_rows in request:
# Iterate over rows
for row in request_rows.rows:
# Retrieve string value of parameter and append to the params variable
# Length of param is 1 since one column is received, the [0] collects the first value in the list
param = [d.strData for d in row.duals][0]
# Join with current timedate stamp
result = param + ' ' + datetime.now().isoformat()
# Create an iterable of dual with the result
duals = iter([SSE.Dual(strData=result)])
# Yield the row data as bundled rows
yield SSE.BundledRows(rows=[SSE.Row(duals=duals)])
def _get_call_info(self, context):
"""
Retrieve useful information for the function call.
:param context: context
:return: string containing header info
"""
# Get metadata for the call from the context
metadata = dict(context.invocation_metadata())
# Get the function ID
func_header = SSE.FunctionRequestHeader()
func_header.ParseFromString(metadata['qlik-functionrequestheader-bin'])
func_id = func_header.functionId
# Get the common request header
common_header = SSE.CommonRequestHeader()
common_header.ParseFromString(metadata['qlik-commonrequestheader-bin'])
# Get capabilities
if not hasattr(self, 'capabilities'):
self.capabilities = self.GetCapabilities(None, context)
# Get the name of the capability called in the function
capability = [
function.name for function in self.capabilities.functions if function.functionId == func_id][0]
# Get the user ID using a regular expression
match = re.match(r"UserDirectory=(?P<UserDirectory>\w*)\W+UserId=(?P<UserId>\w*)",
common_header.userId, re.IGNORECASE)
if match:
userId = match.group('UserDirectory') + '/' + match.group('UserId')
else:
userId = common_header.userId
# Get the app ID
appId = common_header.appId
# Get the call's origin
peer = context.peer()
return "{0} - Capability '{1}' called by user {2} from app {3}".format(peer, capability, userId, appId)
@staticmethod
def _echo_table(request, context):
"""
Echo the input table.
:param request:
:param context:
:return:
"""
for request_rows in request:
response_rows = []
for row in request_rows.rows:
response_rows.append(row)
yield SSE.BundledRows(rows=response_rows)
def GetCapabilities(self, request, context):
"""
Get capabilities.
Note that either request or context is used in the implementation of this method, but still added as
parameters. The reason is that gRPC always sends both when making a function call and therefore we must include
them to avoid error messages regarding too many parameters provided from the client.
:param request: the request, not used in this method.
:param context: the context, not used in this method.
:return: the capabilities.
"""
logging.info('GetCapabilities')
# Create an instance of the Capabilities grpc message
# Enable(or disable) script evaluation
# Set values for pluginIdentifier and pluginVersion
capabilities = SSE.Capabilities(allowScript=True,
pluginIdentifier='Qlik Rapid API Gateway - Partner Engineering',
pluginVersion='v0.1.0')
# If user defined functions supported, add the definitions to the message
with open(self.function_definitions) as json_file:
# Iterate over each function definition and add data to the capabilities grpc message
for definition in json.load(json_file)['Functions']:
function = capabilities.functions.add()
function.name = definition['Name']
function.functionId = definition['Id']
function.functionType = definition['Type']
function.returnType = definition['ReturnType']
# Retrieve name and type of each parameter
for param_name, param_type in sorted(definition['Params'].items()):
function.params.add(name=param_name, dataType=param_type)
logging.info('Adding to capabilities: {}({})'.format(function.name,
[p.name for p in function.params]))
return capabilities
def ExecuteFunction(self, request_iterator, context):
"""
Execute function call.
:param request_iterator: an iterable sequence of Row.
:param context: the context.
:return: an iterable sequence of Row.
"""
func_id = self._get_function_id(context)
logging.info(self._get_call_info(context))
# Call corresponding function
logging.info('ExecuteFunctions (functionId: {})' .format(func_id))
# self.functions[func_id]))
current_function_def = (json.load(open(self.function_definitions))[
'Functions'])[func_id]
logging.debug(current_function_def)
global q_function_name
q_function_name = current_function_def["Name"]
logging.debug('Logical Method Called is: {}' .format(q_function_name))
current_qrap_type = current_function_def["QRAP_Type"]
qrag_function_name = '_' + current_qrap_type
logging.debug(
'This is the type of QRAG Method Name: {}' .format(current_qrap_type))
logging.debug(
'Physical Method Called is: {}' .format(qrag_function_name))
# Converts the method name to the physical main function
qrag_id = qlist.find_key(self.functions, qrag_function_name)
logging.debug('QRAG ID: {}' .format(qrag_id))
global function_name
function_name = self.functions[qrag_id]
return getattr(self, self.functions[qrag_id])(request_iterator, context)
def Serve(self, port, pem_dir):
"""
Sets up the gRPC Server with insecure connection on port
:param port: port to listen on.
:param pem_dir: Directory including certificates
:return: None
"""
# Create gRPC server
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
SSE.add_ConnectorServicer_to_server(self, server)
if pem_dir:
# Secure connection
with open(os.path.join(pem_dir, 'sse_server_key.pem'), 'rb') as f:
private_key = f.read()
with open(os.path.join(pem_dir, 'sse_server_cert.pem'), 'rb') as f:
cert_chain = f.read()
with open(os.path.join(pem_dir, 'root_cert.pem'), 'rb') as f:
root_cert = f.read()
credentials = grpc.ssl_server_credentials(
[(private_key, cert_chain)], root_cert, True)
server.add_secure_port('[::]:{}'.format(port), credentials)
logging.info(
'*** Running server in secure mode on port: {} ***'.format(port))
else:
# Insecure connection
server.add_insecure_port('[::]:{}'.format(port))
logging.info(
'*** Running server in insecure mode on port: {} ***'.format(port))
# Start gRPC server
server.start()
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
qrag_file = os.path.join(os.path.dirname(__file__), 'config', 'qrag.ini')
config.read(qrag_file)
print(qrag_file)
print(config.sections())
port = config.get('base', 'port')
parser.add_argument('--port', nargs='?', default=port)
parser.add_argument('--pem_dir', nargs='?')
parser.add_argument('--definition_file', nargs='?',
default='functions.json')
args = parser.parse_args()
# need to locate the file when script is called from outside it's location dir.
def_file = os.path.join(os.path.dirname(
os.path.abspath(__file__)), args.definition_file)
print(def_file)
logging.info('*** Server Configurations Port: {}, Pem_Dir: {}, def_file {} TimeStamp: {} ***'.format(
args.port, args.pem_dir, def_file, datetime.now().isoformat()))
calc = ExtensionService(def_file)
calc.Serve(args.port, args.pem_dir)
is_comment_constant_removed: false | is_sharp_comment_removed: true
hexsha: 7902933ebcd21869ec2d83ded7a2afc59a6a6bdf | size: 1,015 | ext: py | lang: Python
max_stars: python/test/test_cinder_volume_source.py in adriangonz/seldon-deploy-sdk @ c5504838630a87053387cec57ec2e1e7251971e2, licenses ["Apache-2.0"], count 6, events 2021-02-18T14:37:54.000Z to 2022-01-13T13:27:43.000Z
max_issues: python/test/test_cinder_volume_source.py in adriangonz/seldon-deploy-sdk @ c5504838630a87053387cec57ec2e1e7251971e2, licenses ["Apache-2.0"], count 14, events 2021-01-04T16:32:03.000Z to 2021-12-13T17:53:59.000Z
max_forks: python/test/test_cinder_volume_source.py in adriangonz/seldon-deploy-sdk @ c5504838630a87053387cec57ec2e1e7251971e2, licenses ["Apache-2.0"], count 7, events 2021-03-17T09:05:55.000Z to 2022-01-05T10:39:56.000Z
content:
# coding: utf-8
"""
Seldon Deploy API
API to interact and manage the lifecycle of your machine learning models deployed through Seldon Deploy. # noqa: E501
OpenAPI spec version: v1alpha1
Contact: hello@seldon.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import seldon_deploy_sdk
from seldon_deploy_sdk.models.cinder_volume_source import CinderVolumeSource # noqa: E501
from seldon_deploy_sdk.rest import ApiException
class TestCinderVolumeSource(unittest.TestCase):
"""CinderVolumeSource unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testCinderVolumeSource(self):
"""Test CinderVolumeSource"""
# FIXME: construct object with mandatory attributes with example values
# model = seldon_deploy_sdk.models.cinder_volume_source.CinderVolumeSource() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 24.756098
| 122
| 0.730049
|
from __future__ import absolute_import
import unittest
import seldon_deploy_sdk
from seldon_deploy_sdk.models.cinder_volume_source import CinderVolumeSource
from seldon_deploy_sdk.rest import ApiException
class TestCinderVolumeSource(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testCinderVolumeSource(self):
        pass
if __name__ == '__main__':
unittest.main()
| true
| true
|
790293705ac4f54c3e677fc215193201ae66a8c8
| 1,076
|
py
|
Python
|
02_commCustom/receiveCustomMavlinkMSG.py
|
dmdobrea/HoverGames_Challenge2
|
a7f2d7a5898a67e06720f7db78dc7afa47701537
|
[
"BSD-3-Clause"
] | 3
|
2021-04-24T10:30:49.000Z
|
2021-12-04T04:58:06.000Z
|
02_commCustom/receiveCustomMavlinkMSG.py
|
dmdobrea/HoverGames_Challenge2
|
a7f2d7a5898a67e06720f7db78dc7afa47701537
|
[
"BSD-3-Clause"
] | null | null | null |
02_commCustom/receiveCustomMavlinkMSG.py
|
dmdobrea/HoverGames_Challenge2
|
a7f2d7a5898a67e06720f7db78dc7afa47701537
|
[
"BSD-3-Clause"
] | 1
|
2021-05-24T14:18:26.000Z
|
2021-05-24T14:18:26.000Z
|
from pymavlink import mavutil
import sys   # needed for sys.stdout in the BAD_DATA branch below
#import time
mavutil.set_dialect("video_monitor")
# create a connection to FMU
hoverGames = mavutil.mavlink_connection("/dev/ttymxc2", baud=921600)
# wait for the heartbeat message to find the system id
hoverGames.wait_heartbeat()
print("Heartbeat from system (system %u component %u)" %(hoverGames.target_system, hoverGames.target_component))
while (True) :
msg = hoverGames.recv_match(type='VIDEO_MONITOR', blocking=True)
#check that the message is valid before attempting to use it
if not msg:
print('No message!\n')
continue
if msg.get_type() == "BAD_DATA":
if mavutil.all_printable(msg.data):
sys.stdout.write(msg.data)
sys.stdout.flush()
else:
#Message is valid, so use the attribute
print('Info: %s' % msg.info)
print('Latitude : %d' % msg.lat)
print('Longitude: %d' % msg.lon)
print('No.people: %d' % msg.no_people)
print('Confidence: %f' % msg.confidence)
print('\n')
#time.sleep(1.0):
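# Editor's note (not part of the original source): recv_match() also accepts a
# timeout so the loop does not block forever, e.g.
#     msg = hoverGames.recv_match(type='VIDEO_MONITOR', blocking=True, timeout=5.0)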
| 30.742857
| 112
| 0.643123
|
from pymavlink import mavutil
import sys
mavutil.set_dialect("video_monitor")
hoverGames = mavutil.mavlink_connection("/dev/ttymxc2", baud=921600)
hoverGames.wait_heartbeat()
print("Heartbeat from system (system %u component %u)" %(hoverGames.target_system, hoverGames.target_component))
while (True) :
msg = hoverGames.recv_match(type='VIDEO_MONITOR', blocking=True)
if not msg:
print('No message!\n')
continue
if msg.get_type() == "BAD_DATA":
if mavutil.all_printable(msg.data):
sys.stdout.write(msg.data)
sys.stdout.flush()
else:
print('Info: %s' % msg.info)
print('Latitude : %d' % msg.lat)
print('Longitude: %d' % msg.lon)
print('No.people: %d' % msg.no_people)
print('Confidence: %f' % msg.confidence)
print('\n')
| true
| true
|
79029440277e967106f528a1a3f24a2937e0ceee
| 27,758
|
py
|
Python
|
readthedocs/api/v3/serializers.py
|
mehrdad-khojastefar/readthedocs.org
|
b958bb8d04c454324d612345890b13af54a19eb6
|
[
"MIT"
] | 2,092
|
2019-06-29T07:47:30.000Z
|
2022-03-31T14:54:59.000Z
|
readthedocs/api/v3/serializers.py
|
mehrdad-khojastefar/readthedocs.org
|
b958bb8d04c454324d612345890b13af54a19eb6
|
[
"MIT"
] | 2,389
|
2019-06-29T04:22:55.000Z
|
2022-03-31T22:57:49.000Z
|
readthedocs/api/v3/serializers.py
|
mehrdad-khojastefar/readthedocs.org
|
b958bb8d04c454324d612345890b13af54a19eb6
|
[
"MIT"
] | 1,185
|
2019-06-29T21:49:31.000Z
|
2022-03-30T09:57:15.000Z
|
import datetime
import urllib
from django.conf import settings
from django.contrib.auth.models import User
from django.urls import reverse
from django.utils.translation import ugettext as _
from rest_flex_fields import FlexFieldsModelSerializer
from rest_flex_fields.serializers import FlexFieldsSerializerMixin
from rest_framework import serializers
from readthedocs.builds.models import Build, Version
from readthedocs.core.utils import slugify
from readthedocs.core.utils.extend import SettingsOverrideObject
from readthedocs.oauth.models import RemoteOrganization, RemoteRepository
from readthedocs.organizations.models import Organization, Team
from readthedocs.projects.constants import (
LANGUAGES,
PROGRAMMING_LANGUAGES,
REPO_CHOICES,
)
from readthedocs.projects.models import (
EnvironmentVariable,
Project,
ProjectRelationship,
)
from readthedocs.redirects.models import TYPE_CHOICES as REDIRECT_TYPE_CHOICES
from readthedocs.redirects.models import Redirect
class UserSerializer(FlexFieldsModelSerializer):
class Meta:
model = User
fields = [
'username',
]
class BaseLinksSerializer(serializers.Serializer):
def _absolute_url(self, path):
scheme = 'http' if settings.DEBUG else 'https'
domain = settings.PRODUCTION_DOMAIN
return urllib.parse.urlunparse((scheme, domain, path, '', '', ''))
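    # Illustrative note (added by the editor, not part of the original source):
    # assuming DEBUG is False and PRODUCTION_DOMAIN is 'readthedocs.org',
    # self._absolute_url('/api/v3/projects/pip/') returns
    # 'https://readthedocs.org/api/v3/projects/pip/'.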
class BuildCreateSerializer(serializers.ModelSerializer):
"""
Used when triggering (create action) a ``Build`` for a specific ``Version``.
This serializer validates that no field is sent at all in the request.
"""
class Meta:
model = Build
fields = []
class BuildLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
version = serializers.SerializerMethodField()
project = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse(
'projects-builds-detail',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'build_pk': obj.pk,
},
)
return self._absolute_url(path)
def get_version(self, obj):
if obj.version:
path = reverse(
'projects-versions-detail',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'version_slug': obj.version.slug,
},
)
return self._absolute_url(path)
return None
def get_project(self, obj):
path = reverse(
'projects-detail',
kwargs={
'project_slug': obj.project.slug,
},
)
return self._absolute_url(path)
class BuildURLsSerializer(BaseLinksSerializer, serializers.Serializer):
build = serializers.URLField(source='get_full_url')
project = serializers.SerializerMethodField()
version = serializers.SerializerMethodField()
def get_project(self, obj):
path = reverse(
'projects_detail',
kwargs={
'project_slug': obj.project.slug
}
)
return self._absolute_url(path)
def get_version(self, obj):
if obj.version:
path = reverse(
'project_version_detail',
kwargs={
'project_slug': obj.project.slug,
'version_slug': obj.version.slug
}
)
return self._absolute_url(path)
return None
class BuildConfigSerializer(FlexFieldsSerializerMixin, serializers.Serializer):
"""
Render ``Build.config`` property without modifying it.
.. note::
Any change on the output of that property will be reflected here,
which may produce incompatible changes in the API.
"""
def to_representation(self, instance): # pylint: disable=arguments-differ
# For now, we want to return the ``config`` object as it is without
# manipulating it.
return instance
class BuildStateSerializer(serializers.Serializer):
code = serializers.CharField(source='state')
name = serializers.SerializerMethodField()
def get_name(self, obj):
return obj.state.title()
class BuildSerializer(FlexFieldsModelSerializer):
project = serializers.SlugRelatedField(slug_field='slug', read_only=True)
version = serializers.SlugRelatedField(slug_field='slug', read_only=True)
created = serializers.DateTimeField(source='date')
finished = serializers.SerializerMethodField()
success = serializers.SerializerMethodField()
duration = serializers.IntegerField(source='length')
state = BuildStateSerializer(source='*')
_links = BuildLinksSerializer(source='*')
urls = BuildURLsSerializer(source='*')
class Meta:
model = Build
fields = [
'id',
'version',
'project',
'created',
'finished',
'duration',
'state',
'success',
'error',
'commit',
'_links',
'urls',
]
expandable_fields = {
'config': (BuildConfigSerializer,)
}
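        # Editor's note (not part of the original source): with
        # django-rest-flex-fields, clients opt into this expandable field via a
        # query parameter, e.g. GET .../builds/<id>/?expand=config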
def get_finished(self, obj):
if obj.date and obj.length:
return obj.date + datetime.timedelta(seconds=obj.length)
def get_success(self, obj):
"""
Return ``None`` if the build is not finished.
This is needed because ``default=True`` in the model field.
"""
if obj.finished:
return obj.success
return None
class VersionLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
builds = serializers.SerializerMethodField()
project = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse(
'projects-versions-detail',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'version_slug': obj.slug,
},
)
return self._absolute_url(path)
def get_builds(self, obj):
path = reverse(
'projects-versions-builds-list',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'parent_lookup_version__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_project(self, obj):
path = reverse(
'projects-detail',
kwargs={
'project_slug': obj.project.slug,
},
)
return self._absolute_url(path)
class VersionDashboardURLsSerializer(BaseLinksSerializer, serializers.Serializer):
edit = serializers.SerializerMethodField()
def get_edit(self, obj):
path = reverse(
'project_version_detail',
kwargs={
'project_slug': obj.project.slug,
'version_slug': obj.slug,
})
return self._absolute_url(path)
class VersionURLsSerializer(BaseLinksSerializer, serializers.Serializer):
documentation = serializers.SerializerMethodField()
vcs = serializers.URLField(source='vcs_url')
dashboard = VersionDashboardURLsSerializer(source='*')
def get_documentation(self, obj):
return obj.project.get_docs_url(version_slug=obj.slug,)
class VersionSerializer(FlexFieldsModelSerializer):
ref = serializers.CharField()
downloads = serializers.SerializerMethodField()
urls = VersionURLsSerializer(source='*')
_links = VersionLinksSerializer(source='*')
class Meta:
model = Version
fields = [
'id',
'slug',
'verbose_name',
'identifier',
'ref',
'built',
'active',
'hidden',
'type',
'downloads',
'urls',
'_links',
]
expandable_fields = {
'last_build': (
BuildSerializer,
)
}
def get_downloads(self, obj):
downloads = obj.get_downloads()
data = {}
for k, v in downloads.items():
if k in ('html', 'pdf', 'epub'):
# Keep backward compatibility
if k == 'html':
k = 'htmlzip'
data[k] = ('http:' if settings.DEBUG else 'https:') + v
return data
class VersionUpdateSerializer(serializers.ModelSerializer):
"""
Used when modifying (update action) a ``Version``.
    It only allows changing the ``active`` and ``hidden`` flags.
"""
class Meta:
model = Version
fields = [
'active',
'hidden',
]
class LanguageSerializer(serializers.Serializer):
code = serializers.SerializerMethodField()
name = serializers.SerializerMethodField()
def get_code(self, language):
return language
def get_name(self, language):
for code, name in LANGUAGES:
if code == language:
return name
return 'Unknown'
class ProgrammingLanguageSerializer(serializers.Serializer):
code = serializers.SerializerMethodField()
name = serializers.SerializerMethodField()
def get_code(self, programming_language):
return programming_language
def get_name(self, programming_language):
for code, name in PROGRAMMING_LANGUAGES:
if code == programming_language:
return name
return 'Unknown'
class ProjectURLsSerializer(BaseLinksSerializer, serializers.Serializer):
"""Serializer with all the user-facing URLs under Read the Docs."""
documentation = serializers.CharField(source='get_docs_url')
home = serializers.SerializerMethodField()
builds = serializers.SerializerMethodField()
versions = serializers.SerializerMethodField()
def get_home(self, obj):
path = reverse('projects_detail', kwargs={'project_slug': obj.slug})
return self._absolute_url(path)
def get_builds(self, obj):
path = reverse('builds_project_list', kwargs={'project_slug': obj.slug})
return self._absolute_url(path)
def get_versions(self, obj):
path = reverse('project_version_list', kwargs={'project_slug': obj.slug})
return self._absolute_url(path)
class RepositorySerializer(serializers.Serializer):
url = serializers.CharField(source='repo')
type = serializers.ChoiceField(
source='repo_type',
choices=REPO_CHOICES,
)
class ProjectLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
versions = serializers.SerializerMethodField()
builds = serializers.SerializerMethodField()
environmentvariables = serializers.SerializerMethodField()
redirects = serializers.SerializerMethodField()
subprojects = serializers.SerializerMethodField()
superproject = serializers.SerializerMethodField()
translations = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse('projects-detail', kwargs={'project_slug': obj.slug})
return self._absolute_url(path)
def get_versions(self, obj):
path = reverse(
'projects-versions-list',
kwargs={
'parent_lookup_project__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_environmentvariables(self, obj):
path = reverse(
'projects-environmentvariables-list',
kwargs={
'parent_lookup_project__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_redirects(self, obj):
path = reverse(
'projects-redirects-list',
kwargs={
'parent_lookup_project__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_builds(self, obj):
path = reverse(
'projects-builds-list',
kwargs={
'parent_lookup_project__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_subprojects(self, obj):
path = reverse(
'projects-subprojects-list',
kwargs={
'parent_lookup_parent__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_superproject(self, obj):
path = reverse(
'projects-superproject',
kwargs={
'project_slug': obj.slug,
},
)
return self._absolute_url(path)
def get_translations(self, obj):
path = reverse(
'projects-translations-list',
kwargs={
'parent_lookup_main_language_project__slug': obj.slug,
},
)
return self._absolute_url(path)
class ProjectCreateSerializerBase(FlexFieldsModelSerializer):
"""Serializer used to Import a Project."""
repository = RepositorySerializer(source='*')
homepage = serializers.URLField(source='project_url', required=False)
class Meta:
model = Project
fields = (
'name',
'language',
'programming_language',
'repository',
'homepage',
)
def validate_name(self, value):
potential_slug = slugify(value)
if Project.objects.filter(slug=potential_slug).exists():
raise serializers.ValidationError(
_('Project with slug "{0}" already exists.').format(potential_slug),
)
return value
class ProjectCreateSerializer(SettingsOverrideObject):
_default_class = ProjectCreateSerializerBase
class ProjectUpdateSerializerBase(FlexFieldsModelSerializer):
"""Serializer used to modify a Project once imported."""
repository = RepositorySerializer(source='*')
homepage = serializers.URLField(
source='project_url',
required=False,
)
class Meta:
model = Project
fields = (
# Settings
'name',
'repository',
'language',
'programming_language',
'homepage',
# Advanced Settings -> General Settings
'default_version',
'default_branch',
'analytics_code',
'analytics_disabled',
'show_version_warning',
'single_version',
'external_builds_enabled',
# NOTE: we do not allow to change any setting that can be set via
# the YAML config file.
)
class ProjectUpdateSerializer(SettingsOverrideObject):
_default_class = ProjectUpdateSerializerBase
class ProjectSerializer(FlexFieldsModelSerializer):
"""
Project serializer.
.. note::
When using organizations, projects don't have the concept of users.
But we have organization.users.
"""
homepage = serializers.SerializerMethodField()
language = LanguageSerializer()
programming_language = ProgrammingLanguageSerializer()
repository = RepositorySerializer(source='*')
urls = ProjectURLsSerializer(source='*')
subproject_of = serializers.SerializerMethodField()
translation_of = serializers.SerializerMethodField()
default_branch = serializers.CharField(source='get_default_branch')
tags = serializers.StringRelatedField(many=True)
if not settings.RTD_ALLOW_ORGANIZATIONS:
users = UserSerializer(many=True)
_links = ProjectLinksSerializer(source='*')
# TODO: adapt these fields with the proper names in the db and then remove
# them from here
created = serializers.DateTimeField(source='pub_date')
modified = serializers.DateTimeField(source='modified_date')
class Meta:
model = Project
fields = [
'id',
'name',
'slug',
'created',
'modified',
'language',
'programming_language',
'homepage',
'repository',
'default_version',
'default_branch',
'subproject_of',
'translation_of',
'urls',
'tags',
# NOTE: ``expandable_fields`` must not be included here. Otherwise,
            # the serializer would try to render them and fail
# 'users',
# 'active_versions',
'_links',
]
if not settings.RTD_ALLOW_ORGANIZATIONS:
fields.append('users')
expandable_fields = {
# NOTE: this has to be a Model method, can't be a
# ``SerializerMethodField`` as far as I know
'active_versions': (
VersionSerializer,
{
'many': True,
}
)
}
if settings.RTD_ALLOW_ORGANIZATIONS:
expandable_fields.update({
'organization': (
'readthedocs.api.v3.serializers.OrganizationSerializer',
# NOTE: we cannot have a Project with multiple organizations.
{'source': 'organizations.first'},
),
'teams': (
serializers.SlugRelatedField,
{
'slug_field': 'slug',
'many': True,
'read_only': True,
},
),
})
def get_homepage(self, obj):
# Overridden only to return ``None`` when the project_url is ``''``
return obj.project_url or None
def get_translation_of(self, obj):
if obj.main_language_project:
return self.__class__(obj.main_language_project).data
def get_subproject_of(self, obj):
try:
return self.__class__(obj.superprojects.first().parent).data
except Exception:
return None
class SubprojectCreateSerializer(FlexFieldsModelSerializer):
"""Serializer used to define a Project as subproject of another Project."""
child = serializers.SlugRelatedField(
slug_field='slug',
queryset=Project.objects.none(),
)
class Meta:
model = ProjectRelationship
fields = [
'child',
'alias',
]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.parent_project = self.context['parent']
user = self.context['request'].user
self.fields['child'].queryset = (
self.parent_project.get_subproject_candidates(user)
)
# Give users a better error message.
self.fields['child'].error_messages['does_not_exist'] = _(
'Project with {slug_name}={value} is not valid as subproject'
)
def validate_alias(self, value):
# Check there is not a subproject with this alias already
subproject = self.parent_project.subprojects.filter(alias=value)
if subproject.exists():
raise serializers.ValidationError(
_('A subproject with this alias already exists'),
)
return value
# pylint: disable=arguments-differ
def validate(self, data):
self.parent_project.is_valid_as_superproject(
serializers.ValidationError
)
return data
class SubprojectLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
parent = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse(
'projects-subprojects-detail',
kwargs={
'parent_lookup_parent__slug': obj.parent.slug,
'alias_slug': obj.alias,
},
)
return self._absolute_url(path)
def get_parent(self, obj):
path = reverse(
'projects-detail',
kwargs={
'project_slug': obj.parent.slug,
},
)
return self._absolute_url(path)
class ChildProjectSerializer(ProjectSerializer):
"""
Serializer to render a Project when listed under ProjectRelationship.
It's exactly the same as ``ProjectSerializer`` but without some fields.
"""
class Meta(ProjectSerializer.Meta):
fields = [
field for field in ProjectSerializer.Meta.fields
if field not in ['subproject_of']
]
class SubprojectSerializer(FlexFieldsModelSerializer):
"""Serializer to render a subproject (``ProjectRelationship``)."""
child = ChildProjectSerializer()
_links = SubprojectLinksSerializer(source='*')
class Meta:
model = ProjectRelationship
fields = [
'child',
'alias',
'_links',
]
class SubprojectDestroySerializer(FlexFieldsModelSerializer):
"""Serializer used to remove a subproject relationship to a Project."""
class Meta:
model = ProjectRelationship
fields = (
'alias',
)
class RedirectLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
project = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse(
'projects-redirects-detail',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'redirect_pk': obj.pk,
},
)
return self._absolute_url(path)
def get_project(self, obj):
path = reverse(
'projects-detail',
kwargs={
'project_slug': obj.project.slug,
},
)
return self._absolute_url(path)
class RedirectSerializerBase(serializers.ModelSerializer):
project = serializers.SlugRelatedField(slug_field='slug', read_only=True)
created = serializers.DateTimeField(source='create_dt', read_only=True)
modified = serializers.DateTimeField(source='update_dt', read_only=True)
_links = RedirectLinksSerializer(source='*', read_only=True)
type = serializers.ChoiceField(source='redirect_type', choices=REDIRECT_TYPE_CHOICES)
class Meta:
model = Redirect
fields = [
'pk',
'created',
'modified',
'project',
'type',
'from_url',
'to_url',
'_links',
]
class RedirectCreateSerializer(RedirectSerializerBase):
pass
class RedirectDetailSerializer(RedirectSerializerBase):
"""Override RedirectSerializerBase to sanitize the empty fields."""
from_url = serializers.SerializerMethodField()
to_url = serializers.SerializerMethodField()
def get_from_url(self, obj):
# Overridden only to return ``None`` when the description is ``''``
return obj.from_url or None
def get_to_url(self, obj):
# Overridden only to return ``None`` when the description is ``''``
return obj.to_url or None
class EnvironmentVariableLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
project = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse(
'projects-environmentvariables-detail',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'environmentvariable_pk': obj.pk,
},
)
return self._absolute_url(path)
def get_project(self, obj):
path = reverse(
'projects-detail',
kwargs={
'project_slug': obj.project.slug,
},
)
return self._absolute_url(path)
class EnvironmentVariableSerializer(serializers.ModelSerializer):
value = serializers.CharField(write_only=True)
project = serializers.SlugRelatedField(slug_field='slug', read_only=True)
_links = EnvironmentVariableLinksSerializer(source='*', read_only=True)
class Meta:
model = EnvironmentVariable
fields = [
'pk',
'created',
'modified',
'name',
'value',
'public',
'project',
'_links',
]
class OrganizationLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
projects = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse(
'organizations-detail',
kwargs={
'organization_slug': obj.slug,
})
return self._absolute_url(path)
def get_projects(self, obj):
path = reverse(
'organizations-projects-list',
kwargs={
'parent_lookup_organizations__slug': obj.slug,
},
)
return self._absolute_url(path)
class TeamSerializer(FlexFieldsModelSerializer):
# TODO: add ``projects`` as flex field when we have a
# /organizations/<slug>/teams/<slug>/projects endpoint
created = serializers.DateTimeField(source='pub_date')
modified = serializers.DateTimeField(source='modified_date')
class Meta:
model = Team
fields = (
'name',
'slug',
'created',
'modified',
'access',
)
expandable_fields = {
'members': (UserSerializer, {'many': True}),
}
class OrganizationSerializer(FlexFieldsModelSerializer):
created = serializers.DateTimeField(source='pub_date')
modified = serializers.DateTimeField(source='modified_date')
owners = UserSerializer(many=True)
_links = OrganizationLinksSerializer(source='*')
class Meta:
model = Organization
fields = (
'name',
'description',
'url',
'slug',
'email',
'owners',
'created',
'modified',
'disabled',
'_links',
)
expandable_fields = {
'projects': (ProjectSerializer, {'many': True}),
'teams': (TeamSerializer, {'many': True}),
}
class RemoteOrganizationSerializer(serializers.ModelSerializer):
class Meta:
model = RemoteOrganization
fields = [
'pk',
'slug',
'name',
'avatar_url',
'url',
'vcs_provider',
'created',
'modified',
]
read_only_fields = fields
class RemoteRepositorySerializer(FlexFieldsModelSerializer):
admin = serializers.SerializerMethodField('is_admin')
class Meta:
model = RemoteRepository
fields = [
'pk',
'name',
'full_name',
'description',
'admin',
'avatar_url',
'ssh_url',
'clone_url',
'html_url',
'vcs',
'vcs_provider',
'private',
'default_branch',
'created',
'modified',
]
read_only_fields = fields
expandable_fields = {
'remote_organization': (
RemoteOrganizationSerializer, {'source': 'organization'}
),
'projects': (
ProjectSerializer, {'many': True}
)
}
def is_admin(self, obj):
request = self.context['request']
# Use annotated value from RemoteRepositoryViewSet queryset
if hasattr(obj, '_admin'):
return obj._admin
return obj.remote_repository_relations.filter(
user=request.user, admin=True
).exists()
| 28.180711
| 89
| 0.595468
|
import datetime
import urllib
from django.conf import settings
from django.contrib.auth.models import User
from django.urls import reverse
from django.utils.translation import ugettext as _
from rest_flex_fields import FlexFieldsModelSerializer
from rest_flex_fields.serializers import FlexFieldsSerializerMixin
from rest_framework import serializers
from readthedocs.builds.models import Build, Version
from readthedocs.core.utils import slugify
from readthedocs.core.utils.extend import SettingsOverrideObject
from readthedocs.oauth.models import RemoteOrganization, RemoteRepository
from readthedocs.organizations.models import Organization, Team
from readthedocs.projects.constants import (
LANGUAGES,
PROGRAMMING_LANGUAGES,
REPO_CHOICES,
)
from readthedocs.projects.models import (
EnvironmentVariable,
Project,
ProjectRelationship,
)
from readthedocs.redirects.models import TYPE_CHOICES as REDIRECT_TYPE_CHOICES
from readthedocs.redirects.models import Redirect
class UserSerializer(FlexFieldsModelSerializer):
class Meta:
model = User
fields = [
'username',
]
class BaseLinksSerializer(serializers.Serializer):
def _absolute_url(self, path):
scheme = 'http' if settings.DEBUG else 'https'
domain = settings.PRODUCTION_DOMAIN
return urllib.parse.urlunparse((scheme, domain, path, '', '', ''))
class BuildCreateSerializer(serializers.ModelSerializer):
class Meta:
model = Build
fields = []
class BuildLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
version = serializers.SerializerMethodField()
project = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse(
'projects-builds-detail',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'build_pk': obj.pk,
},
)
return self._absolute_url(path)
def get_version(self, obj):
if obj.version:
path = reverse(
'projects-versions-detail',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'version_slug': obj.version.slug,
},
)
return self._absolute_url(path)
return None
def get_project(self, obj):
path = reverse(
'projects-detail',
kwargs={
'project_slug': obj.project.slug,
},
)
return self._absolute_url(path)
class BuildURLsSerializer(BaseLinksSerializer, serializers.Serializer):
build = serializers.URLField(source='get_full_url')
project = serializers.SerializerMethodField()
version = serializers.SerializerMethodField()
def get_project(self, obj):
path = reverse(
'projects_detail',
kwargs={
'project_slug': obj.project.slug
}
)
return self._absolute_url(path)
def get_version(self, obj):
if obj.version:
path = reverse(
'project_version_detail',
kwargs={
'project_slug': obj.project.slug,
'version_slug': obj.version.slug
}
)
return self._absolute_url(path)
return None
class BuildConfigSerializer(FlexFieldsSerializerMixin, serializers.Serializer):
def to_representation(self, instance):
return instance
class BuildStateSerializer(serializers.Serializer):
code = serializers.CharField(source='state')
name = serializers.SerializerMethodField()
def get_name(self, obj):
return obj.state.title()
class BuildSerializer(FlexFieldsModelSerializer):
project = serializers.SlugRelatedField(slug_field='slug', read_only=True)
version = serializers.SlugRelatedField(slug_field='slug', read_only=True)
created = serializers.DateTimeField(source='date')
finished = serializers.SerializerMethodField()
success = serializers.SerializerMethodField()
duration = serializers.IntegerField(source='length')
state = BuildStateSerializer(source='*')
_links = BuildLinksSerializer(source='*')
urls = BuildURLsSerializer(source='*')
class Meta:
model = Build
fields = [
'id',
'version',
'project',
'created',
'finished',
'duration',
'state',
'success',
'error',
'commit',
'_links',
'urls',
]
expandable_fields = {
'config': (BuildConfigSerializer,)
}
def get_finished(self, obj):
if obj.date and obj.length:
return obj.date + datetime.timedelta(seconds=obj.length)
def get_success(self, obj):
if obj.finished:
return obj.success
return None
class VersionLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
builds = serializers.SerializerMethodField()
project = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse(
'projects-versions-detail',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'version_slug': obj.slug,
},
)
return self._absolute_url(path)
def get_builds(self, obj):
path = reverse(
'projects-versions-builds-list',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'parent_lookup_version__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_project(self, obj):
path = reverse(
'projects-detail',
kwargs={
'project_slug': obj.project.slug,
},
)
return self._absolute_url(path)
class VersionDashboardURLsSerializer(BaseLinksSerializer, serializers.Serializer):
edit = serializers.SerializerMethodField()
def get_edit(self, obj):
path = reverse(
'project_version_detail',
kwargs={
'project_slug': obj.project.slug,
'version_slug': obj.slug,
})
return self._absolute_url(path)
class VersionURLsSerializer(BaseLinksSerializer, serializers.Serializer):
documentation = serializers.SerializerMethodField()
vcs = serializers.URLField(source='vcs_url')
dashboard = VersionDashboardURLsSerializer(source='*')
def get_documentation(self, obj):
return obj.project.get_docs_url(version_slug=obj.slug,)
class VersionSerializer(FlexFieldsModelSerializer):
ref = serializers.CharField()
downloads = serializers.SerializerMethodField()
urls = VersionURLsSerializer(source='*')
_links = VersionLinksSerializer(source='*')
class Meta:
model = Version
fields = [
'id',
'slug',
'verbose_name',
'identifier',
'ref',
'built',
'active',
'hidden',
'type',
'downloads',
'urls',
'_links',
]
expandable_fields = {
'last_build': (
BuildSerializer,
)
}
def get_downloads(self, obj):
downloads = obj.get_downloads()
data = {}
for k, v in downloads.items():
if k in ('html', 'pdf', 'epub'):
if k == 'html':
k = 'htmlzip'
data[k] = ('http:' if settings.DEBUG else 'https:') + v
return data
class VersionUpdateSerializer(serializers.ModelSerializer):
class Meta:
model = Version
fields = [
'active',
'hidden',
]
class LanguageSerializer(serializers.Serializer):
code = serializers.SerializerMethodField()
name = serializers.SerializerMethodField()
def get_code(self, language):
return language
def get_name(self, language):
for code, name in LANGUAGES:
if code == language:
return name
return 'Unknown'
class ProgrammingLanguageSerializer(serializers.Serializer):
code = serializers.SerializerMethodField()
name = serializers.SerializerMethodField()
def get_code(self, programming_language):
return programming_language
def get_name(self, programming_language):
for code, name in PROGRAMMING_LANGUAGES:
if code == programming_language:
return name
return 'Unknown'
class ProjectURLsSerializer(BaseLinksSerializer, serializers.Serializer):
documentation = serializers.CharField(source='get_docs_url')
home = serializers.SerializerMethodField()
builds = serializers.SerializerMethodField()
versions = serializers.SerializerMethodField()
def get_home(self, obj):
path = reverse('projects_detail', kwargs={'project_slug': obj.slug})
return self._absolute_url(path)
def get_builds(self, obj):
path = reverse('builds_project_list', kwargs={'project_slug': obj.slug})
return self._absolute_url(path)
def get_versions(self, obj):
path = reverse('project_version_list', kwargs={'project_slug': obj.slug})
return self._absolute_url(path)
class RepositorySerializer(serializers.Serializer):
url = serializers.CharField(source='repo')
type = serializers.ChoiceField(
source='repo_type',
choices=REPO_CHOICES,
)
class ProjectLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
versions = serializers.SerializerMethodField()
builds = serializers.SerializerMethodField()
environmentvariables = serializers.SerializerMethodField()
redirects = serializers.SerializerMethodField()
subprojects = serializers.SerializerMethodField()
superproject = serializers.SerializerMethodField()
translations = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse('projects-detail', kwargs={'project_slug': obj.slug})
return self._absolute_url(path)
def get_versions(self, obj):
path = reverse(
'projects-versions-list',
kwargs={
'parent_lookup_project__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_environmentvariables(self, obj):
path = reverse(
'projects-environmentvariables-list',
kwargs={
'parent_lookup_project__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_redirects(self, obj):
path = reverse(
'projects-redirects-list',
kwargs={
'parent_lookup_project__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_builds(self, obj):
path = reverse(
'projects-builds-list',
kwargs={
'parent_lookup_project__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_subprojects(self, obj):
path = reverse(
'projects-subprojects-list',
kwargs={
'parent_lookup_parent__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_superproject(self, obj):
path = reverse(
'projects-superproject',
kwargs={
'project_slug': obj.slug,
},
)
return self._absolute_url(path)
def get_translations(self, obj):
path = reverse(
'projects-translations-list',
kwargs={
'parent_lookup_main_language_project__slug': obj.slug,
},
)
return self._absolute_url(path)
class ProjectCreateSerializerBase(FlexFieldsModelSerializer):
repository = RepositorySerializer(source='*')
homepage = serializers.URLField(source='project_url', required=False)
class Meta:
model = Project
fields = (
'name',
'language',
'programming_language',
'repository',
'homepage',
)
def validate_name(self, value):
potential_slug = slugify(value)
if Project.objects.filter(slug=potential_slug).exists():
raise serializers.ValidationError(
_('Project with slug "{0}" already exists.').format(potential_slug),
)
return value
class ProjectCreateSerializer(SettingsOverrideObject):
_default_class = ProjectCreateSerializerBase
class ProjectUpdateSerializerBase(FlexFieldsModelSerializer):
repository = RepositorySerializer(source='*')
homepage = serializers.URLField(
source='project_url',
required=False,
)
class Meta:
model = Project
fields = (
'name',
'repository',
'language',
'programming_language',
'homepage',
'default_version',
'default_branch',
'analytics_code',
'analytics_disabled',
'show_version_warning',
'single_version',
'external_builds_enabled',
)
class ProjectUpdateSerializer(SettingsOverrideObject):
_default_class = ProjectUpdateSerializerBase
class ProjectSerializer(FlexFieldsModelSerializer):
homepage = serializers.SerializerMethodField()
language = LanguageSerializer()
programming_language = ProgrammingLanguageSerializer()
repository = RepositorySerializer(source='*')
urls = ProjectURLsSerializer(source='*')
subproject_of = serializers.SerializerMethodField()
translation_of = serializers.SerializerMethodField()
default_branch = serializers.CharField(source='get_default_branch')
tags = serializers.StringRelatedField(many=True)
if not settings.RTD_ALLOW_ORGANIZATIONS:
users = UserSerializer(many=True)
_links = ProjectLinksSerializer(source='*')
created = serializers.DateTimeField(source='pub_date')
modified = serializers.DateTimeField(source='modified_date')
class Meta:
model = Project
fields = [
'id',
'name',
'slug',
'created',
'modified',
'language',
'programming_language',
'homepage',
'repository',
'default_version',
'default_branch',
'subproject_of',
'translation_of',
'urls',
'tags',
'_links',
]
if not settings.RTD_ALLOW_ORGANIZATIONS:
fields.append('users')
expandable_fields = {
# ``SerializerMethodField`` as far as I know
'active_versions': (
VersionSerializer,
{
'many': True,
}
)
}
if settings.RTD_ALLOW_ORGANIZATIONS:
expandable_fields.update({
'organization': (
'readthedocs.api.v3.serializers.OrganizationSerializer',
# NOTE: we cannot have a Project with multiple organizations.
{'source': 'organizations.first'},
),
'teams': (
serializers.SlugRelatedField,
{
'slug_field': 'slug',
'many': True,
'read_only': True,
},
),
})
def get_homepage(self, obj):
# Overridden only to return ``None`` when the project_url is ``''``
return obj.project_url or None
def get_translation_of(self, obj):
if obj.main_language_project:
return self.__class__(obj.main_language_project).data
def get_subproject_of(self, obj):
try:
return self.__class__(obj.superprojects.first().parent).data
except Exception:
return None
class SubprojectCreateSerializer(FlexFieldsModelSerializer):
child = serializers.SlugRelatedField(
slug_field='slug',
queryset=Project.objects.none(),
)
class Meta:
model = ProjectRelationship
fields = [
'child',
'alias',
]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.parent_project = self.context['parent']
user = self.context['request'].user
self.fields['child'].queryset = (
self.parent_project.get_subproject_candidates(user)
)
# Give users a better error message.
self.fields['child'].error_messages['does_not_exist'] = _(
'Project with {slug_name}={value} is not valid as subproject'
)
def validate_alias(self, value):
# Check there is not a subproject with this alias already
subproject = self.parent_project.subprojects.filter(alias=value)
if subproject.exists():
raise serializers.ValidationError(
_('A subproject with this alias already exists'),
)
return value
# pylint: disable=arguments-differ
def validate(self, data):
self.parent_project.is_valid_as_superproject(
serializers.ValidationError
)
return data
class SubprojectLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
parent = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse(
'projects-subprojects-detail',
kwargs={
'parent_lookup_parent__slug': obj.parent.slug,
'alias_slug': obj.alias,
},
)
return self._absolute_url(path)
def get_parent(self, obj):
path = reverse(
'projects-detail',
kwargs={
'project_slug': obj.parent.slug,
},
)
return self._absolute_url(path)
class ChildProjectSerializer(ProjectSerializer):
class Meta(ProjectSerializer.Meta):
fields = [
field for field in ProjectSerializer.Meta.fields
if field not in ['subproject_of']
]
class SubprojectSerializer(FlexFieldsModelSerializer):
child = ChildProjectSerializer()
_links = SubprojectLinksSerializer(source='*')
class Meta:
model = ProjectRelationship
fields = [
'child',
'alias',
'_links',
]
class SubprojectDestroySerializer(FlexFieldsModelSerializer):
class Meta:
model = ProjectRelationship
fields = (
'alias',
)
class RedirectLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
project = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse(
'projects-redirects-detail',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'redirect_pk': obj.pk,
},
)
return self._absolute_url(path)
def get_project(self, obj):
path = reverse(
'projects-detail',
kwargs={
'project_slug': obj.project.slug,
},
)
return self._absolute_url(path)
class RedirectSerializerBase(serializers.ModelSerializer):
project = serializers.SlugRelatedField(slug_field='slug', read_only=True)
created = serializers.DateTimeField(source='create_dt', read_only=True)
modified = serializers.DateTimeField(source='update_dt', read_only=True)
_links = RedirectLinksSerializer(source='*', read_only=True)
type = serializers.ChoiceField(source='redirect_type', choices=REDIRECT_TYPE_CHOICES)
class Meta:
model = Redirect
fields = [
'pk',
'created',
'modified',
'project',
'type',
'from_url',
'to_url',
'_links',
]
class RedirectCreateSerializer(RedirectSerializerBase):
pass
class RedirectDetailSerializer(RedirectSerializerBase):
from_url = serializers.SerializerMethodField()
to_url = serializers.SerializerMethodField()
def get_from_url(self, obj):
# Overridden only to return ``None`` when the description is ``''``
return obj.from_url or None
def get_to_url(self, obj):
# Overridden only to return ``None`` when the description is ``''``
return obj.to_url or None
class EnvironmentVariableLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
project = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse(
'projects-environmentvariables-detail',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'environmentvariable_pk': obj.pk,
},
)
return self._absolute_url(path)
def get_project(self, obj):
path = reverse(
'projects-detail',
kwargs={
'project_slug': obj.project.slug,
},
)
return self._absolute_url(path)
class EnvironmentVariableSerializer(serializers.ModelSerializer):
value = serializers.CharField(write_only=True)
project = serializers.SlugRelatedField(slug_field='slug', read_only=True)
_links = EnvironmentVariableLinksSerializer(source='*', read_only=True)
class Meta:
model = EnvironmentVariable
fields = [
'pk',
'created',
'modified',
'name',
'value',
'public',
'project',
'_links',
]
class OrganizationLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
projects = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse(
'organizations-detail',
kwargs={
'organization_slug': obj.slug,
})
return self._absolute_url(path)
def get_projects(self, obj):
path = reverse(
'organizations-projects-list',
kwargs={
'parent_lookup_organizations__slug': obj.slug,
},
)
return self._absolute_url(path)
class TeamSerializer(FlexFieldsModelSerializer):
# TODO: add ``projects`` as flex field when we have a
# /organizations/<slug>/teams/<slug>/projects endpoint
created = serializers.DateTimeField(source='pub_date')
modified = serializers.DateTimeField(source='modified_date')
class Meta:
model = Team
fields = (
'name',
'slug',
'created',
'modified',
'access',
)
expandable_fields = {
'members': (UserSerializer, {'many': True}),
}
class OrganizationSerializer(FlexFieldsModelSerializer):
created = serializers.DateTimeField(source='pub_date')
modified = serializers.DateTimeField(source='modified_date')
owners = UserSerializer(many=True)
_links = OrganizationLinksSerializer(source='*')
class Meta:
model = Organization
fields = (
'name',
'description',
'url',
'slug',
'email',
'owners',
'created',
'modified',
'disabled',
'_links',
)
expandable_fields = {
'projects': (ProjectSerializer, {'many': True}),
'teams': (TeamSerializer, {'many': True}),
}
class RemoteOrganizationSerializer(serializers.ModelSerializer):
class Meta:
model = RemoteOrganization
fields = [
'pk',
'slug',
'name',
'avatar_url',
'url',
'vcs_provider',
'created',
'modified',
]
read_only_fields = fields
class RemoteRepositorySerializer(FlexFieldsModelSerializer):
admin = serializers.SerializerMethodField('is_admin')
class Meta:
model = RemoteRepository
fields = [
'pk',
'name',
'full_name',
'description',
'admin',
'avatar_url',
'ssh_url',
'clone_url',
'html_url',
'vcs',
'vcs_provider',
'private',
'default_branch',
'created',
'modified',
]
read_only_fields = fields
expandable_fields = {
'remote_organization': (
RemoteOrganizationSerializer, {'source': 'organization'}
),
'projects': (
ProjectSerializer, {'many': True}
)
}
def is_admin(self, obj):
request = self.context['request']
# Use annotated value from RemoteRepositoryViewSet queryset
if hasattr(obj, '_admin'):
return obj._admin
return obj.remote_repository_relations.filter(
user=request.user, admin=True
).exists()
| true
| true
|
7902958aba3b36b6c985cbc6b8b862ef5e83942c
| 9,239
|
py
|
Python
|
deepchembed/dce.py
|
hanghu/AutoChemCluster
|
2ab4ae996b300a90637b124707905201c89d74d8
|
[
"MIT"
] | 2
|
2019-05-15T06:31:35.000Z
|
2019-08-31T13:13:21.000Z
|
deepchembed/dce.py
|
hanghu/AutoChemCluster
|
2ab4ae996b300a90637b124707905201c89d74d8
|
[
"MIT"
] | 7
|
2019-05-02T19:01:40.000Z
|
2022-02-10T00:11:00.000Z
|
deepchembed/dce.py
|
hanghu/AutoChemCluster
|
2ab4ae996b300a90637b124707905201c89d74d8
|
[
"MIT"
] | 1
|
2019-08-17T11:34:56.000Z
|
2019-08-17T11:34:56.000Z
|
"""
DeepChEmbed (DCE) Models
"""
from dimreducer import DeepAutoEncoder
from cluster import KMeansLayer
from cluster import KMeans
from keras import Model
from keras import optimizers
from keras.utils import normalize
import numpy as np
class DCE():
"""
The class to build a deep chemical embedding model.
Attributes:
autoencoder_dims: a list of dimensions for encoder, the first
element as input dimension, and the last one as
hidden layer dimension.
n_clusters: int, number of clusters for clustering layer.
alpha: float, parameters for soft label assigning.
        update_interval: int, every this many epochs the hardened labels are
                         updated and/or the convergence criteria are examined.
        max_iteration: int, maximum number of iterations for the combined training
        clustering_tol: float, convergence criterion for the clustering layer
model: keras Model variable
        HARDENING_FUNCS: smoothstep hardening functions for unsupervised DCE
                         training, up to 9th order
"""
HARDENING_FUNCS = {
1: lambda x: x,
3: lambda x: (-2*x + 3) * x**2,
5: lambda x: ((6*x - 15)*x + 10) * x**3,
7: lambda x: (((-20*x + 70)*x - 84)*x + 35) * x**4,
9: lambda x: ((((70*x - 315)*x + 540)*x -420)*x + 126) * x**5}
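    # Editor's note (not part of the original source): order 1 is the identity
    # (no hardening); the higher orders are the classic smoothstep polynomials,
    # each mapping [0, 1] onto [0, 1] with zero derivatives at both endpoints,
    # so they push soft assignments toward 0 or 1.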
def __init__(self, autoencoder_dims, n_clusters, update_interval=50,
max_iteration=1e4, clustering_tol=1e-4, alpha=1.0):
"""Construtor of DCE. """
self.autoencoder_dims = autoencoder_dims
self.n_clusters = n_clusters
self.alpha = alpha
self.update_interval = update_interval
self.max_iteration = max_iteration
self.clustering_tol = clustering_tol
self.model = None
return
def build_model(self, norm=True, act='relu'):
"""Build DCE using the initialized attributes
Args:
            norm: boolean, whether to add a normalization layer at the beginning
                  of the autoencoder
act: string, keras activation function name for autoencoder
"""
autoencoder = DeepAutoEncoder(self.autoencoder_dims, act)
autoencoder.build_model(norm=norm)
embeding = autoencoder.model.get_layer(name='embedding_layer').output
clustering = KMeansLayer(self.n_clusters, alpha=self.alpha,
name='clustering')(embeding)
self.model = Model(inputs=autoencoder.model.input,
outputs=[clustering,autoencoder.model.output])
return
def train_model(self, data_train,
labels_train=None, data_test=None, labels_test=None,
verbose=1,
compiled=False, clustering_loss='kld',
decoder_loss='mse',clustering_loss_weight=0.5,
hardening_order=1, hardening_strength=2.0,
optimizer='adam', lr=0.001, decay=0.0):
"""Train DCE Model:
        If labels_train are not present, train the DCE model in an unsupervised
        learning process; otherwise, train the DCE model in a supervised learning
        process.
        Args:
            data_train: input training data
            labels_train: true labels of training data
            data_test: input test data
            labels_test: true labels of testing data
            verbose: int, 0 turns off screen printing
            clustering_loss: string, clustering layer loss function
            decoder_loss: string, decoder loss function
            clustering_loss_weight: float in [0, 1], the clustering loss weight w_c
            hardening_order: odd int, the order of the hardening function
            hardening_strength: float >= 1.0, the strength of the hardening
            compiled: boolean, indicating if the model is compiled or not
            optimizer: string, keras optimizer name
            lr: learning rate
            decay: learning rate decay
Returns:
train_loss: training loss
test_loss: only if data_test and labels_test are not None in
supervised learning process
"""
if (not compiled):
assert clustering_loss_weight <= 1 and clustering_loss_weight >= 0
if optimizer == 'adam':
dce_optimizer = optimizers.Adam(lr=lr,decay=decay)
elif optimizer == 'sgd':
dce_optimizer = optimizers.sgd(lr=lr,decay=decay)
else:
raise Exception('Input optimizer was not found')
self.model.compile(loss={'clustering': clustering_loss,
'decoder_output': decoder_loss},
loss_weights=[clustering_loss_weight,
1 - clustering_loss_weight],
optimizer=dce_optimizer)
if (labels_train is not None):
supervised_learning = True
if verbose >= 1: print('Starting supervised learning')
else:
supervised_learning = False
if verbose >= 1: print('Starting unsupervised learning')
        # initialize the model by using sklearn KMeans as the initial guess
kmeans_init = KMeans(n_clusters=self.n_clusters)
kmeans_init.build_model()
encoder = Model(inputs=self.model.input,
outputs=self.model.get_layer(\
name='embedding_layer').output)
kmeans_init.model.fit(encoder.predict(data_train))
y_pred_last = kmeans_init.model.labels_
self.model.get_layer(name='clustering').\
set_weights([kmeans_init.model.cluster_centers_])
        # Prepare training: p distribution methods
if not supervised_learning:
# Unsupervised Learning
assert hardening_order in DCE.HARDENING_FUNCS.keys()
assert hardening_strength >= 1.0
h_func = DCE.HARDENING_FUNCS[hardening_order]
else:
# Supervised Learning
assert len(labels_train) == len(data_train)
assert len(np.unique(labels_train)) == self.n_clusters
p = np.zeros(shape=(len(labels_train), self.n_clusters))
for i in range(len(labels_train)):
p[i][labels_train[i]] = 1.0
if data_test is not None:
assert len(labels_test) == len(data_test)
assert len(np.unique(labels_test)) == self.n_clusters
p_test = np.zeros(shape=(len(labels_test), self.n_clusters))
for i in range(len(labels_test)):
p_test[i][labels_test[i]] = 1.0
validation_loss = []
# training start:
loss = []
for iteration in range(int(self.max_iteration)):
if iteration % self.update_interval == 0:
# updating p for unsupervised learning process
q, _ = self.model.predict(data_train)
if not supervised_learning:
p = DCE.hardening(q, h_func, hardening_strength)
                # get the label change fraction delta_label_i
y_pred = q.argmax(1)
delta_label_i = np.sum(y_pred != y_pred_last).\
astype(np.float32) / y_pred.shape[0]
y_pred_last = y_pred
                # examine convergence
if iteration > 0 and delta_label_i < self.clustering_tol:
print(str(delta_label_i) +' < ' + str(self.clustering_tol))
print('Reached tolerance threshold. Stopping training.')
break
loss.append(self.model.train_on_batch(x=data_train,
y=[p,data_train]))
if supervised_learning and data_test is not None:
validation_loss.append(self.model.test_on_batch(
x=data_test, y=[p_test,data_test]))
if verbose > 0 and iteration % self.update_interval == 0:
print('Epoch: ' + str(iteration))
if verbose == 1:
print(' Total_loss = ' + str(loss[iteration][0]) +
';Delta_label = ' + str(delta_label_i))
print(' Clustering_loss = ' + str(loss[iteration][1]) +
'; Decoder_loss = ' + str(loss[iteration][2]))
if iteration == self.max_iteration - 1:
print('Reached maximum iteration. Stopping training.')
if data_test is None:
return np.array(loss).T
else:
return [np.array(loss).T, np.array(validation_loss).T]
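    # --- Editor's illustrative usage sketch (not part of the original source) ---
    # Typical unsupervised use, assuming `features` is a (n_samples, n_features)
    # numpy array whose width matches autoencoder_dims[0]; all numbers below are
    # placeholder assumptions:
    #
    #     dce = DCE(autoencoder_dims=[features.shape[1], 64, 16], n_clusters=5)
    #     dce.build_model()
    #     losses = dce.train_model(features, hardening_order=3,
    #                              hardening_strength=2.0, lr=1e-3)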
@staticmethod
    def hardening(q, h_func, strength):
        """Harden the soft assignment distributions q and return p.
        Args:
            q: input distributions.
            h_func: input hardening function.
            strength: hardening strength.
        returns:
            p: hardened and normalized distributions.
        """
        q = h_func(q)
        weight = q ** strength / q.sum(0)
return (weight.T / weight.sum(1)).T
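# --- Editor's illustrative sketch (not part of the original source) ---
# Demonstrates the hardening step in isolation: a soft assignment matrix is
# sharpened by a smoothstep polynomial and the strength exponent, and each row
# still sums to 1. The toy numbers are assumptions chosen only for illustration.
if __name__ == '__main__':
    q_demo = np.array([[0.6, 0.4],
                       [0.2, 0.8]])
    p_demo = DCE.hardening(q_demo, DCE.HARDENING_FUNCS[3], 2.0)
    print(p_demo)         # rows are sharper than q_demo
    print(p_demo.sum(1))  # ~[1. 1.]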
| 41.430493
| 79
| 0.573763
|
from dimreducer import DeepAutoEncoder
from cluster import KMeansLayer
from cluster import KMeans
from keras import Model
from keras import optimizers
from keras.utils import normalize
import numpy as np
class DCE():
HARDENING_FUNCS = {
1: lambda x: x,
3: lambda x: (-2*x + 3) * x**2,
5: lambda x: ((6*x - 15)*x + 10) * x**3,
7: lambda x: (((-20*x + 70)*x - 84)*x + 35) * x**4,
9: lambda x: ((((70*x - 315)*x + 540)*x -420)*x + 126) * x**5}
def __init__(self, autoencoder_dims, n_clusters, update_interval=50,
max_iteration=1e4, clustering_tol=1e-4, alpha=1.0):
self.autoencoder_dims = autoencoder_dims
self.n_clusters = n_clusters
self.alpha = alpha
self.update_interval = update_interval
self.max_iteration = max_iteration
self.clustering_tol = clustering_tol
self.model = None
return
def build_model(self, norm=True, act='relu'):
autoencoder = DeepAutoEncoder(self.autoencoder_dims, act)
autoencoder.build_model(norm=norm)
embeding = autoencoder.model.get_layer(name='embedding_layer').output
clustering = KMeansLayer(self.n_clusters, alpha=self.alpha,
name='clustering')(embeding)
self.model = Model(inputs=autoencoder.model.input,
outputs=[clustering,autoencoder.model.output])
return
def train_model(self, data_train,
labels_train=None, data_test=None, labels_test=None,
verbose=1,
compiled=False, clustering_loss='kld',
decoder_loss='mse',clustering_loss_weight=0.5,
hardening_order=1, hardening_strength=2.0,
optimizer='adam', lr=0.001, decay=0.0):
if (not compiled):
assert clustering_loss_weight <= 1 and clustering_loss_weight >= 0
if optimizer == 'adam':
dce_optimizer = optimizers.Adam(lr=lr,decay=decay)
elif optimizer == 'sgd':
dce_optimizer = optimizers.sgd(lr=lr,decay=decay)
else:
raise Exception('Input optimizer was not found')
self.model.compile(loss={'clustering': clustering_loss,
'decoder_output': decoder_loss},
loss_weights=[clustering_loss_weight,
1 - clustering_loss_weight],
optimizer=dce_optimizer)
if (labels_train is not None):
supervised_learning = True
if verbose >= 1: print('Starting supervised learning')
else:
supervised_learning = False
if verbose >= 1: print('Starting unsupervised learning')
kmeans_init = KMeans(n_clusters=self.n_clusters)
kmeans_init.build_model()
encoder = Model(inputs=self.model.input,
outputs=self.model.get_layer(\
name='embedding_layer').output)
kmeans_init.model.fit(encoder.predict(data_train))
y_pred_last = kmeans_init.model.labels_
self.model.get_layer(name='clustering').\
set_weights([kmeans_init.model.cluster_centers_])
if not supervised_learning:
assert hardening_order in DCE.HARDENING_FUNCS.keys()
assert hardening_strength >= 1.0
h_func = DCE.HARDENING_FUNCS[hardening_order]
else:
assert len(labels_train) == len(data_train)
assert len(np.unique(labels_train)) == self.n_clusters
p = np.zeros(shape=(len(labels_train), self.n_clusters))
for i in range(len(labels_train)):
p[i][labels_train[i]] = 1.0
if data_test is not None:
assert len(labels_test) == len(data_test)
assert len(np.unique(labels_test)) == self.n_clusters
p_test = np.zeros(shape=(len(labels_test), self.n_clusters))
for i in range(len(labels_test)):
p_test[i][labels_test[i]] = 1.0
validation_loss = []
loss = []
for iteration in range(int(self.max_iteration)):
if iteration % self.update_interval == 0:
q, _ = self.model.predict(data_train)
if not supervised_learning:
p = DCE.hardening(q, h_func, hardening_strength)
y_pred = q.argmax(1)
delta_label_i = np.sum(y_pred != y_pred_last).\
astype(np.float32) / y_pred.shape[0]
y_pred_last = y_pred
if iteration > 0 and delta_label_i < self.clustering_tol:
print(str(delta_label_i) +' < ' + str(self.clustering_tol))
print('Reached tolerance threshold. Stopping training.')
break
loss.append(self.model.train_on_batch(x=data_train,
y=[p,data_train]))
if supervised_learning and data_test is not None:
validation_loss.append(self.model.test_on_batch(
x=data_test, y=[p_test,data_test]))
if verbose > 0 and iteration % self.update_interval == 0:
print('Epoch: ' + str(iteration))
if verbose == 1:
print(' Total_loss = ' + str(loss[iteration][0]) +
';Delta_label = ' + str(delta_label_i))
print(' Clustering_loss = ' + str(loss[iteration][1]) +
'; Decoder_loss = ' + str(loss[iteration][2]))
if iteration == self.max_iteration - 1:
print('Reached maximum iteration. Stopping training.')
if data_test is None:
return np.array(loss).T
else:
return [np.array(loss).T, np.array(validation_loss).T]
@staticmethod
    def hardening(q, h_func, strength):
        q = h_func(q)
        weight = q ** strength / q.sum(0)
return (weight.T / weight.sum(1)).T
| true
| true
|
790295940172654157a92646c0d15692f2074d43
| 439
|
py
|
Python
|
tests/bench/web_traffic_jobs/de.wikipedia.org/test_web_traffic_de.wikipedia.org_pyaf_default_clean.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | 377
|
2016-10-13T20:52:44.000Z
|
2022-03-29T18:04:14.000Z
|
tests/bench/web_traffic_jobs/de.wikipedia.org/test_web_traffic_de.wikipedia.org_pyaf_default_clean.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | 160
|
2016-10-13T16:11:53.000Z
|
2022-03-28T04:21:34.000Z
|
tests/bench/web_traffic_jobs/de.wikipedia.org/test_web_traffic_de.wikipedia.org_pyaf_default_clean.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | 63
|
2017-03-09T14:51:18.000Z
|
2022-03-27T20:52:57.000Z
|
import pyaf.Bench.web_traffic.Forecaster as fo
PROJECTS = ['de.wikipedia.org']
data_dir = 'data/web-traffic-time-series-forecasting'
lForecaster = fo.cProjectForecaster()
lForecaster.mDataDirectory = data_dir
lForecaster.mBackendName = 'pyaf_default_clean'
lForecaster.mKeysFileName = 'key_1.csv.zip'
last_date = '2016-12-31'
horizon = 60
lForecaster.mKeysFileName = 'key_1.csv.zip'
lForecaster.forecast(PROJECTS, last_date , horizon)
| 27.4375
| 53
| 0.794989
|
import pyaf.Bench.web_traffic.Forecaster as fo
PROJECTS = ['de.wikipedia.org']
data_dir = 'data/web-traffic-time-series-forecasting'
lForecaster = fo.cProjectForecaster()
lForecaster.mDataDirectory = data_dir
lForecaster.mBackendName = 'pyaf_default_clean'
lForecaster.mKeysFileName = 'key_1.csv.zip'
last_date = '2016-12-31'
horizon = 60
lForecaster.mKeysFileName = 'key_1.csv.zip'
lForecaster.forecast(PROJECTS, last_date , horizon)
| true
| true
|
7902965ea82d2a1661b5d7f36b1f090d6ec9ca44
| 2,089
|
py
|
Python
|
mars/tensor/fft/irfft2.py
|
haijohn/mars
|
672b3a33a70565f01b1a3f508908445491d85acf
|
[
"Apache-2.0"
] | 1
|
2021-11-30T12:07:21.000Z
|
2021-11-30T12:07:21.000Z
|
mars/tensor/fft/irfft2.py
|
JeffroMF/mars
|
2805241ac55b50c4f6319baa41113fbf8c723832
|
[
"Apache-2.0"
] | null | null | null |
mars/tensor/fft/irfft2.py
|
JeffroMF/mars
|
2805241ac55b50c4f6319baa41113fbf8c723832
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ... import opcodes as OperandDef
from ..datasource import tensor as astensor
from .core import TensorRealIFFTNMixin, validate_fftn, TensorRealFFTN
class TensorIRFFT2(TensorRealFFTN, TensorRealIFFTNMixin):
_op_type_ = OperandDef.IRFFT2
def __init__(self, shape=None, axes=None, norm=None, **kw):
super().__init__(_shape=shape, _axes=axes, _norm=norm, **kw)
def irfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse FFT of a real array.
Parameters
----------
a : array_like
The input tensor
s : sequence of ints, optional
Shape of the inverse FFT.
axes : sequence of ints, optional
The axes over which to compute the inverse fft.
Default is the last two axes.
norm : {None, "ortho"}, optional
Normalization mode (see `mt.fft`). Default is None.
Returns
-------
out : Tensor
The result of the inverse real 2-D FFT.
See Also
--------
irfftn : Compute the inverse of the N-dimensional FFT of real input.
Notes
-----
This is really `irfftn` with different defaults.
For more details see `irfftn`.
"""
if len(axes) != 2:
raise ValueError("axes length should be 2")
a = astensor(a)
axes = validate_fftn(a, s=s, axes=axes, norm=norm)
op = TensorIRFFT2(shape=s, axes=axes, norm=norm, dtype=np.dtype(np.float_))
return op(a)
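# Illustrative sketch (not part of the original file): irfft2 inverts a 2-D real FFT.
# NumPy's reference implementation is used for the round trip below; mars.tensor.fft is
# assumed to mirror these semantics, with `s` giving the shape of the inverse transform.
import numpy as np

a = np.random.rand(4, 6)
spectrum = np.fft.rfft2(a)                       # last axis is halved: shape (4, 4)
recovered = np.fft.irfft2(spectrum, s=a.shape)   # pass s to recover the original shape
assert np.allclose(a, recovered)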
| 30.720588
| 79
| 0.674485
|
import numpy as np
from ... import opcodes as OperandDef
from ..datasource import tensor as astensor
from .core import TensorRealIFFTNMixin, validate_fftn, TensorRealFFTN
class TensorIRFFT2(TensorRealFFTN, TensorRealIFFTNMixin):
_op_type_ = OperandDef.IRFFT2
def __init__(self, shape=None, axes=None, norm=None, **kw):
super().__init__(_shape=shape, _axes=axes, _norm=norm, **kw)
def irfft2(a, s=None, axes=(-2, -1), norm=None):
if len(axes) != 2:
raise ValueError("axes length should be 2")
a = astensor(a)
axes = validate_fftn(a, s=s, axes=axes, norm=norm)
op = TensorIRFFT2(shape=s, axes=axes, norm=norm, dtype=np.dtype(np.float_))
return op(a)
| true
| true
|
79029717b6e62cbc9fc8eac9d690a9a1a31aa5e4
| 760
|
py
|
Python
|
kube_hunter/modules/discovery/etcd.py
|
vipulgupta2048/kube-hunter
|
fe3dba90d8cc6f947846e6a099a2562e7c2f88b1
|
[
"Apache-2.0"
] | 1
|
2021-09-13T21:52:52.000Z
|
2021-09-13T21:52:52.000Z
|
kube_hunter/modules/discovery/etcd.py
|
vipulgupta2048/kube-hunter
|
fe3dba90d8cc6f947846e6a099a2562e7c2f88b1
|
[
"Apache-2.0"
] | 2
|
2021-05-20T20:17:17.000Z
|
2022-02-26T09:20:16.000Z
|
kube_hunter/modules/discovery/etcd.py
|
vipulgupta2048/kube-hunter
|
fe3dba90d8cc6f947846e6a099a2562e7c2f88b1
|
[
"Apache-2.0"
] | 1
|
2020-07-29T07:50:10.000Z
|
2020-07-29T07:50:10.000Z
|
import json
import logging
import requests
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Event, OpenPortEvent, Service
from kube_hunter.core.types import Discovery
class EtcdAccessEvent(Service, Event):
"""Etcd is a DB that stores cluster's data, it contains configuration and current
state information, and might contain secrets"""
def __init__(self):
Service.__init__(self, name="Etcd")
@handler.subscribe(OpenPortEvent, predicate= lambda p: p.port == 2379)
class EtcdRemoteAccess(Discovery):
"""Etcd service
check for the existence of etcd service
"""
def __init__(self, event):
self.event = event
def execute(self):
self.publish_event(EtcdAccessEvent())
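# Illustrative sketch (not part of the original file): the subscription predicate above only
# triggers the discovery for etcd's default client port. A minimal stand-in event is used
# here instead of the real OpenPortEvent, which carries additional fields.
from collections import namedtuple

FakePortEvent = namedtuple("FakePortEvent", ["port"])
predicate = lambda p: p.port == 2379
print(predicate(FakePortEvent(port=2379)))   # True  -> EtcdRemoteAccess.execute() would run
print(predicate(FakePortEvent(port=8080)))   # False -> the event is ignored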
| 28.148148
| 85
| 0.735526
|
import json
import logging
import requests
from kube_hunter.core.events import handler
from kube_hunter.core.events.types import Event, OpenPortEvent, Service
from kube_hunter.core.types import Discovery
class EtcdAccessEvent(Service, Event):
def __init__(self):
Service.__init__(self, name="Etcd")
@handler.subscribe(OpenPortEvent, predicate= lambda p: p.port == 2379)
class EtcdRemoteAccess(Discovery):
def __init__(self, event):
self.event = event
def execute(self):
self.publish_event(EtcdAccessEvent())
| true
| true
|
79029782dda1542723bf7287a0f87d6c1c34669f
| 9,788
|
py
|
Python
|
result_buffer.py
|
zivaharoni/capacity-rl
|
20ed628b3bc9f3b08996f289e7855121f3addf71
|
[
"MIT"
] | 2
|
2020-01-11T23:24:23.000Z
|
2020-12-26T10:01:38.000Z
|
result_buffer.py
|
zivaharoni/capacity-rl
|
20ed628b3bc9f3b08996f289e7855121f3addf71
|
[
"MIT"
] | null | null | null |
result_buffer.py
|
zivaharoni/capacity-rl
|
20ed628b3bc9f3b08996f289e7855121f3addf71
|
[
"MIT"
] | 1
|
2020-03-11T00:49:26.000Z
|
2020-03-11T00:49:26.000Z
|
import numpy as np
import matplotlib.pyplot as plt
import time
import csv
import os
import scipy.io as mat4py
import logging
logger = logging.getLogger("logger")
class ResultBuffer(object):
def __init__(self, log_path, episode_types):
self.log_path = log_path
self.current_episode = None
self.episodes = {e_type: list() for e_type in episode_types}
self.average_reward = 0.0
self.initial_reward = 0.0
self.average_reward_counter = 0
self.n_cluster = 0
for episode_type in self.episodes.keys():
with open(os.path.join(self.log_path,'{}.csv'.format(episode_type)), mode='w') as result_file:
writer = csv.writer(result_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow(self.title_csv())
def update_episode(self, **kwargs):
if self.current_episode is None:
raise ValueError("There is no initiated episodes object")
self.current_episode.add(**kwargs)
def add_episode(self, episode_type, lr, noise_std, buffer_size):
if episode_type in self.episodes.keys():
idx = len(self.episodes[episode_type])
episode_name = "{}_{:03d}".format(episode_type,idx)
self.episodes[episode_type].append(Episode(episode_name, lr, noise_std, buffer_size, self.average_reward))
self.current_episode = self.episodes[episode_type][-1]
else:
raise ValueError("Invalid episode type added to result buffer")
def finalize_episode(self, update_average_reward=None):
self.current_episode.summarize()
if update_average_reward is not None:
new_average = self.current_episode.final_stats['online_rewards']
if np.abs(new_average-self.initial_reward) > 0.05:
self.initial_reward = new_average
self.average_reward_counter = 0
self.average_reward = (self.average_reward_counter * self.average_reward + new_average) / (self.average_reward_counter + 1)
self.average_reward_counter += 1
logger.info(self.current_episode)
self.write_all()
def write_all(self):
for episode_type in self.episodes.keys():
with open(os.path.join(self.log_path,'{}.csv'.format(episode_type)), mode='a') as result_file:
writer = csv.writer(result_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for i, episode in enumerate(self.episodes[episode_type]):
if episode is not None:
if "eval" in episode.name:
try:
episode.save(self.log_path)
except:
logger.info("Saving state evolution failed")
writer.writerow(episode.csv())
self.episodes[episode_type][i] = None
@staticmethod
def title():
text = list()
text.append('{:^20}'.format('Epi'))
text.append('{:^10}'.format('time'))
text.append('{:^9}'.format('lr'))
text.append('{:^9}'.format('noise'))
text.append('{:^12}'.format('buffer size'))
text.append('{:^9}'.format('#of updates'))
text.append('{:^20}'.format('average_reward'))
text.append('{:^20}'.format('actor grad norm'))
text.append('{:^20}'.format('critic grad norm'))
text.append('{:^9}'.format('q_loss'))
text.append('{:^6}'.format('rewards'))
return " | ".join(text)
@staticmethod
def title_csv():
text = list()
text.append('{}'.format('Epi'))
text.append('{}'.format('time'))
text.append('{}'.format('lr'))
text.append('{}'.format('noise'))
text.append('{}'.format('buffer size'))
text.append('{}'.format('#of updates'))
text.append('{}'.format('average_reward'))
text.append('{}'.format('actor grad norm'))
text.append('{}'.format('critic grad norm'))
text.append('{}'.format('q_loss'))
text.append('{}'.format('rewards'))
return text
class Episode(object):
def __init__(self, name, lr, noise_std, buffer_size, average_reward):
# general stats
self.name = name
self.average_reward = average_reward
self.lr = lr
self.noise_std = noise_std
self.buffer_size = buffer_size
self.total_time = time.time()
# training stats
self.stats = dict()
self.final_stats = dict()
def add(self, **kwargs):
for key,val in kwargs.items():
if key not in self.stats.keys():
self.stats[key] = list()
self.stats[key].append(val)
def summarize(self):
# updates counter
if 'global_step_critic' in self.stats.keys():
self.final_stats['global_step'] = self.stats['global_step_critic']
# average rewards
if 'online_rewards' in self.stats.keys():
self.stats['online_rewards'] = np.array(self.stats['online_rewards'])
self.stats['online_rewards'] = np.reshape(self.stats['online_rewards'], [self.stats['online_rewards'].shape[1], -1])
self.final_stats['online_rewards'] = np.mean(self.stats['online_rewards'][:,10:])
# value function error
if 'q_loss' in self.stats.keys():
self.final_stats['q_loss'] = np.mean(self.stats['q_loss'])
# state/action/disturbance evolution
if 'states' in self.stats.keys():
self.final_stats['states'] = np.transpose(np.squeeze(np.array(self.stats['states'])))
if 'actions' in self.stats.keys():
self.final_stats['actions'] = np.swapaxes(np.array(self.stats['actions']), 0, 1)
if 'disturbance' in self.stats.keys():
self.final_stats['disturbance'] = np.transpose(np.array(self.stats['disturbance']))
# gradient stats
if 'g_norm_critic' in self.stats.keys():
self.final_stats['g_norm_critic'] = (np.mean(np.squeeze(np.array(self.stats['g_norm_critic']))),
np.min(np.squeeze(np.array(self.stats['g_norm_critic']))),
np.max(np.squeeze(np.array(self.stats['g_norm_critic']))))
if 'g_norm_actor' in self.stats.keys():
self.final_stats['g_norm_actor'] = (np.mean(np.squeeze(np.array(self.stats['g_norm_actor']))),
np.min(np.squeeze(np.array(self.stats['g_norm_actor']))),
np.max(np.squeeze(np.array(self.stats['g_norm_actor']))))
if 'global_step_actor' in self.stats.keys():
self.final_stats['global_step'] = self.stats['global_step_actor'][-1]
self.total_time = time.time() - self.total_time
del self.stats
def save(self, path):
mat4py.savemat(os.path.join(path, "states", 'states_evol.mat'), {'states': self.final_stats['states']})
mat4py.savemat(os.path.join(path, "states", 'actions_evol.mat'), {'actions': self.final_stats['actions']})
mat4py.savemat(os.path.join(path, "states", 'outputs_evol.mat'), {'disturbance': self.final_stats['disturbance']})
def csv(self):
text = list()
text.append('{}'.format(self.name))
text.append('{:.1f}'.format(self.total_time))
if "eval" not in self.name:
text.append('{:.2e}'.format(self.lr))
text.append('{:.2e}'.format(self.noise_std))
text.append('{}'.format(self.buffer_size))
text.append('{}'.format(self.final_stats['global_step']))
text.append('{:^20}'.format(self.average_reward))
if "eval" not in self.name:
text.append('{}'.format(self.final_stats['g_norm_actor']))
text.append('{}'.format(self.final_stats['g_norm_critic']))
text.append('{:.2e}'.format(self.final_stats['q_loss']))
text.append('{:.5f}'.format(self.final_stats['online_rewards']))
return text
def __repr__(self):
text = list()
text.append('{:^20}'.format(self.name))
text.append('{:^10.1f}'.format(self.total_time))
if "eval" not in self.name:
text.append('{:^9.2e}'.format(self.lr))
text.append('{:^9.2e}'.format(self.noise_std))
text.append('{:^d}'.format(self.buffer_size))
text.append('{}'.format(self.final_stats['global_step']))
text.append('{:^20}'.format(self.average_reward))
if "eval" not in self.name:
mi, ma, mea = self.final_stats['g_norm_actor']
text.append('{:5.2e},{:5.2e},{:5.2e}'.format(mi, ma, mea))
mi, ma, mea = self.final_stats['g_norm_critic']
text.append('{:5.2e},{:5.2e},{:5.2e}'.format(mi, ma, mea))
text.append('{:^10.2e}'.format(self.final_stats['q_loss']))
if "pol" in self.name:
mi, ma, mea = self.final_stats['g_norm_critic']
text.append('{:5.2e},{:5.2e},{:5.2e}'.format(mi, ma, mea))
text.append('{:^10.2e}'.format(self.final_stats['q_loss']))
if len(self.final_stats.keys()) > 0 :
text.append('{:^6.5f}'.format(self.final_stats['online_rewards']))
return " | ".join(text)
class Figure(object):
def __init__(self, name, log_path, y_data, x_data=None, options = None, labels = None):
self.fig = plt.figure()
self.fig.set_size_inches(18.5, 10.5)
for y in y_data:
plt.plot(x_data, y)
plt.legend(labels)
plt.title(" ".join(name.split("_")))
self.fig.savefig(os.path.join(log_path, "plots", name))
plt.close()
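# Illustrative sketch (not part of the original file): the running-average update performed in
# finalize_episode. The reward values are arbitrary; the counter resets whenever a new episode
# reward drifts more than 0.05 from the reference value, exactly as in the method above.
average_reward, counter, initial_reward = 0.0, 0, 0.0
for new_average in [0.10, 0.12, 0.11]:
    if abs(new_average - initial_reward) > 0.05:
        initial_reward = new_average
        counter = 0
    average_reward = (counter * average_reward + new_average) / (counter + 1)
    counter += 1
print(average_reward)   # incremental mean of the episodes since the last reset (0.11 here)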
| 41.651064
| 135
| 0.581324
|
import numpy as np
import matplotlib.pyplot as plt
import time
import csv
import os
import scipy.io as mat4py
import logging
logger = logging.getLogger("logger")
class ResultBuffer(object):
def __init__(self, log_path, episode_types):
self.log_path = log_path
self.current_episode = None
self.episodes = {e_type: list() for e_type in episode_types}
self.average_reward = 0.0
self.initial_reward = 0.0
self.average_reward_counter = 0
self.n_cluster = 0
for episode_type in self.episodes.keys():
with open(os.path.join(self.log_path,'{}.csv'.format(episode_type)), mode='w') as result_file:
writer = csv.writer(result_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow(self.title_csv())
def update_episode(self, **kwargs):
if self.current_episode is None:
raise ValueError("There is no initiated episodes object")
self.current_episode.add(**kwargs)
def add_episode(self, episode_type, lr, noise_std, buffer_size):
if episode_type in self.episodes.keys():
idx = len(self.episodes[episode_type])
episode_name = "{}_{:03d}".format(episode_type,idx)
self.episodes[episode_type].append(Episode(episode_name, lr, noise_std, buffer_size, self.average_reward))
self.current_episode = self.episodes[episode_type][-1]
else:
raise ValueError("Invalid episode type added to result buffer")
def finalize_episode(self, update_average_reward=None):
self.current_episode.summarize()
if update_average_reward is not None:
new_average = self.current_episode.final_stats['online_rewards']
if np.abs(new_average-self.initial_reward) > 0.05:
self.initial_reward = new_average
self.average_reward_counter = 0
self.average_reward = (self.average_reward_counter * self.average_reward + new_average) / (self.average_reward_counter + 1)
self.average_reward_counter += 1
logger.info(self.current_episode)
self.write_all()
def write_all(self):
for episode_type in self.episodes.keys():
with open(os.path.join(self.log_path,'{}.csv'.format(episode_type)), mode='a') as result_file:
writer = csv.writer(result_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for i, episode in enumerate(self.episodes[episode_type]):
if episode is not None:
if "eval" in episode.name:
try:
episode.save(self.log_path)
except:
logger.info("Saving state evolution failed")
writer.writerow(episode.csv())
self.episodes[episode_type][i] = None
@staticmethod
def title():
text = list()
text.append('{:^20}'.format('Epi'))
text.append('{:^10}'.format('time'))
text.append('{:^9}'.format('lr'))
text.append('{:^9}'.format('noise'))
text.append('{:^12}'.format('buffer size'))
text.append('{:^9}'.format('#of updates'))
text.append('{:^20}'.format('average_reward'))
text.append('{:^20}'.format('actor grad norm'))
text.append('{:^20}'.format('critic grad norm'))
text.append('{:^9}'.format('q_loss'))
text.append('{:^6}'.format('rewards'))
return " | ".join(text)
@staticmethod
def title_csv():
text = list()
text.append('{}'.format('Epi'))
text.append('{}'.format('time'))
text.append('{}'.format('lr'))
text.append('{}'.format('noise'))
text.append('{}'.format('buffer size'))
text.append('{}'.format('#of updates'))
text.append('{}'.format('average_reward'))
text.append('{}'.format('actor grad norm'))
text.append('{}'.format('critic grad norm'))
text.append('{}'.format('q_loss'))
text.append('{}'.format('rewards'))
return text
class Episode(object):
def __init__(self, name, lr, noise_std, buffer_size, average_reward):
self.name = name
self.average_reward = average_reward
self.lr = lr
self.noise_std = noise_std
self.buffer_size = buffer_size
self.total_time = time.time()
self.stats = dict()
self.final_stats = dict()
def add(self, **kwargs):
for key,val in kwargs.items():
if key not in self.stats.keys():
self.stats[key] = list()
self.stats[key].append(val)
def summarize(self):
if 'global_step_critic' in self.stats.keys():
self.final_stats['global_step'] = self.stats['global_step_critic']
if 'online_rewards' in self.stats.keys():
self.stats['online_rewards'] = np.array(self.stats['online_rewards'])
self.stats['online_rewards'] = np.reshape(self.stats['online_rewards'], [self.stats['online_rewards'].shape[1], -1])
self.final_stats['online_rewards'] = np.mean(self.stats['online_rewards'][:,10:])
if 'q_loss' in self.stats.keys():
self.final_stats['q_loss'] = np.mean(self.stats['q_loss'])
if 'states' in self.stats.keys():
self.final_stats['states'] = np.transpose(np.squeeze(np.array(self.stats['states'])))
if 'actions' in self.stats.keys():
self.final_stats['actions'] = np.swapaxes(np.array(self.stats['actions']), 0, 1)
if 'disturbance' in self.stats.keys():
self.final_stats['disturbance'] = np.transpose(np.array(self.stats['disturbance']))
if 'g_norm_critic' in self.stats.keys():
self.final_stats['g_norm_critic'] = (np.mean(np.squeeze(np.array(self.stats['g_norm_critic']))),
np.min(np.squeeze(np.array(self.stats['g_norm_critic']))),
np.max(np.squeeze(np.array(self.stats['g_norm_critic']))))
if 'g_norm_actor' in self.stats.keys():
self.final_stats['g_norm_actor'] = (np.mean(np.squeeze(np.array(self.stats['g_norm_actor']))),
np.min(np.squeeze(np.array(self.stats['g_norm_actor']))),
np.max(np.squeeze(np.array(self.stats['g_norm_actor']))))
if 'global_step_actor' in self.stats.keys():
self.final_stats['global_step'] = self.stats['global_step_actor'][-1]
self.total_time = time.time() - self.total_time
del self.stats
def save(self, path):
mat4py.savemat(os.path.join(path, "states", 'states_evol.mat'), {'states': self.final_stats['states']})
mat4py.savemat(os.path.join(path, "states", 'actions_evol.mat'), {'actions': self.final_stats['actions']})
mat4py.savemat(os.path.join(path, "states", 'outputs_evol.mat'), {'disturbance': self.final_stats['disturbance']})
def csv(self):
text = list()
text.append('{}'.format(self.name))
text.append('{:.1f}'.format(self.total_time))
if "eval" not in self.name:
text.append('{:.2e}'.format(self.lr))
text.append('{:.2e}'.format(self.noise_std))
text.append('{}'.format(self.buffer_size))
text.append('{}'.format(self.final_stats['global_step']))
text.append('{:^20}'.format(self.average_reward))
if "eval" not in self.name:
text.append('{}'.format(self.final_stats['g_norm_actor']))
text.append('{}'.format(self.final_stats['g_norm_critic']))
text.append('{:.2e}'.format(self.final_stats['q_loss']))
text.append('{:.5f}'.format(self.final_stats['online_rewards']))
return text
def __repr__(self):
text = list()
text.append('{:^20}'.format(self.name))
text.append('{:^10.1f}'.format(self.total_time))
if "eval" not in self.name:
text.append('{:^9.2e}'.format(self.lr))
text.append('{:^9.2e}'.format(self.noise_std))
text.append('{:^d}'.format(self.buffer_size))
text.append('{}'.format(self.final_stats['global_step']))
text.append('{:^20}'.format(self.average_reward))
if "eval" not in self.name:
mi, ma, mea = self.final_stats['g_norm_actor']
text.append('{:5.2e},{:5.2e},{:5.2e}'.format(mi, ma, mea))
mi, ma, mea = self.final_stats['g_norm_critic']
text.append('{:5.2e},{:5.2e},{:5.2e}'.format(mi, ma, mea))
text.append('{:^10.2e}'.format(self.final_stats['q_loss']))
if "pol" in self.name:
mi, ma, mea = self.final_stats['g_norm_critic']
text.append('{:5.2e},{:5.2e},{:5.2e}'.format(mi, ma, mea))
text.append('{:^10.2e}'.format(self.final_stats['q_loss']))
if len(self.final_stats.keys()) > 0 :
text.append('{:^6.5f}'.format(self.final_stats['online_rewards']))
return " | ".join(text)
class Figure(object):
def __init__(self, name, log_path, y_data, x_data=None, options = None, labels = None):
self.fig = plt.figure()
self.fig.set_size_inches(18.5, 10.5)
for y in y_data:
plt.plot(x_data, y)
plt.legend(labels)
plt.title(" ".join(name.split("_")))
self.fig.savefig(os.path.join(log_path, "plots", name))
plt.close()
| true
| true
|
79029937c25c32d020ea3a39f31a4367f930a97b
| 4,974
|
py
|
Python
|
src/cryptography/hazmat/primitives/hashes.py
|
neheb/cryptography
|
d37346651bc4b9ebc0b8c7cfdc7c1f21de6a751d
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/cryptography/hazmat/primitives/hashes.py
|
neheb/cryptography
|
d37346651bc4b9ebc0b8c7cfdc7c1f21de6a751d
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/cryptography/hazmat/primitives/hashes.py
|
neheb/cryptography
|
d37346651bc4b9ebc0b8c7cfdc7c1f21de6a751d
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import abc
import six
from cryptography import utils
from cryptography.exceptions import (
AlreadyFinalized, UnsupportedAlgorithm, _Reasons
)
from cryptography.hazmat.backends.interfaces import HashBackend
@six.add_metaclass(abc.ABCMeta)
class HashAlgorithm(object):
@abc.abstractproperty
def name(self):
"""
A string naming this algorithm (e.g. "sha256", "md5").
"""
@abc.abstractproperty
def digest_size(self):
"""
The size of the resulting digest in bytes.
"""
@six.add_metaclass(abc.ABCMeta)
class HashContext(object):
@abc.abstractproperty
def algorithm(self):
"""
A HashAlgorithm that will be used by this context.
"""
@abc.abstractmethod
def update(self, data):
"""
Processes the provided bytes through the hash.
"""
@abc.abstractmethod
def finalize(self):
"""
Finalizes the hash context and returns the hash digest as bytes.
"""
@abc.abstractmethod
def copy(self):
"""
Return a HashContext that is a copy of the current context.
"""
@utils.register_interface(HashContext)
class Hash(object):
def __init__(self, algorithm, backend, ctx=None):
if not isinstance(backend, HashBackend):
raise UnsupportedAlgorithm(
"Backend object does not implement HashBackend.",
_Reasons.BACKEND_MISSING_INTERFACE
)
if not isinstance(algorithm, HashAlgorithm):
raise TypeError("Expected instance of hashes.HashAlgorithm.")
self._algorithm = algorithm
self._backend = backend
if ctx is None:
self._ctx = self._backend.create_hash_ctx(self.algorithm)
else:
self._ctx = ctx
algorithm = utils.read_only_property("_algorithm")
def update(self, data):
if self._ctx is None:
raise AlreadyFinalized("Context was already finalized.")
if not isinstance(data, bytes):
raise TypeError("data must be bytes.")
self._ctx.update(data)
def copy(self):
if self._ctx is None:
raise AlreadyFinalized("Context was already finalized.")
return Hash(
self.algorithm, backend=self._backend, ctx=self._ctx.copy()
)
def finalize(self):
if self._ctx is None:
raise AlreadyFinalized("Context was already finalized.")
digest = self._ctx.finalize()
self._ctx = None
return digest
@utils.register_interface(HashAlgorithm)
class SHA1(object):
name = "sha1"
digest_size = 20
block_size = 64
@utils.register_interface(HashAlgorithm)
class SHA512_224(object): # noqa: N801
name = "sha512-224"
digest_size = 28
block_size = 128
@utils.register_interface(HashAlgorithm)
class SHA512_256(object): # noqa: N801
name = "sha512-256"
digest_size = 32
block_size = 128
@utils.register_interface(HashAlgorithm)
class SHA224(object):
name = "sha224"
digest_size = 28
block_size = 64
@utils.register_interface(HashAlgorithm)
class SHA256(object):
name = "sha256"
digest_size = 32
block_size = 64
@utils.register_interface(HashAlgorithm)
class SHA384(object):
name = "sha384"
digest_size = 48
block_size = 128
@utils.register_interface(HashAlgorithm)
class SHA512(object):
name = "sha512"
digest_size = 64
block_size = 128
@utils.register_interface(HashAlgorithm)
class MD5(object):
name = "md5"
digest_size = 16
block_size = 64
@utils.register_interface(HashAlgorithm)
class BLAKE2b(object):
name = "blake2b"
_max_digest_size = 64
_min_digest_size = 1
block_size = 128
def __init__(self, digest_size):
if (
digest_size > self._max_digest_size or
digest_size < self._min_digest_size
):
raise ValueError("Digest size must be {0}-{1}".format(
self._min_digest_size, self._max_digest_size)
)
self._digest_size = digest_size
digest_size = utils.read_only_property("_digest_size")
@utils.register_interface(HashAlgorithm)
class BLAKE2s(object):
name = "blake2s"
block_size = 64
_max_digest_size = 32
_min_digest_size = 1
def __init__(self, digest_size):
if (
digest_size > self._max_digest_size or
digest_size < self._min_digest_size
):
raise ValueError("Digest size must be {0}-{1}".format(
self._min_digest_size, self._max_digest_size)
)
self._digest_size = digest_size
digest_size = utils.read_only_property("_digest_size")
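# Illustrative usage sketch (not part of the original file), assuming the default OpenSSL
# backend and the pre-3.x API shown above, where Hash still takes an explicit backend:
from cryptography.hazmat.backends import default_backend

digest = Hash(SHA256(), backend=default_backend())
digest.update(b"hello ")
digest.update(b"world")
print(digest.finalize().hex())   # hex of the 32-byte SHA-256 digest; the context is now consumed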
| 24.87
| 79
| 0.649377
|
from __future__ import absolute_import, division, print_function
import abc
import six
from cryptography import utils
from cryptography.exceptions import (
AlreadyFinalized, UnsupportedAlgorithm, _Reasons
)
from cryptography.hazmat.backends.interfaces import HashBackend
@six.add_metaclass(abc.ABCMeta)
class HashAlgorithm(object):
@abc.abstractproperty
def name(self):
@abc.abstractproperty
def digest_size(self):
@six.add_metaclass(abc.ABCMeta)
class HashContext(object):
@abc.abstractproperty
def algorithm(self):
@abc.abstractmethod
def update(self, data):
@abc.abstractmethod
def finalize(self):
@abc.abstractmethod
def copy(self):
@utils.register_interface(HashContext)
class Hash(object):
def __init__(self, algorithm, backend, ctx=None):
if not isinstance(backend, HashBackend):
raise UnsupportedAlgorithm(
"Backend object does not implement HashBackend.",
_Reasons.BACKEND_MISSING_INTERFACE
)
if not isinstance(algorithm, HashAlgorithm):
raise TypeError("Expected instance of hashes.HashAlgorithm.")
self._algorithm = algorithm
self._backend = backend
if ctx is None:
self._ctx = self._backend.create_hash_ctx(self.algorithm)
else:
self._ctx = ctx
algorithm = utils.read_only_property("_algorithm")
def update(self, data):
if self._ctx is None:
raise AlreadyFinalized("Context was already finalized.")
if not isinstance(data, bytes):
raise TypeError("data must be bytes.")
self._ctx.update(data)
def copy(self):
if self._ctx is None:
raise AlreadyFinalized("Context was already finalized.")
return Hash(
self.algorithm, backend=self._backend, ctx=self._ctx.copy()
)
def finalize(self):
if self._ctx is None:
raise AlreadyFinalized("Context was already finalized.")
digest = self._ctx.finalize()
self._ctx = None
return digest
@utils.register_interface(HashAlgorithm)
class SHA1(object):
name = "sha1"
digest_size = 20
block_size = 64
@utils.register_interface(HashAlgorithm)
class SHA512_224(object):
name = "sha512-224"
digest_size = 28
block_size = 128
@utils.register_interface(HashAlgorithm)
class SHA512_256(object):
name = "sha512-256"
digest_size = 32
block_size = 128
@utils.register_interface(HashAlgorithm)
class SHA224(object):
name = "sha224"
digest_size = 28
block_size = 64
@utils.register_interface(HashAlgorithm)
class SHA256(object):
name = "sha256"
digest_size = 32
block_size = 64
@utils.register_interface(HashAlgorithm)
class SHA384(object):
name = "sha384"
digest_size = 48
block_size = 128
@utils.register_interface(HashAlgorithm)
class SHA512(object):
name = "sha512"
digest_size = 64
block_size = 128
@utils.register_interface(HashAlgorithm)
class MD5(object):
name = "md5"
digest_size = 16
block_size = 64
@utils.register_interface(HashAlgorithm)
class BLAKE2b(object):
name = "blake2b"
_max_digest_size = 64
_min_digest_size = 1
block_size = 128
def __init__(self, digest_size):
if (
digest_size > self._max_digest_size or
digest_size < self._min_digest_size
):
raise ValueError("Digest size must be {0}-{1}".format(
self._min_digest_size, self._max_digest_size)
)
self._digest_size = digest_size
digest_size = utils.read_only_property("_digest_size")
@utils.register_interface(HashAlgorithm)
class BLAKE2s(object):
name = "blake2s"
block_size = 64
_max_digest_size = 32
_min_digest_size = 1
def __init__(self, digest_size):
if (
digest_size > self._max_digest_size or
digest_size < self._min_digest_size
):
raise ValueError("Digest size must be {0}-{1}".format(
self._min_digest_size, self._max_digest_size)
)
self._digest_size = digest_size
digest_size = utils.read_only_property("_digest_size")
| true
| true
|
790299fc930a9d73c1c99e78c1948a94c9b4200c
| 4,358
|
py
|
Python
|
pybfcontrol/bf_button.py
|
zhangenter/bf_control
|
c613c138ffc08ac7a52b71469da445ddf066ce21
|
[
"MIT"
] | 1
|
2019-05-08T14:48:28.000Z
|
2019-05-08T14:48:28.000Z
|
pybfcontrol/bf_button.py
|
zhangenter/bf_control
|
c613c138ffc08ac7a52b71469da445ddf066ce21
|
[
"MIT"
] | null | null | null |
pybfcontrol/bf_button.py
|
zhangenter/bf_control
|
c613c138ffc08ac7a52b71469da445ddf066ce21
|
[
"MIT"
] | null | null | null |
# -*- coding=utf-8 -*-
import pygame
from pygame.locals import MOUSEBUTTONDOWN
from pybfcontrol.bf_common import BFControlId,BFBase, TEXT_ALIGN_LEFT,TEXT_ALIGN_MIDDLE
CLICK_EFFECT_TIME = 100
PADING = 4
class BFButton(BFBase):
def __init__(self, parent, rect, text='Button', click=None):
super(BFButton, self).__init__()
self.x,self.y,self.width,self.height = rect
self.bg_color = (225,225,225)
self.parent = parent
self.surface = parent.subsurface(rect)
self.is_hover = False
self.in_click = False
self.click_loss_time = 0
self.click_event_id = -1
self.ctl_id = BFControlId().instance().get_new_id()
self._text = text
self._click = click
self.init_font()
def init_font(self):
white = 100, 100, 100
self.textImage = self.font.render(self._text, True, white)
w, h = self.textImage.get_size()
self._ty = (self.height - h) / 2
if self._text_align == TEXT_ALIGN_LEFT:
self._tx = PADING
elif self._text_align == TEXT_ALIGN_MIDDLE:
self._tx = (self.width - PADING - w) / 2
else:
self._tx = (self.width - PADING * 2 - w)
@property
def text(self):
return self._text
@text.setter
def text(self, value):
self._text = value
self.init_font()
@property
def click(self):
return self._click
@click.setter
def click(self, value):
self._click = value
def clear_hover(self):
self.is_hover = False
def update(self, event):
if self.in_click and event.type == pygame.USEREVENT+1 and BFControlId().instance().click_id == self.ctl_id:
if self._click: self._click(self)
self.click_event_id = -1
return True
x, y = pygame.mouse.get_pos()
if x > self.x and x < self.x + self.width and y > self.y and y < self.y + self.height:
if self.panel: self.panel.clear_hover()
self.is_hover = True
if event.type == MOUSEBUTTONDOWN:
pressed_array = pygame.mouse.get_pressed()
if pressed_array[0]:
self.in_click = True
if self.panel: self.panel.clear_foucs()
self.click_loss_time = pygame.time.get_ticks() + CLICK_EFFECT_TIME
BFControlId().instance().click_id = self.ctl_id
pygame.time.set_timer(pygame.USEREVENT+1,CLICK_EFFECT_TIME-10)
return True
else:
self.is_hover = False
return False
def draw(self):
if self.in_click:
if self.click_loss_time < pygame.time.get_ticks():
self.in_click = False
if not self._visible:
return
if self.in_click:
r,g,b = self.bg_color
k = 0.95
self.surface.fill((r*k, g*k, b*k))
else:
self.surface.fill(self.bg_color)
if self.is_hover:
pygame.draw.rect(self.surface, (0,0,0), (0,0,self.width,self.height), 1)
pygame.draw.rect(self.surface, (100,100,100), (0,0,self.width-1,self.height-1), 1)
layers = 5
r_step = (210-170)/layers
g_step = (225-205)/layers
for i in range(layers):
pygame.draw.rect(self.surface, (170+r_step*i, 205+g_step*i, 255), (i, i, self.width - 2 - i*2, self.height - 2 - i*2), 1)
else:
self.surface.fill(self.bg_color)
pygame.draw.rect(self.surface, (0,0,0), (0,0,self.width,self.height), 1)
pygame.draw.rect(self.surface, (100,100,100), (0,0,self.width-1,self.height-1), 1)
pygame.draw.rect(self.surface, self.bg_color, (0,0,self.width-2,self.height-2), 1)
self.surface.blit(self.textImage, (self._tx, self._ty))
class BFButtonGroup(object):
def __init__(self):
self.btn_list = []
def add_button(self, button):
self.btn_list.append(button)
def make_button(self, screen, rect, text='Button', click=None):
button = BFButton(screen, rect,text=text,click=click)
self.add_button(button)
def update(self, event):
for button in self.btn_list: button.update(event)
def draw(self):
for button in self.btn_list: button.draw()
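# Illustrative usage sketch (not part of the original file). It assumes BFBase supplies the
# font/panel attributes used above and that a pygame display can be created.
import pygame

def demo():
    pygame.init()
    screen = pygame.display.set_mode((320, 240))
    group = BFButtonGroup()
    group.make_button(screen, (60, 80, 200, 40), text='OK',
                      click=lambda btn: print('clicked', btn.text))
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            group.update(event)
        screen.fill((255, 255, 255))
        group.draw()
        pygame.display.flip()
    pygame.quit()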
| 35.430894
| 137
| 0.583983
|
import pygame
from pygame.locals import MOUSEBUTTONDOWN
from pybfcontrol.bf_common import BFControlId,BFBase, TEXT_ALIGN_LEFT,TEXT_ALIGN_MIDDLE
CLICK_EFFECT_TIME = 100
PADING = 4
class BFButton(BFBase):
def __init__(self, parent, rect, text='Button', click=None):
super(BFButton, self).__init__()
self.x,self.y,self.width,self.height = rect
self.bg_color = (225,225,225)
self.parent = parent
self.surface = parent.subsurface(rect)
self.is_hover = False
self.in_click = False
self.click_loss_time = 0
self.click_event_id = -1
self.ctl_id = BFControlId().instance().get_new_id()
self._text = text
self._click = click
self.init_font()
def init_font(self):
white = 100, 100, 100
self.textImage = self.font.render(self._text, True, white)
w, h = self.textImage.get_size()
self._ty = (self.height - h) / 2
if self._text_align == TEXT_ALIGN_LEFT:
self._tx = PADING
elif self._text_align == TEXT_ALIGN_MIDDLE:
self._tx = (self.width - PADING - w) / 2
else:
self._tx = (self.width - PADING * 2 - w)
@property
def text(self):
return self._text
@text.setter
def text(self, value):
self._text = value
self.init_font()
@property
def click(self):
return self._click
@click.setter
def click(self, value):
self._click = value
def clear_hover(self):
self.is_hover = False
def update(self, event):
if self.in_click and event.type == pygame.USEREVENT+1 and BFControlId().instance().click_id == self.ctl_id:
if self._click: self._click(self)
self.click_event_id = -1
return True
x, y = pygame.mouse.get_pos()
if x > self.x and x < self.x + self.width and y > self.y and y < self.y + self.height:
if self.panel: self.panel.clear_hover()
self.is_hover = True
if event.type == MOUSEBUTTONDOWN:
pressed_array = pygame.mouse.get_pressed()
if pressed_array[0]:
self.in_click = True
if self.panel: self.panel.clear_foucs()
self.click_loss_time = pygame.time.get_ticks() + CLICK_EFFECT_TIME
BFControlId().instance().click_id = self.ctl_id
pygame.time.set_timer(pygame.USEREVENT+1,CLICK_EFFECT_TIME-10)
return True
else:
self.is_hover = False
return False
def draw(self):
if self.in_click:
if self.click_loss_time < pygame.time.get_ticks():
self.in_click = False
if not self._visible:
return
if self.in_click:
r,g,b = self.bg_color
k = 0.95
self.surface.fill((r*k, g*k, b*k))
else:
self.surface.fill(self.bg_color)
if self.is_hover:
pygame.draw.rect(self.surface, (0,0,0), (0,0,self.width,self.height), 1)
pygame.draw.rect(self.surface, (100,100,100), (0,0,self.width-1,self.height-1), 1)
layers = 5
r_step = (210-170)/layers
g_step = (225-205)/layers
for i in range(layers):
pygame.draw.rect(self.surface, (170+r_step*i, 205+g_step*i, 255), (i, i, self.width - 2 - i*2, self.height - 2 - i*2), 1)
else:
self.surface.fill(self.bg_color)
pygame.draw.rect(self.surface, (0,0,0), (0,0,self.width,self.height), 1)
pygame.draw.rect(self.surface, (100,100,100), (0,0,self.width-1,self.height-1), 1)
pygame.draw.rect(self.surface, self.bg_color, (0,0,self.width-2,self.height-2), 1)
self.surface.blit(self.textImage, (self._tx, self._ty))
class BFButtonGroup(object):
def __init__(self):
self.btn_list = []
def add_button(self, button):
self.btn_list.append(button)
def make_button(self, screen, rect, text='Button', click=None):
button = BFButton(screen, rect,text=text,click=click)
self.add_button(button)
def update(self, event):
for button in self.btn_list: button.update(event)
def draw(self):
for button in self.btn_list: button.draw()
| true
| true
|
79029aad41b74cee129095b7c6bd80267aad3968
| 3,980
|
py
|
Python
|
ckworker/ckworker/ckcore.py
|
mesosphere/cloudkeeper
|
11be262df5874c1033cfec9964bba1596cab6a36
|
[
"Apache-2.0"
] | 99
|
2020-04-15T22:56:34.000Z
|
2021-06-13T15:04:55.000Z
|
ckworker/ckworker/ckcore.py
|
mesosphere/cloudkeeper
|
11be262df5874c1033cfec9964bba1596cab6a36
|
[
"Apache-2.0"
] | null | null | null |
ckworker/ckworker/ckcore.py
|
mesosphere/cloudkeeper
|
11be262df5874c1033cfec9964bba1596cab6a36
|
[
"Apache-2.0"
] | 14
|
2020-04-14T22:13:59.000Z
|
2021-04-05T16:42:31.000Z
|
import json
import requests
from cklib.args import ArgumentParser
from cklib.logging import log
from cklib.jwt import encode_jwt_to_headers
from cklib.graph import Graph, GraphExportIterator
def send_to_ckcore(graph: Graph):
if not ArgumentParser.args.ckcore_uri:
return
log.info("ckcore Event Handler called")
base_uri = ArgumentParser.args.ckcore_uri.strip("/")
ckcore_graph = ArgumentParser.args.ckcore_graph
dump_json = ArgumentParser.args.debug_dump_json
create_graph(base_uri, ckcore_graph)
update_model(graph, base_uri, dump_json=dump_json)
send_graph(graph, base_uri, ckcore_graph, dump_json=dump_json)
def create_graph(ckcore_base_uri: str, ckcore_graph: str):
graph_uri = f"{ckcore_base_uri}/graph/{ckcore_graph}"
log.debug(f"Creating graph {ckcore_graph} via {graph_uri}")
headers = {"accept": "application/json"}
if getattr(ArgumentParser.args, "psk", None):
encode_jwt_to_headers(headers, {}, ArgumentParser.args.psk)
r = requests.post(graph_uri, data="", headers=headers)
if r.status_code != 200:
log.error(r.content)
raise RuntimeError(f"Failed to create graph: {r.content}")
def update_model(graph: Graph, ckcore_base_uri: str, dump_json: bool = False):
model_uri = f"{ckcore_base_uri}/model"
log.debug(f"Updating model via {model_uri}")
model_json = json.dumps(graph.export_model(), indent=4)
if dump_json:
with open("model.dump.json", "w") as model_outfile:
model_outfile.write(model_json)
headers = {}
if getattr(ArgumentParser.args, "psk", None):
encode_jwt_to_headers(headers, {}, ArgumentParser.args.psk)
r = requests.patch(model_uri, data=model_json, headers=headers)
if r.status_code != 200:
log.error(r.content)
raise RuntimeError(f"Failed to create model: {r.content}")
def send_graph(
graph: Graph, ckcore_base_uri: str, ckcore_graph: str, dump_json: bool = False
):
merge_uri = f"{ckcore_base_uri}/graph/{ckcore_graph}/merge"
log.debug(f"Sending graph via {merge_uri}")
graph_outfile = None
if dump_json:
graph_outfile = open("graph.dump.json", "w")
try:
graph_export_iterator = GraphExportIterator(graph, graph_outfile)
headers = {
"Content-Type": "application/x-ndjson",
"Cloudkeeper-Ckworker-Nodes": str(graph.number_of_nodes()),
"Cloudkeeper-Ckworker-Edges": str(graph.number_of_edges()),
}
if getattr(ArgumentParser.args, "psk", None):
encode_jwt_to_headers(headers, {}, ArgumentParser.args.psk)
r = requests.post(
merge_uri,
data=graph_export_iterator,
headers=headers,
)
if r.status_code != 200:
log.error(r.content)
raise RuntimeError(f"Failed to send graph: {r.content}")
log.debug(f"ckcore reply: {r.content.decode()}")
log.debug(
f"Sent {graph_export_iterator.nodes_sent} nodes and"
f" {graph_export_iterator.edges_sent} edges to ckcore"
)
finally:
if graph_outfile is not None:
graph_outfile.close()
def add_args(arg_parser: ArgumentParser) -> None:
arg_parser.add_argument(
"--ckcore-uri",
help="ckcore URI (default: http://localhost:8900)",
default="http://localhost:8900",
dest="ckcore_uri",
)
arg_parser.add_argument(
"--ckcore-ws-uri",
help="ckcore Websocket URI (default: ws://localhost:8900)",
default="ws://localhost:8900",
dest="ckcore_ws_uri",
)
arg_parser.add_argument(
"--ckcore-graph",
help="ckcore graph name (default: ck)",
default="ck",
dest="ckcore_graph",
)
arg_parser.add_argument(
"--debug-dump-json",
help="Dump the generated json data (default: False)",
dest="debug_dump_json",
action="store_true",
)
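# Illustrative sketch (not part of the original file): the merge endpoint above accepts
# newline-delimited JSON, which `requests` can stream from a generator as a chunked body.
# The URL and the two documents below are made-up placeholders, not the real ckcore schema.
import json
import requests

def ndjson_lines(docs):
    for doc in docs:
        yield (json.dumps(doc) + "\n").encode("utf-8")

docs = [{"id": "node1", "kind": "example"}, {"id": "node2", "kind": "example"}]
r = requests.post("http://localhost:8900/graph/ck/merge",
                  data=ndjson_lines(docs),
                  headers={"Content-Type": "application/x-ndjson"})
print(r.status_code)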
| 32.096774
| 82
| 0.654271
|
import json
import requests
from cklib.args import ArgumentParser
from cklib.logging import log
from cklib.jwt import encode_jwt_to_headers
from cklib.graph import Graph, GraphExportIterator
def send_to_ckcore(graph: Graph):
if not ArgumentParser.args.ckcore_uri:
return
log.info("ckcore Event Handler called")
base_uri = ArgumentParser.args.ckcore_uri.strip("/")
ckcore_graph = ArgumentParser.args.ckcore_graph
dump_json = ArgumentParser.args.debug_dump_json
create_graph(base_uri, ckcore_graph)
update_model(graph, base_uri, dump_json=dump_json)
send_graph(graph, base_uri, ckcore_graph, dump_json=dump_json)
def create_graph(ckcore_base_uri: str, ckcore_graph: str):
graph_uri = f"{ckcore_base_uri}/graph/{ckcore_graph}"
log.debug(f"Creating graph {ckcore_graph} via {graph_uri}")
headers = {"accept": "application/json"}
if getattr(ArgumentParser.args, "psk", None):
encode_jwt_to_headers(headers, {}, ArgumentParser.args.psk)
r = requests.post(graph_uri, data="", headers=headers)
if r.status_code != 200:
log.error(r.content)
raise RuntimeError(f"Failed to create graph: {r.content}")
def update_model(graph: Graph, ckcore_base_uri: str, dump_json: bool = False):
model_uri = f"{ckcore_base_uri}/model"
log.debug(f"Updating model via {model_uri}")
model_json = json.dumps(graph.export_model(), indent=4)
if dump_json:
with open("model.dump.json", "w") as model_outfile:
model_outfile.write(model_json)
headers = {}
if getattr(ArgumentParser.args, "psk", None):
encode_jwt_to_headers(headers, {}, ArgumentParser.args.psk)
r = requests.patch(model_uri, data=model_json, headers=headers)
if r.status_code != 200:
log.error(r.content)
raise RuntimeError(f"Failed to create model: {r.content}")
def send_graph(
graph: Graph, ckcore_base_uri: str, ckcore_graph: str, dump_json: bool = False
):
merge_uri = f"{ckcore_base_uri}/graph/{ckcore_graph}/merge"
log.debug(f"Sending graph via {merge_uri}")
graph_outfile = None
if dump_json:
graph_outfile = open("graph.dump.json", "w")
try:
graph_export_iterator = GraphExportIterator(graph, graph_outfile)
headers = {
"Content-Type": "application/x-ndjson",
"Cloudkeeper-Ckworker-Nodes": str(graph.number_of_nodes()),
"Cloudkeeper-Ckworker-Edges": str(graph.number_of_edges()),
}
if getattr(ArgumentParser.args, "psk", None):
encode_jwt_to_headers(headers, {}, ArgumentParser.args.psk)
r = requests.post(
merge_uri,
data=graph_export_iterator,
headers=headers,
)
if r.status_code != 200:
log.error(r.content)
raise RuntimeError(f"Failed to send graph: {r.content}")
log.debug(f"ckcore reply: {r.content.decode()}")
log.debug(
f"Sent {graph_export_iterator.nodes_sent} nodes and"
f" {graph_export_iterator.edges_sent} edges to ckcore"
)
finally:
if graph_outfile is not None:
graph_outfile.close()
def add_args(arg_parser: ArgumentParser) -> None:
arg_parser.add_argument(
"--ckcore-uri",
help="ckcore URI (default: http://localhost:8900)",
default="http://localhost:8900",
dest="ckcore_uri",
)
arg_parser.add_argument(
"--ckcore-ws-uri",
help="ckcore Websocket URI (default: ws://localhost:8900)",
default="ws://localhost:8900",
dest="ckcore_ws_uri",
)
arg_parser.add_argument(
"--ckcore-graph",
help="ckcore graph name (default: ck)",
default="ck",
dest="ckcore_graph",
)
arg_parser.add_argument(
"--debug-dump-json",
help="Dump the generated json data (default: False)",
dest="debug_dump_json",
action="store_true",
)
| true
| true
|
79029cccf9d9b1413e725636648df9be72c5d213
| 1,104
|
py
|
Python
|
exercises/search_in_sorted_matrix.py
|
edoriggio/algorithms-and-data-structures
|
d6f3ac520bb3021400bf47770de692ab0c305b75
|
[
"Apache-2.0"
] | null | null | null |
exercises/search_in_sorted_matrix.py
|
edoriggio/algorithms-and-data-structures
|
d6f3ac520bb3021400bf47770de692ab0c305b75
|
[
"Apache-2.0"
] | null | null | null |
exercises/search_in_sorted_matrix.py
|
edoriggio/algorithms-and-data-structures
|
d6f3ac520bb3021400bf47770de692ab0c305b75
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Edoardo Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Complexity: O(n log m) for an n-row, m-column matrix (binary search on each row)
def search_in_sorted_matrix(A, x):
for S in A:
if binary_search(S, x):
return True
return False
def binary_search(A, x):
low = 0
high = len(A) - 1
mid = 0
while low <= high:
mid = (high + low) // 2
if A[mid] < x:
low = mid + 1
elif A[mid] > x:
high = mid - 1
else:
return True
return False
mat = [[1, 2, 3, 4, 5], [9, 10, 20, 32, 55]]
print(search_in_sorted_matrix(mat, 56))
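# Illustrative extra check (not part of the original file): a present value is found by the
# per-row binary search, so the overall cost stays O(rows * log(columns)).
print(search_in_sorted_matrix(mat, 32))   # True: 32 is in the second row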
| 24.533333
| 74
| 0.629529
|
def search_in_sorted_matrix(A, x):
for S in A:
if binary_search(S, x):
return True
return False
def binary_search(A, x):
low = 0
high = len(A) - 1
mid = 0
while low <= high:
mid = (high + low) // 2
if A[mid] < x:
low = mid + 1
elif A[mid] > x:
high = mid - 1
else:
return True
return False
mat = [[1, 2, 3, 4, 5], [9, 10, 20, 32, 55]]
print(search_in_sorted_matrix(mat, 56))
| true
| true
|
79029d8b354aeee37cb6869d810b9e3f9aa557ab
| 1,137
|
py
|
Python
|
src/pyhees/section10_j1_f.py
|
jjj-design/pyhees
|
d63e7cd84abfc2f509bc1cd1256598a10aac1825
|
[
"MIT"
] | null | null | null |
src/pyhees/section10_j1_f.py
|
jjj-design/pyhees
|
d63e7cd84abfc2f509bc1cd1256598a10aac1825
|
[
"MIT"
] | null | null | null |
src/pyhees/section10_j1_f.py
|
jjj-design/pyhees
|
d63e7cd84abfc2f509bc1cd1256598a10aac1825
|
[
"MIT"
] | null | null | null |
# Microwave oven
def get_E_Elc_microwave_d_t(P_Elc_microwave_cook_rtd, t_microwave_cook_d_t):
"""時刻別消費電力量を計算する
Parameters
----------
P_Elc_microwave_cook_rtd : float
調理時の定格待機電力, W
t_microwave_cook_d_t : ndarray(N-dimensional array)
1年間の全時間の調理時間を格納したND配列, h
d日t時の調理時間が年開始時から8760個連続して格納されている
Returns
----------
E_Elc_microwave_d_t : ndarray(N-dimensional array)
1年間の全時間の消費電力量を格納したND配列, Wh
d日t時の消費電力量が年開始時から8760個連続して格納されている
"""
P_Elc_microwave_cook = get_P_Elc_microwave_cook(P_Elc_microwave_cook_rtd)
E_Elc_microwave_d_t = P_Elc_microwave_cook * t_microwave_cook_d_t
E_Elc_microwave_d_t = E_Elc_microwave_d_t * 10**(-3)
return E_Elc_microwave_d_t
def get_P_Elc_microwave_cook(P_Elc_microwave_rtd):
"""調理時の消費電力を計算する
Parameters
----------
P_Elc_microwave_cook_rtd : float
調理時の定格待機電力, W
Returns
----------
P_Elc_microwave_cook : float
調理時の消費電力, W
"""
P_Elc_microwave_cook = 0.9373 * P_Elc_microwave_rtd
return P_Elc_microwave_cook
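# Illustrative sketch (not part of the original file): a worked example of the two formulas
# above. The numbers are arbitrary: a 1200 W rated microwave used for 0.1 h in one hour slot
# draws 1200 * 0.9373 = 1124.76 W while cooking, i.e. 1124.76 * 0.1 * 1e-3 ≈ 0.1125 kWh.
import numpy as np

t_cook = np.zeros(8760)
t_cook[7] = 0.1                                    # 0.1 h of cooking in the 8th hour of the year
E = get_E_Elc_microwave_d_t(1200.0, t_cook)
print(E[7])                                        # ≈ 0.112476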
| 22.74
| 77
| 0.664908
|
def get_E_Elc_microwave_d_t(P_Elc_microwave_cook_rtd, t_microwave_cook_d_t):
P_Elc_microwave_cook = get_P_Elc_microwave_cook(P_Elc_microwave_cook_rtd)
E_Elc_microwave_d_t = P_Elc_microwave_cook * t_microwave_cook_d_t
E_Elc_microwave_d_t = E_Elc_microwave_d_t * 10**(-3)
return E_Elc_microwave_d_t
def get_P_Elc_microwave_cook(P_Elc_microwave_rtd):
P_Elc_microwave_cook = 0.9373 * P_Elc_microwave_rtd
return P_Elc_microwave_cook
| true
| true
|
79029da5f558df950350ea69fc65ee408f050a19
| 5,388
|
py
|
Python
|
test2.py
|
pection/Interactionstudent
|
74291812d3a5455c805b7ccc2d76dad28e6db77f
|
[
"MIT"
] | null | null | null |
test2.py
|
pection/Interactionstudent
|
74291812d3a5455c805b7ccc2d76dad28e6db77f
|
[
"MIT"
] | null | null | null |
test2.py
|
pection/Interactionstudent
|
74291812d3a5455c805b7ccc2d76dad28e6db77f
|
[
"MIT"
] | 1
|
2020-11-24T18:19:02.000Z
|
2020-11-24T18:19:02.000Z
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mikeeiei.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(954, 600)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.formLayoutWidget = QtGui.QWidget(self.centralwidget)
self.formLayoutWidget.setGeometry(QtCore.QRect(80, 240, 160, 141))
self.formLayoutWidget.setObjectName(_fromUtf8("formLayoutWidget"))
self.formLayout = QtGui.QFormLayout(self.formLayoutWidget)
self.formLayout.setObjectName(_fromUtf8("formLayout"))
self.kaojai = QtGui.QPushButton(self.formLayoutWidget)
self.kaojai.setStyleSheet(_fromUtf8("int main(int argc, char *argv[])\n"
"\n"
"#upLeft {\n"
"background-color: transparent;\n"
"border-image: url(:/images/frame.png);\n"
"background: none;\n"
"border: none;\n"
"background-repeat: none;\n"
"}\n"
"{\n"
"border-image: url(:mike2.jpg);\n"
"}"))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/mike/mike.jpg")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.kaojai.setIcon(icon)
self.kaojai.setObjectName(_fromUtf8("kaojai"))
self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.kaojai)
self.maikaojai = QtGui.QPushButton(self.formLayoutWidget)
self.maikaojai.setObjectName(_fromUtf8("maikaojai"))
self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.maikaojai)
self.vote = QtGui.QPushButton(self.formLayoutWidget)
self.vote.setObjectName(_fromUtf8("vote"))
self.formLayout.setWidget(2, QtGui.QFormLayout.LabelRole, self.vote)
self.question = QtGui.QPushButton(self.formLayoutWidget)
self.question.setObjectName(_fromUtf8("question"))
self.formLayout.setWidget(3, QtGui.QFormLayout.LabelRole, self.question)
self.frame = QtGui.QFrame(self.centralwidget)
self.frame.setGeometry(QtCore.QRect(0, 0, 331, 161))
self.frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame.setFrameShadow(QtGui.QFrame.Raised)
self.frame.setObjectName(_fromUtf8("frame"))
self.tabWidget = QtGui.QTabWidget(self.frame)
self.tabWidget.setGeometry(QtCore.QRect(100, 110, 135, 80))
self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
self.tab = QtGui.QWidget()
self.tab.setObjectName(_fromUtf8("tab"))
self.tabWidget.addTab(self.tab, _fromUtf8(""))
self.tab_2 = QtGui.QWidget()
self.tab_2.setObjectName(_fromUtf8("tab_2"))
self.tabWidget.addTab(self.tab_2, _fromUtf8(""))
self.label = QtGui.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(60, 100, 831, 271))
self.label.setText(_fromUtf8(""))
self.label.setPixmap(QtGui.QPixmap(_fromUtf8("")))
self.label.setObjectName(_fromUtf8("label"))
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.toolBar = QtGui.QToolBar(MainWindow)
self.toolBar.setObjectName(_fromUtf8("toolBar"))
MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
self.actionSend = QtGui.QAction(MainWindow)
self.actionSend.setIcon(icon)
self.actionSend.setObjectName(_fromUtf8("actionSend"))
self.toolBar.addAction(self.actionSend)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None))
self.kaojai.setText(_translate("MainWindow", "PushButton", None))
self.maikaojai.setText(_translate("MainWindow", "PushButton", None))
self.vote.setText(_translate("MainWindow", "PushButton", None))
self.question.setText(_translate("MainWindow", "d", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Tab 1", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Tab 2", None))
self.toolBar.setWindowTitle(_translate("MainWindow", "toolBar", None))
self.actionSend.setText(_translate("MainWindow", "send", None))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
MainWindow = QtGui.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| 45.661017
| 111
| 0.68467
|
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(954, 600)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.formLayoutWidget = QtGui.QWidget(self.centralwidget)
self.formLayoutWidget.setGeometry(QtCore.QRect(80, 240, 160, 141))
self.formLayoutWidget.setObjectName(_fromUtf8("formLayoutWidget"))
self.formLayout = QtGui.QFormLayout(self.formLayoutWidget)
self.formLayout.setObjectName(_fromUtf8("formLayout"))
self.kaojai = QtGui.QPushButton(self.formLayoutWidget)
self.kaojai.setStyleSheet(_fromUtf8("int main(int argc, char *argv[])\n"
"\n"
"#upLeft {\n"
"background-color: transparent;\n"
"border-image: url(:/images/frame.png);\n"
"background: none;\n"
"border: none;\n"
"background-repeat: none;\n"
"}\n"
"{\n"
"border-image: url(:mike2.jpg);\n"
"}"))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/mike/mike.jpg")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.kaojai.setIcon(icon)
self.kaojai.setObjectName(_fromUtf8("kaojai"))
self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.kaojai)
self.maikaojai = QtGui.QPushButton(self.formLayoutWidget)
self.maikaojai.setObjectName(_fromUtf8("maikaojai"))
self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.maikaojai)
self.vote = QtGui.QPushButton(self.formLayoutWidget)
self.vote.setObjectName(_fromUtf8("vote"))
self.formLayout.setWidget(2, QtGui.QFormLayout.LabelRole, self.vote)
self.question = QtGui.QPushButton(self.formLayoutWidget)
self.question.setObjectName(_fromUtf8("question"))
self.formLayout.setWidget(3, QtGui.QFormLayout.LabelRole, self.question)
self.frame = QtGui.QFrame(self.centralwidget)
self.frame.setGeometry(QtCore.QRect(0, 0, 331, 161))
self.frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame.setFrameShadow(QtGui.QFrame.Raised)
self.frame.setObjectName(_fromUtf8("frame"))
self.tabWidget = QtGui.QTabWidget(self.frame)
self.tabWidget.setGeometry(QtCore.QRect(100, 110, 135, 80))
self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
self.tab = QtGui.QWidget()
self.tab.setObjectName(_fromUtf8("tab"))
self.tabWidget.addTab(self.tab, _fromUtf8(""))
self.tab_2 = QtGui.QWidget()
self.tab_2.setObjectName(_fromUtf8("tab_2"))
self.tabWidget.addTab(self.tab_2, _fromUtf8(""))
self.label = QtGui.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(60, 100, 831, 271))
self.label.setText(_fromUtf8(""))
self.label.setPixmap(QtGui.QPixmap(_fromUtf8("")))
self.label.setObjectName(_fromUtf8("label"))
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.toolBar = QtGui.QToolBar(MainWindow)
self.toolBar.setObjectName(_fromUtf8("toolBar"))
MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
self.actionSend = QtGui.QAction(MainWindow)
self.actionSend.setIcon(icon)
self.actionSend.setObjectName(_fromUtf8("actionSend"))
self.toolBar.addAction(self.actionSend)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None))
self.kaojai.setText(_translate("MainWindow", "PushButton", None))
self.maikaojai.setText(_translate("MainWindow", "PushButton", None))
self.vote.setText(_translate("MainWindow", "PushButton", None))
self.question.setText(_translate("MainWindow", "d", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Tab 1", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Tab 2", None))
self.toolBar.setWindowTitle(_translate("MainWindow", "toolBar", None))
self.actionSend.setText(_translate("MainWindow", "send", None))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
MainWindow = QtGui.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| true
| true
|
79029dc59c4fb6e36d99fd737bcfda2ff361918c
| 5,173
|
py
|
Python
|
kitsune/questions/urls.py
|
safwanrahman/Ford
|
87e91dea1cc22b1759eea81cef069359ccb5cd0b
|
[
"BSD-3-Clause"
] | 1
|
2020-11-03T23:46:56.000Z
|
2020-11-03T23:46:56.000Z
|
kitsune/questions/urls.py
|
983834572/kitsune
|
d4b3dcdce3294214dbc659c375c8491504473213
|
[
"BSD-3-Clause"
] | 8
|
2020-06-05T18:42:14.000Z
|
2022-03-11T23:26:51.000Z
|
kitsune/questions/urls.py
|
safwanrahman/Ford
|
87e91dea1cc22b1759eea81cef069359ccb5cd0b
|
[
"BSD-3-Clause"
] | 1
|
2020-11-03T23:47:55.000Z
|
2020-11-03T23:47:55.000Z
|
from django.conf.urls import patterns, url
from django.contrib.contenttypes.models import ContentType
from kitsune.questions.feeds import (
QuestionsFeed, AnswersFeed, TaggedQuestionsFeed)
from kitsune.questions.models import Question, Answer
from kitsune.flagit import views as flagit_views
urlpatterns = patterns(
'kitsune.questions.views',
url(r'^$', 'product_list', name='questions.home'),
url(r'^/answer-preview-async$', 'answer_preview_async',
name='questions.answer_preview_async'),
url(r'^/dashboard/metrics$', 'metrics', name='questions.metrics'),
url(r'^/dashboard/metrics/(?P<locale_code>[^/]+)$', 'metrics',
name='questions.locale_metrics'),
# AAQ
url(r'^/new$', 'aaq', name='questions.aaq_step1'),
url(r'^/new/confirm$', 'aaq_confirm', name='questions.aaq_confirm'),
url(r'^/new/(?P<product_key>[\w\-]+)$',
'aaq_step2', name='questions.aaq_step2'),
url(r'^/new/(?P<product_key>[\w\-]+)/(?P<category_key>[\w\-]+)$',
'aaq_step3', name='questions.aaq_step3'),
url(r'^/new/(?P<product_key>[\w\-]+)/(?P<category_key>[\w\-]+)/search$',
'aaq_step4', name='questions.aaq_step4'),
url(r'^/new/(?P<product_key>[\w\-]+)/(?P<category_key>[\w\-]+)/form$',
'aaq_step5', name='questions.aaq_step5'),
# AAQ flow for Marketplace
url(r'^/marketplace$', 'marketplace', name='questions.marketplace_aaq'),
url(r'^/marketplace/success$',
'marketplace_success', name='questions.marketplace_aaq_success'),
url(r'^/marketplace/refund$', 'marketplace_refund',
name='questions.marketplace_refund'),
url(r'^/marketplace/developer-request$', 'marketplace_developer_request',
name='questions.marketplace_developer_request'),
url(r'^/marketplace/(?P<category_slug>[\w\-]+)$',
'marketplace_category', name='questions.marketplace_aaq_category'),
# TODO: Factor out `/(?P<question_id>\d+)` below
url(r'^/(?P<question_id>\d+)$', 'question_details',
name='questions.details'),
url(r'^/(?P<question_id>\d+)/edit$',
'edit_question', name='questions.edit_question'),
url(r'^/(?P<question_id>\d+)/edit-details$',
'edit_details', name='questions.edit_details'),
url(r'^/(?P<question_id>\d+)/reply$', 'reply', name='questions.reply'),
url(r'^/(?P<question_id>\d+)/delete$', 'delete_question',
name='questions.delete'),
url(r'^/(?P<question_id>\d+)/lock$', 'lock_question',
name='questions.lock'),
url(r'^/(?P<question_id>\d+)/archive$', 'archive_question',
name='questions.archive'),
url(r'^/(?P<question_id>\d+)/delete/(?P<answer_id>\d+)$',
'delete_answer', name='questions.delete_answer'),
url(r'^/(?P<question_id>\d+)/edit/(?P<answer_id>\d+)$', 'edit_answer',
name='questions.edit_answer'),
url(r'^/(?P<question_id>\d+)/solve/(?P<answer_id>\d+)$', 'solve',
name='questions.solve'),
url(r'^/(?P<question_id>\d+)/unsolve/(?P<answer_id>\d+)$', 'unsolve',
name='questions.unsolve'),
url(r'^/(?P<question_id>\d+)/vote$', 'question_vote',
name='questions.vote'),
url(r'^/(?P<question_id>\d+)/vote/(?P<answer_id>\d+)$',
'answer_vote', name='questions.answer_vote'),
url(r'^/(?P<question_id>\d+)/add-tag$', 'add_tag',
name='questions.add_tag'),
url(r'^/(?P<question_id>\d+)/remove-tag$', 'remove_tag',
name='questions.remove_tag'),
url(r'^/(?P<question_id>\d+)/add-tag-async$', 'add_tag_async',
name='questions.add_tag_async'),
url(r'^/(?P<question_id>\d+)/remove-tag-async$', 'remove_tag_async',
name='questions.remove_tag_async'),
# Feeds
# Note: this needs to be above questions.list because "feed"
# matches the product slug regex.
url(r'^/feed$', QuestionsFeed(), name='questions.feed'),
url(r'^/(?P<question_id>\d+)/feed$', AnswersFeed(),
name='questions.answers.feed'),
url(r'^/tagged/(?P<tag_slug>[\w\-]+)/feed$', TaggedQuestionsFeed(),
name='questions.tagged_feed'),
# Mark as spam
url(r'^/mark_spam$', 'mark_spam', name='questions.mark_spam'),
url(r'^/unmark_spam$', 'unmark_spam', name='questions.unmark_spam'),
# Question lists
url(r'^/(?P<product_slug>[\w+\-\,]+)$', 'question_list',
name='questions.list'),
# Flag content ("Report this post")
url(r'^/(?P<object_id>\d+)/flag$', flagit_views.flag,
{'content_type': ContentType.objects.get_for_model(Question).id},
name='questions.flag'),
url(r'^/(?P<question_id>\d+)/flag/(?P<object_id>\d+)$', flagit_views.flag,
{'content_type': ContentType.objects.get_for_model(Answer).id},
name='questions.answer_flag'),
    # Subscribe by email
url(r'^/(?P<question_id>\d+)/watch$', 'watch_question',
name='questions.watch'),
url(r'^/(?P<question_id>\d+)/unwatch$', 'unwatch_question',
name='questions.unwatch'),
url(r'^/confirm/(?P<watch_id>\d+)/(?P<secret>\w+)$', 'activate_watch',
name='questions.activate_watch'),
url(r'^/unsubscribe/(?P<watch_id>\d+)/(?P<secret>\w+)$',
'unsubscribe_watch', name='questions.unsubscribe'),
)
| 45.778761
| 78
| 0.626329
|
from django.conf.urls import patterns, url
from django.contrib.contenttypes.models import ContentType
from kitsune.questions.feeds import (
QuestionsFeed, AnswersFeed, TaggedQuestionsFeed)
from kitsune.questions.models import Question, Answer
from kitsune.flagit import views as flagit_views
urlpatterns = patterns(
'kitsune.questions.views',
url(r'^$', 'product_list', name='questions.home'),
url(r'^/answer-preview-async$', 'answer_preview_async',
name='questions.answer_preview_async'),
url(r'^/dashboard/metrics$', 'metrics', name='questions.metrics'),
url(r'^/dashboard/metrics/(?P<locale_code>[^/]+)$', 'metrics',
name='questions.locale_metrics'),
url(r'^/new$', 'aaq', name='questions.aaq_step1'),
url(r'^/new/confirm$', 'aaq_confirm', name='questions.aaq_confirm'),
url(r'^/new/(?P<product_key>[\w\-]+)$',
'aaq_step2', name='questions.aaq_step2'),
url(r'^/new/(?P<product_key>[\w\-]+)/(?P<category_key>[\w\-]+)$',
'aaq_step3', name='questions.aaq_step3'),
url(r'^/new/(?P<product_key>[\w\-]+)/(?P<category_key>[\w\-]+)/search$',
'aaq_step4', name='questions.aaq_step4'),
url(r'^/new/(?P<product_key>[\w\-]+)/(?P<category_key>[\w\-]+)/form$',
'aaq_step5', name='questions.aaq_step5'),
url(r'^/marketplace$', 'marketplace', name='questions.marketplace_aaq'),
url(r'^/marketplace/success$',
'marketplace_success', name='questions.marketplace_aaq_success'),
url(r'^/marketplace/refund$', 'marketplace_refund',
name='questions.marketplace_refund'),
url(r'^/marketplace/developer-request$', 'marketplace_developer_request',
name='questions.marketplace_developer_request'),
url(r'^/marketplace/(?P<category_slug>[\w\-]+)$',
'marketplace_category', name='questions.marketplace_aaq_category'),
url(r'^/(?P<question_id>\d+)$', 'question_details',
name='questions.details'),
url(r'^/(?P<question_id>\d+)/edit$',
'edit_question', name='questions.edit_question'),
url(r'^/(?P<question_id>\d+)/edit-details$',
'edit_details', name='questions.edit_details'),
url(r'^/(?P<question_id>\d+)/reply$', 'reply', name='questions.reply'),
url(r'^/(?P<question_id>\d+)/delete$', 'delete_question',
name='questions.delete'),
url(r'^/(?P<question_id>\d+)/lock$', 'lock_question',
name='questions.lock'),
url(r'^/(?P<question_id>\d+)/archive$', 'archive_question',
name='questions.archive'),
url(r'^/(?P<question_id>\d+)/delete/(?P<answer_id>\d+)$',
'delete_answer', name='questions.delete_answer'),
url(r'^/(?P<question_id>\d+)/edit/(?P<answer_id>\d+)$', 'edit_answer',
name='questions.edit_answer'),
url(r'^/(?P<question_id>\d+)/solve/(?P<answer_id>\d+)$', 'solve',
name='questions.solve'),
url(r'^/(?P<question_id>\d+)/unsolve/(?P<answer_id>\d+)$', 'unsolve',
name='questions.unsolve'),
url(r'^/(?P<question_id>\d+)/vote$', 'question_vote',
name='questions.vote'),
url(r'^/(?P<question_id>\d+)/vote/(?P<answer_id>\d+)$',
'answer_vote', name='questions.answer_vote'),
url(r'^/(?P<question_id>\d+)/add-tag$', 'add_tag',
name='questions.add_tag'),
url(r'^/(?P<question_id>\d+)/remove-tag$', 'remove_tag',
name='questions.remove_tag'),
url(r'^/(?P<question_id>\d+)/add-tag-async$', 'add_tag_async',
name='questions.add_tag_async'),
url(r'^/(?P<question_id>\d+)/remove-tag-async$', 'remove_tag_async',
name='questions.remove_tag_async'),
url(r'^/feed$', QuestionsFeed(), name='questions.feed'),
url(r'^/(?P<question_id>\d+)/feed$', AnswersFeed(),
name='questions.answers.feed'),
url(r'^/tagged/(?P<tag_slug>[\w\-]+)/feed$', TaggedQuestionsFeed(),
name='questions.tagged_feed'),
url(r'^/mark_spam$', 'mark_spam', name='questions.mark_spam'),
url(r'^/unmark_spam$', 'unmark_spam', name='questions.unmark_spam'),
url(r'^/(?P<product_slug>[\w+\-\,]+)$', 'question_list',
name='questions.list'),
url(r'^/(?P<object_id>\d+)/flag$', flagit_views.flag,
{'content_type': ContentType.objects.get_for_model(Question).id},
name='questions.flag'),
url(r'^/(?P<question_id>\d+)/flag/(?P<object_id>\d+)$', flagit_views.flag,
{'content_type': ContentType.objects.get_for_model(Answer).id},
name='questions.answer_flag'),
url(r'^/(?P<question_id>\d+)/watch$', 'watch_question',
name='questions.watch'),
url(r'^/(?P<question_id>\d+)/unwatch$', 'unwatch_question',
name='questions.unwatch'),
url(r'^/confirm/(?P<watch_id>\d+)/(?P<secret>\w+)$', 'activate_watch',
name='questions.activate_watch'),
url(r'^/unsubscribe/(?P<watch_id>\d+)/(?P<secret>\w+)$',
'unsubscribe_watch', name='questions.unsubscribe'),
)
| true
| true
|
79029f0a422fecf611aa560616e8b9231a7b35ad
| 2,930
|
py
|
Python
|
aps/load_region.py
|
kmunve/APS
|
4c2f254ede83a3a311cbedc90c76db9ee367a000
|
[
"MIT"
] | null | null | null |
aps/load_region.py
|
kmunve/APS
|
4c2f254ede83a3a311cbedc90c76db9ee367a000
|
[
"MIT"
] | 1
|
2018-12-14T14:47:13.000Z
|
2018-12-14T14:47:13.000Z
|
aps/load_region.py
|
kmunve/APS
|
4c2f254ede83a3a311cbedc90c76db9ee367a000
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
from netCDF4 import Dataset
def load_region(region_id, local=False, return_regions=False):
if local:
_vr = Dataset(
os.path.join(os.path.dirname(os.path.abspath(__file__)), r"data/terrain_parameters/VarslingsOmr_2017.nc"),
"r")
# flip up-down because Meps data is upside down
#_regions = np.flipud(_vr.variables["LokalOmr_2018"][:])
_regions = _vr.variables["LokalOmr_2018"][:]
else:
_vr = Dataset(
os.path.join(os.path.dirname(os.path.abspath(__file__)), r"data/terrain_parameters/VarslingsOmr_2019.nc"),
"r")
# flip up-down because Meps data is upside down
#_regions = np.flipud(_vr.variables["skredomr19_km"][:])
_regions = _vr.variables["skredomr19_km"][:]
print("Missing value: {mv}".format(mv=_vr.variables["skredomr19_km"].missing_value))
_region_bounds = np.where(_regions == region_id) # just to get the bounding box
# get the lower left and upper right corner of a rectangle around the region
y_min, y_max, x_min, x_max = min(_region_bounds[0].flatten()), max(_region_bounds[0].flatten()), \
min(_region_bounds[1].flatten()), max(_region_bounds[1].flatten())
#reg_mask = np.ma.masked_where(_regions[y_min:y_max, x_min:x_max] == region_id, _regions[y_min:y_max, x_min:x_max]).mask
#reg_mask = np.where(_regions[y_min:y_max, x_min:x_max] == region_id, _regions[y_min:y_max, x_min:x_max], np.nan)
reg_mask = np.where(_regions[y_min:y_max, x_min:x_max] == region_id, 1., np.nan)
#reg_mask = np.ma.masked_where(_reg_mask == region_id).mask
_vr.close()
if return_regions:
return _regions, reg_mask, y_min, y_max, x_min, x_max
else:
return reg_mask, y_min, y_max, x_min, x_max
def clip_region(nc_variable, region_mask, t_index, y_min, y_max, x_min, x_max):
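    # Apply the region mask to the bounding-box slice of a netCDF variable.
    # 2D arrays are indexed as (y, x), 3D as (t, y, x) and 4D as (t, 0, y, x),
    # i.e. only the first slice along the second axis of a 4D variable is used.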
s = len(nc_variable.shape)
if s == 2:
#return np.flipud(region_mask * nc_variable[y_min:y_max, x_min:x_max])
return (region_mask * nc_variable[y_min:y_max, x_min:x_max])
elif s == 3:
#return np.flipud(region_mask * nc_variable[t_index, y_min:y_max, x_min:x_max])
return (region_mask * nc_variable[t_index, y_min:y_max, x_min:x_max])
elif s == 4:
#return np.flipud(region_mask * nc_variable[t_index, 0, y_min:y_max, x_min:x_max])
return (region_mask * nc_variable[t_index, 0, y_min:y_max, x_min:x_max])
else:
print('Input array needs to have 2- to 4-dimensions: {0} were given.'.format(s))
if __name__ == "__main__":
import matplotlib.pyplot as plt
regions, region_mask, y_min, y_max, x_min, x_max = load_region(3013, return_regions=True)
print(region_mask, type(region_mask), np.unique(region_mask))
clp = clip_region(regions, region_mask, 0, y_min, y_max, x_min, x_max)
plt.imshow(clp)
plt.show()
k = 'm'
| 43.731343
| 124
| 0.669283
|
import os
import numpy as np
from netCDF4 import Dataset
def load_region(region_id, local=False, return_regions=False):
if local:
_vr = Dataset(
os.path.join(os.path.dirname(os.path.abspath(__file__)), r"data/terrain_parameters/VarslingsOmr_2017.nc"),
"r")
_regions = _vr.variables["LokalOmr_2018"][:]
else:
_vr = Dataset(
os.path.join(os.path.dirname(os.path.abspath(__file__)), r"data/terrain_parameters/VarslingsOmr_2019.nc"),
"r")
_regions = _vr.variables["skredomr19_km"][:]
print("Missing value: {mv}".format(mv=_vr.variables["skredomr19_km"].missing_value))
_region_bounds = np.where(_regions == region_id)
y_min, y_max, x_min, x_max = min(_region_bounds[0].flatten()), max(_region_bounds[0].flatten()), \
min(_region_bounds[1].flatten()), max(_region_bounds[1].flatten())
reg_mask = np.where(_regions[y_min:y_max, x_min:x_max] == region_id, 1., np.nan)
_vr.close()
if return_regions:
return _regions, reg_mask, y_min, y_max, x_min, x_max
else:
return reg_mask, y_min, y_max, x_min, x_max
def clip_region(nc_variable, region_mask, t_index, y_min, y_max, x_min, x_max):
s = len(nc_variable.shape)
if s == 2:
return (region_mask * nc_variable[y_min:y_max, x_min:x_max])
elif s == 3:
return (region_mask * nc_variable[t_index, y_min:y_max, x_min:x_max])
elif s == 4:
return (region_mask * nc_variable[t_index, 0, y_min:y_max, x_min:x_max])
else:
print('Input array needs to have 2- to 4-dimensions: {0} were given.'.format(s))
if __name__ == "__main__":
import matplotlib.pyplot as plt
regions, region_mask, y_min, y_max, x_min, x_max = load_region(3013, return_regions=True)
print(region_mask, type(region_mask), np.unique(region_mask))
clp = clip_region(regions, region_mask, 0, y_min, y_max, x_min, x_max)
plt.imshow(clp)
plt.show()
k = 'm'
| true
| true
|
79029f33ffec745e921a3a7d539c19e44a1bf04d
| 371
|
py
|
Python
|
pypy/module/clr/test/test_interp_clr.py
|
camillobruni/pygirl
|
ddbd442d53061d6ff4af831c1eab153bcc771b5a
|
[
"MIT"
] | 12
|
2016-01-06T07:10:28.000Z
|
2021-05-13T23:02:02.000Z
|
pypy/module/clr/test/test_interp_clr.py
|
woodrow/pyoac
|
b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7
|
[
"MIT"
] | null | null | null |
pypy/module/clr/test/test_interp_clr.py
|
woodrow/pyoac
|
b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7
|
[
"MIT"
] | 2
|
2020-07-09T08:14:22.000Z
|
2021-01-15T18:01:25.000Z
|
from pypy.module.clr.interp_clr import split_fullname
def test_split_fullname():
split = split_fullname
assert split('Foo') == ('', 'Foo')
assert split('System.Foo') == ('System', 'Foo')
assert split('System.Foo.Bar') == ('System.Foo', 'Bar')
assert split('System.Foo.A+B') == ('System.Foo', 'A+B')
assert split('System.') == ('System', '')
| 33.727273
| 59
| 0.606469
|
from pypy.module.clr.interp_clr import split_fullname
def test_split_fullname():
split = split_fullname
assert split('Foo') == ('', 'Foo')
assert split('System.Foo') == ('System', 'Foo')
assert split('System.Foo.Bar') == ('System.Foo', 'Bar')
assert split('System.Foo.A+B') == ('System.Foo', 'A+B')
assert split('System.') == ('System', '')
| true
| true
|
79029f3ce26757d6d43f6b0a0b2f72cfa70aac96
| 1,563
|
py
|
Python
|
model.py
|
Wowol/Piano-Bot
|
feab884daa4bbe947bf6d95d816664eb7e46cc48
|
[
"MIT"
] | null | null | null |
model.py
|
Wowol/Piano-Bot
|
feab884daa4bbe947bf6d95d816664eb7e46cc48
|
[
"MIT"
] | null | null | null |
model.py
|
Wowol/Piano-Bot
|
feab884daa4bbe947bf6d95d816664eb7e46cc48
|
[
"MIT"
] | null | null | null |
from tensorflow.keras.callbacks import LambdaCallback
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, LSTM
from tensorflow.keras.layers import Dropout, TimeDistributed
try:
from tensorflow.python.keras.layers import CuDNNLSTM as lstm
except ImportError:
from tensorflow.keras.layers import Dense, Activation, LSTM as lstm
from tensorflow.keras.layers import Dropout
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.models import load_model as lm
import numpy as np
import random
import sys
import io
from midi import Midi
class Model:
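    # Thin wrapper around a Keras Sequential network: two stacked LSTM layers
    # (CuDNNLSTM when it can be imported), dropout, and a softmax Dense layer
    # over the set of unique notes, trained with categorical cross-entropy.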
def create(self, size, unique_notes, optimizer=None, hidden_size=128):
self.model = Sequential()
self.model.add(lstm(hidden_size, input_shape=(
size, unique_notes), return_sequences=True))
self.model.add(lstm(hidden_size))
self.model.add(Dropout(0.2))
self.model.add(Dense(unique_notes))
self.model.add(Activation('softmax'))
self.model.compile(loss='categorical_crossentropy', optimizer=RMSprop(
lr=0.01) if optimizer == None else optimizer)
def load_from_file(self, name="model.h5"):
self.model = lm(name)
def save_to_file(self, name="model.h5"):
self.model.save(name)
def learn(self, inputs, outputs, batch_size=256, epochs=185):
self.model.fit(inputs, outputs,
batch_size=batch_size,
epochs=epochs, verbose=True)
def predict(self, arr):
return self.model.predict(arr)
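# Minimal usage sketch (illustrative only; the shapes and values below are
# assumptions, not taken from this repository):
#   m = Model()
#   m.create(size=50, unique_notes=88)     # sequences of 50 one-hot notes
#   m.learn(train_x, train_y, epochs=5)    # train_x: (n, 50, 88), train_y: (n, 88)
#   next_note = m.predict(train_x[:1])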
| 34.733333
| 78
| 0.706974
|
from tensorflow.keras.callbacks import LambdaCallback
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, LSTM
from tensorflow.keras.layers import Dropout, TimeDistributed
try:
from tensorflow.python.keras.layers import CuDNNLSTM as lstm
except ImportError:
from tensorflow.keras.layers import Dense, Activation, LSTM as lstm
from tensorflow.keras.layers import Dropout
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.models import load_model as lm
import numpy as np
import random
import sys
import io
from midi import Midi
class Model:
def create(self, size, unique_notes, optimizer=None, hidden_size=128):
self.model = Sequential()
self.model.add(lstm(hidden_size, input_shape=(
size, unique_notes), return_sequences=True))
self.model.add(lstm(hidden_size))
self.model.add(Dropout(0.2))
self.model.add(Dense(unique_notes))
self.model.add(Activation('softmax'))
self.model.compile(loss='categorical_crossentropy', optimizer=RMSprop(
lr=0.01) if optimizer == None else optimizer)
def load_from_file(self, name="model.h5"):
self.model = lm(name)
def save_to_file(self, name="model.h5"):
self.model.save(name)
def learn(self, inputs, outputs, batch_size=256, epochs=185):
self.model.fit(inputs, outputs,
batch_size=batch_size,
epochs=epochs, verbose=True)
def predict(self, arr):
return self.model.predict(arr)
| true
| true
|
7902a09eee3aed46a7e4e13f9c77ea9c24f3415b
| 1,035
|
py
|
Python
|
kubernetes/test/test_apps_v1beta1_deployment_list.py
|
dix000p/kubernetes-client-python
|
22e473e02883aca1058606092c86311f02f42be2
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_apps_v1beta1_deployment_list.py
|
dix000p/kubernetes-client-python
|
22e473e02883aca1058606092c86311f02f42be2
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_apps_v1beta1_deployment_list.py
|
dix000p/kubernetes-client-python
|
22e473e02883aca1058606092c86311f02f42be2
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.apps_v1beta1_deployment_list import AppsV1beta1DeploymentList
class TestAppsV1beta1DeploymentList(unittest.TestCase):
""" AppsV1beta1DeploymentList unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testAppsV1beta1DeploymentList(self):
"""
Test AppsV1beta1DeploymentList
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.apps_v1beta1_deployment_list.AppsV1beta1DeploymentList()
pass
if __name__ == '__main__':
unittest.main()
| 23
| 105
| 0.729469
|
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.apps_v1beta1_deployment_list import AppsV1beta1DeploymentList
class TestAppsV1beta1DeploymentList(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testAppsV1beta1DeploymentList(self):
pass
if __name__ == '__main__':
unittest.main()
| true
| true
|
7902a1a3df2ef642b65640c1f453148653983baf
| 2,699
|
py
|
Python
|
scripts/count_singletons.py
|
tomarovsky/MACE
|
eccc050820f4bb9e483d2592e485ee372ac9453b
|
[
"Apache-2.0"
] | null | null | null |
scripts/count_singletons.py
|
tomarovsky/MACE
|
eccc050820f4bb9e483d2592e485ee372ac9453b
|
[
"Apache-2.0"
] | null | null | null |
scripts/count_singletons.py
|
tomarovsky/MACE
|
eccc050820f4bb9e483d2592e485ee372ac9453b
|
[
"Apache-2.0"
] | 8
|
2018-05-17T05:00:11.000Z
|
2022-03-06T04:08:24.000Z
|
#!/usr/bin/env python
__author__ = 'Sergei F. Kliver'
import argparse
from RouToolPa.Parsers.VCF import CollectionVCF
from MACE.Routines import StatsVCF, Visualization
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input_file", action="store", dest="input", required=True,
help="Input vcf file with mutations.")
parser.add_argument("-o", "--output_prefix", action="store", dest="output_prefix",
required=True,
help="Prefix of output files")
parser.add_argument("-d", "--dpi", action="store", dest="dpi", type=int, default=200,
help="Dpi of figure")
parser.add_argument("-f", "--figsize", action="store", dest="figsize",
type=lambda s: map(int, s.split(",")),
default=(5, 5),
help="Size of figure in inches. X and Y values should be separated "
"by comma. Default: 5,5")
parser.add_argument("-e", "--output_formats", action="store", dest="output_formats",
type=lambda s: s.split(","),
default=["png"],
help="Comma-separated list of formats (supported by matlotlib) "
"of output figure.Default: png")
parser.add_argument("-l", "--title", action="store", dest="title",
default=None,
help="Title of figure. Default: not set")
parser.add_argument("-m", "--parsing_mode", action="store", dest="parsing_mode",
default="genotypes",
help="Parsing mode. Allowed: genotypes(default), 'coordinates_and_genotypes', 'complete'")
"""
parser.add_argument("-a", "--scaffold_white_list", action="store", dest="scaffold_white_list", default=[],
type=lambda s: s.split(","),
help="Comma-separated list of the only scaffolds to draw. Default: all")
parser.add_argument("-b", "--scaffold_black_list", action="store", dest="scaffold_black_list", default=[],
type=lambda s: s.split(","),
help="Comma-separated list of scaffolds to skip at drawing. Default: not set")
"""
args = parser.parse_args()
mutations = CollectionVCF(args.input, parsing_mode="genotypes")
StatsVCF.count_singletons(collection_vcf=mutations, output_prefix=args.output_prefix)
"""
Visualization.zygoty_bar_plot(StatsVCF.count_zygoty(mutations, outfile="%s.counts" % args.output_prefix),
args.output_prefix, extension_list=args.output_formats,
figsize=args.figsize,
dpi=args.dpi,
title=args.title)
"""
| 51.903846
| 110
| 0.593183
|
__author__ = 'Sergei F. Kliver'
import argparse
from RouToolPa.Parsers.VCF import CollectionVCF
from MACE.Routines import StatsVCF, Visualization
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input_file", action="store", dest="input", required=True,
help="Input vcf file with mutations.")
parser.add_argument("-o", "--output_prefix", action="store", dest="output_prefix",
required=True,
help="Prefix of output files")
parser.add_argument("-d", "--dpi", action="store", dest="dpi", type=int, default=200,
help="Dpi of figure")
parser.add_argument("-f", "--figsize", action="store", dest="figsize",
type=lambda s: map(int, s.split(",")),
default=(5, 5),
help="Size of figure in inches. X and Y values should be separated "
"by comma. Default: 5,5")
parser.add_argument("-e", "--output_formats", action="store", dest="output_formats",
type=lambda s: s.split(","),
default=["png"],
help="Comma-separated list of formats (supported by matlotlib) "
"of output figure.Default: png")
parser.add_argument("-l", "--title", action="store", dest="title",
default=None,
help="Title of figure. Default: not set")
parser.add_argument("-m", "--parsing_mode", action="store", dest="parsing_mode",
default="genotypes",
help="Parsing mode. Allowed: genotypes(default), 'coordinates_and_genotypes', 'complete'")
args = parser.parse_args()
mutations = CollectionVCF(args.input, parsing_mode="genotypes")
StatsVCF.count_singletons(collection_vcf=mutations, output_prefix=args.output_prefix)
| true
| true
|
7902a2557d81bc45729dfd2a009cdcd04d41f097
| 1,043
|
py
|
Python
|
tools/mo/openvino/tools/mo/ops/prelu.py
|
ryanloney/openvino-1
|
4e0a740eb3ee31062ba0df88fcf438564f67edb7
|
[
"Apache-2.0"
] | 1,127
|
2018-10-15T14:36:58.000Z
|
2020-04-20T09:29:44.000Z
|
tools/mo/openvino/tools/mo/ops/prelu.py
|
ryanloney/openvino-1
|
4e0a740eb3ee31062ba0df88fcf438564f67edb7
|
[
"Apache-2.0"
] | 439
|
2018-10-20T04:40:35.000Z
|
2020-04-19T05:56:25.000Z
|
tools/mo/openvino/tools/mo/ops/prelu.py
|
ryanloney/openvino-1
|
4e0a740eb3ee31062ba0df88fcf438564f67edb7
|
[
"Apache-2.0"
] | 414
|
2018-10-17T05:53:46.000Z
|
2020-04-16T17:29:53.000Z
|
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer
from openvino.tools.mo.graph.graph import Graph
from openvino.tools.mo.ops.op import Op
class PReLU(Op):
op = 'PReLU'
enabled = True
def __init__(self, graph: Graph, attrs: dict):
super().__init__(graph, {
'op': self.op,
'type': self.op,
'version': 'opset1',
'infer': self.infer,
'force_precision_in_ports': {1: 'float'},
'in_ports_count': 2,
'out_ports_count': 1,
}, attrs)
@staticmethod
def infer(node):
if len(node.in_nodes()) == 2:
gamma_vector = node.in_node(1)
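            # A one-element slope tensor means a single PReLU slope shared by
            # every channel; otherwise each channel carries its own slope.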
if np.all(gamma_vector.shape == [1]):
node['channel_shared'] = 1
else:
node['channel_shared'] = 0
node.in_node(1)['correct_data_type'] = True
copy_shape_infer(node)
| 26.075
| 83
| 0.57814
|
import numpy as np
from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer
from openvino.tools.mo.graph.graph import Graph
from openvino.tools.mo.ops.op import Op
class PReLU(Op):
op = 'PReLU'
enabled = True
def __init__(self, graph: Graph, attrs: dict):
super().__init__(graph, {
'op': self.op,
'type': self.op,
'version': 'opset1',
'infer': self.infer,
'force_precision_in_ports': {1: 'float'},
'in_ports_count': 2,
'out_ports_count': 1,
}, attrs)
@staticmethod
def infer(node):
if len(node.in_nodes()) == 2:
gamma_vector = node.in_node(1)
if np.all(gamma_vector.shape == [1]):
node['channel_shared'] = 1
else:
node['channel_shared'] = 0
node.in_node(1)['correct_data_type'] = True
copy_shape_infer(node)
| true
| true
|
7902a3074e04f09cd07230c3c7b161c70f5e4cab
| 984
|
py
|
Python
|
var/spack/repos/builtin/packages/py-decorator/package.py
|
adrianjhpc/spack
|
0a9e4fcee57911f2db586aa50c8873d9cca8de92
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2
|
2020-10-15T01:08:42.000Z
|
2021-10-18T01:28:18.000Z
|
var/spack/repos/builtin/packages/py-decorator/package.py
|
adrianjhpc/spack
|
0a9e4fcee57911f2db586aa50c8873d9cca8de92
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2
|
2019-07-30T10:12:28.000Z
|
2019-12-17T09:02:27.000Z
|
var/spack/repos/builtin/packages/py-decorator/package.py
|
adrianjhpc/spack
|
0a9e4fcee57911f2db586aa50c8873d9cca8de92
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 5
|
2019-07-30T09:42:14.000Z
|
2021-01-25T05:39:20.000Z
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyDecorator(PythonPackage):
"""The aim of the decorator module it to simplify the usage of decorators
for the average programmer, and to popularize decorators by showing
various non-trivial examples."""
homepage = "https://github.com/micheles/decorator"
url = "https://pypi.io/packages/source/d/decorator/decorator-4.4.0.tar.gz"
version('4.4.0', sha256='86156361c50488b84a3f148056ea716ca587df2f0de1d34750d35c21312725de')
version('4.3.0', sha256='c39efa13fbdeb4506c476c9b3babf6a718da943dab7811c206005a4a956c080c')
version('4.0.9', sha256='90022e83316363788a55352fe39cfbed357aa3a71d90e5f2803a35471de4bba8')
depends_on('python@2.6:2.8,3.2:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
| 42.782609
| 95
| 0.748984
|
from spack import *
class PyDecorator(PythonPackage):
homepage = "https://github.com/micheles/decorator"
url = "https://pypi.io/packages/source/d/decorator/decorator-4.4.0.tar.gz"
version('4.4.0', sha256='86156361c50488b84a3f148056ea716ca587df2f0de1d34750d35c21312725de')
version('4.3.0', sha256='c39efa13fbdeb4506c476c9b3babf6a718da943dab7811c206005a4a956c080c')
version('4.0.9', sha256='90022e83316363788a55352fe39cfbed357aa3a71d90e5f2803a35471de4bba8')
depends_on('python@2.6:2.8,3.2:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
| true
| true
|
7902a35c8e4c8d64bcf9903324f8fd93964ef60b
| 3,015
|
py
|
Python
|
firebase/firestore-py/lib/faculty/logic.py
|
BraydenKO/RamLife
|
10c9bbb7338fbaf6c3d1c98bb2f559e6cc089ee6
|
[
"MIT"
] | 2
|
2021-04-12T03:09:40.000Z
|
2021-05-22T04:05:54.000Z
|
firebase/firestore-py/lib/faculty/logic.py
|
BraydenKO/RamLife
|
10c9bbb7338fbaf6c3d1c98bb2f559e6cc089ee6
|
[
"MIT"
] | 58
|
2020-03-10T18:48:52.000Z
|
2021-08-31T23:19:09.000Z
|
firebase/firestore-py/lib/faculty/logic.py
|
BraydenKO/RamLife
|
10c9bbb7338fbaf6c3d1c98bb2f559e6cc089ee6
|
[
"MIT"
] | 8
|
2020-09-08T18:29:54.000Z
|
2021-04-20T23:11:50.000Z
|
from collections import defaultdict
from typing import DefaultDict
from .. import utils
from .. import data
'''
A collection of functions to index faculty data.
No function in this module reads data from the data files; they only apply
logic to data that is passed in. This helps keep the program modular, by
separating the data sources from the data indexing.
'''
'''
Maps faculty to the sections they teach.
This function works by taking several arguments:
- faculty, from [FacultyReader.get_faculty]
- sectionTeachers, from [SectionReader.get_section_faculty_ids]
These are kept as parameters instead of calling those functions here,
in order to keep the data and logic layers separate.
'''
def get_faculty_sections(faculty, section_teachers):
result = defaultdict(set)
missing_emails = set()
for key, value in section_teachers.items():
section_id = key
faculty_id = value
#Teaches a class but doesn't have basic faculty data
if faculty_id not in faculty:
missing_emails.add(faculty_id)
continue
result[faculty[faculty_id]].add(section_id)
if missing_emails:
utils.logger.warning(f"Missing emails for {missing_emails}")
return result
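# Illustrative sketch (hypothetical values, not from the data files): with
#   faculty = {"teacher@school.org": teacher_user}
#   section_teachers = {"MATH401-01": "teacher@school.org"}
# get_faculty_sections(faculty, section_teachers) returns
#   {teacher_user: {"MATH401-01"}}.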
'''
Returns complete [User] objects.
This function returns [User] objects with more properties than before.
See [User.addSchedule] for which properties are added.
This function works by taking several arguments:
- faculty_sections from [get_faculty_sections]
- section_periods from [student_reader.get_periods]
These are kept as parameters instead of calling those functions here,
in order to keep the data and logic layers separate.
'''
def get_faculty_with_schedule(faculty_sections, section_periods):
# The schedule for each teacher
schedules = {}
	# Section IDs which are taught but never meet.
missing_periods = set()
	# Faculty missing a homeroom.
#
# This will be logged at the debug level.
missing_homerooms = set()
# Loop over teacher sections and get their periods.
for key, value in faculty_sections.items():
periods = []
for section_id in value:
if section_id in section_periods:
periods = list(section_periods[section_id])
elif section_id.startswith("UADV"):
key.homeroom = section_id
key.homeroom_location = "Unavailable"
else:
missing_periods.add(section_id)
		# Still couldn't find any homeroom
if key.homeroom is None:
missing_homerooms.add(key)
key.homeroom = "SENIOR_HOMEROOM"
key.homeroom_location = "Unavailable"
schedules[key] = periods
# Some logging
if not missing_periods:
utils.logger.debug("Missing homerooms", missing_homerooms)
# Compiles a list of periods into a full schedule
result = []
for key, value in schedules.items():
schedule = data.DayDefaultDict()
for period in value:
schedule[period.day][period.period-1] = period
schedule.populate(utils.constants.day_names)
key.schedule = schedule
result.append(key)
return result
| 27.916667
| 76
| 0.733002
|
from collections import defaultdict
from typing import DefaultDict
from .. import utils
from .. import data
def get_faculty_sections(faculty, section_teachers):
result = defaultdict(set)
missing_emails = set()
for key, value in section_teachers.items():
section_id = key
faculty_id = value
if faculty_id not in faculty:
missing_emails.add(faculty_id)
continue
result[faculty[faculty_id]].add(section_id)
if missing_emails:
utils.logger.warning(f"Missing emails for {missing_emails}")
return result
def get_faculty_with_schedule(faculty_sections, section_periods):
# The schedule for each teacher
schedules = {}
    # Section IDs which are taught but never meet.
missing_periods = set()
    # Faculty missing a homeroom.
#
# This will be logged at the debug level.
missing_homerooms = set()
# Loop over teacher sections and get their periods.
for key, value in faculty_sections.items():
periods = []
for section_id in value:
if section_id in section_periods:
periods = list(section_periods[section_id])
elif section_id.startswith("UADV"):
key.homeroom = section_id
key.homeroom_location = "Unavailable"
else:
missing_periods.add(section_id)
            # Still couldn't find any homeroom
if key.homeroom is None:
missing_homerooms.add(key)
key.homeroom = "SENIOR_HOMEROOM"
key.homeroom_location = "Unavailable"
schedules[key] = periods
if not missing_periods:
utils.logger.debug("Missing homerooms", missing_homerooms)
result = []
for key, value in schedules.items():
schedule = data.DayDefaultDict()
for period in value:
schedule[period.day][period.period-1] = period
schedule.populate(utils.constants.day_names)
key.schedule = schedule
result.append(key)
return result
| true
| true
|
7902a4319036067d24b6513ad2ff3935b6cfcbf7
| 1,305
|
py
|
Python
|
integration/single/test_basic_state_machine.py
|
will-driven/serverless-application-model
|
5e2e61bc459da259cb3f9256ee6fe33ec28d5591
|
[
"Apache-2.0"
] | 1
|
2021-02-15T15:05:06.000Z
|
2021-02-15T15:05:06.000Z
|
integration/single/test_basic_state_machine.py
|
will-driven/serverless-application-model
|
5e2e61bc459da259cb3f9256ee6fe33ec28d5591
|
[
"Apache-2.0"
] | null | null | null |
integration/single/test_basic_state_machine.py
|
will-driven/serverless-application-model
|
5e2e61bc459da259cb3f9256ee6fe33ec28d5591
|
[
"Apache-2.0"
] | null | null | null |
from integration.helpers.base_test import BaseTest
class TestBasicLayerVersion(BaseTest):
"""
Basic AWS::Serverless::StateMachine tests
"""
def test_basic_state_machine_inline_definition(self):
"""
Creates a State Machine from inline definition
"""
self.create_and_verify_stack("basic_state_machine_inline_definition")
def test_basic_state_machine_with_tags(self):
"""
Creates a State Machine with tags
"""
self.create_and_verify_stack("basic_state_machine_with_tags")
tags = self.get_stack_tags("MyStateMachineArn")
self.assertIsNotNone(tags)
self._verify_tag_presence(tags, "stateMachine:createdBy", "SAM")
self._verify_tag_presence(tags, "TagOne", "ValueOne")
self._verify_tag_presence(tags, "TagTwo", "ValueTwo")
def _verify_tag_presence(self, tags, key, value):
"""
Verifies the presence of a tag and its value
Parameters
----------
tags : List of dict
List of tag objects
key : string
Tag key
value : string
Tag value
"""
tag = next(tag for tag in tags if tag["key"] == key)
self.assertIsNotNone(tag)
self.assertEqual(tag["value"], value)
| 29.659091
| 77
| 0.629885
|
from integration.helpers.base_test import BaseTest
class TestBasicLayerVersion(BaseTest):
def test_basic_state_machine_inline_definition(self):
self.create_and_verify_stack("basic_state_machine_inline_definition")
def test_basic_state_machine_with_tags(self):
self.create_and_verify_stack("basic_state_machine_with_tags")
tags = self.get_stack_tags("MyStateMachineArn")
self.assertIsNotNone(tags)
self._verify_tag_presence(tags, "stateMachine:createdBy", "SAM")
self._verify_tag_presence(tags, "TagOne", "ValueOne")
self._verify_tag_presence(tags, "TagTwo", "ValueTwo")
def _verify_tag_presence(self, tags, key, value):
tag = next(tag for tag in tags if tag["key"] == key)
self.assertIsNotNone(tag)
self.assertEqual(tag["value"], value)
| true
| true
|
7902a5206d6375bfc1bb4ff1fe6f7a3f5a713972
| 115
|
py
|
Python
|
ccc/2017/ccc17j2.py
|
pi-guy-in-the-sky/competitive-programming
|
e079f6caf07b5de061ea4f56218f9b577e49a965
|
[
"MIT"
] | null | null | null |
ccc/2017/ccc17j2.py
|
pi-guy-in-the-sky/competitive-programming
|
e079f6caf07b5de061ea4f56218f9b577e49a965
|
[
"MIT"
] | null | null | null |
ccc/2017/ccc17j2.py
|
pi-guy-in-the-sky/competitive-programming
|
e079f6caf07b5de061ea4f56218f9b577e49a965
|
[
"MIT"
] | 1
|
2020-10-25T05:46:57.000Z
|
2020-10-25T05:46:57.000Z
|
n = int(input())
k = int(input())
total = n
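# Each pass appends one more zero to n via string concatenation,
# i.e. adds n * 10**(i+1) to the running total.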
for i in range(k):
total += int(str(n) + ('0' * (i+1)))
print(total)
| 16.428571
| 38
| 0.530435
|
n = int(input())
k = int(input())
total = n
for i in range(k):
total += int(str(n) + ('0' * (i+1)))
print(total)
| true
| true
|
7902a62871e19bdb7d09da3d9697a5c59621294d
| 30,717
|
py
|
Python
|
aries_cloudagent/protocols/coordinate_mediation/v1_0/tests/test_routes.py
|
TimoGlastra/aries-cloudagent-python
|
21a36ed8106426fd80f889abac4cef2a376e1cc3
|
[
"Apache-2.0"
] | null | null | null |
aries_cloudagent/protocols/coordinate_mediation/v1_0/tests/test_routes.py
|
TimoGlastra/aries-cloudagent-python
|
21a36ed8106426fd80f889abac4cef2a376e1cc3
|
[
"Apache-2.0"
] | 22
|
2021-02-13T18:48:53.000Z
|
2021-04-27T07:29:50.000Z
|
aries_cloudagent/protocols/coordinate_mediation/v1_0/tests/test_routes.py
|
sklump/aries-cloudagent-python
|
d3895f6394a5f24a895731bcf6fb672e0d84804e
|
[
"Apache-2.0"
] | 2
|
2021-02-19T17:53:37.000Z
|
2021-02-19T17:56:48.000Z
|
import json
import asynctest
from asynctest import TestCase as AsyncTestCase
from asynctest import mock as async_mock
from aries_cloudagent.config.injection_context import InjectionContext
from aries_cloudagent.messaging.request_context import RequestContext
from .....admin.request_context import AdminRequestContext
from .. import routes as test_module
from ..manager import MediationManager
from ..models.mediation_record import MediationRecord
class TestCoordinateMediationRoutes(AsyncTestCase):
def setUp(self):
self.session_inject = {}
self.context = AdminRequestContext.test_context(self.session_inject)
self.outbound_message_router = async_mock.CoroutineMock()
self.request_dict = {
"context": self.context,
"outbound_message_router": self.outbound_message_router,
}
self.request = async_mock.MagicMock(
match_info={
"mediation_id": "test-mediation-id",
"conn_id": "test-conn-id",
},
query={},
json=async_mock.CoroutineMock(return_value={}),
__getitem__=lambda _, k: self.request_dict[k],
)
serialized = {
"mediation_id": "fake_id",
"state": "granted",
"role": "server",
"connection_id": "c3dd00cf-f6a2-4ddf-93d8-49ae74bdacef",
"mediator_terms": [],
"recipient_terms": [],
"routing_keys": ["EwUKjVLboiLSuoWSEtDvrgrd41EUxG5bLecQrkHB63Up"],
"endpoint": "http://192.168.1.13:3005",
"created_at": "1234567890",
}
self.mock_record = async_mock.MagicMock(
**serialized,
serialize=async_mock.MagicMock(return_value=serialized),
save=async_mock.CoroutineMock()
)
def test_mediation_sort_key(self):
assert (
test_module.mediation_sort_key(
{"state": MediationRecord.STATE_DENIED, "created_at": ""}
)
== "2"
)
assert (
test_module.mediation_sort_key(
{"state": MediationRecord.STATE_REQUEST, "created_at": ""}
)
== "1"
)
assert (
test_module.mediation_sort_key(
{"state": MediationRecord.STATE_GRANTED, "created_at": ""}
)
== "0"
)
async def test_list_mediation_requests(self):
self.request.query = {}
self.context.session = async_mock.CoroutineMock()
with async_mock.patch.object(
test_module.MediationRecord,
"query",
async_mock.CoroutineMock(return_value=[self.mock_record]),
) as mock_query, async_mock.patch.object(
test_module.web, "json_response"
) as json_response:
await test_module.list_mediation_requests(self.request)
json_response.assert_called_once_with(
[self.mock_record.serialize.return_value]
)
mock_query.assert_called_once_with(self.context.session.return_value, {})
async def test_list_mediation_requests_filters(self):
self.request.query = {
"state": MediationRecord.STATE_GRANTED,
"conn_id": "test-conn-id",
}
self.context.session = async_mock.CoroutineMock()
with async_mock.patch.object(
test_module.MediationRecord,
"query",
async_mock.CoroutineMock(return_value=[self.mock_record]),
) as mock_query, async_mock.patch.object(
test_module.web, "json_response"
) as json_response:
await test_module.list_mediation_requests(self.request)
json_response.assert_called_once_with(
[self.mock_record.serialize.return_value]
)
mock_query.assert_called_once_with(
self.context.session.return_value,
{
"connection_id": "test-conn-id",
"state": MediationRecord.STATE_GRANTED,
},
)
async def test_list_mediation_requests_x(self):
with async_mock.patch.object(
test_module,
"MediationRecord",
async_mock.MagicMock(
query=async_mock.CoroutineMock(side_effect=test_module.StorageError())
),
) as mock_med_rec:
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.list_mediation_requests(self.request)
async def test_list_mediation_requests_no_records(self):
with async_mock.patch.object(
test_module,
"MediationRecord",
async_mock.MagicMock(query=async_mock.CoroutineMock(return_value=[])),
) as mock_med_rec, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
await test_module.list_mediation_requests(self.request)
mock_response.assert_called_once_with([])
async def test_retrieve_mediation_request(self):
with async_mock.patch.object(
test_module.MediationRecord, "retrieve_by_id", async_mock.CoroutineMock()
) as mock_mediation_record_retrieve, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
mock_mediation_record_retrieve.return_value = self.mock_record
await test_module.retrieve_mediation_request(self.request)
mock_response.assert_called_once_with(
self.mock_record.serialize.return_value
)
mock_mediation_record_retrieve.assert_called()
async def test_retrieve_mediation_request_x_not_found(self):
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageNotFoundError()),
) as mock_mediation_record_retrieve, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response, self.assertRaises(
test_module.web.HTTPNotFound
):
await test_module.retrieve_mediation_request(self.request)
async def test_retrieve_mediation_request_x_storage_error(self):
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageError()),
) as mock_mediation_record_retrieve, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response, self.assertRaises(
test_module.web.HTTPBadRequest
):
await test_module.retrieve_mediation_request(self.request)
async def test_delete_mediation_request(self):
with async_mock.patch.object(
test_module.MediationRecord, "retrieve_by_id", async_mock.CoroutineMock()
) as mock_mediation_record_retrieve, async_mock.patch.object(
self.mock_record, "delete_record", async_mock.CoroutineMock()
) as mock_delete_record, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
mock_mediation_record_retrieve.return_value = self.mock_record
await test_module.delete_mediation_request(self.request)
mock_response.assert_called_once_with(
self.mock_record.serialize.return_value
)
mock_mediation_record_retrieve.assert_called()
mock_delete_record.assert_called()
async def test_delete_mediation_request_x_not_found(self):
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageNotFoundError()),
) as mock_mediation_record_retrieve, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response, self.assertRaises(
test_module.web.HTTPNotFound
):
await test_module.delete_mediation_request(self.request)
async def test_delete_mediation_request_x_storage_error(self):
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageError()),
) as mock_mediation_record_retrieve, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response, self.assertRaises(
test_module.web.HTTPBadRequest
):
await test_module.delete_mediation_request(self.request)
async def test_request_mediation(self):
body = {
"mediator_terms": ["meaningless string because terms are not used"],
"recipient_terms": ["meaningless string because terms are not a 'thing'"],
}
self.request.json.return_value = body
with async_mock.patch.object(
test_module, "MediationManager", autospec=True
) as mock_med_mgr, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response, async_mock.patch.object(
test_module.MediationRecord,
"exists_for_connection_id",
async_mock.CoroutineMock(return_value=False),
) as mock_mediation_record_exists, async_mock.patch.object(
test_module.ConnRecord, "retrieve_by_id", async_mock.CoroutineMock()
) as mock_conn_rec_retrieve_by_id:
mock_med_mgr.return_value.prepare_request = async_mock.CoroutineMock(
return_value=(
self.mock_record,
async_mock.MagicMock( # mediation request
serialize=async_mock.MagicMock(return_value={"a": "value"}),
),
)
)
await test_module.request_mediation(self.request)
mock_response.assert_called_once_with(
self.mock_record.serialize.return_value, status=201
)
self.outbound_message_router.assert_called()
async def test_request_mediation_x_conn_not_ready(self):
body = {
"mediator_terms": ["meaningless string because terms are not used"],
"recipient_terms": ["meaningless string because terms are not a 'thing'"],
}
self.request.json.return_value = body
with async_mock.patch.object(
test_module.ConnRecord,
"retrieve_by_id",
async_mock.CoroutineMock(return_value=async_mock.MagicMock(is_ready=False)),
) as mock_conn_rec_retrieve_by_id, self.assertRaises(
test_module.web.HTTPBadRequest
) as exc:
await test_module.request_mediation(self.request)
assert "request connection is not ready" in exc.msg
async def test_request_mediation_x_already_exists(self):
body = {
"mediator_terms": ["meaningless string because terms are not used"],
"recipient_terms": ["meaningless string because terms are not a 'thing'"],
}
self.request.json.return_value = body
with async_mock.patch.object(
test_module.ConnRecord, "retrieve_by_id", async_mock.CoroutineMock()
) as mock_conn_rec_retrieve_by_id, async_mock.patch.object(
test_module.MediationRecord,
"exists_for_connection_id",
async_mock.CoroutineMock(return_value=True),
) as mock_exists_for_conn, self.assertRaises(
test_module.web.HTTPBadRequest
) as exc:
await test_module.request_mediation(self.request)
assert "already exists for connection" in exc.msg
async def test_request_mediation_x_conn_not_found(self):
body = {
"mediator_terms": ["meaningless string because terms are not used"],
"recipient_terms": ["meaningless string because terms are not a 'thing'"],
}
self.request.json.return_value = body
with async_mock.patch.object(
test_module.ConnRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageNotFoundError()),
) as mock_conn_rec_retrieve_by_id, self.assertRaises(
test_module.web.HTTPNotFound
):
await test_module.request_mediation(self.request)
async def test_request_mediation_x_storage_error(self):
body = {
"mediator_terms": ["meaningless string because terms are not used"],
"recipient_terms": ["meaningless string because terms are not a 'thing'"],
}
self.request.json.return_value = body
with async_mock.patch.object(
test_module.ConnRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageError()),
) as mock_conn_rec_retrieve_by_id, self.assertRaises(
test_module.web.HTTPBadRequest
):
await test_module.request_mediation(self.request)
async def test_mediation_request_grant_role_server(self):
self.mock_record.role = MediationRecord.ROLE_SERVER
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(return_value=self.mock_record),
) as mock_mediation_record_retrieve, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
await test_module.mediation_request_grant(self.request)
mock_response.assert_called_once_with(
self.mock_record.serialize.return_value, status=201
)
self.outbound_message_router.assert_called()
async def test_mediation_request_grant_role_client_x(self):
self.mock_record.role = MediationRecord.ROLE_CLIENT
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(return_value=self.mock_record),
), self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.mediation_request_grant(self.request)
async def test_mediation_request_grant_x_rec_not_found(self):
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageNotFoundError()),
), self.assertRaises(test_module.web.HTTPNotFound):
await test_module.mediation_request_grant(self.request)
async def test_mediation_request_grant_x_storage_error(self):
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageError()),
), self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.mediation_request_grant(self.request)
async def test_mediation_request_deny_role_server(self):
self.mock_record.role = MediationRecord.ROLE_SERVER
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(return_value=self.mock_record),
) as mock_mediation_record_retrieve, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
await test_module.mediation_request_deny(self.request)
mock_response.assert_called_once_with(
self.mock_record.serialize.return_value, status=201
)
self.outbound_message_router.assert_called()
async def test_mediation_request_deny_role_client_x(self):
self.mock_record.role = MediationRecord.ROLE_CLIENT
with async_mock.patch.object(
test_module.MediationRecord, "retrieve_by_id", async_mock.CoroutineMock()
) as mock_mediation_record_retrieve, async_mock.patch.object(
test_module.web, "json_response"
):
mock_mediation_record_retrieve.return_value = async_mock.MagicMock(
role=MediationRecord.ROLE_CLIENT
)
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.mediation_request_deny(self.request)
async def test_mediation_request_deny_x_rec_not_found(self):
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageNotFoundError()),
), self.assertRaises(test_module.web.HTTPNotFound):
await test_module.mediation_request_deny(self.request)
async def test_mediation_request_deny_x_storage_error(self):
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageError()),
), self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.mediation_request_deny(self.request)
async def test_get_keylist(self):
self.request.query["role"] = MediationRecord.ROLE_SERVER
self.request.query["conn_id"] = "test-id"
query_results = [
async_mock.MagicMock(
serialize=async_mock.MagicMock(
return_value={"serialized": "route record"}
)
)
]
with async_mock.patch.object(
test_module.RouteRecord,
"query",
async_mock.CoroutineMock(return_value=query_results),
) as mock_query, async_mock.patch.object(
self.context, "session", async_mock.CoroutineMock()
) as mock_session, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
await test_module.get_keylist(self.request)
mock_response.assert_called_once_with(
[{"serialized": "route record"}], status=200
)
mock_query.assert_called_once_with(
mock_session.return_value,
{"role": MediationRecord.ROLE_SERVER, "connection_id": "test-id"},
)
async def test_get_keylist_no_matching_records(self):
with async_mock.patch.object(
test_module.RouteRecord,
"query",
async_mock.CoroutineMock(return_value=[]),
) as mock_query, async_mock.patch.object(
self.context, "session", async_mock.CoroutineMock()
) as mock_session, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
await test_module.get_keylist(self.request)
mock_query.assert_called_once_with(mock_session.return_value, {})
mock_response.assert_called_once_with([], status=200)
async def test_get_keylist_storage_error(self):
with async_mock.patch.object(
test_module.RouteRecord,
"query",
async_mock.CoroutineMock(side_effect=test_module.StorageError),
) as mock_query, self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.get_keylist(self.request)
async def test_send_keylist_update(self):
body = {
"updates": [
{"recipient_key": "test-key0", "action": "add"},
{"recipient_key": "test-key1", "action": "remove"},
]
}
self.request.json.return_value = body
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(
return_value=async_mock.MagicMock(
state=MediationRecord.STATE_GRANTED, connection_id="test-conn-id"
)
),
) as mock_retrieve_by_id, async_mock.patch.object(
test_module.web,
"json_response",
async_mock.MagicMock(
side_effect=lambda *args, **kwargs: [*args, *kwargs.values()]
),
) as mock_response:
results, status = await test_module.send_keylist_update(self.request)
assert results["updates"] == body["updates"]
assert status == 201
async def test_send_keylist_update_bad_action(self):
self.request.json.return_value = {
"updates": [
{"recipient_key": "test-key0", "action": "wrong"},
]
}
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.send_keylist_update(self.request)
async def test_send_keylist_update_bad_mediation_state(self):
self.request.json.return_value = {
"updates": [
{"recipient_key": "test-key0", "action": "add"},
]
}
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(
return_value=async_mock.MagicMock(
state=MediationRecord.STATE_DENIED, connection_id="test-conn-id"
)
),
) as mock_retrieve_by_id, self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.send_keylist_update(self.request)
async def test_send_keylist_update_bad_updates(self):
self.request.json.return_value = {"updates": []}
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.send_keylist_update(self.request)
async def test_send_keylist_update_x_no_mediation_rec(self):
self.request.json.return_value = {
"updates": [
{"recipient_key": "test-key0", "action": "add"},
]
}
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageNotFoundError()),
), self.assertRaises(test_module.web.HTTPNotFound):
await test_module.send_keylist_update(self.request)
async def test_send_keylist_update_x_storage_error(self):
self.request.json.return_value = {
"updates": [
{"recipient_key": "test-key0", "action": "add"},
]
}
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageError()),
), self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.send_keylist_update(self.request)
@async_mock.patch.object(test_module, "MediationManager", autospec=True)
async def test_send_keylist_query(self, mock_manager):
self.request.json.return_value = {"filter": {"test": "filter"}}
self.request.query = {"paginate_limit": 10, "paginate_offset": 20}
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(return_value=self.mock_record),
) as mock_retrieve_by_id, async_mock.patch.object(
mock_manager.return_value,
"prepare_keylist_query",
async_mock.CoroutineMock(),
) as mock_prepare_keylist_query, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
await test_module.send_keylist_query(self.request)
mock_prepare_keylist_query.assert_called_once_with(
filter_={"test": "filter"}, paginate_limit=10, paginate_offset=20
)
self.outbound_message_router.assert_called()
mock_response.assert_called_once_with(
mock_prepare_keylist_query.return_value.serialize.return_value,
status=201,
)
async def test_send_keylist_query_x_no_mediation_record(self):
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageNotFoundError()),
) as mock_retrieve_by_id, self.assertRaises(test_module.web.HTTPNotFound):
await test_module.send_keylist_query(self.request)
async def test_send_keylist_query_x_storage_error(self):
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageError()),
) as mock_retrieve_by_id, self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.send_keylist_query(self.request)
async def test_get_default_mediator(self):
self.request.query = {}
self.context.session = async_mock.CoroutineMock()
with async_mock.patch.object(
test_module.web, "json_response"
) as json_response, async_mock.patch.object(
test_module.MediationManager,
"get_default_mediator",
async_mock.CoroutineMock(return_value=self.mock_record),
) as mock_mgr_get_default_record:
await test_module.get_default_mediator(self.request)
json_response.assert_called_once_with(
self.mock_record.serialize.return_value,
status=200,
)
async def test_get_empty_default_mediator(self):
self.request.query = {}
self.context.session = async_mock.CoroutineMock()
with async_mock.patch.object(
test_module.web, "json_response"
) as json_response, async_mock.patch.object(
test_module.MediationManager,
"get_default_mediator",
async_mock.CoroutineMock(return_value=None),
) as mock_mgr_get_default_record:
await test_module.get_default_mediator(self.request)
json_response.assert_called_once_with(
{},
status=200,
)
async def test_get_default_mediator_storage_error(self):
self.request.query = {}
self.context.session = async_mock.CoroutineMock()
with async_mock.patch.object(
test_module.web, "json_response"
) as json_response, async_mock.patch.object(
test_module.MediationManager,
"get_default_mediator",
async_mock.CoroutineMock(side_effect=test_module.StorageNotFoundError()),
) as mock_mgr_get_default_record:
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.get_default_mediator(self.request)
async def test_set_default_mediator(self):
self.request.match_info = {
"mediation_id": "fake_id",
}
self.request.query = {}
self.context.session = async_mock.CoroutineMock()
with async_mock.patch.object(
test_module.MediationManager,
"get_default_mediator",
async_mock.CoroutineMock(return_value=self.mock_record),
) as mock_mgr_get_default_record, async_mock.patch.object(
test_module.MediationManager,
"set_default_mediator_by_id",
async_mock.CoroutineMock(),
) as mock_mgr_set_default_record_by_id, async_mock.patch.object(
test_module.web, "json_response"
) as json_response:
await test_module.set_default_mediator(self.request)
json_response.assert_called_once_with(
self.mock_record.serialize.return_value,
status=201,
)
async def test_set_default_mediator_storage_error(self):
self.request.match_info = {
"mediation_id": "bad_id",
}
self.request.query = {}
self.context.session = async_mock.CoroutineMock()
with async_mock.patch.object(
test_module.MediationManager,
"get_default_mediator",
async_mock.CoroutineMock(side_effect=test_module.StorageError()),
) as mock_mgr_get_default_record, async_mock.patch.object(
test_module.MediationManager,
"set_default_mediator_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageError()),
) as mock_mgr_set_default_record_by_id, async_mock.patch.object(
test_module.web, "json_response"
) as json_response:
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.set_default_mediator(self.request)
async def test_clear_default_mediator(self):
self.request.query = {}
self.context.session = async_mock.CoroutineMock()
with async_mock.patch.object(
test_module.MediationManager,
"get_default_mediator",
async_mock.CoroutineMock(return_value=self.mock_record),
) as mock_mgr_get_default_record, async_mock.patch.object(
test_module.MediationManager,
"clear_default_mediator",
async_mock.CoroutineMock(),
) as mock_mgr_clear_default_record_by_id, async_mock.patch.object(
test_module.web, "json_response"
) as json_response:
await test_module.clear_default_mediator(self.request)
json_response.assert_called_once_with(
self.mock_record.serialize.return_value,
status=201,
)
async def test_clear_default_mediator_storage_error(self):
self.request.query = {}
self.context.session = async_mock.CoroutineMock()
with async_mock.patch.object(
test_module.MediationManager,
"get_default_mediator",
async_mock.CoroutineMock(side_effect=test_module.StorageError()),
) as mock_mgr_get_default_record, async_mock.patch.object(
test_module.MediationManager,
"clear_default_mediator",
async_mock.CoroutineMock(),
) as mock_mgr_clear_default_record_by_id, async_mock.patch.object(
test_module.web, "json_response"
) as json_response:
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.clear_default_mediator(self.request)
async def test_register(self):
mock_app = async_mock.MagicMock()
mock_app.add_routes = async_mock.MagicMock()
await test_module.register(mock_app)
mock_app.add_routes.assert_called_once()
async def test_post_process_routes(self):
mock_app = async_mock.MagicMock(_state={"swagger_dict": {}})
test_module.post_process_routes(mock_app)
assert "tags" in mock_app._state["swagger_dict"]
| 43.385593
| 88
| 0.648078
|
import json
import asynctest
from asynctest import TestCase as AsyncTestCase
from asynctest import mock as async_mock
from aries_cloudagent.config.injection_context import InjectionContext
from aries_cloudagent.messaging.request_context import RequestContext
from .....admin.request_context import AdminRequestContext
from .. import routes as test_module
from ..manager import MediationManager
from ..models.mediation_record import MediationRecord
class TestCoordinateMediationRoutes(AsyncTestCase):
def setUp(self):
self.session_inject = {}
self.context = AdminRequestContext.test_context(self.session_inject)
self.outbound_message_router = async_mock.CoroutineMock()
self.request_dict = {
"context": self.context,
"outbound_message_router": self.outbound_message_router,
}
self.request = async_mock.MagicMock(
match_info={
"mediation_id": "test-mediation-id",
"conn_id": "test-conn-id",
},
query={},
json=async_mock.CoroutineMock(return_value={}),
__getitem__=lambda _, k: self.request_dict[k],
)
serialized = {
"mediation_id": "fake_id",
"state": "granted",
"role": "server",
"connection_id": "c3dd00cf-f6a2-4ddf-93d8-49ae74bdacef",
"mediator_terms": [],
"recipient_terms": [],
"routing_keys": ["EwUKjVLboiLSuoWSEtDvrgrd41EUxG5bLecQrkHB63Up"],
"endpoint": "http://192.168.1.13:3005",
"created_at": "1234567890",
}
self.mock_record = async_mock.MagicMock(
**serialized,
serialize=async_mock.MagicMock(return_value=serialized),
save=async_mock.CoroutineMock()
)
def test_mediation_sort_key(self):
assert (
test_module.mediation_sort_key(
{"state": MediationRecord.STATE_DENIED, "created_at": ""}
)
== "2"
)
assert (
test_module.mediation_sort_key(
{"state": MediationRecord.STATE_REQUEST, "created_at": ""}
)
== "1"
)
assert (
test_module.mediation_sort_key(
{"state": MediationRecord.STATE_GRANTED, "created_at": ""}
)
== "0"
)
async def test_list_mediation_requests(self):
self.request.query = {}
self.context.session = async_mock.CoroutineMock()
with async_mock.patch.object(
test_module.MediationRecord,
"query",
async_mock.CoroutineMock(return_value=[self.mock_record]),
) as mock_query, async_mock.patch.object(
test_module.web, "json_response"
) as json_response:
await test_module.list_mediation_requests(self.request)
json_response.assert_called_once_with(
[self.mock_record.serialize.return_value]
)
mock_query.assert_called_once_with(self.context.session.return_value, {})
async def test_list_mediation_requests_filters(self):
self.request.query = {
"state": MediationRecord.STATE_GRANTED,
"conn_id": "test-conn-id",
}
self.context.session = async_mock.CoroutineMock()
with async_mock.patch.object(
test_module.MediationRecord,
"query",
async_mock.CoroutineMock(return_value=[self.mock_record]),
) as mock_query, async_mock.patch.object(
test_module.web, "json_response"
) as json_response:
await test_module.list_mediation_requests(self.request)
json_response.assert_called_once_with(
[self.mock_record.serialize.return_value]
)
mock_query.assert_called_once_with(
self.context.session.return_value,
{
"connection_id": "test-conn-id",
"state": MediationRecord.STATE_GRANTED,
},
)
async def test_list_mediation_requests_x(self):
with async_mock.patch.object(
test_module,
"MediationRecord",
async_mock.MagicMock(
query=async_mock.CoroutineMock(side_effect=test_module.StorageError())
),
) as mock_med_rec:
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.list_mediation_requests(self.request)
async def test_list_mediation_requests_no_records(self):
with async_mock.patch.object(
test_module,
"MediationRecord",
async_mock.MagicMock(query=async_mock.CoroutineMock(return_value=[])),
) as mock_med_rec, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
await test_module.list_mediation_requests(self.request)
mock_response.assert_called_once_with([])
async def test_retrieve_mediation_request(self):
with async_mock.patch.object(
test_module.MediationRecord, "retrieve_by_id", async_mock.CoroutineMock()
) as mock_mediation_record_retrieve, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
mock_mediation_record_retrieve.return_value = self.mock_record
await test_module.retrieve_mediation_request(self.request)
mock_response.assert_called_once_with(
self.mock_record.serialize.return_value
)
mock_mediation_record_retrieve.assert_called()
async def test_retrieve_mediation_request_x_not_found(self):
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageNotFoundError()),
) as mock_mediation_record_retrieve, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response, self.assertRaises(
test_module.web.HTTPNotFound
):
await test_module.retrieve_mediation_request(self.request)
async def test_retrieve_mediation_request_x_storage_error(self):
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageError()),
) as mock_mediation_record_retrieve, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response, self.assertRaises(
test_module.web.HTTPBadRequest
):
await test_module.retrieve_mediation_request(self.request)
async def test_delete_mediation_request(self):
with async_mock.patch.object(
test_module.MediationRecord, "retrieve_by_id", async_mock.CoroutineMock()
) as mock_mediation_record_retrieve, async_mock.patch.object(
self.mock_record, "delete_record", async_mock.CoroutineMock()
) as mock_delete_record, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
mock_mediation_record_retrieve.return_value = self.mock_record
await test_module.delete_mediation_request(self.request)
mock_response.assert_called_once_with(
self.mock_record.serialize.return_value
)
mock_mediation_record_retrieve.assert_called()
mock_delete_record.assert_called()
async def test_delete_mediation_request_x_not_found(self):
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageNotFoundError()),
) as mock_mediation_record_retrieve, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response, self.assertRaises(
test_module.web.HTTPNotFound
):
await test_module.delete_mediation_request(self.request)
async def test_delete_mediation_request_x_storage_error(self):
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageError()),
) as mock_mediation_record_retrieve, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response, self.assertRaises(
test_module.web.HTTPBadRequest
):
await test_module.delete_mediation_request(self.request)
async def test_request_mediation(self):
body = {
"mediator_terms": ["meaningless string because terms are not used"],
"recipient_terms": ["meaningless string because terms are not a 'thing'"],
}
self.request.json.return_value = body
with async_mock.patch.object(
test_module, "MediationManager", autospec=True
) as mock_med_mgr, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response, async_mock.patch.object(
test_module.MediationRecord,
"exists_for_connection_id",
async_mock.CoroutineMock(return_value=False),
) as mock_mediation_record_exists, async_mock.patch.object(
test_module.ConnRecord, "retrieve_by_id", async_mock.CoroutineMock()
) as mock_conn_rec_retrieve_by_id:
mock_med_mgr.return_value.prepare_request = async_mock.CoroutineMock(
return_value=(
self.mock_record,
async_mock.MagicMock(
serialize=async_mock.MagicMock(return_value={"a": "value"}),
),
)
)
await test_module.request_mediation(self.request)
mock_response.assert_called_once_with(
self.mock_record.serialize.return_value, status=201
)
self.outbound_message_router.assert_called()
async def test_request_mediation_x_conn_not_ready(self):
body = {
"mediator_terms": ["meaningless string because terms are not used"],
"recipient_terms": ["meaningless string because terms are not a 'thing'"],
}
self.request.json.return_value = body
with async_mock.patch.object(
test_module.ConnRecord,
"retrieve_by_id",
async_mock.CoroutineMock(return_value=async_mock.MagicMock(is_ready=False)),
) as mock_conn_rec_retrieve_by_id, self.assertRaises(
test_module.web.HTTPBadRequest
) as exc:
await test_module.request_mediation(self.request)
assert "request connection is not ready" in exc.msg
async def test_request_mediation_x_already_exists(self):
body = {
"mediator_terms": ["meaningless string because terms are not used"],
"recipient_terms": ["meaningless string because terms are not a 'thing'"],
}
self.request.json.return_value = body
with async_mock.patch.object(
test_module.ConnRecord, "retrieve_by_id", async_mock.CoroutineMock()
) as mock_conn_rec_retrieve_by_id, async_mock.patch.object(
test_module.MediationRecord,
"exists_for_connection_id",
async_mock.CoroutineMock(return_value=True),
) as mock_exists_for_conn, self.assertRaises(
test_module.web.HTTPBadRequest
) as exc:
await test_module.request_mediation(self.request)
assert "already exists for connection" in exc.msg
async def test_request_mediation_x_conn_not_found(self):
body = {
"mediator_terms": ["meaningless string because terms are not used"],
"recipient_terms": ["meaningless string because terms are not a 'thing'"],
}
self.request.json.return_value = body
with async_mock.patch.object(
test_module.ConnRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageNotFoundError()),
) as mock_conn_rec_retrieve_by_id, self.assertRaises(
test_module.web.HTTPNotFound
):
await test_module.request_mediation(self.request)
async def test_request_mediation_x_storage_error(self):
body = {
"mediator_terms": ["meaningless string because terms are not used"],
"recipient_terms": ["meaningless string because terms are not a 'thing'"],
}
self.request.json.return_value = body
with async_mock.patch.object(
test_module.ConnRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageError()),
) as mock_conn_rec_retrieve_by_id, self.assertRaises(
test_module.web.HTTPBadRequest
):
await test_module.request_mediation(self.request)
async def test_mediation_request_grant_role_server(self):
self.mock_record.role = MediationRecord.ROLE_SERVER
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(return_value=self.mock_record),
) as mock_mediation_record_retrieve, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
await test_module.mediation_request_grant(self.request)
mock_response.assert_called_once_with(
self.mock_record.serialize.return_value, status=201
)
self.outbound_message_router.assert_called()
async def test_mediation_request_grant_role_client_x(self):
self.mock_record.role = MediationRecord.ROLE_CLIENT
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(return_value=self.mock_record),
), self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.mediation_request_grant(self.request)
async def test_mediation_request_grant_x_rec_not_found(self):
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageNotFoundError()),
), self.assertRaises(test_module.web.HTTPNotFound):
await test_module.mediation_request_grant(self.request)
async def test_mediation_request_grant_x_storage_error(self):
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageError()),
), self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.mediation_request_grant(self.request)
async def test_mediation_request_deny_role_server(self):
self.mock_record.role = MediationRecord.ROLE_SERVER
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(return_value=self.mock_record),
) as mock_mediation_record_retrieve, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
await test_module.mediation_request_deny(self.request)
mock_response.assert_called_once_with(
self.mock_record.serialize.return_value, status=201
)
self.outbound_message_router.assert_called()
async def test_mediation_request_deny_role_client_x(self):
self.mock_record.role = MediationRecord.ROLE_CLIENT
with async_mock.patch.object(
test_module.MediationRecord, "retrieve_by_id", async_mock.CoroutineMock()
) as mock_mediation_record_retrieve, async_mock.patch.object(
test_module.web, "json_response"
):
mock_mediation_record_retrieve.return_value = async_mock.MagicMock(
role=MediationRecord.ROLE_CLIENT
)
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.mediation_request_deny(self.request)
async def test_mediation_request_deny_x_rec_not_found(self):
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageNotFoundError()),
), self.assertRaises(test_module.web.HTTPNotFound):
await test_module.mediation_request_deny(self.request)
async def test_mediation_request_deny_x_storage_error(self):
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageError()),
), self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.mediation_request_deny(self.request)
async def test_get_keylist(self):
self.request.query["role"] = MediationRecord.ROLE_SERVER
self.request.query["conn_id"] = "test-id"
query_results = [
async_mock.MagicMock(
serialize=async_mock.MagicMock(
return_value={"serialized": "route record"}
)
)
]
with async_mock.patch.object(
test_module.RouteRecord,
"query",
async_mock.CoroutineMock(return_value=query_results),
) as mock_query, async_mock.patch.object(
self.context, "session", async_mock.CoroutineMock()
) as mock_session, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
await test_module.get_keylist(self.request)
mock_response.assert_called_once_with(
[{"serialized": "route record"}], status=200
)
mock_query.assert_called_once_with(
mock_session.return_value,
{"role": MediationRecord.ROLE_SERVER, "connection_id": "test-id"},
)
async def test_get_keylist_no_matching_records(self):
with async_mock.patch.object(
test_module.RouteRecord,
"query",
async_mock.CoroutineMock(return_value=[]),
) as mock_query, async_mock.patch.object(
self.context, "session", async_mock.CoroutineMock()
) as mock_session, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
await test_module.get_keylist(self.request)
mock_query.assert_called_once_with(mock_session.return_value, {})
mock_response.assert_called_once_with([], status=200)
async def test_get_keylist_storage_error(self):
with async_mock.patch.object(
test_module.RouteRecord,
"query",
async_mock.CoroutineMock(side_effect=test_module.StorageError),
) as mock_query, self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.get_keylist(self.request)
async def test_send_keylist_update(self):
body = {
"updates": [
{"recipient_key": "test-key0", "action": "add"},
{"recipient_key": "test-key1", "action": "remove"},
]
}
self.request.json.return_value = body
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(
return_value=async_mock.MagicMock(
state=MediationRecord.STATE_GRANTED, connection_id="test-conn-id"
)
),
) as mock_retrieve_by_id, async_mock.patch.object(
test_module.web,
"json_response",
async_mock.MagicMock(
side_effect=lambda *args, **kwargs: [*args, *kwargs.values()]
),
) as mock_response:
results, status = await test_module.send_keylist_update(self.request)
assert results["updates"] == body["updates"]
assert status == 201
async def test_send_keylist_update_bad_action(self):
self.request.json.return_value = {
"updates": [
{"recipient_key": "test-key0", "action": "wrong"},
]
}
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.send_keylist_update(self.request)
async def test_send_keylist_update_bad_mediation_state(self):
self.request.json.return_value = {
"updates": [
{"recipient_key": "test-key0", "action": "add"},
]
}
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(
return_value=async_mock.MagicMock(
state=MediationRecord.STATE_DENIED, connection_id="test-conn-id"
)
),
) as mock_retrieve_by_id, self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.send_keylist_update(self.request)
async def test_send_keylist_update_bad_updates(self):
self.request.json.return_value = {"updates": []}
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.send_keylist_update(self.request)
async def test_send_keylist_update_x_no_mediation_rec(self):
self.request.json.return_value = {
"updates": [
{"recipient_key": "test-key0", "action": "add"},
]
}
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageNotFoundError()),
), self.assertRaises(test_module.web.HTTPNotFound):
await test_module.send_keylist_update(self.request)
async def test_send_keylist_update_x_storage_error(self):
self.request.json.return_value = {
"updates": [
{"recipient_key": "test-key0", "action": "add"},
]
}
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageError()),
), self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.send_keylist_update(self.request)
@async_mock.patch.object(test_module, "MediationManager", autospec=True)
async def test_send_keylist_query(self, mock_manager):
self.request.json.return_value = {"filter": {"test": "filter"}}
self.request.query = {"paginate_limit": 10, "paginate_offset": 20}
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(return_value=self.mock_record),
) as mock_retrieve_by_id, async_mock.patch.object(
mock_manager.return_value,
"prepare_keylist_query",
async_mock.CoroutineMock(),
) as mock_prepare_keylist_query, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
await test_module.send_keylist_query(self.request)
mock_prepare_keylist_query.assert_called_once_with(
filter_={"test": "filter"}, paginate_limit=10, paginate_offset=20
)
self.outbound_message_router.assert_called()
mock_response.assert_called_once_with(
mock_prepare_keylist_query.return_value.serialize.return_value,
status=201,
)
async def test_send_keylist_query_x_no_mediation_record(self):
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageNotFoundError()),
) as mock_retrieve_by_id, self.assertRaises(test_module.web.HTTPNotFound):
await test_module.send_keylist_query(self.request)
async def test_send_keylist_query_x_storage_error(self):
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageError()),
) as mock_retrieve_by_id, self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.send_keylist_query(self.request)
async def test_get_default_mediator(self):
self.request.query = {}
self.context.session = async_mock.CoroutineMock()
with async_mock.patch.object(
test_module.web, "json_response"
) as json_response, async_mock.patch.object(
test_module.MediationManager,
"get_default_mediator",
async_mock.CoroutineMock(return_value=self.mock_record),
) as mock_mgr_get_default_record:
await test_module.get_default_mediator(self.request)
json_response.assert_called_once_with(
self.mock_record.serialize.return_value,
status=200,
)
async def test_get_empty_default_mediator(self):
self.request.query = {}
self.context.session = async_mock.CoroutineMock()
with async_mock.patch.object(
test_module.web, "json_response"
) as json_response, async_mock.patch.object(
test_module.MediationManager,
"get_default_mediator",
async_mock.CoroutineMock(return_value=None),
) as mock_mgr_get_default_record:
await test_module.get_default_mediator(self.request)
json_response.assert_called_once_with(
{},
status=200,
)
async def test_get_default_mediator_storage_error(self):
self.request.query = {}
self.context.session = async_mock.CoroutineMock()
with async_mock.patch.object(
test_module.web, "json_response"
) as json_response, async_mock.patch.object(
test_module.MediationManager,
"get_default_mediator",
async_mock.CoroutineMock(side_effect=test_module.StorageNotFoundError()),
) as mock_mgr_get_default_record:
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.get_default_mediator(self.request)
async def test_set_default_mediator(self):
self.request.match_info = {
"mediation_id": "fake_id",
}
self.request.query = {}
self.context.session = async_mock.CoroutineMock()
with async_mock.patch.object(
test_module.MediationManager,
"get_default_mediator",
async_mock.CoroutineMock(return_value=self.mock_record),
) as mock_mgr_get_default_record, async_mock.patch.object(
test_module.MediationManager,
"set_default_mediator_by_id",
async_mock.CoroutineMock(),
) as mock_mgr_set_default_record_by_id, async_mock.patch.object(
test_module.web, "json_response"
) as json_response:
await test_module.set_default_mediator(self.request)
json_response.assert_called_once_with(
self.mock_record.serialize.return_value,
status=201,
)
async def test_set_default_mediator_storage_error(self):
self.request.match_info = {
"mediation_id": "bad_id",
}
self.request.query = {}
self.context.session = async_mock.CoroutineMock()
with async_mock.patch.object(
test_module.MediationManager,
"get_default_mediator",
async_mock.CoroutineMock(side_effect=test_module.StorageError()),
) as mock_mgr_get_default_record, async_mock.patch.object(
test_module.MediationManager,
"set_default_mediator_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageError()),
) as mock_mgr_set_default_record_by_id, async_mock.patch.object(
test_module.web, "json_response"
) as json_response:
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.set_default_mediator(self.request)
async def test_clear_default_mediator(self):
self.request.query = {}
self.context.session = async_mock.CoroutineMock()
with async_mock.patch.object(
test_module.MediationManager,
"get_default_mediator",
async_mock.CoroutineMock(return_value=self.mock_record),
) as mock_mgr_get_default_record, async_mock.patch.object(
test_module.MediationManager,
"clear_default_mediator",
async_mock.CoroutineMock(),
) as mock_mgr_clear_default_record_by_id, async_mock.patch.object(
test_module.web, "json_response"
) as json_response:
await test_module.clear_default_mediator(self.request)
json_response.assert_called_once_with(
self.mock_record.serialize.return_value,
status=201,
)
async def test_clear_default_mediator_storage_error(self):
self.request.query = {}
self.context.session = async_mock.CoroutineMock()
with async_mock.patch.object(
test_module.MediationManager,
"get_default_mediator",
async_mock.CoroutineMock(side_effect=test_module.StorageError()),
) as mock_mgr_get_default_record, async_mock.patch.object(
test_module.MediationManager,
"clear_default_mediator",
async_mock.CoroutineMock(),
) as mock_mgr_clear_default_record_by_id, async_mock.patch.object(
test_module.web, "json_response"
) as json_response:
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.clear_default_mediator(self.request)
async def test_register(self):
mock_app = async_mock.MagicMock()
mock_app.add_routes = async_mock.MagicMock()
await test_module.register(mock_app)
mock_app.add_routes.assert_called_once()
async def test_post_process_routes(self):
mock_app = async_mock.MagicMock(_state={"swagger_dict": {}})
test_module.post_process_routes(mock_app)
assert "tags" in mock_app._state["swagger_dict"]
| true
| true
|
7902a683656203949c1d46f98d8fbe04a39cc4e9
| 298
|
py
|
Python
|
django_budget/budget/migrations/0005_delete_vehiclelog.py
|
MadeleenRoestorff/django_budget
|
3e13d96b9e3468ec1e3d059373899691d81bebaa
|
[
"MIT"
] | null | null | null |
django_budget/budget/migrations/0005_delete_vehiclelog.py
|
MadeleenRoestorff/django_budget
|
3e13d96b9e3468ec1e3d059373899691d81bebaa
|
[
"MIT"
] | 1
|
2021-12-01T10:51:08.000Z
|
2021-12-01T13:40:07.000Z
|
django_budget/budget/migrations/0005_delete_vehiclelog.py
|
MadeleenRoestorff/django_budget
|
3e13d96b9e3468ec1e3d059373899691d81bebaa
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.8 on 2021-11-29 09:01
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('budget', '0004_auto_20211125_1330'),
]
operations = [
migrations.DeleteModel(
name='VehicleLog',
),
]
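# Usage note (hedged): migration files like this one are generated by Django's
# "python manage.py makemigrations budget" and applied with
# "python manage.py migrate budget"; the app label "budget" comes from the
# dependency declared above.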
| 17.529412
| 47
| 0.607383
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('budget', '0004_auto_20211125_1330'),
]
operations = [
migrations.DeleteModel(
name='VehicleLog',
),
]
| true
| true
|
7902a6b01d177e8dc60a18f379430c98d05385f0
| 119
|
py
|
Python
|
grtoolkit/Mechanics/Friction/__init__.py
|
ZenosParadox/grtoolkit
|
2e34f151a78f57864e39e572c221ca4b73e48bb7
|
[
"MIT"
] | 3
|
2020-02-02T14:33:30.000Z
|
2020-07-29T00:27:46.000Z
|
grtoolkit/Mechanics/Friction/__init__.py
|
ZenosParadox/grtoolkit
|
2e34f151a78f57864e39e572c221ca4b73e48bb7
|
[
"MIT"
] | null | null | null |
grtoolkit/Mechanics/Friction/__init__.py
|
ZenosParadox/grtoolkit
|
2e34f151a78f57864e39e572c221ca4b73e48bb7
|
[
"MIT"
] | 2
|
2020-02-02T14:33:32.000Z
|
2022-03-21T14:33:34.000Z
|
def Coeff_Static_Friction(Mat_on_Mat):
# Read from CSV
pass
def Coeff_Kinetic_Friction(Mat_on_Mat):
pass
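# A minimal sketch (not part of grtoolkit) of how the two stubs above could
# look coefficients up in a CSV table; the file name
# "friction_coefficients.csv" and its columns ("material_pair", "static",
# "kinetic") are assumptions made for illustration only.
import csv
def _lookup_friction(mat_on_mat, column, csv_path="friction_coefficients.csv"):
    with open(csv_path, newline="") as f:
        for row in csv.DictReader(f):
            if row["material_pair"].strip().lower() == mat_on_mat.strip().lower():
                return float(row[column])
    raise KeyError("no friction data for " + repr(mat_on_mat))
# Usage against a hypothetical data file: _lookup_friction("steel on steel", "static")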
| 14.875
| 39
| 0.739496
|
def Coeff_Static_Friction(Mat_on_Mat):
pass
def Coeff_Kinetic_Friction(Mat_on_Mat):
pass
| true
| true
|
7902a6dd62affb36e1ade2107471d9c6f3fb1c73
| 1,217
|
py
|
Python
|
src/ML_Algorithms/ExpectationMaximization/m_step_gaussian_mixture.py
|
leonardbj/AIMS
|
7f8f484ab829f15366cb04ab37f799ad88edd29a
|
[
"MIT"
] | null | null | null |
src/ML_Algorithms/ExpectationMaximization/m_step_gaussian_mixture.py
|
leonardbj/AIMS
|
7f8f484ab829f15366cb04ab37f799ad88edd29a
|
[
"MIT"
] | null | null | null |
src/ML_Algorithms/ExpectationMaximization/m_step_gaussian_mixture.py
|
leonardbj/AIMS
|
7f8f484ab829f15366cb04ab37f799ad88edd29a
|
[
"MIT"
] | null | null | null |
""" converted from Matlab code
source: http://www.robots.ox.ac.uk/~fwood/teaching/AIMS_CDT_ML_2015/homework/HW_2_em/
"""
import numpy as np
def m_step_gaussian_mixture(data, gamma):
"""% Performs the M-step of the EM algorithm for gaussain mixture model.
%
% @param data : n x d matrix with rows as d dimensional data points
% @param gamma : n x k matrix of resposibilities
%
% @return pi : k x 1 array
% @return mu : k x d matrix of maximized cluster centers
% @return sigma : cell array of maximized
%
"""
n = np.shape(data)[0]
d = np.shape(data)[1]
k = np.shape(gamma)[1]
pi = np.zeros(k)
mu = np.zeros((k, d))
sigma = np.zeros((k, d, d))
for kk in range(k):
Nkk = np.sum(gamma[:, kk])
pi[kk] = Nkk / n
for dd in range(d):
mu[kk, dd] = np.sum(gamma[:, kk] * data[:, dd], axis=0) / Nkk
for kk in range(k):
Nkk = np.sum(gamma[:, kk])
centered_data = data - mu[kk, :]
for nn in range(n):
sigma[kk] += gamma[nn, kk] * np.dot(centered_data[nn, None].T, centered_data[nn, None])
sigma[kk] /= Nkk
return [mu, sigma, pi]
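# Minimal usage sketch (not part of the original file): random data and
# row-normalized responsibilities, with n=200, d=2, k=3 chosen arbitrarily.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    data = rng.normal(size=(200, 2))                    # n x d data matrix
    gamma = rng.random(size=(200, 3))                   # n x k responsibilities
    gamma = gamma / gamma.sum(axis=1, keepdims=True)    # each row sums to 1
    mu, sigma, pi = m_step_gaussian_mixture(data, gamma)
    print(pi.sum())                 # mixing weights sum to 1
    print(mu.shape, sigma.shape)    # (3, 2) and (3, 2, 2)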
| 29.682927
| 99
| 0.555464
|
import numpy as np
def m_step_gaussian_mixture(data, gamma):
n = np.shape(data)[0]
d = np.shape(data)[1]
k = np.shape(gamma)[1]
pi = np.zeros(k)
mu = np.zeros((k, d))
sigma = np.zeros((k, d, d))
for kk in range(k):
Nkk = np.sum(gamma[:, kk])
pi[kk] = Nkk / n
for dd in range(d):
mu[kk, dd] = np.sum(gamma[:, kk] * data[:, dd], axis=0) / Nkk
for kk in range(k):
Nkk = np.sum(gamma[:, kk])
centered_data = data - mu[kk, :]
for nn in range(n):
sigma[kk] += gamma[nn, kk] * np.dot(centered_data[nn, None].T, centered_data[nn, None])
sigma[kk] /= Nkk
return [mu, sigma, pi]
| true
| true
|
7902a7a41dcbe980541fa794f0964f38a7f3a7e0
| 433
|
py
|
Python
|
scripts/what_web_fetch_cpes.py
|
Wolodija/aucote
|
bb21ff02965ed0cca5a55ee559eae77856d9866c
|
[
"Apache-2.0"
] | 1
|
2019-12-05T11:17:18.000Z
|
2019-12-05T11:17:18.000Z
|
scripts/what_web_fetch_cpes.py
|
FCG-LLC/aucote
|
bb21ff02965ed0cca5a55ee559eae77856d9866c
|
[
"Apache-2.0"
] | 13
|
2019-12-05T10:34:41.000Z
|
2019-12-05T10:49:27.000Z
|
scripts/what_web_fetch_cpes.py
|
Wolodija/aucote
|
bb21ff02965ed0cca5a55ee559eae77856d9866c
|
[
"Apache-2.0"
] | 4
|
2019-11-09T17:37:07.000Z
|
2019-12-16T09:50:02.000Z
|
from sys import argv
from requests import get
import re
app_name = argv[1]
result = get('https://nvd.nist.gov/view/vuln/search-results?query={0}'.format(app_name))
cves = re.findall(r"CVE-\d{4}-\d+", result.text)
for cve in reversed(cves):
result = get('https://nvd.nist.gov/vuln/detail/' + cve)
cpes = re.findall(r">(cpe.*?)</a>", result.text)
if cpes:
print("{0}.tb:{1}".format(app_name, cpes[0]))
break
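# Hypothetical invocation (the application name is only an example):
#   python scripts/what_web_fetch_cpes.py wordpress
# which prints a single line of the form "wordpress.tb:<first CPE found>".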
| 27.0625
| 88
| 0.642032
|
from sys import argv
from requests import get
import re
app_name = argv[1]
result = get('https://nvd.nist.gov/view/vuln/search-results?query={0}'.format(app_name))
cves = re.findall(r"CVE-\d{4}-\d+", result.text)
for cve in reversed(cves):
result = get('https://nvd.nist.gov/vuln/detail/' + cve)
cpes = re.findall(r">(cpe.*?)</a>", result.text)
if cpes:
print("{0}.tb:{1}".format(app_name, cpes[0]))
break
| true
| true
|
7902a7f18b40495ac61fcbbfc7b7ac409ac842d0
| 862
|
py
|
Python
|
Smiley_face.py
|
ShashankShenoy21/Matlab-and-Python
|
148fe060dcbc73f75ac9fcd8bef9f31f82b15e67
|
[
"MIT"
] | null | null | null |
Smiley_face.py
|
ShashankShenoy21/Matlab-and-Python
|
148fe060dcbc73f75ac9fcd8bef9f31f82b15e67
|
[
"MIT"
] | null | null | null |
Smiley_face.py
|
ShashankShenoy21/Matlab-and-Python
|
148fe060dcbc73f75ac9fcd8bef9f31f82b15e67
|
[
"MIT"
] | null | null | null |
# Python program to draw smile
# face emoji using turtle
import turtle
# turtle object
pen = turtle.Turtle()
# function for creation of eye
def eye(col, rad):
pen.down()
pen.fillcolor(col)
pen.begin_fill()
pen.circle(rad)
pen.end_fill()
pen.up()
# draw face
pen.fillcolor('yellow')
pen.begin_fill()
pen.circle(100)
pen.end_fill()
pen.up()
# draw eyes
pen.goto(-40, 120)
eye('white', 15)
pen.goto(-37, 125)
eye('black', 5)
pen.goto(40, 120)
eye('white', 15)
pen.goto(40, 125)
eye('black', 5)
# draw nose
pen.goto(0, 75)
eye('black', 8)
# draw mouth
pen.goto(-40, 85)
pen.down()
pen.right(90)
pen.circle(40, 180)
pen.up()
# draw tongue
pen.goto(-10, 45)
pen.down()
pen.right(180)
pen.fillcolor('red')
pen.begin_fill()
pen.circle(10, 180)
pen.end_fill()
pen.hideturtle()
| 15.672727
| 31
| 0.616009
|
import turtle
pen = turtle.Turtle()
def eye(col, rad):
pen.down()
pen.fillcolor(col)
pen.begin_fill()
pen.circle(rad)
pen.end_fill()
pen.up()
pen.fillcolor('yellow')
pen.begin_fill()
pen.circle(100)
pen.end_fill()
pen.up()
pen.goto(-40, 120)
eye('white', 15)
pen.goto(-37, 125)
eye('black', 5)
pen.goto(40, 120)
eye('white', 15)
pen.goto(40, 125)
eye('black', 5)
pen.goto(0, 75)
eye('black', 8)
pen.goto(-40, 85)
pen.down()
pen.right(90)
pen.circle(40, 180)
pen.up()
pen.goto(-10, 45)
pen.down()
pen.right(180)
pen.fillcolor('red')
pen.begin_fill()
pen.circle(10, 180)
pen.end_fill()
pen.hideturtle()
| true
| true
|
7902a815a49d93ab7dd1653dd1e5281ec186e3a0
| 3,017
|
py
|
Python
|
bob/devtools/scripts/update_bob.py
|
bioidiap/bob.devtools
|
98ec996056a1e11d6d9c91b619441610fbf60bbe
|
[
"BSD-3-Clause"
] | null | null | null |
bob/devtools/scripts/update_bob.py
|
bioidiap/bob.devtools
|
98ec996056a1e11d6d9c91b619441610fbf60bbe
|
[
"BSD-3-Clause"
] | null | null | null |
bob/devtools/scripts/update_bob.py
|
bioidiap/bob.devtools
|
98ec996056a1e11d6d9c91b619441610fbf60bbe
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import click
from ..log import get_logger, verbosity_option
from . import bdt
logger = get_logger(__name__)
@click.command(
epilog="""\b
Examples:
bdt gitlab update-bob -vv
bdt gitlab update-bob -vv --stable
"""
)
@click.option(
"--stable/--beta",
help="To use the stable versions in the list and pin packages.",
)
@verbosity_option()
@bdt.raise_on_error
def update_bob(stable):
"""Updates the Bob meta package with new packages."""
import tempfile
from ..ci import read_packages
from ..release import (
download_path,
get_gitlab_instance,
get_latest_tag_name,
)
gl = get_gitlab_instance()
    # download order.txt from nightlies and get the list of packages
nightlies = gl.projects.get("bob/nightlies")
with tempfile.NamedTemporaryFile() as f:
download_path(nightlies, "order.txt", f.name, ref="master")
packages = read_packages(f.name)
# find the list of public packages
public_packages, private_packages = [], []
for n, (package, branch) in enumerate(packages):
if package == "bob/bob":
continue
# determine package visibility
use_package = gl.projects.get(package)
is_public = use_package.attributes["visibility"] == "public"
if is_public:
public_packages.append(package.replace("bob/", ""))
else:
private_packages.append(package.replace("bob/", ""))
logger.debug(
"%s is %s", package, "public" if is_public else "not public"
)
logger.info("Found %d public packages", len(public_packages))
logger.info(
"The following packages were not public:\n%s",
"\n".join(private_packages),
)
    # if stable versions are requested, pin each package to its latest tag name
if stable:
logger.info("Getting latest tag names for the public packages")
tags = [
get_latest_tag_name(gl.projects.get(f"bob/{pkg}"))
for pkg in public_packages
]
public_packages = [
f"{pkg} =={tag}" for pkg, tag in zip(public_packages, tags)
]
# modify conda/meta.yaml and requirements.txt in bob/bob
logger.info("Updating conda/meta.yaml")
start_tag = "# LIST OF BOB PACKAGES - START"
end_tag = "# LIST OF BOB PACKAGES - END"
with open("conda/meta.yaml") as f:
lines = f.read()
i1 = lines.find(start_tag) + len(start_tag)
i2 = lines.find(end_tag)
lines = (
lines[:i1]
+ "\n - ".join([""] + public_packages)
+ "\n "
+ lines[i2:]
)
with open("conda/meta.yaml", "w") as f:
f.write(lines)
logger.info("Updating requirements.txt")
with open("requirements.txt", "w") as f:
f.write("\n".join(public_packages) + "\n")
click.echo(
"You may need to add the ` # [linux]` tag in front of linux only "
"packages in conda/meta.yaml"
)
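# A minimal, self-contained sketch (not part of bob.devtools) of the START/END
# tag splice applied to conda/meta.yaml above, run on an in-memory string so
# nothing is assumed about the real file; the package names are placeholders.
if __name__ == "__main__":
    demo = (
        "requirements:\n"
        "  run:\n"
        "    # LIST OF BOB PACKAGES - START\n"
        "    - old.package ==1.0\n"
        "    # LIST OF BOB PACKAGES - END\n"
    )
    start_tag = "# LIST OF BOB PACKAGES - START"
    end_tag = "# LIST OF BOB PACKAGES - END"
    i1 = demo.find(start_tag) + len(start_tag)
    i2 = demo.find(end_tag)
    spliced = demo[:i1] + "\n    - ".join([""] + ["pkg.a ==2.0", "pkg.b ==3.1"]) + "\n    " + demo[i2:]
    print(spliced)  # the old entry is replaced by the two placeholder packages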
| 27.427273
| 75
| 0.603911
|
import click
from ..log import get_logger, verbosity_option
from . import bdt
logger = get_logger(__name__)
@click.command(
epilog="""\b
Examples:
bdt gitlab update-bob -vv
bdt gitlab update-bob -vv --stable
"""
)
@click.option(
"--stable/--beta",
help="To use the stable versions in the list and pin packages.",
)
@verbosity_option()
@bdt.raise_on_error
def update_bob(stable):
import tempfile
from ..ci import read_packages
from ..release import (
download_path,
get_gitlab_instance,
get_latest_tag_name,
)
gl = get_gitlab_instance()
nightlies = gl.projects.get("bob/nightlies")
with tempfile.NamedTemporaryFile() as f:
download_path(nightlies, "order.txt", f.name, ref="master")
packages = read_packages(f.name)
public_packages, private_packages = [], []
for n, (package, branch) in enumerate(packages):
if package == "bob/bob":
continue
use_package = gl.projects.get(package)
is_public = use_package.attributes["visibility"] == "public"
if is_public:
public_packages.append(package.replace("bob/", ""))
else:
private_packages.append(package.replace("bob/", ""))
logger.debug(
"%s is %s", package, "public" if is_public else "not public"
)
logger.info("Found %d public packages", len(public_packages))
logger.info(
"The following packages were not public:\n%s",
"\n".join(private_packages),
)
if stable:
logger.info("Getting latest tag names for the public packages")
tags = [
get_latest_tag_name(gl.projects.get(f"bob/{pkg}"))
for pkg in public_packages
]
public_packages = [
f"{pkg} =={tag}" for pkg, tag in zip(public_packages, tags)
]
logger.info("Updating conda/meta.yaml")
start_tag = "# LIST OF BOB PACKAGES - START"
end_tag = "# LIST OF BOB PACKAGES - END"
with open("conda/meta.yaml") as f:
lines = f.read()
i1 = lines.find(start_tag) + len(start_tag)
i2 = lines.find(end_tag)
lines = (
lines[:i1]
+ "\n - ".join([""] + public_packages)
+ "\n "
+ lines[i2:]
)
with open("conda/meta.yaml", "w") as f:
f.write(lines)
logger.info("Updating requirements.txt")
with open("requirements.txt", "w") as f:
f.write("\n".join(public_packages) + "\n")
click.echo(
"You may need to add the ` # [linux]` tag in front of linux only "
"packages in conda/meta.yaml"
)
| true
| true
|
7902a87bd46b24158775ae80473a412640509329
| 1,145
|
py
|
Python
|
apps/bloguser/pipline.py
|
dryprojects/MyBlog
|
ec04ba2bc658e96cddeb1d4766047ca8e89ff656
|
[
"BSD-3-Clause"
] | 2
|
2021-08-17T13:29:21.000Z
|
2021-09-04T05:00:01.000Z
|
apps/bloguser/pipline.py
|
dryprojects/MyBlog
|
ec04ba2bc658e96cddeb1d4766047ca8e89ff656
|
[
"BSD-3-Clause"
] | 1
|
2020-07-16T11:22:32.000Z
|
2020-07-16T11:22:32.000Z
|
apps/bloguser/pipline.py
|
dryprojects/MyBlog
|
ec04ba2bc658e96cddeb1d4766047ca8e89ff656
|
[
"BSD-3-Clause"
] | 1
|
2020-09-18T10:41:59.000Z
|
2020-09-18T10:41:59.000Z
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
@author: nico
@file: pipline.py
@time: 2018/05/05
"""
from django.contrib.auth import get_user_model
from bloguser.utils import get_image_from_url
from uuid import uuid4
User = get_user_model()
def save_bloguser_extra_profile(backend, user, response, *args, **kwargs):
"""
see more:
http://python-social-auth.readthedocs.io/en/latest/use_cases.html#retrieve-google-friends
http://python-social-auth.readthedocs.io/en/latest/pipeline.html
:param backend:
:param user:
:param response:
:param args:
:param kwargs:
:return:
"""
if backend.name == 'github':
        # Fetch the URL of the user's GitHub avatar and save a local copy of the image
image_url = response.get('avatar_url')
image_file = get_image_from_url(image_url)
if image_file is not None:
            # Name the avatar file with a uuid
avatar_name = 'avatar' + uuid4().hex[:16]
if user.image == 'bloguser/avatar.png':
                # Only replace the default avatar; if the user set a custom one, leave it untouched
user.image.save(avatar_name, image_file)
#user.image_url = image_url
user.save()
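# For this step to run it must be registered in the python-social-auth pipeline
# setting. A hedged sketch of that settings.py entry is below (kept as comments
# because it belongs in the Django settings module, not here); the dotted path
# "bloguser.pipline.save_bloguser_extra_profile" assumes the app is importable
# as "bloguser" in this project.
#
# SOCIAL_AUTH_PIPELINE = (
#     'social_core.pipeline.social_auth.social_details',
#     'social_core.pipeline.social_auth.social_uid',
#     'social_core.pipeline.social_auth.social_user',
#     'social_core.pipeline.user.get_username',
#     'social_core.pipeline.user.create_user',
#     'social_core.pipeline.social_auth.associate_user',
#     'social_core.pipeline.user.user_details',
#     'bloguser.pipline.save_bloguser_extra_profile',
# )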
| 25.444444
| 97
| 0.628821
|
from django.contrib.auth import get_user_model
from bloguser.utils import get_image_from_url
from uuid import uuid4
User = get_user_model()
def save_bloguser_extra_profile(backend, user, response, *args, **kwargs):
if backend.name == 'github':
image_url = response.get('avatar_url')
image_file = get_image_from_url(image_url)
if image_file is not None:
avatar_name = 'avatar' + uuid4().hex[:16]
if user.image == 'bloguser/avatar.png':
user.image.save(avatar_name, image_file)
user.save()
| true
| true
|
7902a8889fddace3453a6e6fa5254c23f82a4adf
| 8,097
|
py
|
Python
|
trainCNN.py
|
linrio/WhetherOrNotMe
|
239a6d3be82fecb58eb3ade4cf2966d5d294ce10
|
[
"Apache-2.0"
] | null | null | null |
trainCNN.py
|
linrio/WhetherOrNotMe
|
239a6d3be82fecb58eb3ade4cf2966d5d294ce10
|
[
"Apache-2.0"
] | null | null | null |
trainCNN.py
|
linrio/WhetherOrNotMe
|
239a6d3be82fecb58eb3ade4cf2966d5d294ce10
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
import cv2
import numpy as np
import os
from sklearn.model_selection import train_test_split
import random
import sys
my_image_path = 'my_face'
others_image_path = 'other_people'
image_data = []
label_data = []
def get_padding_size(image):
#def get_padding_size(image):
    h, w, _ = image.shape  # height, width and number of channels
longest_edge = max(h, w)
top, bottom, left, right = (0, 0, 0, 0)
    if h < longest_edge:  # width is the longer side: pad top and bottom
dh = longest_edge - h
top = dh // 2
bottom = dh - top
    elif w < longest_edge:  # height is the longer side: pad left and right
dw = longest_edge - w
left = dw // 2
right = dw - left
else:
pass
return top, bottom, left, right #(0,0,0,0)
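# Worked example of get_padding_size (illustrative numbers): for a 60 (h) x
# 100 (w) image, longest_edge is 100, so dh = 40 and the function returns
# (top, bottom, left, right) = (20, 20, 0, 0); cv2.copyMakeBorder then pads
# the image into a 100 x 100 square before it is resized to 64 x 64.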
# os.listdir(path): path is the directory to list; it returns every entry in that directory.
def read_data(img_path, image_h=64, image_w=64):
for filename in os.listdir(img_path):
if filename.endswith('.jpg'):
filepath = os.path.join(img_path, filename)
image = cv2.imread(filepath)
top, bottom, left, right = get_padding_size(image)
image_pad = cv2.copyMakeBorder(image, top , bottom, left, right, cv2.BORDER_CONSTANT, value=[0, 0, 0])
image = cv2.resize(image_pad, (image_h, image_w))
image_data.append(image)
label_data.append(img_path)
read_data(others_image_path)
read_data(my_image_path)
image_data = np.array(image_data)
label_data = np.array([[0,1] if label == 'my_face' else [1,0] for label in label_data])  # label must match my_image_path ('my_face')
# train_test_split randomly splits the samples into train and test data; test_size is the held-out fraction (an absolute count if an integer) and random_state seeds the shuffle.
train_x, test_x, train_y, test_y = train_test_split(image_data, label_data, test_size=0.05, random_state=random.randint(0, 100))
# image (height=64 width=64 channel=3)
train_x = train_x.reshape(train_x.shape[0], 64, 64, 3)
test_x = test_x.reshape(test_x.shape[0], 64, 64, 3)
# nomalize
train_x = train_x.astype('float32') / 255.0
test_x = test_x.astype('float32') / 255.0
print(len(train_x), len(train_y))
print(len(test_x), len(test_y))
#############################################################
#batch_size = 128
batch_size = 64
num_batch = len(train_x) // batch_size
# tf.placeholder(): a placeholder tensor that is fed with real data at session.run() time.
X = tf.placeholder(tf.float32, [None, 64, 64, 3]) # 图片大小64x64 channel=3
Y = tf.placeholder(tf.float32, [None, 2])
keep_prob_5 = tf.placeholder(tf.float32)
keep_prob_75 = tf.placeholder(tf.float32)
def panda_joke_cnn():
W_c1 = tf.Variable(tf.random_normal([3, 3, 3, 32], stddev=0.01))
b_c1 = tf.Variable(tf.random_normal([32]))
conv1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(X, W_c1, strides=[1, 1, 1, 1], padding='SAME'), b_c1))
conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv1 = tf.nn.dropout(conv1, keep_prob_5)
    # Per block: conv (W*X), add bias b, ReLU, max-pool, then dropout.
    # Dropout randomly disables some hidden units during training; the disabled units are temporarily not part of the network, but their weights are kept (just not updated), because they may be active again on the next batch.
W_c2 = tf.Variable(tf.random_normal([3, 3, 32, 64], stddev=0.01))
b_c2 = tf.Variable(tf.random_normal([64]))
conv2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv1, W_c2, strides=[1, 1, 1, 1], padding='SAME'), b_c2))
conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv2 = tf.nn.dropout(conv2, keep_prob_5)
W_c3 = tf.Variable(tf.random_normal([3, 3, 64, 64], stddev=0.01))
b_c3 = tf.Variable(tf.random_normal([64]))
conv3 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv2, W_c3, strides=[1, 1, 1, 1], padding='SAME'), b_c3))
conv3 = tf.nn.max_pool(conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv3 = tf.nn.dropout(conv3, keep_prob_5)
W_c31 = tf.Variable(tf.random_normal([3, 3, 64, 128], stddev=0.01))
b_c31 = tf.Variable(tf.random_normal([128]))
conv31 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv3, W_c31, strides=[1, 1, 1, 1], padding='SAME'), b_c31))
conv31 = tf.nn.max_pool(conv31, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv31 = tf.nn.dropout(conv31, keep_prob_5)
W_c32 = tf.Variable(tf.random_normal([3, 3, 128, 128], stddev=0.01))
b_c32 = tf.Variable(tf.random_normal([128]))
conv32 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv31, W_c32, strides=[1, 1, 1, 1], padding='SAME'), b_c32))
conv32 = tf.nn.max_pool(conv32, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv32 = tf.nn.dropout(conv32, keep_prob_5)
# Fully connected layer
#W_d = tf.Variable(tf.random_normal([8*16*32, 512], stddev=0.01))
W_d = tf.Variable(tf.random_normal([128*128, 512], stddev=0.01))
b_d = tf.Variable(tf.random_normal([512]))
dense = tf.reshape(conv32, [-1, W_d.get_shape().as_list()[0]])
dense = tf.nn.relu(tf.add(tf.matmul(dense, W_d), b_d))
dense = tf.nn.dropout(dense, keep_prob_75)
W_out = tf.Variable(tf.random_normal([512, 2], stddev=0.01))
b_out = tf.Variable(tf.random_normal([2]))
out = tf.add(tf.matmul(dense, W_out), b_out)
return out
#learning_rate = 0.001
def train_cnn():
output = panda_joke_cnn()
    # softmax_cross_entropy_with_logits(): first applies softmax to the network's final-layer output,
    # then takes the cross entropy between that softmax vector [Y1, Y2, Y3, ...] and the true labels.
    # It returns a per-example vector rather than a scalar, so tf.reduce_mean averages it into the loss.
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=output))
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
#optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(output, 1), tf.argmax(Y, 1)), tf.float32))
tf.summary.scalar("loss", loss)
tf.summary.scalar("accuracy", accuracy)
merged_summary_op = tf.summary.merge_all()
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
summary_writer = tf.summary.FileWriter('./log', graph=tf.get_default_graph())
for e in range(50):
for i in range(num_batch):
batch_x = train_x[i*batch_size : (i+1)*batch_size]
batch_y = train_y[i*batch_size : (i+1)*batch_size]
_, loss_, summary = sess.run([optimizer, loss, merged_summary_op], feed_dict={X: batch_x, Y: batch_y, keep_prob_5:0.5, keep_prob_75: 0.75})
summary_writer.add_summary(summary, e*num_batch+i)
print(e*num_batch+i, "loss= ", loss_)
if (e*num_batch+i) % 100 == 0:
acc = accuracy.eval({X: test_x, Y: test_y, keep_prob_5:1.0, keep_prob_75: 1.0})
print(e*num_batch+i,"acc= ", +acc)
# save model
if acc > 0.99:
saver.save(sess, "G:/codes/tensorflow2/WhetherOrNotMe/models/whether_orNot_me.model", global_step=e*num_batch+i)
if e*num_batch+i > 0:
sys.exit(0)
train_cnn()
output = panda_joke_cnn()
predict = tf.argmax(output, 1)
saver = tf.train.Saver()
sess = tf.Session()
saver.restore(sess, tf.train.latest_checkpoint('.'))
def is_my_face(image):
res = sess.run(predict, feed_dict={X: [image/255.0], keep_prob_5:1.0, keep_prob_75: 1.0})
if res[0] == 1:
return True
else:
return False
face_haar = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
face_haar.load('D:/Program Files (x86)/Miniconda3/Library/etc/haarcascades/haarcascade_frontalface_default.xml')
cam = cv2.VideoCapture(0)
while True:
_, img = cam.read()
gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_haar.detectMultiScale(gray_image, 1.3, 5)
for face_x,face_y,face_w,face_h in faces:
face = img[face_y:face_y+face_h, face_x:face_x+face_w]
face = cv2.resize(face, (64, 64))
print("my face:"+is_my_face(face))
cv2.imshow('img', face)
key = cv2.waitKey(30) & 0xff
if key == 27:
sys.exit(0)
sess.close()
| 40.283582
| 156
| 0.632827
|
import tensorflow as tf
import cv2
import numpy as np
import os
from sklearn.model_selection import train_test_split
import random
import sys
my_image_path = 'my_face'
others_image_path = 'other_people'
image_data = []
label_data = []
def get_padding_size(image):
h, w, _ = image.shape
longest_edge = max(h, w)
top, bottom, left, right = (0, 0, 0, 0)
    if h < longest_edge:
dh = longest_edge - h
top = dh // 2
bottom = dh - top
    elif w < longest_edge:
dw = longest_edge - w
left = dw // 2
right = dw - left
else:
pass
return top, bottom, left, right
def read_data(img_path, image_h=64, image_w=64):
for filename in os.listdir(img_path):
if filename.endswith('.jpg'):
filepath = os.path.join(img_path, filename)
image = cv2.imread(filepath)
top, bottom, left, right = get_padding_size(image)
image_pad = cv2.copyMakeBorder(image, top , bottom, left, right, cv2.BORDER_CONSTANT, value=[0, 0, 0])
image = cv2.resize(image_pad, (image_h, image_w))
image_data.append(image)
label_data.append(img_path)
read_data(others_image_path)
read_data(my_image_path)
image_data = np.array(image_data)
label_data = np.array([[0,1] if label == 'my_face' else [1,0] for label in label_data])
train_x, test_x, train_y, test_y = train_test_split(image_data, label_data, test_size=0.05, random_state=random.randint(0, 100))
train_x = train_x.reshape(train_x.shape[0], 64, 64, 3)
test_x = test_x.reshape(test_x.shape[0], 64, 64, 3)
train_x = train_x.astype('float32') / 255.0
test_x = test_x.astype('float32') / 255.0
print(len(train_x), len(train_y))
print(len(test_x), len(test_y))
batch_size = 64
num_batch = len(train_x) // batch_size
X = tf.placeholder(tf.float32, [None, 64, 64, 3])
Y = tf.placeholder(tf.float32, [None, 2])
keep_prob_5 = tf.placeholder(tf.float32)
keep_prob_75 = tf.placeholder(tf.float32)
def panda_joke_cnn():
    W_c1 = tf.Variable(tf.random_normal([3, 3, 3, 32], stddev=0.01))
    b_c1 = tf.Variable(tf.random_normal([32]))
    conv1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(X, W_c1, strides=[1, 1, 1, 1], padding='SAME'), b_c1))
    conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    conv1 = tf.nn.dropout(conv1, keep_prob_5)
    W_c2 = tf.Variable(tf.random_normal([3, 3, 32, 64], stddev=0.01))
    b_c2 = tf.Variable(tf.random_normal([64]))
    conv2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv1, W_c2, strides=[1, 1, 1, 1], padding='SAME'), b_c2))
    conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    conv2 = tf.nn.dropout(conv2, keep_prob_5)
    W_c3 = tf.Variable(tf.random_normal([3, 3, 64, 64], stddev=0.01))
    b_c3 = tf.Variable(tf.random_normal([64]))
    conv3 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv2, W_c3, strides=[1, 1, 1, 1], padding='SAME'), b_c3))
    conv3 = tf.nn.max_pool(conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    conv3 = tf.nn.dropout(conv3, keep_prob_5)
    W_c31 = tf.Variable(tf.random_normal([3, 3, 64, 128], stddev=0.01))
    b_c31 = tf.Variable(tf.random_normal([128]))
    conv31 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv3, W_c31, strides=[1, 1, 1, 1], padding='SAME'), b_c31))
    conv31 = tf.nn.max_pool(conv31, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    conv31 = tf.nn.dropout(conv31, keep_prob_5)
    W_c32 = tf.Variable(tf.random_normal([3, 3, 128, 128], stddev=0.01))
    b_c32 = tf.Variable(tf.random_normal([128]))
conv32 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv31, W_c32, strides=[1, 1, 1, 1], padding='SAME'), b_c32))
conv32 = tf.nn.max_pool(conv32, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv32 = tf.nn.dropout(conv32, keep_prob_5)
W_d = tf.Variable(tf.random_normal([128*128, 512], stddev=0.01))
b_d = tf.Variable(tf.random_normal([512]))
dense = tf.reshape(conv32, [-1, W_d.get_shape().as_list()[0]])
dense = tf.nn.relu(tf.add(tf.matmul(dense, W_d), b_d))
dense = tf.nn.dropout(dense, keep_prob_75)
W_out = tf.Variable(tf.random_normal([512, 2], stddev=0.01))
b_out = tf.Variable(tf.random_normal([2]))
out = tf.add(tf.matmul(dense, W_out), b_out)
return out
def train_cnn():
output = panda_joke_cnn()
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=output))
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(output, 1), tf.argmax(Y, 1)), tf.float32))
tf.summary.scalar("loss", loss)
tf.summary.scalar("accuracy", accuracy)
merged_summary_op = tf.summary.merge_all()
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
summary_writer = tf.summary.FileWriter('./log', graph=tf.get_default_graph())
for e in range(50):
for i in range(num_batch):
batch_x = train_x[i*batch_size : (i+1)*batch_size]
batch_y = train_y[i*batch_size : (i+1)*batch_size]
_, loss_, summary = sess.run([optimizer, loss, merged_summary_op], feed_dict={X: batch_x, Y: batch_y, keep_prob_5:0.5, keep_prob_75: 0.75})
summary_writer.add_summary(summary, e*num_batch+i)
print(e*num_batch+i, "loss= ", loss_)
if (e*num_batch+i) % 100 == 0:
acc = accuracy.eval({X: test_x, Y: test_y, keep_prob_5:1.0, keep_prob_75: 1.0})
print(e*num_batch+i,"acc= ", +acc)
if acc > 0.99:
saver.save(sess, "G:/codes/tensorflow2/WhetherOrNotMe/models/whether_orNot_me.model", global_step=e*num_batch+i)
if e*num_batch+i > 0:
sys.exit(0)
train_cnn()
output = panda_joke_cnn()
predict = tf.argmax(output, 1)
saver = tf.train.Saver()
sess = tf.Session()
saver.restore(sess, tf.train.latest_checkpoint('.'))
def is_my_face(image):
res = sess.run(predict, feed_dict={X: [image/255.0], keep_prob_5:1.0, keep_prob_75: 1.0})
if res[0] == 1:
return True
else:
return False
face_haar = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
face_haar.load('D:/Program Files (x86)/Miniconda3/Library/etc/haarcascades/haarcascade_frontalface_default.xml')
cam = cv2.VideoCapture(0)
while True:
_, img = cam.read()
gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_haar.detectMultiScale(gray_image, 1.3, 5)
for face_x,face_y,face_w,face_h in faces:
face = img[face_y:face_y+face_h, face_x:face_x+face_w]
face = cv2.resize(face, (64, 64))
print("my face:"+is_my_face(face))
cv2.imshow('img', face)
key = cv2.waitKey(30) & 0xff
if key == 27:
sys.exit(0)
sess.close()
| true
| true
|
7902a898bae3d0ee8b6a07c5b49fd1be50de7cc3
| 9,992
|
py
|
Python
|
exp/exp002/trainner.py
|
fkubota/kaggle-Rainforest-Connection-Species-Audio-Detection
|
7134edff0ba1c60f597b64a0efd953b7707b98e1
|
[
"MIT"
] | 1
|
2021-02-24T03:25:51.000Z
|
2021-02-24T03:25:51.000Z
|
exp/exp003/trainner.py
|
fkubota/kaggle-Rainforest-Connection-Species-Audio-Detection
|
7134edff0ba1c60f597b64a0efd953b7707b98e1
|
[
"MIT"
] | null | null | null |
exp/exp003/trainner.py
|
fkubota/kaggle-Rainforest-Connection-Species-Audio-Detection
|
7134edff0ba1c60f597b64a0efd953b7707b98e1
|
[
"MIT"
] | null | null | null |
from ipdb import set_trace as st
from icecream import ic
import gc
import os
import wandb
import pandas as pd
from fastprogress import progress_bar
from loguru import logger
import numpy as np
import torch
from sklearn.metrics import accuracy_score
import utils as U
import configuration as C
import result_handler as rh
from criterion import mixup_criterion
from early_stopping import EarlyStopping
def train_cv(config):
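# Cross-validation driver: for each fold, build a fresh model/optimizer/scheduler, train with
# early stopping, log per-epoch metrics to wandb, and collect out-of-fold sigmoid outputs to
# compute overall accuracy and LWLRAP at the end.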
# config
debug = config['globals']['debug']
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_fold = config['split']['n_fold']
n_epoch = config['globals']['num_epochs']
path_trn_tp = config['path']['path_train_tp']
n_classes = config['model']['params']['n_classes']
dir_save_exp, dir_save_ignore_exp, _ = U.get_save_dir_exp(config)
# load data
pwd = os.path.dirname(os.path.abspath(__file__))
trn_tp = pd.read_csv(f'{pwd}/{path_trn_tp}')
# init
acc_val_folds = []
lwlrap_val_folds = []
if debug:
oof_sig = np.zeros([n_classes*n_fold, n_classes])
else:
oof_sig = np.zeros([len(trn_tp), n_classes])
for i_fold in progress_bar(range(n_fold)):
# logger
logger.info("-" * 18)
logger.info(f'\tFold {i_fold + 1}/{n_fold}')
logger.info("-" * 18)
# preparation
model = C.get_model(config).to(device)
criterion = C.get_criterion(config)
optimizer = C.get_optimizer(model, config)
scheduler = C.get_scheduler(optimizer, config)
_, _, exp_name = U.get_save_dir_exp(config)
# wandb
wb_fold = wandb.init(project='kaggle-rfcx',
group=exp_name,
name=f'fold{i_fold}')
wb_fold.config.config = config
epochs = []
losses_trn = []
losses_val = []
accs_val = []
lwlraps_val = []
best_acc_val = 0
best_lwlrap_val = 0
best_loss_val = 0
best_output_sig = 0
save_path = f'{dir_save_ignore_exp}/'\
f'{model.__class__.__name__}_fold{i_fold}.pth'
early_stopping = EarlyStopping(patience=12,
verbose=True,
path=save_path,
trace_func=logger.info)
for epoch in range(1, n_epoch+1):
# run training for this epoch
result_dict = train_fold(i_fold, trn_tp, model,
criterion, optimizer,
scheduler, config)
val_idxs = result_dict['val_idxs']
output_sig = result_dict['output_sig']
loss_trn = result_dict['loss_trn']
loss_val = result_dict['loss_val']
acc_val = result_dict['acc_val']
lwlrap_val = result_dict['lwlrap_val']
logger.info(f'[fold({i_fold+1})epoch({epoch})]'
f'loss_trn={loss_trn:.6f} '
f'loss_val={loss_val:.6f} '
f'acc_val={acc_val:.6f} '
f'lwlrap_val={lwlrap_val:.6f}')
wb_fold.log({'epoch': epoch,
'loss_trn': loss_trn,
'loss_val': loss_val,
'acc_val': acc_val,
'lwlrap_val': lwlrap_val})
# store the per-epoch metrics
epochs.append(int(epoch))
losses_trn.append(loss_trn)
losses_val.append(loss_val)
accs_val.append(acc_val)
lwlraps_val.append(lwlrap_val)
# best model ?
is_update = early_stopping(loss_val, result_dict['model'], debug)
if is_update:
best_loss_val = loss_val
best_acc_val = acc_val
best_lwlrap_val = lwlrap_val
best_output_sig = output_sig
wb_fold.summary['loss_val'] = best_loss_val
wb_fold.summary['acc_val'] = best_acc_val
wb_fold.summary['lwlrap_val'] = best_lwlrap_val
if early_stopping.early_stop:
logger.info("Early stopping")
break
wb_fold.finish()
# result
rh.save_plot_figure(i_fold, epochs, losses_trn, accs_val, lwlraps_val,
losses_val, dir_save_exp)
rh.save_result_csv(i_fold, best_loss_val, best_acc_val, best_lwlrap_val,
dir_save_exp, config)
# --- fold end ---
# oof_sig
acc_val_folds.append(best_acc_val)
lwlrap_val_folds.append(best_lwlrap_val)
if debug:
oof_sig[i_fold*n_classes:(i_fold+1)*n_classes] = best_output_sig
else:
oof_sig[val_idxs, :] = best_output_sig
logger.info(f'best_loss_val: {best_loss_val:.6f}, '
f'best_acc_val: {best_acc_val:.6f}, '
f'best_lwlrap_val: {best_lwlrap_val:.6f}')
oof = np.argmax(oof_sig, axis=1)
oof_sig = torch.tensor(oof_sig)
labels = np.zeros([len(oof), 24], dtype=int)
if debug:
# use dummy values as the ground-truth labels
labels[:, 0] = 1
labels = torch.tensor(labels)
acc_oof = accuracy_score(np.zeros(len(oof)), oof)
lwlrap_oof = U.LWLRAP(oof_sig, labels)
else:
for i_id, id_ in enumerate(trn_tp['species_id'].values):
labels[i_id][id_] = 1
labels = torch.tensor(labels)
acc_oof = accuracy_score(trn_tp['species_id'].values, oof)
lwlrap_oof = U.LWLRAP(oof_sig, labels)
# acc_val_folds
acc_val_folds_mean = np.mean(acc_val_folds)
acc_val_folds_std = np.std(acc_val_folds)
logger.info(f'acc_folds(mean, std): '
f'{acc_val_folds_mean:.6f} +- {acc_val_folds_std:6f}')
logger.info(f'acc_oof: {acc_oof:6f}')
# lwlrap_val_folds
lwlrap_val_folds_mean = np.mean(lwlrap_val_folds)
lwlrap_val_folds_std = np.std(lwlrap_val_folds)
logger.info(f'lwlrap_folds(mean, std): '
f'{lwlrap_val_folds_mean:.6f} +- {lwlrap_val_folds_std:6f}')
logger.info(f'lwlrap_oof: {lwlrap_oof:6f}')
# wandb
wb_summary = wandb.init(project='kaggle-rfcx',
group=exp_name,
name='summary')
wb_summary.config.config = config
wb_summary.log({'acc_val_folds_mean': acc_val_folds_mean,
'acc_val_folds_std': acc_val_folds_std,
'acc_oof': acc_oof,
'lwlrap_val_folds_mean': lwlrap_val_folds_mean,
'lwlrap_val_folds_std': lwlrap_val_folds_std,
'lwlrap_oof': lwlrap_oof})
wb_summary.finish()
# free memory
del result_dict
del model
del optimizer
del scheduler
gc.collect()
torch.cuda.empty_cache()
def train_fold(i_fold, trn_tp, model,
criterion, optimizer,
scheduler, config):
mixup = config['globals']['mixup']
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
trn_idxs, val_idxs = C.get_index_fold(trn_tp, i_fold, config)
trn_tp_trn = trn_tp.iloc[trn_idxs].reset_index(drop=True)
trn_tp_val = trn_tp.iloc[val_idxs].reset_index(drop=True)
trn_loader = C.get_trn_val_loader(trn_tp_trn, 'train', config)
val_loader = C.get_trn_val_loader(trn_tp_val, 'valid', config)
# train
model.train()
epoch_train_loss = 0
for batch_idx, (data, target) in enumerate(trn_loader):
data, target = data.to(device), target.to(device)
if mixup:
data, targets_a, targets_b, lam = U.mixup_data(data,
target,
alpha=1.0)
optimizer.zero_grad()
output = model(data)
if mixup:
loss = mixup_criterion(criterion, output,
targets_a, targets_b, lam)
else:
loss = criterion(output, target)
loss.backward()
optimizer.step()
epoch_train_loss += loss.item()*data.size(0)
scheduler.step()
loss_trn = epoch_train_loss / len(trn_loader.dataset)
del data
# eval valid
loss_val, acc_val, lwlrap_val, output_sig = get_loss_score(model,
val_loader,
criterion,
device)
result_dict = {
'model': model,
'val_idxs': val_idxs,
'output_sig': output_sig,
'loss_trn': loss_trn,
'loss_val': loss_val,
'acc_val': acc_val,
'lwlrap_val': lwlrap_val
}
return result_dict
def get_loss_score(model, val_loader, criterion, device):
model.eval()
epoch_valid_loss = 0
y_pred_list = []
y_true_list = []
output_sig_list = []
lwlrap_val = 0
for batch_idx, (data, target) in enumerate(val_loader):
data, target = data.to(device), target.to(device)
output = model(data)
loss = criterion(output, target)
epoch_valid_loss += loss.item()*data.size(0)
output_ = output['output']
output_sig = output['output_sigmoid']
output_sig = output_sig.detach().cpu().numpy()
_y_pred = output_.detach().cpu().numpy().argmax(axis=1)
_y_true = target.detach().cpu().numpy().argmax(axis=1)
y_pred_list.append(_y_pred)
y_true_list.append(_y_true)
output_sig_list.append(output_sig)
lwlrap_val += U.LWLRAP(output_, target) / len(val_loader)
loss_val = epoch_valid_loss / len(val_loader.dataset)
y_pred = np.concatenate(y_pred_list, axis=0)
y_true = np.concatenate(y_true_list, axis=0)
output_sig = np.concatenate(output_sig_list, axis=0)
acc_val = accuracy_score(y_true, y_pred)
del data
return loss_val, acc_val, lwlrap_val, output_sig
| 36.870849
| 80
| 0.572258
|
from ipdb import set_trace as st
from icecream import ic
import gc
import os
import wandb
import pandas as pd
from fastprogress import progress_bar
from loguru import logger
import numpy as np
import torch
from sklearn.metrics import accuracy_score
import utils as U
import configuration as C
import result_handler as rh
from criterion import mixup_criterion
from early_stopping import EarlyStopping
def train_cv(config):
debug = config['globals']['debug']
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_fold = config['split']['n_fold']
n_epoch = config['globals']['num_epochs']
path_trn_tp = config['path']['path_train_tp']
n_classes = config['model']['params']['n_classes']
dir_save_exp, dir_save_ignore_exp, _ = U.get_save_dir_exp(config)
pwd = os.path.dirname(os.path.abspath(__file__))
trn_tp = pd.read_csv(f'{pwd}/{path_trn_tp}')
acc_val_folds = []
lwlrap_val_folds = []
if debug:
oof_sig = np.zeros([n_classes*n_fold, n_classes])
else:
oof_sig = np.zeros([len(trn_tp), n_classes])
for i_fold in progress_bar(range(n_fold)):
logger.info("-" * 18)
logger.info(f'\tFold {i_fold + 1}/{n_fold}')
logger.info("-" * 18)
model = C.get_model(config).to(device)
criterion = C.get_criterion(config)
optimizer = C.get_optimizer(model, config)
scheduler = C.get_scheduler(optimizer, config)
_, _, exp_name = U.get_save_dir_exp(config)
wb_fold = wandb.init(project='kaggle-rfcx',
group=exp_name,
name=f'fold{i_fold}')
wb_fold.config.config = config
epochs = []
losses_trn = []
losses_val = []
accs_val = []
lwlraps_val = []
best_acc_val = 0
best_lwlrap_val = 0
best_loss_val = 0
best_output_sig = 0
save_path = f'{dir_save_ignore_exp}/'\
f'{model.__class__.__name__}_fold{i_fold}.pth'
early_stopping = EarlyStopping(patience=12,
verbose=True,
path=save_path,
trace_func=logger.info)
for epoch in range(1, n_epoch+1):
result_dict = train_fold(i_fold, trn_tp, model,
criterion, optimizer,
scheduler, config)
val_idxs = result_dict['val_idxs']
output_sig = result_dict['output_sig']
loss_trn = result_dict['loss_trn']
loss_val = result_dict['loss_val']
acc_val = result_dict['acc_val']
lwlrap_val = result_dict['lwlrap_val']
logger.info(f'[fold({i_fold+1})epoch({epoch})]'
f'loss_trn={loss_trn:.6f} '
f'loss_val={loss_val:.6f} '
f'acc_val={acc_val:.6f} '
f'lwlrap_val={lwlrap_val:.6f}')
wb_fold.log({'epoch': epoch,
'loss_trn': loss_trn,
'loss_val': loss_val,
'acc_val': acc_val,
'lwlrap_val': lwlrap_val})
epochs.append(int(epoch))
losses_trn.append(loss_trn)
losses_val.append(loss_val)
accs_val.append(acc_val)
lwlraps_val.append(lwlrap_val)
is_update = early_stopping(loss_val, result_dict['model'], debug)
if is_update:
best_loss_val = loss_val
best_acc_val = acc_val
best_lwlrap_val = lwlrap_val
best_output_sig = output_sig
wb_fold.summary['loss_val'] = best_loss_val
wb_fold.summary['acc_val'] = best_acc_val
wb_fold.summary['lwlrap_val'] = best_lwlrap_val
if early_stopping.early_stop:
logger.info("Early stopping")
break
wb_fold.finish()
rh.save_plot_figure(i_fold, epochs, losses_trn, accs_val, lwlraps_val,
losses_val, dir_save_exp)
rh.save_result_csv(i_fold, best_loss_val, best_acc_val, best_lwlrap_val,
dir_save_exp, config)
acc_val_folds.append(best_acc_val)
lwlrap_val_folds.append(best_lwlrap_val)
if debug:
oof_sig[i_fold*n_classes:(i_fold+1)*n_classes] = best_output_sig
else:
oof_sig[val_idxs, :] = best_output_sig
logger.info(f'best_loss_val: {best_loss_val:.6f}, '
f'best_acc_val: {best_acc_val:.6f}, '
f'best_lwlrap_val: {best_lwlrap_val:.6f}')
oof = np.argmax(oof_sig, axis=1)
oof_sig = torch.tensor(oof_sig)
labels = np.zeros([len(oof), 24], dtype=int)
if debug:
labels[:, 0] = 1
labels = torch.tensor(labels)
acc_oof = accuracy_score(np.zeros(len(oof)), oof)
lwlrap_oof = U.LWLRAP(oof_sig, labels)
else:
for i_id, id_ in enumerate(trn_tp['species_id'].values):
labels[i_id][id_] = 1
labels = torch.tensor(labels)
acc_oof = accuracy_score(trn_tp['species_id'].values, oof)
lwlrap_oof = U.LWLRAP(oof_sig, labels)
acc_val_folds_mean = np.mean(acc_val_folds)
acc_val_folds_std = np.std(acc_val_folds)
logger.info(f'acc_folds(mean, std): '
f'{acc_val_folds_mean:.6f} +- {acc_val_folds_std:6f}')
logger.info(f'acc_oof: {acc_oof:6f}')
lwlrap_val_folds_mean = np.mean(lwlrap_val_folds)
lwlrap_val_folds_std = np.std(lwlrap_val_folds)
logger.info(f'lwlrap_folds(mean, std): '
f'{lwlrap_val_folds_mean:.6f} +- {lwlrap_val_folds_std:6f}')
logger.info(f'lwlrap_oof: {lwlrap_oof:6f}')
wb_summary = wandb.init(project='kaggle-rfcx',
group=exp_name,
name='summary')
wb_summary.config.config = config
wb_summary.log({'acc_val_folds_mean': acc_val_folds_mean,
'acc_val_folds_std': acc_val_folds_std,
'acc_oof': acc_oof,
'lwlrap_val_folds_mean': lwlrap_val_folds_mean,
'lwlrap_val_folds_std': lwlrap_val_folds_std,
'lwlrap_oof': lwlrap_oof})
wb_summary.finish()
del result_dict
del model
del optimizer
del scheduler
gc.collect()
torch.cuda.empty_cache()
def train_fold(i_fold, trn_tp, model,
criterion, optimizer,
scheduler, config):
mixup = config['globals']['mixup']
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
trn_idxs, val_idxs = C.get_index_fold(trn_tp, i_fold, config)
trn_tp_trn = trn_tp.iloc[trn_idxs].reset_index(drop=True)
trn_tp_val = trn_tp.iloc[val_idxs].reset_index(drop=True)
trn_loader = C.get_trn_val_loader(trn_tp_trn, 'train', config)
val_loader = C.get_trn_val_loader(trn_tp_val, 'valid', config)
model.train()
epoch_train_loss = 0
for batch_idx, (data, target) in enumerate(trn_loader):
data, target = data.to(device), target.to(device)
if mixup:
data, targets_a, targets_b, lam = U.mixup_data(data,
target,
alpha=1.0)
optimizer.zero_grad()
output = model(data)
if mixup:
loss = mixup_criterion(criterion, output,
targets_a, targets_b, lam)
else:
loss = criterion(output, target)
loss.backward()
optimizer.step()
epoch_train_loss += loss.item()*data.size(0)
scheduler.step()
loss_trn = epoch_train_loss / len(trn_loader.dataset)
del data
loss_val, acc_val, lwlrap_val, output_sig = get_loss_score(model,
val_loader,
criterion,
device)
result_dict = {
'model': model,
'val_idxs': val_idxs,
'output_sig': output_sig,
'loss_trn': loss_trn,
'loss_val': loss_val,
'acc_val': acc_val,
'lwlrap_val': lwlrap_val
}
return result_dict
def get_loss_score(model, val_loader, criterion, device):
model.eval()
epoch_valid_loss = 0
y_pred_list = []
y_true_list = []
output_sig_list = []
lwlrap_val = 0
for batch_idx, (data, target) in enumerate(val_loader):
data, target = data.to(device), target.to(device)
output = model(data)
loss = criterion(output, target)
epoch_valid_loss += loss.item()*data.size(0)
output_ = output['output']
output_sig = output['output_sigmoid']
output_sig = output_sig.detach().cpu().numpy()
_y_pred = output_.detach().cpu().numpy().argmax(axis=1)
_y_true = target.detach().cpu().numpy().argmax(axis=1)
y_pred_list.append(_y_pred)
y_true_list.append(_y_true)
output_sig_list.append(output_sig)
lwlrap_val += U.LWLRAP(output_, target) / len(val_loader)
loss_val = epoch_valid_loss / len(val_loader.dataset)
y_pred = np.concatenate(y_pred_list, axis=0)
y_true = np.concatenate(y_true_list, axis=0)
output_sig = np.concatenate(output_sig_list, axis=0)
acc_val = accuracy_score(y_true, y_pred)
del data
return loss_val, acc_val, lwlrap_val, output_sig
| true
| true
|
7902a9642cd00215ba2dce5277df7856bc5d626e
| 7,059
|
py
|
Python
|
seamseg/data/dataset.py
|
stanfordmlgroup/seamseg
|
574e89a4e5e99b0aca90071a0c37c31e57c5449e
|
[
"BSD-3-Clause"
] | 2
|
2021-01-11T08:57:40.000Z
|
2021-01-11T08:57:44.000Z
|
seamseg/data/dataset.py
|
stanfordmlgroup/seamseg
|
574e89a4e5e99b0aca90071a0c37c31e57c5449e
|
[
"BSD-3-Clause"
] | null | null | null |
seamseg/data/dataset.py
|
stanfordmlgroup/seamseg
|
574e89a4e5e99b0aca90071a0c37c31e57c5449e
|
[
"BSD-3-Clause"
] | 1
|
2020-09-28T07:55:50.000Z
|
2020-09-28T07:55:50.000Z
|
import glob
from itertools import chain
from os import path
import numpy as np
import torch.utils.data as data
import umsgpack
from PIL import Image
class ISSDataset(data.Dataset):
"""Instance segmentation dataset
This assumes the dataset to be formatted as defined in:
https://github.com/mapillary/seamseg/wiki/Dataset-format
Parameters
----------
root_dir : str
Path to the root directory of the dataset
split_name : str
Name of the split to load: this must correspond to one of the files in `root_dir/lst`
transform : callable
Transformer function applied to the loaded entries to prepare them for pytorch. This should be callable as
`transform(img, msk, cat, cls)`, where:
- `img` is a PIL.Image with `mode="RGB"`, containing the RGB data
- `msk` is a list of PIL.Image with `mode="L"`, containing the instance segmentation masks
- `cat` is a list containing the instance id to class id mapping
- `cls` is an integer specifying a requested class for class-uniform sampling, or None
"""
_IMG_DIR = "img"
_MSK_DIR = "msk"
_LST_DIR = "lst"
_METADATA_FILE = "metadata.bin"
def __init__(self, root_dir, split_name, transform):
super(ISSDataset, self).__init__()
self.root_dir = root_dir
self.split_name = split_name
self.transform = transform
# Folders
self._img_dir = path.join(root_dir, ISSDataset._IMG_DIR)
self._msk_dir = path.join(root_dir, ISSDataset._MSK_DIR)
self._lst_dir = path.join(root_dir, ISSDataset._LST_DIR)
for d in self._img_dir, self._msk_dir, self._lst_dir:
if not path.isdir(d):
raise IOError("Dataset sub-folder {} does not exist".format(d))
# Load meta-data and split
self._meta, self._images = self._load_split()
def _load_split(self):
with open(path.join(self.root_dir, ISSDataset._METADATA_FILE), "rb") as fid:
metadata = umsgpack.unpack(fid, encoding="utf-8")
with open(path.join(self._lst_dir, self.split_name + ".txt"), "r") as fid:
lst = fid.readlines()
lst = set(line.strip() for line in lst)
meta = metadata["meta"]
images = [img_desc for img_desc in metadata["images"] if img_desc["id"] in lst]
return meta, images
def _load_item(self, item):
img_desc = self._images[item]
img_file = path.join(self._img_dir, img_desc["id"])
if path.exists(img_file + ".png"):
img_file = img_file + ".png"
elif path.exists(img_file + ".jpg"):
img_file = img_file + ".jpg"
else:
raise IOError("Cannot find any image for id {} in {}".format(img_desc["id"], self._img_dir))
img = Image.open(img_file).convert(mode="RGB")
# Load all masks
msk_file = path.join(self._msk_dir, img_desc["id"] + ".png")
msk = [Image.open(msk_file)]
i = 1
while path.exists("{}.{}".format(msk_file, i)):
msk.append(Image.open("{}.{}".format(msk_file, i)))
i += 1
cat = img_desc["cat"]
iscrowd = img_desc["iscrowd"]
return img, msk, cat, iscrowd, img_desc["id"]
@property
def categories(self):
"""Category names"""
return self._meta["categories"]
@property
def num_categories(self):
"""Number of categories"""
return len(self.categories)
@property
def num_stuff(self):
"""Number of "stuff" categories"""
return self._meta["num_stuff"]
@property
def num_thing(self):
"""Number of "thing" categories"""
return self.num_categories - self.num_stuff
@property
def original_ids(self):
"""Original class id of each category"""
return self._meta["original_ids"]
@property
def palette(self):
"""Default palette to be used when color-coding semantic labels"""
return np.array(self._meta["palette"], dtype=np.uint8)
@property
def img_sizes(self):
"""Size of each image of the dataset"""
return [img_desc["size"] for img_desc in self._images]
@property
def img_categories(self):
"""Categories present in each image of the dataset"""
return [img_desc["cat"] for img_desc in self._images]
def __len__(self):
return len(self._images)
def __getitem__(self, item):
img, msk, cat, iscrowd, idx = self._load_item(item)
rec = self.transform(img, msk, cat, iscrowd)
size = (img.size[1], img.size[0])
img.close()
for m in msk:
m.close()
rec["idx"] = idx
rec["size"] = size
return rec
def get_raw_image(self, idx):
"""Load a single, unmodified image with given id from the dataset"""
img_file = path.join(self._img_dir, idx)
if path.exists(img_file + ".png"):
img_file = img_file + ".png"
elif path.exists(img_file + ".jpg"):
img_file = img_file + ".jpg"
else:
raise IOError("Cannot find any image for id {} in {}".format(idx, self._img_dir))
return Image.open(img_file)
def get_image_desc(self, idx):
"""Look up an image descriptor given the id"""
matching = [img_desc for img_desc in self._images if img_desc["id"] == idx]
if len(matching) == 1:
return matching[0]
else:
raise ValueError("No image found with id %s" % idx)
class ISSTestDataset(data.Dataset):
_EXTENSIONS = ["*.jpg", "*.jpeg", "*.png"]
def __init__(self, in_dir, transform):
super(ISSTestDataset, self).__init__()
self.in_dir = in_dir
self.transform = transform
# Find all images
self._images = []
for img_path in chain(
*(glob.iglob(path.join(self.in_dir, '**', ext), recursive=True) for ext in ISSTestDataset._EXTENSIONS)):
_, name_with_ext = path.split(img_path)
idx, _ = path.splitext(name_with_ext)
with Image.open(img_path) as img_raw:
size = (img_raw.size[1], img_raw.size[0])
self._images.append({
"idx": idx,
"path": img_path,
"size": size,
})
@property
def img_sizes(self):
"""Size of each image of the dataset"""
return [img_desc["size"] for img_desc in self._images]
def __len__(self):
return len(self._images)
def __getitem__(self, item):
# Load image
with Image.open(self._images[item]["path"]) as img_raw:
size = (img_raw.size[1], img_raw.size[0])
img = self.transform(img_raw.convert(mode="RGB"))
return {
"img": img,
"idx": self._images[item]["idx"],
"size": size,
"abs_path": self._images[item]["path"],
"rel_path": path.relpath(self._images[item]["path"], self.in_dir),
}
| 33.140845
| 120
| 0.593285
|
import glob
from itertools import chain
from os import path
import numpy as np
import torch.utils.data as data
import umsgpack
from PIL import Image
class ISSDataset(data.Dataset):
_IMG_DIR = "img"
_MSK_DIR = "msk"
_LST_DIR = "lst"
_METADATA_FILE = "metadata.bin"
def __init__(self, root_dir, split_name, transform):
super(ISSDataset, self).__init__()
self.root_dir = root_dir
self.split_name = split_name
self.transform = transform
self._img_dir = path.join(root_dir, ISSDataset._IMG_DIR)
self._msk_dir = path.join(root_dir, ISSDataset._MSK_DIR)
self._lst_dir = path.join(root_dir, ISSDataset._LST_DIR)
for d in self._img_dir, self._msk_dir, self._lst_dir:
if not path.isdir(d):
raise IOError("Dataset sub-folder {} does not exist".format(d))
self._meta, self._images = self._load_split()
def _load_split(self):
with open(path.join(self.root_dir, ISSDataset._METADATA_FILE), "rb") as fid:
metadata = umsgpack.unpack(fid, encoding="utf-8")
with open(path.join(self._lst_dir, self.split_name + ".txt"), "r") as fid:
lst = fid.readlines()
lst = set(line.strip() for line in lst)
meta = metadata["meta"]
images = [img_desc for img_desc in metadata["images"] if img_desc["id"] in lst]
return meta, images
def _load_item(self, item):
img_desc = self._images[item]
img_file = path.join(self._img_dir, img_desc["id"])
if path.exists(img_file + ".png"):
img_file = img_file + ".png"
elif path.exists(img_file + ".jpg"):
img_file = img_file + ".jpg"
else:
raise IOError("Cannot find any image for id {} in {}".format(img_desc["id"], self._img_dir))
img = Image.open(img_file).convert(mode="RGB")
msk_file = path.join(self._msk_dir, img_desc["id"] + ".png")
msk = [Image.open(msk_file)]
i = 1
while path.exists("{}.{}".format(msk_file, i)):
msk.append(Image.open("{}.{}".format(msk_file, i)))
i += 1
cat = img_desc["cat"]
iscrowd = img_desc["iscrowd"]
return img, msk, cat, iscrowd, img_desc["id"]
@property
def categories(self):
return self._meta["categories"]
@property
def num_categories(self):
return len(self.categories)
@property
def num_stuff(self):
return self._meta["num_stuff"]
@property
def num_thing(self):
return self.num_categories - self.num_stuff
@property
def original_ids(self):
return self._meta["original_ids"]
@property
def palette(self):
return np.array(self._meta["palette"], dtype=np.uint8)
@property
def img_sizes(self):
return [img_desc["size"] for img_desc in self._images]
@property
def img_categories(self):
return [img_desc["cat"] for img_desc in self._images]
def __len__(self):
return len(self._images)
def __getitem__(self, item):
img, msk, cat, iscrowd, idx = self._load_item(item)
rec = self.transform(img, msk, cat, iscrowd)
size = (img.size[1], img.size[0])
img.close()
for m in msk:
m.close()
rec["idx"] = idx
rec["size"] = size
return rec
def get_raw_image(self, idx):
img_file = path.join(self._img_dir, idx)
if path.exists(img_file + ".png"):
img_file = img_file + ".png"
elif path.exists(img_file + ".jpg"):
img_file = img_file + ".jpg"
else:
raise IOError("Cannot find any image for id {} in {}".format(idx, self._img_dir))
return Image.open(img_file)
def get_image_desc(self, idx):
matching = [img_desc for img_desc in self._images if img_desc["id"] == idx]
if len(matching) == 1:
return matching[0]
else:
raise ValueError("No image found with id %s" % idx)
class ISSTestDataset(data.Dataset):
_EXTENSIONS = ["*.jpg", "*.jpeg", "*.png"]
def __init__(self, in_dir, transform):
super(ISSTestDataset, self).__init__()
self.in_dir = in_dir
self.transform = transform
self._images = []
for img_path in chain(
*(glob.iglob(path.join(self.in_dir, '**', ext), recursive=True) for ext in ISSTestDataset._EXTENSIONS)):
_, name_with_ext = path.split(img_path)
idx, _ = path.splitext(name_with_ext)
with Image.open(img_path) as img_raw:
size = (img_raw.size[1], img_raw.size[0])
self._images.append({
"idx": idx,
"path": img_path,
"size": size,
})
@property
def img_sizes(self):
return [img_desc["size"] for img_desc in self._images]
def __len__(self):
return len(self._images)
def __getitem__(self, item):
with Image.open(self._images[item]["path"]) as img_raw:
size = (img_raw.size[1], img_raw.size[0])
img = self.transform(img_raw.convert(mode="RGB"))
return {
"img": img,
"idx": self._images[item]["idx"],
"size": size,
"abs_path": self._images[item]["path"],
"rel_path": path.relpath(self._images[item]["path"], self.in_dir),
}
| true
| true
|
7902ab0e67a99494380d14a788dcfc8a27e7cbe4
| 7,836
|
py
|
Python
|
contrib/bitrpc/bitrpc.py
|
bitmea-project/bitmea
|
02b400578d0eee35304c27a7b3d8fa4ef7f057e3
|
[
"MIT"
] | 1
|
2017-11-05T11:51:38.000Z
|
2017-11-05T11:51:38.000Z
|
contrib/bitrpc/bitrpc.py
|
bitmea-project/bitmea
|
02b400578d0eee35304c27a7b3d8fa4ef7f057e3
|
[
"MIT"
] | null | null | null |
contrib/bitrpc/bitrpc.py
|
bitmea-project/bitmea
|
02b400578d0eee35304c27a7b3d8fa4ef7f057e3
|
[
"MIT"
] | null | null | null |
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:25176")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:25176")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitmea address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitmea address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| 24.110769
| 79
| 0.668198
|
from jsonrpc import ServiceProxy
import sys
import string
rpcuser = ""
rpcpass = ""
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:25176")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:25176")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitmea address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitmea address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| false
| true
|
7902acb04630a9f1b4f45b81c474f8a41d9902fc
| 3,516
|
py
|
Python
|
flapison/exceptions.py
|
Leechael/flapison
|
379b37774fd16197c803a7b5e3bd9eca99191337
|
[
"MIT"
] | 13
|
2019-12-03T22:03:36.000Z
|
2020-11-24T05:09:02.000Z
|
flapison/exceptions.py
|
Leechael/flapison
|
379b37774fd16197c803a7b5e3bd9eca99191337
|
[
"MIT"
] | 16
|
2019-11-11T23:20:20.000Z
|
2020-11-19T09:03:17.000Z
|
flapison/exceptions.py
|
Leechael/flapison
|
379b37774fd16197c803a7b5e3bd9eca99191337
|
[
"MIT"
] | 7
|
2019-11-14T11:06:13.000Z
|
2020-09-28T08:14:40.000Z
|
# -*- coding: utf-8 -*-
"""Collection of useful http error for the Api"""
class JsonApiException(Exception):
"""Base exception class for unknown errors"""
title = "Unknown error"
status = "500"
source = None
def __init__(
self,
detail,
source=None,
title=None,
status=None,
code=None,
id_=None,
links=None,
meta=None,
):
"""Initialize a jsonapi exception
:param dict source: the source of the error
:param str detail: the detail of the error
"""
self.detail = detail
self.source = source
self.code = code
self.id = id_
self.links = links or {}
self.meta = meta or {}
if title is not None:
self.title = title
if status is not None:
self.status = status
def to_dict(self):
"""Return values of each fields of an jsonapi error"""
error_dict = {}
for field in (
"status",
"source",
"title",
"detail",
"id",
"code",
"links",
"meta",
):
if getattr(self, field, None):
error_dict.update({field: getattr(self, field)})
return error_dict
class BadRequest(JsonApiException):
"""BadRequest error"""
title = "Bad request"
status = "400"
class InvalidField(BadRequest):
"""Error to warn that a field specified in fields querystring is not in the requested resource schema"""
title = "Invalid fields querystring parameter."
source = {"parameter": "fields"}
class InvalidInclude(BadRequest):
"""Error to warn that a field specified in include querystring parameter is not a relationship of the requested
resource schema
"""
title = "Invalid include querystring parameter."
source = {"parameter": "include"}
class InvalidFilters(BadRequest):
"""Error to warn that a specified filters in querystring parameter contains errors"""
title = "Invalid filters querystring parameter."
source = {"parameter": "filters"}
class InvalidSort(BadRequest):
"""Error to warn that a field specified in sort querystring parameter is not in the requested resource schema"""
title = "Invalid sort querystring parameter."
source = {"parameter": "sort"}
class ObjectNotFound(JsonApiException):
"""Error to warn that an object is not found in a database"""
title = "Object not found"
status = "404"
class RelatedObjectNotFound(ObjectNotFound):
"""Error to warn that a related object is not found"""
title = "Related object not found"
class RelationNotFound(JsonApiException):
"""Error to warn that a relationship is not found on a model"""
title = "Relation not found"
class InvalidType(JsonApiException):
"""Error to warn that there is a conflit between resource types"""
title = "Invalid type"
status = "409"
class AccessDenied(JsonApiException):
"""Throw this error when requested resource owner doesn't match the user of the ticket"""
title = "Access denied"
status = "403"
class InvalidContentType(JsonApiException):
"""When the request uses a content type the API doesn't understand"""
title = "Bad request"
status = "415"
class InvalidAcceptType(JsonApiException):
"""When the request expects a content type that the API doesn't support"""
title = "Bad request"
status = "406"
| 24.93617
| 116
| 0.625711
|
class JsonApiException(Exception):
title = "Unknown error"
status = "500"
source = None
def __init__(
self,
detail,
source=None,
title=None,
status=None,
code=None,
id_=None,
links=None,
meta=None,
):
self.detail = detail
self.source = source
self.code = code
self.id = id_
self.links = links or {}
self.meta = meta or {}
if title is not None:
self.title = title
if status is not None:
self.status = status
def to_dict(self):
error_dict = {}
for field in (
"status",
"source",
"title",
"detail",
"id",
"code",
"links",
"meta",
):
if getattr(self, field, None):
error_dict.update({field: getattr(self, field)})
return error_dict
class BadRequest(JsonApiException):
title = "Bad request"
status = "400"
class InvalidField(BadRequest):
title = "Invalid fields querystring parameter."
source = {"parameter": "fields"}
class InvalidInclude(BadRequest):
title = "Invalid include querystring parameter."
source = {"parameter": "include"}
class InvalidFilters(BadRequest):
title = "Invalid filters querystring parameter."
source = {"parameter": "filters"}
class InvalidSort(BadRequest):
title = "Invalid sort querystring parameter."
source = {"parameter": "sort"}
class ObjectNotFound(JsonApiException):
title = "Object not found"
status = "404"
class RelatedObjectNotFound(ObjectNotFound):
title = "Related object not found"
class RelationNotFound(JsonApiException):
title = "Relation not found"
class InvalidType(JsonApiException):
title = "Invalid type"
status = "409"
class AccessDenied(JsonApiException):
title = "Access denied"
status = "403"
class InvalidContentType(JsonApiException):
title = "Bad request"
status = "415"
class InvalidAcceptType(JsonApiException):
title = "Bad request"
status = "406"
| true
| true
|
7902ad581daf449f16c00ca77842b11f33a21f72
| 1,304
|
py
|
Python
|
pypy/objspace/std/test/test_annmm.py
|
kantai/passe-pypy-taint-tracking
|
b60a3663f8fe89892dc182c8497aab97e2e75d69
|
[
"MIT"
] | 2
|
2016-07-06T23:30:20.000Z
|
2017-05-30T15:59:31.000Z
|
pypy/objspace/std/test/test_annmm.py
|
kantai/passe-pypy-taint-tracking
|
b60a3663f8fe89892dc182c8497aab97e2e75d69
|
[
"MIT"
] | null | null | null |
pypy/objspace/std/test/test_annmm.py
|
kantai/passe-pypy-taint-tracking
|
b60a3663f8fe89892dc182c8497aab97e2e75d69
|
[
"MIT"
] | 2
|
2020-07-09T08:14:22.000Z
|
2021-01-15T18:01:25.000Z
|
from pypy.objspace.std.multimethod import *
from rpython.annotator.annrpython import RPythonAnnotator
class W_Root(object):
pass
class W_Int(W_Root):
pass
class W_Str(W_Root):
pass
str_w = MultiMethodTable(1, root_class=W_Root, argnames_before=['space'])
int_w = MultiMethodTable(1, root_class=W_Root, argnames_before=['space'])
def int_w__Int(space, w_x):
assert space == 'space'
assert isinstance(w_x, W_Int)
return 1
def str_w__Str(space, w_x):
assert space == 'space'
assert isinstance(w_x, W_Str)
return "string"
int_w.register(int_w__Int, W_Int)
str_w.register(str_w__Str, W_Str)
def setup_module(mod):
typeorder = {
W_Int: [(W_Int, None)],
W_Str: [(W_Str, None)],
}
mod.typeorder = typeorder
mod.str_w1 = str_w.install('__str_w', [typeorder])
mod.int_w1 = int_w.install('__int_w', [typeorder])
def test_str_w_ann():
a = RPythonAnnotator()
s1 = a.build_types(str_w1,[str, W_Str])
s2 = a.build_types(str_w1,[str, W_Root])
assert s1.knowntype == str
assert s2.knowntype == str
def test_int_w_ann():
a = RPythonAnnotator()
s1 = a.build_types(int_w1,[str, W_Int])
s2 = a.build_types(int_w1,[str, W_Str])
assert s1.knowntype == int
assert s2.knowntype == int
| 22.877193
| 73
| 0.669479
|
from pypy.objspace.std.multimethod import *
from rpython.annotator.annrpython import RPythonAnnotator
class W_Root(object):
pass
class W_Int(W_Root):
pass
class W_Str(W_Root):
pass
str_w = MultiMethodTable(1, root_class=W_Root, argnames_before=['space'])
int_w = MultiMethodTable(1, root_class=W_Root, argnames_before=['space'])
def int_w__Int(space, w_x):
assert space == 'space'
assert isinstance(w_x, W_Int)
return 1
def str_w__Str(space, w_x):
assert space == 'space'
assert isinstance(w_x, W_Str)
return "string"
int_w.register(int_w__Int, W_Int)
str_w.register(str_w__Str, W_Str)
def setup_module(mod):
typeorder = {
W_Int: [(W_Int, None)],
W_Str: [(W_Str, None)],
}
mod.typeorder = typeorder
mod.str_w1 = str_w.install('__str_w', [typeorder])
mod.int_w1 = int_w.install('__int_w', [typeorder])
def test_str_w_ann():
a = RPythonAnnotator()
s1 = a.build_types(str_w1,[str, W_Str])
s2 = a.build_types(str_w1,[str, W_Root])
assert s1.knowntype == str
assert s2.knowntype == str
def test_int_w_ann():
a = RPythonAnnotator()
s1 = a.build_types(int_w1,[str, W_Int])
s2 = a.build_types(int_w1,[str, W_Str])
assert s1.knowntype == int
assert s2.knowntype == int
| true
| true
|
7902ad96592838372ac3d5038aeba106a3da787c
| 8,756
|
py
|
Python
|
ucam_wls/context.py
|
edwinbalani/ucam-wls
|
1c828c1b9cbf4e6b38fb1986235e20a746726f6a
|
[
"MIT"
] | null | null | null |
ucam_wls/context.py
|
edwinbalani/ucam-wls
|
1c828c1b9cbf4e6b38fb1986235e20a746726f6a
|
[
"MIT"
] | 3
|
2020-11-20T18:36:11.000Z
|
2020-12-03T00:55:56.000Z
|
ucam_wls/context.py
|
edwinbalani/ucam-wls
|
1c828c1b9cbf4e6b38fb1986235e20a746726f6a
|
[
"MIT"
] | null | null | null |
import datetime
from . import status
from .errors import InvalidAuthRequest, ProtocolVersionUnsupported, NoMutualAuthType
from .signing import Key
from .response import AuthResponse
class AuthPrincipal:
def __init__(self, userid, auth_methods, ptags=None, session_expiry=None):
self.userid = userid
self.auth_methods = auth_methods
if ptags is None:
ptags = []
self.ptags = ptags
self.session_expiry = session_expiry
class LoginService:
"""High-level interface to implement a web login service (WLS).
This class provides a convenient interface for implementing a WLS with any
authentication backend. It is intended to be instantiated with a single
private key, which is used to sign the responses it generates.
Mechanisms deemed useful for WLS implementation are provided:
- storing the list of supported authentication methods, and checking
whether the WLS and a WAA's request have an authentication method in common
- checking whether the protocol version specified in the WAA request is
supported by `ucam_wls`
These mechanisms can optionally be turned off.
Attributes:
key (ucam_wls.signing.Key): a private key to be used to sign responses
auth_methods (list): a list of supported authentication methods
"""
def __init__(self, key, auth_methods):
if not isinstance(key, Key):
raise TypeError("key must be a ucam_wls.signing.Key instance")
self.key = key
self.auth_methods = auth_methods
def have_mutual_auth_type(self, request):
if request.aauth and any(request.aauth):
return set(request.aauth) & set(self.auth_methods) != set()
else:
return True
def _pre_response(self, request, skip_handling_check, check_auth_types=True):
if not skip_handling_check:
if not request.data_valid:
raise InvalidAuthRequest
if check_auth_types and not self.have_mutual_auth_type(request):
raise NoMutualAuthType(
"WLS supports %s; WAA wants one of %s" % (
self.auth_methods, request.aauth
)
)
if not request.version_supported:
raise ProtocolVersionUnsupported(request.ver)
def _finish_response(self, response, sign=True, force_signature=False):
if sign or response.requires_signature:
if not response.is_signed or force_signature:
self.key.sign(response)
return response
def authenticate_active(self, request, principal, auth, life=None,
sign=True, skip_handling_check=False, *args, **kwargs):
"""Generate a WLS 'success' response based on interaction with the user
This function creates a WLS response specifying that the principal was
authenticated based on 'fresh' interaction with the user (e.g. input of
a username and password).
Args:
request (AuthRequest): the original WAA request
principal (AuthPrincipal): the principal authenticated by the WLS
auth (str): the authentication method used by the principal.
life (int): if specified, the validity (in seconds) of the
principal's session with the WLS.
sign (bool): whether to sign the response or not. Recommended to
leave this at the default value of `True` (see warning below).
*args: passed to `AuthResponse.respond_to_request`
**kwargs: passed to `AuthResponse.respond_to_request`
Returns:
An `AuthResponse` instance matching the given arguments.
Warning:
Responses indicating successful authentication *MUST* be signed by
the WLS. It is recommended that you leave `sign` set to `True`, or
make sure to sign the response manually afterwards.
"""
self._pre_response(request, skip_handling_check)
if request.iact == False:
raise ValueError("WAA demanded passive authentication (iact == 'no')")
if life is None and principal.session_expiry is not None:
life = int((principal.session_expiry - datetime.datetime.utcnow()).total_seconds())
response = AuthResponse.respond_to_request(
request=request, code=status.SUCCESS, principal=principal.userid,
auth=auth, ptags=principal.ptags, life=life, *args, **kwargs
)
return self._finish_response(response=response, sign=sign)
def authenticate_passive(self, request, principal, sso=[], sign=True,
skip_handling_check=False, *args, **kwargs):
"""Generate a WLS 'success' response based on a pre-existing identity
This function creates a WLS response specifying that the principal was
authenticated based on previous successful authentication (e.g. an
existing WLS session cookie).
Args:
request (AuthRequest): the original WAA request
principal (AuthPrincipal): the principal authenticated by the WLS
sso (list): a list of strings indicating the authentication methods
previously used for authentication by the principal. If an
empty list is passed, `principal.auth_methods` will be used.
sign (bool): whether to sign the response or not. Recommended to
leave this at the default value of `True` (see warning below).
*args: passed to `AuthResponse.respond_to_request`
**kwargs: passed to `AuthResponse.respond_to_request`
Returns:
An `AuthResponse` instance matching the given arguments.
Warning:
Responses indicating successful authentication *MUST* be signed by
the WLS. It is recommended that you leave `sign` set to `True`, or
make sure to sign the response manually afterwards.
"""
self._pre_response(request, skip_handling_check)
if request.iact == True:
raise ValueError("WAA demanded active authentication (iact == 'yes')")
if len(sso) == 0:
sso = principal.auth_methods
if len(sso) == 0:
raise ValueError("no authentication methods specified for `sso`")
if principal.session_expiry is not None:
life = int((principal.session_expiry - datetime.datetime.utcnow()).total_seconds())
else:
life = None
response = AuthResponse.respond_to_request(
request=request, code=status.SUCCESS, principal=principal.userid,
sso=sso, ptags=principal.ptags, life=life, *args, **kwargs
)
return self._finish_response(response=response, sign=sign)
def generate_failure(self, code, request, msg='', sign=True,
skip_handling_check=False, *args, **kwargs):
"""Generate a response indicating failure.
This is to be used in all cases where the outcome of user interaction
is not a success. This function will refuse to handle a request where
the 'fail' parameter is 'yes' (in which case the WLS must not redirect
back to the WAA).
Args:
code (int): the response status code. Values specified in the
protocol are available as constants under `ucam_wls.status`.
request (AuthRequest): the original WAA request
msg (str): an optional message that could be shown to the end user
by the WAA
sign (bool): whether to sign the response or not.
*args: passed to `AuthResponse.respond_to_request`
**kwargs: passed to `AuthResponse.respond_to_request`
Returns:
An `AuthResponse` instance matching the given arguments.
Note:
WLS responses indicating a non-success outcome may optionally be
signed. In the interests of security, the default in this function is
to sign them anyway, but this can be turned off if desired.
"""
self._pre_response(request, skip_handling_check, check_auth_types=False)
if request.fail:
raise ValueError("WAA specified that WLS must not redirect "
"back to it on failure")
if code == status.SUCCESS:
raise ValueError("Failure responses must not have success status")
response = AuthResponse.respond_to_request(
request=request, code=code, *args, **kwargs
)
return self._finish_response(response=response, sign=sign)
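For the failure path, a similarly hedged sketch (again assuming `wls` and `request` as above); 410 is the Ucam-WebAuth status code for a user-cancelled request, and a named constant for it may exist under `ucam_wls.status`.
cancel_response = wls.generate_failure(code=410, request=request)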
| 43.346535
| 95
| 0.646071
|
import datetime
from . import status
from .errors import InvalidAuthRequest, ProtocolVersionUnsupported, NoMutualAuthType
from .signing import Key
from .response import AuthResponse
class AuthPrincipal:
def __init__(self, userid, auth_methods, ptags=None, session_expiry=None):
self.userid = userid
self.auth_methods = auth_methods
if ptags is None:
ptags = []
self.ptags = ptags
self.session_expiry = session_expiry
class LoginService:
def __init__(self, key, auth_methods):
if not isinstance(key, Key):
raise TypeError("key must be a ucam_wls.signing.Key instance")
self.key = key
self.auth_methods = auth_methods
def have_mutual_auth_type(self, request):
if request.aauth and any(request.aauth):
return set(request.aauth) & set(self.auth_methods) != set()
else:
return True
def _pre_response(self, request, skip_handling_check, check_auth_types=True):
if not skip_handling_check:
if not request.data_valid:
raise InvalidAuthRequest
if check_auth_types and not self.have_mutual_auth_type(request):
raise NoMutualAuthType(
"WLS supports %s; WAA wants one of %s" % (
self.auth_methods, request.aauth
)
)
if not request.version_supported:
raise ProtocolVersionUnsupported(request.ver)
def _finish_response(self, response, sign=True, force_signature=False):
if sign or response.requires_signature:
if not response.is_signed or force_signature:
self.key.sign(response)
return response
def authenticate_active(self, request, principal, auth, life=None,
sign=True, skip_handling_check=False, *args, **kwargs):
self._pre_response(request, skip_handling_check)
if request.iact == False:
raise ValueError("WAA demanded passive authentication (iact == 'no')")
if life is None and principal.session_expiry is not None:
life = int((principal.session_expiry - datetime.datetime.utcnow()).total_seconds())
response = AuthResponse.respond_to_request(
request=request, code=status.SUCCESS, principal=principal.userid,
auth=auth, ptags=principal.ptags, life=life, *args, **kwargs
)
return self._finish_response(response=response, sign=sign)
def authenticate_passive(self, request, principal, sso=[], sign=True,
skip_handling_check=False, *args, **kwargs):
self._pre_response(request, skip_handling_check)
if request.iact == True:
raise ValueError("WAA demanded active authentication (iact == 'yes')")
if len(sso) == 0:
sso = principal.auth_methods
if len(sso) == 0:
raise ValueError("no authentication methods specified for `sso`")
if principal.session_expiry is not None:
life = int((principal.session_expiry - datetime.datetime.utcnow()).total_seconds())
else:
life = None
response = AuthResponse.respond_to_request(
request=request, code=status.SUCCESS, principal=principal.userid,
sso=sso, ptags=principal.ptags, life=life, *args, **kwargs
)
return self._finish_response(response=response, sign=sign)
def generate_failure(self, code, request, msg='', sign=True,
skip_handling_check=False, *args, **kwargs):
self._pre_response(request, skip_handling_check, check_auth_types=False)
if request.fail:
raise ValueError("WAA specified that WLS must not redirect "
"back to it on failure")
if code == status.SUCCESS:
raise ValueError("Failure responses must not have success status")
response = AuthResponse.respond_to_request(
request=request, code=code, *args, **kwargs
)
return self._finish_response(response=response, sign=sign)
| true
| true
|
7902ae902f0242a19bcf6952a2b51c71729cf8da
| 6,123
|
py
|
Python
|
components/server/src/routes/report.py
|
Gamer1120/quality-time
|
f3a0d6f75cd6055d78995d37feae72bc3e837e4b
|
[
"Apache-2.0"
] | 1
|
2021-02-22T07:53:36.000Z
|
2021-02-22T07:53:36.000Z
|
components/server/src/routes/report.py
|
Gamer1120/quality-time
|
f3a0d6f75cd6055d78995d37feae72bc3e837e4b
|
[
"Apache-2.0"
] | 338
|
2020-10-29T04:28:09.000Z
|
2022-02-22T04:09:33.000Z
|
components/server/src/routes/report.py
|
dicksnel/quality-time
|
4c04f8852aa97175f2bca2b5c5391b3e09b657af
|
[
"Apache-2.0"
] | 1
|
2022-01-06T04:07:03.000Z
|
2022-01-06T04:07:03.000Z
|
"""Report routes."""
import os
from urllib import parse
import bottle
import requests
from pymongo.database import Database
from database import sessions
from database.datamodels import latest_datamodel
from database.measurements import recent_measurements_by_metric_uuid
from database.reports import insert_new_report, latest_reports
from initialization.report import import_json_report
from model.actions import copy_report
from model.data import ReportData
from model.transformations import hide_credentials, summarize_report
from server_utilities.functions import report_date_time, uuid
from server_utilities.type import ReportId
@bottle.post("/api/v3/report/import")
def post_report_import(database: Database):
"""Import a preconfigured report into the database."""
report = dict(bottle.request.json)
result = import_json_report(database, report)
result["new_report_uuid"] = report["report_uuid"]
return result
@bottle.post("/api/v3/report/new")
def post_report_new(database: Database):
"""Add a new report."""
report_uuid = uuid()
user = sessions.user(database)
report = dict(
report_uuid=report_uuid, title="New report", subjects={},
delta=dict(uuids=[report_uuid], email=user["email"], description=f"{user['user']} created a new report."))
result = insert_new_report(database, report)
result["new_report_uuid"] = report_uuid
return result
@bottle.post("/api/v3/report/<report_uuid>/copy")
def post_report_copy(report_uuid: ReportId, database: Database):
"""Copy a report."""
data_model = latest_datamodel(database)
reports = latest_reports(database)
data = ReportData(data_model, reports, report_uuid)
report_copy = copy_report(data.report, data.datamodel)
user = sessions.user(database)
report_copy["delta"] = dict(
uuids=[report_uuid, report_copy["report_uuid"]], email=user["email"],
description=f"{user['user']} copied the report '{data.report_name}'.")
result = insert_new_report(database, report_copy)
result["new_report_uuid"] = report_copy["report_uuid"]
return result
@bottle.get("/api/v3/report/<report_uuid>/pdf")
def export_report_as_pdf(report_uuid: ReportId):
"""Download the report as pdf."""
renderer_host = os.environ.get("RENDERER_HOST", "renderer")
renderer_port = os.environ.get("RENDERER_PORT", "9000")
render_url = f"http://{renderer_host}:{renderer_port}/api/render"
proxy_host = os.environ.get("PROXY_HOST", "www")
proxy_port = os.environ.get("PROXY_PORT", "80")
query_string = f"?{bottle.request.query_string}" if bottle.request.query_string else ""
report_url = parse.quote(f"http://{proxy_host}:{proxy_port}/{report_uuid}{query_string}")
margins = "&".join([f"pdf.margin.{side}=25" for side in ("top", "bottom", "left", "right")])
# Set pdf scale to 70%, otherwise the dashboard falls off the page
options = f"emulateScreenMedia=false&goto.timeout=60000&pdf.scale=0.7&{margins}"
response = requests.get(f"{render_url}?url={report_url}&{options}")
response.raise_for_status()
bottle.response.content_type = "application/pdf"
return response.content
@bottle.delete("/api/v3/report/<report_uuid>")
def delete_report(report_uuid: ReportId, database: Database):
"""Delete a report."""
data_model = latest_datamodel(database)
reports = latest_reports(database)
data = ReportData(data_model, reports, report_uuid)
data.report["deleted"] = "true"
user = sessions.user(database)
data.report["delta"] = dict(
uuids=[report_uuid], email=user["email"],
description=f"{user['user']} deleted the report '{data.report_name}'.")
return insert_new_report(database, data.report)
@bottle.post("/api/v3/report/<report_uuid>/attribute/<report_attribute>")
def post_report_attribute(report_uuid: ReportId, report_attribute: str, database: Database):
"""Set a report attribute."""
data_model = latest_datamodel(database)
reports = latest_reports(database)
data = ReportData(data_model, reports, report_uuid)
value = dict(bottle.request.json)[report_attribute]
old_value = data.report.get(report_attribute) or ""
data.report[report_attribute] = value
value_change_description = "" if report_attribute == "layout" else f" from '{old_value}' to '{value}'"
user = sessions.user(database)
data.report["delta"] = dict(
uuids=[report_uuid], email=user["email"],
description=f"{user['user']} changed the {report_attribute} of report '{data.report_name}'"
f"{value_change_description}.")
return insert_new_report(database, data.report)
@bottle.get("/api/v3/tagreport/<tag>")
def get_tag_report(tag: str, database: Database):
"""Get a report with all metrics that have the specified tag."""
date_time = report_date_time()
reports = latest_reports(database, date_time)
data_model = latest_datamodel(database, date_time)
subjects = _get_subjects_and_metrics_by_tag(data_model, reports, tag)
tag_report = dict(
title=f'Report for tag "{tag}"', subtitle="Note: tag reports are read-only", report_uuid=f"tag-{tag}",
timestamp=date_time, subjects=subjects)
hide_credentials(data_model, tag_report)
summarize_report(tag_report, recent_measurements_by_metric_uuid(database, date_time), data_model)
return tag_report
def _get_subjects_and_metrics_by_tag(data_model, reports, tag: str):
"""Return all subjects and metrics that have the tag."""
subjects = {}
for report in reports:
for subject_uuid, subject in list(report.get("subjects", {}).items()):
for metric_uuid, metric in list(subject.get("metrics", {}).items()):
if tag not in metric.get("tags", []):
del subject["metrics"][metric_uuid]
if subject.get("metrics", {}):
subject_name = subject.get("name") or data_model["subjects"][subject["type"]]["name"]
subject["name"] = report["title"] + " / " + subject_name
subjects[subject_uuid] = subject
return subjects
| 44.05036
| 114
| 0.709783
|
import os
from urllib import parse
import bottle
import requests
from pymongo.database import Database
from database import sessions
from database.datamodels import latest_datamodel
from database.measurements import recent_measurements_by_metric_uuid
from database.reports import insert_new_report, latest_reports
from initialization.report import import_json_report
from model.actions import copy_report
from model.data import ReportData
from model.transformations import hide_credentials, summarize_report
from server_utilities.functions import report_date_time, uuid
from server_utilities.type import ReportId
@bottle.post("/api/v3/report/import")
def post_report_import(database: Database):
report = dict(bottle.request.json)
result = import_json_report(database, report)
result["new_report_uuid"] = report["report_uuid"]
return result
@bottle.post("/api/v3/report/new")
def post_report_new(database: Database):
report_uuid = uuid()
user = sessions.user(database)
report = dict(
report_uuid=report_uuid, title="New report", subjects={},
delta=dict(uuids=[report_uuid], email=user["email"], description=f"{user['user']} created a new report."))
result = insert_new_report(database, report)
result["new_report_uuid"] = report_uuid
return result
@bottle.post("/api/v3/report/<report_uuid>/copy")
def post_report_copy(report_uuid: ReportId, database: Database):
data_model = latest_datamodel(database)
reports = latest_reports(database)
data = ReportData(data_model, reports, report_uuid)
report_copy = copy_report(data.report, data.datamodel)
user = sessions.user(database)
report_copy["delta"] = dict(
uuids=[report_uuid, report_copy["report_uuid"]], email=user["email"],
description=f"{user['user']} copied the report '{data.report_name}'.")
result = insert_new_report(database, report_copy)
result["new_report_uuid"] = report_copy["report_uuid"]
return result
@bottle.get("/api/v3/report/<report_uuid>/pdf")
def export_report_as_pdf(report_uuid: ReportId):
renderer_host = os.environ.get("RENDERER_HOST", "renderer")
renderer_port = os.environ.get("RENDERER_PORT", "9000")
render_url = f"http://{renderer_host}:{renderer_port}/api/render"
proxy_host = os.environ.get("PROXY_HOST", "www")
proxy_port = os.environ.get("PROXY_PORT", "80")
query_string = f"?{bottle.request.query_string}" if bottle.request.query_string else ""
report_url = parse.quote(f"http://{proxy_host}:{proxy_port}/{report_uuid}{query_string}")
margins = "&".join([f"pdf.margin.{side}=25" for side in ("top", "bottom", "left", "right")])
options = f"emulateScreenMedia=false&goto.timeout=60000&pdf.scale=0.7&{margins}"
response = requests.get(f"{render_url}?url={report_url}&{options}")
response.raise_for_status()
bottle.response.content_type = "application/pdf"
return response.content
@bottle.delete("/api/v3/report/<report_uuid>")
def delete_report(report_uuid: ReportId, database: Database):
data_model = latest_datamodel(database)
reports = latest_reports(database)
data = ReportData(data_model, reports, report_uuid)
data.report["deleted"] = "true"
user = sessions.user(database)
data.report["delta"] = dict(
uuids=[report_uuid], email=user["email"],
description=f"{user['user']} deleted the report '{data.report_name}'.")
return insert_new_report(database, data.report)
@bottle.post("/api/v3/report/<report_uuid>/attribute/<report_attribute>")
def post_report_attribute(report_uuid: ReportId, report_attribute: str, database: Database):
data_model = latest_datamodel(database)
reports = latest_reports(database)
data = ReportData(data_model, reports, report_uuid)
value = dict(bottle.request.json)[report_attribute]
old_value = data.report.get(report_attribute) or ""
data.report[report_attribute] = value
value_change_description = "" if report_attribute == "layout" else f" from '{old_value}' to '{value}'"
user = sessions.user(database)
data.report["delta"] = dict(
uuids=[report_uuid], email=user["email"],
description=f"{user['user']} changed the {report_attribute} of report '{data.report_name}'"
f"{value_change_description}.")
return insert_new_report(database, data.report)
@bottle.get("/api/v3/tagreport/<tag>")
def get_tag_report(tag: str, database: Database):
date_time = report_date_time()
reports = latest_reports(database, date_time)
data_model = latest_datamodel(database, date_time)
subjects = _get_subjects_and_metrics_by_tag(data_model, reports, tag)
tag_report = dict(
title=f'Report for tag "{tag}"', subtitle="Note: tag reports are read-only", report_uuid=f"tag-{tag}",
timestamp=date_time, subjects=subjects)
hide_credentials(data_model, tag_report)
summarize_report(tag_report, recent_measurements_by_metric_uuid(database, date_time), data_model)
return tag_report
def _get_subjects_and_metrics_by_tag(data_model, reports, tag: str):
subjects = {}
for report in reports:
for subject_uuid, subject in list(report.get("subjects", {}).items()):
for metric_uuid, metric in list(subject.get("metrics", {}).items()):
if tag not in metric.get("tags", []):
del subject["metrics"][metric_uuid]
if subject.get("metrics", {}):
subject_name = subject.get("name") or data_model["subjects"][subject["type"]]["name"]
subject["name"] = report["title"] + " / " + subject_name
subjects[subject_uuid] = subject
return subjects
| true
| true
|
7902af6a46c574329e2c40a124e6dc48cbbe1d0c
| 525
|
py
|
Python
|
project-euler/14/solution.py
|
gashev/algorithms
|
ea750b84658e282afad9db3cd51081e30521074b
|
[
"Unlicense"
] | 1
|
2020-07-23T21:33:43.000Z
|
2020-07-23T21:33:43.000Z
|
project-euler/14/solution.py
|
gashev/algorithms
|
ea750b84658e282afad9db3cd51081e30521074b
|
[
"Unlicense"
] | null | null | null |
project-euler/14/solution.py
|
gashev/algorithms
|
ea750b84658e282afad9db3cd51081e30521074b
|
[
"Unlicense"
] | null | null | null |
lengths = {0: 0, 1: 1}
def sequenceLength(n: int) -> int:
global lengths
if n not in lengths:
if n % 2 == 0:
lengths[n] = sequenceLength(n//2) + 1
else:
lengths[n] = sequenceLength(3 * n + 1) + 1
return lengths[n]
def solution(n: int = 1000000) -> int:
result = 0
maxLength = 0
for i in range(n):
counter = sequenceLength(i)
if counter > maxLength:
result = i
maxLength = counter
return result
print(solution())
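A small sanity check, not in the original file, using the worked example from the Project Euler problem statement (the chain starting at 13 has 10 terms):
assert sequenceLength(13) == 10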
| 22.826087
| 56
| 0.531429
|
lengths = {0: 0, 1: 1}
def sequenceLength(n: int) -> int:
global lengths
if n not in lengths:
if n % 2 == 0:
lengths[n] = sequenceLength(n//2) + 1
else:
lengths[n] = sequenceLength(3 * n + 1) + 1
return lengths[n]
def solution(n: int = 1000000) -> int:
result = 0
maxLength = 0
for i in range(n):
counter = sequenceLength(i)
if counter > maxLength:
result = i
maxLength = counter
return result
print(solution())
| true
| true
|
7902b04bd4549c6fba3065d0f0628471d91cc796
| 4,170
|
py
|
Python
|
api/lib/perm/acl.py
|
lilixiang/cmdb
|
d60857c26b9b81c8a33b72548b637cbde8782fe1
|
[
"MIT"
] | 1
|
2020-02-15T00:13:45.000Z
|
2020-02-15T00:13:45.000Z
|
api/lib/perm/acl.py
|
lilixiang/cmdb
|
d60857c26b9b81c8a33b72548b637cbde8782fe1
|
[
"MIT"
] | null | null | null |
api/lib/perm/acl.py
|
lilixiang/cmdb
|
d60857c26b9b81c8a33b72548b637cbde8782fe1
|
[
"MIT"
] | 1
|
2019-10-31T07:55:20.000Z
|
2019-10-31T07:55:20.000Z
|
# -*- coding:utf-8 -*-
import functools
import six
from flask import current_app, g, request
from flask import session, abort
from api.extensions import cache
def get_access_token():
return
class AccessTokenCache(object):
@classmethod
def get(cls):
if cache.get("AccessToken") is not None:
return cache.get("AccessToken")
res = get_access_token() or ""
cache.set("AccessToken", res, timeout=60 * 60)
return res
@classmethod
def clean(cls):
cache.clear("AccessToken")
class ACLManager(object):
def __init__(self):
self.access_token = AccessTokenCache.get()
self.acl_session = dict(uid=session.get("uid"),
token=self.access_token)
self.user_info = session["acl"] if "acl" in session else {}
def add_resource(self, name, resource_type_name=None):
pass
def grant_resource_to_role(self, name, role, resource_type_name=None):
pass
def del_resource(self, name, resource_type_name=None):
pass
def get_user_info(self, username):
return dict()
def get_resources(self, resource_type_name=None):
if "acl" not in session:
abort(405)
return []
def has_permission(self, resource_name, resource_type, perm):
if "acl" not in session:
abort(405)
return True
def validate_permission(resources, resource_type, perm):
if not resources:
return
if current_app.config.get("USE_ACL"):
if g.user.username == "worker":
return
resources = [resources] if isinstance(resources, six.string_types) else resources
for resource in resources:
if not ACLManager().has_permission(resource, resource_type, perm):
return abort(403, "has no permission")
def can_access_resources(resource_type):
def decorator_can_access_resources(func):
@functools.wraps(func)
def wrapper_can_access_resources(*args, **kwargs):
if current_app.config.get("USE_ACL"):
res = ACLManager().get_resources(resource_type)
result = {i.get("name"): i.get("permissions") for i in res}
if hasattr(g, "resources"):
g.resources.update({resource_type: result})
else:
g.resources = {resource_type: result}
return func(*args, **kwargs)
return wrapper_can_access_resources
return decorator_can_access_resources
def has_perm(resources, resource_type, perm):
def decorator_has_perm(func):
@functools.wraps(func)
def wrapper_has_perm(*args, **kwargs):
if not resources:
return
if current_app.config.get("USE_ACL"):
validate_permission(resources, resource_type, perm)
return func(*args, **kwargs)
return wrapper_has_perm
return decorator_has_perm
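An illustrative sketch, not part of the original module, of attaching the decorator to a Flask view; `app`, the route, the resource name, the resource type and the permission string are all hypothetical.
@app.route('/hosts')
@has_perm(['host_list'], 'CIType', 'read')
def list_hosts():
    return []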
def has_perm_from_args(arg_name, resource_type, perm, callback=None):
def decorator_has_perm(func):
@functools.wraps(func)
def wrapper_has_perm(*args, **kwargs):
if not arg_name:
return
resource = request.view_args.get(arg_name) or request.values.get(arg_name)
if callback is not None and resource:
resource = callback(resource)
if current_app.config.get("USE_ACL") and resource:
validate_permission(resource, resource_type, perm)
return func(*args, **kwargs)
return wrapper_has_perm
return decorator_has_perm
def role_required(role_name):
def decorator_role_required(func):
@functools.wraps(func)
def wrapper_role_required(*args, **kwargs):
if not role_name:
return
if current_app.config.get("USE_ACL"):
if role_name not in session.get("acl", {}).get("parentRoles", []):
return abort(403, "Role {0} is required".format(role_name))
return func(*args, **kwargs)
return wrapper_role_required
return decorator_role_required
| 29.785714
| 89
| 0.622302
|
import functools
import six
from flask import current_app, g, request
from flask import session, abort
from api.extensions import cache
def get_access_token():
return
class AccessTokenCache(object):
@classmethod
def get(cls):
if cache.get("AccessToken") is not None:
return cache.get("AccessToken")
res = get_access_token() or ""
cache.set("AccessToken", res, timeout=60 * 60)
return res
@classmethod
def clean(cls):
cache.clear("AccessToken")
class ACLManager(object):
def __init__(self):
self.access_token = AccessTokenCache.get()
self.acl_session = dict(uid=session.get("uid"),
token=self.access_token)
self.user_info = session["acl"] if "acl" in session else {}
def add_resource(self, name, resource_type_name=None):
pass
def grant_resource_to_role(self, name, role, resource_type_name=None):
pass
def del_resource(self, name, resource_type_name=None):
pass
def get_user_info(self, username):
return dict()
def get_resources(self, resource_type_name=None):
if "acl" not in session:
abort(405)
return []
def has_permission(self, resource_name, resource_type, perm):
if "acl" not in session:
abort(405)
return True
def validate_permission(resources, resource_type, perm):
if not resources:
return
if current_app.config.get("USE_ACL"):
if g.user.username == "worker":
return
resources = [resources] if isinstance(resources, six.string_types) else resources
for resource in resources:
if not ACLManager().has_permission(resource, resource_type, perm):
return abort(403, "has no permission")
def can_access_resources(resource_type):
def decorator_can_access_resources(func):
@functools.wraps(func)
def wrapper_can_access_resources(*args, **kwargs):
if current_app.config.get("USE_ACL"):
res = ACLManager().get_resources(resource_type)
result = {i.get("name"): i.get("permissions") for i in res}
if hasattr(g, "resources"):
g.resources.update({resource_type: result})
else:
g.resources = {resource_type: result}
return func(*args, **kwargs)
return wrapper_can_access_resources
return decorator_can_access_resources
def has_perm(resources, resource_type, perm):
def decorator_has_perm(func):
@functools.wraps(func)
def wrapper_has_perm(*args, **kwargs):
if not resources:
return
if current_app.config.get("USE_ACL"):
validate_permission(resources, resource_type, perm)
return func(*args, **kwargs)
return wrapper_has_perm
return decorator_has_perm
def has_perm_from_args(arg_name, resource_type, perm, callback=None):
def decorator_has_perm(func):
@functools.wraps(func)
def wrapper_has_perm(*args, **kwargs):
if not arg_name:
return
resource = request.view_args.get(arg_name) or request.values.get(arg_name)
if callback is not None and resource:
resource = callback(resource)
if current_app.config.get("USE_ACL") and resource:
validate_permission(resource, resource_type, perm)
return func(*args, **kwargs)
return wrapper_has_perm
return decorator_has_perm
def role_required(role_name):
def decorator_role_required(func):
@functools.wraps(func)
def wrapper_role_required(*args, **kwargs):
if not role_name:
return
if current_app.config.get("USE_ACL"):
if role_name not in session.get("acl", {}).get("parentRoles", []):
return abort(403, "Role {0} is required".format(role_name))
return func(*args, **kwargs)
return wrapper_role_required
return decorator_role_required
| true
| true
|
7902b34f6b114df659048615489dfce1fb8a915f
| 417
|
py
|
Python
|
cbexplorer/types/AbstractType.py
|
ambitus/cbexplorer
|
8bb50efce9f81e29ad96c5c1bd49e3d3e95cc97e
|
[
"Apache-2.0"
] | 1
|
2021-12-07T10:09:44.000Z
|
2021-12-07T10:09:44.000Z
|
cbexplorer/types/AbstractType.py
|
ambitus/cbexplorer
|
8bb50efce9f81e29ad96c5c1bd49e3d3e95cc97e
|
[
"Apache-2.0"
] | 1
|
2021-12-20T22:03:05.000Z
|
2021-12-20T22:03:05.000Z
|
cbexplorer/types/AbstractType.py
|
ambitus/cbexplorer
|
8bb50efce9f81e29ad96c5c1bd49e3d3e95cc97e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020- IBM Inc. All rights reserved
# SPDX-License-Identifier: Apache2.0
#
"""
"""
from abc import ABC, abstractproperty, abstractmethod
class AbstractType(ABC):
@abstractproperty
def length(self):
pass
@abstractmethod
def __call__(self):
pass
def _get_chunk(self):
return self.locator.content(self.length)
| 16.68
| 53
| 0.657074
|
from abc import ABC, abstractproperty, abstractmethod
class AbstractType(ABC):
@abstractproperty
def length(self):
pass
@abstractmethod
def __call__(self):
pass
def _get_chunk(self):
return self.locator.content(self.length)
| true
| true
|
7902b4e7cc22288f67b261811b0a338d1c514027
| 421
|
py
|
Python
|
backend/battleroyal_lite_28428/wsgi.py
|
crowdbotics-apps/battleroyal-lite-28428
|
0897d028c48f1021edf707799d1e6eead9d4a608
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/battleroyal_lite_28428/wsgi.py
|
crowdbotics-apps/battleroyal-lite-28428
|
0897d028c48f1021edf707799d1e6eead9d4a608
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/battleroyal_lite_28428/wsgi.py
|
crowdbotics-apps/battleroyal-lite-28428
|
0897d028c48f1021edf707799d1e6eead9d4a608
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
"""
WSGI config for battleroyal_lite_28428 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'battleroyal_lite_28428.settings')
application = get_wsgi_application()
| 24.764706
| 82
| 0.800475
|
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'battleroyal_lite_28428.settings')
application = get_wsgi_application()
| true
| true
|
7902b4e8d5c6a365b8aec1450fe7b1cbb2c5f3f1
| 6,764
|
py
|
Python
|
run.py
|
jparsai/cvejob
|
8f9462a1ecdf1d4de877ac5f44e772239ffcb379
|
[
"Apache-2.0"
] | null | null | null |
run.py
|
jparsai/cvejob
|
8f9462a1ecdf1d4de877ac5f44e772239ffcb379
|
[
"Apache-2.0"
] | null | null | null |
run.py
|
jparsai/cvejob
|
8f9462a1ecdf1d4de877ac5f44e772239ffcb379
|
[
"Apache-2.0"
] | null | null | null |
"""Run CVEjob."""
import sys
from decimal import Decimal
import multiprocessing
import nvdlib
from nvdlib.manager import FeedManager
from nvdlib.query_selectors import in_range
from cvejob.filters.input import validate_cve
from cvejob.config import Config
from cvejob.identifiers import get_identifier_cls
from cvejob.cpe2pkg import get_pkgfile_path, PackageNameCandidate
from cvejob.selectors.basic import VersionSelector
from cvejob.outputs.victims import VictimsYamlOutput
from cvejob.versions import NVDVersions
from cvejob.utils import parse_date_range
import logging
# logging configuration
logging.basicConfig(level=logging.DEBUG,
handlers=[nvdlib.get_logging_handler()]) # use nvdlib's handler
logger = logging.getLogger('cvejob')
FEED_NAME_PATTERN = r"nvdcve-" \
r"(?P<version>[\d.]+)-" \
r"(?P<name>(?P<name_string>(([A-Za-z]+)))|(?P<name_year>([\d]+)))" \
r".json"
def _log_results(victims_output):
"""Log results."""
cve_id = victims_output.cve.id_
logger.info(
"[{cve_id}] picked `{winner}` out of `{candidates}`".format(
cve_id=cve_id,
winner=victims_output.winner,
candidates=victims_output.candidates
))
logger.info(
"[{cve_id}] Affected version range: {version_ranges}".format(
cve_id=cve_id,
version_ranges=victims_output.affected_versions
))
logger.info(
"[{cve_id}] Safe version range: {version_ranges}".format(
cve_id=cve_id,
version_ranges=victims_output.safe_versions
))
def _filter_collection(collection, date_range, cherry_pick):
"""Filter Document collection."""
if date_range:
collection_size_before = collection.count()
collection = collection.find(
{'published_date': in_range(*date_range)}
)
logger.debug(("Filtered out {} Documents that do not fall "
"in the given range.").format(
collection_size_before - collection.count()
))
if cherry_pick:
logger.debug("Cherry-picked CVE `{cve_id}`".format(
cve_id=cherry_pick
))
collection = collection.find(
{'cve.id_': cherry_pick}
)
return collection
def run():
"""Run CVEjob."""
feed_dir = Config.feed_dir
feed_names = Config.feed_names
date_range = Config.date_range
cherrypicked_cve_id = Config.cve_id
cherrypicked_year = None
if cherrypicked_cve_id:
cherrypicked_year = cherrypicked_cve_id.split(sep='-')[1]
if int(cherrypicked_year) < 2002:
# all CVEs prior to 2002 are stored in 2002 feed
cherrypicked_year = 2002
if date_range:
date_range = parse_date_range(Config.date_range)
feed_names = range(date_range[0].year, date_range[1].year + 1)
if cherrypicked_cve_id: # optimization check
if int(cherrypicked_year) not in feed_names:
logger.info(
"[{picked_cve_id}] does not belong to the given feed range:"
" {date_range}".format(
picked_cve_id=cherrypicked_cve_id,
date_range=date_range
))
return
# prune the feed names as it is not necessary to iterate over all of them
feed_names = [cherrypicked_year]
if not feed_names:
if cherrypicked_cve_id:
feed_names = [cherrypicked_year]
else:
feed_names = ['modified']
with FeedManager(n_workers=multiprocessing.cpu_count()) as feed_manager:
feeds = feed_manager.fetch_feeds(
feed_names=feed_names, data_dir=feed_dir, update=True
)
collection = feed_manager.collect(feeds)
collection = _filter_collection(collection,
date_range,
cherrypicked_cve_id)
if not collection: # collection is empty
logger.info(
"Collection is empty.".format(
picked_cve_id=cherrypicked_cve_id,
))
return
logger.debug("Number of CVE Documents in the collection: {}".format(
collection.count()
))
if Config.package_name and Config.cve_id:
# user knows the package name, so we don't have to guess ;)
doc = [x for x in collection][0] # Collection doesn't support indexing
affected, safe = NVDVersions(doc, Config.package_name, Config.ecosystem).run()
victims_output = VictimsYamlOutput(
ecosystem=Config.ecosystem,
cve_doc=doc,
winner=PackageNameCandidate(Config.package_name, Decimal('1.0')),
candidates=[],
affected=affected,
fixedin=safe
)
_log_results(victims_output)
victims_output.write()
sys.exit(0)
for doc in collection:
cve_id = doc.cve.id_
try:
if not validate_cve(doc):
logger.debug(
"[{cve_id}] was filtered out by input checks".format(
cve_id=cve_id
))
continue
pkgfile_path = get_pkgfile_path(Config.pkgfile_dir, Config.ecosystem)
identifier = get_identifier_cls()(doc, Config.ecosystem, pkgfile_path)
candidates = identifier.identify()
if not candidates:
logger.info(
"[{cve_id}] no package name candidates found".format(
cve_id=cve_id
))
continue
selector = VersionSelector(doc, candidates, Config.ecosystem)
winner = selector.pick_winner()
if not winner:
logger.info(
"[{cve_id}] no package name found".format(
cve_id=cve_id
))
continue
affected, safe = NVDVersions(doc, winner.package, Config.ecosystem).run()
victims_output = VictimsYamlOutput(
ecosystem=Config.ecosystem,
cve_doc=doc,
winner=winner,
candidates=candidates,
affected=affected,
fixedin=safe
)
_log_results(victims_output)
victims_output.write()
except Exception as exc:
logger.warning(
"[{cve_id}] Unexpected exception occurred: {exc}".format(
cve_id=cve_id,
exc=exc
), exc_info=True)
if __name__ == '__main__':
run()
| 29.666667
| 88
| 0.582496
|
import sys
from decimal import Decimal
import multiprocessing
import nvdlib
from nvdlib.manager import FeedManager
from nvdlib.query_selectors import in_range
from cvejob.filters.input import validate_cve
from cvejob.config import Config
from cvejob.identifiers import get_identifier_cls
from cvejob.cpe2pkg import get_pkgfile_path, PackageNameCandidate
from cvejob.selectors.basic import VersionSelector
from cvejob.outputs.victims import VictimsYamlOutput
from cvejob.versions import NVDVersions
from cvejob.utils import parse_date_range
import logging
logging.basicConfig(level=logging.DEBUG,
handlers=[nvdlib.get_logging_handler()])
logger = logging.getLogger('cvejob')
FEED_NAME_PATTERN = r"nvdcve-" \
r"(?P<version>[\d.]+)-" \
r"(?P<name>(?P<name_string>(([A-Za-z]+)))|(?P<name_year>([\d]+)))" \
r".json"
def _log_results(victims_output):
cve_id = victims_output.cve.id_
logger.info(
"[{cve_id}] picked `{winner}` out of `{candidates}`".format(
cve_id=cve_id,
winner=victims_output.winner,
candidates=victims_output.candidates
))
logger.info(
"[{cve_id}] Affected version range: {version_ranges}".format(
cve_id=cve_id,
version_ranges=victims_output.affected_versions
))
logger.info(
"[{cve_id}] Safe version range: {version_ranges}".format(
cve_id=cve_id,
version_ranges=victims_output.safe_versions
))
def _filter_collection(collection, date_range, cherry_pick):
if date_range:
collection_size_before = collection.count()
collection = collection.find(
{'published_date': in_range(*date_range)}
)
logger.debug(("Filtered out {} Documents that do not fall "
"in the given range.").format(
collection_size_before - collection.count()
))
if cherry_pick:
logger.debug("Cherry-picked CVE `{cve_id}`".format(
cve_id=cherry_pick
))
collection = collection.find(
{'cve.id_': cherry_pick}
)
return collection
def run():
feed_dir = Config.feed_dir
feed_names = Config.feed_names
date_range = Config.date_range
cherrypicked_cve_id = Config.cve_id
cherrypicked_year = None
if cherrypicked_cve_id:
cherrypicked_year = cherrypicked_cve_id.split(sep='-')[1]
if int(cherrypicked_year) < 2002:
# all CVEs prior to 2002 are stored in 2002 feed
cherrypicked_year = 2002
if date_range:
date_range = parse_date_range(Config.date_range)
feed_names = range(date_range[0].year, date_range[1].year + 1)
if cherrypicked_cve_id: # optimization check
if int(cherrypicked_year) not in feed_names:
logger.info(
"[{picked_cve_id}] does not belong to the given feed range:"
" {date_range}".format(
picked_cve_id=cherrypicked_cve_id,
date_range=date_range
))
return
# prune the feed names as it is not necessary to iterate over all of them
feed_names = [cherrypicked_year]
if not feed_names:
if cherrypicked_cve_id:
feed_names = [cherrypicked_year]
else:
feed_names = ['modified']
with FeedManager(n_workers=multiprocessing.cpu_count()) as feed_manager:
feeds = feed_manager.fetch_feeds(
feed_names=feed_names, data_dir=feed_dir, update=True
)
collection = feed_manager.collect(feeds)
collection = _filter_collection(collection,
date_range,
cherrypicked_cve_id)
if not collection: # collection is empty
logger.info(
"Collection is empty.".format(
picked_cve_id=cherrypicked_cve_id,
))
return
logger.debug("Number of CVE Documents in the collection: {}".format(
collection.count()
))
if Config.package_name and Config.cve_id:
# user knows the package name, so we don't have to guess ;)
doc = [x for x in collection][0]
affected, safe = NVDVersions(doc, Config.package_name, Config.ecosystem).run()
victims_output = VictimsYamlOutput(
ecosystem=Config.ecosystem,
cve_doc=doc,
winner=PackageNameCandidate(Config.package_name, Decimal('1.0')),
candidates=[],
affected=affected,
fixedin=safe
)
_log_results(victims_output)
victims_output.write()
sys.exit(0)
for doc in collection:
cve_id = doc.cve.id_
try:
if not validate_cve(doc):
logger.debug(
"[{cve_id}] was filtered out by input checks".format(
cve_id=cve_id
))
continue
pkgfile_path = get_pkgfile_path(Config.pkgfile_dir, Config.ecosystem)
identifier = get_identifier_cls()(doc, Config.ecosystem, pkgfile_path)
candidates = identifier.identify()
if not candidates:
logger.info(
"[{cve_id}] no package name candidates found".format(
cve_id=cve_id
))
continue
selector = VersionSelector(doc, candidates, Config.ecosystem)
winner = selector.pick_winner()
if not winner:
logger.info(
"[{cve_id}] no package name found".format(
cve_id=cve_id
))
continue
affected, safe = NVDVersions(doc, winner.package, Config.ecosystem).run()
victims_output = VictimsYamlOutput(
ecosystem=Config.ecosystem,
cve_doc=doc,
winner=winner,
candidates=candidates,
affected=affected,
fixedin=safe
)
_log_results(victims_output)
victims_output.write()
except Exception as exc:
logger.warning(
"[{cve_id}] Unexpected exception occurred: {exc}".format(
cve_id=cve_id,
exc=exc
), exc_info=True)
if __name__ == '__main__':
run()
| true
| true
|
7902b4e8efc61c4f0d7ad21a1538407cbb8b5c9f
| 10,568
|
py
|
Python
|
src/module_tick.py
|
lanl/pymplot
|
093769a06051c6b41be76ce1431e6a6e64477e5a
|
[
"BSD-3-Clause"
] | 3
|
2020-10-29T20:49:10.000Z
|
2020-11-01T05:01:49.000Z
|
src/module_tick.py
|
lanl/pymplot
|
093769a06051c6b41be76ce1431e6a6e64477e5a
|
[
"BSD-3-Clause"
] | null | null | null |
src/module_tick.py
|
lanl/pymplot
|
093769a06051c6b41be76ce1431e6a6e64477e5a
|
[
"BSD-3-Clause"
] | 2
|
2020-11-12T19:07:14.000Z
|
2020-11-25T21:45:54.000Z
|
'''
Module:
Set regular or irregular axis ticks for a plot.
'''
from module_utility import *
import numpy as np
import matplotlib.pyplot as plt
# ticks : contains irregular ticks locations
# tickbeg : regular major ticks begin location
# tickend : regular major ticks end location
# tickd : regular major ticks interval
# mtick : number of minor tick intervals between two major ticks
# xbeg : axis begin location
# xend : axis end location
# ns : number of points to plot
# d : interval between two points
# axislen : apparent axis length
def define_tick(ticks, tickbeg, tickend, tickd, mtick, xbeg, xend, ns, d, axislen, format, extend=False):
# regular ticks
if ticks is None:
# major tick interval
if tickd is None:
tick_interval = nice((xend - xbeg) / 5.0)
if tick_interval == 0:
tick_interval = 1.0e10
else:
tick_interval = float(tickd)
# tick begin location
if tickbeg is None:
tick_beg = nice(xbeg)
base = 0.5
nb = 0
if tick_interval > 0:
while nb <= 10 and tick_beg > xbeg + tick_interval:
base = base / 10.0
tick_beg = nice(xbeg, base)
nb = nb + 1
else:
while nb <= 10 and tick_beg < xbeg + tick_interval:
base = base / 10.0
tick_beg = nice(xbeg, base)
nb = nb + 1
else:
tick_beg = float(tickbeg)
# tick end location
if tickend is None:
tick_end = tick_beg + (round((xend - xbeg) / tick_interval) + 2) * tick_interval
if tick_interval > 0:
while tick_end < xend:
tick_end = tick_end + abs(tick_interval)
else:
while tick_end > xend:
tick_end = tick_end - abs(tick_interval)
else:
tick_end = float(tickend)
# regular major and minor tick locations
tick = np.arange(tick_beg, tick_end + 0.1 * abs(tick_interval), tick_interval)
minor_tick_interval = tick_interval / (mtick + 1.0)
minor_tick = np.arange(tick_beg, tick_end + 0.1 * abs(minor_tick_interval), minor_tick_interval)
# some ticks might fall outside the axis range; remove them unless extending
if not extend:
if d > 0:
tick = np.asarray([i for i in tick if i >= xbeg and i <= xend])
minor_tick = np.asarray(
[i for i in minor_tick if i >= xbeg and i <= xend and (not i in tick)])
if d < 0:
tick = np.asarray([i for i in tick if i <= xbeg and i >= xend])
minor_tick = np.asarray(
[i for i in minor_tick if i <= xbeg and i >= xend and (not i in tick)])
# linearly scale the ticks to figure canvas
if ns == 1:
# if only one sample point, then tick location is 0.5
tick_location = np.asarray([0.5])
ntick = 1
else:
# if multiple sample points, then scale to apparent axis length
tick_location = [(i - xbeg + 0.5 * d) / ((ns - 1) * d) * axislen for i in tick]
minor_tick_location = [(i - xbeg + 0.5 * d) / ((ns - 1) * d) * axislen for i in minor_tick]
t = tick_location
# set major tick location and labels, note some major ticks might be out of axis range
tl = []
tick_label = []
for i in range(0, len(tick)):
if extend or ((not extend) and tick_location[i] >= 0 and tick_location[i] <= axislen + 1.0e-10):
tl.append(tick_location[i])
if format == 'sci' or format == 'plain':
tick_label.append(('%f' % tick[i]).rstrip('0').rstrip('.'))
else:
tick_label.append((format % tick[i]))
tick_location = tl
# irregular ticks
else:
# get contents from user-specified ticks
ticks = ticks[0].split(',')
location = [0 for i in range(0, len(ticks))]
label = ['' for i in range(0, len(ticks))]
# set tick locations
for i in range(0, len(ticks)):
t = ticks[i].split(':')
location[i] = (float(t[0]) + 0.5 * d) / ((ns - 1) * d) * axislen
label[i] = t[1]
# sort according to tick location
yx = list(zip(location, label))
yx.sort()
tick_location = [location for location, label in yx]
tick_label = [label for location, label in yx]
# minor ticks
if mtick != 0:
mtick = mtick + 1
minor_tick_location = np.linspace(tick_location[0], tick_location[1], mtick + 1)
minor_tick_location = minor_tick_location[1:mtick]
for i in range(1, len(tick_location) - 1):
t = np.linspace(tick_location[i], tick_location[i + 1], mtick + 1)
minor_tick_location = np.append(minor_tick_location, t[1:mtick])
else:
minor_tick_location = []
# return major tick location, major tick label and minor tick location
return tick_location, tick_label, minor_tick_location
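A hedged example call, not in the original module, illustrating the parameters documented above: regular ticks for an axis sampled at 101 points spaced 0.02 apart (so spanning 0.0 to 2.0), drawn on a canvas axis 6 units long, with a single minor tick between consecutive major ticks and plain-number labels.
locations, labels, minor_locations = define_tick(
    None, None, None, None, 1, 0.0, 2.0, 101, 0.02, 6.0, 'plain')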
def set_tick(args,
font,
x1beg,
x1end,
n1beg,
n1end,
d1,
axis1len,
x2beg,
x2end,
n2beg,
n2end,
d2,
axis2len,
extend=False):
ax = plt.gca()
label_1_size = float(args.label1size)
label_2_size = float(args.label2size)
xlabel = ax.set_xlabel(args.label2, fontsize=label_2_size, labelpad=float(args.label2pad)*72*2)
ylabel = ax.set_ylabel(args.label1, fontsize=label_1_size, labelpad=float(args.label1pad)*72*2)
l = ax.yaxis.get_label()
l.set_fontproperties(font)
l.set_fontsize(label_1_size)
l = ax.xaxis.get_label()
l.set_fontproperties(font)
l.set_fontsize(label_2_size)
if args.label2loc is not None:
ax.xaxis.set_label_position(args.label2loc)
else:
if args.ticktop:
ax.xaxis.set_label_position('top')
else:
ax.xaxis.set_label_position('bottom')
if args.label1loc is not None:
ax.yaxis.set_label_position(args.label1loc)
else:
if args.tickleft:
ax.yaxis.set_label_position('left')
else:
ax.yaxis.set_label_position('right')
ylabel.set_rotation(270)
# ticks on/off
ax.get_yaxis().set_tick_params(which='both', direction='out')
ax.get_xaxis().set_tick_params(which='both', direction='out')
plt.tick_params(
axis='x', # changes apply to the x2-axis (horizontal)
which='both', # both major and minor ticks are affected
bottom=args.tickbottom, # ticks along the bottom axis
top=args.ticktop, # ticks along the top axis
labelbottom=args.tickbottom, # labels along the bottom axis
labeltop=args.ticktop) # labels along the top axis
plt.tick_params(
axis='y', # changes apply to the x1-axis (vertical)
which='both', # both major and minor ticks are affected
left=args.tickleft, # ticks along the left axis
right=args.tickright, # ticks along the right axis
labelleft=args.tickleft, # labels along the left axis
labelright=args.tickright) # labels along the right axis
# if tick font size and family not specified, then inherit from axis labels
if args.tick1size is None:
tick_1_font_size = label_1_size - 2
else:
tick_1_font_size = float(args.tick1size)
if args.tick2size is None:
tick_2_font_size = label_2_size - 2
else:
tick_2_font_size = float(args.tick2size)
# axis 1
tick_1_location, tick_1_label, tick_1_minor = define_tick(args.ticks1, args.tick1beg, args.tick1end,
args.tick1d, args.mtick1, x1beg, x1end,
n1end - n1beg + 1, d1, axis1len,
args.tick1format, extend)
plt.yticks(tick_1_location, tick_1_label, fontsize=tick_1_font_size, rotation=float(args.tick1rot))
if not args.tick1label:
ax.yaxis.set_ticklabels([])
# axis 2
tick_2_location, tick_2_label, tick_2_minor = define_tick(args.ticks2, args.tick2beg, args.tick2end,
args.tick2d, args.mtick2, x2beg, x2end,
n2end - n2beg + 1, d2, axis2len,
args.tick2format, extend)
plt.xticks(tick_2_location, tick_2_label, fontsize=tick_2_font_size, rotation=float(args.tick2rot))
if not args.tick2label:
ax.xaxis.set_ticklabels([])
# major ticks style
ax.tick_params('both', length=float(args.tickmajorlen), width=float(args.tickmajorwid), which='major')
# minor tick positions
ax.set_yticks(tick_1_minor, minor=True)
ax.set_xticks(tick_2_minor, minor=True)
# minor ticks style
if args.tickminorlen is None:
tick_minor_length = 0.5 * float(args.tickmajorlen)
else:
tick_minor_length = float(args.tickminorlen)
if args.tickminorwid is None:
tick_minor_width = 0.75 * float(args.tickmajorwid)
else:
tick_minor_width = float(args.tickminorwid)
ax.tick_params('both', length=tick_minor_length, width=tick_minor_width, which='minor')
for l in ax.yaxis.get_ticklabels():
l.set_fontproperties(font)
l.set_fontsize(tick_1_font_size)
for l in ax.xaxis.get_ticklabels():
l.set_fontproperties(font)
l.set_fontsize(tick_2_font_size)
# make tick labels rigid
def rigid_tick_label(tick_label):
ndec = 0
for i in tick_label:
dec = i.split('.')
if len(dec) == 2:
ll = len(dec[1])
if ll > ndec:
ndec = ll
for i in range(0, len(tick_label)):
dec = tick_label[i].split('.')
if len(dec) == 2:
ll = len(dec[1])
if ll < ndec:
for k in range(0, ndec - ll):
tick_label[i] = tick_label[i] + '0'
if len(dec) == 1 and ndec != 0:
tick_label[i] = tick_label[i] + '.'
for k in range(0, ndec):
tick_label[i] = tick_label[i] + '0'
return tick_label
| 37.475177
| 108
| 0.568793
|
from module_utility import *
import numpy as np
import matplotlib.pyplot as plt
def define_tick(ticks, tickbeg, tickend, tickd, mtick, xbeg, xend, ns, d, axislen, format, extend=False):
if ticks is None:
if tickd is None:
tick_interval = nice((xend - xbeg) / 5.0)
if tick_interval == 0:
tick_interval = 1.0e10
else:
tick_interval = float(tickd)
if tickbeg is None:
tick_beg = nice(xbeg)
base = 0.5
nb = 0
if tick_interval > 0:
while nb <= 10 and tick_beg > xbeg + tick_interval:
base = base / 10.0
tick_beg = nice(xbeg, base)
nb = nb + 1
else:
while nb <= 10 and tick_beg < xbeg + tick_interval:
base = base / 10.0
tick_beg = nice(xbeg, base)
nb = nb + 1
else:
tick_beg = float(tickbeg)
if tickend is None:
tick_end = tick_beg + (round((xend - xbeg) / tick_interval) + 2) * tick_interval
if tick_interval > 0:
while tick_end < xend:
tick_end = tick_end + abs(tick_interval)
else:
while tick_end > xend:
tick_end = tick_end - abs(tick_interval)
else:
tick_end = float(tickend)
tick = np.arange(tick_beg, tick_end + 0.1 * abs(tick_interval), tick_interval)
minor_tick_interval = tick_interval / (mtick + 1.0)
minor_tick = np.arange(tick_beg, tick_end + 0.1 * abs(minor_tick_interval), minor_tick_interval)
if not extend:
if d > 0:
tick = np.asarray([i for i in tick if i >= xbeg and i <= xend])
minor_tick = np.asarray(
[i for i in minor_tick if i >= xbeg and i <= xend and (not i in tick)])
if d < 0:
tick = np.asarray([i for i in tick if i <= xbeg and i >= xend])
minor_tick = np.asarray(
[i for i in minor_tick if i <= xbeg and i >= xend and (not i in tick)])
if ns == 1:
tick_location = np.asarray([0.5])
ntick = 1
else:
tick_location = [(i - xbeg + 0.5 * d) / ((ns - 1) * d) * axislen for i in tick]
minor_tick_location = [(i - xbeg + 0.5 * d) / ((ns - 1) * d) * axislen for i in minor_tick]
t = tick_location
tl = []
tick_label = []
for i in range(0, len(tick)):
if extend or ((not extend) and tick_location[i] >= 0 and tick_location[i] <= axislen + 1.0e-10):
tl.append(tick_location[i])
if format == 'sci' or format == 'plain':
tick_label.append(('%f' % tick[i]).rstrip('0').rstrip('.'))
else:
tick_label.append((format % tick[i]))
tick_location = tl
else:
ticks = ticks[0].split(',')
location = [0 for i in range(0, len(ticks))]
label = ['' for i in range(0, len(ticks))]
for i in range(0, len(ticks)):
t = ticks[i].split(':')
location[i] = (float(t[0]) + 0.5 * d) / ((ns - 1) * d) * axislen
label[i] = t[1]
yx = list(zip(location, label))
yx.sort()
tick_location = [location for location, label in yx]
tick_label = [label for location, label in yx]
if mtick != 0:
mtick = mtick + 1
minor_tick_location = np.linspace(tick_location[0], tick_location[1], mtick + 1)
minor_tick_location = minor_tick_location[1:mtick]
for i in range(1, len(tick_location) - 1):
t = np.linspace(tick_location[i], tick_location[i + 1], mtick + 1)
minor_tick_location = np.append(minor_tick_location, t[1:mtick])
else:
minor_tick_location = []
return tick_location, tick_label, minor_tick_location
def set_tick(args,
font,
x1beg,
x1end,
n1beg,
n1end,
d1,
axis1len,
x2beg,
x2end,
n2beg,
n2end,
d2,
axis2len,
extend=False):
ax = plt.gca()
label_1_size = float(args.label1size)
label_2_size = float(args.label2size)
xlabel = ax.set_xlabel(args.label2, fontsize=label_2_size, labelpad=float(args.label2pad)*72*2)
ylabel = ax.set_ylabel(args.label1, fontsize=label_1_size, labelpad=float(args.label1pad)*72*2)
l = ax.yaxis.get_label()
l.set_fontproperties(font)
l.set_fontsize(label_1_size)
l = ax.xaxis.get_label()
l.set_fontproperties(font)
l.set_fontsize(label_2_size)
if args.label2loc is not None:
ax.xaxis.set_label_position(args.label2loc)
else:
if args.ticktop:
ax.xaxis.set_label_position('top')
else:
ax.xaxis.set_label_position('bottom')
if args.label1loc is not None:
ax.yaxis.set_label_position(args.label1loc)
else:
if args.tickleft:
ax.yaxis.set_label_position('left')
else:
ax.yaxis.set_label_position('right')
ylabel.set_rotation(270)
ax.get_yaxis().set_tick_params(which='both', direction='out')
ax.get_xaxis().set_tick_params(which='both', direction='out')
plt.tick_params(
axis='x',
which='both',
bottom=args.tickbottom,
top=args.ticktop,
labelbottom=args.tickbottom,
labeltop=args.ticktop)
plt.tick_params(
axis='y',
which='both',
left=args.tickleft,
right=args.tickright,
labelleft=args.tickleft,
labelright=args.tickright)
if args.tick1size is None:
tick_1_font_size = label_1_size - 2
else:
tick_1_font_size = float(args.tick1size)
if args.tick2size is None:
tick_2_font_size = label_2_size - 2
else:
tick_2_font_size = float(args.tick2size)
tick_1_location, tick_1_label, tick_1_minor = define_tick(args.ticks1, args.tick1beg, args.tick1end,
args.tick1d, args.mtick1, x1beg, x1end,
n1end - n1beg + 1, d1, axis1len,
args.tick1format, extend)
plt.yticks(tick_1_location, tick_1_label, fontsize=tick_1_font_size, rotation=float(args.tick1rot))
if not args.tick1label:
ax.yaxis.set_ticklabels([])
tick_2_location, tick_2_label, tick_2_minor = define_tick(args.ticks2, args.tick2beg, args.tick2end,
args.tick2d, args.mtick2, x2beg, x2end,
n2end - n2beg + 1, d2, axis2len,
args.tick2format, extend)
plt.xticks(tick_2_location, tick_2_label, fontsize=tick_2_font_size, rotation=float(args.tick2rot))
if not args.tick2label:
ax.xaxis.set_ticklabels([])
ax.tick_params('both', length=float(args.tickmajorlen), width=float(args.tickmajorwid), which='major')
ax.set_yticks(tick_1_minor, minor=True)
ax.set_xticks(tick_2_minor, minor=True)
if args.tickminorlen is None:
tick_minor_length = 0.5 * float(args.tickmajorlen)
else:
tick_minor_length = float(args.tickminorlen)
if args.tickminorwid is None:
tick_minor_width = 0.75 * float(args.tickmajorwid)
else:
tick_minor_width = float(args.tickminorwid)
ax.tick_params('both', length=tick_minor_length, width=tick_minor_width, which='minor')
for l in ax.yaxis.get_ticklabels():
l.set_fontproperties(font)
l.set_fontsize(tick_1_font_size)
for l in ax.xaxis.get_ticklabels():
l.set_fontproperties(font)
l.set_fontsize(tick_2_font_size)
def rigid_tick_label(tick_label):
ndec = 0
for i in tick_label:
dec = i.split('.')
if len(dec) == 2:
ll = len(dec[1])
if ll > ndec:
ndec = ll
for i in range(0, len(tick_label)):
dec = tick_label[i].split('.')
if len(dec) == 2:
ll = len(dec[1])
if ll < ndec:
for k in range(0, ndec - ll):
tick_label[i] = tick_label[i] + '0'
if len(dec) == 1 and ndec != 0:
tick_label[i] = tick_label[i] + '.'
for k in range(0, ndec):
tick_label[i] = tick_label[i] + '0'
return tick_label
| true
| true
|
7902b51d73c71ea57a8e3a2f7e254e9fb0256044
| 1,016
|
py
|
Python
|
stagesetting/validators.py
|
kezabelle/django-stagesetting
|
d01029c309915599e9a0413c1be0957638725f69
|
[
"BSD-2-Clause-FreeBSD"
] | 5
|
2015-10-19T13:35:57.000Z
|
2020-06-15T14:26:28.000Z
|
stagesetting/validators.py
|
kezabelle/django-stagesetting
|
d01029c309915599e9a0413c1be0957638725f69
|
[
"BSD-2-Clause-FreeBSD"
] | 9
|
2015-08-19T14:05:27.000Z
|
2017-05-17T09:27:41.000Z
|
stagesetting/validators.py
|
kezabelle/django-stagesetting
|
d01029c309915599e9a0413c1be0957638725f69
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator
from django.utils.translation import ugettext_lazy as _
from re import compile
setting_name_re_str = '^[A-Z][A-Z0-9_]+[A-Z0-9]$'
class SettingNameValidator(RegexValidator):
message = _("Setting format should be CAPITAL_WITH_UNDERSCORES")
regex = compile(setting_name_re_str)
validate_setting_name = SettingNameValidator()
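A brief illustration, not part of the original module, of how the validator behaves:
validate_setting_name('EMAIL_BACKEND') # matches CAPITAL_WITH_UNDERSCORES, passes silently
# validate_setting_name('email_backend') would raise ValidationError.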
def validate_formish(value):
try:
assert hasattr(value, 'is_valid')
assert hasattr(value, 'clean')
except AssertionError:
raise ValidationError("%(form)r doesn't appear to be a Form class" % {
'form': value})
def validate_default(value):
try:
assert hasattr(value, '__getitem__')
assert hasattr(value, 'keys')
except AssertionError:
raise ValidationError("%r doesn't appear to be dictish" % value)
| 29.882353
| 78
| 0.722441
|
from __future__ import absolute_import
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator
from django.utils.translation import ugettext_lazy as _
from re import compile
setting_name_re_str = '^[A-Z][A-Z0-9_]+[A-Z0-9]$'
class SettingNameValidator(RegexValidator):
message = _("Setting format should be CAPITAL_WITH_UNDERSCORES")
regex = compile(setting_name_re_str)
validate_setting_name = SettingNameValidator()
def validate_formish(value):
try:
assert hasattr(value, 'is_valid')
assert hasattr(value, 'clean')
except AssertionError:
raise ValidationError("%(form)r doesn't appear to be a Form class" % {
'form': value})
def validate_default(value):
try:
assert hasattr(value, '__getitem__')
assert hasattr(value, 'keys')
except AssertionError:
raise ValidationError("%r doesn't appear to be dictish" % value)
| true
| true
|
7902b5cdce5da8596b8def22ae4be02ddead718b
| 8,051
|
py
|
Python
|
comodit_client/api/exporter.py
|
geoco84/comodit-client
|
4cf47e60a6739ed8b88ce8b955ed57375c4d400d
|
[
"MIT"
] | null | null | null |
comodit_client/api/exporter.py
|
geoco84/comodit-client
|
4cf47e60a6739ed8b88ce8b955ed57375c4d400d
|
[
"MIT"
] | null | null | null |
comodit_client/api/exporter.py
|
geoco84/comodit-client
|
4cf47e60a6739ed8b88ce8b955ed57375c4d400d
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Provides the exporter tool. The exporter can be used to export ComodIT entities
to local directories.
"""
from __future__ import print_function
from builtins import object
import os
from comodit_client.api.collection import EntityNotFoundException
from comodit_client.api.exceptions import PythonApiException
from comodit_client.api.host import Host
from comodit_client.rest.exceptions import ApiException
from comodit_client.util.path import ensure
import six
from comodit_client.api import orchestration
class ExportException(Exception):
"""
Exception raised by exporter in case of error.
"""
pass
class Export(object):
"""
The exporter is a tool for exporting entities to local
directories. Exported entities may later be (re-)imported (see L{Import}).
"""
def __init__(self, force = False):
"""
Creates an exporter instance. If the force flag is set, any data already
present in a destination folder is overwritten on export.
@param force: If True, force flag is set. It is not otherwise.
@type force: bool
"""
self._force = force
def _export_files_content(self, entity, output_folder):
for template in entity.files():
file_name = template.name
try:
with open(os.path.join(output_folder, file_name), "w") as f:
if six.PY2:
f.write(template.read_content().encode('utf-8'))
else:
f.write(template.read_content())
except ApiException as e:
if e.code == 404:
pass
else:
raise e
def _export_entity(self, res, res_folder, export_files = False, export_thumb = False, backup = False):
if backup:
print("backup", res.name, "to", res_folder)
else:
print("exporting", res.name, "to", res_folder)
# Ensures local repository does not contain stale data
if(os.path.exists(res_folder) and len(os.listdir(res_folder)) > 0) and not self._force:
raise ExportException(res_folder + " already exists and is not empty.")
res.dump(res_folder)
if export_files:
# Dump files' content to disk
files_folder = os.path.join(res_folder, "files")
ensure(files_folder)
self._export_files_content(res, files_folder)
if export_thumb:
# Dump thumbnail to disk
try:
content = res.read_thumbnail_content()
with open(os.path.join(res_folder, "thumb"), "wb") as f:
f.write(content)
except ApiException as e:
if e.code == 404:
pass
else:
raise e
def export_application(self, app, path, backup = False):
"""
Exports an application to a local folder.
@param app: The application to export.
@type app: L{Application}
@param path: Path to local directory.
@type path: string
@param backup: indicates whether this is a backup.
@type backup: bool
"""
self._export_entity(app, path, True, True, backup)
def export_distribution(self, dist, path, backup = False):
"""
Exports a distribution to a local folder.
@param dist: The distribution to export.
@type dist: L{Distribution}
@param path: Path to local directory.
@type path: string
@param backup: indicates whether this is a backup.
@type backup: bool
"""
self._export_entity(dist, path, True, True, backup)
def export_platform(self, plat, path, backup = False):
"""
Exports a platform to a local folder.
@param plat: The platform to export.
@type plat: L{Platform}
@param path: Path to local directory.
@type path: string
@param backup: indicates whether this is a backup.
@type backup: bool
"""
self._export_entity(plat, path, True, backup=backup)
def export_environment(self, env, path):
"""
Exports an environment to a local folder. The hosts of the environment
are also exported.
@param env: The environment to export.
@type env: L{Environment}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(env, path)
hosts_folder = os.path.join(path, "hosts")
for host in env.hosts():
self.export_host(host, os.path.join(hosts_folder, host.name))
def export_job(self, job, path):
"""
Exports a job to a local folder.
@param job: The job to export.
@type job: L{Job}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(job, path)
def export_orchestration(self, orchestration, path):
"""
Exports an orchestration to a local folder.
@param orchestration: The orchestration to export.
@type orchestration: L{Orchestration}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(orchestration, path)
def export_notification(self, notification, path):
"""
Exports a notification to a local folder.
@param notification: The notification to export.
@type notification: L{Notification}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(notification, path)
def export_host(self, host, path):
"""
Exports a host to a local folder. Contexts and the instance are also
exported.
@param host: The host to export.
@type host: L{Host}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(host, path)
# Export instance
if host.state != Host.State.DEFINED:
try:
instance = host.get_instance()
instance.dump_json(os.path.join(path, "instance.json"))
except PythonApiException:
pass
# Export application contexts
app_folder = os.path.join(path, "applications")
ensure(app_folder)
for context in host.applications():
context.dump_json(os.path.join(app_folder, context.application + ".json"))
# Export platform context
try:
host.get_platform().dump_json(os.path.join(path, "platform.json"))
except EntityNotFoundException:
pass
# Export distribution context
try:
host.get_distribution().dump_json(os.path.join(path, "distribution.json"))
except EntityNotFoundException:
pass
def export_organization(self, org, path):
"""
Exports an organization to a local folder. Environments, applications,
distributions and platforms are also exported.
@param org: The organization to export.
@type org: L{Organization}
@param path: Path to local directory.
@type path: string
"""
self._export_entity(org, path)
for app in org.applications():
self.export_application(app, os.path.join(path, "applications", app.name))
for dist in org.distributions():
self.export_distribution(dist, os.path.join(path, "distributions", dist.name))
for plat in org.platforms():
self.export_platform(plat, os.path.join(path, "platforms", plat.name))
for job in org.jobs():
self.export_job(job, os.path.join(path, "jobs", job.name))
for orch in org.orchestrations():
self.export_orchestration(orch, os.path.join(path, "orchestrations", orch.name))
for env in org.environments():
self.export_environment(env, os.path.join(path, "environments", env.name))
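# Usage sketch: a minimal export run, assuming a `client` object exposing
# `get_organization(name)` (hypothetical name, not defined in this module).
# exporter = Export(force=True)
# org = client.get_organization('my-org')
# exporter.export_organization(org, '/tmp/my-org')  # writes applications/, platforms/, ...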
| 31.822134
| 106
| 0.600795
|
from __future__ import print_function
from builtins import object
import os
from comodit_client.api.collection import EntityNotFoundException
from comodit_client.api.exceptions import PythonApiException
from comodit_client.api.host import Host
from comodit_client.rest.exceptions import ApiException
from comodit_client.util.path import ensure
import six
from comodit_client.api import orchestration
class ExportException(Exception):
pass
class Export(object):
def __init__(self, force = False):
self._force = force
def _export_files_content(self, entity, output_folder):
for template in entity.files():
file_name = template.name
try:
with open(os.path.join(output_folder, file_name), "w") as f:
if six.PY2:
f.write(template.read_content().encode('utf-8'))
else:
f.write(template.read_content())
except ApiException as e:
if e.code == 404:
pass
else:
raise e
def _export_entity(self, res, res_folder, export_files = False, export_thumb = False, backup = False):
if backup:
print("backup", res.name, "to", res_folder)
else:
print("exporting", res.name, "to", res_folder)
if(os.path.exists(res_folder) and len(os.listdir(res_folder)) > 0) and not self._force:
raise ExportException(res_folder + " already exists and is not empty.")
res.dump(res_folder)
if export_files:
files_folder = os.path.join(res_folder, "files")
ensure(files_folder)
self._export_files_content(res, files_folder)
if export_thumb:
# Dump thumbnail to disk
try:
content = res.read_thumbnail_content()
with open(os.path.join(res_folder, "thumb"), "wb") as f:
f.write(content)
except ApiException as e:
if e.code == 404:
pass
else:
raise e
def export_application(self, app, path, backup = False):
self._export_entity(app, path, True, True, backup)
def export_distribution(self, dist, path, backup = False):
self._export_entity(dist, path, True, True, backup)
def export_platform(self, plat, path, backup = False):
self._export_entity(plat, path, True, backup=backup)
def export_environment(self, env, path):
self._export_entity(env, path)
hosts_folder = os.path.join(path, "hosts")
for host in env.hosts():
self.export_host(host, os.path.join(hosts_folder, host.name))
def export_job(self, job, path):
self._export_entity(job, path)
def export_orchestration(self, orchestration, path):
self._export_entity(orchestration, path)
def export_notification(self, notification, path):
self._export_entity(notification, path)
def export_host(self, host, path):
self._export_entity(host, path)
# Export instance
if host.state != Host.State.DEFINED:
try:
instance = host.get_instance()
instance.dump_json(os.path.join(path, "instance.json"))
except PythonApiException:
pass
# Export application contexts
app_folder = os.path.join(path, "applications")
ensure(app_folder)
for context in host.applications():
context.dump_json(os.path.join(app_folder, context.application + ".json"))
# Export platform context
try:
host.get_platform().dump_json(os.path.join(path, "platform.json"))
except EntityNotFoundException:
pass
# Export distribution context
try:
host.get_distribution().dump_json(os.path.join(path, "distribution.json"))
except EntityNotFoundException:
pass
def export_organization(self, org, path):
self._export_entity(org, path)
for app in org.applications():
self.export_application(app, os.path.join(path, "applications", app.name))
for dist in org.distributions():
self.export_distribution(dist, os.path.join(path, "distributions", dist.name))
for plat in org.platforms():
self.export_platform(plat, os.path.join(path, "platforms", plat.name))
for job in org.jobs():
self.export_job(job, os.path.join(path, "jobs", job.name))
for orch in org.orchestrations():
self.export_orchestration(orch, os.path.join(path, "orchestrations", orch.name))
for env in org.environments():
self.export_environment(env, os.path.join(path, "environments", env.name))
| true
| true
|
7902b64da8a83c100463dd21a0346ef4f982c06a
| 3,271
|
py
|
Python
|
part_of_speech.py
|
ruchind159/grammar_correction
|
4af73cd2403c02b827161ad464b0c8339d3715f1
|
[
"Apache-2.0"
] | 2
|
2021-03-15T14:07:05.000Z
|
2021-08-12T09:22:00.000Z
|
part_of_speech.py
|
steven-cheng-com/grammar_correction_with_bert
|
fc1a168dc8d4ada954637aa615656fb7e82cc02a
|
[
"Apache-2.0"
] | 2
|
2020-11-16T08:53:39.000Z
|
2021-03-04T12:06:53.000Z
|
part_of_speech.py
|
ruchind159/grammar_correction
|
4af73cd2403c02b827161ad464b0c8339d3715f1
|
[
"Apache-2.0"
] | null | null | null |
import nltk
# nltk.download('stopwords')  # if this doesn't work, download all of these first
# nltk.download('punkt')
# nltk.download('averaged_perceptron_tagger')
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize, sent_tokenize
stop_words = set(stopwords.words('english'))
meaning_with_example = {
"CC" : "coordinating conjunction",
"CD" : "cardinal digit",
"DT" : "determiner",
"EX" : "existential there (like: “there is” … think of it like “there exists”)",
"FW" : "foreign word",
"IN" : "preposition/subordinating conjunction",
"JJ" : "adjective ‘big’",
"JJR": "adjective, comparative ‘bigger’",
"JJS": "adjective, superlative ‘biggest’",
"LS" : "list marker 1)",
"MD" : "modal could, will",
"NN" : "noun, singular ‘desk’",
"NNS": "noun plural ‘desks’",
"NNP": "proper noun, singular ‘Harrison’",
"NNPS": "proper noun, plural ‘Americans’",
"PDT": "predeterminer ‘all the kids’",
"POS": "possessive ending parent‘s",
"PRP": "personal pronoun I, he, she",
"PRP$": "possessive pronoun my, his, hers",
"RB" : "adverb very, silently,",
"RBR": "adverb, comparative better",
"RBS": "adverb, superlative best",
"RP" : "particle give up",
"TO" : "to go ‘to‘ the store.",
"UH" : "interjection errrrrrrrm",
"VB" : "verb, base form take",
"VBD": "verb, past tense took",
"VBG": "verb, gerund/present participle taking",
"VBN": "verb, past participle taken",
"VBP": "verb, sing. present, non-3d take",
"VBZ": "verb, 3rd person sing. present takes",
"WDT": "wh-determiner which",
"WP" : "wh-pronoun who, what",
"WP$": "possessive wh-pronoun whose",
"WRB": "wh-abverb where, when",
"," : "comma",
"." : "full stop"
}
meaning = {
"CC" : "coordinating conjunction",
"CD" : "cardinal digit",
"DT" : "determiner",
"EX" : "existential there",
"FW" : "foreign word",
"IN" : "preposition/subordinating conjunction",
"JJ" : "adjective",
"JJR": "adjective, comparative",
"JJS": "adjective, superlative",
"LS" : "list marker",
"MD" : "modal could, will",
"NN" : "noun singular",
"NNS": "noun plural",
"NNP": "proper noun, singular",
"NNPS": "proper noun, plural",
"PDT": "predeterminer",
"POS": "possessive ending",
"PRP": "personal pronoun",
"PRP$": "possessive pronoun",
"RB" : "adverb ",
"RBR": "adverb, comparative ",
"RBS": "adverb, superlative ",
"RP" : "particle ",
"TO" : "to go ‘to‘ the store.",
"UH" : "interjection",
"VB" : "verb base form ",
"VBD": "verb past tense ",
"VBG": "verb gerund/present participle",
"VBN": "verb past participle ",
"VBP": "verb sing. present",
"VBZ": "verb 3rd person sing. present ",
"WDT": "wh-determiner which",
"WP" : "wh-pronoun who, what",
"WP$": "possessive wh-pronoun whose",
"WRB": "wh-abverb where, when"
}
def get_part_of_speech(sentence):
cleaned=[]
tokenized = sent_tokenize(sentence)
for i in tokenized:
wordsList = nltk.word_tokenize(i)
wordsList = [w for w in wordsList if not w in stop_words]
tagged = nltk.pos_tag(wordsList)
for pair in tagged:
c_pair=[]
c_pair.append(pair[0])
try :
c_pair.append(meaning[pair[1]])
except :
c_pair.append("Punctuation")
cleaned.append(c_pair)
return cleaned
#print(get_part_of_speech("Sukanya, Rajib and Naba are my good friends."))
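# Usage sketch: the function returns [token, tag description] pairs with stop words removed;
# the exact tags depend on the NLTK tagger, so the output below is only indicative.
# get_part_of_speech("Sukanya, Rajib and Naba are my good friends.")
# # e.g. [['Sukanya', 'proper noun, singular'], [',', 'Punctuation'],
# #       ['Rajib', 'proper noun, singular'], ...]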
| 30.570093
| 81
| 0.642006
|
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize, sent_tokenize
stop_words = set(stopwords.words('english'))
meaning_with_example = {
"CC" : "coordinating conjunction",
"CD" : "cardinal digit",
"DT" : "determiner",
"EX" : "existential there (like: “there is” … think of it like “there exists”)",
"FW" : "foreign word",
"IN" : "preposition/subordinating conjunction",
"JJ" : "adjective ‘big’",
"JJR": "adjective, comparative ‘bigger’",
"JJS": "adjective, superlative ‘biggest’",
"LS" : "list marker 1)",
"MD" : "modal could, will",
"NN" : "noun, singular ‘desk’",
"NNS": "noun plural ‘desks’",
"NNP": "proper noun, singular ‘Harrison’",
"NNPS": "proper noun, plural ‘Americans’",
"PDT": "predeterminer ‘all the kids’",
"POS": "possessive ending parent‘s",
"PRP": "personal pronoun I, he, she",
"PRP$": "possessive pronoun my, his, hers",
"RB" : "adverb very, silently,",
"RBR": "adverb, comparative better",
"RBS": "adverb, superlative best",
"RP" : "particle give up",
"TO" : "to go ‘to‘ the store.",
"UH" : "interjection errrrrrrrm",
"VB" : "verb, base form take",
"VBD": "verb, past tense took",
"VBG": "verb, gerund/present participle taking",
"VBN": "verb, past participle taken",
"VBP": "verb, sing. present, non-3d take",
"VBZ": "verb, 3rd person sing. present takes",
"WDT": "wh-determiner which",
"WP" : "wh-pronoun who, what",
"WP$": "possessive wh-pronoun whose",
"WRB": "wh-abverb where, when",
"," : "comma",
"." : "full stop"
}
meaning = {
"CC" : "coordinating conjunction",
"CD" : "cardinal digit",
"DT" : "determiner",
"EX" : "existential there",
"FW" : "foreign word",
"IN" : "preposition/subordinating conjunction",
"JJ" : "adjective",
"JJR": "adjective, comparative",
"JJS": "adjective, superlative",
"LS" : "list marker",
"MD" : "modal could, will",
"NN" : "noun singular",
"NNS": "noun plural",
"NNP": "proper noun, singular",
"NNPS": "proper noun, plural",
"PDT": "predeterminer",
"POS": "possessive ending",
"PRP": "personal pronoun",
"PRP$": "possessive pronoun",
"RB" : "adverb ",
"RBR": "adverb, comparative ",
"RBS": "adverb, superlative ",
"RP" : "particle ",
"TO" : "to go ‘to‘ the store.",
"UH" : "interjection",
"VB" : "verb base form ",
"VBD": "verb past tense ",
"VBG": "verb gerund/present participle",
"VBN": "verb past participle ",
"VBP": "verb sing. present",
"VBZ": "verb 3rd person sing. present ",
"WDT": "wh-determiner which",
"WP" : "wh-pronoun who, what",
"WP$": "possessive wh-pronoun whose",
"WRB": "wh-abverb where, when"
}
def get_part_of_speech(sentence):
cleaned=[]
tokenized = sent_tokenize(sentence)
for i in tokenized:
wordsList = nltk.word_tokenize(i)
wordsList = [w for w in wordsList if not w in stop_words]
tagged = nltk.pos_tag(wordsList)
for pair in tagged:
c_pair=[]
c_pair.append(pair[0])
try :
c_pair.append(meaning[pair[1]])
except :
c_pair.append("Punctuation")
cleaned.append(c_pair)
return cleaned
| true
| true
|
7902b7d331b3cc1103d0723383f0188ca9a5695d
| 2,442
|
py
|
Python
|
stanCode_projects/name_searching_system/milestone1.py
|
kenhuang1204/stanCode_projects
|
f697a34a1c54a864c1140cb0f2f76e2d70b45698
|
[
"MIT"
] | null | null | null |
stanCode_projects/name_searching_system/milestone1.py
|
kenhuang1204/stanCode_projects
|
f697a34a1c54a864c1140cb0f2f76e2d70b45698
|
[
"MIT"
] | null | null | null |
stanCode_projects/name_searching_system/milestone1.py
|
kenhuang1204/stanCode_projects
|
f697a34a1c54a864c1140cb0f2f76e2d70b45698
|
[
"MIT"
] | null | null | null |
"""
File: Milestone1.py
Name: 黃科諺
-----------------------
This file tests milestone 1 of
our babyname.py project
"""
import sys
def add_data_for_name(name_data, year, rank, name):
name_info = {year: rank}
if name in name_data:
if year in name_data[name]:
exist_rank = int(name_data[name][year])
if int(rank) < exist_rank:
name_data[name][year] = rank
else:
name_data[name][year] = rank
else:
name_data[name] = name_info
# ------------- DO NOT EDIT THE CODE BELOW THIS LINE ---------------- #
def test1():
name_data = {'Kylie': {'2010': '57'}, 'Nick': {'2010': '37'}}
add_data_for_name(name_data, '2010', '208', 'Kate')
print('--------------------test1----------------------')
print(str(name_data))
print('-----------------------------------------------')
def test2():
name_data = {'Kylie': {'2010': '57'}, 'Nick': {'2010': '37'}}
add_data_for_name(name_data, '2000', '104', 'Kylie')
print('--------------------test2----------------------')
print(str(name_data))
print('-----------------------------------------------')
def test3():
name_data = {'Kylie': {'2010': '57'}, 'Sammy': {'1980': '451', '1990': '200'}, 'Kate': {'2000': '100'}}
add_data_for_name(name_data, '1990', '900', 'Sammy')
add_data_for_name(name_data, '2010', '400', 'Kylie')
add_data_for_name(name_data, '2000', '20', 'Kate')
print('-------------------test3-----------------------')
print(str(name_data))
print('-----------------------------------------------')
def test4():
name_data = {'Kylie': {'2010': '57'}, 'Nick': {'2010': '37'}}
add_data_for_name(name_data, '2010', '208', 'Kate')
add_data_for_name(name_data, '2000', '108', 'Kate')
add_data_for_name(name_data, '1990', '200', 'Sammy')
add_data_for_name(name_data, '1990', '90', 'Sammy')
add_data_for_name(name_data, '2000', '104', 'Kylie')
print('--------------------test4----------------------')
print(str(name_data))
print('-----------------------------------------------')
def main():
args = sys.argv[1:]
if len(args) == 1 and args[0] == 'test1':
test1()
elif len(args) == 1 and args[0] == 'test2':
test2()
elif len(args) == 1 and args[0] == 'test3':
test3()
elif len(args) == 1 and args[0] == 'test4':
test4()
if __name__ == "__main__":
main()
| 31.307692
| 107
| 0.47502
|
import sys
def add_data_for_name(name_data, year, rank, name):
name_info = {year: rank}
if name in name_data:
if year in name_data[name]:
exist_rank = int(name_data[name][year])
if int(rank) < exist_rank:
name_data[name][year] = rank
else:
name_data[name][year] = rank
else:
name_data[name] = name_info
def test1():
name_data = {'Kylie': {'2010': '57'}, 'Nick': {'2010': '37'}}
add_data_for_name(name_data, '2010', '208', 'Kate')
print('--------------------test1----------------------')
print(str(name_data))
print('-----------------------------------------------')
def test2():
name_data = {'Kylie': {'2010': '57'}, 'Nick': {'2010': '37'}}
add_data_for_name(name_data, '2000', '104', 'Kylie')
print('--------------------test2----------------------')
print(str(name_data))
print('-----------------------------------------------')
def test3():
name_data = {'Kylie': {'2010': '57'}, 'Sammy': {'1980': '451', '1990': '200'}, 'Kate': {'2000': '100'}}
add_data_for_name(name_data, '1990', '900', 'Sammy')
add_data_for_name(name_data, '2010', '400', 'Kylie')
add_data_for_name(name_data, '2000', '20', 'Kate')
print('-------------------test3-----------------------')
print(str(name_data))
print('-----------------------------------------------')
def test4():
name_data = {'Kylie': {'2010': '57'}, 'Nick': {'2010': '37'}}
add_data_for_name(name_data, '2010', '208', 'Kate')
add_data_for_name(name_data, '2000', '108', 'Kate')
add_data_for_name(name_data, '1990', '200', 'Sammy')
add_data_for_name(name_data, '1990', '90', 'Sammy')
add_data_for_name(name_data, '2000', '104', 'Kylie')
print('--------------------test4----------------------')
print(str(name_data))
print('-----------------------------------------------')
def main():
args = sys.argv[1:]
if len(args) == 1 and args[0] == 'test1':
test1()
elif len(args) == 1 and args[0] == 'test2':
test2()
elif len(args) == 1 and args[0] == 'test3':
test3()
elif len(args) == 1 and args[0] == 'test4':
test4()
if __name__ == "__main__":
main()
| true
| true
|
7902b80bc51d759fc1c6b2262f316e6019c967d6
| 299
|
py
|
Python
|
mass_api_client/schemas/scheduled_analysis.py
|
tbehner/mass_api_client
|
eec1764b062e2b24a9d1f8f59f665e6b56c9d366
|
[
"MIT"
] | 2
|
2017-06-27T12:28:40.000Z
|
2017-10-13T09:34:18.000Z
|
mass_api_client/schemas/scheduled_analysis.py
|
tbehner/mass_api_client
|
eec1764b062e2b24a9d1f8f59f665e6b56c9d366
|
[
"MIT"
] | 124
|
2016-11-22T13:11:07.000Z
|
2019-10-22T23:36:10.000Z
|
mass_api_client/schemas/scheduled_analysis.py
|
tbehner/mass_api_client
|
eec1764b062e2b24a9d1f8f59f665e6b56c9d366
|
[
"MIT"
] | null | null | null |
from marshmallow import fields
from .base import BaseSchema
class ScheduledAnalysisSchema(BaseSchema):
analysis_system_instance = fields.Url(required=True)
sample = fields.Url(required=True)
analysis_scheduled = fields.DateTime(required=True)
priority = fields.Int(required=True)
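# Usage sketch with hypothetical values, assuming BaseSchema behaves like a plain
# marshmallow Schema so that load() validates the URL, datetime and integer fields.
# payload = {
# "analysis_system_instance": "https://mass.example.org/api/analysis_system_instance/1/",
# "sample": "https://mass.example.org/api/sample/42/",
# "analysis_scheduled": "2017-06-27T12:28:40+00:00",
# "priority": 1,
# }
# ScheduledAnalysisSchema().load(payload)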
| 27.181818
| 56
| 0.782609
|
from marshmallow import fields
from .base import BaseSchema
class ScheduledAnalysisSchema(BaseSchema):
analysis_system_instance = fields.Url(required=True)
sample = fields.Url(required=True)
analysis_scheduled = fields.DateTime(required=True)
priority = fields.Int(required=True)
| true
| true
|
7902b81321920a85441cfdd8e8511a4049d31b8c
| 4,152
|
py
|
Python
|
anwa/main_click.py
|
mjirik/anwa
|
c77138debdec3915f1f9539278dee248348c2c61
|
[
"MIT"
] | null | null | null |
anwa/main_click.py
|
mjirik/anwa
|
c77138debdec3915f1f9539278dee248348c2c61
|
[
"MIT"
] | null | null | null |
anwa/main_click.py
|
mjirik/anwa
|
c77138debdec3915f1f9539278dee248348c2c61
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module used for the GUI of Lisa
"""
from loguru import logger
import sys
import click
from pathlib import Path
import ast
from . import app_tools
# print("start")
# from . import image
# print("start 5")
# print("start 6")
# from scaffan import algorithm
from . import algorithm
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
# print("Running __main__.py")
# @batch_detect.command(context_settings=CONTEXT_SETTINGS)
# @click.argument("image_stack_dir", type=click.Path(exists=True))
# @click.argument("working_dir", type=click.Path())
# @click.option("--create-icon", is_flag=True,
# help="Create desktop icon"
# )
@click.group(context_settings=CONTEXT_SETTINGS, invoke_without_command=True)
@click.pass_context
def run(ctx, *args, **kwargs):
if ctx.invoked_subcommand is None:
# click.echo('I was invoked without subcommand')
ctx.invoke(gui, *args, **kwargs)
# a.main()
else:
pass
click.echo("I am about to invoke %s" % ctx.invoked_subcommand)
pass
# @run.command(context_settings=CONTEXT_SETTINGS, help="Set persistent values")
# @click.option("--common-spreadsheet-file", help="Set path for common spreadsheet file.", type=click.Path())
# def set(common_spreadsheet_file=None):
# mainapp = algorithm.AnimalWatch()
# if common_spreadsheet_file is not None:
# mainapp.set_common_spreadsheet_file(path=common_spreadsheet_file)
# logger.info(f"Common spreadsheet file path is : {common_spreadsheet_file}")
# print(f"Common spreadsheet file path is : {common_spreadsheet_file}")
# def print_params(params):
# algorithm.Scaffan().parameters.
# params.
@run.command(context_settings=CONTEXT_SETTINGS)
@click.option(
"--params",
"-p",
multiple=True,
default=None,
nargs=2,
help='Set parameter. First argument is path to parameter separated by ";". Second is the value.'
"python -m scaffan gui -p Processing;Show True",
)
@click.option("--print-params", "-pp", is_flag=True, help="Print parameters")
def gui(params, print_params):
mainapp = algorithm.AnimalWatch()
if print_params:
make_print_params(mainapp)
exit()
# mainapp.parameters.param(*param[0].split(";")).setValue(ast.literal_eval(param[1]))
set_params(mainapp, params)
mainapp.start_gui()
def set_params(mainapp, params):
if params is not None:
logger.debug("set_params() ...")
app_tools.set_parameters_by_path(mainapp.parameters, params)
# for param in params:
# mainapp.set_parameter(param[0], value=ast.literal_eval(param[1]))
def make_print_params(mainapp):
import pprint
pprint.pprint(mainapp.parameters_to_dict())
@run.command(
context_settings=CONTEXT_SETTINGS, help="Create an icon on Windows platform"
)
def install():
from .app_tools import create_icon
icon_filename = Path(__file__).parent / Path("anwa.ico")
create_icon("anwa", icon_filename, conda_env_name="anwa_app")
# print(platform.system)
# if platform.system() == "Windows":
# import pathlib
# pass
@run.command(context_settings=CONTEXT_SETTINGS)
@click.option(
"--input-path",
"-i",
type=click.Path(exists=True),
help='Path to input directory with video files.',
)
@click.option(
"--params",
"-p",
multiple=True,
default=None,
nargs=2,
help='Set parameter. First argument is path to parameter separated by ";". Second is the value.'
"python -m anwa nogui -p Processing;Show True",
)
@click.option("--print-params", "-pp", is_flag=True, help="Print parameters")
def nogui(input_path, params, print_params):
mainapp = algorithm.AnimalWatch()
logger.debug(f"params={params})")
if print_params:
make_print_params(mainapp)
exit()
set_params(mainapp, params)
# for param in params:
# mainapp.parameters.param(*param[0].split(";")).setValue(
# ast.literal_eval(param[1])
# )
mainapp.set_input_dir(input_path)
# mainapp.start_gui()
mainapp.run()
# def install():
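# Usage sketch: typical invocations, assuming the package is installed and run as a module
# (paths and parameter names below are only illustrative).
# python -m anwa # no subcommand: opens the GUI
# python -m anwa nogui -i ./videos -p "Processing;Show" True
# python -m anwa nogui -pp # print the parameter tree and exit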
| 28.634483
| 110
| 0.678227
|
from loguru import logger
import sys
import click
from pathlib import Path
import ast
from . import app_tools
from . import algorithm
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
@click.group(context_settings=CONTEXT_SETTINGS, invoke_without_command=True)
@click.pass_context
def run(ctx, *args, **kwargs):
if ctx.invoked_subcommand is None:
ctx.invoke(gui, *args, **kwargs)
else:
pass
click.echo("I am about to invoke %s" % ctx.invoked_subcommand)
pass
@run.command(context_settings=CONTEXT_SETTINGS)
@click.option(
"--params",
"-p",
multiple=True,
default=None,
nargs=2,
help='Set parameter. First argument is path to parameter separated by ";". Second is the value.'
"python -m scaffan gui -p Processing;Show True",
)
@click.option("--print-params", "-pp", is_flag=True, help="Print parameters")
def gui(params, print_params):
mainapp = algorithm.AnimalWatch()
if print_params:
make_print_params(mainapp)
exit()
set_params(mainapp, params)
mainapp.start_gui()
def set_params(mainapp, params):
if params is not None:
logger.debug("set_params() ...")
app_tools.set_parameters_by_path(mainapp.parameters, params)
def make_print_params(mainapp):
import pprint
pprint.pprint(mainapp.parameters_to_dict())
@run.command(
context_settings=CONTEXT_SETTINGS, help="Create an icon on Windows platform"
)
def install():
from .app_tools import create_icon
icon_filename = Path(__file__).parent / Path("anwa.ico")
create_icon("anwa", icon_filename, conda_env_name="anwa_app")
@run.command(context_settings=CONTEXT_SETTINGS)
@click.option(
"--input-path",
"-i",
type=click.Path(exists=True),
help='Path to input directory with video files.',
)
@click.option(
"--params",
"-p",
multiple=True,
default=None,
nargs=2,
help='Set parameter. First argument is path to parameter separated by ";". Second is the value.'
"python -m anwa nogui -p Processing;Show True",
)
@click.option("--print-params", "-pp", is_flag=True, help="Print parameters")
def nogui(input_path, params, print_params):
mainapp = algorithm.AnimalWatch()
logger.debug(f"params={params})")
if print_params:
make_print_params(mainapp)
exit()
set_params(mainapp, params)
mainapp.set_input_dir(input_path)
mainapp.run()
| true
| true
|
7902b82d00f17ada7e994316eefb3f7ae752f3ce
| 9,066
|
py
|
Python
|
test/interval_test.py
|
diatche/intervalpy
|
8204265fe3a24bd9268a46af644c58778ee7112a
|
[
"MIT"
] | null | null | null |
test/interval_test.py
|
diatche/intervalpy
|
8204265fe3a24bd9268a46af644c58778ee7112a
|
[
"MIT"
] | null | null | null |
test/interval_test.py
|
diatche/intervalpy
|
8204265fe3a24bd9268a46af644c58778ee7112a
|
[
"MIT"
] | null | null | null |
import pytest
import math
import os
import sys
module_dir = os.path.dirname(__file__)
sys.path.append(os.path.join(module_dir, '..', 'intervalpy'))
from intervalpy import Interval
def test_intersection():
# closed, closed
d1 = Interval(0, 2, start_open=False, end_open=False)
d2 = Interval(1, 3, start_open=False, end_open=False)
assert d1.contains(0)
assert d1.contains(1)
assert d1.contains(2)
d = Interval.intersection([d1, d2])
assert d.start == 1
assert d.end == 2
assert not d.start_open
assert not d.end_open
d = Interval.union([d1, d2])
assert d.start == 0
assert d.end == 3
assert not d.start_open
assert not d.end_open
# closed, open
d1 = Interval(0, 2, start_open=False, end_open=False)
d2 = Interval(1, 3, start_open=True, end_open=True)
d = Interval.intersection([d1, d2])
assert d.start == 1
assert d.end == 2
assert d.start_open
assert not d.end_open
d = Interval.union([d1, d2])
assert d.start == 0
assert d.end == 3
assert not d.start_open
assert d.end_open
# open, open
d1 = Interval(0, 2, start_open=True, end_open=True)
d2 = Interval(1, 3, start_open=True, end_open=True)
assert not d1.contains(0)
assert d1.contains(1)
assert not d1.contains(2)
d = Interval.intersection([d1, d2])
assert d.start == 1
assert d.end == 2
assert d.start_open
assert d.end_open
d = Interval.union([d1, d2])
assert d.start == 0
assert d.end == 3
assert d.start_open
assert d.end_open
d = Interval.intersection([Interval(0, 1), Interval(2, 3)])
assert d.is_empty
d = Interval.intersection([Interval(0, 1, end_open=True), Interval(1, 3, start_open=True)])
assert d.is_empty
d = Interval.intersection([Interval(0, 1), Interval.empty()])
assert d.is_empty
d = Interval.union([Interval.empty(), 1])
assert d.start == 1
assert d.end == 1
def test_interval_contains_inf():
inf = Interval.infinite()
assert inf.contains(math.inf) is True
assert inf.contains(-math.inf) is True
assert Interval.gte(0).contains(math.inf) is True
assert Interval.gte(0).contains(-math.inf) is False
assert Interval.lte(0).contains(math.inf) is False
assert Interval.lte(0).contains(-math.inf) is True
def test_intersection_inf():
assert Interval.intersection([Interval.gte(100), (98, 101)]) == (100, 101)
assert Interval.intersection([Interval.point(100), Interval.open_closed(100, 101)]) == Interval.empty()
def test_cast():
assert bool(Interval.empty()) is False
assert bool(Interval(0, 0)) is True
assert list(Interval.empty()) == []
assert list(Interval(0, 0)) == [0, 0]
assert list(Interval.open(1, 20)) == [1, 20]
def test_intersects():
assert Interval.closed(1, 3).intersects(Interval.closed(2, 3))
assert Interval.closed(1, 3).intersects((2, 3))
assert Interval.closed(1, 3).intersects((1, 3))
assert Interval.closed(1, 3).intersects(Interval.open(1, 3))
assert Interval.closed(1, 3).intersects(Interval.closed(3, 4))
assert not Interval.closed(1, 3).intersects(Interval.open(3, 4))
assert not Interval.open(1, 3).intersects(Interval.closed(3, 4))
assert Interval.point(3).intersects(Interval.closed(3, 4))
assert Interval.point(3).intersects(Interval.closed(1, 3))
assert not Interval.point(3).intersects(Interval.open(3, 4))
assert not Interval.point(3).intersects(Interval.open(1, 3))
assert Interval.closed(1, 3).intersects(Interval.closed(0, 1))
assert not Interval.closed(1, 3).intersects(Interval.open(0, 1))
assert not Interval.open(1, 3).intersects(Interval.closed(0, 1))
assert not Interval.closed(1, 3).intersects(Interval.closed(4, 5))
assert not Interval.closed(1, 3).intersects(Interval.closed(-2, 0))
assert not Interval.closed(1, 3).intersects(Interval.empty())
assert Interval.closed(1, 3).intersects(Interval.infinite())
assert not Interval.point(1).intersects(Interval.open_closed(1, 2))
def test_parse():
d = Interval.parse(Interval(0, 1, start_open=True, end_open=True))
assert d.start == 0
assert d.end == 1
assert d.start_open
assert d.end_open
d = Interval.parse((0, 1))
assert d.start == 0
assert d.end == 1
assert not d.start_open
assert not d.end_open
d = Interval.parse(1)
assert d.start == 1
assert d.end == 1
assert not d.start_open
assert not d.end_open
with pytest.raises(Exception):
_ = Interval.parse(None)
with pytest.raises(Exception):
_ = Interval.parse(None, default_inf=False)
assert Interval.parse(None, default_inf=True) == Interval.infinite()
d = Interval.parse(math.inf)
assert math.isinf(d.start)
assert math.isinf(d.end)
assert d.start > 0
assert d.end > 0
assert not d.is_negative_infinite
assert not d.is_positive_infinite
d = Interval.parse(-math.inf)
assert math.isinf(d.start)
assert math.isinf(d.end)
assert d.start < 0
assert d.end < 0
assert not d.is_negative_infinite
assert not d.is_positive_infinite
d = Interval.parse([])
assert d.is_empty
def test_partition():
ds = Interval(1, 3).partition([2])
assert list(map(tuple, ds)) == [(1, 2), (2, 3)]
assert not ds[0].start_open
assert ds[0].end_open
assert not ds[1].start_open
assert not ds[1].end_open
ds = Interval(0, 3).partition([0, 1, 2, 3, 4], start_open=True)
assert list(map(tuple, ds)) == [(0, 0), (0, 1), (1, 2), (2, 3)]
assert not ds[0].start_open
assert not ds[0].end_open
assert ds[1].start_open
assert not ds[1].end_open
ds = Interval(0, 3).partition([0, 1, 2, 3, 4], start_open=False)
assert list(map(tuple, ds)) == [(0, 1), (1, 2), (2, 3), (3, 3)]
assert not ds[0].start_open
assert ds[0].end_open
assert not ds[1].start_open
assert ds[1].end_open
def test_subset():
d = Interval(1, 3)
assert d.is_subset_of((0, 4))
assert d.is_subset_of((1, 3))
assert not d.is_subset_of(Interval.closed_open(1, 3))
assert d.is_superset_of((2, 2))
assert d.is_superset_of((1, 3))
assert d.is_superset_of(Interval.closed_open(1, 3))
def test_equals():
d = Interval(1, 3)
assert d.equals((1, 3))
assert not d.equals(None)
assert not d.equals(Interval.closed_open(1, 3))
assert Interval.empty().equals(Interval.empty())
# Empty intervals are always equal
assert Interval.open(1, 1).equals(Interval.open(2, 2))
assert Interval.infinite().equals(Interval.infinite())
def test_infinite():
assert Interval.gte(math.inf).is_empty is True
assert Interval.gte(-math.inf).is_empty is False
assert Interval.lte(math.inf).is_empty is False
assert Interval.lte(-math.inf).is_empty is True
def test_round():
assert Interval(1.2, 3.4).round() == (1, 3)
assert Interval(1.2, 3.4).round(method=math.floor) == (1, 3)
assert Interval(1.2, 3.4).round(method=math.ceil) == (2, 4)
assert Interval.open_closed(1.2, 3.4).round() == Interval.open_closed(1, 3)
assert Interval.closed_open(1.2, 3.4).round() == Interval.closed_open(1, 3)
assert Interval.empty().round() == Interval.empty()
def test_extensions():
d = Interval(1, 3)
assert d.get_lte().equals(Interval.lte(3))
assert d.get_gte().equals(Interval.gte(1))
assert d.get_lt().equals(Interval.lt(1))
assert d.get_gt().equals(Interval.gt(3))
d = Interval.open(1, 3)
assert d.get_lte().equals(Interval.lt(3))
assert d.get_gte().equals(Interval.gt(1))
assert d.get_lt().equals(Interval.lte(1))
assert d.get_gt().equals(Interval.gte(3))
d = Interval.empty()
assert d.get_lte().is_empty
assert d.get_gte().is_empty
assert d.get_lt().is_empty
assert d.get_gt().is_empty
def test_inequalities():
assert Interval(1, 3) == (1, 3)
assert (1, 3) == Interval(1, 3)
assert Interval(1, 3) < (4, 6)
assert not Interval(1, 3) < (3, 6)
assert not Interval(1, 3) < (-3, -1)
assert Interval(1, 3) <= (3, 6)
assert Interval(1, 3) <= (2, 6)
assert Interval(1, 3) <= (1, 6)
assert Interval(3, 5) <= (1, 6)
assert not Interval(1, 3) <= (-3, -1)
assert not Interval(3, 6) <= Interval.open(1, 6)
assert Interval(1, 3) < Interval.empty()
assert Interval(1, 3) <= Interval.empty()
assert Interval(7, 9) > (4, 6)
assert not Interval(7, 9) > (4, 7)
assert not Interval(7, 9) > (10, 12)
assert Interval(7, 9) >= (4, 7)
assert Interval(7, 9) >= (4, 8)
assert Interval(7, 9) >= (4, 9)
assert not Interval(7, 9) >= (10, 12)
assert not Interval(4, 10) >= Interval.open(4, 9)
assert Interval(7, 9) > Interval.empty()
assert Interval(7, 9) >= Interval.empty()
def test_arithmetic():
assert Interval(1, 3) + (2, 4) == (1, 4)
assert (1, 3) + Interval(2, 4) == (1, 4)
assert Interval.open(1, 3) + (2, 4) == Interval.open_closed(1, 4)
assert (1, 3) + Interval.open(2, 4) == Interval.closed_open(1, 4)
| 32.263345
| 107
| 0.647695
|
import pytest
import math
import os
import sys
module_dir = os.path.dirname(__file__)
sys.path.append(os.path.join(module_dir, '..', 'intervalpy'))
from intervalpy import Interval
def test_intersection():
d1 = Interval(0, 2, start_open=False, end_open=False)
d2 = Interval(1, 3, start_open=False, end_open=False)
assert d1.contains(0)
assert d1.contains(1)
assert d1.contains(2)
d = Interval.intersection([d1, d2])
assert d.start == 1
assert d.end == 2
assert not d.start_open
assert not d.end_open
d = Interval.union([d1, d2])
assert d.start == 0
assert d.end == 3
assert not d.start_open
assert not d.end_open
d1 = Interval(0, 2, start_open=False, end_open=False)
d2 = Interval(1, 3, start_open=True, end_open=True)
d = Interval.intersection([d1, d2])
assert d.start == 1
assert d.end == 2
assert d.start_open
assert not d.end_open
d = Interval.union([d1, d2])
assert d.start == 0
assert d.end == 3
assert not d.start_open
assert d.end_open
d1 = Interval(0, 2, start_open=True, end_open=True)
d2 = Interval(1, 3, start_open=True, end_open=True)
assert not d1.contains(0)
assert d1.contains(1)
assert not d1.contains(2)
d = Interval.intersection([d1, d2])
assert d.start == 1
assert d.end == 2
assert d.start_open
assert d.end_open
d = Interval.union([d1, d2])
assert d.start == 0
assert d.end == 3
assert d.start_open
assert d.end_open
d = Interval.intersection([Interval(0, 1), Interval(2, 3)])
assert d.is_empty
d = Interval.intersection([Interval(0, 1, end_open=True), Interval(1, 3, start_open=True)])
assert d.is_empty
d = Interval.intersection([Interval(0, 1), Interval.empty()])
assert d.is_empty
d = Interval.union([Interval.empty(), 1])
assert d.start == 1
assert d.end == 1
def test_interval_contains_inf():
inf = Interval.infinite()
assert inf.contains(math.inf) is True
assert inf.contains(-math.inf) is True
assert Interval.gte(0).contains(math.inf) is True
assert Interval.gte(0).contains(-math.inf) is False
assert Interval.lte(0).contains(math.inf) is False
assert Interval.lte(0).contains(-math.inf) is True
def test_intersection_inf():
assert Interval.intersection([Interval.gte(100), (98, 101)]) == (100, 101)
assert Interval.intersection([Interval.point(100), Interval.open_closed(100, 101)]) == Interval.empty()
def test_cast():
assert bool(Interval.empty()) is False
assert bool(Interval(0, 0)) is True
assert list(Interval.empty()) == []
assert list(Interval(0, 0)) == [0, 0]
assert list(Interval.open(1, 20)) == [1, 20]
def test_intersects():
assert Interval.closed(1, 3).intersects(Interval.closed(2, 3))
assert Interval.closed(1, 3).intersects((2, 3))
assert Interval.closed(1, 3).intersects((1, 3))
assert Interval.closed(1, 3).intersects(Interval.open(1, 3))
assert Interval.closed(1, 3).intersects(Interval.closed(3, 4))
assert not Interval.closed(1, 3).intersects(Interval.open(3, 4))
assert not Interval.open(1, 3).intersects(Interval.closed(3, 4))
assert Interval.point(3).intersects(Interval.closed(3, 4))
assert Interval.point(3).intersects(Interval.closed(1, 3))
assert not Interval.point(3).intersects(Interval.open(3, 4))
assert not Interval.point(3).intersects(Interval.open(1, 3))
assert Interval.closed(1, 3).intersects(Interval.closed(0, 1))
assert not Interval.closed(1, 3).intersects(Interval.open(0, 1))
assert not Interval.open(1, 3).intersects(Interval.closed(0, 1))
assert not Interval.closed(1, 3).intersects(Interval.closed(4, 5))
assert not Interval.closed(1, 3).intersects(Interval.closed(-2, 0))
assert not Interval.closed(1, 3).intersects(Interval.empty())
assert Interval.closed(1, 3).intersects(Interval.infinite())
assert not Interval.point(1).intersects(Interval.open_closed(1, 2))
def test_parse():
d = Interval.parse(Interval(0, 1, start_open=True, end_open=True))
assert d.start == 0
assert d.end == 1
assert d.start_open
assert d.end_open
d = Interval.parse((0, 1))
assert d.start == 0
assert d.end == 1
assert not d.start_open
assert not d.end_open
d = Interval.parse(1)
assert d.start == 1
assert d.end == 1
assert not d.start_open
assert not d.end_open
with pytest.raises(Exception):
_ = Interval.parse(None)
with pytest.raises(Exception):
_ = Interval.parse(None, default_inf=False)
assert Interval.parse(None, default_inf=True) == Interval.infinite()
d = Interval.parse(math.inf)
assert math.isinf(d.start)
assert math.isinf(d.end)
assert d.start > 0
assert d.end > 0
assert not d.is_negative_infinite
assert not d.is_positive_infinite
d = Interval.parse(-math.inf)
assert math.isinf(d.start)
assert math.isinf(d.end)
assert d.start < 0
assert d.end < 0
assert not d.is_negative_infinite
assert not d.is_positive_infinite
d = Interval.parse([])
assert d.is_empty
def test_partition():
ds = Interval(1, 3).partition([2])
assert list(map(tuple, ds)) == [(1, 2), (2, 3)]
assert not ds[0].start_open
assert ds[0].end_open
assert not ds[1].start_open
assert not ds[1].end_open
ds = Interval(0, 3).partition([0, 1, 2, 3, 4], start_open=True)
assert list(map(tuple, ds)) == [(0, 0), (0, 1), (1, 2), (2, 3)]
assert not ds[0].start_open
assert not ds[0].end_open
assert ds[1].start_open
assert not ds[1].end_open
ds = Interval(0, 3).partition([0, 1, 2, 3, 4], start_open=False)
assert list(map(tuple, ds)) == [(0, 1), (1, 2), (2, 3), (3, 3)]
assert not ds[0].start_open
assert ds[0].end_open
assert not ds[1].start_open
assert ds[1].end_open
def test_subset():
d = Interval(1, 3)
assert d.is_subset_of((0, 4))
assert d.is_subset_of((1, 3))
assert not d.is_subset_of(Interval.closed_open(1, 3))
assert d.is_superset_of((2, 2))
assert d.is_superset_of((1, 3))
assert d.is_superset_of(Interval.closed_open(1, 3))
def test_equals():
d = Interval(1, 3)
assert d.equals((1, 3))
assert not d.equals(None)
assert not d.equals(Interval.closed_open(1, 3))
assert Interval.empty().equals(Interval.empty())
assert Interval.open(1, 1).equals(Interval.open(2, 2))
assert Interval.infinite().equals(Interval.infinite())
def test_infinite():
assert Interval.gte(math.inf).is_empty is True
assert Interval.gte(-math.inf).is_empty is False
assert Interval.lte(math.inf).is_empty is False
assert Interval.lte(-math.inf).is_empty is True
def test_round():
assert Interval(1.2, 3.4).round() == (1, 3)
assert Interval(1.2, 3.4).round(method=math.floor) == (1, 3)
assert Interval(1.2, 3.4).round(method=math.ceil) == (2, 4)
assert Interval.open_closed(1.2, 3.4).round() == Interval.open_closed(1, 3)
assert Interval.closed_open(1.2, 3.4).round() == Interval.closed_open(1, 3)
assert Interval.empty().round() == Interval.empty()
def test_extensions():
d = Interval(1, 3)
assert d.get_lte().equals(Interval.lte(3))
assert d.get_gte().equals(Interval.gte(1))
assert d.get_lt().equals(Interval.lt(1))
assert d.get_gt().equals(Interval.gt(3))
d = Interval.open(1, 3)
assert d.get_lte().equals(Interval.lt(3))
assert d.get_gte().equals(Interval.gt(1))
assert d.get_lt().equals(Interval.lte(1))
assert d.get_gt().equals(Interval.gte(3))
d = Interval.empty()
assert d.get_lte().is_empty
assert d.get_gte().is_empty
assert d.get_lt().is_empty
assert d.get_gt().is_empty
def test_inequalities():
assert Interval(1, 3) == (1, 3)
assert (1, 3) == Interval(1, 3)
assert Interval(1, 3) < (4, 6)
assert not Interval(1, 3) < (3, 6)
assert not Interval(1, 3) < (-3, -1)
assert Interval(1, 3) <= (3, 6)
assert Interval(1, 3) <= (2, 6)
assert Interval(1, 3) <= (1, 6)
assert Interval(3, 5) <= (1, 6)
assert not Interval(1, 3) <= (-3, -1)
assert not Interval(3, 6) <= Interval.open(1, 6)
assert Interval(1, 3) < Interval.empty()
assert Interval(1, 3) <= Interval.empty()
assert Interval(7, 9) > (4, 6)
assert not Interval(7, 9) > (4, 7)
assert not Interval(7, 9) > (10, 12)
assert Interval(7, 9) >= (4, 7)
assert Interval(7, 9) >= (4, 8)
assert Interval(7, 9) >= (4, 9)
assert not Interval(7, 9) >= (10, 12)
assert not Interval(4, 10) >= Interval.open(4, 9)
assert Interval(7, 9) > Interval.empty()
assert Interval(7, 9) >= Interval.empty()
def test_arithmetic():
assert Interval(1, 3) + (2, 4) == (1, 4)
assert (1, 3) + Interval(2, 4) == (1, 4)
assert Interval.open(1, 3) + (2, 4) == Interval.open_closed(1, 4)
assert (1, 3) + Interval.open(2, 4) == Interval.closed_open(1, 4)
| true
| true
|
7902b96858ce90e6851ca8f3ac49612340413a94
| 2,940
|
py
|
Python
|
scripts/importcode.py
|
paulscottrobson/flat
|
976644db7f52f04dae50dce43504ede9e97f695e
|
[
"MIT"
] | null | null | null |
scripts/importcode.py
|
paulscottrobson/flat
|
976644db7f52f04dae50dce43504ede9e97f695e
|
[
"MIT"
] | null | null | null |
scripts/importcode.py
|
paulscottrobson/flat
|
976644db7f52f04dae50dce43504ede9e97f695e
|
[
"MIT"
] | null | null | null |
# ***************************************************************************************
# ***************************************************************************************
#
# Name : importcode.py
# Author : Paul Robson (paul@robsons.org.uk)
# Date : 12th March 2019.
# Purpose : Import code into buffer area
#
# ***************************************************************************************
# ***************************************************************************************
import sys
from imagelib import *
#
# Initialise and get info
#
image = BinaryImage()
bufferInfo = image.sourcePages()
firstSourcePage = bufferInfo[0]
sourcePageCount = bufferInfo[1]
pageSize = image.getBufferSize()
#
# Clear all buffers
#
for p in range(firstSourcePage,firstSourcePage+sourcePageCount*2,2):
for a in range(0xC000,0x10000,pageSize):
image.write(p,a,0x80)
image.write(p,a+pageSize-1,0x00)
print("Found and erased {0} buffers for import ${1:02x}-${2:02x}.". \
format(int(sourcePageCount*16384/pageSize),firstSourcePage,firstSourcePage+sourcePageCount*2-2))
#
# Info on first buffer
#
currentPageNumber = firstSourcePage
currentPageAddress = 0xC000
currentBasePageAddress = 0xC000
bytesRemaining = pageSize
count = 1
#
# Work through all the source
#
for f in sys.argv[1:]:
src = [x if x.find("//") < 0 else x[:x.find("//")] for x in open(f).readlines()]
src = " ".join([x.replace("\t"," ").replace("\n"," ") for x in src])
src = [x for x in src.split(" ") if x != ""]
for word in src:
#
# For each word, look at it to see if it has a tag. Default is compilation.
#
tag = 0x40 # Green (compile) $40
if word[0] == ":": # Red (define) $00
tag = 0x00
word = word[1:]
elif word[0] == "[" and word[-1] == "]": # Yellow (execute) $80
tag = 0x80
word = word[1:-1]
#
# Make the final word and check it fits.
#
assert len(word) < 32,"Word too long "+word
if len(word) + 4 >= bytesRemaining: # it doesn't fit.
image.write(currentPageNumber,currentPageAddress,0x80)
currentPageAddress = (currentBasePageAddress + pageSize) & 0xFFFF
if currentPageAddress == 0:
currentPageNumber += 1
currentPageAddress = 0xC000
currentBasePageAddress = currentPageAddress
count += 1
bytesRemaining = pageSize
#
#print("\t\t{0:02x} {1:16} ${2:02x}:${3:04x} {4}".format(tag,word,currentPageNumber,currentPageAddress,bytesRemaining))
#
# Store the word
#
image.write(currentPageNumber,currentPageAddress,tag+len(word))
currentPageAddress += 1
for c in word:
image.write(currentPageNumber,currentPageAddress,ord(c))
currentPageAddress += 1
bytesRemaining = bytesRemaining - 1 - len(word)
#
# Add a trailing $80 in case it is the last.
#
image.write(currentPageNumber,currentPageAddress,0x80)
print("\tImported file '{0}'.".format(f))
#
# and write out
#
image.save()
print("Filled {0} buffers.".format(count))
| 31.956522
| 121
| 0.591837
|
import sys
from imagelib import *
image = BinaryImage()
bufferInfo = image.sourcePages()
firstSourcePage = bufferInfo[0]
sourcePageCount = bufferInfo[1]
pageSize = image.getBufferSize()
for p in range(firstSourcePage,firstSourcePage+sourcePageCount*2,2):
for a in range(0xC000,0x10000,pageSize):
image.write(p,a,0x80)
image.write(p,a+pageSize-1,0x00)
print("Found and erased {0} buffers for import ${1:02x}-${2:02x}.". \
format(int(sourcePageCount*16384/pageSize),firstSourcePage,firstSourcePage+sourcePageCount*2-2))
currentPageNumber = firstSourcePage
currentPageAddress = 0xC000
currentBasePageAddress = 0xC000
bytesRemaining = pageSize
count = 1
for f in sys.argv[1:]:
src = [x if x.find("//") < 0 else x[:x.find("//")] for x in open(f).readlines()]
src = " ".join([x.replace("\t"," ").replace("\n"," ") for x in src])
src = [x for x in src.split(" ") if x != ""]
for word in src:
tag = 0x40
if word[0] == ":":
tag = 0x00
word = word[1:]
elif word[0] == "[" and word[-1] == "]":
tag = 0x80
word = word[1:-1]
assert len(word) < 32,"Word too long "+word
if len(word) + 4 >= bytesRemaining:
image.write(currentPageNumber,currentPageAddress,0x80)
currentPageAddress = (currentBasePageAddress + pageSize) & 0xFFFF
if currentPageAddress == 0:
currentPageNumber += 1
currentPageAddress = 0xC000
currentBasePageAddress = currentPageAddress
count += 1
bytesRemaining = pageSize
#
#print("\t\t{0:02x} {1:16} ${2:02x}:${3:04x} {4}".format(tag,word,currentPageNumber,currentPageAddress,bytesRemaining))
#
# Store the word
#
image.write(currentPageNumber,currentPageAddress,tag+len(word))
currentPageAddress += 1
for c in word:
image.write(currentPageNumber,currentPageAddress,ord(c))
currentPageAddress += 1
bytesRemaining = bytesRemaining - 1 - len(word)
#
# Add a trailing $80 in case it is the last.
#
image.write(currentPageNumber,currentPageAddress,0x80)
print("\tImported file '{0}'.".format(f))
#
# and write out
#
image.save()
print("Filled {0} buffers.".format(count))
| true
| true
|
7902ba97f62d0493076275418dfc4a67cd95fa69
| 67,692
|
py
|
Python
|
model_selection.py
|
EnricoPittini/model-selection
|
dcd3e202773372088047056d866c12c15dba65ac
|
[
"MIT"
] | null | null | null |
model_selection.py
|
EnricoPittini/model-selection
|
dcd3e202773372088047056d866c12c15dba65ac
|
[
"MIT"
] | null | null | null |
model_selection.py
|
EnricoPittini/model-selection
|
dcd3e202773372088047056d866c12c15dba65ac
|
[
"MIT"
] | null | null | null |
"""
Module for the selection of machine learning models.
There are several different functions which can perform the model selection: all of them have an intuitive interface, but
are also powerful and flexible.
In addition, almost all these functions can optionally make plots, which sum up the performed selection in a visual way.
These different functions perform the model selection in different contexts, i.e. each function is specifically meant for a
specific scenario. Certain contexts are more specific, while others are more general.
On the whole, there are six different model selection functions, divided into two main groups:
1. functions that perform the model selection with respect to a **single dataset**;
2. functions that perform the model selection with respect to **multiple datasets**.
The six functions, sorted from the most specific context to the most general one, are:
- *hyperparameter_validation*, *hyperparameters_validation*, *models_validation* (single dataset);
- *datasets_hyperparameter_validation*, *datasets_hyperparameters_validation*, *datasets_models_validation* (multiple
datasets).
This module makes heavy use of the **numpy** library and is built on top of it. In fact, the datasets are represented as np.array.
Moreover, the plots are made using the **matplotlib** library. In addition, it is built on top of the **sklearn** module:
- the machine learning models are represented as sklearn models (i.e. sklearn estimators);
- under the hood, the selection is performed using the grid search cross validation provided by sklearn (i.e.
GridSearchCV);
- several other operations are done using the functionalities provided by sklearn.
Besides the model selection functions, this module also contains some utilities:
- the PolynomialRegression class;
- some utility functions.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.utils import resample
from sklearn.model_selection import train_test_split, cross_val_score, TimeSeriesSplit, GridSearchCV
from sklearn.metrics import mean_squared_error, accuracy_score
from sklearn.preprocessing import MinMaxScaler, PolynomialFeatures
from sklearn.base import BaseEstimator
from sklearn.linear_model import LinearRegression
#----------------------------------------------------------------------------------------------------------------------------
# POLYNOMIAL REGRESSOR MODEL
class PolynomialRegression(BaseEstimator):
"""
Polynomial regression model.
It's a sklearn model: it's compliant with the sklearn estimator interface.
`Example <https://scikit-learn.org/stable/developers/develop.html>`_
Parameters
----------
degree: int
Degree to apply for the polynomial transformation.
Notes
----------
The polynomial transformation is performed using the sklearn PolynomialFeatures.
"""
def __init__(self, degree=1):
self.degree=degree
def fit(self, X, y):
self.poly_transformer = PolynomialFeatures(self.degree, include_bias=False)
self.poly_transformer.fit(X)
X = self.poly_transformer.transform(X)
self.model = LinearRegression(fit_intercept=True)
self.model.fit(X,y)
return self
def predict(self, X):
X = self.poly_transformer.transform(X)
return self.model.predict(X)
def get_params(self, deep=True):
return {"degree": self.degree}
def set_params(self, **parameters):
for parameter, value in parameters.items():
setattr(self, parameter, value)
return self
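# Usage sketch (toy data, kept commented out so the module has no import-time side effects):
# X_toy = np.arange(10).reshape(-1, 1)
# y_toy = 3 * X_toy.ravel() ** 2 + 1
# poly = PolynomialRegression(degree=2).fit(X_toy, y_toy)
# poly.predict(X_toy[:3]) # approximately [1., 4., 13.]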
#----------------------------------------------------------------------------------------------------------------------------
# UTILITY FUNCTIONS
def compute_train_val_test(X, y, model, scale=False, test_size=0.2, time_series=False, random_state=123, n_folds=5,
regr=True):
"""
Compute the training-validation-test scores for the given model on the given dataset.
The training and test scores are simply computed by splitting the dataset into the training and test sets. The validation
score is performed applying the cross validation on the training set.
Parameters
----------
X: np.array
Two-dimensional np.array, containing the explanatory features of the dataset.
y: np.array
Mono dimensional np.array, containing the response feature of the dataset.
model: sklearn.base.BaseEstimator
Model to evaluate.
scale: bool
Indicates whether to scale or not the features in `X`.
(The scaling is performed using the sklearn MinMaxScaler).
test_size: float
Decimal number between 0 and 1, which indicates the proportion of the test set.
time_series: bool
Indicates if the given dataset is a time series dataset (i.e. datasets indexed by days).
(This affects the computing of the scores).
random_state: int
Used in the training-test splitting of the dataset.
n_folds: int
Indicates how many folds are made in order to compute the k-fold cross validation.
(It's used only if `time_series` is False).
regr: bool
Indicates if it's either a regression or a classification problem.
Returns
----------
train_score: float
val_score: float
test_score: float
Notes
----------
- If `regr` is True, the returned scores are errors, computed using the MSE formula (i.e. Mean Squared Error).
Otherwise, the returned scores are accuracy measures.
- If `time_series` is False, the training-test splitting of the dataset is made randomly. In addition, the cross
validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`.
Otherwise, if `time_series` is True, the training-test sets are obtained simply by splitting the dataset into two
contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.
"""
if regr:
scoring="neg_mean_squared_error"
else:
scoring="accuracy"
# Split into training and test.
if not time_series : # Random splitting (not time series)
X_train_80, X_test, y_train_80, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state)
else: # time series splitting
train_len = int(X.shape[0]*(1-test_size))
X_train_80 = X[:train_len]
y_train_80 = y[:train_len]
X_test = X[train_len:]
y_test = y[train_len:]
if(scale): # Scale the features in X
scaler = MinMaxScaler()
scaler.fit(X_train_80)
X_train_80 = scaler.transform(X_train_80)
X_test = scaler.transform(X_test)
# Cross validation
if not time_series: # k-fold cross validation
cv = n_folds
else: # cross validation for time series
cv = TimeSeriesSplit(n_splits = n_folds)
scores = cross_val_score(model, X_train_80, y_train_80, cv=cv, scoring=scoring)
val_score = scores.mean() # validation score
if regr:
val_score = -val_score
model.fit(X_train_80,y_train_80) # Fit the model using all the training
# Compute training and test scores
train_score=0
test_score=0
if regr:
train_score = mean_squared_error(y_true=y_train_80, y_pred=model.predict(X_train_80))
test_score = mean_squared_error(y_true=y_test, y_pred=model.predict(X_test))
else:
train_score = accuracy_score(y_true=y_train_80, y_pred=model.predict(X_train_80))
test_score = accuracy_score(y_true=y_test, y_pred=model.predict(X_test))
return train_score, val_score, test_score # Return a triple
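# Usage sketch (commented out; uses a synthetic dataset from sklearn, values are illustrative):
# from sklearn.datasets import make_regression
# X_demo, y_demo = make_regression(n_samples=200, n_features=3, noise=5.0, random_state=0)
# train_mse, val_mse, test_mse = compute_train_val_test(
# X_demo, y_demo, PolynomialRegression(degree=1), scale=True)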
def compute_bias_variance_error(X, y, model, scale=False, N_TESTS = 20, sample_size=0.67):
"""
Compute the bias^2-variance-error scores for the given model on the given dataset.
    These measures are computed in an approximate way, using `N_TESTS` random samples from the dataset, each containing a
    proportion `sample_size` of the instances.
Parameters
----------
X: np.array
Two-dimensional np.array, containing the explanatory features of the dataset.
y: np.array
Mono dimensional np.array, containing the response feature of the dataset.
model: sklearn.base.BaseEstimator
Model to evaluate.
scale: bool
Indicates whether to scale or not the features in `X`.
(The scaling is performed using the sklearn MinMaxScaler).
N_TESTS: int
Number of samples that are made in order to compute the measures.
sample_size: float
Decimal number between 0 and 1, which indicates the proportion of the sample.
Returns
----------
bias: float
variance: float
error: float
"""
# Scale the features in `X`
if(scale):
scaler = MinMaxScaler()
scaler.fit(X)
X = scaler.transform(X)
    # Vector 'vector_ypred': at the beginning it is a list of lists (i.e. a two-dimensional list).
# In the end it will be a matrix which has as many rows as `N_TESTS` (each row corresponds to a sample) and as many
# columns as the number of instances in `X` (each column is a point of the dataset).
# Row 'i' --> there are the predictions made by the model on the sample 'i' using all the dataset points.
# Column 'j' --> there are the predictions made by the model on the point 'j' using all the `N_TESTS` samples.
vector_ypred = []
# Iterate through N_TESTS. At each iteration extract a new sample and fit the model on it.
for i in range(N_TESTS):
# Extract a new sample (sample 'i')
Xs, ys = resample(X,y, n_samples=int(sample_size*len(y)) )
# Fit the model on this sample 'i'
model.fit(Xs,ys)
# Add the predictions made by the model on all the dataset points
vector_ypred.append(list(model.predict(X)))
vector_ypred = np.array(vector_ypred) # Transform into numpy array
# Vector that has as many elements as the dataset points, and for each of them it has the associated bias^2 computed on
    # the `N_TESTS` samples.
vector_bias = (y - np.mean(vector_ypred, axis=0))**2
# Vector that has as many elements as the dataset points, and for each of them it has the associated variance computed on
    # the `N_TESTS` samples.
vector_variance = np.var(vector_ypred, axis=0)
# Vector that has as many elements as the dataset points, and for each of them it has the associated error computed on
    # the `N_TESTS` samples.
vector_error = np.sum((vector_ypred - y)**2, axis=0)/N_TESTS
bias = np.mean(vector_bias) # Total bias^2 of the model
variance = np.mean(vector_variance) # Total variance of the model
error = np.mean(vector_error) # Total error of the model
return bias,variance,error # Return a triple
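# A minimal usage sketch for `compute_bias_variance_error` (illustrative addition, not part of
# the original module). It approximates the bias^2/variance/error decomposition of a decision
# tree on a synthetic dataset; the dataset and the model are assumptions.
def _example_compute_bias_variance_error():
    from sklearn.datasets import make_regression
    from sklearn.tree import DecisionTreeRegressor
    X, y = make_regression(n_samples=300, n_features=3, noise=5, random_state=0)
    bias, variance, error = compute_bias_variance_error(X, y, DecisionTreeRegressor(max_depth=4),
                                                        N_TESTS=20, sample_size=0.67)
    print("bias^2:", bias, "variance:", variance, "error:", error)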
def plot_predictions(X, y, model, scale=False, test_size=0.2, plot_type=0, xvalues=None, xlabel="Index",
title="Actual vs Predicted values", figsize=(6,6)):
"""
Plot the predictions made by the given model on the given dataset, versus its actual values.
    The dataset is split into training and test sets: the former is used to train the `model`, and the predictions are made
    on the latter.
Parameters
----------
X: np.array
Two-dimensional np.array, containing the explanatory features of the dataset.
y: np.array
Mono dimensional np.array, containing the response feature of the dataset.
model: sklearn.base.BaseEstimator
Model used to make the predictions.
scale: bool
Indicates whether to scale or not the features in `X`.
(The scaling is performed using the sklearn MinMaxScaler).
test_size: float
Decimal number between 0 and 1, which indicates the proportion of the test set.
plot_type: int
Indicates the type of the plot.
- 0 -> In the same plot two different curves are drawn: the first has on the x axis `xvalues` and on the y axis
the actual values (i.e. `y`); the second has on the x axis `xvalues` and on the y axis the computed
predicted values.
- 1 -> On the x axis the actual values are put, on the y axis the predicted ones.
xvalues: list (in general, iterable)
Values that have to be put in the x axis of the plot.
(It's used only if `plot_type` is 0).
xlabel: str
Label of the x axis of the plot.
(It's used only if `plot_type` is 0).
title: str
Title of the plot.
figsize: tuple
Two dimensions of the plot.
Returns
----------
matplotlib.axes.Axes
The matplotlib Axes where the plot has been made.
Notes
----------
    The splitting of the dataset into the training and test sets is simply made by dividing the dataset into two contiguous
    sequences.
    I.e. it is the same technique usually used when the dataset is a time series dataset. (This is done in order to simplify
    the visualization).
    For this reason, this function is typically applied to time series datasets.
"""
train_len = int(X.shape[0]*(1-test_size))
X_train_80 = X[:train_len]
y_train_80 = y[:train_len]
X_test = X[train_len:]
y_test = y[train_len:]
if(scale): # Scale the features in X
scaler = MinMaxScaler()
scaler.fit(X_train_80)
X_train_80 = scaler.transform(X_train_80)
X_test = scaler.transform(X_test)
model.fit(X_train_80,y_train_80) # Fit using all the training set
predictions = model.predict(X_test)
fig, ax = plt.subplots(figsize=figsize)
if plot_type==0:
if xvalues is None:
xvalues=range(len(X))
ax.plot(xvalues,y, 'o:', label='actual values')
ax.plot(xvalues[train_len:],predictions, 'o:', label='predicted values')
ax.legend()
elif plot_type==1:
ax.plot(y[train_len:],predictions,'o')
ax.plot([0, 1], [0, 1], 'r-',transform=ax.transAxes)
xlabel="Actual values"
ax.set_ylabel("Predicted values")
ax.set_xlabel(xlabel)
ax.set_title(title)
ax.grid()
return ax
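# A hedged usage sketch for `plot_predictions` (illustrative addition). The index-ordered
# synthetic dataset below mimics a time series; the data generation and the model are assumptions.
def _example_plot_predictions():
    import numpy as np
    from sklearn.linear_model import LinearRegression
    rng = np.random.RandomState(0)
    X = np.arange(100, dtype=float).reshape(-1, 1)
    y = 0.5 * X.ravel() + rng.normal(scale=2.0, size=100)
    # Plot actual vs predicted values over the index (plot_type=0)
    return plot_predictions(X, y, LinearRegression(), test_size=0.2, plot_type=0,
                            xvalues=range(100), xlabel="Day")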
def _plot_TrainVal_values(xvalues, train_val_scores, plot_train, xlabel, title, figsize=(6,6), bar=False):
"""
Plot the given list of training-validation scores.
This function is an auxiliary function for the model selection functions. It's meant to be private in the
module.
Parameters
----------
xvalues: list (in general iterable)
Values to put in the x axis of the plot.
train_val_scores: np.array
        Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation
scores.
Basically, it is a list of training-validation scores.
plot_train: bool
Indicates whether to plot also the training scores or to plot only the validation ones.
xlabel: str
Label of the x axis.
title: str
Title of the plot.
figsize: tuple
Two dimensions of the plot.
bar: bool
Indicates whether to plot the scores using bars or using points.
        If `bar` is True, `xvalues` must contain strings (i.e. labels).
Returns
----------
matplotlib.axes.Axes
The matplotlib Axes where the plot has been made.
"""
fig, ax = plt.subplots(figsize=figsize)
if not bar: # Points
if plot_train: # Plot also the training scores
ax.plot(xvalues,train_val_scores[:,0], 'o:', label='Train')
ax.plot(xvalues,train_val_scores[:,1], 'o:', label='Validation') # Validation scores
else: # Bars
if plot_train: # Plot also the training scores
x = np.arange(len(xvalues)) # The label locations
width = 0.35 # The width of the bars
ax.bar(x-width/2,train_val_scores[:,0], width=width, label='Train')
ax.bar(x+width/2,train_val_scores[:,1], width=width, label='Validation') # Validation scores
ax.set_xticks(x)
ax.set_xticklabels(xvalues)
else:
ax.bar(xvalues,train_val_scores[:,1],label='Validation')
ax.set_xlabel(xlabel)
ax.set_title(title)
ax.grid()
ax.legend()
return ax
#----------------------------------------------------------------------------------------------------------------------------
# FUNCTIONS THAT PERFORM THE MODEL SELECTION WITH RESPECT TO A SINGLE DATASET
def hyperparameter_validation(X, y, model, hyperparameter, hyperparameter_values, scale=False, test_size=0.2,
time_series=False, random_state=123, n_folds=5, regr=True, plot=False, plot_train=False,
xvalues=None, xlabel=None, title="Hyperparameter validation", figsize=(6,6)):
"""
Select the best value for the specified hyperparameter of the specified model on the given dataset.
In other words, perform the tuning of the `hyperparameter` among the values in `hyperparameter_values`.
This selection is made using the validation score (i.e. the best hyperparameter value is the one with the best validation
score).
The validation score is computed by splitting the dataset into the training-test sets and then by applying the cross
validation on the training set.
Additionally, the training and test scores are also computed.
Optionally, the validation scores of the `hyperparameter_values` can be plotted, making a graphical visualization of the
selection.
Parameters
----------
X: np.array
Two-dimensional np.array, containing the explanatory features of the dataset.
y: np.array
Mono dimensional np.array, containing the response feature of the dataset.
model: sklearn.base.BaseEstimator
Model which has the specified `hyperparameter`.
hyperparameter: str
The name of the hyperparameter that has to be validated.
hyperparameter_values: list
List of values for `hyperparameter` that have to be taken into account in the selection.
scale: bool
Indicates whether to scale or not the features in `X`.
(The scaling is performed using the sklearn MinMaxScaler).
test_size: float
Decimal number between 0 and 1, which indicates the proportion of the test set.
time_series: bool
Indicates if the given dataset is a time series dataset (i.e. dataset indexed by days).
(This affects the computing of the validation score).
random_state: int
Used in the training-test splitting of the dataset.
n_folds: int
Indicates how many folds are made in order to compute the k-fold cross validation.
(It's used only if `time_series` is False).
regr: bool
Indicates if it's either a regression or a classification problem.
plot: bool
Indicates whether to plot or not the validation score values.
plot_train: bool
Indicates whether to plot also the training scores.
(It's considered only if `plot` is True).
xvalues: list (in general, iterable)
Values that have to be put in the x axis of the plot.
xlabel: str
Label of the x axis of the plot.
title: str
Title of the plot.
figsize: tuple
Two dimensions of the plot.
Returns
----------
train_val_scores: np.array
Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation
scores.
It has as many rows as the number of values in `hyperparameter_values` (i.e. number of values to be tested).
best_index: int
Index of `hyperparameter_values` that indicates which is the best hyperparameter value.
test_score: float
Test score associated with the best hyperparameter value.
ax: matplotlib.axes.Axes
The matplotlib Axes where the plot has been made.
If `plot` is False, then it is None.
Notes
----------
- If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best
hyperparameter value is the one associated with the minimum validation score.
Otherwise, the validation scores are accuracies: this means that the best hyperparameter value is the one associated
with the maximum validation score.
- If `time_series` is False, the training-test splitting of the dataset is made randomly. In addition, the cross
validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`.
Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting the dataset into two
contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.
"""
param_grid = {hyperparameter:hyperparameter_values} # Create the hyperparameter grid
# Call the function for the validation of an arbitrary number of hyperparameters
params, train_val_scores, best_index, test_score = hyperparameters_validation(X, y, model, param_grid, scale=scale,
test_size=test_size,
time_series=time_series,
random_state=random_state, n_folds=n_folds,
regr=regr)
ax = None
if(plot): # Make the plot
if not xvalues: # Default values on the x axis
xvalues = hyperparameter_values
if not xlabel: # Default label on the x axis
xlabel = hyperparameter
ax = _plot_TrainVal_values(xvalues, train_val_scores, plot_train, xlabel, title, figsize)
return train_val_scores, best_index, test_score, ax
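# A hedged usage sketch for `hyperparameter_validation` (illustrative addition): tuning the
# `alpha` hyperparameter of a Ridge regressor on a synthetic dataset. The dataset, the model
# and the candidate values are assumptions.
def _example_hyperparameter_validation():
    from sklearn.datasets import make_regression
    from sklearn.linear_model import Ridge
    X, y = make_regression(n_samples=200, n_features=5, noise=10, random_state=0)
    alphas = [0.01, 0.1, 1, 10, 100]
    train_val_scores, best_index, test_score, ax = hyperparameter_validation(
        X, y, Ridge(), "alpha", alphas, scale=True, plot=True, plot_train=True)
    print("best alpha:", alphas[best_index], "test MSE:", test_score)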
def hyperparameters_validation(X, y, model, param_grid, scale=False, test_size=0.2, time_series=False, random_state=123,
n_folds=5, regr=True):
"""
Select the best combination of values for the specified hyperparameters of the specified model on the given dataset.
In other words, perform the tuning of multiple hyperparameters.
The parameter `param_grid` is a dictionary that indicates which are the specified hyperparameters and what are the
associated values to test.
All the possible combinations of values are tested, in an exhaustive way (i.e. grid search).
This selection is made using the validation score (i.e. the best combination of hyperparameters values is the one with
the best validation score).
The validation score is computed by splitting the dataset into the training-test sets and then by applying the cross
validation on the training set.
Additionally, the training and test scores are also computed.
Parameters
----------
X: np.array
Two-dimensional np.array, containing the explanatory features of the dataset.
y: np.array
Mono dimensional np.array, containing the response feature of the dataset.
model: sklearn.base.BaseEstimator
Model which has the specified hyperparameters.
param_grid: dict
Dictionary which has as keys the names of the specified hyperparameters and as values the associated list of
values to test.
scale: bool
Indicates whether to scale or not the features in `X`.
(The scaling is performed using the sklearn MinMaxScaler).
test_size: float
Decimal number between 0 and 1, which indicates the proportion of the test set.
time_series: bool
        Indicates if the given dataset is a time series dataset (i.e. dataset indexed by days).
(This affects the computing of the validation score).
random_state: int
Used in the training-test splitting of the dataset.
n_folds: int
Indicates how many folds are made in order to compute the k-fold cross validation.
(It's used only if `time_series` is False).
regr: bool
Indicates if it's either a regression or a classification problem.
Returns
----------
params: list
List which enumerates all the possible combinations of hyperparameters values.
It's a list of dictionaries: each dictionary represents a specific combination of hyperparameters values. (It's a
dictionary which has as keys the hyperparameters names and as values the specific associated values of that combination).
train_val_scores: np.array
Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation
scores.
It has as many rows as the number of possible combinations of the hyperparameters values.
(It has as many rows as the elements of `params`).
best_index: int
Index of `params` that indicates which is the best combination of hyperparameters values.
test_score: float
Test score associated with the best combination of hyperparameters values.
Notes
----------
- If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best
combination of hyperparameters values is the one associated with the minimum validation score.
Otherwise, the validation scores are accuracies: this means that the best combination of hyperparameters values is the
one associated with the maximum validation score.
- If `time_series` is False, the training-test splitting of the dataset is made randomly. In addition, the cross
validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`.
Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting the dataset into two
contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.
"""
if regr:
scoring="neg_mean_squared_error"
else:
scoring="accuracy"
# Split into training-test sets
if not time_series : # Random splitting
X_train_80, X_test, y_train_80, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state)
else: # Time series splitting
train_len = int(X.shape[0]*(1-test_size))
X_train_80 = X[:train_len]
y_train_80 = y[:train_len]
X_test = X[train_len:]
y_test = y[train_len:]
if(scale): # Scale the features in `X`
scaler = MinMaxScaler()
scaler.fit(X_train_80)
X_train_80 = scaler.transform(X_train_80)
X_test = scaler.transform(X_test)
# Cross validation strategy
if not time_series: # The strategy is the classic k-fold cross validation
cv = n_folds
else: # Time series cross validation strategy
cv = TimeSeriesSplit(n_splits = n_folds)
# Grid search
grid_search = GridSearchCV(model,param_grid,scoring=scoring,cv=cv,return_train_score=True)
grid_search.fit(X_train_80,y_train_80)
params = grid_search.cv_results_["params"] # List of all the possible combinations of hyperparameters values
# List where for all the possible combinations of hyperparameters values there is the associated training score
train_scores = grid_search.cv_results_["mean_train_score"]
# List where for all the possible combinations of hyperparameters values there is the associated validation score
val_scores = grid_search.cv_results_["mean_test_score"]
# Index of `params`, corresponding to the best combination of hyperparameters values
best_index = grid_search.best_index_
# Model with the best combination of hyperparameters values
best_model = grid_search.best_estimator_
    if regr: # The scores are negative: multiply by -1
train_scores = train_scores*(-1)
val_scores = val_scores*(-1)
train_val_scores = np.concatenate((train_scores.reshape(-1,1), val_scores.reshape(-1,1)), axis=1)
# Fit the best model on all the training set
best_model.fit(X_train_80,y_train_80)
# Compute the test score of the best model
test_score=0
if regr:
test_score = mean_squared_error(y_true=y_test, y_pred=best_model.predict(X_test))
else:
test_score = accuracy_score(y_true=y_test, y_pred=best_model.predict(X_test))
return params, train_val_scores, best_index, test_score
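# A hedged usage sketch for `hyperparameters_validation` (illustrative addition): an exhaustive
# grid search over two hyperparameters of a decision tree. The dataset and the grid values are
# assumptions.
def _example_hyperparameters_validation():
    from sklearn.datasets import make_regression
    from sklearn.tree import DecisionTreeRegressor
    X, y = make_regression(n_samples=200, n_features=5, noise=10, random_state=0)
    param_grid = {"max_depth": [2, 4, 6], "min_samples_leaf": [1, 5, 10]}
    params, train_val_scores, best_index, test_score = hyperparameters_validation(
        X, y, DecisionTreeRegressor(random_state=0), param_grid)
    print("best combination:", params[best_index], "test MSE:", test_score)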
def models_validation(X, y, model_paramGrid_list, scale_list=None, test_size=0.2, time_series=False, random_state=123,
n_folds=5, regr=True, plot=False, plot_train=False, xvalues=None, xlabel="Models",
title="Models validation", figsize=(6,6)):
"""
Select the best model on the given dataset.
The parameter `model_paramGrid_list` is the list of the models to test. It also contains, for each model, the grid of
hyperparameters that have to be tested on that model (i.e. the grid which contains the values to test for each
specified hyperparameter of the model).
(That grid has the same structure as the `param_grid` parameter of the function `hyperparameters_validation`. See
`hyperparameters_validation`).
For each specified model, the best combination of hyperparameters values is selected in an exhaustive way (i.e. grid
search).
Actually, the function `hyperparameters_validation` is used.
(See `hyperparameters_validation`).
The selection of the best model is made using the validation score (i.e. the best model is the one with the best
validation score).
The validation score is computed by splitting the dataset into the training-test sets and then by applying the cross
validation on the training set.
Additionally, the training and test scores are also computed.
Optionally, the validation scores of the different models can be plotted, making a graphical visualization of the
selection.
Parameters
----------
X: np.array
Two-dimensional np.array, containing the explanatory features of the dataset.
y: np.array
Mono dimensional np.array, containing the response feature of the dataset.
model_paramGrid_list: list
List that specifies the models and the relative grids of hyperparameters to be tested.
It's a list of triples (i.e. tuples), where each triple represents a model:
- the first element is a string, which is a mnemonic name of that model;
- the second element is the sklearn model;
- the third element is the grid of hyperparameters to test for that model. It's a dictionary, with the same
structure of the parameter `param_grid` of the function `hyperparameters_validation`.
scale_list: list or bool
List of booleans, which has as many elements as the models to test (i.e. as the elements of the
`model_paramGrid_list` list).
This list indicates, for each different model, if the features in `X` have to be scaled or not.
`scale_list` can be None or False: in this case the `X` features aren't scaled for any model. `scale_list` can be
True: in this case the `X` features are scaled for all the models.
test_size: float
Decimal number between 0 and 1, which indicates the proportion of the test set.
time_series: bool
Indicates if the given dataset is a time series dataset (i.e. dataset indexed by days).
(This affects the computing of the validation score).
random_state: int
Used in the training-test splitting of the dataset.
n_folds: int
Indicates how many folds are made in order to compute the k-fold cross validation.
(It's used only if `time_series` is False).
regr: bool
Indicates if it's either a regression or a classification problem.
plot: bool
Indicates whether to plot or not the validation score values.
plot_train: bool
Indicates whether to plot also the training scores.
(It's considered only if `plot` is True).
xvalues: list (in general, iterable)
Values that have to be put in the x axis of the plot.
xlabel: str
Label of the x axis of the plot.
title: str
Title of the plot.
figsize: tuple
Two dimensions of the plot.
Returns
----------
models_train_val_score: np.array
Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation
scores.
It has as many rows as the number of models to test (i.e. number of elements in the `model_paramGrid_list` list).
models_best_params: list
List which indicates, for each model, the best combination of the hyperparameters values for that model.
It has as many elements as the models to test (i.e. as the elements of the `model_paramGrid_list` list), and it
contains dictionaries: each dictionary represents the best combination of the hyperparameters values for the
associated model.
best_index: int
Index of `model_paramGrid_list` that indicates which is the best model.
test_score: float
Test score associated with the best model.
ax: matplotlib.axes.Axes
The matplotlib Axes where the plot has been made.
If `plot` is False, then it is None.
See also
----------
hyperparameters_validation:
select the best combination of values for the specified hyperparameters of the specified model on the given dataset.
Notes
----------
- If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best
model is the one associated with the minimum validation score.
Otherwise, the validation scores are accuracies: this means that the best model is the one associated with the
maximum validation score.
- If `time_series` is False, the training-test splitting of the dataset is made randomly. In addition, the cross
validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`.
Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting the dataset into two
contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.
"""
if not scale_list: # `scale_list` is either None or False
scale_list = [False]*len(model_paramGrid_list)
elif scale_list is True: # `scale_list` is True
scale_list = [True]*len(model_paramGrid_list)
# Numpy matrix (np.array) which has as many rows as the models and which has two columns, one for the training scores and
# the other for the validation scores. At the beginning it is a list of tuples.
models_train_val_score = []
# List which has as many elements as the models: for each model there is the dictionary of the best combination of
# hyperparameters values.
models_best_params = []
# List which has as many elements as the models: for each model there is the test score (associated with the best
# combination of hyperparameters values).
models_test_score = []
    for i,triple in enumerate(model_paramGrid_list): # Iterate through all the triples (name, model, param_grid)
model,param_grid = triple[1:]
# Apply the grid search on model-param_grid
params, train_val_scores, best_index, test_score = hyperparameters_validation(X, y, model, param_grid,
scale=scale_list[i],
test_size=test_size,
time_series=time_series,
random_state=random_state,
n_folds=n_folds, regr=regr)
models_train_val_score.append(tuple(train_val_scores[best_index])) # Add the row for that model
models_best_params.append(params[best_index]) # Add the element for that model
models_test_score.append(test_score) # Add the element for that model
models_train_val_score = np.array(models_train_val_score) # Transform into numpy matrix (i.e. np.array)
# Find the best index (i.e. the best model)
if regr:
best_index = np.argmin(models_train_val_score,axis=0)[1]
else:
best_index = np.argmax(models_train_val_score,axis=0)[1]
# Test score of the best model
test_score = models_test_score[best_index]
ax = None
if(plot): # Make the plot
if not xvalues: # Default values for the x axis
xvalues = [model_paramGrid_list[i][0] for i in range(len(model_paramGrid_list))]
ax = _plot_TrainVal_values(xvalues, models_train_val_score, plot_train, xlabel, title, figsize, bar=True)
return models_train_val_score, models_best_params, best_index, test_score, ax
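# A hedged usage sketch for `models_validation` (illustrative addition), mainly showing the
# structure expected for `model_paramGrid_list`. The two candidate models and their grids are
# assumptions.
def _example_models_validation():
    from sklearn.datasets import make_regression
    from sklearn.linear_model import Ridge
    from sklearn.neighbors import KNeighborsRegressor
    X, y = make_regression(n_samples=200, n_features=5, noise=10, random_state=0)
    model_paramGrid_list = [
        ("ridge", Ridge(), {"alpha": [0.1, 1, 10]}),                 # (name, model, grid)
        ("knn", KNeighborsRegressor(), {"n_neighbors": [3, 5, 7]}),
    ]
    scores, best_params, best_index, test_score, ax = models_validation(
        X, y, model_paramGrid_list, scale_list=[True, True], plot=True)
    print("best model:", model_paramGrid_list[best_index][0], "with", best_params[best_index])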
#----------------------------------------------------------------------------------------------------------------------------
# FUNCTIONS THAT PERFORM THE MODEL SELECTION WITH RESPECT TO MULTIPLE DATASETS
def datasets_hyperparameter_validation(dataset_list, model, hyperparameter, hyperparameter_values, scale=False,
test_size=0.2, time_series=False, random_state=123, n_folds=5, regr=True, plot=False,
plot_train=False, xvalues=None, xlabel="Datasets", title="Datasets validation",
figsize=(6,6) ,verbose=False, figsize_verbose=(6,6)):
"""
Select the best dataset and the best value for the specified hyperparameter of the specified model (i.e. select the best
couple dataset-hyperparameter value).
For each dataset in `dataset_list`, all the specified values `hyperparameter_values` are tested for the specified
`hyperparameter` of `model`.
In other words, on each dataset the tuning of `hyperparameter` is performed: in fact, on each dataset, the function
`hyperparameter_validation` is applied. (See `hyperparameter_validation`).
In the end, the best couple dataset-hyperparameter value is selected.
    Despite the fact that a couple dataset-hyperparameter value is selected, the main viewpoint is focused on the datasets.
    It's a validation focused on the datasets.
In fact, first of all, for each dataset the hyperparameter tuning is performed: in this way the best value is selected
and its relative score is associated with the dataset (i.e. it's the score of the dataset). (In other words, on each
dataset the function `hyperparameter_validation` is applied). Finally, after that, the best dataset is selected.
    It's a two-level selection.
This selection is made using the validation score (i.e. the best couple dataset-hyperparameter value is the one with the
best validation score).
The validation score is computed by splitting each dataset into the training-test sets and then by applying the cross
validation on the training set.
Additionally, the training and test scores are also computed.
Optionally, the validation scores of the datasets can be plotted, making a graphical visualization of the dataset
selection. This is the 'main' plot.
Moreover, still optionally, the 'secondary' plots can be done: for each dataset, the validation scores of the
`hyperparameter_values` are plotted, making a graphical visualization of the hyperparameter tuning on that dataset.
(As the plot made by the `hyperparameter_validation` function).
Parameters
----------
dataset_list: list
List of couples, where each couple is a dataset.
- The first element is X, the two-dimensional np.array containing the explanatory features of the dataset.
- The second element is y, the mono dimensional np.array containing the response feature of the dataset.
model: sklearn.base.BaseEstimator
Model which has the specified `hyperparameter`.
hyperparameter: str
The name of the hyperparameter that has to be validated.
hyperparameter_values: list
List of values for `hyperparameter` that have to be taken into account in the selection.
scale: bool
Indicates whether to scale or not the features in 'X' (for all the datasets).
(The scaling is performed using the sklearn MinMaxScaler).
test_size: float
Decimal number between 0 and 1, which indicates the proportion of the test set (for each dataset).
time_series: bool
Indicates if the given datasets are time series dataset (i.e. datasets indexed by days).
(This affects the computing of the validation scores).
random_state: int
Used in the training-test splitting of the datasets.
n_folds: int
Indicates how many folds are made in order to compute the k-fold cross validation.
(It's used only if `time_series` is False).
regr: bool
Indicates if it's either a regression or a classification problem.
plot: bool
Indicates whether to plot or not the validation score values of the datasets (i.e. this is the 'main' plot).
plot_train: bool
Indicates whether to plot also the training scores (both in the 'main' and 'secondary' plots).
xvalues: list (in general, iterable)
Values that have to be put in the x axis of the 'main' plot.
xlabel: str
Label of the x axis of the 'main' plot.
title: str
Title of the 'main' plot.
figsize: tuple
Two dimensions of the 'main' plot.
verbose: bool
If True, for each dataset are plotted the validation scores of the hyperparameter tuning (these are the 'secondary'
plots).
(See 'hyperparameter_validation').
figsize_verbose: tuple
Two dimensions of the 'secondary' plots.
Returns
----------
datasets_train_val_score: np.array
Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation
scores.
It has as many rows as the number of datasets to test, i.e. as the number of elements in `dataset_list`.
datasets_best_hyperparameter_value: list
List which has as many elements as the number of the datasets (i.e. as the number of elements in `dataset_list`). For
each dataset, it contains the best `hyperparameter` value on that dataset.
best_index: int
Index of `dataset_list` that indicates which is the best dataset.
test_score: float
Test score associated with the best couple dataset-hyperparameter value.
axes: list
List of the matplotlib Axes where the plots have been made.
Firstly, the 'secondary' plots are put (if any). And, as last, the 'main' plot is put (if any).
If no plot has been made, `axes` is an empty list.
See also
----------
hyperparameter_validation:
select the best value for the specified hyperparameter of the specified model on the given dataset.
Notes
----------
- If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best
couple dataset-hyperparameter value is the one associated with the minimum validation score.
Otherwise, the validation scores are accuracies: this means that the best couple is the one associated with the
maximum validation score.
- If `time_series` is False, the training-test splitting of each dataset is made randomly. In addition, the cross
validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`.
Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting each dataset into two
contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.
"""
# numpy matrix (i.e. np.array) which has as many rows as the datasets, and it has the training and validation scores as
# columns. At the beginning it is a list.
datasets_train_val_score = []
# List which contains, for each dataset, the best hyperparameter value
datasets_best_hyperparameter_value = []
# List which contains, for each dataset, its test score (associated with the best hyperparameter value)
datasets_test_score = []
# List of axes
axes = []
for i,dataset in enumerate(dataset_list): # Iterate through all the datasets
X,y = dataset
# Perform the hyperparameter tuning on the current dataset
train_val_scores, best_index, test_score, ax = hyperparameter_validation(X, y, model, hyperparameter,
hyperparameter_values, scale=scale, test_size=test_size, time_series=time_series,
random_state=random_state, n_folds=n_folds, regr=regr, plot=verbose, plot_train=plot_train,
xvalues=hyperparameter_values, xlabel=hyperparameter,
title="Dataset "+str(i)+" : hyperparameter validation", figsize=figsize_verbose)
datasets_train_val_score.append(tuple(train_val_scores[best_index,:])) # Add the row related to that dataset
datasets_best_hyperparameter_value.append(hyperparameter_values[best_index]) # Add the element related to that dataset
        datasets_test_score.append(test_score) # Add the element related to that dataset
if ax:
axes.append(ax)
datasets_train_val_score = np.array(datasets_train_val_score) # Transform into numpy
# Find the best index, i.e. the best dataset (more precisely, the best couple dataset-hyperparameter value)
if regr:
best_index = np.argmin(datasets_train_val_score,axis=0)[1]
else:
best_index = np.argmax(datasets_train_val_score,axis=0)[1]
# Test score of the best couple dataset-hyperparameter value
test_score = datasets_test_score[best_index]
if(plot): # Make the plot
if not xvalues: # Default values on the x axis
xvalues = range(len(dataset_list))
ax = _plot_TrainVal_values(xvalues,datasets_train_val_score,plot_train,xlabel,title,figsize, bar=True)
axes.append(ax)
return datasets_train_val_score, datasets_best_hyperparameter_value, best_index, test_score, axes
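# A hedged usage sketch for `datasets_hyperparameter_validation` (illustrative addition): two
# synthetic datasets are compared while tuning `alpha` of a Ridge regressor on each of them.
# The datasets, the model and the candidate values are assumptions.
def _example_datasets_hyperparameter_validation():
    from sklearn.datasets import make_regression
    from sklearn.linear_model import Ridge
    dataset_list = [make_regression(n_samples=200, n_features=5, noise=10, random_state=0),
                    make_regression(n_samples=200, n_features=10, noise=10, random_state=1)]
    scores, best_values, best_index, test_score, axes = datasets_hyperparameter_validation(
        dataset_list, Ridge(), "alpha", [0.1, 1, 10], plot=True, verbose=True)
    print("best dataset index:", best_index, "with alpha:", best_values[best_index])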
def datasets_hyperparameters_validation(dataset_list, model, param_grid, scale=False, test_size=0.2, time_series=False,
random_state=123, n_folds=5, regr=True, plot=False, plot_train=False, xvalues=None,
xlabel="Datasets", title="Datasets validation",figsize=(6,6)):
"""
Select the best dataset and the best combination of values for the specified hyperparameters of the specified model (i.e.
select the best couple dataset-combination of hyperparameters values).
For each dataset in `dataset_list`, all the possible combinations of the hyperparameters values for `model` (specified
with `param_grid`) are tested.
In other words, on each dataset the tuning of the specified hyperparameters is performed in an exhaustive way: in fact,
on each dataset, the function `hyperparameters_validation` is applied. (See `hyperparameters_validation`).
In the end, the best couple dataset-combination of hyperparameters values is selected.
    Despite the fact that a couple dataset-combination of hyperparameters values is selected, the main viewpoint is focused
    on the datasets. It's a validation focused on the datasets.
In fact, first of all, for each dataset the hyperparameters tuning is performed: in this way the best combination of
values is selected and its relative score is associated with the dataset (i.e. it's the score of the dataset). (In other
words, on each dataset the function `hyperparameters_validation` is applied). Finally, after that, the best dataset is
    selected. It's a two-level selection.
This selection is made using the validation score (i.e. the best couple dataset-combination of hyperparameters values, is
the one with best validation score).
The validation score is computed by splitting each dataset into the training-test sets and then by applying the cross
validation on the training set.
Additionally, the training and test scores are also computed.
Optionally, the validation scores of the datasets can be plotted, making a graphical visualization of the dataset
selection.
Parameters
----------
dataset_list: list
        List of couples, where each couple is a dataset.
- The first element is X, the two-dimensional np.array containing the explanatory features of the dataset.
- The second element is y, the mono dimensional np.array containing the response feature of the dataset.
model: sklearn.base.BaseEstimator
Model which has the specified hyperparameters.
param_grid: dict
Dictionary which has as keys the names of the specified hyperparameters and as values the associated list of
values to test.
scale: bool
Indicates whether to scale or not the features in 'X' (for all the datasets).
(The scaling is performed using the sklearn MinMaxScaler).
test_size: float
Decimal number between 0 and 1, which indicates the proportion of the test set (for each dataset).
time_series: bool
Indicates if the given datasets are time series datasets (i.e. datasets indexed by days).
(This affects the computing of the validation score).
random_state: int
Used in the training-test splitting of the datasets.
n_folds: int
Indicates how many folds are made in order to compute the k-fold cross validation.
(It's used only if `time_series` is False).
regr: bool
Indicates if it's either a regression or a classification problem.
plot: bool
Indicates whether to plot or not the validation score values of the datasets.
plot_train: bool
Indicates whether to plot also the training scores.
(It's considered only if `plot` is True).
xvalues: list (in general, iterable)
Values that have to be put in the x axis of the plot.
xlabel: str
Label of the x axis of the plot.
title: str
Title of the plot.
figsize: tuple
Two dimensions of the plot.
Returns
----------
datasets_train_val_score: np.array
Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation
scores.
It has as many rows as the number of datasets to test, i.e. as the number of elements in `dataset_list`.
datasets_best_params: list
List which has as many elements as the number of the datasets (i.e. as the number of elements in `dataset_list`). For
each dataset, it contains the best combination of hyperparameters values on that dataset.
Each combination is represented as a dictionary, with keys the hyperparameters names and values the associated
values.
best_index: int
Index of `dataset_list` that indicates which is the best dataset.
test_score: float
Test score associated with the best couple dataset-combination of hyperparameters values.
ax: matplotlib.axes.Axes
The matplotlib Axes where the plot has been made.
See also
----------
hyperparameters_validation:
select the best combination of values for the specified hyperparameters of the specified model on the given dataset.
Notes
----------
- If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best
couple dataset-combination of hyperparameters values is the one associated with the minimum validation score.
Otherwise, the validation scores are accuracies: this means that the best couple is the one associated with the
maximum validation score.
- If `time_series` is False, the training-test splitting of each dataset is made randomly. In addition, the cross
validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`.
Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting each dataset into two
contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.
"""
# numpy matrix (i.e. np.array) which has as many rows as the datasets, and it has the training and validation scores as
    # columns. At the beginning it is a list.
datasets_train_val_score = []
# List which contains, for each dataset, the best combination of hyperparameters values (i.e. a dictionary)
datasets_best_params = []
# List which contains, for each dataset, its test score (associated to the best combination of hyperparameters values)
datasets_test_score = []
for X,y in dataset_list: # Iterate through all the datasets
        # Perform the exhaustive hyperparameters tuning on the current dataset
params, train_val_scores, best_index, test_score = hyperparameters_validation(X, y, model, param_grid, scale=scale,
test_size=test_size,
time_series=time_series,
random_state=random_state,
n_folds=n_folds, regr=regr)
datasets_train_val_score.append(tuple(train_val_scores[best_index,:])) # Add the row related to that dataset
datasets_best_params.append(params[best_index]) # Add the element related to that dataset
        datasets_test_score.append(test_score) # Add the element related to that dataset
datasets_train_val_score = np.array(datasets_train_val_score) # Transform into numpy
# Find the best index, i.e. the best dataset (more precisely, the best couple dataset-combination of hyperparameters
# values)
if regr:
best_index = np.argmin(datasets_train_val_score,axis=0)[1]
else:
best_index = np.argmax(datasets_train_val_score,axis=0)[1]
# Test score of the best couple dataset-combination of hyperparameters values
test_score = datasets_test_score[best_index]
ax = None
if(plot): # Make the plot
if not xvalues: # Default values on the x axis
xvalues = range(len(dataset_list))
ax = _plot_TrainVal_values(xvalues,datasets_train_val_score,plot_train,xlabel,title,figsize, bar=True)
return datasets_train_val_score, datasets_best_params, best_index, test_score, ax
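# A hedged usage sketch for `datasets_hyperparameters_validation` (illustrative addition): the
# same hyperparameter grid is tuned on two synthetic datasets and the best couple
# dataset-combination is selected. The datasets, the model and the grid are assumptions.
def _example_datasets_hyperparameters_validation():
    from sklearn.datasets import make_regression
    from sklearn.tree import DecisionTreeRegressor
    dataset_list = [make_regression(n_samples=200, n_features=5, noise=10, random_state=s)
                    for s in (0, 1)]
    param_grid = {"max_depth": [2, 4, 6], "min_samples_leaf": [1, 5, 10]}
    scores, best_params, best_index, test_score, ax = datasets_hyperparameters_validation(
        dataset_list, DecisionTreeRegressor(random_state=0), param_grid, plot=True)
    print("best dataset index:", best_index, "best combination:", best_params[best_index])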
def datasets_models_validation(dataset_list, model_paramGrid_list, scale_list=None, test_size=0.2, time_series=False,
random_state=123, n_folds=5, regr=True, plot=False, plot_train=False, xvalues=None,
xlabel="Datasets", title="Datasets validation", figsize=(6,6) ,verbose=False,
figsize_verbose=(6,6)):
"""
Select the best dataset and the best model (i.e. select the best couple dataset-model).
For each dataset in `dataset_list`, all the models in `model_paramGrid_list` are tested: each model is tested performing
an exhaustive tuning of the specified hyperparameters. In fact, `model_paramGrid_list` also contains, for each model, the
grid of the hyperparameters that have to be tested on that model (i.e. the grid which contains the values to test for
each specified hyperparameter of the model).
In other words, on each dataset the selection of the best model is performed: in fact, on each dataset, the function
`models_validation` is applied. (See `models_validation`).
In the end, the best couple dataset-model is selected.
    Despite the fact that a couple dataset-model is selected, the main viewpoint is focused on the datasets.
It's a validation focused on the datasets.
In fact, first of all, for each dataset the model selection is performed: in this way the best model is selected
and its relative score is associated with the dataset (i.e. it's the score of the dataset). (In other words, on each
dataset the function `models_validation` is applied). Finally, after that, the best dataset is selected.
    It's a two-level selection.
This selection is made using the validation score (i.e. the best couple dataset-model is the one with best validation
score).
The validation score is computed by splitting each dataset into the training-test sets and then by applying the cross
validation on the training set.
Additionally, the training and test scores are also computed.
Optionally, the validation scores of the datasets can be plotted, making a graphical visualization of the dataset
selection. This is the 'main' plot.
Moreover, still optionally, the 'secondary' plots can be done: for each dataset, the validation scores of the models are
plotted, making a graphical visualization of the models selection on that dataset. (As the plot made by the
`models_validation` function).
Parameters
----------
dataset_list: list
List of couples, where each couple is a dataset.
- The first element is X, the two-dimensional np.array containing the explanatory features of the dataset.
- The second element is y, the mono dimensional np.array containing the response feature of the dataset.
model_paramGrid_list: list
List that specifies the models and the relative grid of hyperparameters to be tested.
It's a list of triples (i.e. tuples), where each triple represents a model:
- the first element is a string, which is a mnemonic name of that model;
- the second element is the sklearn model;
- the third element is the grid of hyperparameters to test for that model. It's a dictionary, with the same
structure of parameter `param_grid` of the function `hyperparameters_validation`.
scale_list: list or bool
List of booleans, which has as many elements as the number of models to test (i.e. number of elements in the
`model_paramGrid_list` list).
This list indicates, for each different model, if the features in 'X' have to be scaled or not (for all the datasets).
`scale_list` can be None or False: in this case the 'X' features aren't scaled for any model. `scale_list` can be
True: in this case the 'X' features are scaled for all the models.
test_size: float
Decimal number between 0 and 1, which indicates the proportion of the test set (for each dataset).
time_series: bool
Indicates if the given datasets are time series dataset (i.e. datasets indexed by days).
(This affects the computing of the validation score).
random_state: int
Used in the training-test splitting of the datasets.
n_folds: int
Indicates how many folds are made in order to compute the k-fold cross validation.
(It's used only if `time_series` is False).
regr: bool
Indicates if it's either a regression or a classification problem.
plot: bool
Indicates whether to plot or not the validation score values of the datasets (i.e. this is the 'main' plot).
plot_train: bool
Indicates whether to plot also the training scores (both in the 'main' and 'secondary' plots).
xvalues: list (in general, iterable)
Values that have to be put in the x axis of the 'main' plot.
xlabel: str
Label of the x axis of the 'main' plot.
title: str
Title of the 'main' plot.
figsize: tuple
Two dimensions of the 'main' plot.
verbose: bool
If True, for each dataset the validation scores of the models are plotted (i.e. these are the 'secondary' plots).
(See 'models_validation').
figsize_verbose: tuple
Two dimensions of the 'secondary' plots.
Returns
----------
datasets_train_val_score: np.array
Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation
scores.
It has as many rows as the number of datasets to test, i.e. as the number of elements in `dataset_list`.
datasets_best_model: list
List which has as many elements as the number of the datasets (i.e. number of elements in `dataset_list`). For
each dataset, it contains the best model for that dataset.
        More precisely, it is a list of triples:
- the first element is the index of `model_paramGrid_list` which indicates the best model;
- the second element is the mnemonic name of the best model;
- the third element is the best combination of hyperparameters values on that best model (i.e. it's a dictionary
which has as keys the hyperparameters names and as values their associated values).
best_index: int
Index of `dataset_list` that indicates which is the best dataset.
test_score: float
Test score associated with the best couple dataset-model.
axes: list
List of the matplotlib Axes where the plots have been made.
Firstly, the 'secondary' plots are put (if any). And, as last, the 'main' plot is put (if any).
If no plot has been made, `axes` is an empty list.
See also
----------
models_validation: select the best model on the given dataset.
Notes
----------
- If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best
couple dataset-model is the one associated with the minimum validation score.
Otherwise, the validation scores are accuracies: this means that the best couple is the one associated with the
maximum validation score.
- If `time_series` is False, the training-test splitting of each dataset is made randomly. In addition, the cross
validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`.
Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting each dataset into two
contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.
"""
# numpy matrix (i.e. np.array) which has as many rows as the datasets, and it has the training and validation scores as
# columns. At the beginning it is a list.
datasets_train_val_score = []
# List which contains, for each dataset, the best model. I.e. there is the triple index-model name-best combination of
# hyperparameters values
datasets_best_model = []
# List which contains, for each dataset, its test score (associated to the best model)
datasets_test_score = []
# List of axes
axes = []
for i,dataset in enumerate(dataset_list): # Iterate through all the datasets
X,y = dataset
# Perform the models validation on the current dataset
models_train_val_score, models_best_params, best_index, test_score, ax = models_validation(X, y,
model_paramGrid_list,
scale_list=scale_list,
test_size=test_size,
time_series=time_series,
random_state=random_state,
n_folds=n_folds,
regr=regr, plot=verbose,
plot_train=plot_train,
xlabel="Models",
title=("Dataset "+str(i)+
" : models validation"),
figsize=figsize_verbose)
datasets_train_val_score.append(tuple(models_train_val_score[best_index,:])) # Add the row related to that dataset
# Add the element related to that dataset
datasets_best_model.append((best_index,model_paramGrid_list[best_index][0],models_best_params[best_index]))
datasets_test_score.append(test_score) # Add the element related to that dataset
if ax:
axes.append(ax)
datasets_train_val_score = np.array(datasets_train_val_score) # Transform into numpy
# Find the best index, i.e. the best dataset (more precisely, the best couple dataset-model)
if regr:
best_index = np.argmin(datasets_train_val_score,axis=0)[1]
else:
best_index = np.argmax(datasets_train_val_score,axis=0)[1]
# Test score of the best couple dataset-model
test_score = datasets_test_score[best_index]
if(plot): # Make the plot
if not xvalues: # Default values on the x axis
xvalues = range(len(dataset_list))
ax = _plot_TrainVal_values(xvalues,datasets_train_val_score,plot_train,xlabel,title,figsize, bar=True)
axes.append(ax)
return datasets_train_val_score, datasets_best_model, best_index, test_score, axes
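# A hedged usage sketch for `datasets_models_validation` (illustrative addition): two synthetic
# datasets and two candidate models with their grids. The datasets, models and grids are all
# assumptions; the call simply shows how the two-level selection is invoked.
def _example_datasets_models_validation():
    from sklearn.datasets import make_regression
    from sklearn.linear_model import Ridge
    from sklearn.neighbors import KNeighborsRegressor
    dataset_list = [make_regression(n_samples=200, n_features=5, noise=10, random_state=s)
                    for s in (0, 1)]
    model_paramGrid_list = [
        ("ridge", Ridge(), {"alpha": [0.1, 1, 10]}),
        ("knn", KNeighborsRegressor(), {"n_neighbors": [3, 5, 7]}),
    ]
    scores, best_models, best_index, test_score, axes = datasets_models_validation(
        dataset_list, model_paramGrid_list, scale_list=True, plot=True, verbose=True)
    print("best dataset:", best_index, "best model:", best_models[best_index][1])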
| 52.110855
| 130
| 0.664643
|
import matplotlib.pyplot as plt
import numpy as np
from sklearn.utils import resample
from sklearn.model_selection import train_test_split, cross_val_score, TimeSeriesSplit, GridSearchCV
from sklearn.metrics import mean_squared_error, accuracy_score
from sklearn.preprocessing import MinMaxScaler, PolynomialFeatures
from sklearn.base import BaseEstimator
from sklearn.linear_model import LinearRegression
class PolynomialRegression(BaseEstimator):
def __init__(self, degree=1):
self.degree=degree
def fit(self, X, y):
self.poly_transformer = PolynomialFeatures(self.degree, include_bias=False)
self.poly_transformer.fit(X)
X = self.poly_transformer.transform(X)
self.model = LinearRegression(fit_intercept=True)
self.model.fit(X,y)
return self
def predict(self, X):
X = self.poly_transformer.transform(X)
return self.model.predict(X)
def get_params(self, deep=True):
return {"degree": self.degree}
def set_params(self, **parameters):
for parameter, value in parameters.items():
setattr(self, parameter, value)
return self
def compute_train_val_test(X, y, model, scale=False, test_size=0.2, time_series=False, random_state=123, n_folds=5,
regr=True):
if regr:
scoring="neg_mean_squared_error"
else:
scoring="accuracy"
if not time_series :
X_train_80, X_test, y_train_80, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state)
else:
train_len = int(X.shape[0]*(1-test_size))
X_train_80 = X[:train_len]
y_train_80 = y[:train_len]
X_test = X[train_len:]
y_test = y[train_len:]
if(scale):
scaler = MinMaxScaler()
scaler.fit(X_train_80)
X_train_80 = scaler.transform(X_train_80)
X_test = scaler.transform(X_test)
if not time_series:
cv = n_folds
else:
cv = TimeSeriesSplit(n_splits = n_folds)
scores = cross_val_score(model, X_train_80, y_train_80, cv=cv, scoring=scoring)
val_score = scores.mean()
if regr:
val_score = -val_score
model.fit(X_train_80,y_train_80)
train_score=0
test_score=0
if regr:
train_score = mean_squared_error(y_true=y_train_80, y_pred=model.predict(X_train_80))
test_score = mean_squared_error(y_true=y_test, y_pred=model.predict(X_test))
else:
train_score = accuracy_score(y_true=y_train_80, y_pred=model.predict(X_train_80))
test_score = accuracy_score(y_true=y_test, y_pred=model.predict(X_test))
return train_score, val_score, test_score
def compute_bias_variance_error(X, y, model, scale=False, N_TESTS = 20, sample_size=0.67):
if(scale):
scaler = MinMaxScaler()
scaler.fit(X)
X = scaler.transform(X)
vector_ypred = []
for i in range(N_TESTS):
Xs, ys = resample(X,y, n_samples=int(sample_size*len(y)) )
model.fit(Xs,ys)
vector_ypred.append(list(model.predict(X)))
vector_ypred = np.array(vector_ypred)
vector_bias = (y - np.mean(vector_ypred, axis=0))**2
vector_variance = np.var(vector_ypred, axis=0)
vector_error = np.sum((vector_ypred - y)**2, axis=0)/N_TESTS
bias = np.mean(vector_bias)
variance = np.mean(vector_variance)
error = np.mean(vector_error)
return bias,variance,error
def plot_predictions(X, y, model, scale=False, test_size=0.2, plot_type=0, xvalues=None, xlabel="Index",
title="Actual vs Predicted values", figsize=(6,6)):
train_len = int(X.shape[0]*(1-test_size))
X_train_80 = X[:train_len]
y_train_80 = y[:train_len]
X_test = X[train_len:]
y_test = y[train_len:]
if(scale):
scaler = MinMaxScaler()
scaler.fit(X_train_80)
X_train_80 = scaler.transform(X_train_80)
X_test = scaler.transform(X_test)
model.fit(X_train_80,y_train_80)
predictions = model.predict(X_test)
fig, ax = plt.subplots(figsize=figsize)
if plot_type==0:
if xvalues is None:
xvalues=range(len(X))
ax.plot(xvalues,y, 'o:', label='actual values')
ax.plot(xvalues[train_len:],predictions, 'o:', label='predicted values')
ax.legend()
elif plot_type==1:
ax.plot(y[train_len:],predictions,'o')
ax.plot([0, 1], [0, 1], 'r-',transform=ax.transAxes)
xlabel="Actual values"
ax.set_ylabel("Predicted values")
ax.set_xlabel(xlabel)
ax.set_title(title)
ax.grid()
return ax
def _plot_TrainVal_values(xvalues, train_val_scores, plot_train, xlabel, title, figsize=(6,6), bar=False):
fig, ax = plt.subplots(figsize=figsize)
if not bar:
if plot_train:
ax.plot(xvalues,train_val_scores[:,0], 'o:', label='Train')
ax.plot(xvalues,train_val_scores[:,1], 'o:', label='Validation')
else:
if plot_train:
x = np.arange(len(xvalues))
width = 0.35
ax.bar(x-width/2,train_val_scores[:,0], width=width, label='Train')
ax.bar(x+width/2,train_val_scores[:,1], width=width, label='Validation')
ax.set_xticks(x)
ax.set_xticklabels(xvalues)
else:
ax.bar(xvalues,train_val_scores[:,1],label='Validation')
ax.set_xlabel(xlabel)
ax.set_title(title)
ax.grid()
ax.legend()
return ax
def hyperparameter_validation(X, y, model, hyperparameter, hyperparameter_values, scale=False, test_size=0.2,
time_series=False, random_state=123, n_folds=5, regr=True, plot=False, plot_train=False,
xvalues=None, xlabel=None, title="Hyperparameter validation", figsize=(6,6)):
param_grid = {hyperparameter:hyperparameter_values}
params, train_val_scores, best_index, test_score = hyperparameters_validation(X, y, model, param_grid, scale=scale,
test_size=test_size,
time_series=time_series,
random_state=random_state, n_folds=n_folds,
regr=regr)
ax = None
if(plot):
if not xvalues:
xvalues = hyperparameter_values
if not xlabel:
xlabel = hyperparameter
ax = _plot_TrainVal_values(xvalues, train_val_scores, plot_train, xlabel, title, figsize)
return train_val_scores, best_index, test_score, ax
def hyperparameters_validation(X, y, model, param_grid, scale=False, test_size=0.2, time_series=False, random_state=123,
n_folds=5, regr=True):
if regr:
scoring="neg_mean_squared_error"
else:
scoring="accuracy"
if not time_series :
X_train_80, X_test, y_train_80, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state)
else:
train_len = int(X.shape[0]*(1-test_size))
X_train_80 = X[:train_len]
y_train_80 = y[:train_len]
X_test = X[train_len:]
y_test = y[train_len:]
if(scale):
scaler = MinMaxScaler()
scaler.fit(X_train_80)
X_train_80 = scaler.transform(X_train_80)
X_test = scaler.transform(X_test)
if not time_series:
cv = n_folds
else:
cv = TimeSeriesSplit(n_splits = n_folds)
grid_search = GridSearchCV(model,param_grid,scoring=scoring,cv=cv,return_train_score=True)
grid_search.fit(X_train_80,y_train_80)
params = grid_search.cv_results_["params"]
train_scores = grid_search.cv_results_["mean_train_score"]
val_scores = grid_search.cv_results_["mean_test_score"]
best_index = grid_search.best_index_
best_model = grid_search.best_estimator_
if regr:
train_scores = train_scores*(-1)
val_scores = val_scores*(-1)
train_val_scores = np.concatenate((train_scores.reshape(-1,1), val_scores.reshape(-1,1)), axis=1)
best_model.fit(X_train_80,y_train_80)
test_score=0
if regr:
test_score = mean_squared_error(y_true=y_test, y_pred=best_model.predict(X_test))
else:
test_score = accuracy_score(y_true=y_test, y_pred=best_model.predict(X_test))
return params, train_val_scores, best_index, test_score
def models_validation(X, y, model_paramGrid_list, scale_list=None, test_size=0.2, time_series=False, random_state=123,
n_folds=5, regr=True, plot=False, plot_train=False, xvalues=None, xlabel="Models",
title="Models validation", figsize=(6,6)):
if not scale_list:
scale_list = [False]*len(model_paramGrid_list)
elif scale_list is True:
scale_list = [True]*len(model_paramGrid_list)
models_train_val_score = []
models_best_params = []
models_test_score = []
for i,triple in enumerate(model_paramGrid_list):
model,param_grid = triple[1:]
params, train_val_scores, best_index, test_score = hyperparameters_validation(X, y, model, param_grid,
scale=scale_list[i],
test_size=test_size,
time_series=time_series,
random_state=random_state,
n_folds=n_folds, regr=regr)
models_train_val_score.append(tuple(train_val_scores[best_index]))
models_best_params.append(params[best_index])
models_test_score.append(test_score)
models_train_val_score = np.array(models_train_val_score)
if regr:
best_index = np.argmin(models_train_val_score,axis=0)[1]
else:
best_index = np.argmax(models_train_val_score,axis=0)[1]
test_score = models_test_score[best_index]
ax = None
if(plot):
if not xvalues:
xvalues = [model_paramGrid_list[i][0] for i in range(len(model_paramGrid_list))]
ax = _plot_TrainVal_values(xvalues, models_train_val_score, plot_train, xlabel, title, figsize, bar=True)
return models_train_val_score, models_best_params, best_index, test_score, ax
def datasets_hyperparameter_validation(dataset_list, model, hyperparameter, hyperparameter_values, scale=False,
test_size=0.2, time_series=False, random_state=123, n_folds=5, regr=True, plot=False,
plot_train=False, xvalues=None, xlabel="Datasets", title="Datasets validation",
figsize=(6,6) ,verbose=False, figsize_verbose=(6,6)):
datasets_train_val_score = []
datasets_best_hyperparameter_value = []
datasets_test_score = []
axes = []
for i,dataset in enumerate(dataset_list):
X,y = dataset
train_val_scores, best_index, test_score, ax = hyperparameter_validation(X, y, model, hyperparameter,
hyperparameter_values, scale=scale, test_size=test_size, time_series=time_series,
random_state=random_state, n_folds=n_folds, regr=regr, plot=verbose, plot_train=plot_train,
xvalues=hyperparameter_values, xlabel=hyperparameter,
title="Dataset "+str(i)+" : hyperparameter validation", figsize=figsize_verbose)
datasets_train_val_score.append(tuple(train_val_scores[best_index,:]))
datasets_best_hyperparameter_value.append(hyperparameter_values[best_index])
datasets_test_score.append(test_score)
if ax:
axes.append(ax)
datasets_train_val_score = np.array(datasets_train_val_score)
if regr:
best_index = np.argmin(datasets_train_val_score,axis=0)[1]
else:
best_index = np.argmax(datasets_train_val_score,axis=0)[1]
test_score = datasets_test_score[best_index]
if(plot):
if not xvalues:
xvalues = range(len(dataset_list))
ax = _plot_TrainVal_values(xvalues,datasets_train_val_score,plot_train,xlabel,title,figsize, bar=True)
axes.append(ax)
return datasets_train_val_score, datasets_best_hyperparameter_value, best_index, test_score, axes
def datasets_hyperparameters_validation(dataset_list, model, param_grid, scale=False, test_size=0.2, time_series=False,
random_state=123, n_folds=5, regr=True, plot=False, plot_train=False, xvalues=None,
xlabel="Datasets", title="Datasets validation",figsize=(6,6)):
datasets_train_val_score = []
datasets_best_params = []
datasets_test_score = []
for X,y in dataset_list:
params, train_val_scores, best_index, test_score = hyperparameters_validation(X, y, model, param_grid, scale=scale,
test_size=test_size,
time_series=time_series,
random_state=random_state,
n_folds=n_folds, regr=regr)
datasets_train_val_score.append(tuple(train_val_scores[best_index,:]))
datasets_best_params.append(params[best_index])
datasets_test_score.append(test_score)
datasets_train_val_score = np.array(datasets_train_val_score)
if regr:
best_index = np.argmin(datasets_train_val_score,axis=0)[1]
else:
best_index = np.argmax(datasets_train_val_score,axis=0)[1]
test_score = datasets_test_score[best_index]
ax = None
if(plot):
if not xvalues:
xvalues = range(len(dataset_list))
ax = _plot_TrainVal_values(xvalues,datasets_train_val_score,plot_train,xlabel,title,figsize, bar=True)
return datasets_train_val_score, datasets_best_params, best_index, test_score, ax
def datasets_models_validation(dataset_list, model_paramGrid_list, scale_list=None, test_size=0.2, time_series=False,
random_state=123, n_folds=5, regr=True, plot=False, plot_train=False, xvalues=None,
xlabel="Datasets", title="Datasets validation", figsize=(6,6) ,verbose=False,
figsize_verbose=(6,6)):
datasets_train_val_score = []
datasets_best_model = []
datasets_test_score = []
axes = []
for i,dataset in enumerate(dataset_list):
X,y = dataset
models_train_val_score, models_best_params, best_index, test_score, ax = models_validation(X, y,
model_paramGrid_list,
scale_list=scale_list,
test_size=test_size,
time_series=time_series,
random_state=random_state,
n_folds=n_folds,
regr=regr, plot=verbose,
plot_train=plot_train,
xlabel="Models",
title=("Dataset "+str(i)+
" : models validation"),
figsize=figsize_verbose)
datasets_train_val_score.append(tuple(models_train_val_score[best_index,:]))
datasets_best_model.append((best_index,model_paramGrid_list[best_index][0],models_best_params[best_index]))
datasets_test_score.append(test_score)
if ax:
axes.append(ax)
datasets_train_val_score = np.array(datasets_train_val_score)
if regr:
best_index = np.argmin(datasets_train_val_score,axis=0)[1]
else:
best_index = np.argmax(datasets_train_val_score,axis=0)[1]
test_score = datasets_test_score[best_index]
if(plot):
if not xvalues:
xvalues = range(len(dataset_list))
ax = _plot_TrainVal_values(xvalues,datasets_train_val_score,plot_train,xlabel,title,figsize, bar=True)
axes.append(ax)
return datasets_train_val_score, datasets_best_model, best_index, test_score, axes
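An illustrative usage sketch for the validation helpers above (not part of the original file): it assumes the functions are in scope together with their numpy/scikit-learn imports, and tunes one hyperparameter of a stock regressor on synthetic data.
import numpy as np
from sklearn.tree import DecisionTreeRegressor

# Synthetic regression data (placeholder values, for illustration only).
rng = np.random.RandomState(0)
X = rng.rand(200, 3)
y = 2.0 * X[:, 0] + rng.normal(scale=0.1, size=200)

# Grid-search max_depth with 5-fold CV, then score the refit model on the held-out 20%.
train_val_scores, best_index, test_score, ax = hyperparameter_validation(
    X, y, DecisionTreeRegressor(random_state=0),
    hyperparameter="max_depth", hyperparameter_values=[2, 4, 8],
    regr=True, plot=False)
print("best max_depth:", [2, 4, 8][best_index], "test MSE:", test_score)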
| true
| true
|
7902bb213fbc19165021f2e2d67ce2e3cb89f9a8
| 17,735
|
py
|
Python
|
test/test_art.py
|
stragu/beets
|
da46a62772ab7a88c5799c84841f744dfc0f0a20
|
[
"MIT"
] | null | null | null |
test/test_art.py
|
stragu/beets
|
da46a62772ab7a88c5799c84841f744dfc0f0a20
|
[
"MIT"
] | null | null | null |
test/test_art.py
|
stragu/beets
|
da46a62772ab7a88c5799c84841f744dfc0f0a20
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for the album art fetchers."""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import os
import shutil
import responses
from mock import patch
from test import _common
from test._common import unittest
from beetsplug import fetchart
from beets.autotag import AlbumInfo, AlbumMatch
from beets import library
from beets import importer
from beets import config
from beets import logging
from beets import util
from beets.util.artresizer import ArtResizer, WEBPROXY
logger = logging.getLogger('beets.test_art')
class UseThePlugin(_common.TestCase):
def setUp(self):
super(UseThePlugin, self).setUp()
self.plugin = fetchart.FetchArtPlugin()
class FetchImageTest(UseThePlugin):
@responses.activate
def run(self, *args, **kwargs):
super(FetchImageTest, self).run(*args, **kwargs)
def mock_response(self, content_type):
responses.add(responses.GET, 'http://example.com',
content_type=content_type)
def test_invalid_type_returns_none(self):
self.mock_response('image/watercolour')
artpath = self.plugin._fetch_image('http://example.com')
self.assertEqual(artpath, None)
def test_jpeg_type_returns_path(self):
self.mock_response('image/jpeg')
artpath = self.plugin._fetch_image('http://example.com')
self.assertNotEqual(artpath, None)
class FSArtTest(UseThePlugin):
def setUp(self):
super(FSArtTest, self).setUp()
self.dpath = os.path.join(self.temp_dir, 'arttest')
os.mkdir(self.dpath)
self.source = fetchart.FileSystem(logger, self.plugin.config)
def test_finds_jpg_in_directory(self):
_common.touch(os.path.join(self.dpath, 'a.jpg'))
fn = self.source.get(self.dpath, ('art',), False)
self.assertEqual(fn, os.path.join(self.dpath, 'a.jpg'))
def test_appropriately_named_file_takes_precedence(self):
_common.touch(os.path.join(self.dpath, 'a.jpg'))
_common.touch(os.path.join(self.dpath, 'art.jpg'))
fn = self.source.get(self.dpath, ('art',), False)
self.assertEqual(fn, os.path.join(self.dpath, 'art.jpg'))
def test_non_image_file_not_identified(self):
_common.touch(os.path.join(self.dpath, 'a.txt'))
fn = self.source.get(self.dpath, ('art',), False)
self.assertEqual(fn, None)
def test_cautious_skips_fallback(self):
_common.touch(os.path.join(self.dpath, 'a.jpg'))
fn = self.source.get(self.dpath, ('art',), True)
self.assertEqual(fn, None)
def test_empty_dir(self):
fn = self.source.get(self.dpath, ('art',), True)
self.assertEqual(fn, None)
def test_precedence_amongst_correct_files(self):
_common.touch(os.path.join(self.dpath, 'back.jpg'))
_common.touch(os.path.join(self.dpath, 'front.jpg'))
_common.touch(os.path.join(self.dpath, 'front-cover.jpg'))
fn = self.source.get(self.dpath, ('cover', 'front', 'back'), False)
self.assertEqual(fn, os.path.join(self.dpath, 'front-cover.jpg'))
class CombinedTest(UseThePlugin):
ASIN = 'xxxx'
MBID = 'releaseid'
AMAZON_URL = 'http://images.amazon.com/images/P/{0}.01.LZZZZZZZ.jpg' \
.format(ASIN)
AAO_URL = 'http://www.albumart.org/index_detail.php?asin={0}' \
.format(ASIN)
CAA_URL = 'http://coverartarchive.org/release/{0}/front' \
.format(MBID)
def setUp(self):
super(CombinedTest, self).setUp()
self.dpath = os.path.join(self.temp_dir, 'arttest')
os.mkdir(self.dpath)
@responses.activate
def run(self, *args, **kwargs):
super(CombinedTest, self).run(*args, **kwargs)
def mock_response(self, url, content_type='image/jpeg'):
responses.add(responses.GET, url, content_type=content_type)
def test_main_interface_returns_amazon_art(self):
self.mock_response(self.AMAZON_URL)
album = _common.Bag(asin=self.ASIN)
artpath = self.plugin.art_for_album(album, None)
self.assertNotEqual(artpath, None)
def test_main_interface_returns_none_for_missing_asin_and_path(self):
album = _common.Bag()
artpath = self.plugin.art_for_album(album, None)
self.assertEqual(artpath, None)
def test_main_interface_gives_precedence_to_fs_art(self):
_common.touch(os.path.join(self.dpath, 'art.jpg'))
self.mock_response(self.AMAZON_URL)
album = _common.Bag(asin=self.ASIN)
artpath = self.plugin.art_for_album(album, [self.dpath])
self.assertEqual(artpath, os.path.join(self.dpath, 'art.jpg'))
def test_main_interface_falls_back_to_amazon(self):
self.mock_response(self.AMAZON_URL)
album = _common.Bag(asin=self.ASIN)
artpath = self.plugin.art_for_album(album, [self.dpath])
self.assertNotEqual(artpath, None)
self.assertFalse(artpath.startswith(self.dpath))
def test_main_interface_tries_amazon_before_aao(self):
self.mock_response(self.AMAZON_URL)
album = _common.Bag(asin=self.ASIN)
self.plugin.art_for_album(album, [self.dpath])
self.assertEqual(len(responses.calls), 1)
self.assertEqual(responses.calls[0].request.url, self.AMAZON_URL)
def test_main_interface_falls_back_to_aao(self):
self.mock_response(self.AMAZON_URL, content_type='text/html')
album = _common.Bag(asin=self.ASIN)
self.plugin.art_for_album(album, [self.dpath])
self.assertEqual(responses.calls[-1].request.url, self.AAO_URL)
def test_main_interface_uses_caa_when_mbid_available(self):
self.mock_response(self.CAA_URL)
album = _common.Bag(mb_albumid=self.MBID, asin=self.ASIN)
artpath = self.plugin.art_for_album(album, None)
self.assertNotEqual(artpath, None)
self.assertEqual(len(responses.calls), 1)
self.assertEqual(responses.calls[0].request.url, self.CAA_URL)
def test_local_only_does_not_access_network(self):
album = _common.Bag(mb_albumid=self.MBID, asin=self.ASIN)
artpath = self.plugin.art_for_album(album, [self.dpath],
local_only=True)
self.assertEqual(artpath, None)
self.assertEqual(len(responses.calls), 0)
def test_local_only_gets_fs_image(self):
_common.touch(os.path.join(self.dpath, 'art.jpg'))
album = _common.Bag(mb_albumid=self.MBID, asin=self.ASIN)
artpath = self.plugin.art_for_album(album, [self.dpath],
local_only=True)
self.assertEqual(artpath, os.path.join(self.dpath, 'art.jpg'))
self.assertEqual(len(responses.calls), 0)
class AAOTest(UseThePlugin):
ASIN = 'xxxx'
AAO_URL = 'http://www.albumart.org/index_detail.php?asin={0}'.format(ASIN)
def setUp(self):
super(AAOTest, self).setUp()
self.source = fetchart.AlbumArtOrg(logger, self.plugin.config)
@responses.activate
def run(self, *args, **kwargs):
super(AAOTest, self).run(*args, **kwargs)
def mock_response(self, url, body):
responses.add(responses.GET, url, body=body, content_type='text/html',
match_querystring=True)
def test_aao_scraper_finds_image(self):
body = b"""
<br />
<a href=\"TARGET_URL\" title=\"View larger image\"
class=\"thickbox\" style=\"color: #7E9DA2; text-decoration:none;\">
<img src=\"http://www.albumart.org/images/zoom-icon.jpg\"
alt=\"View larger image\" width=\"17\" height=\"15\" border=\"0\"/></a>
"""
self.mock_response(self.AAO_URL, body)
album = _common.Bag(asin=self.ASIN)
res = self.source.get(album)
self.assertEqual(list(res)[0], 'TARGET_URL')
def test_aao_scraper_returns_no_result_when_no_image_present(self):
self.mock_response(self.AAO_URL, b'blah blah')
album = _common.Bag(asin=self.ASIN)
res = self.source.get(album)
self.assertEqual(list(res), [])
class GoogleImageTest(UseThePlugin):
def setUp(self):
super(GoogleImageTest, self).setUp()
self.source = fetchart.GoogleImages(logger, self.plugin.config)
@responses.activate
def run(self, *args, **kwargs):
super(GoogleImageTest, self).run(*args, **kwargs)
def mock_response(self, url, json):
responses.add(responses.GET, url, body=json,
content_type='application/json')
def test_google_art_finds_image(self):
album = _common.Bag(albumartist="some artist", album="some album")
json = b'{"items": [{"link": "url_to_the_image"}]}'
self.mock_response(fetchart.GoogleImages.URL, json)
result_url = self.source.get(album)
self.assertEqual(list(result_url)[0], 'url_to_the_image')
def test_google_art_returns_no_result_when_error_received(self):
album = _common.Bag(albumartist="some artist", album="some album")
json = b'{"error": {"errors": [{"reason": "some reason"}]}}'
self.mock_response(fetchart.GoogleImages.URL, json)
result_url = self.source.get(album)
self.assertEqual(list(result_url), [])
def test_google_art_returns_no_result_with_malformed_response(self):
album = _common.Bag(albumartist="some artist", album="some album")
json = b"""bla blup"""
self.mock_response(fetchart.GoogleImages.URL, json)
result_url = self.source.get(album)
self.assertEqual(list(result_url), [])
@_common.slow_test()
class ArtImporterTest(UseThePlugin):
def setUp(self):
super(ArtImporterTest, self).setUp()
# Mock the album art fetcher to always return our test file.
self.art_file = os.path.join(self.temp_dir, 'tmpcover.jpg')
_common.touch(self.art_file)
self.old_afa = self.plugin.art_for_album
self.afa_response = self.art_file
def art_for_album(i, p, local_only=False):
return self.afa_response
self.plugin.art_for_album = art_for_album
# Test library.
self.libpath = os.path.join(self.temp_dir, 'tmplib.blb')
self.libdir = os.path.join(self.temp_dir, 'tmplib')
os.mkdir(self.libdir)
os.mkdir(os.path.join(self.libdir, 'album'))
itempath = os.path.join(self.libdir, 'album', 'test.mp3')
shutil.copyfile(os.path.join(_common.RSRC, 'full.mp3'), itempath)
self.lib = library.Library(self.libpath)
self.i = _common.item()
self.i.path = itempath
self.album = self.lib.add_album([self.i])
self.lib._connection().commit()
# The import configuration.
self.session = _common.import_session(self.lib)
# Import task for the coroutine.
self.task = importer.ImportTask(None, None, [self.i])
self.task.is_album = True
self.task.album = self.album
info = AlbumInfo(
album='some album',
album_id='albumid',
artist='some artist',
artist_id='artistid',
tracks=[],
)
self.task.set_choice(AlbumMatch(0, info, {}, set(), set()))
def tearDown(self):
self.lib._connection().close()
super(ArtImporterTest, self).tearDown()
self.plugin.art_for_album = self.old_afa
def _fetch_art(self, should_exist):
"""Execute the fetch_art coroutine for the task and return the
album's resulting artpath. ``should_exist`` specifies whether to
assert that the art path was set (to the correct value) or that
the path was not set.
"""
# Execute the two relevant parts of the importer.
self.plugin.fetch_art(self.session, self.task)
self.plugin.assign_art(self.session, self.task)
artpath = self.lib.albums()[0].artpath
if should_exist:
self.assertEqual(
artpath,
os.path.join(os.path.dirname(self.i.path), 'cover.jpg')
)
self.assertExists(artpath)
else:
self.assertEqual(artpath, None)
return artpath
def test_fetch_art(self):
assert not self.lib.albums()[0].artpath
self._fetch_art(True)
def test_art_not_found(self):
self.afa_response = None
self._fetch_art(False)
def test_no_art_for_singleton(self):
self.task.is_album = False
self._fetch_art(False)
def test_leave_original_file_in_place(self):
self._fetch_art(True)
self.assertExists(self.art_file)
def test_delete_original_file(self):
config['import']['delete'] = True
self._fetch_art(True)
self.assertNotExists(self.art_file)
def test_move_original_file(self):
config['import']['move'] = True
self._fetch_art(True)
self.assertNotExists(self.art_file)
def test_do_not_delete_original_if_already_in_place(self):
artdest = os.path.join(os.path.dirname(self.i.path), 'cover.jpg')
shutil.copyfile(self.art_file, artdest)
self.afa_response = artdest
self._fetch_art(True)
def test_fetch_art_if_imported_file_deleted(self):
# See #1126. Test the following scenario:
# - Album art imported, `album.artpath` set.
# - Imported album art file subsequently deleted (by user or other
# program).
# `fetchart` should import album art again instead of printing the
# message "<album> has album art".
self._fetch_art(True)
util.remove(self.album.artpath)
self.plugin.batch_fetch_art(self.lib, self.lib.albums(), force=False)
self.assertExists(self.album.artpath)
class ArtForAlbumTest(UseThePlugin):
""" Tests that fetchart.art_for_album respects the size
configuration (e.g., minwidth, enforce_ratio)
"""
IMG_225x225 = os.path.join(_common.RSRC, 'abbey.jpg')
IMG_348x348 = os.path.join(_common.RSRC, 'abbey-different.jpg')
IMG_500x490 = os.path.join(_common.RSRC, 'abbey-similar.jpg')
def setUp(self):
super(ArtForAlbumTest, self).setUp()
self.old_fs_source_get = self.plugin.fs_source.get
self.old_fetch_img = self.plugin._fetch_image
self.old_source_urls = self.plugin._source_urls
def fs_source_get(*_):
return self.image_file
def source_urls(_):
return ['']
def fetch_img(_):
return self.image_file
self.plugin.fs_source.get = fs_source_get
self.plugin._source_urls = source_urls
self.plugin._fetch_image = fetch_img
def tearDown(self):
self.plugin.fs_source.get = self.old_fs_source_get
self.plugin._source_urls = self.old_source_urls
self.plugin._fetch_image = self.old_fetch_img
super(ArtForAlbumTest, self).tearDown()
def _assertImageIsValidArt(self, image_file, should_exist):
self.assertExists(image_file)
self.image_file = image_file
local_artpath = self.plugin.art_for_album(None, [''], True)
remote_artpath = self.plugin.art_for_album(None, [], False)
self.assertEqual(local_artpath, remote_artpath)
if should_exist:
self.assertEqual(local_artpath, self.image_file)
self.assertExists(local_artpath)
return local_artpath
else:
self.assertIsNone(local_artpath)
def _assertImageResized(self, image_file, should_resize):
self.image_file = image_file
with patch.object(ArtResizer.shared, 'resize') as mock_resize:
self.plugin.art_for_album(None, [''], True)
self.assertEqual(mock_resize.called, should_resize)
def _require_backend(self):
"""Skip the test if the art resizer doesn't have ImageMagick or
PIL (so comparisons and measurements are unavailable).
"""
if ArtResizer.shared.method[0] == WEBPROXY:
self.skipTest("ArtResizer has no local imaging backend available")
def test_respect_minwidth(self):
self._require_backend()
self.plugin.minwidth = 300
self._assertImageIsValidArt(self.IMG_225x225, False)
self._assertImageIsValidArt(self.IMG_348x348, True)
def test_respect_enforce_ratio_yes(self):
self._require_backend()
self.plugin.enforce_ratio = True
self._assertImageIsValidArt(self.IMG_500x490, False)
self._assertImageIsValidArt(self.IMG_225x225, True)
def test_respect_enforce_ratio_no(self):
self.plugin.enforce_ratio = False
self._assertImageIsValidArt(self.IMG_500x490, True)
def test_resize_if_necessary(self):
self._require_backend()
self.plugin.maxwidth = 300
self._assertImageResized(self.IMG_225x225, False)
self._assertImageResized(self.IMG_348x348, True)
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == b'__main__':
unittest.main(defaultTest='suite')
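A standalone sketch of the `responses` mocking pattern these tests build on (illustrative, not taken from the beets sources): once a fake response is registered, any requests call inside the decorated function hits the mock instead of the network.
import requests
import responses

@responses.activate
def fetch_content_type():
    # Register a canned reply for the URL, then issue a normal requests call.
    responses.add(responses.GET, 'http://example.com', content_type='image/jpeg')
    return requests.get('http://example.com').headers['Content-Type']

print(fetch_content_type())  # -> image/jpeg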
| 37.574153
| 79
| 0.66287
|
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import os
import shutil
import responses
from mock import patch
from test import _common
from test._common import unittest
from beetsplug import fetchart
from beets.autotag import AlbumInfo, AlbumMatch
from beets import library
from beets import importer
from beets import config
from beets import logging
from beets import util
from beets.util.artresizer import ArtResizer, WEBPROXY
logger = logging.getLogger('beets.test_art')
class UseThePlugin(_common.TestCase):
def setUp(self):
super(UseThePlugin, self).setUp()
self.plugin = fetchart.FetchArtPlugin()
class FetchImageTest(UseThePlugin):
@responses.activate
def run(self, *args, **kwargs):
super(FetchImageTest, self).run(*args, **kwargs)
def mock_response(self, content_type):
responses.add(responses.GET, 'http://example.com',
content_type=content_type)
def test_invalid_type_returns_none(self):
self.mock_response('image/watercolour')
artpath = self.plugin._fetch_image('http://example.com')
self.assertEqual(artpath, None)
def test_jpeg_type_returns_path(self):
self.mock_response('image/jpeg')
artpath = self.plugin._fetch_image('http://example.com')
self.assertNotEqual(artpath, None)
class FSArtTest(UseThePlugin):
def setUp(self):
super(FSArtTest, self).setUp()
self.dpath = os.path.join(self.temp_dir, 'arttest')
os.mkdir(self.dpath)
self.source = fetchart.FileSystem(logger, self.plugin.config)
def test_finds_jpg_in_directory(self):
_common.touch(os.path.join(self.dpath, 'a.jpg'))
fn = self.source.get(self.dpath, ('art',), False)
self.assertEqual(fn, os.path.join(self.dpath, 'a.jpg'))
def test_appropriately_named_file_takes_precedence(self):
_common.touch(os.path.join(self.dpath, 'a.jpg'))
_common.touch(os.path.join(self.dpath, 'art.jpg'))
fn = self.source.get(self.dpath, ('art',), False)
self.assertEqual(fn, os.path.join(self.dpath, 'art.jpg'))
def test_non_image_file_not_identified(self):
_common.touch(os.path.join(self.dpath, 'a.txt'))
fn = self.source.get(self.dpath, ('art',), False)
self.assertEqual(fn, None)
def test_cautious_skips_fallback(self):
_common.touch(os.path.join(self.dpath, 'a.jpg'))
fn = self.source.get(self.dpath, ('art',), True)
self.assertEqual(fn, None)
def test_empty_dir(self):
fn = self.source.get(self.dpath, ('art',), True)
self.assertEqual(fn, None)
def test_precedence_amongst_correct_files(self):
_common.touch(os.path.join(self.dpath, 'back.jpg'))
_common.touch(os.path.join(self.dpath, 'front.jpg'))
_common.touch(os.path.join(self.dpath, 'front-cover.jpg'))
fn = self.source.get(self.dpath, ('cover', 'front', 'back'), False)
self.assertEqual(fn, os.path.join(self.dpath, 'front-cover.jpg'))
class CombinedTest(UseThePlugin):
ASIN = 'xxxx'
MBID = 'releaseid'
AMAZON_URL = 'http://images.amazon.com/images/P/{0}.01.LZZZZZZZ.jpg' \
.format(ASIN)
AAO_URL = 'http://www.albumart.org/index_detail.php?asin={0}' \
.format(ASIN)
CAA_URL = 'http://coverartarchive.org/release/{0}/front' \
.format(MBID)
def setUp(self):
super(CombinedTest, self).setUp()
self.dpath = os.path.join(self.temp_dir, 'arttest')
os.mkdir(self.dpath)
@responses.activate
def run(self, *args, **kwargs):
super(CombinedTest, self).run(*args, **kwargs)
def mock_response(self, url, content_type='image/jpeg'):
responses.add(responses.GET, url, content_type=content_type)
def test_main_interface_returns_amazon_art(self):
self.mock_response(self.AMAZON_URL)
album = _common.Bag(asin=self.ASIN)
artpath = self.plugin.art_for_album(album, None)
self.assertNotEqual(artpath, None)
def test_main_interface_returns_none_for_missing_asin_and_path(self):
album = _common.Bag()
artpath = self.plugin.art_for_album(album, None)
self.assertEqual(artpath, None)
def test_main_interface_gives_precedence_to_fs_art(self):
_common.touch(os.path.join(self.dpath, 'art.jpg'))
self.mock_response(self.AMAZON_URL)
album = _common.Bag(asin=self.ASIN)
artpath = self.plugin.art_for_album(album, [self.dpath])
self.assertEqual(artpath, os.path.join(self.dpath, 'art.jpg'))
def test_main_interface_falls_back_to_amazon(self):
self.mock_response(self.AMAZON_URL)
album = _common.Bag(asin=self.ASIN)
artpath = self.plugin.art_for_album(album, [self.dpath])
self.assertNotEqual(artpath, None)
self.assertFalse(artpath.startswith(self.dpath))
def test_main_interface_tries_amazon_before_aao(self):
self.mock_response(self.AMAZON_URL)
album = _common.Bag(asin=self.ASIN)
self.plugin.art_for_album(album, [self.dpath])
self.assertEqual(len(responses.calls), 1)
self.assertEqual(responses.calls[0].request.url, self.AMAZON_URL)
def test_main_interface_falls_back_to_aao(self):
self.mock_response(self.AMAZON_URL, content_type='text/html')
album = _common.Bag(asin=self.ASIN)
self.plugin.art_for_album(album, [self.dpath])
self.assertEqual(responses.calls[-1].request.url, self.AAO_URL)
def test_main_interface_uses_caa_when_mbid_available(self):
self.mock_response(self.CAA_URL)
album = _common.Bag(mb_albumid=self.MBID, asin=self.ASIN)
artpath = self.plugin.art_for_album(album, None)
self.assertNotEqual(artpath, None)
self.assertEqual(len(responses.calls), 1)
self.assertEqual(responses.calls[0].request.url, self.CAA_URL)
def test_local_only_does_not_access_network(self):
album = _common.Bag(mb_albumid=self.MBID, asin=self.ASIN)
artpath = self.plugin.art_for_album(album, [self.dpath],
local_only=True)
self.assertEqual(artpath, None)
self.assertEqual(len(responses.calls), 0)
def test_local_only_gets_fs_image(self):
_common.touch(os.path.join(self.dpath, 'art.jpg'))
album = _common.Bag(mb_albumid=self.MBID, asin=self.ASIN)
artpath = self.plugin.art_for_album(album, [self.dpath],
local_only=True)
self.assertEqual(artpath, os.path.join(self.dpath, 'art.jpg'))
self.assertEqual(len(responses.calls), 0)
class AAOTest(UseThePlugin):
ASIN = 'xxxx'
AAO_URL = 'http://www.albumart.org/index_detail.php?asin={0}'.format(ASIN)
def setUp(self):
super(AAOTest, self).setUp()
self.source = fetchart.AlbumArtOrg(logger, self.plugin.config)
@responses.activate
def run(self, *args, **kwargs):
super(AAOTest, self).run(*args, **kwargs)
def mock_response(self, url, body):
responses.add(responses.GET, url, body=body, content_type='text/html',
match_querystring=True)
def test_aao_scraper_finds_image(self):
body = b"""
<br />
<a href=\"TARGET_URL\" title=\"View larger image\"
class=\"thickbox\" style=\"color: #7E9DA2; text-decoration:none;\">
<img src=\"http://www.albumart.org/images/zoom-icon.jpg\"
alt=\"View larger image\" width=\"17\" height=\"15\" border=\"0\"/></a>
"""
self.mock_response(self.AAO_URL, body)
album = _common.Bag(asin=self.ASIN)
res = self.source.get(album)
self.assertEqual(list(res)[0], 'TARGET_URL')
def test_aao_scraper_returns_no_result_when_no_image_present(self):
self.mock_response(self.AAO_URL, b'blah blah')
album = _common.Bag(asin=self.ASIN)
res = self.source.get(album)
self.assertEqual(list(res), [])
class GoogleImageTest(UseThePlugin):
def setUp(self):
super(GoogleImageTest, self).setUp()
self.source = fetchart.GoogleImages(logger, self.plugin.config)
@responses.activate
def run(self, *args, **kwargs):
super(GoogleImageTest, self).run(*args, **kwargs)
def mock_response(self, url, json):
responses.add(responses.GET, url, body=json,
content_type='application/json')
def test_google_art_finds_image(self):
album = _common.Bag(albumartist="some artist", album="some album")
json = b'{"items": [{"link": "url_to_the_image"}]}'
self.mock_response(fetchart.GoogleImages.URL, json)
result_url = self.source.get(album)
self.assertEqual(list(result_url)[0], 'url_to_the_image')
def test_google_art_returns_no_result_when_error_received(self):
album = _common.Bag(albumartist="some artist", album="some album")
json = b'{"error": {"errors": [{"reason": "some reason"}]}}'
self.mock_response(fetchart.GoogleImages.URL, json)
result_url = self.source.get(album)
self.assertEqual(list(result_url), [])
def test_google_art_returns_no_result_with_malformed_response(self):
album = _common.Bag(albumartist="some artist", album="some album")
json = b"""bla blup"""
self.mock_response(fetchart.GoogleImages.URL, json)
result_url = self.source.get(album)
self.assertEqual(list(result_url), [])
@_common.slow_test()
class ArtImporterTest(UseThePlugin):
def setUp(self):
super(ArtImporterTest, self).setUp()
self.art_file = os.path.join(self.temp_dir, 'tmpcover.jpg')
_common.touch(self.art_file)
self.old_afa = self.plugin.art_for_album
self.afa_response = self.art_file
def art_for_album(i, p, local_only=False):
return self.afa_response
self.plugin.art_for_album = art_for_album
self.libpath = os.path.join(self.temp_dir, 'tmplib.blb')
self.libdir = os.path.join(self.temp_dir, 'tmplib')
os.mkdir(self.libdir)
os.mkdir(os.path.join(self.libdir, 'album'))
itempath = os.path.join(self.libdir, 'album', 'test.mp3')
shutil.copyfile(os.path.join(_common.RSRC, 'full.mp3'), itempath)
self.lib = library.Library(self.libpath)
self.i = _common.item()
self.i.path = itempath
self.album = self.lib.add_album([self.i])
self.lib._connection().commit()
self.session = _common.import_session(self.lib)
self.task = importer.ImportTask(None, None, [self.i])
self.task.is_album = True
self.task.album = self.album
info = AlbumInfo(
album='some album',
album_id='albumid',
artist='some artist',
artist_id='artistid',
tracks=[],
)
self.task.set_choice(AlbumMatch(0, info, {}, set(), set()))
def tearDown(self):
self.lib._connection().close()
super(ArtImporterTest, self).tearDown()
self.plugin.art_for_album = self.old_afa
def _fetch_art(self, should_exist):
self.plugin.fetch_art(self.session, self.task)
self.plugin.assign_art(self.session, self.task)
artpath = self.lib.albums()[0].artpath
if should_exist:
self.assertEqual(
artpath,
os.path.join(os.path.dirname(self.i.path), 'cover.jpg')
)
self.assertExists(artpath)
else:
self.assertEqual(artpath, None)
return artpath
def test_fetch_art(self):
assert not self.lib.albums()[0].artpath
self._fetch_art(True)
def test_art_not_found(self):
self.afa_response = None
self._fetch_art(False)
def test_no_art_for_singleton(self):
self.task.is_album = False
self._fetch_art(False)
def test_leave_original_file_in_place(self):
self._fetch_art(True)
self.assertExists(self.art_file)
def test_delete_original_file(self):
config['import']['delete'] = True
self._fetch_art(True)
self.assertNotExists(self.art_file)
def test_move_original_file(self):
config['import']['move'] = True
self._fetch_art(True)
self.assertNotExists(self.art_file)
def test_do_not_delete_original_if_already_in_place(self):
artdest = os.path.join(os.path.dirname(self.i.path), 'cover.jpg')
shutil.copyfile(self.art_file, artdest)
self.afa_response = artdest
self._fetch_art(True)
def test_fetch_art_if_imported_file_deleted(self):
self._fetch_art(True)
util.remove(self.album.artpath)
self.plugin.batch_fetch_art(self.lib, self.lib.albums(), force=False)
self.assertExists(self.album.artpath)
class ArtForAlbumTest(UseThePlugin):
IMG_225x225 = os.path.join(_common.RSRC, 'abbey.jpg')
IMG_348x348 = os.path.join(_common.RSRC, 'abbey-different.jpg')
IMG_500x490 = os.path.join(_common.RSRC, 'abbey-similar.jpg')
def setUp(self):
super(ArtForAlbumTest, self).setUp()
self.old_fs_source_get = self.plugin.fs_source.get
self.old_fetch_img = self.plugin._fetch_image
self.old_source_urls = self.plugin._source_urls
def fs_source_get(*_):
return self.image_file
def source_urls(_):
return ['']
def fetch_img(_):
return self.image_file
self.plugin.fs_source.get = fs_source_get
self.plugin._source_urls = source_urls
self.plugin._fetch_image = fetch_img
def tearDown(self):
self.plugin.fs_source.get = self.old_fs_source_get
self.plugin._source_urls = self.old_source_urls
self.plugin._fetch_image = self.old_fetch_img
super(ArtForAlbumTest, self).tearDown()
def _assertImageIsValidArt(self, image_file, should_exist):
self.assertExists(image_file)
self.image_file = image_file
local_artpath = self.plugin.art_for_album(None, [''], True)
remote_artpath = self.plugin.art_for_album(None, [], False)
self.assertEqual(local_artpath, remote_artpath)
if should_exist:
self.assertEqual(local_artpath, self.image_file)
self.assertExists(local_artpath)
return local_artpath
else:
self.assertIsNone(local_artpath)
def _assertImageResized(self, image_file, should_resize):
self.image_file = image_file
with patch.object(ArtResizer.shared, 'resize') as mock_resize:
self.plugin.art_for_album(None, [''], True)
self.assertEqual(mock_resize.called, should_resize)
def _require_backend(self):
if ArtResizer.shared.method[0] == WEBPROXY:
self.skipTest("ArtResizer has no local imaging backend available")
def test_respect_minwidth(self):
self._require_backend()
self.plugin.minwidth = 300
self._assertImageIsValidArt(self.IMG_225x225, False)
self._assertImageIsValidArt(self.IMG_348x348, True)
def test_respect_enforce_ratio_yes(self):
self._require_backend()
self.plugin.enforce_ratio = True
self._assertImageIsValidArt(self.IMG_500x490, False)
self._assertImageIsValidArt(self.IMG_225x225, True)
def test_respect_enforce_ratio_no(self):
self.plugin.enforce_ratio = False
self._assertImageIsValidArt(self.IMG_500x490, True)
def test_resize_if_necessary(self):
self._require_backend()
self.plugin.maxwidth = 300
self._assertImageResized(self.IMG_225x225, False)
self._assertImageResized(self.IMG_348x348, True)
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == b'__main__':
unittest.main(defaultTest='suite')
| true
| true
|
7902bb410a1d585a1dec814f872cb619765ef7b6
| 1,111
|
py
|
Python
|
azure-servicefabric/azure/servicefabric/models/wait_for_primary_placement_safety_check.py
|
v-Ajnava/azure-sdk-for-python
|
a1f6f80eb5869c5b710e8bfb66146546697e2a6f
|
[
"MIT"
] | null | null | null |
azure-servicefabric/azure/servicefabric/models/wait_for_primary_placement_safety_check.py
|
v-Ajnava/azure-sdk-for-python
|
a1f6f80eb5869c5b710e8bfb66146546697e2a6f
|
[
"MIT"
] | null | null | null |
azure-servicefabric/azure/servicefabric/models/wait_for_primary_placement_safety_check.py
|
v-Ajnava/azure-sdk-for-python
|
a1f6f80eb5869c5b710e8bfb66146546697e2a6f
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .partition_safety_check import PartitionSafetyCheck
class WaitForPrimaryPlacementSafetyCheck(PartitionSafetyCheck):
"""Safety check that waits for the primary replica that was moved out of the
node due to upgrade to be placed back again on that node.
:param kind: Constant filled by server.
:type kind: str
:param partition_id:
:type partition_id: str
"""
_validation = {
'kind': {'required': True},
}
def __init__(self, partition_id=None):
super(WaitForPrimaryPlacementSafetyCheck, self).__init__(partition_id=partition_id)
self.kind = 'WaitForPrimaryPlacement'
| 34.71875
| 91
| 0.633663
|
from .partition_safety_check import PartitionSafetyCheck
class WaitForPrimaryPlacementSafetyCheck(PartitionSafetyCheck):
_validation = {
'kind': {'required': True},
}
def __init__(self, partition_id=None):
super(WaitForPrimaryPlacementSafetyCheck, self).__init__(partition_id=partition_id)
self.kind = 'WaitForPrimaryPlacement'
| true
| true
|
7902bce9c4ad380033291c205f4cafacc3be37e2
| 749
|
py
|
Python
|
utils/color.py
|
WangYihang/Reverse-Shell-Manager
|
7c660b7a080dd1068139db7e061c626c66f94beb
|
[
"Unlicense"
] | 372
|
2017-10-23T03:22:57.000Z
|
2022-03-30T02:06:43.000Z
|
utils/color.py
|
SpaceRealms/Reverse-Shell-Manager
|
5bf429c278950d63a41fd51f01df495a3b870e94
|
[
"Unlicense"
] | 9
|
2017-10-26T06:59:45.000Z
|
2020-09-28T11:24:20.000Z
|
utils/color.py
|
SpaceRealms/Reverse-Shell-Manager
|
5bf429c278950d63a41fd51f01df495a3b870e94
|
[
"Unlicense"
] | 106
|
2017-10-24T05:33:57.000Z
|
2021-12-30T03:29:50.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def black(string):
return '\033[30m' + string + '\033[0m'
def blue(string):
return '\033[94m' + string + '\033[0m'
def gray(string):
return '\033[1;30m' + string + '\033[0m'
def green(string):
return '\033[92m' + string + '\033[0m'
def cyan(string):
return '\033[96m' + string + '\033[0m'
def lightPurple(string):
return '\033[94m' + string + '\033[0m'
def purple(string):
return '\033[95m' + string + '\033[0m'
def red(string):
return '\033[91m' + string + '\033[0m'
def underline(string):
return '\033[4m' + string + '\033[0m'
def white(string):
return '\033[0m' + string + '\033[0m'
def white_2(string):
return '\033[1m' + string + '\033[0m'
def yellow(string):
return '\033[93m' + string + '\033[0m'
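A tiny usage sketch for the colour helpers above (not part of the original file); the escape codes assume an ANSI-capable terminal.
print(green("[+]") + " connection established")
print(red("[-]") + " " + underline("connection closed"))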
| 19.205128
| 39
| 0.635514
|
def black(string):
return '\033[30m' + string + '\033[0m'
def blue(string):
return '\033[94m' + string + '\033[0m'
def gray(string):
return '\033[1;30m' + string + '\033[0m'
def green(string):
return '\033[92m' + string + '\033[0m'
def cyan(string):
return '\033[96m' + string + '\033[0m'
def lightPurple(string):
return '\033[94m' + string + '\033[0m'
def purple(string):
return '\033[95m' + string + '\033[0m'
def red(string):
return '\033[91m' + string + '\033[0m'
def underline(string):
return '\033[4m' + string + '\033[0m'
def white(string):
return '\033[0m' + string + '\033[0m'
def white_2(string):
return '\033[1m' + string + '\033[0m'
def yellow(string):
return '\033[93m' + string + '\033[0m'
| true
| true
|
7902bd4370ad3d055382dfc98a2c7333e9bbb30d
| 2,395
|
py
|
Python
|
venv/lib/python3.9/site-packages/markdown/extensions/meta.py
|
qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3
|
630dcef73e6a258b6e9a52f934e2dd912ce741f8
|
[
"Apache-2.0"
] | 182
|
2017-03-05T07:43:13.000Z
|
2022-03-15T13:09:07.000Z
|
venv/lib/python3.9/site-packages/markdown/extensions/meta.py
|
qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3
|
630dcef73e6a258b6e9a52f934e2dd912ce741f8
|
[
"Apache-2.0"
] | 15
|
2018-05-02T11:05:30.000Z
|
2018-05-11T20:51:27.000Z
|
env/lib/python3.6/site-packages/markdown/extensions/meta.py
|
bcss-pm/incidents
|
927a102104b5718fe118bceb307d3cd633d6699b
|
[
"MIT"
] | 38
|
2017-04-26T14:13:37.000Z
|
2021-06-24T11:36:38.000Z
|
"""
Meta Data Extension for Python-Markdown
=======================================
This extension adds Meta Data handling to markdown.
See <https://Python-Markdown.github.io/extensions/meta_data>
for documentation.
Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com).
All changes Copyright 2008-2014 The Python Markdown Project
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..preprocessors import Preprocessor
import re
import logging
log = logging.getLogger('MARKDOWN')
# Global Vars
META_RE = re.compile(r'^[ ]{0,3}(?P<key>[A-Za-z0-9_-]+):\s*(?P<value>.*)')
META_MORE_RE = re.compile(r'^[ ]{4,}(?P<value>.*)')
BEGIN_RE = re.compile(r'^-{3}(\s.*)?')
END_RE = re.compile(r'^(-{3}|\.{3})(\s.*)?')
class MetaExtension (Extension):
""" Meta-Data extension for Python-Markdown. """
def extendMarkdown(self, md, md_globals):
""" Add MetaPreprocessor to Markdown instance. """
md.preprocessors.add("meta",
MetaPreprocessor(md),
">normalize_whitespace")
class MetaPreprocessor(Preprocessor):
""" Get Meta-Data. """
def run(self, lines):
""" Parse Meta-Data and store in Markdown.Meta. """
meta = {}
key = None
if lines and BEGIN_RE.match(lines[0]):
lines.pop(0)
while lines:
line = lines.pop(0)
m1 = META_RE.match(line)
if line.strip() == '' or END_RE.match(line):
break # blank line or end of YAML header - done
if m1:
key = m1.group('key').lower().strip()
value = m1.group('value').strip()
try:
meta[key].append(value)
except KeyError:
meta[key] = [value]
else:
m2 = META_MORE_RE.match(line)
if m2 and key:
# Add another line to existing key
meta[key].append(m2.group('value').strip())
else:
lines.insert(0, line)
break # no meta data - done
self.markdown.Meta = meta
return lines
def makeExtension(*args, **kwargs):
return MetaExtension(*args, **kwargs)
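An end-to-end sketch of the extension in use (illustrative, not part of the original module): enable it by name, convert a document with a metadata header, then read the parsed key/value lists; assumes the Python-Markdown package is installed.
import markdown

md = markdown.Markdown(extensions=['meta'])
html = md.convert("Title: Example Page\nAuthor: Jane Doe\n\nBody text here.")
# Keys are lower-cased and values are kept as lists,
# e.g. {'title': ['Example Page'], 'author': ['Jane Doe']}
print(md.Meta)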
| 30.316456
| 74
| 0.561169
|
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..preprocessors import Preprocessor
import re
import logging
log = logging.getLogger('MARKDOWN')
META_RE = re.compile(r'^[ ]{0,3}(?P<key>[A-Za-z0-9_-]+):\s*(?P<value>.*)')
META_MORE_RE = re.compile(r'^[ ]{4,}(?P<value>.*)')
BEGIN_RE = re.compile(r'^-{3}(\s.*)?')
END_RE = re.compile(r'^(-{3}|\.{3})(\s.*)?')
class MetaExtension (Extension):
def extendMarkdown(self, md, md_globals):
md.preprocessors.add("meta",
MetaPreprocessor(md),
">normalize_whitespace")
class MetaPreprocessor(Preprocessor):
def run(self, lines):
meta = {}
key = None
if lines and BEGIN_RE.match(lines[0]):
lines.pop(0)
while lines:
line = lines.pop(0)
m1 = META_RE.match(line)
if line.strip() == '' or END_RE.match(line):
break
if m1:
key = m1.group('key').lower().strip()
value = m1.group('value').strip()
try:
meta[key].append(value)
except KeyError:
meta[key] = [value]
else:
m2 = META_MORE_RE.match(line)
if m2 and key:
meta[key].append(m2.group('value').strip())
else:
lines.insert(0, line)
break
self.markdown.Meta = meta
return lines
def makeExtension(*args, **kwargs):
return MetaExtension(*args, **kwargs)
| true
| true
|
7902bda17a6f03db284d8ff883e0efa9e30816fa
| 817
|
py
|
Python
|
Lib/site-packages/wx-2.8-msw-unicode/wx/tools/Editra/src/__init__.py
|
ekkipermana/robotframework-test
|
243ca26f69962f8cf20cd7d054e0ff3e709bc7f4
|
[
"bzip2-1.0.6"
] | 27
|
2020-11-12T19:24:54.000Z
|
2022-03-27T23:10:45.000Z
|
Lib/site-packages/wx-2.8-msw-unicode/wx/tools/Editra/src/__init__.py
|
ekkipermana/robotframework-test
|
243ca26f69962f8cf20cd7d054e0ff3e709bc7f4
|
[
"bzip2-1.0.6"
] | 2
|
2020-11-02T06:30:39.000Z
|
2022-02-23T18:39:55.000Z
|
Lib/site-packages/wx-2.8-msw-unicode/wx/tools/Editra/src/__init__.py
|
ekkipermana/robotframework-test
|
243ca26f69962f8cf20cd7d054e0ff3e709bc7f4
|
[
"bzip2-1.0.6"
] | 7
|
2018-02-13T10:22:39.000Z
|
2019-07-04T07:39:28.000Z
|
###############################################################################
# Name: __init__.py #
# Purpose: Import the required base modules needed for launching Editra into #
# into the namespace. #
# Author: Cody Precord <cprecord@editra.org> #
# Copyright: (c) 2007 Cody Precord <staff@editra.org> #
# Licence: wxWindows Licence #
###############################################################################
"""Main package module"""
__author__ = "Cody Precord <cprecord@editra.org>"
__svnid__ = "$Id: __init__.py 49807 2007-11-10 07:08:33Z CJP $"
__revision__ = "$Revision: 49807 $"
| 58.357143
| 79
| 0.383109
| true
| true
|
|
7902bfe62c34045bcf5da4d2c7aed97cdc66b949
| 3,871
|
py
|
Python
|
alps/descriptors.py
|
michalporeba/alps-py
|
bfbfd048437e1fb77d253bc649d3965257dd557c
|
[
"MIT"
] | null | null | null |
alps/descriptors.py
|
michalporeba/alps-py
|
bfbfd048437e1fb77d253bc649d3965257dd557c
|
[
"MIT"
] | null | null | null |
alps/descriptors.py
|
michalporeba/alps-py
|
bfbfd048437e1fb77d253bc649d3965257dd557c
|
[
"MIT"
] | null | null | null |
from diogi.functions import *
from diogi.conventions import to_data
from .docs import WithDocsMixin
def noop_resolver(href: str) -> dict:
pass
class Descriptor:
@staticmethod
def parse(obj: any, resolver: callable):
if dict == type(obj):
href = get_if_exists(obj, "href", None)
resolved = obj
if href:
resolved = {**default_if_none(resolver(href), {}), **obj}
desc_type = get_if_exists(resolved, "type", "semantic")
docs = get_if_exists(resolved, "doc", None)
else:
return None
# desc = None
id = get_if_exists(resolved, "id")
name = get_if_exists(resolved, "name")
if desc_type == "semantic":
desc = Semantic(id=id, name=name)
elif desc_type == "safe":
desc = Safe(id=id, name=name)
elif desc_type == "unsafe":
desc = Unsafe(id=id, name=name)
elif desc_type == "idempotent":
desc = Idempotent(id=id, name=name)
if docs:
add_doc = getattr(desc, "add_doc", None)
if add_doc:
add_doc(docs)
for d in always_a_list(get_if_exists(resolved, "descriptor", [])):
desc.add_descriptor(d, resolver)
return desc
class DescriptorBase(WithDocsMixin):
def __init__(self):
super(DescriptorBase, self).__init__()
self.contents = {}
@property
def id(self):
return get_if_exists(self.contents, "id", None)
@property
def name(self):
return get_if_exists(self.contents, "name", None)
@property
def descriptors(self):
return always_a_list(get_if_exists(self.contents, "descriptor", []))
def add_descriptor(
self, descriptor: Descriptor, resolver: callable = noop_resolver
):
if not isinstance(descriptor, Descriptor):
descriptor = Descriptor.parse(descriptor, resolver)
append_if_not_none(self.contents, descriptor, "descriptor")
return self
def get_descriptor(self, id: str) -> Descriptor:
return list_is_optional(
[d for d in get_if_exists(self.contents, "descriptor", []) if d.id == id]
)
def as_data(self):
data = {}
for k, v in self.contents.items():
set_if_not_none(data, to_data(list_is_optional(v)), k)
return data
def __eq__(self, other):
if type(other) is type(self):
return self.contents == other.contents
else:
return False
def __hash__(self):
return hash((self.contents, self.contents))
class SimpleDescriptor(Descriptor, DescriptorBase):
def __init__(
self,
id: str = None,
text: str = None,
ref: str = None,
name: str = None,
*args,
**kwargs
):
super().__init__(*args, **kwargs)
self.contents["id"] = id
self.contents["text"] = text
self.contents["ref"] = ref
self.contents["name"] = name
class Idempotent(SimpleDescriptor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.contents["type"] = "idempotent"
class ReferencingDescriptor(SimpleDescriptor):
def __init__(self, ref: str, *args, **kwargs):
super().__init__(*args, **kwargs)
self.contents["ref"] = ref
class Safe(SimpleDescriptor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.contents["type"] = "safe"
class Semantic(SimpleDescriptor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.contents["type"] = "semantic"
class Unsafe(SimpleDescriptor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.contents["type"] = "unsafe"
| 27.453901
| 85
| 0.587703
|
from diogi.functions import *
from diogi.conventions import to_data
from .docs import WithDocsMixin
def noop_resolver(href: str) -> dict:
pass
class Descriptor:
@staticmethod
def parse(obj: any, resolver: callable):
if dict == type(obj):
href = get_if_exists(obj, "href", None)
resolved = obj
if href:
resolved = {**default_if_none(resolver(href), {}), **obj}
desc_type = get_if_exists(resolved, "type", "semantic")
docs = get_if_exists(resolved, "doc", None)
else:
return None
id = get_if_exists(resolved, "id")
name = get_if_exists(resolved, "name")
if desc_type == "semantic":
desc = Semantic(id=id, name=name)
elif desc_type == "safe":
desc = Safe(id=id, name=name)
elif desc_type == "unsafe":
desc = Unsafe(id=id, name=name)
elif desc_type == "idempotent":
desc = Idempotent(id=id, name=name)
if docs:
add_doc = getattr(desc, "add_doc", None)
if add_doc:
add_doc(docs)
for d in always_a_list(get_if_exists(resolved, "descriptor", [])):
desc.add_descriptor(d, resolver)
return desc
class DescriptorBase(WithDocsMixin):
def __init__(self):
super(DescriptorBase, self).__init__()
self.contents = {}
@property
def id(self):
return get_if_exists(self.contents, "id", None)
@property
def name(self):
return get_if_exists(self.contents, "name", None)
@property
def descriptors(self):
return always_a_list(get_if_exists(self.contents, "descriptor", []))
def add_descriptor(
self, descriptor: Descriptor, resolver: callable = noop_resolver
):
if not isinstance(descriptor, Descriptor):
descriptor = Descriptor.parse(descriptor, resolver)
append_if_not_none(self.contents, descriptor, "descriptor")
return self
def get_descriptor(self, id: str) -> Descriptor:
return list_is_optional(
[d for d in get_if_exists(self.contents, "descriptor", []) if d.id == id]
)
def as_data(self):
data = {}
for k, v in self.contents.items():
set_if_not_none(data, to_data(list_is_optional(v)), k)
return data
def __eq__(self, other):
if type(other) is type(self):
return self.contents == other.contents
else:
return False
def __hash__(self):
return hash((self.contents, self.contents))
class SimpleDescriptor(Descriptor, DescriptorBase):
def __init__(
self,
id: str = None,
text: str = None,
ref: str = None,
name: str = None,
*args,
**kwargs
):
super().__init__(*args, **kwargs)
self.contents["id"] = id
self.contents["text"] = text
self.contents["ref"] = ref
self.contents["name"] = name
class Idempotent(SimpleDescriptor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.contents["type"] = "idempotent"
class ReferencingDescriptor(SimpleDescriptor):
def __init__(self, ref: str, *args, **kwargs):
super().__init__(*args, **kwargs)
self.contents["ref"] = ref
class Safe(SimpleDescriptor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.contents["type"] = "safe"
class Semantic(SimpleDescriptor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.contents["type"] = "semantic"
class Unsafe(SimpleDescriptor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.contents["type"] = "unsafe"
| true
| true
|
7902c0eb5175879fdc97f1cf09a1cd5cc8374bfd
| 4,994
|
py
|
Python
|
models/official/unet3d/unet_main.py
|
tensorflow/tpu-demos
|
8aac591077e5781785aa6c22bc400472ba14dada
|
[
"Apache-2.0"
] | 65
|
2017-07-28T03:47:42.000Z
|
2018-02-04T20:54:18.000Z
|
models/official/unet3d/unet_main.py
|
tensorflow/tpu-demos
|
8aac591077e5781785aa6c22bc400472ba14dada
|
[
"Apache-2.0"
] | 10
|
2017-08-11T22:55:40.000Z
|
2018-02-07T01:11:28.000Z
|
models/official/unet3d/unet_main.py
|
tensorflow/tpu-demos
|
8aac591077e5781785aa6c22bc400472ba14dada
|
[
"Apache-2.0"
] | 28
|
2017-07-28T08:20:06.000Z
|
2018-01-28T16:28:12.000Z
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Training script for UNet-3D."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
from hyperparameters import params_dict
import input_reader
import tpu_executor
import unet_config
import unet_model
tpu_executor.define_tpu_flags()
flags.DEFINE_string(
'mode', 'train', 'Mode to run: train or eval or train_and_eval '
'(default: train)')
flags.DEFINE_string('model_dir', None, 'Location of model_dir')
flags.DEFINE_string('training_file_pattern', '', 'Location of the train data.')
flags.DEFINE_string('eval_file_pattern', '', 'Location of the eval data.')
flags.DEFINE_string('config_file', '', 'a YAML file which specifies overrides.')
flags.DEFINE_string('params_override', '',
'A JSON-style string that specifies overrides.')
flags.DEFINE_integer('min_eval_interval', 180,
'Minimum seconds between evaluations.')
flags.DEFINE_integer(
'eval_timeout', None,
'Maximum seconds between checkpoints before evaluation terminates.')
FLAGS = flags.FLAGS
def run_executer(params,
train_input_shapes=None, eval_input_shapes=None,
train_input_fn=None, eval_input_fn=None):
"""Runs Mask RCNN model on distribution strategy defined by the user."""
executer = tpu_executor.TPUEstimatorExecuter(
unet_model.unet_model_fn, params,
train_input_shapes=train_input_shapes,
eval_input_shapes=eval_input_shapes)
if FLAGS.mode == 'train':
assert train_input_fn is not None
results = executer.train(train_input_fn)
elif FLAGS.mode == 'eval':
assert eval_input_fn is not None
results = executer.evaluate(eval_input_fn)
elif FLAGS.mode == 'train_and_eval':
assert train_input_fn is not None
assert eval_input_fn is not None
results = executer.train_and_eval(train_input_fn, eval_input_fn)
else:
raise ValueError('Mode must be one of `train`, `eval`, or `train_and_eval`')
return results
def main(argv):
del argv # Unused.
params = params_dict.ParamsDict(unet_config.UNET_CONFIG,
unet_config.UNET_RESTRICTIONS)
params = params_dict.override_params_dict(
params, FLAGS.config_file, is_strict=False)
if FLAGS.training_file_pattern:
params.override({'training_file_pattern': FLAGS.training_file_pattern},
is_strict=True)
if FLAGS.eval_file_pattern:
params.override({'eval_file_pattern': FLAGS.eval_file_pattern},
is_strict=True)
train_epoch_steps = params.train_item_count // params.train_batch_size
eval_epoch_steps = params.eval_item_count // params.eval_batch_size
params.override(
{
'model_dir': FLAGS.model_dir,
'min_eval_interval': FLAGS.min_eval_interval,
'eval_timeout': FLAGS.eval_timeout,
'tpu_config': tpu_executor.get_tpu_flags(),
'lr_decay_steps': train_epoch_steps,
'train_steps': params.train_epochs * train_epoch_steps,
'eval_steps': eval_epoch_steps,
},
is_strict=False)
params = params_dict.override_params_dict(
params, FLAGS.params_override, is_strict=True)
params.validate()
params.lock()
train_input_fn = None
eval_input_fn = None
train_input_shapes = None
eval_input_shapes = None
if FLAGS.mode in ('train', 'train_and_eval'):
train_input_fn = input_reader.LiverInputFn(
params.training_file_pattern, params, mode=tf_estimator.ModeKeys.TRAIN)
train_input_shapes = train_input_fn.get_input_shapes(params)
if FLAGS.mode in ('eval', 'train_and_eval'):
eval_input_fn = input_reader.LiverInputFn(
params.eval_file_pattern, params, mode=tf_estimator.ModeKeys.EVAL)
eval_input_shapes = eval_input_fn.get_input_shapes(params)
assert train_input_shapes is not None or eval_input_shapes is not None
run_executer(params,
train_input_shapes=train_input_shapes,
eval_input_shapes=eval_input_shapes,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn)
if __name__ == '__main__':
tf.disable_v2_behavior()
app.run(main)
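An illustrative way to launch the trainer in-process (paths are placeholders; only flags defined in this file are shown, and the extra TPU flags registered by tpu_executor.define_tpu_flags() are omitted):
import sys
from absl import app

# app.run parses the flag values below from sys.argv before calling main().
sys.argv = [
    'unet_main.py',
    '--mode=train_and_eval',
    '--model_dir=gs://my-bucket/unet3d',
    '--training_file_pattern=gs://my-bucket/train-*',
    '--eval_file_pattern=gs://my-bucket/eval-*',
]
app.run(main)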
| 35.928058
| 80
| 0.718662
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator
from hyperparameters import params_dict
import input_reader
import tpu_executor
import unet_config
import unet_model
tpu_executor.define_tpu_flags()
flags.DEFINE_string(
'mode', 'train', 'Mode to run: train or eval or train_and_eval '
'(default: train)')
flags.DEFINE_string('model_dir', None, 'Location of model_dir')
flags.DEFINE_string('training_file_pattern', '', 'Location of the train data.')
flags.DEFINE_string('eval_file_pattern', '', 'Location of the eval data.')
flags.DEFINE_string('config_file', '', 'a YAML file which specifies overrides.')
flags.DEFINE_string('params_override', '',
'A JSON-style string that specifies overrides.')
flags.DEFINE_integer('min_eval_interval', 180,
'Minimum seconds between evaluations.')
flags.DEFINE_integer(
'eval_timeout', None,
'Maximum seconds between checkpoints before evaluation terminates.')
FLAGS = flags.FLAGS
def run_executer(params,
train_input_shapes=None, eval_input_shapes=None,
train_input_fn=None, eval_input_fn=None):
executer = tpu_executor.TPUEstimatorExecuter(
unet_model.unet_model_fn, params,
train_input_shapes=train_input_shapes,
eval_input_shapes=eval_input_shapes)
if FLAGS.mode == 'train':
assert train_input_fn is not None
results = executer.train(train_input_fn)
elif FLAGS.mode == 'eval':
assert eval_input_fn is not None
results = executer.evaluate(eval_input_fn)
elif FLAGS.mode == 'train_and_eval':
assert train_input_fn is not None
assert eval_input_fn is not None
results = executer.train_and_eval(train_input_fn, eval_input_fn)
else:
raise ValueError('Mode must be one of `train`, `eval`, or `train_and_eval`')
return results
def main(argv):
del argv
params = params_dict.ParamsDict(unet_config.UNET_CONFIG,
unet_config.UNET_RESTRICTIONS)
params = params_dict.override_params_dict(
params, FLAGS.config_file, is_strict=False)
if FLAGS.training_file_pattern:
params.override({'training_file_pattern': FLAGS.training_file_pattern},
is_strict=True)
if FLAGS.eval_file_pattern:
params.override({'eval_file_pattern': FLAGS.eval_file_pattern},
is_strict=True)
train_epoch_steps = params.train_item_count // params.train_batch_size
eval_epoch_steps = params.eval_item_count // params.eval_batch_size
params.override(
{
'model_dir': FLAGS.model_dir,
'min_eval_interval': FLAGS.min_eval_interval,
'eval_timeout': FLAGS.eval_timeout,
'tpu_config': tpu_executor.get_tpu_flags(),
'lr_decay_steps': train_epoch_steps,
'train_steps': params.train_epochs * train_epoch_steps,
'eval_steps': eval_epoch_steps,
},
is_strict=False)
params = params_dict.override_params_dict(
params, FLAGS.params_override, is_strict=True)
params.validate()
params.lock()
train_input_fn = None
eval_input_fn = None
train_input_shapes = None
eval_input_shapes = None
if FLAGS.mode in ('train', 'train_and_eval'):
train_input_fn = input_reader.LiverInputFn(
params.training_file_pattern, params, mode=tf_estimator.ModeKeys.TRAIN)
train_input_shapes = train_input_fn.get_input_shapes(params)
if FLAGS.mode in ('eval', 'train_and_eval'):
eval_input_fn = input_reader.LiverInputFn(
params.eval_file_pattern, params, mode=tf_estimator.ModeKeys.EVAL)
eval_input_shapes = eval_input_fn.get_input_shapes(params)
assert train_input_shapes is not None or eval_input_shapes is not None
run_executer(params,
train_input_shapes=train_input_shapes,
eval_input_shapes=eval_input_shapes,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn)
if __name__ == '__main__':
tf.disable_v2_behavior()
app.run(main)
| true
| true
|
7902c17011b88149151bf12ca4db88d98f3a35a8
| 8,388
|
py
|
Python
|
osh/builtin_bracket.py
|
Schweinepriester/oil
|
8b0e5c58a825223341896064d63a95c8b57a9c05
|
[
"Apache-2.0"
] | 2,209
|
2016-11-20T10:32:58.000Z
|
2022-03-31T20:51:27.000Z
|
osh/builtin_bracket.py
|
Schweinepriester/oil
|
8b0e5c58a825223341896064d63a95c8b57a9c05
|
[
"Apache-2.0"
] | 1,074
|
2016-12-07T05:02:48.000Z
|
2022-03-22T02:09:11.000Z
|
osh/builtin_bracket.py
|
Schweinepriester/oil
|
8b0e5c58a825223341896064d63a95c8b57a9c05
|
[
"Apache-2.0"
] | 147
|
2016-12-11T04:13:28.000Z
|
2022-03-27T14:50:00.000Z
|
"""
builtin_bracket.py
"""
from __future__ import print_function
from _devbuild.gen.id_kind_asdl import Id
from _devbuild.gen.runtime_asdl import value
from _devbuild.gen.syntax_asdl import (
word, word_e, word_t, word__String, bool_expr,
)
from _devbuild.gen.types_asdl import lex_mode_e
from asdl import runtime
from core import error
from core.pyerror import e_usage, p_die, log
from core import vm
from frontend import match
from osh import sh_expr_eval
from osh import bool_parse
from osh import word_parse
from osh import word_eval
_ = log
from typing import cast, TYPE_CHECKING
if TYPE_CHECKING:
from _devbuild.gen.runtime_asdl import cmd_value__Argv, value__Str
from _devbuild.gen.syntax_asdl import word__String, bool_expr_t
from _devbuild.gen.types_asdl import lex_mode_t
from core.ui import ErrorFormatter
from core import optview
from core import state
class _StringWordEmitter(word_parse.WordEmitter):
"""For test/[, we need a word parser that returns String.
The BoolParser calls word_.BoolId(w), and deals with Kind.BoolUnary,
Kind.BoolBinary, etc. This is instead of Compound/Token (as in the
[[ case.
"""
def __init__(self, cmd_val):
# type: (cmd_value__Argv) -> None
self.cmd_val = cmd_val
self.i = 0
self.n = len(cmd_val.argv)
def ReadWord(self, unused_lex_mode):
# type: (lex_mode_t) -> word__String
"""Interface for bool_parse.py.
TODO: This should probably be word_t
"""
if self.i == self.n:
# Does it make sense to define Eof_Argv or something?
# TODO: Add a way to show this location. Show 1 char past the right-most
# spid of the last word? But we only have the left-most spid.
w = word.String(Id.Eof_Real, '', runtime.NO_SPID)
return w
#log('ARGV %s i %d', self.argv, self.i)
s = self.cmd_val.argv[self.i]
left_spid = self.cmd_val.arg_spids[self.i]
self.i += 1
# default is an operand word
id_ = match.BracketUnary(s)
if id_ == Id.Undefined_Tok:
id_ = match.BracketBinary(s)
if id_ == Id.Undefined_Tok:
id_ = match.BracketOther(s)
if id_ == Id.Undefined_Tok:
id_ = Id.Word_Compound
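    # e.g. '-z' classifies as a unary operator and '-eq' as a binary
    # operator; anything unrecognized stays a plain operand word.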
# NOTE: We only have the left spid now. It might be useful to add the
# right one.
w = word.String(id_, s, left_spid)
return w
def Read(self):
# type: () -> word__String
"""Interface used for special cases below."""
return self.ReadWord(lex_mode_e.ShCommand)
def Peek(self, offset):
# type: (int) -> str
"""For special cases."""
return self.cmd_val.argv[self.i + offset]
def Rewind(self, offset):
# type: (int) -> None
"""For special cases."""
self.i -= offset
class _WordEvaluator(word_eval.StringWordEvaluator):
def __init__(self):
# type: () -> None
word_eval.StringWordEvaluator.__init__(self)
def EvalWordToString(self, w, eval_flags=0):
# type: (word_t, int) -> value__Str
# do_fnmatch: for the [[ == ]] semantics which we don't have!
# I think I need another type of node
# Maybe it should be BuiltinEqual and BuiltinDEqual? Parse it into a
# different tree.
assert w.tag_() == word_e.String
string_word = cast(word__String, w)
return value.Str(string_word.s)
def _TwoArgs(w_parser):
# type: (_StringWordEmitter) -> bool_expr_t
"""Returns an expression tree to be evaluated."""
w0 = w_parser.Read()
w1 = w_parser.Read()
s0 = w0.s
if s0 == '!':
return bool_expr.LogicalNot(bool_expr.WordTest(w1))
unary_id = Id.Undefined_Tok
# Oil's preferred long flags
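  # (e.g. 'test --file x' is treated like 'test -f x': --file maps to
  # Id.BoolUnary_f below)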
if w0.s.startswith('--'):
if s0 == '--dir':
unary_id = Id.BoolUnary_d
elif s0 == '--exists':
unary_id = Id.BoolUnary_e
elif s0 == '--file':
unary_id = Id.BoolUnary_f
elif s0 == '--symlink':
unary_id = Id.BoolUnary_L
if unary_id == Id.Undefined_Tok:
unary_id = match.BracketUnary(w0.s)
if unary_id == Id.Undefined_Tok:
p_die('Expected unary operator, got %r (2 args)', w0.s, word=w0)
return bool_expr.Unary(unary_id, w1)
def _ThreeArgs(w_parser):
# type: (_StringWordEmitter) -> bool_expr_t
"""Returns an expression tree to be evaluated."""
w0 = w_parser.Read()
w1 = w_parser.Read()
w2 = w_parser.Read()
# NOTE: Order is important here.
binary_id = match.BracketBinary(w1.s)
if binary_id != Id.Undefined_Tok:
return bool_expr.Binary(binary_id, w0, w2)
if w1.s == '-a':
return bool_expr.LogicalAnd(bool_expr.WordTest(w0), bool_expr.WordTest(w2))
if w1.s == '-o':
return bool_expr.LogicalOr(bool_expr.WordTest(w0), bool_expr.WordTest(w2))
if w0.s == '!':
w_parser.Rewind(2)
child = _TwoArgs(w_parser)
return bool_expr.LogicalNot(child)
if w0.s == '(' and w2.s == ')':
return bool_expr.WordTest(w1)
p_die('Expected binary operator, got %r (3 args)', w1.s, word=w1)
class Test(vm._Builtin):
def __init__(self, need_right_bracket, exec_opts, mem, errfmt):
# type: (bool, optview.Exec, state.Mem, ErrorFormatter) -> None
self.need_right_bracket = need_right_bracket
self.exec_opts = exec_opts
self.mem = mem
self.errfmt = errfmt
def Run(self, cmd_val):
# type: (cmd_value__Argv) -> int
"""The test/[ builtin.
The only difference between test and [ is that [ needs a matching ].
"""
if self.need_right_bracket: # Preprocess right bracket
if self.exec_opts.simple_test_builtin():
e_usage("should be invoked as 'test' (simple_test_builtin)")
strs = cmd_val.argv
if not strs or strs[-1] != ']':
self.errfmt.Print_('missing closing ]', span_id=cmd_val.arg_spids[0])
return 2
# Remove the right bracket
cmd_val.argv.pop()
cmd_val.arg_spids.pop()
w_parser = _StringWordEmitter(cmd_val)
w_parser.Read() # dummy: advance past argv[0]
b_parser = bool_parse.BoolParser(w_parser)
# There is a fundamental ambiguity due to poor language design, in cases like:
# [ -z ]
# [ -z -a ]
# [ -z -a ] ]
#
# See posixtest() in bash's test.c:
# "This is an implementation of a Posix.2 proposal by David Korn."
# It dispatches on expressions of length 0, 1, 2, 3, 4, and N args. We do
# the same here.
#
# Another ambiguity:
# -a is both a unary prefix operator and an infix operator. How to fix this
# ambiguity?
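    # Illustrative example of the dispatch below: with exactly two args,
    # '[ -z "" ]' goes through _TwoArgs and parses as the unary -z test;
    # with exactly three args, '[ x -a y ]' goes through _ThreeArgs and
    # becomes a logical AND of two word tests.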
bool_node = None # type: bool_expr_t
n = len(cmd_val.argv) - 1
if self.exec_opts.simple_test_builtin() and n > 3:
e_usage("should only have 3 arguments or fewer (simple_test_builtin)")
try:
if n == 0:
return 1 # [ ] is False
elif n == 1:
w = w_parser.Read()
bool_node = bool_expr.WordTest(w)
elif n == 2:
bool_node = _TwoArgs(w_parser)
elif n == 3:
bool_node = _ThreeArgs(w_parser)
if n == 4:
a0 = w_parser.Peek(0)
if a0 == '!':
w_parser.Read() # skip !
child = _ThreeArgs(w_parser)
bool_node = bool_expr.LogicalNot(child)
elif a0 == '(' and w_parser.Peek(3) == ')':
w_parser.Read() # skip ')'
bool_node = _TwoArgs(w_parser)
else:
pass # fallthrough
if bool_node is None:
bool_node = b_parser.ParseForBuiltin()
except error.Parse as e:
self.errfmt.PrettyPrintError(e, prefix='(test) ')
return 2
# We technically don't need mem because we don't support BASH_REMATCH here.
word_ev = _WordEvaluator()
bool_ev = sh_expr_eval.BoolEvaluator(self.mem, self.exec_opts, None,
self.errfmt)
# We want [ a -eq a ] to always be an error, unlike [[ a -eq a ]]. This is a
# weird case of [[ being less strict.
bool_ev.Init_AlwaysStrict()
bool_ev.word_ev = word_ev
bool_ev.CheckCircularDeps()
try:
b = bool_ev.EvalB(bool_node)
except error._ErrorWithLocation as e:
# We want to catch e_die() and e_strict(). Those are both FatalRuntime
# errors now, but it might not make sense later.
# NOTE: This doesn't seem to happen. We have location info for all
# errors that arise out of [.
#if not e.HasLocation():
# raise
self.errfmt.PrettyPrintError(e, prefix='(test) ')
return 2 # 1 means 'false', and this usage error is like a parse error.
status = 0 if b else 1
return status
| 29.850534
| 82
| 0.651884
|
from __future__ import print_function
from _devbuild.gen.id_kind_asdl import Id
from _devbuild.gen.runtime_asdl import value
from _devbuild.gen.syntax_asdl import (
word, word_e, word_t, word__String, bool_expr,
)
from _devbuild.gen.types_asdl import lex_mode_e
from asdl import runtime
from core import error
from core.pyerror import e_usage, p_die, log
from core import vm
from frontend import match
from osh import sh_expr_eval
from osh import bool_parse
from osh import word_parse
from osh import word_eval
_ = log
from typing import cast, TYPE_CHECKING
if TYPE_CHECKING:
from _devbuild.gen.runtime_asdl import cmd_value__Argv, value__Str
from _devbuild.gen.syntax_asdl import word__String, bool_expr_t
from _devbuild.gen.types_asdl import lex_mode_t
from core.ui import ErrorFormatter
from core import optview
from core import state
class _StringWordEmitter(word_parse.WordEmitter):
def __init__(self, cmd_val):
self.cmd_val = cmd_val
self.i = 0
self.n = len(cmd_val.argv)
def ReadWord(self, unused_lex_mode):
if self.i == self.n:
w = word.String(Id.Eof_Real, '', runtime.NO_SPID)
return w
s = self.cmd_val.argv[self.i]
left_spid = self.cmd_val.arg_spids[self.i]
self.i += 1
id_ = match.BracketUnary(s)
if id_ == Id.Undefined_Tok:
id_ = match.BracketBinary(s)
if id_ == Id.Undefined_Tok:
id_ = match.BracketOther(s)
if id_ == Id.Undefined_Tok:
id_ = Id.Word_Compound
w = word.String(id_, s, left_spid)
return w
def Read(self):
return self.ReadWord(lex_mode_e.ShCommand)
def Peek(self, offset):
return self.cmd_val.argv[self.i + offset]
def Rewind(self, offset):
self.i -= offset
class _WordEvaluator(word_eval.StringWordEvaluator):
def __init__(self):
word_eval.StringWordEvaluator.__init__(self)
def EvalWordToString(self, w, eval_flags=0):
# I think I need another type of node
# Maybe it should be BuiltinEqual and BuiltinDEqual? Parse it into a
# different tree.
assert w.tag_() == word_e.String
string_word = cast(word__String, w)
return value.Str(string_word.s)
def _TwoArgs(w_parser):
# type: (_StringWordEmitter) -> bool_expr_t
w0 = w_parser.Read()
w1 = w_parser.Read()
s0 = w0.s
if s0 == '!':
return bool_expr.LogicalNot(bool_expr.WordTest(w1))
unary_id = Id.Undefined_Tok
# Oil's preferred long flags
if w0.s.startswith('--'):
if s0 == '--dir':
unary_id = Id.BoolUnary_d
elif s0 == '--exists':
unary_id = Id.BoolUnary_e
elif s0 == '--file':
unary_id = Id.BoolUnary_f
elif s0 == '--symlink':
unary_id = Id.BoolUnary_L
if unary_id == Id.Undefined_Tok:
unary_id = match.BracketUnary(w0.s)
if unary_id == Id.Undefined_Tok:
p_die('Expected unary operator, got %r (2 args)', w0.s, word=w0)
return bool_expr.Unary(unary_id, w1)
def _ThreeArgs(w_parser):
w0 = w_parser.Read()
w1 = w_parser.Read()
w2 = w_parser.Read()
binary_id = match.BracketBinary(w1.s)
if binary_id != Id.Undefined_Tok:
return bool_expr.Binary(binary_id, w0, w2)
if w1.s == '-a':
return bool_expr.LogicalAnd(bool_expr.WordTest(w0), bool_expr.WordTest(w2))
if w1.s == '-o':
return bool_expr.LogicalOr(bool_expr.WordTest(w0), bool_expr.WordTest(w2))
if w0.s == '!':
w_parser.Rewind(2)
child = _TwoArgs(w_parser)
return bool_expr.LogicalNot(child)
if w0.s == '(' and w2.s == ')':
return bool_expr.WordTest(w1)
p_die('Expected binary operator, got %r (3 args)', w1.s, word=w1)
class Test(vm._Builtin):
def __init__(self, need_right_bracket, exec_opts, mem, errfmt):
self.need_right_bracket = need_right_bracket
self.exec_opts = exec_opts
self.mem = mem
self.errfmt = errfmt
def Run(self, cmd_val):
if self.need_right_bracket:
if self.exec_opts.simple_test_builtin():
e_usage("should be invoked as 'test' (simple_test_builtin)")
strs = cmd_val.argv
if not strs or strs[-1] != ']':
self.errfmt.Print_('missing closing ]', span_id=cmd_val.arg_spids[0])
return 2
cmd_val.argv.pop()
cmd_val.arg_spids.pop()
w_parser = _StringWordEmitter(cmd_val)
w_parser.Read()
b_parser = bool_parse.BoolParser(w_parser)
# "This is an implementation of a Posix.2 proposal by David Korn."
# It dispatches on expressions of length 0, 1, 2, 3, 4, and N args. We do
# the same here.
#
# Another ambiguity:
# -a is both a unary prefix operator and an infix operator. How to fix this
# ambiguity?
bool_node = None # type: bool_expr_t
n = len(cmd_val.argv) - 1
if self.exec_opts.simple_test_builtin() and n > 3:
e_usage("should only have 3 arguments or fewer (simple_test_builtin)")
try:
if n == 0:
return 1 # [ ] is False
elif n == 1:
w = w_parser.Read()
bool_node = bool_expr.WordTest(w)
elif n == 2:
bool_node = _TwoArgs(w_parser)
elif n == 3:
bool_node = _ThreeArgs(w_parser)
if n == 4:
a0 = w_parser.Peek(0)
if a0 == '!':
w_parser.Read() # skip !
child = _ThreeArgs(w_parser)
bool_node = bool_expr.LogicalNot(child)
elif a0 == '(' and w_parser.Peek(3) == ')':
w_parser.Read() # skip ')'
bool_node = _TwoArgs(w_parser)
else:
pass # fallthrough
if bool_node is None:
bool_node = b_parser.ParseForBuiltin()
except error.Parse as e:
self.errfmt.PrettyPrintError(e, prefix='(test) ')
return 2
# We technically don't need mem because we don't support BASH_REMATCH here.
word_ev = _WordEvaluator()
bool_ev = sh_expr_eval.BoolEvaluator(self.mem, self.exec_opts, None,
self.errfmt)
# We want [ a -eq a ] to always be an error, unlike [[ a -eq a ]]. This is a
# weird case of [[ being less strict.
bool_ev.Init_AlwaysStrict()
bool_ev.word_ev = word_ev
bool_ev.CheckCircularDeps()
try:
b = bool_ev.EvalB(bool_node)
except error._ErrorWithLocation as e:
# We want to catch e_die() and e_strict(). Those are both FatalRuntime
# errors now, but it might not make sense later.
# NOTE: This doesn't seem to happen. We have location info for all
self.errfmt.PrettyPrintError(e, prefix='(test) ')
return 2
status = 0 if b else 1
return status
| true
| true
|
7902c1dde76668cd152efaed3547620712f047f2
| 461
|
py
|
Python
|
components/climber.py
|
BlueCrewRobotics/2017Robot
|
d55c9179df819ed27da61c314dc8d74b0938c376
|
[
"MIT"
] | null | null | null |
components/climber.py
|
BlueCrewRobotics/2017Robot
|
d55c9179df819ed27da61c314dc8d74b0938c376
|
[
"MIT"
] | null | null | null |
components/climber.py
|
BlueCrewRobotics/2017Robot
|
d55c9179df819ed27da61c314dc8d74b0938c376
|
[
"MIT"
] | null | null | null |
#climber.py
#Robot Code For BlueCrew 6153
import wpilib
#Commands to make the robot climb.
class Climber:
climb_motor = wpilib.Talon
#Set robot to climb when motor is on.
def climb(self):
self.climb_motor.set(1)
#Stops the robot from climbing when motor is off.
def stop_climb(self):
self.climb_motor.set(0)
#Execute is a necessary method for robotpy
    #DO NOT DELETE
def execute(self):
pass
| 21.952381
| 53
| 0.657267
|
import wpilib
class Climber:
climb_motor = wpilib.Talon
def climb(self):
self.climb_motor.set(1)
def stop_climb(self):
self.climb_motor.set(0)
def execute(self):
pass
| true
| true
|
7902c390ad5e9c53d1a89d41161ecf5ce7f436b3
| 845
|
py
|
Python
|
hook/viewsets.py
|
PythonBenin/sphinx-bot
|
3b58d2109ae62294aff33c86e02a50e11f4b730b
|
[
"MIT"
] | null | null | null |
hook/viewsets.py
|
PythonBenin/sphinx-bot
|
3b58d2109ae62294aff33c86e02a50e11f4b730b
|
[
"MIT"
] | 6
|
2021-03-19T03:35:31.000Z
|
2022-02-10T08:52:55.000Z
|
hook/viewsets.py
|
PythonBenin/sphinx-bot
|
3b58d2109ae62294aff33c86e02a50e11f4b730b
|
[
"MIT"
] | 1
|
2019-11-28T18:12:19.000Z
|
2019-11-28T18:12:19.000Z
|
from importlib import import_module
from rest_framework import status
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet
class HookViewSet(GenericViewSet):
def post(self, request, *args, **kwargs):
data = request.data
action = data['action']
event = request.META.get('HTTP_X_GITHUB_EVENT', None)
if not event:
return Response({'result': False}, status=status.HTTP_200_OK)
if 'installation' in event:
event = 'installation'
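        # Dynamic dispatch (descriptive note): for an "issues" event with
        # action "opened", this imports <package>.issues.api and calls its
        # hook_opened(data) handler; the module and function names are
        # derived from the event and action strings.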
try:
dirname = __name__.split('viewsets')[0]
module = import_module(f'{dirname}{event}.api')
result = getattr(module, f'hook_{action}')(data)
except ImportError:
result = False
return Response({'result': result}, status.HTTP_200_OK)
| 33.8
| 73
| 0.648521
|
from importlib import import_module
from rest_framework import status
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet
class HookViewSet(GenericViewSet):
def post(self, request, *args, **kwargs):
data = request.data
action = data['action']
event = request.META.get('HTTP_X_GITHUB_EVENT', None)
if not event:
return Response({'result': False}, status=status.HTTP_200_OK)
if 'installation' in event:
event = 'installation'
try:
dirname = __name__.split('viewsets')[0]
module = import_module(f'{dirname}{event}.api')
result = getattr(module, f'hook_{action}')(data)
except ImportError:
result = False
return Response({'result': result}, status.HTTP_200_OK)
| true
| true
|
7902c3b72cd784fa20d29ada443a30329b5c8004
| 32,091
|
py
|
Python
|
scipy/linalg/interpolative.py
|
jake-is-ESD-protected/scipy
|
d7283ff75c218c300f372b5fdd960b987c1709a1
|
[
"BSD-3-Clause"
] | 9,095
|
2015-01-02T18:24:23.000Z
|
2022-03-31T20:35:31.000Z
|
scipy/linalg/interpolative.py
|
jake-is-ESD-protected/scipy
|
d7283ff75c218c300f372b5fdd960b987c1709a1
|
[
"BSD-3-Clause"
] | 11,500
|
2015-01-01T01:15:30.000Z
|
2022-03-31T23:07:35.000Z
|
scipy/linalg/interpolative.py
|
jake-is-ESD-protected/scipy
|
d7283ff75c218c300f372b5fdd960b987c1709a1
|
[
"BSD-3-Clause"
] | 5,838
|
2015-01-05T11:56:42.000Z
|
2022-03-31T23:21:19.000Z
|
#******************************************************************************
# Copyright (C) 2013 Kenneth L. Ho
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. Redistributions in binary
# form must reproduce the above copyright notice, this list of conditions and
# the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# None of the names of the copyright holders may be used to endorse or
# promote products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#******************************************************************************
# Python module for interfacing with `id_dist`.
r"""
======================================================================
Interpolative matrix decomposition (:mod:`scipy.linalg.interpolative`)
======================================================================
.. moduleauthor:: Kenneth L. Ho <klho@stanford.edu>
.. versionadded:: 0.13
.. currentmodule:: scipy.linalg.interpolative
An interpolative decomposition (ID) of a matrix :math:`A \in
\mathbb{C}^{m \times n}` of rank :math:`k \leq \min \{ m, n \}` is a
factorization
.. math::
A \Pi =
\begin{bmatrix}
A \Pi_{1} & A \Pi_{2}
\end{bmatrix} =
A \Pi_{1}
\begin{bmatrix}
I & T
\end{bmatrix},
where :math:`\Pi = [\Pi_{1}, \Pi_{2}]` is a permutation matrix with
:math:`\Pi_{1} \in \{ 0, 1 \}^{n \times k}`, i.e., :math:`A \Pi_{2} =
A \Pi_{1} T`. This can equivalently be written as :math:`A = BP`,
where :math:`B = A \Pi_{1}` and :math:`P = [I, T] \Pi^{\mathsf{T}}`
are the *skeleton* and *interpolation matrices*, respectively.
If :math:`A` does not have exact rank :math:`k`, then there exists an
approximation in the form of an ID such that :math:`A = BP + E`, where
:math:`\| E \| \sim \sigma_{k + 1}` is on the order of the :math:`(k +
1)`-th largest singular value of :math:`A`. Note that :math:`\sigma_{k
+ 1}` is the best possible error for a rank-:math:`k` approximation
and, in fact, is achieved by the singular value decomposition (SVD)
:math:`A \approx U S V^{*}`, where :math:`U \in \mathbb{C}^{m \times
k}` and :math:`V \in \mathbb{C}^{n \times k}` have orthonormal columns
and :math:`S = \mathop{\mathrm{diag}} (\sigma_{i}) \in \mathbb{C}^{k
\times k}` is diagonal with nonnegative entries. The principal
advantages of using an ID over an SVD are that:
- it is cheaper to construct;
- it preserves the structure of :math:`A`; and
- it is more efficient to compute with in light of the identity submatrix of :math:`P`.
Routines
========
Main functionality:
.. autosummary::
:toctree: generated/
interp_decomp
reconstruct_matrix_from_id
reconstruct_interp_matrix
reconstruct_skel_matrix
id_to_svd
svd
estimate_spectral_norm
estimate_spectral_norm_diff
estimate_rank
Support functions:
.. autosummary::
:toctree: generated/
seed
rand
References
==========
This module uses the ID software package [1]_ by Martinsson, Rokhlin,
Shkolnisky, and Tygert, which is a Fortran library for computing IDs
using various algorithms, including the rank-revealing QR approach of
[2]_ and the more recent randomized methods described in [3]_, [4]_,
and [5]_. This module exposes its functionality in a way convenient
for Python users. Note that this module does not add any functionality
beyond that of organizing a simpler and more consistent interface.
We advise the user to consult also the `documentation for the ID package
<http://tygert.com/id_doc.4.pdf>`_.
.. [1] P.G. Martinsson, V. Rokhlin, Y. Shkolnisky, M. Tygert. "ID: a
software package for low-rank approximation of matrices via interpolative
decompositions, version 0.2." http://tygert.com/id_doc.4.pdf.
.. [2] H. Cheng, Z. Gimbutas, P.G. Martinsson, V. Rokhlin. "On the
compression of low rank matrices." *SIAM J. Sci. Comput.* 26 (4): 1389--1404,
2005. :doi:`10.1137/030602678`.
.. [3] E. Liberty, F. Woolfe, P.G. Martinsson, V. Rokhlin, M.
Tygert. "Randomized algorithms for the low-rank approximation of matrices."
*Proc. Natl. Acad. Sci. U.S.A.* 104 (51): 20167--20172, 2007.
:doi:`10.1073/pnas.0709640104`.
.. [4] P.G. Martinsson, V. Rokhlin, M. Tygert. "A randomized
algorithm for the decomposition of matrices." *Appl. Comput. Harmon. Anal.* 30
(1): 47--68, 2011. :doi:`10.1016/j.acha.2010.02.003`.
.. [5] F. Woolfe, E. Liberty, V. Rokhlin, M. Tygert. "A fast
randomized algorithm for the approximation of matrices." *Appl. Comput.
Harmon. Anal.* 25 (3): 335--366, 2008. :doi:`10.1016/j.acha.2007.12.002`.
Tutorial
========
Initializing
------------
The first step is to import :mod:`scipy.linalg.interpolative` by issuing the
command:
>>> import scipy.linalg.interpolative as sli
Now let's build a matrix. For this, we consider a Hilbert matrix, which is well
known to have low rank:
>>> from scipy.linalg import hilbert
>>> n = 1000
>>> A = hilbert(n)
We can also do this explicitly via:
>>> import numpy as np
>>> n = 1000
>>> A = np.empty((n, n), order='F')
>>> for j in range(n):
...     for i in range(n):
...         A[i,j] = 1. / (i + j + 1)
Note the use of the flag ``order='F'`` in :func:`numpy.empty`. This
instantiates the matrix in Fortran-contiguous order and is important for
avoiding data copying when passing to the backend.
We then define multiplication routines for the matrix by regarding it as a
:class:`scipy.sparse.linalg.LinearOperator`:
>>> from scipy.sparse.linalg import aslinearoperator
>>> L = aslinearoperator(A)
This automatically sets up methods describing the action of the matrix and its
adjoint on a vector.
Computing an ID
---------------
We have several choices of algorithm to compute an ID. These fall largely
according to two dichotomies:
1. how the matrix is represented, i.e., via its entries or via its action on a
vector; and
2. whether to approximate it to a fixed relative precision or to a fixed rank.
We step through each choice in turn below.
In all cases, the ID is represented by three parameters:
1. a rank ``k``;
2. an index array ``idx``; and
3. interpolation coefficients ``proj``.
The ID is specified by the relation
``np.dot(A[:,idx[:k]], proj) == A[:,idx[k:]]``.
From matrix entries
...................
We first consider a matrix given in terms of its entries.
To compute an ID to a fixed precision, type:
>>> k, idx, proj = sli.interp_decomp(A, eps)
where ``eps < 1`` is the desired precision.
To compute an ID to a fixed rank, use:
>>> idx, proj = sli.interp_decomp(A, k)
where ``k >= 1`` is the desired rank.
Both algorithms use random sampling and are usually faster than the
corresponding older, deterministic algorithms, which can be accessed via the
commands:
>>> k, idx, proj = sli.interp_decomp(A, eps, rand=False)
and:
>>> idx, proj = sli.interp_decomp(A, k, rand=False)
respectively.
From matrix action
..................
Now consider a matrix given in terms of its action on a vector as a
:class:`scipy.sparse.linalg.LinearOperator`.
To compute an ID to a fixed precision, type:
>>> k, idx, proj = sli.interp_decomp(L, eps)
To compute an ID to a fixed rank, use:
>>> idx, proj = sli.interp_decomp(L, k)
These algorithms are randomized.
Reconstructing an ID
--------------------
The ID routines above do not output the skeleton and interpolation matrices
explicitly but instead return the relevant information in a more compact (and
sometimes more useful) form. To build these matrices, write:
>>> B = sli.reconstruct_skel_matrix(A, k, idx)
for the skeleton matrix and:
>>> P = sli.reconstruct_interp_matrix(idx, proj)
for the interpolation matrix. The ID approximation can then be computed as:
>>> C = np.dot(B, P)
This can also be constructed directly using:
>>> C = sli.reconstruct_matrix_from_id(B, idx, proj)
without having to first compute ``P``.
Alternatively, this can be done explicitly as well using:
>>> B = A[:,idx[:k]]
>>> P = np.hstack([np.eye(k), proj])[:,np.argsort(idx)]
>>> C = np.dot(B, P)
Computing an SVD
----------------
An ID can be converted to an SVD via the command:
>>> U, S, V = sli.id_to_svd(B, idx, proj)
The SVD approximation is then:
>>> C = np.dot(U, np.dot(np.diag(S), V.conj().T))
The SVD can also be computed "fresh" by combining both the ID and conversion
steps into one command. Following the various ID algorithms above, there are
correspondingly various SVD algorithms that one can employ.
From matrix entries
...................
We consider first SVD algorithms for a matrix given in terms of its entries.
To compute an SVD to a fixed precision, type:
>>> U, S, V = sli.svd(A, eps)
To compute an SVD to a fixed rank, use:
>>> U, S, V = sli.svd(A, k)
Both algorithms use random sampling; for the deterministic versions, issue the
keyword ``rand=False`` as above.
From matrix action
..................
Now consider a matrix given in terms of its action on a vector.
To compute an SVD to a fixed precision, type:
>>> U, S, V = sli.svd(L, eps)
To compute an SVD to a fixed rank, use:
>>> U, S, V = sli.svd(L, k)
Utility routines
----------------
Several utility routines are also available.
To estimate the spectral norm of a matrix, use:
>>> snorm = sli.estimate_spectral_norm(A)
This algorithm is based on the randomized power method and thus requires only
matrix-vector products. The number of iterations to take can be set using the
keyword ``its`` (default: ``its=20``). The matrix is interpreted as a
:class:`scipy.sparse.linalg.LinearOperator`, but it is also valid to supply it
as a :class:`numpy.ndarray`, in which case it is trivially converted using
:func:`scipy.sparse.linalg.aslinearoperator`.
The same algorithm can also estimate the spectral norm of the difference of two
matrices ``A1`` and ``A2`` as follows:
>>> diff = sli.estimate_spectral_norm_diff(A1, A2)
This is often useful for checking the accuracy of a matrix approximation.
Some routines in :mod:`scipy.linalg.interpolative` require estimating the rank
of a matrix as well. This can be done with either:
>>> k = sli.estimate_rank(A, eps)
or:
>>> k = sli.estimate_rank(L, eps)
depending on the representation. The parameter ``eps`` controls the definition
of the numerical rank.
Finally, the random number generation required for all randomized routines can
be controlled via :func:`scipy.linalg.interpolative.seed`. To reset the seed
values to their original values, use:
>>> sli.seed('default')
To specify the seed values, use:
>>> sli.seed(s)
where ``s`` must be an integer or array of 55 floats. If an integer, the array
of floats is obtained by using ``numpy.random.rand`` with the given integer
seed.
To simply generate some random numbers, type:
>>> sli.rand(n)
where ``n`` is the number of random numbers to generate.
Remarks
-------
The above functions all automatically detect the appropriate interface and work
with both real and complex data types, passing input arguments to the proper
backend routine.
"""
import scipy.linalg._interpolative_backend as _backend
import numpy as np
import sys
__all__ = [
'estimate_rank',
'estimate_spectral_norm',
'estimate_spectral_norm_diff',
'id_to_svd',
'interp_decomp',
'rand',
'reconstruct_interp_matrix',
'reconstruct_matrix_from_id',
'reconstruct_skel_matrix',
'seed',
'svd',
]
_DTYPE_ERROR = ValueError("invalid input dtype (input must be float64 or complex128)")
_TYPE_ERROR = TypeError("invalid input type (must be array or LinearOperator)")
_32BIT_ERROR = ValueError("interpolative decomposition on 32-bit systems "
"with complex128 is buggy")
_IS_32BIT = (sys.maxsize < 2**32)
def _is_real(A):
try:
if A.dtype == np.complex128:
return False
elif A.dtype == np.float64:
return True
else:
raise _DTYPE_ERROR
except AttributeError as e:
raise _TYPE_ERROR from e
def seed(seed=None):
"""
Seed the internal random number generator used in this ID package.
The generator is a lagged Fibonacci method with 55-element internal state.
Parameters
----------
seed : int, sequence, 'default', optional
If 'default', the random seed is reset to a default value.
If `seed` is a sequence containing 55 floating-point numbers
in range [0,1], these are used to set the internal state of
the generator.
If the value is an integer, the internal state is obtained
from `numpy.random.RandomState` (MT19937) with the integer
used as the initial seed.
If `seed` is omitted (None), ``numpy.random.rand`` is used to
initialize the generator.
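    Examples
    --------
    A minimal usage sketch (added for illustration; the calls return None):
    >>> import numpy as np
    >>> import scipy.linalg.interpolative as sli
    >>> sli.seed('default')           # reset to the default internal state
    >>> sli.seed(1234)                # deterministic reseed from an integer
    >>> sli.seed(np.random.rand(55))  # explicit 55-element state in [0, 1]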
"""
# For details, see :func:`_backend.id_srand`, :func:`_backend.id_srandi`,
# and :func:`_backend.id_srando`.
if isinstance(seed, str) and seed == 'default':
_backend.id_srando()
elif hasattr(seed, '__len__'):
state = np.asfortranarray(seed, dtype=float)
if state.shape != (55,):
raise ValueError("invalid input size")
elif state.min() < 0 or state.max() > 1:
raise ValueError("values not in range [0,1]")
_backend.id_srandi(state)
elif seed is None:
_backend.id_srandi(np.random.rand(55))
else:
rnd = np.random.RandomState(seed)
_backend.id_srandi(rnd.rand(55))
def rand(*shape):
"""
Generate standard uniform pseudorandom numbers via a very efficient lagged
Fibonacci method.
This routine is used for all random number generation in this package and
can affect ID and SVD results.
Parameters
----------
*shape
Shape of output array
"""
# For details, see :func:`_backend.id_srand`, and :func:`_backend.id_srando`.
return _backend.id_srand(np.prod(shape)).reshape(shape)
def interp_decomp(A, eps_or_k, rand=True):
"""
Compute ID of a matrix.
An ID of a matrix `A` is a factorization defined by a rank `k`, a column
index array `idx`, and interpolation coefficients `proj` such that::
numpy.dot(A[:,idx[:k]], proj) = A[:,idx[k:]]
The original matrix can then be reconstructed as::
numpy.hstack([A[:,idx[:k]],
numpy.dot(A[:,idx[:k]], proj)]
)[:,numpy.argsort(idx)]
or via the routine :func:`reconstruct_matrix_from_id`. This can
equivalently be written as::
numpy.dot(A[:,idx[:k]],
numpy.hstack([numpy.eye(k), proj])
)[:,np.argsort(idx)]
in terms of the skeleton and interpolation matrices::
B = A[:,idx[:k]]
and::
P = numpy.hstack([numpy.eye(k), proj])[:,np.argsort(idx)]
respectively. See also :func:`reconstruct_interp_matrix` and
:func:`reconstruct_skel_matrix`.
The ID can be computed to any relative precision or rank (depending on the
value of `eps_or_k`). If a precision is specified (`eps_or_k < 1`), then
this function has the output signature::
k, idx, proj = interp_decomp(A, eps_or_k)
Otherwise, if a rank is specified (`eps_or_k >= 1`), then the output
signature is::
idx, proj = interp_decomp(A, eps_or_k)
.. This function automatically detects the form of the input parameters
and passes them to the appropriate backend. For details, see
:func:`_backend.iddp_id`, :func:`_backend.iddp_aid`,
:func:`_backend.iddp_rid`, :func:`_backend.iddr_id`,
:func:`_backend.iddr_aid`, :func:`_backend.iddr_rid`,
:func:`_backend.idzp_id`, :func:`_backend.idzp_aid`,
:func:`_backend.idzp_rid`, :func:`_backend.idzr_id`,
:func:`_backend.idzr_aid`, and :func:`_backend.idzr_rid`.
Parameters
----------
A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator` with `rmatvec`
Matrix to be factored
eps_or_k : float or int
Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of
approximation.
rand : bool, optional
Whether to use random sampling if `A` is of type :class:`numpy.ndarray`
(randomized algorithms are always used if `A` is of type
:class:`scipy.sparse.linalg.LinearOperator`).
Returns
-------
k : int
Rank required to achieve specified relative precision if
`eps_or_k < 1`.
idx : :class:`numpy.ndarray`
Column index array.
proj : :class:`numpy.ndarray`
Interpolation coefficients.
"""
from scipy.sparse.linalg import LinearOperator
real = _is_real(A)
if isinstance(A, np.ndarray):
if eps_or_k < 1:
eps = eps_or_k
if rand:
if real:
k, idx, proj = _backend.iddp_aid(eps, A)
else:
if _IS_32BIT:
raise _32BIT_ERROR
k, idx, proj = _backend.idzp_aid(eps, A)
else:
if real:
k, idx, proj = _backend.iddp_id(eps, A)
else:
k, idx, proj = _backend.idzp_id(eps, A)
return k, idx - 1, proj
else:
k = int(eps_or_k)
if rand:
if real:
idx, proj = _backend.iddr_aid(A, k)
else:
if _IS_32BIT:
raise _32BIT_ERROR
idx, proj = _backend.idzr_aid(A, k)
else:
if real:
idx, proj = _backend.iddr_id(A, k)
else:
idx, proj = _backend.idzr_id(A, k)
return idx - 1, proj
elif isinstance(A, LinearOperator):
m, n = A.shape
matveca = A.rmatvec
if eps_or_k < 1:
eps = eps_or_k
if real:
k, idx, proj = _backend.iddp_rid(eps, m, n, matveca)
else:
if _IS_32BIT:
raise _32BIT_ERROR
k, idx, proj = _backend.idzp_rid(eps, m, n, matveca)
return k, idx - 1, proj
else:
k = int(eps_or_k)
if real:
idx, proj = _backend.iddr_rid(m, n, matveca, k)
else:
if _IS_32BIT:
raise _32BIT_ERROR
idx, proj = _backend.idzr_rid(m, n, matveca, k)
return idx - 1, proj
else:
raise _TYPE_ERROR
def reconstruct_matrix_from_id(B, idx, proj):
"""
Reconstruct matrix from its ID.
A matrix `A` with skeleton matrix `B` and ID indices and coefficients `idx`
and `proj`, respectively, can be reconstructed as::
numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)]
See also :func:`reconstruct_interp_matrix` and
:func:`reconstruct_skel_matrix`.
.. This function automatically detects the matrix data type and calls the
appropriate backend. For details, see :func:`_backend.idd_reconid` and
:func:`_backend.idz_reconid`.
Parameters
----------
B : :class:`numpy.ndarray`
Skeleton matrix.
idx : :class:`numpy.ndarray`
Column index array.
proj : :class:`numpy.ndarray`
Interpolation coefficients.
Returns
-------
:class:`numpy.ndarray`
Reconstructed matrix.
"""
if _is_real(B):
return _backend.idd_reconid(B, idx + 1, proj)
else:
return _backend.idz_reconid(B, idx + 1, proj)
def reconstruct_interp_matrix(idx, proj):
"""
Reconstruct interpolation matrix from ID.
The interpolation matrix can be reconstructed from the ID indices and
coefficients `idx` and `proj`, respectively, as::
P = numpy.hstack([numpy.eye(proj.shape[0]), proj])[:,numpy.argsort(idx)]
The original matrix can then be reconstructed from its skeleton matrix `B`
via::
numpy.dot(B, P)
See also :func:`reconstruct_matrix_from_id` and
:func:`reconstruct_skel_matrix`.
.. This function automatically detects the matrix data type and calls the
appropriate backend. For details, see :func:`_backend.idd_reconint` and
:func:`_backend.idz_reconint`.
Parameters
----------
idx : :class:`numpy.ndarray`
Column index array.
proj : :class:`numpy.ndarray`
Interpolation coefficients.
Returns
-------
:class:`numpy.ndarray`
Interpolation matrix.
"""
if _is_real(proj):
return _backend.idd_reconint(idx + 1, proj)
else:
return _backend.idz_reconint(idx + 1, proj)
def reconstruct_skel_matrix(A, k, idx):
"""
Reconstruct skeleton matrix from ID.
The skeleton matrix can be reconstructed from the original matrix `A` and its
ID rank and indices `k` and `idx`, respectively, as::
B = A[:,idx[:k]]
The original matrix can then be reconstructed via::
numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)]
See also :func:`reconstruct_matrix_from_id` and
:func:`reconstruct_interp_matrix`.
.. This function automatically detects the matrix data type and calls the
appropriate backend. For details, see :func:`_backend.idd_copycols` and
:func:`_backend.idz_copycols`.
Parameters
----------
A : :class:`numpy.ndarray`
Original matrix.
k : int
Rank of ID.
idx : :class:`numpy.ndarray`
Column index array.
Returns
-------
:class:`numpy.ndarray`
Skeleton matrix.
"""
if _is_real(A):
return _backend.idd_copycols(A, k, idx + 1)
else:
return _backend.idz_copycols(A, k, idx + 1)
def id_to_svd(B, idx, proj):
"""
Convert ID to SVD.
The SVD reconstruction of a matrix with skeleton matrix `B` and ID indices and
coefficients `idx` and `proj`, respectively, is::
U, S, V = id_to_svd(B, idx, proj)
A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T))
See also :func:`svd`.
.. This function automatically detects the matrix data type and calls the
appropriate backend. For details, see :func:`_backend.idd_id2svd` and
:func:`_backend.idz_id2svd`.
Parameters
----------
B : :class:`numpy.ndarray`
Skeleton matrix.
idx : :class:`numpy.ndarray`
Column index array.
proj : :class:`numpy.ndarray`
Interpolation coefficients.
Returns
-------
U : :class:`numpy.ndarray`
Left singular vectors.
S : :class:`numpy.ndarray`
Singular values.
V : :class:`numpy.ndarray`
Right singular vectors.
"""
if _is_real(B):
U, V, S = _backend.idd_id2svd(B, idx + 1, proj)
else:
U, V, S = _backend.idz_id2svd(B, idx + 1, proj)
return U, S, V
def estimate_spectral_norm(A, its=20):
"""
Estimate spectral norm of a matrix by the randomized power method.
.. This function automatically detects the matrix data type and calls the
appropriate backend. For details, see :func:`_backend.idd_snorm` and
:func:`_backend.idz_snorm`.
Parameters
----------
A : :class:`scipy.sparse.linalg.LinearOperator`
Matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the
`matvec` and `rmatvec` methods (to apply the matrix and its adjoint).
its : int, optional
Number of power method iterations.
Returns
-------
float
Spectral norm estimate.
"""
from scipy.sparse.linalg import aslinearoperator
A = aslinearoperator(A)
m, n = A.shape
matvec = lambda x: A. matvec(x)
matveca = lambda x: A.rmatvec(x)
if _is_real(A):
return _backend.idd_snorm(m, n, matveca, matvec, its=its)
else:
return _backend.idz_snorm(m, n, matveca, matvec, its=its)
def estimate_spectral_norm_diff(A, B, its=20):
"""
Estimate spectral norm of the difference of two matrices by the randomized
power method.
.. This function automatically detects the matrix data type and calls the
appropriate backend. For details, see :func:`_backend.idd_diffsnorm` and
:func:`_backend.idz_diffsnorm`.
Parameters
----------
A : :class:`scipy.sparse.linalg.LinearOperator`
First matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with the
`matvec` and `rmatvec` methods (to apply the matrix and its adjoint).
B : :class:`scipy.sparse.linalg.LinearOperator`
Second matrix given as a :class:`scipy.sparse.linalg.LinearOperator` with
the `matvec` and `rmatvec` methods (to apply the matrix and its adjoint).
its : int, optional
Number of power method iterations.
Returns
-------
float
Spectral norm estimate of matrix difference.
"""
from scipy.sparse.linalg import aslinearoperator
A = aslinearoperator(A)
B = aslinearoperator(B)
m, n = A.shape
matvec1 = lambda x: A. matvec(x)
matveca1 = lambda x: A.rmatvec(x)
matvec2 = lambda x: B. matvec(x)
matveca2 = lambda x: B.rmatvec(x)
if _is_real(A):
return _backend.idd_diffsnorm(
m, n, matveca1, matveca2, matvec1, matvec2, its=its)
else:
return _backend.idz_diffsnorm(
m, n, matveca1, matveca2, matvec1, matvec2, its=its)
def svd(A, eps_or_k, rand=True):
"""
Compute SVD of a matrix via an ID.
An SVD of a matrix `A` is a factorization::
A = numpy.dot(U, numpy.dot(numpy.diag(S), V.conj().T))
where `U` and `V` have orthonormal columns and `S` is nonnegative.
The SVD can be computed to any relative precision or rank (depending on the
value of `eps_or_k`).
See also :func:`interp_decomp` and :func:`id_to_svd`.
.. This function automatically detects the form of the input parameters and
passes them to the appropriate backend. For details, see
:func:`_backend.iddp_svd`, :func:`_backend.iddp_asvd`,
:func:`_backend.iddp_rsvd`, :func:`_backend.iddr_svd`,
:func:`_backend.iddr_asvd`, :func:`_backend.iddr_rsvd`,
:func:`_backend.idzp_svd`, :func:`_backend.idzp_asvd`,
:func:`_backend.idzp_rsvd`, :func:`_backend.idzr_svd`,
:func:`_backend.idzr_asvd`, and :func:`_backend.idzr_rsvd`.
Parameters
----------
A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator`
Matrix to be factored, given as either a :class:`numpy.ndarray` or a
:class:`scipy.sparse.linalg.LinearOperator` with the `matvec` and
`rmatvec` methods (to apply the matrix and its adjoint).
eps_or_k : float or int
Relative error (if `eps_or_k < 1`) or rank (if `eps_or_k >= 1`) of
approximation.
rand : bool, optional
Whether to use random sampling if `A` is of type :class:`numpy.ndarray`
(randomized algorithms are always used if `A` is of type
:class:`scipy.sparse.linalg.LinearOperator`).
Returns
-------
U : :class:`numpy.ndarray`
Left singular vectors.
S : :class:`numpy.ndarray`
Singular values.
V : :class:`numpy.ndarray`
Right singular vectors.
"""
from scipy.sparse.linalg import LinearOperator
real = _is_real(A)
if isinstance(A, np.ndarray):
if eps_or_k < 1:
eps = eps_or_k
if rand:
if real:
U, V, S = _backend.iddp_asvd(eps, A)
else:
if _IS_32BIT:
raise _32BIT_ERROR
U, V, S = _backend.idzp_asvd(eps, A)
else:
if real:
U, V, S = _backend.iddp_svd(eps, A)
else:
U, V, S = _backend.idzp_svd(eps, A)
else:
k = int(eps_or_k)
if k > min(A.shape):
raise ValueError("Approximation rank %s exceeds min(A.shape) = "
" %s " % (k, min(A.shape)))
if rand:
if real:
U, V, S = _backend.iddr_asvd(A, k)
else:
if _IS_32BIT:
raise _32BIT_ERROR
U, V, S = _backend.idzr_asvd(A, k)
else:
if real:
U, V, S = _backend.iddr_svd(A, k)
else:
U, V, S = _backend.idzr_svd(A, k)
elif isinstance(A, LinearOperator):
m, n = A.shape
matvec = lambda x: A.matvec(x)
matveca = lambda x: A.rmatvec(x)
if eps_or_k < 1:
eps = eps_or_k
if real:
U, V, S = _backend.iddp_rsvd(eps, m, n, matveca, matvec)
else:
if _IS_32BIT:
raise _32BIT_ERROR
U, V, S = _backend.idzp_rsvd(eps, m, n, matveca, matvec)
else:
k = int(eps_or_k)
if real:
U, V, S = _backend.iddr_rsvd(m, n, matveca, matvec, k)
else:
if _IS_32BIT:
raise _32BIT_ERROR
U, V, S = _backend.idzr_rsvd(m, n, matveca, matvec, k)
else:
raise _TYPE_ERROR
return U, S, V
def estimate_rank(A, eps):
"""
Estimate matrix rank to a specified relative precision using randomized
methods.
The matrix `A` can be given as either a :class:`numpy.ndarray` or a
:class:`scipy.sparse.linalg.LinearOperator`, with different algorithms used
for each case. If `A` is of type :class:`numpy.ndarray`, then the output
rank is typically about 8 higher than the actual numerical rank.
.. This function automatically detects the form of the input parameters and
passes them to the appropriate backend. For details,
see :func:`_backend.idd_estrank`, :func:`_backend.idd_findrank`,
:func:`_backend.idz_estrank`, and :func:`_backend.idz_findrank`.
Parameters
----------
A : :class:`numpy.ndarray` or :class:`scipy.sparse.linalg.LinearOperator`
Matrix whose rank is to be estimated, given as either a
:class:`numpy.ndarray` or a :class:`scipy.sparse.linalg.LinearOperator`
with the `rmatvec` method (to apply the matrix adjoint).
eps : float
Relative error for numerical rank definition.
Returns
-------
int
Estimated matrix rank.
"""
from scipy.sparse.linalg import LinearOperator
real = _is_real(A)
if isinstance(A, np.ndarray):
if real:
rank = _backend.idd_estrank(eps, A)
else:
rank = _backend.idz_estrank(eps, A)
if rank == 0:
# special return value for nearly full rank
rank = min(A.shape)
return rank
elif isinstance(A, LinearOperator):
m, n = A.shape
matveca = A.rmatvec
if real:
return _backend.idd_findrank(eps, m, n, matveca)
else:
return _backend.idz_findrank(eps, m, n, matveca)
else:
raise _TYPE_ERROR
| 31.931343
| 92
| 0.628743
|
import scipy.linalg._interpolative_backend as _backend
import numpy as np
import sys
__all__ = [
'estimate_rank',
'estimate_spectral_norm',
'estimate_spectral_norm_diff',
'id_to_svd',
'interp_decomp',
'rand',
'reconstruct_interp_matrix',
'reconstruct_matrix_from_id',
'reconstruct_skel_matrix',
'seed',
'svd',
]
_DTYPE_ERROR = ValueError("invalid input dtype (input must be float64 or complex128)")
_TYPE_ERROR = TypeError("invalid input type (must be array or LinearOperator)")
_32BIT_ERROR = ValueError("interpolative decomposition on 32-bit systems "
"with complex128 is buggy")
_IS_32BIT = (sys.maxsize < 2**32)
def _is_real(A):
try:
if A.dtype == np.complex128:
return False
elif A.dtype == np.float64:
return True
else:
raise _DTYPE_ERROR
except AttributeError as e:
raise _TYPE_ERROR from e
def seed(seed=None):
if isinstance(seed, str) and seed == 'default':
_backend.id_srando()
elif hasattr(seed, '__len__'):
state = np.asfortranarray(seed, dtype=float)
if state.shape != (55,):
raise ValueError("invalid input size")
elif state.min() < 0 or state.max() > 1:
raise ValueError("values not in range [0,1]")
_backend.id_srandi(state)
elif seed is None:
_backend.id_srandi(np.random.rand(55))
else:
rnd = np.random.RandomState(seed)
_backend.id_srandi(rnd.rand(55))
def rand(*shape):
return _backend.id_srand(np.prod(shape)).reshape(shape)
def interp_decomp(A, eps_or_k, rand=True):
from scipy.sparse.linalg import LinearOperator
real = _is_real(A)
if isinstance(A, np.ndarray):
if eps_or_k < 1:
eps = eps_or_k
if rand:
if real:
k, idx, proj = _backend.iddp_aid(eps, A)
else:
if _IS_32BIT:
raise _32BIT_ERROR
k, idx, proj = _backend.idzp_aid(eps, A)
else:
if real:
k, idx, proj = _backend.iddp_id(eps, A)
else:
k, idx, proj = _backend.idzp_id(eps, A)
return k, idx - 1, proj
else:
k = int(eps_or_k)
if rand:
if real:
idx, proj = _backend.iddr_aid(A, k)
else:
if _IS_32BIT:
raise _32BIT_ERROR
idx, proj = _backend.idzr_aid(A, k)
else:
if real:
idx, proj = _backend.iddr_id(A, k)
else:
idx, proj = _backend.idzr_id(A, k)
return idx - 1, proj
elif isinstance(A, LinearOperator):
m, n = A.shape
matveca = A.rmatvec
if eps_or_k < 1:
eps = eps_or_k
if real:
k, idx, proj = _backend.iddp_rid(eps, m, n, matveca)
else:
if _IS_32BIT:
raise _32BIT_ERROR
k, idx, proj = _backend.idzp_rid(eps, m, n, matveca)
return k, idx - 1, proj
else:
k = int(eps_or_k)
if real:
idx, proj = _backend.iddr_rid(m, n, matveca, k)
else:
if _IS_32BIT:
raise _32BIT_ERROR
idx, proj = _backend.idzr_rid(m, n, matveca, k)
return idx - 1, proj
else:
raise _TYPE_ERROR
def reconstruct_matrix_from_id(B, idx, proj):
if _is_real(B):
return _backend.idd_reconid(B, idx + 1, proj)
else:
return _backend.idz_reconid(B, idx + 1, proj)
def reconstruct_interp_matrix(idx, proj):
if _is_real(proj):
return _backend.idd_reconint(idx + 1, proj)
else:
return _backend.idz_reconint(idx + 1, proj)
def reconstruct_skel_matrix(A, k, idx):
if _is_real(A):
return _backend.idd_copycols(A, k, idx + 1)
else:
return _backend.idz_copycols(A, k, idx + 1)
def id_to_svd(B, idx, proj):
if _is_real(B):
U, V, S = _backend.idd_id2svd(B, idx + 1, proj)
else:
U, V, S = _backend.idz_id2svd(B, idx + 1, proj)
return U, S, V
def estimate_spectral_norm(A, its=20):
from scipy.sparse.linalg import aslinearoperator
A = aslinearoperator(A)
m, n = A.shape
matvec = lambda x: A. matvec(x)
matveca = lambda x: A.rmatvec(x)
if _is_real(A):
return _backend.idd_snorm(m, n, matveca, matvec, its=its)
else:
return _backend.idz_snorm(m, n, matveca, matvec, its=its)
def estimate_spectral_norm_diff(A, B, its=20):
from scipy.sparse.linalg import aslinearoperator
A = aslinearoperator(A)
B = aslinearoperator(B)
m, n = A.shape
matvec1 = lambda x: A. matvec(x)
matveca1 = lambda x: A.rmatvec(x)
matvec2 = lambda x: B. matvec(x)
matveca2 = lambda x: B.rmatvec(x)
if _is_real(A):
return _backend.idd_diffsnorm(
m, n, matveca1, matveca2, matvec1, matvec2, its=its)
else:
return _backend.idz_diffsnorm(
m, n, matveca1, matveca2, matvec1, matvec2, its=its)
def svd(A, eps_or_k, rand=True):
from scipy.sparse.linalg import LinearOperator
real = _is_real(A)
if isinstance(A, np.ndarray):
if eps_or_k < 1:
eps = eps_or_k
if rand:
if real:
U, V, S = _backend.iddp_asvd(eps, A)
else:
if _IS_32BIT:
raise _32BIT_ERROR
U, V, S = _backend.idzp_asvd(eps, A)
else:
if real:
U, V, S = _backend.iddp_svd(eps, A)
else:
U, V, S = _backend.idzp_svd(eps, A)
else:
k = int(eps_or_k)
if k > min(A.shape):
raise ValueError("Approximation rank %s exceeds min(A.shape) = "
" %s " % (k, min(A.shape)))
if rand:
if real:
U, V, S = _backend.iddr_asvd(A, k)
else:
if _IS_32BIT:
raise _32BIT_ERROR
U, V, S = _backend.idzr_asvd(A, k)
else:
if real:
U, V, S = _backend.iddr_svd(A, k)
else:
U, V, S = _backend.idzr_svd(A, k)
elif isinstance(A, LinearOperator):
m, n = A.shape
matvec = lambda x: A.matvec(x)
matveca = lambda x: A.rmatvec(x)
if eps_or_k < 1:
eps = eps_or_k
if real:
U, V, S = _backend.iddp_rsvd(eps, m, n, matveca, matvec)
else:
if _IS_32BIT:
raise _32BIT_ERROR
U, V, S = _backend.idzp_rsvd(eps, m, n, matveca, matvec)
else:
k = int(eps_or_k)
if real:
U, V, S = _backend.iddr_rsvd(m, n, matveca, matvec, k)
else:
if _IS_32BIT:
raise _32BIT_ERROR
U, V, S = _backend.idzr_rsvd(m, n, matveca, matvec, k)
else:
raise _TYPE_ERROR
return U, S, V
def estimate_rank(A, eps):
from scipy.sparse.linalg import LinearOperator
real = _is_real(A)
if isinstance(A, np.ndarray):
if real:
rank = _backend.idd_estrank(eps, A)
else:
rank = _backend.idz_estrank(eps, A)
if rank == 0:
rank = min(A.shape)
return rank
elif isinstance(A, LinearOperator):
m, n = A.shape
matveca = A.rmatvec
if real:
return _backend.idd_findrank(eps, m, n, matveca)
else:
return _backend.idz_findrank(eps, m, n, matveca)
else:
raise _TYPE_ERROR
| true
| true
|
7902c40e0f551462066ba9cc008879b2ca3e1188
| 14,764
|
py
|
Python
|
dataloader.py
|
WuYichen-97/Learning-to-Purify-Noisy-Labels-via-Meta-Soft-Label-Corrector
|
9fda4caf75a35de891a48aae44b6cb0cd36ea8cc
|
[
"MIT"
] | 6
|
2021-02-01T07:13:36.000Z
|
2021-12-29T10:36:07.000Z
|
dataloader.py
|
WuYichen-97/Learning-to-Purify-Noisy-Labels-via-Meta-Soft-Label-Corrector
|
9fda4caf75a35de891a48aae44b6cb0cd36ea8cc
|
[
"MIT"
] | null | null | null |
dataloader.py
|
WuYichen-97/Learning-to-Purify-Noisy-Labels-via-Meta-Soft-Label-Corrector
|
9fda4caf75a35de891a48aae44b6cb0cd36ea8cc
|
[
"MIT"
] | 1
|
2021-08-18T15:57:04.000Z
|
2021-08-18T15:57:04.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 27 17:38:25 2020
@author: Wu Yichen
"""
from PIL import Image
import os
import os.path
import errno
import numpy as np
import sys
import pickle
import torch.utils.data as data
from torchvision.datasets.utils import download_url, check_integrity
import torch
import torch.nn.functional as F
from torch.autograd import Variable as V
import wideresnet as wrn
import torchvision.transforms as transforms
def uniform_mix_C(mixing_ratio, num_classes):
'''
returns a linear interpolation of a uniform matrix and an identity matrix
'''
return mixing_ratio * np.full((num_classes, num_classes), 1 / num_classes) + \
(1 - mixing_ratio) * np.eye(num_classes)
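# Illustrative example (added note): uniform_mix_C(0.4, 2) returns
#   [[0.8, 0.2],
#    [0.2, 0.8]]
# i.e. each label keeps 1 - 0.4 + 0.4/2 = 0.8 probability of staying unchanged.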
def flip_labels_C(corruption_prob, num_classes, seed=1):
'''
returns a matrix with (1 - corruption_prob) on the diagonals, and corruption_prob
concentrated in only one other entry for each row
'''
np.random.seed(seed)
C = np.eye(num_classes) * (1 - corruption_prob)
row_indices = np.arange(num_classes)
for i in range(num_classes):
C[i][np.random.choice(row_indices[row_indices != i])] = corruption_prob
return C
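# Illustrative example (added note): flip_labels_C(0.3, 3, seed=1) keeps 0.7 on
# each diagonal entry and puts the full 0.3 on one randomly chosen off-diagonal
# entry per row, e.g.
#   [[0.7, 0.3, 0.0],
#    [0.0, 0.7, 0.3],
#    [0.3, 0.0, 0.7]]
# (which column receives the 0.3 depends on the seed).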
def flip_labels_C_two(corruption_prob, num_classes, seed=1):
'''
returns a matrix with (1 - corruption_prob) on the diagonals, and corruption_prob
    split evenly across two other entries for each row
'''
np.random.seed(seed)
C = np.eye(num_classes) * (1 - corruption_prob)
row_indices = np.arange(num_classes)
for i in range(num_classes):
C[i][np.random.choice(row_indices[row_indices != i], 2, replace=False)] = corruption_prob / 2
return C
class CIFAR10(data.Dataset):
base_folder = 'cifar-10-batches-py'
url = "http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
filename = "cifar-10-python.tar.gz"
tgz_md5 = 'c58f30108f718f92721af3b95e74349a'
train_list = [
['data_batch_1', 'c99cafc152244af753f735de768cd75f'],
['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'],
['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'],
['data_batch_4', '634d18415352ddfa80567beed471001a'],
['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'],
]
test_list = [
['test_batch', '40351d587109b95175f43aff81a1287e'],
]
def __init__(self, root='', train=True, meta=True, num_meta=1000,
corruption_prob=0, corruption_type='unif', transform=None, target_transform=None,
download=False, seed=1):
self.count = 0
self.root = root
self.transform = transform
self.target_transform = target_transform
self.train = train # training set or test set
self.meta = meta
self.corruption_prob = corruption_prob
self.num_meta = num_meta
if download:
self.download()
if not self._check_integrity():
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
# now load the picked numpy arrays
if self.train:
self.train_data = []
self.train_labels = []
self.train_coarse_labels = []
self.train_labels_true = []
self.soft_labels = []
for fentry in self.train_list:
f = fentry[0]
file = os.path.join(root, self.base_folder, f)
fo = open(file, 'rb')
if sys.version_info[0] == 2:
entry = pickle.load(fo)
else:
entry = pickle.load(fo, encoding='latin1')
self.train_data.append(entry['data'])
if 'labels' in entry:
self.train_labels += entry['labels']
self.train_labels_true += entry['labels']
img_num_list = [int(self.num_meta/10)] * 10
num_classes = 10
else:
self.train_labels += entry['fine_labels']
self.train_labels_true += entry['fine_labels']
self.train_coarse_labels += entry['coarse_labels']
img_num_list = [int(self.num_meta/100)] * 100
num_classes = 100
fo.close()
self.train_data = np.concatenate(self.train_data)
self.train_data = self.train_data.reshape((50000, 3, 32, 32))
self.train_data = self.train_data.transpose((0, 2, 3, 1)) # convert to HWC
data_list_val = {}
for j in range(num_classes):
data_list_val[j] = [i for i, label in enumerate(self.train_labels) if label == j]
idx_to_meta = []
idx_to_train = []
print(img_num_list)
for cls_idx, img_id_list in data_list_val.items():
np.random.shuffle(img_id_list)
img_num = img_num_list[int(cls_idx)]
idx_to_meta.extend(img_id_list[:img_num])
idx_to_train.extend(img_id_list[img_num:])
if meta is True:
self.train_data = self.train_data[idx_to_meta]
self.train_labels = list(np.array(self.train_labels)[idx_to_meta])
else:
self.train_data = self.train_data[idx_to_train]
self.train_labels = list(np.array(self.train_labels)[idx_to_train])
self.train_labels_true = list(np.array(self.train_labels_true)[idx_to_train])
self.soft_labels = list(np.zeros((len(self.train_data),num_classes),dtype=np.float32))
self.prediction = np.zeros((len(self.train_data),10,num_classes),dtype=np.float32)
clean_labels = self.train_labels
np.save('clean_labels.npy', clean_labels)
if corruption_type == 'unif':
C = uniform_mix_C(self.corruption_prob, num_classes)
print(C)
self.C = C
elif corruption_type == 'flip':
C = flip_labels_C(self.corruption_prob, num_classes)
print(C)
self.C = C
elif corruption_type == 'flip2':
C = flip_labels_C_two(self.corruption_prob, num_classes)
print(C)
self.C = C
elif corruption_type == 'hierarchical':
assert num_classes == 100, 'You must use CIFAR-100 with the hierarchical corruption.'
coarse_fine = []
for i in range(20):
coarse_fine.append(set())
for i in range(len(self.train_labels)):
coarse_fine[self.train_coarse_labels[i]].add(self.train_labels[i])
for i in range(20):
coarse_fine[i] = list(coarse_fine[i])
C = np.eye(num_classes) * (1 - corruption_prob)
for i in range(20):
tmp = np.copy(coarse_fine[i])
for j in range(len(tmp)):
tmp2 = np.delete(np.copy(tmp), j)
C[tmp[j], tmp2] += corruption_prob * 1/len(tmp2)
self.C = C
print(C)
elif corruption_type == 'clabels':
net = wrn.WideResNet(40, num_classes, 2, dropRate=0.3).cuda()
model_name = './cifar{}_labeler'.format(num_classes)
net.load_state_dict(torch.load(model_name))
net.eval()
else:
assert False, "Invalid corruption type '{}' given. Must be in {'unif', 'flip', 'hierarchical'}".format(corruption_type)
np.random.seed(seed)
if corruption_type == 'clabels':
mean = [x / 255 for x in [125.3, 123.0, 113.9]]
std = [x / 255 for x in [63.0, 62.1, 66.7]]
test_transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize(mean, std)])
# obtain sampling probabilities
sampling_probs = []
print('Starting labeling')
for i in range((len(self.train_labels) // 64) + 1):
current = self.train_data[i*64:(i+1)*64]
current = [Image.fromarray(current[i]) for i in range(len(current))]
current = torch.cat([test_transform(current[i]).unsqueeze(0) for i in range(len(current))], dim=0)
data = V(current).cuda()
logits = net(data)
                    smax = F.softmax(logits / 5) # temperature of 5
sampling_probs.append(smax.data.cpu().numpy())
sampling_probs = np.concatenate(sampling_probs, 0)
print('Finished labeling 1')
new_labeling_correct = 0
argmax_labeling_correct = 0
for i in range(len(self.train_labels)):
old_label = self.train_labels[i]
new_label = np.random.choice(num_classes, p=sampling_probs[i])
self.train_labels[i] = new_label
if old_label == new_label:
new_labeling_correct += 1
if old_label == np.argmax(sampling_probs[i]):
argmax_labeling_correct += 1
print('Finished labeling 2')
print('New labeling accuracy:', new_labeling_correct / len(self.train_labels))
print('Argmax labeling accuracy:', argmax_labeling_correct / len(self.train_labels))
else:
for i in range(len(self.train_labels)):
self.train_labels_true[i] = self.train_labels[i]
for i in range(len(self.train_labels)):
self.train_labels[i] = np.random.choice(num_classes, p=C[self.train_labels[i]])
print('train',len(self.train_labels))
print('type',type(self.train_labels))
self.corruption_matrix = C
noise_labels = self.train_labels
np.save('noise_labels.npy', noise_labels)
else:
f = self.test_list[0][0]
file = os.path.join(root, self.base_folder, f)
fo = open(file, 'rb')
if sys.version_info[0] == 2:
entry = pickle.load(fo)
else:
entry = pickle.load(fo, encoding='latin1')
self.test_data = entry['data']
if 'labels' in entry:
self.test_labels = entry['labels']
else:
self.test_labels = entry['fine_labels']
fo.close()
self.test_data = self.test_data.reshape((10000, 3, 32, 32))
self.test_data = self.test_data.transpose((0, 2, 3, 1)) # convert to HWC
def label_update(self, results):
self.count += 1
        # When updating the noisy label y_i with probability s, we use the network's
        # average output probability over the past 10 epochs as s.
        idx = (self.count - 1) % 10
self.prediction[:, idx] = results
#self.prediction[:] =results
#print(self.prediction)
if self.count == 79: #79
self.soft_labels = self.prediction.mean(axis=1)
#print(self.soft_labels.shape)
#print(self.soft_labels)
#self.soft_labels = list(np.argmax(self.soft_labels, axis=1).astype(np.int64))
if self.count > 79:
self.soft_labels = results
#self.soft_labels = list(np.argmax(self.soft_labels, axis=1).astype(np.int64))
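    # Hedged standalone sketch of the schedule above (added for exposition; the sizes
    # and epoch count are illustrative): each call writes that epoch's softmax outputs
    # into slot (count - 1) % 10 of a rolling buffer; at epoch 79 the soft labels
    # become the 10-epoch average, and afterwards they simply track the latest outputs.
    @staticmethod
    def _soft_label_window_demo(num_samples=4, num_classes=3, num_epochs=81):
        buffer = np.zeros((num_samples, 10, num_classes), dtype=np.float32)
        soft_labels = None
        for count in range(1, num_epochs + 1):
            probs = np.full((num_samples, num_classes), 1.0 / num_classes, dtype=np.float32)
            buffer[:, (count - 1) % 10] = probs
            if count == 79:
                soft_labels = buffer.mean(axis=1)
            elif count > 79:
                soft_labels = probs
        return soft_labels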
def __getitem__(self, index):
if self.train:
if self.meta:
#print(self.train_labels[index])
img, target, target_true= self.train_data[index], self.train_labels[index],self.train_labels_true[index]
else:
img, target, target_true= self.train_data[index], self.train_labels[index],self.train_labels_true[index]
soft_labels = self.soft_labels[index]
else:
img, target = self.test_data[index], self.test_labels[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
if self.train :
if self.meta:
return img, target
else:
return img,target,target_true,soft_labels,index
else:
return img, target
def __len__(self):
if self.train:
if self.meta is True:
return self.num_meta
else:
return 50000 - self.num_meta
else:
return 10000
def _check_integrity(self):
root = self.root
for fentry in (self.train_list + self.test_list):
filename, md5 = fentry[0], fentry[1]
fpath = os.path.join(root, self.base_folder, filename)
if not check_integrity(fpath, md5):
return False
return True
def download(self):
import tarfile
if self._check_integrity():
print('Files already downloaded and verified')
return
root = self.root
download_url(self.url, root, self.filename, self.tgz_md5)
# extract file
cwd = os.getcwd()
tar = tarfile.open(os.path.join(root, self.filename), "r:gz")
os.chdir(root)
tar.extractall()
tar.close()
os.chdir(cwd)
class CIFAR100(CIFAR10):
base_folder = 'cifar-100-python'
url = "http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
filename = "cifar-100-python.tar.gz"
tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
train_list = [
['train', '16019d7e3df5f24257cddd939b257f8d'],
]
test_list = [
['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
]
| 41.125348
| 149
| 0.5424
|
from PIL import Image
import os
import os.path
import errno
import numpy as np
import sys
import pickle
import torch.utils.data as data
from torchvision.datasets.utils import download_url, check_integrity
import torch
import torch.nn.functional as F
from torch.autograd import Variable as V
import wideresnet as wrn
import torchvision.transforms as transforms
def uniform_mix_C(mixing_ratio, num_classes):
return mixing_ratio * np.full((num_classes, num_classes), 1 / num_classes) + \
(1 - mixing_ratio) * np.eye(num_classes)
def flip_labels_C(corruption_prob, num_classes, seed=1):
np.random.seed(seed)
C = np.eye(num_classes) * (1 - corruption_prob)
row_indices = np.arange(num_classes)
for i in range(num_classes):
C[i][np.random.choice(row_indices[row_indices != i])] = corruption_prob
return C
def flip_labels_C_two(corruption_prob, num_classes, seed=1):
np.random.seed(seed)
C = np.eye(num_classes) * (1 - corruption_prob)
row_indices = np.arange(num_classes)
for i in range(num_classes):
C[i][np.random.choice(row_indices[row_indices != i], 2, replace=False)] = corruption_prob / 2
return C
class CIFAR10(data.Dataset):
base_folder = 'cifar-10-batches-py'
url = "http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
filename = "cifar-10-python.tar.gz"
tgz_md5 = 'c58f30108f718f92721af3b95e74349a'
train_list = [
['data_batch_1', 'c99cafc152244af753f735de768cd75f'],
['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'],
['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'],
['data_batch_4', '634d18415352ddfa80567beed471001a'],
['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'],
]
test_list = [
['test_batch', '40351d587109b95175f43aff81a1287e'],
]
def __init__(self, root='', train=True, meta=True, num_meta=1000,
corruption_prob=0, corruption_type='unif', transform=None, target_transform=None,
download=False, seed=1):
self.count = 0
self.root = root
self.transform = transform
self.target_transform = target_transform
self.train = train
self.meta = meta
self.corruption_prob = corruption_prob
self.num_meta = num_meta
if download:
self.download()
if not self._check_integrity():
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
if self.train:
self.train_data = []
self.train_labels = []
self.train_coarse_labels = []
self.train_labels_true = []
self.soft_labels = []
for fentry in self.train_list:
f = fentry[0]
file = os.path.join(root, self.base_folder, f)
fo = open(file, 'rb')
if sys.version_info[0] == 2:
entry = pickle.load(fo)
else:
entry = pickle.load(fo, encoding='latin1')
self.train_data.append(entry['data'])
if 'labels' in entry:
self.train_labels += entry['labels']
self.train_labels_true += entry['labels']
img_num_list = [int(self.num_meta/10)] * 10
num_classes = 10
else:
self.train_labels += entry['fine_labels']
self.train_labels_true += entry['fine_labels']
self.train_coarse_labels += entry['coarse_labels']
img_num_list = [int(self.num_meta/100)] * 100
num_classes = 100
fo.close()
self.train_data = np.concatenate(self.train_data)
self.train_data = self.train_data.reshape((50000, 3, 32, 32))
self.train_data = self.train_data.transpose((0, 2, 3, 1))
data_list_val = {}
for j in range(num_classes):
data_list_val[j] = [i for i, label in enumerate(self.train_labels) if label == j]
idx_to_meta = []
idx_to_train = []
print(img_num_list)
for cls_idx, img_id_list in data_list_val.items():
np.random.shuffle(img_id_list)
img_num = img_num_list[int(cls_idx)]
idx_to_meta.extend(img_id_list[:img_num])
idx_to_train.extend(img_id_list[img_num:])
if meta is True:
self.train_data = self.train_data[idx_to_meta]
self.train_labels = list(np.array(self.train_labels)[idx_to_meta])
else:
self.train_data = self.train_data[idx_to_train]
self.train_labels = list(np.array(self.train_labels)[idx_to_train])
self.train_labels_true = list(np.array(self.train_labels_true)[idx_to_train])
self.soft_labels = list(np.zeros((len(self.train_data),num_classes),dtype=np.float32))
self.prediction = np.zeros((len(self.train_data),10,num_classes),dtype=np.float32)
clean_labels = self.train_labels
np.save('clean_labels.npy', clean_labels)
if corruption_type == 'unif':
C = uniform_mix_C(self.corruption_prob, num_classes)
print(C)
self.C = C
elif corruption_type == 'flip':
C = flip_labels_C(self.corruption_prob, num_classes)
print(C)
self.C = C
elif corruption_type == 'flip2':
C = flip_labels_C_two(self.corruption_prob, num_classes)
print(C)
self.C = C
elif corruption_type == 'hierarchical':
assert num_classes == 100, 'You must use CIFAR-100 with the hierarchical corruption.'
coarse_fine = []
for i in range(20):
coarse_fine.append(set())
for i in range(len(self.train_labels)):
coarse_fine[self.train_coarse_labels[i]].add(self.train_labels[i])
for i in range(20):
coarse_fine[i] = list(coarse_fine[i])
C = np.eye(num_classes) * (1 - corruption_prob)
for i in range(20):
tmp = np.copy(coarse_fine[i])
for j in range(len(tmp)):
tmp2 = np.delete(np.copy(tmp), j)
C[tmp[j], tmp2] += corruption_prob * 1/len(tmp2)
self.C = C
print(C)
elif corruption_type == 'clabels':
net = wrn.WideResNet(40, num_classes, 2, dropRate=0.3).cuda()
model_name = './cifar{}_labeler'.format(num_classes)
net.load_state_dict(torch.load(model_name))
net.eval()
else:
                assert False, "Invalid corruption type '{}' given. Must be in {{'unif', 'flip', 'flip2', 'hierarchical', 'clabels'}}".format(corruption_type)
np.random.seed(seed)
if corruption_type == 'clabels':
mean = [x / 255 for x in [125.3, 123.0, 113.9]]
std = [x / 255 for x in [63.0, 62.1, 66.7]]
test_transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize(mean, std)])
sampling_probs = []
print('Starting labeling')
for i in range((len(self.train_labels) // 64) + 1):
current = self.train_data[i*64:(i+1)*64]
current = [Image.fromarray(current[i]) for i in range(len(current))]
current = torch.cat([test_transform(current[i]).unsqueeze(0) for i in range(len(current))], dim=0)
data = V(current).cuda()
logits = net(data)
smax = F.softmax(logits / 5)
sampling_probs.append(smax.data.cpu().numpy())
sampling_probs = np.concatenate(sampling_probs, 0)
print('Finished labeling 1')
new_labeling_correct = 0
argmax_labeling_correct = 0
for i in range(len(self.train_labels)):
old_label = self.train_labels[i]
new_label = np.random.choice(num_classes, p=sampling_probs[i])
self.train_labels[i] = new_label
if old_label == new_label:
new_labeling_correct += 1
if old_label == np.argmax(sampling_probs[i]):
argmax_labeling_correct += 1
print('Finished labeling 2')
print('New labeling accuracy:', new_labeling_correct / len(self.train_labels))
print('Argmax labeling accuracy:', argmax_labeling_correct / len(self.train_labels))
else:
for i in range(len(self.train_labels)):
self.train_labels_true[i] = self.train_labels[i]
for i in range(len(self.train_labels)):
self.train_labels[i] = np.random.choice(num_classes, p=C[self.train_labels[i]])
print('train',len(self.train_labels))
print('type',type(self.train_labels))
self.corruption_matrix = C
noise_labels = self.train_labels
np.save('noise_labels.npy', noise_labels)
else:
f = self.test_list[0][0]
file = os.path.join(root, self.base_folder, f)
fo = open(file, 'rb')
if sys.version_info[0] == 2:
entry = pickle.load(fo)
else:
entry = pickle.load(fo, encoding='latin1')
self.test_data = entry['data']
if 'labels' in entry:
self.test_labels = entry['labels']
else:
self.test_labels = entry['fine_labels']
fo.close()
self.test_data = self.test_data.reshape((10000, 3, 32, 32))
self.test_data = self.test_data.transpose((0, 2, 3, 1))
def label_update(self, results):
self.count += 1
        idx = (self.count - 1) % 10
        self.prediction[:, idx] = results
if self.count == 79:
self.soft_labels = self.prediction.mean(axis=1)
if self.count > 79:
self.soft_labels = results
def __getitem__(self, index):
if self.train:
if self.meta:
img, target, target_true= self.train_data[index], self.train_labels[index],self.train_labels_true[index]
else:
img, target, target_true= self.train_data[index], self.train_labels[index],self.train_labels_true[index]
soft_labels = self.soft_labels[index]
else:
img, target = self.test_data[index], self.test_labels[index]
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
if self.train :
if self.meta:
return img, target
else:
return img,target,target_true,soft_labels,index
else:
return img, target
def __len__(self):
if self.train:
if self.meta is True:
return self.num_meta
else:
return 50000 - self.num_meta
else:
return 10000
def _check_integrity(self):
root = self.root
for fentry in (self.train_list + self.test_list):
filename, md5 = fentry[0], fentry[1]
fpath = os.path.join(root, self.base_folder, filename)
if not check_integrity(fpath, md5):
return False
return True
def download(self):
import tarfile
if self._check_integrity():
print('Files already downloaded and verified')
return
root = self.root
download_url(self.url, root, self.filename, self.tgz_md5)
cwd = os.getcwd()
tar = tarfile.open(os.path.join(root, self.filename), "r:gz")
os.chdir(root)
tar.extractall()
tar.close()
os.chdir(cwd)
class CIFAR100(CIFAR10):
base_folder = 'cifar-100-python'
url = "http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
filename = "cifar-100-python.tar.gz"
tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
train_list = [
['train', '16019d7e3df5f24257cddd939b257f8d'],
]
test_list = [
['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
]
| true
| true
|
7902c53b185572acc62ef81ebd0bc6fd7de38a1b
| 4,733
|
py
|
Python
|
src/sentry/api/serializers/models/event.py
|
E-LLP/sentry
|
83d97a0ca45cdaac1d5f3026058131a3aeae0068
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/api/serializers/models/event.py
|
E-LLP/sentry
|
83d97a0ca45cdaac1d5f3026058131a3aeae0068
|
[
"BSD-3-Clause"
] | null | null | null |
src/sentry/api/serializers/models/event.py
|
E-LLP/sentry
|
83d97a0ca45cdaac1d5f3026058131a3aeae0068
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
from datetime import datetime
from django.utils import timezone
from sentry.api.serializers import Serializer, register
from sentry.models import Event, EventError
@register(Event)
class EventSerializer(Serializer):
_reserved_keys = frozenset(['sentry.interfaces.User', 'sdk', 'device'])
def _get_entries(self, event, user, is_public=False):
# XXX(dcramer): These are called entries for future-proofing
interface_list = []
for key, interface in event.interfaces.iteritems():
# we treat user as a special contextual item
if key in self._reserved_keys:
continue
data = interface.get_api_context(is_public=is_public)
# data might not be returned for e.g. a public HTTP repr
if not data:
continue
entry = {
'data': data,
'type': interface.get_alias(),
}
interface_list.append((interface, entry))
interface_list.sort(key=lambda x: x[0].get_display_score(), reverse=True)
return [i[1] for i in interface_list]
def get_attrs(self, item_list, user, is_public=False):
Event.objects.bind_nodes(item_list, 'data')
results = {}
for item in item_list:
user_interface = item.interfaces.get('sentry.interfaces.User')
if user_interface:
user_data = user_interface.to_json()
else:
user_data = None
device_interface = item.interfaces.get('device')
if device_interface:
device_data = device_interface.to_json()
else:
device_data = None
sdk_interface = item.interfaces.get('sdk')
if sdk_interface:
sdk_data = sdk_interface.to_json()
else:
sdk_data = None
results[item] = {
'entries': self._get_entries(item, user, is_public=is_public),
'user': user_data,
'sdk': sdk_data,
'device': device_data,
}
return results
def serialize(self, obj, attrs, user):
errors = []
error_set = set()
for error in obj.data.get('errors', []):
message = EventError.get_message(error)
if message in error_set:
continue
error_set.add(message)
error_result = {
'type': error['type'],
'message': message,
'data': {
k: v for k, v in error.iteritems()
if k != 'type'
},
}
errors.append(error_result)
tags = sorted([
{
'key': k.split('sentry:', 1)[-1],
'value': v
} for k, v in obj.get_tags()
], key=lambda x: x['key'])
received = obj.data.get('received')
if received:
# Sentry at one point attempted to record invalid types here.
# Remove after June 2 2016
try:
received = datetime.utcfromtimestamp(received).replace(
tzinfo=timezone.utc,
)
except TypeError:
received = None
event_type = obj.data.get('type', 'default')
metadata = obj.data.get('metadata') or {
'title': obj.message_short,
}
# TODO(dcramer): move release serialization here
d = {
'id': str(obj.id),
'groupID': obj.group.id,
'eventID': str(obj.event_id),
'size': obj.size,
'entries': attrs['entries'],
# See GH-3248
'message': obj.get_legacy_message(),
'user': attrs['user'],
'sdk': attrs['sdk'],
'device': attrs['device'],
'context': obj.data.get('extra', {}),
'packages': obj.data.get('modules', {}),
'type': event_type,
'metadata': metadata,
'tags': tags,
'platform': obj.platform,
'dateCreated': obj.datetime,
'dateReceived': received,
'errors': errors,
}
return d
class SharedEventSerializer(EventSerializer):
def get_attrs(self, item_list, user):
return super(SharedEventSerializer, self).get_attrs(
item_list, user, is_public=True
)
def serialize(self, obj, attrs, user):
result = super(SharedEventSerializer, self).serialize(obj, attrs, user)
del result['context']
del result['user']
del result['tags']
return result
| 32.868056
| 81
| 0.530319
|
from __future__ import absolute_import
from datetime import datetime
from django.utils import timezone
from sentry.api.serializers import Serializer, register
from sentry.models import Event, EventError
@register(Event)
class EventSerializer(Serializer):
_reserved_keys = frozenset(['sentry.interfaces.User', 'sdk', 'device'])
def _get_entries(self, event, user, is_public=False):
interface_list = []
for key, interface in event.interfaces.iteritems():
if key in self._reserved_keys:
continue
data = interface.get_api_context(is_public=is_public)
if not data:
continue
entry = {
'data': data,
'type': interface.get_alias(),
}
interface_list.append((interface, entry))
interface_list.sort(key=lambda x: x[0].get_display_score(), reverse=True)
return [i[1] for i in interface_list]
def get_attrs(self, item_list, user, is_public=False):
Event.objects.bind_nodes(item_list, 'data')
results = {}
for item in item_list:
user_interface = item.interfaces.get('sentry.interfaces.User')
if user_interface:
user_data = user_interface.to_json()
else:
user_data = None
device_interface = item.interfaces.get('device')
if device_interface:
device_data = device_interface.to_json()
else:
device_data = None
sdk_interface = item.interfaces.get('sdk')
if sdk_interface:
sdk_data = sdk_interface.to_json()
else:
sdk_data = None
results[item] = {
'entries': self._get_entries(item, user, is_public=is_public),
'user': user_data,
'sdk': sdk_data,
'device': device_data,
}
return results
def serialize(self, obj, attrs, user):
errors = []
error_set = set()
for error in obj.data.get('errors', []):
message = EventError.get_message(error)
if message in error_set:
continue
error_set.add(message)
error_result = {
'type': error['type'],
'message': message,
'data': {
k: v for k, v in error.iteritems()
if k != 'type'
},
}
errors.append(error_result)
tags = sorted([
{
'key': k.split('sentry:', 1)[-1],
'value': v
} for k, v in obj.get_tags()
], key=lambda x: x['key'])
received = obj.data.get('received')
if received:
try:
received = datetime.utcfromtimestamp(received).replace(
tzinfo=timezone.utc,
)
except TypeError:
received = None
event_type = obj.data.get('type', 'default')
metadata = obj.data.get('metadata') or {
'title': obj.message_short,
}
d = {
'id': str(obj.id),
'groupID': obj.group.id,
'eventID': str(obj.event_id),
'size': obj.size,
'entries': attrs['entries'],
'message': obj.get_legacy_message(),
'user': attrs['user'],
'sdk': attrs['sdk'],
'device': attrs['device'],
'context': obj.data.get('extra', {}),
'packages': obj.data.get('modules', {}),
'type': event_type,
'metadata': metadata,
'tags': tags,
'platform': obj.platform,
'dateCreated': obj.datetime,
'dateReceived': received,
'errors': errors,
}
return d
class SharedEventSerializer(EventSerializer):
def get_attrs(self, item_list, user):
return super(SharedEventSerializer, self).get_attrs(
item_list, user, is_public=True
)
def serialize(self, obj, attrs, user):
result = super(SharedEventSerializer, self).serialize(obj, attrs, user)
del result['context']
del result['user']
del result['tags']
return result
| true
| true
|
7902c56f3ea0b4239139062b5bcad280b0812ae0
| 3,385
|
py
|
Python
|
test/test_misc.py
|
abulimov/lyricstagger
|
f57caa78357391178348ae80d5c969724343292a
|
[
"MIT"
] | 5
|
2015-12-03T08:40:32.000Z
|
2020-08-17T20:30:03.000Z
|
test/test_misc.py
|
abulimov/lyricstagger
|
f57caa78357391178348ae80d5c969724343292a
|
[
"MIT"
] | 1
|
2018-07-14T15:54:04.000Z
|
2018-07-14T15:54:04.000Z
|
test/test_misc.py
|
abulimov/lyricstagger
|
f57caa78357391178348ae80d5c969724343292a
|
[
"MIT"
] | null | null | null |
"""
Tests for lyrics_tagger
"""
from __future__ import unicode_literals
from __future__ import print_function
import unittest
import mock
import lyricstagger.misc as misc
import test.fakers as fakers
# pylint: disable=R0904
class MiscCheck(unittest.TestCase):
"""Test miscelanous functions"""
def test_get_tags_multi(self):
"""Test get_tags with multi-tag file"""
for mime in ['audio/mp3', 'audio/ogg']:
audio = fakers.FakeFile(mime, ['Artist'], ['Album'],
['Title'], 'Lyrics')
tags = misc.get_tags(audio)
self.assertEqual(tags['album'], "Album")
self.assertEqual(tags['artist'], "Artist")
self.assertEqual(tags['title'], "Title")
self.assertEqual(tags['lyrics'], "Lyrics")
def test_get_tags_single(self):
"""Test get_tags with single-tag file"""
for mime in ['audio/mp3', 'audio/ogg']:
audio = fakers.FakeFile(mime, 'Artist', 'Album', 'Title', 'Lyrics')
tags = misc.get_tags(audio)
self.assertEqual(tags['album'], "Album")
self.assertEqual(tags['artist'], "Artist")
self.assertEqual(tags['title'], "Title")
self.assertEqual(tags['lyrics'], "Lyrics")
def test_get_tags_broken(self):
"""Test get_tags with broken tags"""
audio = fakers.BrokenFile('audio/ogg', {'test': 'Test',
'album': 'Album',
'title': 'Title'})
tags = misc.get_tags(audio)
self.assertEqual(tags, None)
@mock.patch('lyricstagger.misc.click.edit', fakers.mock_edit_ok)
def test_edit_lyrics_empty_ok(self):
"""Test edit_lyrics with empty lyrics and correct edit"""
audio = fakers.FakeFile('audio/ogg', 'Artist', 'Album', 'Title')
lyrics = misc.edit_lyrics(audio)
self.assertEqual(lyrics, "")
@mock.patch('lyricstagger.misc.click.edit', fakers.mock_edit_fail)
def test_edit_lyrics_empty_fail(self):
"""Test edit_lyrics with empty lyrics and errored edit"""
audio = fakers.FakeFile('audio/ogg', 'Artist', 'Album', 'Title')
lyrics = misc.edit_lyrics(audio)
self.assertEqual(lyrics, None)
@mock.patch('lyricstagger.misc.click.edit', fakers.mock_edit_ok)
def test_edit_lyrics_nonempty_ok(self):
"""Test edit_lyrics with non-empty lyrics and correct edit"""
audio = fakers.FakeFile('audio/ogg', 'Artist', 'Album',
'Title', 'Lyrics')
lyrics = misc.edit_lyrics(audio)
self.assertEqual(lyrics, "Lyrics")
@mock.patch('lyricstagger.misc.click.edit', fakers.mock_edit_fail)
def test_edit_lyrics_nonempty_fail(self):
"""Test edit_lyrics with non-empty lyrics and errored edit"""
audio = fakers.FakeFile('audio/ogg', 'Artist', 'Album',
'Title', 'Lyrics')
lyrics = misc.edit_lyrics(audio)
self.assertEqual(lyrics, None)
def test_get_file_list(self):
file_list = list(misc.get_file_list(["test/test_data"]))
self.assertIn("test/test_data/test_dir_0/test_file_0.ogg", file_list)
self.assertIn("test/test_data/test_dir_1/test_file_1.ogg", file_list)
# pylint: enable=R0904
if __name__ == '__main__':
unittest.main()
| 41.280488
| 79
| 0.612703
|
from __future__ import unicode_literals
from __future__ import print_function
import unittest
import mock
import lyricstagger.misc as misc
import test.fakers as fakers
class MiscCheck(unittest.TestCase):
def test_get_tags_multi(self):
for mime in ['audio/mp3', 'audio/ogg']:
audio = fakers.FakeFile(mime, ['Artist'], ['Album'],
['Title'], 'Lyrics')
tags = misc.get_tags(audio)
self.assertEqual(tags['album'], "Album")
self.assertEqual(tags['artist'], "Artist")
self.assertEqual(tags['title'], "Title")
self.assertEqual(tags['lyrics'], "Lyrics")
def test_get_tags_single(self):
for mime in ['audio/mp3', 'audio/ogg']:
audio = fakers.FakeFile(mime, 'Artist', 'Album', 'Title', 'Lyrics')
tags = misc.get_tags(audio)
self.assertEqual(tags['album'], "Album")
self.assertEqual(tags['artist'], "Artist")
self.assertEqual(tags['title'], "Title")
self.assertEqual(tags['lyrics'], "Lyrics")
def test_get_tags_broken(self):
audio = fakers.BrokenFile('audio/ogg', {'test': 'Test',
'album': 'Album',
'title': 'Title'})
tags = misc.get_tags(audio)
self.assertEqual(tags, None)
@mock.patch('lyricstagger.misc.click.edit', fakers.mock_edit_ok)
def test_edit_lyrics_empty_ok(self):
audio = fakers.FakeFile('audio/ogg', 'Artist', 'Album', 'Title')
lyrics = misc.edit_lyrics(audio)
self.assertEqual(lyrics, "")
@mock.patch('lyricstagger.misc.click.edit', fakers.mock_edit_fail)
def test_edit_lyrics_empty_fail(self):
audio = fakers.FakeFile('audio/ogg', 'Artist', 'Album', 'Title')
lyrics = misc.edit_lyrics(audio)
self.assertEqual(lyrics, None)
@mock.patch('lyricstagger.misc.click.edit', fakers.mock_edit_ok)
def test_edit_lyrics_nonempty_ok(self):
audio = fakers.FakeFile('audio/ogg', 'Artist', 'Album',
'Title', 'Lyrics')
lyrics = misc.edit_lyrics(audio)
self.assertEqual(lyrics, "Lyrics")
@mock.patch('lyricstagger.misc.click.edit', fakers.mock_edit_fail)
def test_edit_lyrics_nonempty_fail(self):
audio = fakers.FakeFile('audio/ogg', 'Artist', 'Album',
'Title', 'Lyrics')
lyrics = misc.edit_lyrics(audio)
self.assertEqual(lyrics, None)
def test_get_file_list(self):
file_list = list(misc.get_file_list(["test/test_data"]))
self.assertIn("test/test_data/test_dir_0/test_file_0.ogg", file_list)
self.assertIn("test/test_data/test_dir_1/test_file_1.ogg", file_list)
if __name__ == '__main__':
unittest.main()
| true
| true
|
7902c5942ba0de21459bf49c2f14c0a72d985738
| 8,138
|
py
|
Python
|
AiR-M/ban/base_model.py
|
szzexpoi/AiR
|
938ecfec51a306144eb72758530d42e35a10208d
|
[
"MIT"
] | 35
|
2020-07-07T03:01:46.000Z
|
2022-03-17T23:33:35.000Z
|
AiR-M/ban/base_model.py
|
szzexpoi/AiR
|
938ecfec51a306144eb72758530d42e35a10208d
|
[
"MIT"
] | 3
|
2020-09-15T08:14:09.000Z
|
2021-12-29T21:56:23.000Z
|
AiR-M/ban/base_model.py
|
szzexpoi/AiR
|
938ecfec51a306144eb72758530d42e35a10208d
|
[
"MIT"
] | 5
|
2020-07-07T10:14:54.000Z
|
2020-12-07T07:27:24.000Z
|
"""
Bilinear Attention Networks
Jin-Hwa Kim, Jaehyun Jun, Byoung-Tak Zhang
https://arxiv.org/abs/1805.07932
This code is written by Jin-Hwa Kim.
"""
import sys
sys.path.append('./ban')
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.weight_norm import weight_norm
from attention import BiAttention
from language_model import WordEmbedding, QuestionEmbedding
from classifier import SimpleClassifier
from fc import FCNet
from bc import BCNet
from counting import Counter
from torch.autograd import Variable
class GRU(nn.Module):
"""
Gated Recurrent Unit without long-term memory
"""
def __init__(self,embed_size=512):
super(GRU,self).__init__()
self.update_x = nn.Linear(embed_size,embed_size,bias=True)
self.update_h = nn.Linear(embed_size,embed_size,bias=True)
self.reset_x = nn.Linear(embed_size,embed_size,bias=True)
self.reset_h = nn.Linear(embed_size,embed_size,bias=True)
self.memory_x = nn.Linear(embed_size,embed_size,bias=True)
self.memory_h = nn.Linear(embed_size,embed_size,bias=True)
def forward(self,x,state):
z = F.sigmoid(self.update_x(x) + self.update_h(state))
r = F.sigmoid(self.reset_x(x) + self.reset_h(state))
mem = F.tanh(self.memory_x(x) + self.memory_h(torch.mul(r,state)))
state = torch.mul(1-z,state) + torch.mul(z,mem)
return state
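# For reference (annotation added for exposition, not in the original file), the
# forward pass above implements the standard GRU gating without a separate memory cell:
#   z  = sigmoid(W_z x + U_z h)        # update gate
#   r  = sigmoid(W_r x + U_r h)        # reset gate
#   m  = tanh(W_m x + U_m (r * h))     # candidate state
#   h' = (1 - z) * h + z * m           # new hidden state
# A quick hypothetical shape check with embed_size = 512:
#   gru = GRU(512); h = gru(torch.zeros(8, 512), torch.zeros(8, 512))   # -> (8, 512)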
def process_lengths(input):
"""
    Compute the lengths of the sentences in the current batch
"""
max_length = input.size(1)
lengths = list(max_length - input.data.eq(0).sum(1).squeeze())
return lengths
def select_last(x, lengths):
"""
Adaptively select the hidden state at the end of sentences
"""
batch_size = x.size(0)
seq_length = x.size(1)
mask = x.data.new().resize_as_(x.data).fill_(0)
for i in range(batch_size):
mask[i][lengths[i]-1].fill_(1)
mask = Variable(mask)
x = x.mul(mask)
x = x.sum(1).view(batch_size, x.size(2), x.size(3))
return x
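# Hypothetical usage of the two helpers above (values are illustrative): for a padded
# batch of operation ids such as [[3, 1, 4, 0, 0], [2, 2, 0, 0, 0]] (0 = padding),
# process_lengths returns [3, 2]; select_last then picks, for each sample, the slice
# at that last valid step from a (batch, seq, d1, d2) tensor.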
class BanModel(nn.Module):
def __init__(self, w_emb, q_emb, v_att, b_net, q_prj, c_prj, classifier, counter, op, glimpse,num_hid):
super(BanModel, self).__init__()
self.op = op
self.glimpse = glimpse
self.w_emb = w_emb
self.q_emb = q_emb
self.v_att = v_att
self.b_net = nn.ModuleList(b_net)
self.q_prj = nn.ModuleList(q_prj)
self.c_prj = nn.ModuleList(c_prj)
self.classifier = classifier
self.counter = counter
self.drop = nn.Dropout(.5)
self.tanh = nn.Tanh()
def forward(self, v, b, q):
"""Forward
v: [batch, num_objs, obj_dim]
b: [batch, num_objs, b_dim]
q: [batch_size, seq_length]
return: logits, not probs
"""
w_emb = self.w_emb(q)
q_emb = self.q_emb.forward_all(w_emb) # [batch, q_len, q_dim]
boxes = b[:,:,:4].transpose(1,2)
b_emb = [0] * self.glimpse
att, logits = self.v_att.forward_all(v, q_emb) # b x g x v x q
for g in range(self.glimpse):
b_emb[g] = self.b_net[g].forward_with_weights(v, q_emb, att[:,g,:,:]) # b x l x h
atten, _ = logits[:,g,:,:].max(2)
embed = self.counter(boxes, atten)
q_emb = self.q_prj[g](b_emb[g].unsqueeze(1)) + q_emb
q_emb = q_emb + self.c_prj[g](embed).unsqueeze(1)
logits = self.classifier(q_emb.sum(1))
return F.softmax(logits,dim=-1), att
def build_ban(num_token, v_dim, num_hid, num_ans, op='', gamma=4, reasoning=False):
w_emb = WordEmbedding(num_token, 300, .0, op)
q_emb = QuestionEmbedding(300 if 'c' not in op else 600, num_hid, 1, False, .0)
if not reasoning:
v_att = BiAttention(v_dim, num_hid, num_hid, gamma)
else:
v_att = BiAttention(v_dim, num_hid, num_hid, 1)
# constructing the model
b_net = []
q_prj = []
c_prj = []
objects = 36 # minimum number of boxes, originally 10
for i in range(gamma):
b_net.append(BCNet(v_dim, num_hid, num_hid, None, k=1))
q_prj.append(FCNet([num_hid, num_hid], '', .2))
c_prj.append(FCNet([objects + 1, num_hid], 'ReLU', .0))
classifier = SimpleClassifier(
num_hid, num_hid * 2, num_ans, .5)
counter = Counter(objects)
if not reasoning:
return BanModel(w_emb, q_emb, v_att, b_net, q_prj, c_prj, classifier, counter, op, gamma, num_hid)
else:
return BanModel_Reasoning(w_emb, q_emb, v_att, b_net, q_prj, c_prj, classifier, counter, op, gamma, num_hid)
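# Hedged construction sketch (the hyper-parameters below are illustrative, not the
# authors' settings): a reasoning-enabled BAN for a 3129-answer vocabulary with 2048-d
# bottom-up region features and 1280 hidden units could be built and called as
#   model = build_ban(num_token=20000, v_dim=2048, num_hid=1280, num_ans=3129,
#                     op='c', gamma=4, reasoning=True)
#   probs, ops, att_mask = model(v, b, q)
# with v: [batch, 36, 2048] region features, b: [batch, 36, >=4] boxes and
# q: [batch, seq_len] question token ids.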
class BanModel_Reasoning(nn.Module):
def __init__(self, w_emb, q_emb, v_att, b_net, q_prj, c_prj, classifier, counter, op, glimpse,num_hid):
super(BanModel_Reasoning, self).__init__()
self.op = op
self.glimpse = glimpse
self.w_emb = w_emb
self.q_emb = q_emb
self.v_att = v_att
self.b_net = nn.ModuleList(b_net)
self.q_prj = nn.ModuleList(q_prj)
self.c_prj = nn.ModuleList(c_prj)
self.classifier = classifier
self.counter = counter
self.drop = nn.Dropout(.5)
self.tanh = nn.Tanh()
self.semantic_rnn = GRU(256)
self.semantic_q = nn.Linear(num_hid,256)
self.semantic_pred = nn.Linear(256,9)
self.semantic_embed = nn.Embedding(num_embeddings=9,embedding_dim=256) # embedding layer for the semantic operations
self.att_p = nn.Linear(num_hid,num_hid)
self.att = nn.Linear(num_hid,1)
self.att_s = nn.Linear(256,num_hid)
self.att_v = nn.Linear(2048,num_hid)
def init_hidden_state(self,batch,s_embed=256):
init_s = torch.zeros(batch,s_embed).cuda()
return init_s
def forward(self, v, b, q):
"""Forward
v: [batch, num_objs, obj_dim]
b: [batch, num_objs, b_dim]
q: [batch_size, seq_length]
return: logits, not probs
"""
w_emb = self.w_emb(q)
q_emb = self.q_emb.forward_all(w_emb) # [batch, q_len, q_dim]
ori_q_emb = q_emb
boxes = b[:,:,:4].transpose(1,2)
b_emb = [0] * self.glimpse
s_x = self.init_hidden_state(len(q),256)
s_h = torch.tanh(self.semantic_q(ori_q_emb.mean(1)))
v_att = torch.tanh(self.att_v(F.dropout(v,0.25)))
op = []
att_mask = []
q_emb_pool = []
for g in range(self.glimpse):
# reasoning attention
s_h = self.semantic_rnn(s_x,s_h)
s_x = F.softmax(self.semantic_pred(s_h),dim=-1)
op.append(s_x)
s_x = torch.max(s_x,dim=-1)[1]
s_x = self.semantic_embed(s_x)
s_att = torch.tanh(self.att_s(s_h)).unsqueeze(1).expand_as(v_att)
fuse_feat = torch.tanh(self.att_p(torch.mul(s_att,v_att)))
reason_att = self.att(fuse_feat)
reason_att = F.softmax(reason_att.view(reason_att.size(0),-1),dim=-1)
# reason_att = torch.sigmoid(reason_att.view(reason_att.size(0),-1),dim=-1)
# cur_v = v + torch.mul(v,reason_att.unsqueeze(-1).expand_as(v))
cur_v = torch.mul(v,reason_att.unsqueeze(-1).expand_as(v))
# original ban
att, logits = self.v_att(cur_v, ori_q_emb) # b x g x v x q
att, logits = att.squeeze(), logits.squeeze()
b_emb[g] = self.b_net[g].forward_with_weights(v, q_emb, att) # b x l x h
atten, _ = logits.max(2)
embed = self.counter(boxes, atten)
q_emb = self.q_prj[g](b_emb[g].unsqueeze(1)) + q_emb
q_emb = q_emb + self.c_prj[g](embed).unsqueeze(1)
q_emb_pool.append(q_emb)
att_mask.append(reason_att)
op = torch.cat([_.unsqueeze(1) for _ in op],dim=1)
att_mask = torch.cat([_.unsqueeze(1) for _ in att_mask],dim=1)
valid_op = process_lengths(torch.max(op,dim=-1)[1])
q_emb_pool = torch.cat([_.unsqueeze(1) for _ in q_emb_pool],dim=1)
q_emb = select_last(q_emb_pool,valid_op)
logits = self.classifier(q_emb.sum(1))
return F.softmax(logits,dim=-1), op, att_mask
| 36.00885
| 124
| 0.617842
|
import sys
sys.path.append('./ban')
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.weight_norm import weight_norm
from attention import BiAttention
from language_model import WordEmbedding, QuestionEmbedding
from classifier import SimpleClassifier
from fc import FCNet
from bc import BCNet
from counting import Counter
from torch.autograd import Variable
class GRU(nn.Module):
def __init__(self,embed_size=512):
super(GRU,self).__init__()
self.update_x = nn.Linear(embed_size,embed_size,bias=True)
self.update_h = nn.Linear(embed_size,embed_size,bias=True)
self.reset_x = nn.Linear(embed_size,embed_size,bias=True)
self.reset_h = nn.Linear(embed_size,embed_size,bias=True)
self.memory_x = nn.Linear(embed_size,embed_size,bias=True)
self.memory_h = nn.Linear(embed_size,embed_size,bias=True)
def forward(self,x,state):
z = F.sigmoid(self.update_x(x) + self.update_h(state))
r = F.sigmoid(self.reset_x(x) + self.reset_h(state))
mem = F.tanh(self.memory_x(x) + self.memory_h(torch.mul(r,state)))
state = torch.mul(1-z,state) + torch.mul(z,mem)
return state
def process_lengths(input):
max_length = input.size(1)
lengths = list(max_length - input.data.eq(0).sum(1).squeeze())
return lengths
def select_last(x, lengths):
batch_size = x.size(0)
seq_length = x.size(1)
mask = x.data.new().resize_as_(x.data).fill_(0)
for i in range(batch_size):
mask[i][lengths[i]-1].fill_(1)
mask = Variable(mask)
x = x.mul(mask)
x = x.sum(1).view(batch_size, x.size(2), x.size(3))
return x
class BanModel(nn.Module):
def __init__(self, w_emb, q_emb, v_att, b_net, q_prj, c_prj, classifier, counter, op, glimpse,num_hid):
super(BanModel, self).__init__()
self.op = op
self.glimpse = glimpse
self.w_emb = w_emb
self.q_emb = q_emb
self.v_att = v_att
self.b_net = nn.ModuleList(b_net)
self.q_prj = nn.ModuleList(q_prj)
self.c_prj = nn.ModuleList(c_prj)
self.classifier = classifier
self.counter = counter
self.drop = nn.Dropout(.5)
self.tanh = nn.Tanh()
def forward(self, v, b, q):
w_emb = self.w_emb(q)
q_emb = self.q_emb.forward_all(w_emb)
boxes = b[:,:,:4].transpose(1,2)
b_emb = [0] * self.glimpse
att, logits = self.v_att.forward_all(v, q_emb)
for g in range(self.glimpse):
b_emb[g] = self.b_net[g].forward_with_weights(v, q_emb, att[:,g,:,:])
atten, _ = logits[:,g,:,:].max(2)
embed = self.counter(boxes, atten)
q_emb = self.q_prj[g](b_emb[g].unsqueeze(1)) + q_emb
q_emb = q_emb + self.c_prj[g](embed).unsqueeze(1)
logits = self.classifier(q_emb.sum(1))
return F.softmax(logits,dim=-1), att
def build_ban(num_token, v_dim, num_hid, num_ans, op='', gamma=4, reasoning=False):
w_emb = WordEmbedding(num_token, 300, .0, op)
q_emb = QuestionEmbedding(300 if 'c' not in op else 600, num_hid, 1, False, .0)
if not reasoning:
v_att = BiAttention(v_dim, num_hid, num_hid, gamma)
else:
v_att = BiAttention(v_dim, num_hid, num_hid, 1)
b_net = []
q_prj = []
c_prj = []
objects = 36
for i in range(gamma):
b_net.append(BCNet(v_dim, num_hid, num_hid, None, k=1))
q_prj.append(FCNet([num_hid, num_hid], '', .2))
c_prj.append(FCNet([objects + 1, num_hid], 'ReLU', .0))
classifier = SimpleClassifier(
num_hid, num_hid * 2, num_ans, .5)
counter = Counter(objects)
if not reasoning:
return BanModel(w_emb, q_emb, v_att, b_net, q_prj, c_prj, classifier, counter, op, gamma, num_hid)
else:
return BanModel_Reasoning(w_emb, q_emb, v_att, b_net, q_prj, c_prj, classifier, counter, op, gamma, num_hid)
class BanModel_Reasoning(nn.Module):
def __init__(self, w_emb, q_emb, v_att, b_net, q_prj, c_prj, classifier, counter, op, glimpse,num_hid):
super(BanModel_Reasoning, self).__init__()
self.op = op
self.glimpse = glimpse
self.w_emb = w_emb
self.q_emb = q_emb
self.v_att = v_att
self.b_net = nn.ModuleList(b_net)
self.q_prj = nn.ModuleList(q_prj)
self.c_prj = nn.ModuleList(c_prj)
self.classifier = classifier
self.counter = counter
self.drop = nn.Dropout(.5)
self.tanh = nn.Tanh()
self.semantic_rnn = GRU(256)
self.semantic_q = nn.Linear(num_hid,256)
self.semantic_pred = nn.Linear(256,9)
self.semantic_embed = nn.Embedding(num_embeddings=9,embedding_dim=256)
self.att_p = nn.Linear(num_hid,num_hid)
self.att = nn.Linear(num_hid,1)
self.att_s = nn.Linear(256,num_hid)
self.att_v = nn.Linear(2048,num_hid)
def init_hidden_state(self,batch,s_embed=256):
init_s = torch.zeros(batch,s_embed).cuda()
return init_s
def forward(self, v, b, q):
w_emb = self.w_emb(q)
q_emb = self.q_emb.forward_all(w_emb)
ori_q_emb = q_emb
boxes = b[:,:,:4].transpose(1,2)
b_emb = [0] * self.glimpse
s_x = self.init_hidden_state(len(q),256)
s_h = torch.tanh(self.semantic_q(ori_q_emb.mean(1)))
v_att = torch.tanh(self.att_v(F.dropout(v,0.25)))
op = []
att_mask = []
q_emb_pool = []
for g in range(self.glimpse):
s_h = self.semantic_rnn(s_x,s_h)
s_x = F.softmax(self.semantic_pred(s_h),dim=-1)
op.append(s_x)
s_x = torch.max(s_x,dim=-1)[1]
s_x = self.semantic_embed(s_x)
s_att = torch.tanh(self.att_s(s_h)).unsqueeze(1).expand_as(v_att)
fuse_feat = torch.tanh(self.att_p(torch.mul(s_att,v_att)))
reason_att = self.att(fuse_feat)
reason_att = F.softmax(reason_att.view(reason_att.size(0),-1),dim=-1)
cur_v = torch.mul(v,reason_att.unsqueeze(-1).expand_as(v))
att, logits = self.v_att(cur_v, ori_q_emb)
att, logits = att.squeeze(), logits.squeeze()
b_emb[g] = self.b_net[g].forward_with_weights(v, q_emb, att)
atten, _ = logits.max(2)
embed = self.counter(boxes, atten)
q_emb = self.q_prj[g](b_emb[g].unsqueeze(1)) + q_emb
q_emb = q_emb + self.c_prj[g](embed).unsqueeze(1)
q_emb_pool.append(q_emb)
att_mask.append(reason_att)
op = torch.cat([_.unsqueeze(1) for _ in op],dim=1)
att_mask = torch.cat([_.unsqueeze(1) for _ in att_mask],dim=1)
valid_op = process_lengths(torch.max(op,dim=-1)[1])
q_emb_pool = torch.cat([_.unsqueeze(1) for _ in q_emb_pool],dim=1)
q_emb = select_last(q_emb_pool,valid_op)
logits = self.classifier(q_emb.sum(1))
return F.softmax(logits,dim=-1), op, att_mask
| true
| true
|
7902c59b488d0959fa886c67b556db7f20b57f52
| 8,624
|
py
|
Python
|
BlackJack.py
|
tse4a/Python-Challenge
|
d85fdbb4b291443e9be0a93ea1efce57b5e0b44b
|
[
"MIT"
] | null | null | null |
BlackJack.py
|
tse4a/Python-Challenge
|
d85fdbb4b291443e9be0a93ea1efce57b5e0b44b
|
[
"MIT"
] | null | null | null |
BlackJack.py
|
tse4a/Python-Challenge
|
d85fdbb4b291443e9be0a93ea1efce57b5e0b44b
|
[
"MIT"
] | null | null | null |
import random
class Card:
def __init__(self, suit, rank):
self.suit = suit
self.rank = rank
def __str__(self):
return f"{self.suit} {self.rank}: {BlackJack.values[self.rank]}"
class Hand:
def __init__(self):
self.cards = [] # start with empty list
self.value = 0
self.aces = 0
def adjust_for_ace(self):
while self.value > 21 and self.aces:
self.value -= 10
self.aces -= 1
def add_card(self, card):
self.cards.append(card)
self.value += BlackJack.values[card.rank]
if card.rank == 'Ace':
self.aces += 1
def __str__(self):
return f"Current Hand:{self.cards}\nCurrent Value:{self.value}\nCurrent Aces:{self.aces}\n"
class Deck:
def __init__(self, card_game):
self.game = card_game
# create deck with all 52 cards
self.cards = list()
for suit in self.game.suits:
for rank in self.game.ranks:
self.cards.append(Card(suit, rank))
def shuffle(self):
random.shuffle(self.cards)
def deal_card(self):
return self.cards.pop()
def __str__(self):
return f"{[x for x in self.cards]}"
class Chips:
def __init__(self, total=100):
self.total = total
self.bet = 0
def win_bet(self):
self.total += self.bet
self.bet = 0
def lose_bet(self):
self.total -= self.bet
self.bet = 0
def make_bet(self, bet):
if bet <= self.total:
self.bet = bet
else:
raise ValueError(f"The bet ({bet}) exceeds available chips ({self.total})")
def __str__(self):
return f"Total: {self.total}\nCurrent Bet:{self.bet}\n"
class Player:
def __init__(self, name):
self.name = name
self.wins = 0
self.lost_games = 0
self.chips = Chips()
def __str__(self):
return f"{self.name}:\n{self.wins} wins\n{self.lost_games} losses\nChips:{self.chips}\n"
class BlackJack:
suits = ('Hearts', 'Diamonds', 'Spades', 'Clubs')
ranks = ('Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Jack', 'Queen', 'King', 'Ace')
values = {'Two': 2, 'Three': 3, 'Four': 4, 'Five': 5, 'Six': 6, 'Seven': 7, 'Eight': 8, 'Nine': 9, 'Ten': 10,
'Jack': 10, 'Queen': 10, 'King': 10, 'Ace': 11}
def __init__(self, player):
self.player = player
self.deck = Deck(self)
self.playing = False
def greeting(self):
print("WELCOME TO BLACKJACK!")
def take_bet(self):
while True:
try:
# Ask the Player for their bet
bet = int(input("Please put your bet: "))
# Make sure that the Player's bet does not exceed their available chips
self.player.chips.make_bet(bet)
break
except TypeError:
print("Invalid input. Please try again")
except ValueError as exc:
print(f"{exc} Please try again")
def hit(self, hand):
cd = self.deck.deal_card()
# print(f"Deal Card: {cd}")
hand.add_card(cd)
hand.adjust_for_ace()
def hit_or_stand(self, hand):
while True:
print(f"{self.player.name}: current {hand.value}")
action = input("Hit or Stand? Enter 'h' or 's': ")
if action[0].lower() == 's':
print("STAY\n")
self.playing = False
elif action[0].lower() == 'h':
print("HIT\n")
self.hit(hand)
else:
print(f"Sorry, I do not understand your choice '{action}'. Please try again")
continue
break
def player_busts(self, p_hand, d_hand):
print(f"[P={p_hand.value},D={d_hand.value}]: {self.player.name} BUSTED!")
self.player.chips.lose_bet()
self.player.lost_games += 1
def player_wins(self, p_hand, d_hand):
print(f"[P={p_hand.value},D={d_hand.value}]: {self.player.name} WINS! ")
self.player.chips.win_bet()
self.player.wins += 1
def dealer_busts(self, p_hand, d_hand):
print(f"[P={p_hand.value},D={d_hand.value}]: {self.player.name} WINS - Dealer BUSTED!")
self.player.chips.win_bet()
self.player.wins += 1
def dealer_wins(self, p_hand, d_hand):
print(f"[P={p_hand.value},D={d_hand.value}]: Dealer WINS")
self.player.chips.lose_bet()
self.player.lost_games += 1
def push(self, p_hand, d_hand):
print(f"[P={p_hand.value},D={d_hand.value}]: Dealer and {self.player.name} tie - PUSH!")
def show_some(self, p_hand, d_hand):
# Show only one of the Dealer's cards, the other remains hidden
print(f"Dealer's card (one hidden): {d_hand.cards[0]}")
# Show both of the Player's cards
print(f"{self.player.name}'s Cards:")
for card in p_hand.cards:
print(card)
print(f"total= {p_hand.value}")
def show_all_cards(self, p_hand, d_hand):
# Show both of the Player's cards
print(f"{self.player.name}'s Cards:")
for card in p_hand.cards:
print(card)
print(f"total= {p_hand.value}")
# Show both of the Player's cards
print(f"Dealer's Cards:")
for card in d_hand.cards:
print(card)
print(f"total= {d_hand.value}")
def play(self):
"""
# 1. Create a deck of 52 cards
# 2. Shuffle the deck
# 3. Ask the Player for their bet
# 4. Make sure that the Player's bet does not exceed their available chips
# 5. Deal two cards to the Dealer and two cards to the Player
# 6. Show only one of the Dealer's cards, the other remains hidden
# 7. Show both of the Player's cards
# 8. Ask the Player if they wish to Hit, and take another card
# 9. If the Player's hand doesn't Bust (go over 21), ask if they'd like to Hit again.
# 10. If a Player Stands, play the Dealer's hand.
# The dealer will always Hit until the Dealer's value meets or exceeds 17
# 11. Determine the winner and adjust the Player's chips accordingly
# 12. Ask the Player if they'd like to play again
"""
print("--NEW GAME---")
self.playing = True
self.deck.shuffle()
dealer_hand = Hand()
player_hand = Hand()
# Deal two cards to the Dealer and two cards to the Player
player_hand.add_card(self.deck.deal_card())
dealer_hand.add_card(self.deck.deal_card())
player_hand.add_card(self.deck.deal_card())
dealer_hand.add_card(self.deck.deal_card())
self.take_bet()
# show cards, but keep one dealer card hidden
self.show_some(player_hand, dealer_hand)
while self.playing:
# Ask the Player if they wish to Hit, and take another card
# If the Player's hand doesn't Bust (go over 21), ask if they'd like to Hit again.
self.hit_or_stand(player_hand)
self.show_some(player_hand, dealer_hand)
if player_hand.value > 21:
# player busts - lost his bet
self.player_busts(player_hand, dealer_hand)
break
# If Player has not busted
if player_hand.value <= 21:
# The dealer will always Hit until the Dealer's value meets or exceeds 17
while dealer_hand.value < 17:
self.hit(dealer_hand)
# Determine for the winner - show all cards
self.show_all_cards(player_hand, dealer_hand)
# Determine the winner and adjust the Player's chips accordingly
if dealer_hand.value > 21:
self.dealer_busts(player_hand, dealer_hand)
elif player_hand.value > dealer_hand.value:
self.player_wins(player_hand, dealer_hand)
elif player_hand.value < dealer_hand.value:
self.dealer_wins(player_hand, dealer_hand)
else:
self.push(player_hand, dealer_hand)
if __name__ == "__main__":
game_on = True
# Play a new game of BlackJack with Player Daniela
player = Player('Daniela')
game = BlackJack(player)
game.greeting()
while game_on:
game.play()
print(f"GAME DONE.\nGame Stats:\n\n{player}")
# Ask the Player if they'd like to play again
if input("Would you like another game? y/n: ") != 'y':
game_on = False
| 31.822878
| 116
| 0.57224
|
import random
class Card:
def __init__(self, suit, rank):
self.suit = suit
self.rank = rank
def __str__(self):
return f"{self.suit} {self.rank}: {BlackJack.values[self.rank]}"
class Hand:
def __init__(self):
self.cards = []
self.value = 0
self.aces = 0
def adjust_for_ace(self):
while self.value > 21 and self.aces:
self.value -= 10
self.aces -= 1
def add_card(self, card):
self.cards.append(card)
self.value += BlackJack.values[card.rank]
if card.rank == 'Ace':
self.aces += 1
def __str__(self):
return f"Current Hand:{self.cards}\nCurrent Value:{self.value}\nCurrent Aces:{self.aces}\n"
class Deck:
def __init__(self, card_game):
self.game = card_game
self.cards = list()
for suit in self.game.suits:
for rank in self.game.ranks:
self.cards.append(Card(suit, rank))
def shuffle(self):
random.shuffle(self.cards)
def deal_card(self):
return self.cards.pop()
def __str__(self):
return f"{[x for x in self.cards]}"
class Chips:
def __init__(self, total=100):
self.total = total
self.bet = 0
def win_bet(self):
self.total += self.bet
self.bet = 0
def lose_bet(self):
self.total -= self.bet
self.bet = 0
def make_bet(self, bet):
if bet <= self.total:
self.bet = bet
else:
raise ValueError(f"The bet ({bet}) exceeds available chips ({self.total})")
def __str__(self):
return f"Total: {self.total}\nCurrent Bet:{self.bet}\n"
class Player:
def __init__(self, name):
self.name = name
self.wins = 0
self.lost_games = 0
self.chips = Chips()
def __str__(self):
return f"{self.name}:\n{self.wins} wins\n{self.lost_games} losses\nChips:{self.chips}\n"
class BlackJack:
suits = ('Hearts', 'Diamonds', 'Spades', 'Clubs')
ranks = ('Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', 'Jack', 'Queen', 'King', 'Ace')
values = {'Two': 2, 'Three': 3, 'Four': 4, 'Five': 5, 'Six': 6, 'Seven': 7, 'Eight': 8, 'Nine': 9, 'Ten': 10,
'Jack': 10, 'Queen': 10, 'King': 10, 'Ace': 11}
def __init__(self, player):
self.player = player
self.deck = Deck(self)
self.playing = False
def greeting(self):
print("WELCOME TO BLACKJACK!")
def take_bet(self):
while True:
try:
bet = int(input("Please put your bet: "))
self.player.chips.make_bet(bet)
break
except TypeError:
print("Invalid input. Please try again")
except ValueError as exc:
print(f"{exc} Please try again")
def hit(self, hand):
cd = self.deck.deal_card()
# print(f"Deal Card: {cd}")
hand.add_card(cd)
hand.adjust_for_ace()
def hit_or_stand(self, hand):
while True:
print(f"{self.player.name}: current {hand.value}")
action = input("Hit or Stand? Enter 'h' or 's': ")
if action[0].lower() == 's':
print("STAY\n")
self.playing = False
elif action[0].lower() == 'h':
print("HIT\n")
self.hit(hand)
else:
print(f"Sorry, I do not understand your choice '{action}'. Please try again")
continue
break
def player_busts(self, p_hand, d_hand):
print(f"[P={p_hand.value},D={d_hand.value}]: {self.player.name} BUSTED!")
self.player.chips.lose_bet()
self.player.lost_games += 1
def player_wins(self, p_hand, d_hand):
print(f"[P={p_hand.value},D={d_hand.value}]: {self.player.name} WINS! ")
self.player.chips.win_bet()
self.player.wins += 1
def dealer_busts(self, p_hand, d_hand):
print(f"[P={p_hand.value},D={d_hand.value}]: {self.player.name} WINS - Dealer BUSTED!")
self.player.chips.win_bet()
self.player.wins += 1
def dealer_wins(self, p_hand, d_hand):
print(f"[P={p_hand.value},D={d_hand.value}]: Dealer WINS")
self.player.chips.lose_bet()
self.player.lost_games += 1
def push(self, p_hand, d_hand):
print(f"[P={p_hand.value},D={d_hand.value}]: Dealer and {self.player.name} tie - PUSH!")
def show_some(self, p_hand, d_hand):
# Show only one of the Dealer's cards, the other remains hidden
print(f"Dealer's card (one hidden): {d_hand.cards[0]}")
# Show both of the Player's cards
print(f"{self.player.name}'s Cards:")
for card in p_hand.cards:
print(card)
print(f"total= {p_hand.value}")
def show_all_cards(self, p_hand, d_hand):
# Show both of the Player's cards
print(f"{self.player.name}'s Cards:")
for card in p_hand.cards:
print(card)
print(f"total= {p_hand.value}")
# Show both of the Player's cards
print(f"Dealer's Cards:")
for card in d_hand.cards:
print(card)
print(f"total= {d_hand.value}")
def play(self):
print("--NEW GAME---")
self.playing = True
self.deck.shuffle()
dealer_hand = Hand()
player_hand = Hand()
# Deal two cards to the Dealer and two cards to the Player
player_hand.add_card(self.deck.deal_card())
dealer_hand.add_card(self.deck.deal_card())
player_hand.add_card(self.deck.deal_card())
dealer_hand.add_card(self.deck.deal_card())
self.take_bet()
# show cards, but keep one dealer card hidden
self.show_some(player_hand, dealer_hand)
while self.playing:
# Ask the Player if they wish to Hit, and take another card
# If the Player's hand doesn't Bust (go over 21), ask if they'd like to Hit again.
self.hit_or_stand(player_hand)
self.show_some(player_hand, dealer_hand)
if player_hand.value > 21:
self.player_busts(player_hand, dealer_hand)
break
if player_hand.value <= 21:
while dealer_hand.value < 17:
self.hit(dealer_hand)
# Determine for the winner - show all cards
self.show_all_cards(player_hand, dealer_hand)
# Determine the winner and adjust the Player's chips accordingly
if dealer_hand.value > 21:
self.dealer_busts(player_hand, dealer_hand)
elif player_hand.value > dealer_hand.value:
self.player_wins(player_hand, dealer_hand)
elif player_hand.value < dealer_hand.value:
self.dealer_wins(player_hand, dealer_hand)
else:
self.push(player_hand, dealer_hand)
if __name__ == "__main__":
game_on = True
player = Player('Daniela')
game = BlackJack(player)
game.greeting()
while game_on:
game.play()
print(f"GAME DONE.\nGame Stats:\n\n{player}")
if input("Would you like another game? y/n: ") != 'y':
game_on = False
| true
| true
|
7902c7384238931709171b3fd07b5a318cbd9914
| 1,134
|
py
|
Python
|
cardea/fhir/Annotation.py
|
sarahmish/Cardea
|
85c4246c12178e6d1b9cc12eb39c264f3c20f3e9
|
[
"MIT"
] | 69
|
2021-01-28T22:25:10.000Z
|
2022-03-15T00:23:33.000Z
|
cardea/fhir/Annotation.py
|
sarahmish/Cardea
|
85c4246c12178e6d1b9cc12eb39c264f3c20f3e9
|
[
"MIT"
] | 30
|
2018-08-29T12:45:23.000Z
|
2019-12-24T11:08:12.000Z
|
cardea/fhir/Annotation.py
|
sarahmish/Cardea
|
85c4246c12178e6d1b9cc12eb39c264f3c20f3e9
|
[
"MIT"
] | 14
|
2021-03-24T01:21:25.000Z
|
2022-03-12T11:53:40.000Z
|
from .fhirbase import fhirbase
class Annotation(fhirbase):
"""
A text note which also contains information about who made the
statement and when.
Args:
authorReference: The individual responsible for making the annotation.
authorString: The individual responsible for making the annotation.
time: Indicates when this particular annotation was made.
text: The text of the annotation.
"""
__name__ = 'Annotation'
def __init__(self, dict_values=None):
self.authorReference = None
# reference to Reference: identifier
self.authorString = None
# type: str
self.time = None
# type: str
self.text = None
# type: str
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Annotation',
'child_variable': 'authorReference'},
]
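A small usage sketch for the class above; it assumes that set_attributes (inherited from fhirbase, outside this excerpt) copies matching dictionary keys onto the instance, and the field values are invented.
# Usage sketch (assumes fhirbase.set_attributes assigns matching dict keys; values are made up).
note = Annotation(dict_values={
    'authorString': 'Dr. Example',
    'time': '2020-01-01T00:00:00Z',
    'text': 'Patient reports mild symptoms.',
})
print(note.text)
print(note.get_relationships())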
| 25.2
| 78
| 0.614638
|
from .fhirbase import fhirbase
class Annotation(fhirbase):
__name__ = 'Annotation'
def __init__(self, dict_values=None):
self.authorReference = None
self.authorString = None
self.time = None
self.text = None
self.object_id = None
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Annotation',
'child_variable': 'authorReference'},
]
| true
| true
|
7902c773ba74624a3d1cfe76d12ec8fe7987d2f2
| 74
|
py
|
Python
|
pypi_package_project/core.py
|
rexzhang/pypi-package-project-template
|
bd7948a5d01a62c607cd82b72023d118b1860454
|
[
"MIT"
] | 38
|
2021-03-25T06:33:07.000Z
|
2022-03-11T03:56:15.000Z
|
pypi_package_project/core.py
|
rexzhang/pypi-package-project-template
|
bd7948a5d01a62c607cd82b72023d118b1860454
|
[
"MIT"
] | 15
|
2021-05-19T03:24:51.000Z
|
2022-03-25T22:40:02.000Z
|
pypi_package_project/core.py
|
rexzhang/pypi-package-project-template
|
bd7948a5d01a62c607cd82b72023d118b1860454
|
[
"MIT"
] | 8
|
2021-03-29T06:59:34.000Z
|
2022-03-11T03:56:17.000Z
|
#!/usr/bin/env python
# coding=utf-8
class PyPIPackageProject:
pass
| 10.571429
| 25
| 0.702703
|
class PyPIPackageProject:
pass
| true
| true
|
7902c79bcbd5c7f0c40b1e42b37e2c2c6033cc96
| 9,853
|
py
|
Python
|
smdebug/mxnet/hook.py
|
jsspric/sagemaker-debugger
|
d7010869e19ae49c4f371935f27afcb585195f79
|
[
"Apache-2.0"
] | 133
|
2019-12-03T18:56:27.000Z
|
2022-03-18T19:54:49.000Z
|
smdebug/mxnet/hook.py
|
jsspric/sagemaker-debugger
|
d7010869e19ae49c4f371935f27afcb585195f79
|
[
"Apache-2.0"
] | 384
|
2019-12-04T03:04:14.000Z
|
2022-03-31T20:42:48.000Z
|
smdebug/mxnet/hook.py
|
jsspric/sagemaker-debugger
|
d7010869e19ae49c4f371935f27afcb585195f79
|
[
"Apache-2.0"
] | 64
|
2019-12-05T20:39:51.000Z
|
2022-03-25T13:30:54.000Z
|
# Third Party
import mxnet as mx
from mxnet.ndarray import NDArray
# First Party
from smdebug.core.collection import DEFAULT_MXNET_COLLECTIONS, CollectionKeys
from smdebug.core.hook import CallbackHook
from smdebug.core.json_config import DEFAULT_WORKER_NAME
from smdebug.core.utils import FRAMEWORK, error_handling_agent
from smdebug.mxnet.collection import CollectionManager
from smdebug.mxnet.graph import _net2pb
from smdebug.mxnet.singleton_utils import set_hook
from smdebug.mxnet.utils import get_reduction_of_data, make_numpy_array
from smdebug.profiler.profiler_config_parser import get_profiler_config_parser
DEFAULT_INCLUDE_COLLECTIONS = [CollectionKeys.LOSSES]
COLLECTIONS_NOT_REQUIRING_RECURSIVE_HOOK = [
CollectionKeys.WEIGHTS,
CollectionKeys.BIASES,
CollectionKeys.GRADIENTS,
CollectionKeys.LOSSES,
]
profiler_config_parser = get_profiler_config_parser(FRAMEWORK.PYTORCH)
class Hook(CallbackHook):
def __init__(
self,
out_dir=None,
export_tensorboard=False,
tensorboard_dir=None,
dry_run=False,
reduction_config=None,
save_config=None,
include_regex=None,
include_collections=None,
save_all=False,
include_workers="one",
):
collection_manager = CollectionManager()
super().__init__(
collection_manager=collection_manager,
default_include_collections=DEFAULT_INCLUDE_COLLECTIONS,
profiler_config_parser=profiler_config_parser,
data_type_name=mx.ndarray.NDArray.__name__,
out_dir=out_dir,
export_tensorboard=export_tensorboard,
tensorboard_dir=tensorboard_dir,
dry_run=dry_run,
reduction_config=reduction_config,
save_config=save_config,
include_regex=include_regex,
include_collections=include_collections,
save_all=save_all,
include_workers=include_workers,
)
self.last_block = None
self.model = None
self.exported_model = False
# Keep the set of blocks to which this hook is registered. The blocks include loss blocks as well.
self.registered_blocks = set()
self.worker = self._get_worker_name()
set_hook(self)
def _get_worker_name(self):
try:
import horovod.mxnet as hvd
if hvd.size():
return f"worker_{hvd.rank()}"
except (ModuleNotFoundError, ValueError, ImportError):
pass
return DEFAULT_WORKER_NAME
def _get_num_workers(self):
try:
import horovod.mxnet as hvd
if hvd.size():
return hvd.size()
except (ModuleNotFoundError, ValueError, ImportError):
pass
return 1
def _cleanup(self):
# Write the gradients of the past step if the writer is still available.
if self.writer is not None and self.last_block is not None:
self._log_params(self.last_block)
if self.exported_model is False:
self._export_model()
super()._cleanup()
def _log_params(self, block):
params = block.collect_params().values()
for param in params:
self._log_param(param)
def _log_param(self, param):
try:
self._save_for_tensor(
tensor_name=param.name, tensor_value=param.data(param.list_ctx()[0])
)
# If Gradient for this param is available
if param.grad_req != "null":
self._save_for_tensor(
tensor_name=self.GRADIENT_PREFIX + param.name,
tensor_value=param.grad(param.list_ctx()[0]),
)
except RuntimeError as e:
self.logger.warning(
f"Could not log parameter {param.name} due to the mxnet exception: {e}"
)
def _export_model(self):
if self.model is not None:
try:
tb_writer = self._maybe_get_tb_writer()
if tb_writer:
tb_writer.write_graph(_net2pb(self.model))
except (RuntimeError, TypeError) as e:
self.logger.warning(
f"Could not export model graph for tensorboard "
f"due to the mxnet exception: {e}"
)
def _get_default_collections(self):
return DEFAULT_MXNET_COLLECTIONS
# This hook is invoked by trainer prior to running the forward pass.
@error_handling_agent.catch_smdebug_errors()
def forward_pre_hook(self, block, inputs):
if self.writer is not None:
# Write the params and gradients of the
# past step if the writer is still available.
self._log_params(block)
self._close_writers()
self._close_tb_writer()
if not self.prepared_collections:
# at this point we need all collections to be ready
# this may not be the case at creation of hook
# as user's code after hook might add collections
self._prepare_collections()
self.prepared_collections = True
self._increment_step()
if self._get_collections_to_save_for_step():
self._initialize_writers()
if self.exported_model is False:
self._export_model()
self.exported_model = True
if self.last_saved_step is not None and not self.exported_collections:
self.export_collections()
self.exported_collections = True
self.last_block = block
self._save_custom_tensors_post_step()
# This hook is invoked by trainer after running the forward pass.
@error_handling_agent.catch_smdebug_errors()
def forward_hook(self, block, inputs, outputs):
if not self._get_collections_to_save_for_step():
return
block_name = block.name
# This overwhelms the logs; turn back on if you really need it
# logger.debug("Processing the global step {0} for block {1}".format(self.step, block_name))
# Output input tensor
self._write_inputs(block_name, inputs)
# Output output tensors
self._write_outputs(block_name, outputs)
self.last_saved_step = self.step
def _recursive_apply(self, block):
"""
This function is "applied" to every child in the block. This function in turn
registers the forward hook to each module. It helps logging the input output tensors
of that module.
"""
# Check if the hook is already registered for this block.
if block in self.registered_blocks:
self.logger.warning(f"The hook is already registered to block {block.name}")
return
block.register_forward_hook(self.forward_hook)
self.registered_blocks.add(block)
def _is_recursive_needed(self):
collections_to_save = self.include_collections
# Check if default collection has a regex associated with it.
# If it does we would need to apply hook recursively.
if (
len(self.collection_manager.get(CollectionKeys.DEFAULT).include_regex) != 0
and CollectionKeys.DEFAULT in collections_to_save
):
return True
# Get the collections that are to be saved but are not part of default collections
# We will need to apply hook recursively to get tensors specified in those collections.
extra_coll = [
value
for value in collections_to_save
if value not in COLLECTIONS_NOT_REQUIRING_RECURSIVE_HOOK
]
# extra_coll contains the collections that are not part of default collections.
return len(extra_coll) != 0
def register_hook(self, block):
# for compatibility with ZCC patches which call this
self.register_block(block)
@error_handling_agent.catch_smdebug_errors()
def register_block(self, block):
"""
This function registers the forward hook. If user wants to register the hook
for every child in the given block, then the function calls "apply" API for
registration of the hook.
        The hook is registered recursively if the user has specified collections beyond
        the default ones, viz. gradients, weights and biases.
"""
if not isinstance(block, mx.gluon.Block):
self.logger.error(f"The given block type {block.__class__.__name__} is unsupported.")
return
# Check if the hook is already registered for this block.
if block in self.registered_blocks:
self.logger.warning(f"The hook is already registered to block {block.name}")
return
# Skip the forward pre hook for the Loss blocks.
if isinstance(block, mx.gluon.loss.Loss):
self.logger.info(f"Registering hook for block {block.name}")
block.register_forward_hook(self.forward_hook)
self.registered_blocks.add(block)
return
else:
self.model = block
is_recursive = self._is_recursive_needed()
block.register_forward_pre_hook(self.forward_pre_hook)
if is_recursive is True:
block.apply(self._recursive_apply)
else:
block.register_forward_hook(self.forward_hook)
self.registered_blocks.add(block)
@staticmethod
def _get_reduction_of_data(reduction_name, tensor_value, tensor_name, abs):
return get_reduction_of_data(reduction_name, tensor_value, tensor_name, abs)
@staticmethod
def _make_numpy_array(tensor_value):
if isinstance(tensor_value, NDArray):
return tensor_value.asnumpy()
return make_numpy_array(tensor_value)
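A minimal attachment sketch based on the register_block API defined above; the model and output directory are placeholders, and an installed mxnet/smdebug environment is assumed.
# Sketch: attach the hook to a small Gluon block (model and paths are placeholders).
import mxnet as mx

net = mx.gluon.nn.Dense(1)
net.initialize()
hook = Hook(out_dir="/tmp/smdebug_demo")
hook.register_block(net)   # registers the forward (pre-)hooks shown above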
| 36.628253
| 106
| 0.653405
|
import mxnet as mx
from mxnet.ndarray import NDArray
from smdebug.core.collection import DEFAULT_MXNET_COLLECTIONS, CollectionKeys
from smdebug.core.hook import CallbackHook
from smdebug.core.json_config import DEFAULT_WORKER_NAME
from smdebug.core.utils import FRAMEWORK, error_handling_agent
from smdebug.mxnet.collection import CollectionManager
from smdebug.mxnet.graph import _net2pb
from smdebug.mxnet.singleton_utils import set_hook
from smdebug.mxnet.utils import get_reduction_of_data, make_numpy_array
from smdebug.profiler.profiler_config_parser import get_profiler_config_parser
DEFAULT_INCLUDE_COLLECTIONS = [CollectionKeys.LOSSES]
COLLECTIONS_NOT_REQUIRING_RECURSIVE_HOOK = [
CollectionKeys.WEIGHTS,
CollectionKeys.BIASES,
CollectionKeys.GRADIENTS,
CollectionKeys.LOSSES,
]
profiler_config_parser = get_profiler_config_parser(FRAMEWORK.PYTORCH)
class Hook(CallbackHook):
def __init__(
self,
out_dir=None,
export_tensorboard=False,
tensorboard_dir=None,
dry_run=False,
reduction_config=None,
save_config=None,
include_regex=None,
include_collections=None,
save_all=False,
include_workers="one",
):
collection_manager = CollectionManager()
super().__init__(
collection_manager=collection_manager,
default_include_collections=DEFAULT_INCLUDE_COLLECTIONS,
profiler_config_parser=profiler_config_parser,
data_type_name=mx.ndarray.NDArray.__name__,
out_dir=out_dir,
export_tensorboard=export_tensorboard,
tensorboard_dir=tensorboard_dir,
dry_run=dry_run,
reduction_config=reduction_config,
save_config=save_config,
include_regex=include_regex,
include_collections=include_collections,
save_all=save_all,
include_workers=include_workers,
)
self.last_block = None
self.model = None
self.exported_model = False
self.registered_blocks = set()
self.worker = self._get_worker_name()
set_hook(self)
def _get_worker_name(self):
try:
import horovod.mxnet as hvd
if hvd.size():
return f"worker_{hvd.rank()}"
except (ModuleNotFoundError, ValueError, ImportError):
pass
return DEFAULT_WORKER_NAME
def _get_num_workers(self):
try:
import horovod.mxnet as hvd
if hvd.size():
return hvd.size()
except (ModuleNotFoundError, ValueError, ImportError):
pass
return 1
def _cleanup(self):
if self.writer is not None and self.last_block is not None:
self._log_params(self.last_block)
if self.exported_model is False:
self._export_model()
super()._cleanup()
def _log_params(self, block):
params = block.collect_params().values()
for param in params:
self._log_param(param)
def _log_param(self, param):
try:
self._save_for_tensor(
tensor_name=param.name, tensor_value=param.data(param.list_ctx()[0])
)
if param.grad_req != "null":
self._save_for_tensor(
tensor_name=self.GRADIENT_PREFIX + param.name,
tensor_value=param.grad(param.list_ctx()[0]),
)
except RuntimeError as e:
self.logger.warning(
f"Could not log parameter {param.name} due to the mxnet exception: {e}"
)
def _export_model(self):
if self.model is not None:
try:
tb_writer = self._maybe_get_tb_writer()
if tb_writer:
tb_writer.write_graph(_net2pb(self.model))
except (RuntimeError, TypeError) as e:
self.logger.warning(
f"Could not export model graph for tensorboard "
f"due to the mxnet exception: {e}"
)
def _get_default_collections(self):
return DEFAULT_MXNET_COLLECTIONS
@error_handling_agent.catch_smdebug_errors()
def forward_pre_hook(self, block, inputs):
if self.writer is not None:
self._log_params(block)
self._close_writers()
self._close_tb_writer()
if not self.prepared_collections:
self._prepare_collections()
self.prepared_collections = True
self._increment_step()
if self._get_collections_to_save_for_step():
self._initialize_writers()
if self.exported_model is False:
self._export_model()
self.exported_model = True
if self.last_saved_step is not None and not self.exported_collections:
self.export_collections()
self.exported_collections = True
self.last_block = block
self._save_custom_tensors_post_step()
# This hook is invoked by trainer after running the forward pass.
@error_handling_agent.catch_smdebug_errors()
def forward_hook(self, block, inputs, outputs):
if not self._get_collections_to_save_for_step():
return
block_name = block.name
# This overwhelms the logs; turn back on if you really need it
# logger.debug("Processing the global step {0} for block {1}".format(self.step, block_name))
# Output input tensor
self._write_inputs(block_name, inputs)
# Output output tensors
self._write_outputs(block_name, outputs)
self.last_saved_step = self.step
def _recursive_apply(self, block):
# Check if the hook is already registered for this block.
if block in self.registered_blocks:
self.logger.warning(f"The hook is already registered to block {block.name}")
return
block.register_forward_hook(self.forward_hook)
self.registered_blocks.add(block)
def _is_recursive_needed(self):
collections_to_save = self.include_collections
# Check if default collection has a regex associated with it.
# If it does we would need to apply hook recursively.
if (
len(self.collection_manager.get(CollectionKeys.DEFAULT).include_regex) != 0
and CollectionKeys.DEFAULT in collections_to_save
):
return True
# Get the collections that are to be saved but are not part of default collections
# We will need to apply hook recursively to get tensors specified in those collections.
extra_coll = [
value
for value in collections_to_save
if value not in COLLECTIONS_NOT_REQUIRING_RECURSIVE_HOOK
]
# extra_coll contains the collections that are not part of default collections.
return len(extra_coll) != 0
def register_hook(self, block):
# for compatibility with ZCC patches which call this
self.register_block(block)
@error_handling_agent.catch_smdebug_errors()
def register_block(self, block):
if not isinstance(block, mx.gluon.Block):
self.logger.error(f"The given block type {block.__class__.__name__} is unsupported.")
return
# Check if the hook is already registered for this block.
if block in self.registered_blocks:
self.logger.warning(f"The hook is already registered to block {block.name}")
return
# Skip the forward pre hook for the Loss blocks.
if isinstance(block, mx.gluon.loss.Loss):
self.logger.info(f"Registering hook for block {block.name}")
block.register_forward_hook(self.forward_hook)
self.registered_blocks.add(block)
return
else:
self.model = block
is_recursive = self._is_recursive_needed()
block.register_forward_pre_hook(self.forward_pre_hook)
if is_recursive is True:
block.apply(self._recursive_apply)
else:
block.register_forward_hook(self.forward_hook)
self.registered_blocks.add(block)
@staticmethod
def _get_reduction_of_data(reduction_name, tensor_value, tensor_name, abs):
return get_reduction_of_data(reduction_name, tensor_value, tensor_name, abs)
@staticmethod
def _make_numpy_array(tensor_value):
if isinstance(tensor_value, NDArray):
return tensor_value.asnumpy()
return make_numpy_array(tensor_value)
| true
| true
|
7902c7e16383dbcff154b497b31f425d77375cca
| 7,736
|
py
|
Python
|
homevision_netio_controller/controller.py
|
jackoson/homevision-netio-controller
|
a50fc68654db9cbfbe49dacfa5235bcd5afbcef0
|
[
"MIT"
] | 1
|
2018-09-24T17:57:18.000Z
|
2018-09-24T17:57:18.000Z
|
homevision_netio_controller/controller.py
|
jackoson/homevision-netio-controller
|
a50fc68654db9cbfbe49dacfa5235bcd5afbcef0
|
[
"MIT"
] | null | null | null |
homevision_netio_controller/controller.py
|
jackoson/homevision-netio-controller
|
a50fc68654db9cbfbe49dacfa5235bcd5afbcef0
|
[
"MIT"
] | null | null | null |
import socket
class UserException(Exception):
pass
def user_exception(s): raise UserException(s)
class Macro:
"""Represents a macro to be run"""
def __init__(self, code):
"""code: int - index of macro to run"""
self.code = code
class Command:
"""Represents a macro to be run"""
def __init__(self, command):
"""command: string - command to send"""
self.command = command
class HomeVisionController:
def __init__(
self,
ip_address,
port,
auth,
on_off_appliance_codes={},
actions={},
process_actions={},
var_queries={},
flag_queries={},
flag_return_values = {True: ["True", "On", "Yes", "Occupied", "Set", "1"], False: ["False", "Off", "No", "Vacant", "Clear", "0"]},
on_off_commands = None
):
"""
Args:
ip_address: string
port: int
auth: string
- key for authenticating with netio
on_off_appliance_codes: dict[string] => int - codes to be fed to 'on_off_commands' for each appliance
actions: dict[string] => Macro/Command/(_, _, ...) - named actions to be completed
process_actions: dict[string] => {"START": X, "STOP": X} where X is Macro/Command/(_, _, ...) - named processes to be started and stopped
var_queries: dict[string] => int - mapping of names to variable indexes
flag_queries: dict[string] => int - mapping of names to flag indexes
flag_return_values: {True: [string], False: [string]} - synonyms for true and false that are returned by netio 'read flag command'. (ignore if you haven't set them up)
on_off_commands: {"ON": (int) => Macro/Command/(_, _, ...), "OFF": (int) => Macro/Command} - how to handle on and off commands
"""
self.ip_address = ip_address
self.port = port
self.auth = auth
self.on_off_appliance_codes = on_off_appliance_codes
self.actions = actions
self.process_actions = process_actions
self.var_queries = var_queries
self.flag_queries = flag_queries
self.flag_return_values = flag_return_values
self.on_off_commands = on_off_commands
def on_off_command(self, details):
"""Send an on or off command to an appliance
Sends the specified command to the homevision through netio interface to control the specified appliance.
Args:
details: {"appliance": string, "state": string}
"""
if "appliance" not in details:
raise Exception("appliance not specified")
elif "state" not in details:
raise Exception("state not specified")
if details["appliance"] not in self.on_off_appliance_codes.keys():
raise Exception("appliance not supported. Must be one of: " + ",".join(self.on_off_appliance_codes.keys()))
appliance_code = self.on_off_appliance_codes[details["appliance"]]
if details['state'] == "ON":
self._switch_on(appliance_code)
elif details["state"] == "OFF":
self._switch_off(appliance_code)
else:
raise Exception("state not supported. Must be either \"ON\" or \"OFF\".")
def action_command(self, details):
"""Send an action command
Sends the specified command to the homevision through netio interface.
Args:
details: {"command": string}
"""
if "command" not in details:
raise Exception("Command not specified")
if details["command"] not in self.actions.keys():
raise Exception("Command not supported. Must be one of: " + ",".join(self.actions.keys()))
self._handle_action(self.actions[details["command"]])
def start_stop_command(self, details):
"""Starts or stops a process
Sends the specified command to the homevision through netio interface to control the specified process.
Args:
details: {"action": string, "process": string}
"""
if "action" not in details:
raise Exception("action not specified")
elif "process" not in details:
raise Exception("process not specified")
if details["process"] not in self.process_actions.keys():
raise Exception("process not supported. Must be one of: " + ",".join(self.process_actions.keys()))
if details['action'] == "START":
self._handle_action(self.process_actions[details["process"]]["START"])
elif details["action"] == "STOP":
self._handle_action(self.process_actions[details["process"]]["STOP"])
else:
raise Exception("action not supported. Must be either \"START\" or \"STOP\".")
def _handle_action(self, action):
def handle_single(a):
if type(a) == Macro:
self._run_macro(a.code)
elif type(a) == Command:
self._send_command(a.command)
elif type(a) == Exception:
raise a
else:
raise Exception("Internal Error: invalid action type. Should be Macro, Command or Exception")
if type(action) == tuple:
for a in action:
handle_single(a)
else:
handle_single(action)
def var_query(self, details):
"""Returns the answer to a query on variable
Returns the answer to a query on the specified variable using netio
Args:
details: {"query": string}
"""
if "query" not in details:
raise Exception("query not specified")
if details["query"] not in self.var_queries.keys():
raise Exception("query not supported. Must be one of: " + ",".join(self.var_queries.keys()))
code = self.var_queries[details["query"]]
if type(code) == int:
val = self._get_var(code)
elif type(code) == tuple:
val = [self._get_var(c) for c in code]
else:
raise Exception("Internal Exception: variable code is not valid")
return val
def flag_query(self, details):
"""Returns the answer to a query on flag
        Returns the answer to a query on the specified flag using netio
Args:
details: {"query": string}
"""
if "query" not in details:
raise Exception("query not specified")
if details["query"] not in self.flag_queries.keys():
raise Exception("query not supported. Must be one of: " + ",".join(self.flag_queries.keys()))
val = self._get_flag(self.flag_queries[details["query"]])
return "yes" if val else "no"
def _switch_on(self, code):
if self.on_off_commands == None:
raise Exception("No On/Off command set")
self._handle_action(self.on_off_commands["ON"](code))
def _switch_off(self, code):
if self.on_off_commands == None:
raise Exception("No On/Off command set")
self._handle_action(self.on_off_commands["OFF"](code))
def _run_macro(self, code):
self._send_command(b'action macro run ' + bytes(str(code), encoding="ascii") + b'; __wait 100')
def _send_command(self, command):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.ip_address, self.port))
s.send(bytes("auth " + self.auth + "\n", encoding="ascii"))
s.send(command)
s.close()
def _get_var(self, id):
return int(self._run_read_command(b"get var state " + bytes(str(id), encoding="ascii")))
def _get_flag(self, id):
ret = self._run_read_command(b"get flag state " + bytes(str(id), encoding="ascii"))
if ret in self.flag_return_values[False]:
return False
elif ret in self.flag_return_values[True]:
return True
else:
raise Exception("Flag value not supported: " + ret)
def _run_read_command(self, command):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.ip_address, self.port))
s.send(bytes("auth " + self.auth + "\n", encoding="ascii"))
s.recv(10)
s.send(command)
s.send(b'\n')
response = s.recv(10).decode(encoding="ascii").rstrip()
s.close()
return response
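A usage sketch for the controller above; the address, auth key, appliance code and macro indices are invented values, and the ON/OFF handlers simply map an appliance code to a Macro.
# Sketch: wire up a controller with one appliance and switch it on.
# IP, port, auth key and macro numbers are hypothetical.
controller = HomeVisionController(
    ip_address='192.168.1.50',
    port=11111,
    auth='secret',
    on_off_appliance_codes={'lamp': 3},
    on_off_commands={
        'ON': lambda code: Macro(code),
        'OFF': lambda code: Macro(code + 100),
    },
)
controller.on_off_command({'appliance': 'lamp', 'state': 'ON'})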
| 34.230088
| 173
| 0.650724
|
import socket
class UserException(Exception):
pass
def user_exception(s): raise UserException(s)
class Macro:
def __init__(self, code):
self.code = code
class Command:
def __init__(self, command):
self.command = command
class HomeVisionController:
def __init__(
self,
ip_address,
port,
auth,
on_off_appliance_codes={},
actions={},
process_actions={},
var_queries={},
flag_queries={},
flag_return_values = {True: ["True", "On", "Yes", "Occupied", "Set", "1"], False: ["False", "Off", "No", "Vacant", "Clear", "0"]},
on_off_commands = None
):
self.ip_address = ip_address
self.port = port
self.auth = auth
self.on_off_appliance_codes = on_off_appliance_codes
self.actions = actions
self.process_actions = process_actions
self.var_queries = var_queries
self.flag_queries = flag_queries
self.flag_return_values = flag_return_values
self.on_off_commands = on_off_commands
def on_off_command(self, details):
if "appliance" not in details:
raise Exception("appliance not specified")
elif "state" not in details:
raise Exception("state not specified")
if details["appliance"] not in self.on_off_appliance_codes.keys():
raise Exception("appliance not supported. Must be one of: " + ",".join(self.on_off_appliance_codes.keys()))
appliance_code = self.on_off_appliance_codes[details["appliance"]]
if details['state'] == "ON":
self._switch_on(appliance_code)
elif details["state"] == "OFF":
self._switch_off(appliance_code)
else:
raise Exception("state not supported. Must be either \"ON\" or \"OFF\".")
def action_command(self, details):
if "command" not in details:
raise Exception("Command not specified")
if details["command"] not in self.actions.keys():
raise Exception("Command not supported. Must be one of: " + ",".join(self.actions.keys()))
self._handle_action(self.actions[details["command"]])
def start_stop_command(self, details):
if "action" not in details:
raise Exception("action not specified")
elif "process" not in details:
raise Exception("process not specified")
if details["process"] not in self.process_actions.keys():
raise Exception("process not supported. Must be one of: " + ",".join(self.process_actions.keys()))
if details['action'] == "START":
self._handle_action(self.process_actions[details["process"]]["START"])
elif details["action"] == "STOP":
self._handle_action(self.process_actions[details["process"]]["STOP"])
else:
raise Exception("action not supported. Must be either \"START\" or \"STOP\".")
def _handle_action(self, action):
def handle_single(a):
if type(a) == Macro:
self._run_macro(a.code)
elif type(a) == Command:
self._send_command(a.command)
elif type(a) == Exception:
raise a
else:
raise Exception("Internal Error: invalid action type. Should be Macro, Command or Exception")
if type(action) == tuple:
for a in action:
handle_single(a)
else:
handle_single(action)
def var_query(self, details):
if "query" not in details:
raise Exception("query not specified")
if details["query"] not in self.var_queries.keys():
raise Exception("query not supported. Must be one of: " + ",".join(self.var_queries.keys()))
code = self.var_queries[details["query"]]
if type(code) == int:
val = self._get_var(code)
elif type(code) == tuple:
val = [self._get_var(c) for c in code]
else:
raise Exception("Internal Exception: variable code is not valid")
return val
def flag_query(self, details):
if "query" not in details:
raise Exception("query not specified")
if details["query"] not in self.flag_queries.keys():
raise Exception("query not supported. Must be one of: " + ",".join(self.flag_queries.keys()))
val = self._get_flag(self.flag_queries[details["query"]])
return "yes" if val else "no"
def _switch_on(self, code):
if self.on_off_commands == None:
raise Exception("No On/Off command set")
self._handle_action(self.on_off_commands["ON"](code))
def _switch_off(self, code):
if self.on_off_commands == None:
raise Exception("No On/Off command set")
self._handle_action(self.on_off_commands["OFF"](code))
def _run_macro(self, code):
self._send_command(b'action macro run ' + bytes(str(code), encoding="ascii") + b'; __wait 100')
def _send_command(self, command):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.ip_address, self.port))
s.send(bytes("auth " + self.auth + "\n", encoding="ascii"))
s.send(command)
s.close()
def _get_var(self, id):
return int(self._run_read_command(b"get var state " + bytes(str(id), encoding="ascii")))
def _get_flag(self, id):
ret = self._run_read_command(b"get flag state " + bytes(str(id), encoding="ascii"))
if ret in self.flag_return_values[False]:
return False
elif ret in self.flag_return_values[True]:
return True
else:
raise Exception("Flag value not supported: " + ret)
def _run_read_command(self, command):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.ip_address, self.port))
s.send(bytes("auth " + self.auth + "\n", encoding="ascii"))
s.recv(10)
s.send(command)
s.send(b'\n')
response = s.recv(10).decode(encoding="ascii").rstrip()
s.close()
return response
| true
| true
|
7902c98a93d7736cd750fff46aacbca1fb7ce426
| 353
|
py
|
Python
|
plugins/digitalbitbox/cmdline.py
|
surinder83singh/Elymus
|
054ba5138da886db0523182101f862961b18e6f7
|
[
"MIT"
] | 153
|
2018-02-26T16:22:27.000Z
|
2020-10-08T09:15:05.000Z
|
plugins/digitalbitbox/cmdline.py
|
surinder83singh/Elymus
|
054ba5138da886db0523182101f862961b18e6f7
|
[
"MIT"
] | 89
|
2018-03-03T23:17:11.000Z
|
2020-07-13T10:19:29.000Z
|
plugins/digitalbitbox/cmdline.py
|
surinder83singh/Elymus
|
054ba5138da886db0523182101f862961b18e6f7
|
[
"MIT"
] | 30
|
2018-03-03T13:41:14.000Z
|
2019-11-01T18:05:07.000Z
|
from electrum.plugins import hook
from .digitalbitbox import DigitalBitboxPlugin
from ..hw_wallet import CmdLineHandler
class Plugin(DigitalBitboxPlugin):
handler = CmdLineHandler()
@hook
def init_keystore(self, keystore):
if not isinstance(keystore, self.keystore_class):
return
keystore.handler = self.handler
| 29.416667
| 57
| 0.736544
|
from electrum.plugins import hook
from .digitalbitbox import DigitalBitboxPlugin
from ..hw_wallet import CmdLineHandler
class Plugin(DigitalBitboxPlugin):
handler = CmdLineHandler()
@hook
def init_keystore(self, keystore):
if not isinstance(keystore, self.keystore_class):
return
keystore.handler = self.handler
| true
| true
|
7902ca1541187ac7d6d64c3f6ce1de5be3f2e3df
| 2,178
|
py
|
Python
|
DP/Leetcode1143.py
|
Rylie-W/LeetRecord
|
623c4efe88b3af54b8a65f6ec23db850b8c6f46f
|
[
"Apache-2.0"
] | null | null | null |
DP/Leetcode1143.py
|
Rylie-W/LeetRecord
|
623c4efe88b3af54b8a65f6ec23db850b8c6f46f
|
[
"Apache-2.0"
] | null | null | null |
DP/Leetcode1143.py
|
Rylie-W/LeetRecord
|
623c4efe88b3af54b8a65f6ec23db850b8c6f46f
|
[
"Apache-2.0"
] | null | null | null |
class Solution:
def longestCommonSubsequence(self, text1: str, text2: str) -> int:
'''
        # Longest common contiguous substring
l1=len(text1)
l2=len(text2)
if l1==0 or l2==0:
return 0
dp = [[0 for i in range(l2)] for i in range(l1)]
res = 0
if text1[0]==text2[0]:
dp[0][0]=1
res=1
for i in range(1,l2):
if text2[i]==text1[0]:
dp[0][i]=1
res=1
for i in range(1,l1):
if text1[i]==text2[0]:
dp[i][0]=1
res=1
for i in range(1,l1):
for j in range(1,l2):
if text1[i]==text2[j]:
dp[i][j]=dp[i-1][j-1]+1
res=max(res,dp[i][j])
return res
'''
'''
        # Longest common subsequence (need not be contiguous): essentially asks how many matching letters text1[:i+1] and text2[:j+1] share
l1 = len(text1)
l2 = len(text2)
if l1 == 0 or l2 == 0:
return 0
dp = [[0 for i in range(l2)] for i in range(l1)]
if text1[0] == text2[0]:
dp[0][0] = 1
for i in range(1, l2):
if text2[i] == text1[0] or dp[0][0]==1 or dp[0][i-1]==1:
dp[0][i] = 1
for i in range(1, l1):
if text1[i] == text2[0] or dp[0][0]==1 or dp[i-1][0]==1:
dp[i][0] = 1
for i in range(1, l1):
for j in range(1, l2):
if text1[i] == text2[j]:
dp[i][j] = dp[i - 1][j - 1] + 1
else:
dp[i][j]=max(dp[i][j-1],dp[i-1][j])
return dp[-1][-1]
'''
#recursion
#exit case
if len(text1)==0 or len(text2)==0:
return 0
if text1[-1]==text2[-1]:
return 1+self.longestCommonSubsequence(text1[:-1],text2[:-1])
else:
return max(self.longestCommonSubsequence(text1[:-1],text2),self.longestCommonSubsequence(text1,text2[:-1]))
if __name__ == '__main__':
sol=Solution()
text1 ="ylqpejqbalahwr"
text2 ="yrkzavgdmdgtqpg"
# "hofubmnylkra"
# "pqhgxgdofcvmr"
print(sol.longestCommonSubsequence(text1,text2))
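The bare recursion above is exponential in the worst case; a memoised variant of the same idea (a sketch, not part of the original submission) brings it down to O(len(text1) * len(text2)).
# Memoised variant of the same recursion (illustrative sketch only).
from functools import lru_cache

def lcs(text1: str, text2: str) -> int:
    @lru_cache(maxsize=None)
    def go(i: int, j: int) -> int:
        if i == len(text1) or j == len(text2):
            return 0
        if text1[i] == text2[j]:
            return 1 + go(i + 1, j + 1)
        return max(go(i + 1, j), go(i, j + 1))
    return go(0, 0)

print(lcs("ylqpejqbalahwr", "yrkzavgdmdgtqpg"))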
| 27.225
| 119
| 0.435721
|
class Solution:
def longestCommonSubsequence(self, text1: str, text2: str) -> int:
if len(text1)==0 or len(text2)==0:
return 0
if text1[-1]==text2[-1]:
return 1+self.longestCommonSubsequence(text1[:-1],text2[:-1])
else:
return max(self.longestCommonSubsequence(text1[:-1],text2),self.longestCommonSubsequence(text1,text2[:-1]))
if __name__ == '__main__':
sol=Solution()
text1 ="ylqpejqbalahwr"
text2 ="yrkzavgdmdgtqpg"
print(sol.longestCommonSubsequence(text1,text2))
| true
| true
|
7902ca1e83acd9aee63248151912385c91291913
| 3,129
|
py
|
Python
|
tdsa_augmentation/data_creation/shrink_glove_to_targets.py
|
apmoore1/tdsa_augmentation
|
71c9ffa79ea48e817408d0dc496cc146ce75a942
|
[
"Apache-2.0"
] | null | null | null |
tdsa_augmentation/data_creation/shrink_glove_to_targets.py
|
apmoore1/tdsa_augmentation
|
71c9ffa79ea48e817408d0dc496cc146ce75a942
|
[
"Apache-2.0"
] | null | null | null |
tdsa_augmentation/data_creation/shrink_glove_to_targets.py
|
apmoore1/tdsa_augmentation
|
71c9ffa79ea48e817408d0dc496cc146ce75a942
|
[
"Apache-2.0"
] | null | null | null |
import argparse
from pathlib import Path
import tempfile
from typing import List
from gensim.models import KeyedVectors
from gensim.scripts.glove2word2vec import glove2word2vec
from target_extraction.data_types import TargetTextCollection
from target_extraction.tokenizers import spacy_tokenizer
def parse_path(path_string: str) -> Path:
path_string = Path(path_string).resolve()
return path_string
def shrink_glove_file(glove_fp: Path, filter_words: List[str], save_fp: Path
) -> None:
'''
:param glove_fp: File path to the glove file that is to be shrinked
:param filter_words: List of words to filter/shrink the glove file/vectors
by
:param save_fp:
'''
with save_fp.open('w+') as save_file:
with glove_fp.open('r') as glove_file:
for glove_vector in glove_file:
glove_parts = glove_vector.split()
if (len(glove_parts) == 301 or len(glove_parts) == 51 or
len(glove_parts) == 201):
pass
else:
continue
glove_word = glove_parts[0]
if glove_word in filter_words:
save_file.write(glove_vector)
#python tdsa_augmentation/data_creation/shrink_glove_to_targets.py ./data/original_restaurant_sentiment/train.json ./resources/word_embeddings/glove.840B.300d.txt ./here
if __name__ == '__main__':
glove_fp_help = 'File path to the Glove embedding to be shrunk and '\
'converted to Word2Vec format'
parser = argparse.ArgumentParser()
parser.add_argument("json_train_data", type=parse_path,
help='File path JSON training data')
parser.add_argument("glove_embedding_fp", type=parse_path,
help=glove_fp_help)
parser.add_argument("target_only_word2vec_path", type=parse_path,
                        help='File path to save the embedding to.')
args = parser.parse_args()
save_fp = args.target_only_word2vec_path
if save_fp.exists():
print('A file already exists at the location to store '
f'the new Word2Vec model/vector: {save_fp}\n'
'Thus skipping the rest of this script.')
else:
dataset = TargetTextCollection.load_json(args.json_train_data)
all_targets = list(dataset.target_count(lower=True).keys())
tokenizer = spacy_tokenizer()
tokenised_targets = [target for targets in all_targets for target in tokenizer(targets)]
with tempfile.TemporaryDirectory() as temp_dir:
shrink_glove_temp_fp = Path(temp_dir, 'temp_glove')
shrink_word_vec_temp_fp = Path(temp_dir, 'temp_wordvec')
shrink_glove_file(args.glove_embedding_fp, tokenised_targets, shrink_glove_temp_fp)
glove2word2vec(shrink_glove_temp_fp, shrink_word_vec_temp_fp)
model = KeyedVectors.load_word2vec_format(shrink_word_vec_temp_fp)
model.save(str(save_fp))
print(f'Word2Vec shrunk to target model saved to {save_fp}')
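shrink_glove_file can also be exercised on its own with a throwaway two-word GloVe-style file; the vectors below are fake 50-dimensional rows (51 tokens per line) chosen only to satisfy the length check above.
# Sketch: run shrink_glove_file on a tiny fake GloVe file (all values made up).
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmp:
    fake_glove = Path(tmp, 'mini_glove.txt')
    vec = ' '.join(['0.1'] * 50)   # fake 50-dim vector -> 51 tokens per line
    fake_glove.write_text(f"pizza {vec}\nwaiter {vec}\nquantum {vec}\n")
    out = Path(tmp, 'shrunk.txt')
    shrink_glove_file(fake_glove, ['pizza', 'waiter'], out)
    print(out.read_text())         # only the pizza and waiter rows remain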
| 42.863014
| 169
| 0.660914
|
import argparse
from pathlib import Path
import tempfile
from typing import List
from gensim.models import KeyedVectors
from gensim.scripts.glove2word2vec import glove2word2vec
from target_extraction.data_types import TargetTextCollection
from target_extraction.tokenizers import spacy_tokenizer
def parse_path(path_string: str) -> Path:
path_string = Path(path_string).resolve()
return path_string
def shrink_glove_file(glove_fp: Path, filter_words: List[str], save_fp: Path
) -> None:
with save_fp.open('w+') as save_file:
with glove_fp.open('r') as glove_file:
for glove_vector in glove_file:
glove_parts = glove_vector.split()
if (len(glove_parts) == 301 or len(glove_parts) == 51 or
len(glove_parts) == 201):
pass
else:
continue
glove_word = glove_parts[0]
if glove_word in filter_words:
save_file.write(glove_vector)
if __name__ == '__main__':
glove_fp_help = 'File path to the Glove embedding to be shrunk and '\
'converted to Word2Vec format'
parser = argparse.ArgumentParser()
parser.add_argument("json_train_data", type=parse_path,
help='File path JSON training data')
parser.add_argument("glove_embedding_fp", type=parse_path,
help=glove_fp_help)
parser.add_argument("target_only_word2vec_path", type=parse_path,
help='File path to save the embedding too.')
args = parser.parse_args()
save_fp = args.target_only_word2vec_path
if save_fp.exists():
print('A file already exists at the location to store '
f'the new Word2Vec model/vector: {save_fp}\n'
'Thus skipping the rest of this script.')
else:
dataset = TargetTextCollection.load_json(args.json_train_data)
all_targets = list(dataset.target_count(lower=True).keys())
tokenizer = spacy_tokenizer()
tokenised_targets = [target for targets in all_targets for target in tokenizer(targets)]
with tempfile.TemporaryDirectory() as temp_dir:
shrink_glove_temp_fp = Path(temp_dir, 'temp_glove')
shrink_word_vec_temp_fp = Path(temp_dir, 'temp_wordvec')
shrink_glove_file(args.glove_embedding_fp, tokenised_targets, shrink_glove_temp_fp)
glove2word2vec(shrink_glove_temp_fp, shrink_word_vec_temp_fp)
model = KeyedVectors.load_word2vec_format(shrink_word_vec_temp_fp)
model.save(str(save_fp))
print(f'Word2Vec shrunk to target model saved to {save_fp}')
| true
| true
|
7902cb2718542b07ccbd7a246e71308ac495f714
| 3,429
|
py
|
Python
|
vertica_python/tests/base.py
|
jakubjedelsky/vertica-python
|
f379576b6949638c90908f5ebded321dce9330e5
|
[
"MIT"
] | 1
|
2019-06-17T19:05:10.000Z
|
2019-06-17T19:05:10.000Z
|
vertica_python/tests/base.py
|
jakubjedelsky/vertica-python
|
f379576b6949638c90908f5ebded321dce9330e5
|
[
"MIT"
] | null | null | null |
vertica_python/tests/base.py
|
jakubjedelsky/vertica-python
|
f379576b6949638c90908f5ebded321dce9330e5
|
[
"MIT"
] | 2
|
2020-06-20T21:26:31.000Z
|
2021-04-03T10:44:40.000Z
|
from __future__ import print_function, division, absolute_import
import os
import unittest
from six import string_types
from .. import *
from ..compat import as_text, as_str, as_bytes
DEFAULT_VP_TEST_HOST = '127.0.0.1'
DEFAULT_VP_TEST_PORT = 5433
DEFAULT_VP_TEST_USER = 'dbadmin'
DEFAULT_VP_TEST_PASSWD = ''
DEFAULT_VP_TEST_DB = 'docker'
DEFAULT_VP_TEST_TABLE = 'vertica_python_unit_test'
class VerticaPythonTestCase(unittest.TestCase):
"""Base class for tests that query Vertica."""
@classmethod
def setUpClass(cls):
cls._host = os.getenv('VP_TEST_HOST', DEFAULT_VP_TEST_HOST)
cls._port = int(os.getenv('VP_TEST_PORT', DEFAULT_VP_TEST_PORT))
cls._user = os.getenv('VP_TEST_USER', DEFAULT_VP_TEST_USER)
cls._password = os.getenv('VP_TEST_PASSWD', DEFAULT_VP_TEST_PASSWD)
cls._database = os.getenv('VP_TEST_DB', DEFAULT_VP_TEST_DB)
cls._table = os.getenv('VP_TEST_TABLE', DEFAULT_VP_TEST_TABLE)
cls._conn_info = {
'host': cls._host,
'port': cls._port,
'database': cls._database,
'user': cls._user,
'password': cls._password,
}
@classmethod
def tearDownClass(cls):
with cls._connect() as conn:
cur = conn.cursor()
cur.execute("DROP TABLE IF EXISTS {0}".format(cls._table))
@classmethod
def _connect(cls):
"""Connects to vertica.
:return: a connection to vertica.
"""
return connect(**cls._conn_info)
def _query_and_fetchall(self, query):
"""Creates a new connection, executes a query and fetches all the results.
:param query: query to execute
:return: all fetched results as returned by cursor.fetchall()
"""
with self._connect() as conn:
cur = conn.cursor()
cur.execute(query)
results = cur.fetchall()
return results
def _query_and_fetchone(self, query):
"""Creates a new connection, executes a query and fetches one result.
:param query: query to execute
:return: the first result fetched by cursor.fetchone()
"""
with self._connect() as conn:
cur = conn.cursor()
cur.execute(query)
result = cur.fetchone()
return result
def assertTextEqual(self, first, second, msg=None):
first_text = as_text(first)
second_text = as_text(second)
self.assertEqual(first=first_text, second=second_text, msg=msg)
def assertStrEqual(self, first, second, msg=None):
first_str = as_str(first)
second_str = as_str(second)
self.assertEqual(first=first_str, second=second_str, msg=msg)
def assertBytesEqual(self, first, second, msg=None):
first_bytes = as_bytes(first)
second_bytes = as_bytes(second)
self.assertEqual(first=first_bytes, second=second_bytes, msg=msg)
def assertResultEqual(self, value, result, msg=None):
if isinstance(value, string_types):
self.assertTextEqual(first=value, second=result, msg=msg)
else:
self.assertEqual(first=value, second=result, msg=msg)
def assertListOfListsEqual(self, list1, list2, msg=None):
self.assertEqual(len(list1), len(list2), msg=msg)
for l1, l2 in zip(list1, list2):
self.assertListEqual(l1, l2, msg=msg)
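A sketch of how a concrete test might build on the base class above; it assumes a reachable Vertica instance (or the VP_TEST_* environment variables) and uses an arbitrary query.
# Sketch: a concrete test case on top of VerticaPythonTestCase (DB connection assumed).
class SimpleQueryTestCase(VerticaPythonTestCase):
    def test_select_one(self):
        row = self._query_and_fetchone("SELECT 1")
        self.assertEqual(row[0], 1)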
| 32.971154
| 82
| 0.642461
|
from __future__ import print_function, division, absolute_import
import os
import unittest
from six import string_types
from .. import *
from ..compat import as_text, as_str, as_bytes
DEFAULT_VP_TEST_HOST = '127.0.0.1'
DEFAULT_VP_TEST_PORT = 5433
DEFAULT_VP_TEST_USER = 'dbadmin'
DEFAULT_VP_TEST_PASSWD = ''
DEFAULT_VP_TEST_DB = 'docker'
DEFAULT_VP_TEST_TABLE = 'vertica_python_unit_test'
class VerticaPythonTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._host = os.getenv('VP_TEST_HOST', DEFAULT_VP_TEST_HOST)
cls._port = int(os.getenv('VP_TEST_PORT', DEFAULT_VP_TEST_PORT))
cls._user = os.getenv('VP_TEST_USER', DEFAULT_VP_TEST_USER)
cls._password = os.getenv('VP_TEST_PASSWD', DEFAULT_VP_TEST_PASSWD)
cls._database = os.getenv('VP_TEST_DB', DEFAULT_VP_TEST_DB)
cls._table = os.getenv('VP_TEST_TABLE', DEFAULT_VP_TEST_TABLE)
cls._conn_info = {
'host': cls._host,
'port': cls._port,
'database': cls._database,
'user': cls._user,
'password': cls._password,
}
@classmethod
def tearDownClass(cls):
with cls._connect() as conn:
cur = conn.cursor()
cur.execute("DROP TABLE IF EXISTS {0}".format(cls._table))
@classmethod
def _connect(cls):
return connect(**cls._conn_info)
def _query_and_fetchall(self, query):
with self._connect() as conn:
cur = conn.cursor()
cur.execute(query)
results = cur.fetchall()
return results
def _query_and_fetchone(self, query):
with self._connect() as conn:
cur = conn.cursor()
cur.execute(query)
result = cur.fetchone()
return result
def assertTextEqual(self, first, second, msg=None):
first_text = as_text(first)
second_text = as_text(second)
self.assertEqual(first=first_text, second=second_text, msg=msg)
def assertStrEqual(self, first, second, msg=None):
first_str = as_str(first)
second_str = as_str(second)
self.assertEqual(first=first_str, second=second_str, msg=msg)
def assertBytesEqual(self, first, second, msg=None):
first_bytes = as_bytes(first)
second_bytes = as_bytes(second)
self.assertEqual(first=first_bytes, second=second_bytes, msg=msg)
def assertResultEqual(self, value, result, msg=None):
if isinstance(value, string_types):
self.assertTextEqual(first=value, second=result, msg=msg)
else:
self.assertEqual(first=value, second=result, msg=msg)
def assertListOfListsEqual(self, list1, list2, msg=None):
self.assertEqual(len(list1), len(list2), msg=msg)
for l1, l2 in zip(list1, list2):
self.assertListEqual(l1, l2, msg=msg)
| true
| true
|
7902cca06e3a841cee96255c053ca834cc5022f5
| 7,223
|
py
|
Python
|
src/pte/filetools/filefinder_abc.py
|
richardkoehler/pynm-decode
|
3120a410d79d3fce45d0f59025d68ba2d5e80d9e
|
[
"MIT"
] | 1
|
2022-01-08T09:33:09.000Z
|
2022-01-08T09:33:09.000Z
|
src/pte/filetools/filefinder_abc.py
|
richardkoehler/pynm-decode
|
3120a410d79d3fce45d0f59025d68ba2d5e80d9e
|
[
"MIT"
] | null | null | null |
src/pte/filetools/filefinder_abc.py
|
richardkoehler/pynm-decode
|
3120a410d79d3fce45d0f59025d68ba2d5e80d9e
|
[
"MIT"
] | null | null | null |
"""Define abstract base classes to construct FileFinder classes."""
import os
import shutil
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Sequence, Union
import mne_bids
@dataclass
class FileFinder(ABC):
"""Basic representation of class for finding and filtering files."""
hemispheres: Union[dict, None] = field(default_factory=dict)
directory: Union[Path, str] = field(init=False)
files: list = field(init=False, default_factory=list)
def __str__(self):
if not self.files:
return "No corresponding files found."
headers = ["Index", "Filename"]
col_width = max(len(os.path.basename(file)) for file in self.files)
format_row = f"{{:>{len(headers[0]) + 2}}}{{:>{col_width + 2}}}"
terminal_size = "\u2500" * shutil.get_terminal_size().columns
return "\n".join(
(
"Corresponding files found:",
"".join(
f"{{:>{len(header) + 2}}}".format(header)
for header in headers
),
terminal_size,
*(
format_row.format(idx, os.path.basename(file))
for idx, file in enumerate(self.files)
),
)
)
def __len__(self) -> int:
if not self.files:
return 0
return len(self.files)
@abstractmethod
def find_files(
self,
directory: Union[str, Path],
extensions: Optional[Union[Sequence, str]] = None,
keywords: Optional[Union[list, str]] = None,
hemisphere: Optional[str] = None,
stimulation: Optional[str] = None,
medication: Optional[str] = None,
exclude: Optional[Union[str, list]] = None,
verbose: bool = False,
) -> None:
"""Find files in directory with optional
keywords and extensions."""
@abstractmethod
def filter_files(
self,
keywords: Optional[Union[str, list]] = None,
hemisphere: Optional[str] = None,
stimulation: Optional[str] = None,
medication: Optional[str] = None,
exclude: Optional[Union[str, list]] = None,
verbose: bool = False,
) -> None:
"""Filter list of filepaths for given parameters."""
@staticmethod
def _keyword_search(
files: list[str], keywords: Optional[Union[str, list]]
) -> list:
if not keywords:
return files
if not isinstance(keywords, list):
keywords = [keywords]
filtered_files = [
file for file in files if any(key in file for key in keywords)
]
return filtered_files
def _find_files(
self,
directory: Union[Path, str],
extensions: Optional[Union[list, str]] = None,
) -> None:
"""Find files in directory with optional extensions.
Args:
directory (string)
keywords (list): e.g. ["SelfpacedRota", "ButtonPress] (optional)
extensions (list): e.g. [".json" or "tsv"] (optional)
verbose (bool): verbosity level (optional, default=True)
"""
files = []
for root, _, fnames in os.walk(directory):
fnames = [os.path.join(root, file) for file in fnames]
fnames = self._keyword_search(fnames, extensions)
if fnames:
files.extend(fnames)
self.files = files
def _filter_files(
self,
keywords: Optional[Union[str, list[str]]] = None,
hemisphere: Optional[str] = None,
stimulation: Optional[str] = None,
medication: Optional[str] = None,
exclude: Optional[Union[str, list[str]]] = None,
) -> None:
"""Filter filepaths for given parameters."""
filtered_files = self.files
if exclude:
if not isinstance(exclude, list):
exclude = [exclude]
filtered_files = [
file
for file in filtered_files
if not any(item in file for item in exclude)
]
if keywords:
if not isinstance(keywords, list):
keywords = [keywords]
filtered_files = self._keyword_search(filtered_files, keywords)
if stimulation:
if stimulation.lower() in "stimon":
stim = "StimOn"
elif stimulation.lower() in "stimoff":
stim = "StimOff"
else:
raise ValueError("Keyword for stimulation not valid.")
filtered_files = self._keyword_search(filtered_files, [stim])
if medication:
if medication.lower() in "medon":
med = "MedOn"
elif medication.lower() in "medoff":
med = "MedOff"
else:
raise ValueError("Keyword for medication not valid.")
filtered_files = self._keyword_search(filtered_files, [med])
if hemisphere:
matching_files = []
for file in filtered_files:
subject = mne_bids.get_entities_from_fname(file)["subject"]
if (
subject not in self.hemispheres
or self.hemispheres[subject] is None
):
raise HemisphereNotSpecifiedError(
subject, self.hemispheres
)
hem = self.hemispheres[subject] + "_"
if hemisphere.lower() in "ipsilateral" and hem in file:
matching_files.append(file)
if hemisphere.lower() in "contralateral" and hem not in file:
matching_files.append(file)
filtered_files = matching_files
self.files = filtered_files
class DirectoryNotFoundError(Exception):
"""Exception raised when invalid Reader is passed.
Attributes:
directory -- input directory which caused the error
"""
def __init__(
self,
directory: Union[Path, str],
message="Input directory was not found.",
):
self.directory = directory
self.message = message
super().__init__(self.message)
def __str__(self):
return f"{self.message} Got: {self.directory}."
class HemisphereNotSpecifiedError(Exception):
"""Exception raised when electrode hemisphere is not specified in settings.
Attributes:
subject -- input subject which caused the error
hemisphere -- specified hemispheres
message -- explanation of the error
"""
def __init__(
self,
subject,
hemispheres,
message=(
"Input ECOG hemisphere is not specified in"
" `filefinder_settings.py` for given subject."
),
) -> None:
self.subject = subject
self.hemispheres = hemispheres
self.message = message
super().__init__(self.message)
def __str__(self):
return (
f"{self.message} Unspecified subject: {self.subject}."
f" Specified hemispheres: {self.hemispheres}."
)
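A sketch of a concrete subclass wiring the two abstract methods to the protected helpers defined above; the directory, extension and keyword in the commented usage are placeholders.
# Sketch: minimal concrete FileFinder delegating to the protected helpers above.
class SimpleFileFinder(FileFinder):
    def find_files(self, directory, extensions=None, keywords=None,
                   hemisphere=None, stimulation=None, medication=None,
                   exclude=None, verbose=False) -> None:
        self.directory = directory
        self._find_files(directory, extensions)
        self.filter_files(keywords, hemisphere, stimulation, medication, exclude, verbose)

    def filter_files(self, keywords=None, hemisphere=None, stimulation=None,
                     medication=None, exclude=None, verbose=False) -> None:
        self._filter_files(keywords, hemisphere, stimulation, medication, exclude)

# finder = SimpleFileFinder()
# finder.find_files('/data/recordings', extensions='.vhdr', keywords='MedOff')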
| 33.439815
| 79
| 0.56417
|
import os
import shutil
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Sequence, Union
import mne_bids
@dataclass
class FileFinder(ABC):
hemispheres: Union[dict, None] = field(default_factory=dict)
directory: Union[Path, str] = field(init=False)
files: list = field(init=False, default_factory=list)
def __str__(self):
if not self.files:
return "No corresponding files found."
headers = ["Index", "Filename"]
col_width = max(len(os.path.basename(file)) for file in self.files)
format_row = f"{{:>{len(headers[0]) + 2}}}{{:>{col_width + 2}}}"
terminal_size = "\u2500" * shutil.get_terminal_size().columns
return "\n".join(
(
"Corresponding files found:",
"".join(
f"{{:>{len(header) + 2}}}".format(header)
for header in headers
),
terminal_size,
*(
format_row.format(idx, os.path.basename(file))
for idx, file in enumerate(self.files)
),
)
)
def __len__(self) -> int:
if not self.files:
return 0
return len(self.files)
@abstractmethod
def find_files(
self,
directory: Union[str, Path],
extensions: Optional[Union[Sequence, str]] = None,
keywords: Optional[Union[list, str]] = None,
hemisphere: Optional[str] = None,
stimulation: Optional[str] = None,
medication: Optional[str] = None,
exclude: Optional[Union[str, list]] = None,
verbose: bool = False,
    ) -> None:
        ...
@abstractmethod
def filter_files(
self,
keywords: Optional[Union[str, list]] = None,
hemisphere: Optional[str] = None,
stimulation: Optional[str] = None,
medication: Optional[str] = None,
exclude: Optional[Union[str, list]] = None,
verbose: bool = False,
    ) -> None:
        ...
@staticmethod
def _keyword_search(
files: list[str], keywords: Optional[Union[str, list]]
) -> list:
if not keywords:
return files
if not isinstance(keywords, list):
keywords = [keywords]
filtered_files = [
file for file in files if any(key in file for key in keywords)
]
return filtered_files
def _find_files(
self,
directory: Union[Path, str],
extensions: Optional[Union[list, str]] = None,
) -> None:
files = []
for root, _, fnames in os.walk(directory):
fnames = [os.path.join(root, file) for file in fnames]
fnames = self._keyword_search(fnames, extensions)
if fnames:
files.extend(fnames)
self.files = files
def _filter_files(
self,
keywords: Optional[Union[str, list[str]]] = None,
hemisphere: Optional[str] = None,
stimulation: Optional[str] = None,
medication: Optional[str] = None,
exclude: Optional[Union[str, list[str]]] = None,
) -> None:
filtered_files = self.files
if exclude:
if not isinstance(exclude, list):
exclude = [exclude]
filtered_files = [
file
for file in filtered_files
if not any(item in file for item in exclude)
]
if keywords:
if not isinstance(keywords, list):
keywords = [keywords]
filtered_files = self._keyword_search(filtered_files, keywords)
if stimulation:
if stimulation.lower() in "stimon":
stim = "StimOn"
elif stimulation.lower() in "stimoff":
stim = "StimOff"
else:
raise ValueError("Keyword for stimulation not valid.")
filtered_files = self._keyword_search(filtered_files, [stim])
if medication:
if medication.lower() in "medon":
med = "MedOn"
elif medication.lower() in "medoff":
med = "MedOff"
else:
raise ValueError("Keyword for medication not valid.")
filtered_files = self._keyword_search(filtered_files, [med])
if hemisphere:
matching_files = []
for file in filtered_files:
subject = mne_bids.get_entities_from_fname(file)["subject"]
if (
subject not in self.hemispheres
or self.hemispheres[subject] is None
):
raise HemisphereNotSpecifiedError(
subject, self.hemispheres
)
hem = self.hemispheres[subject] + "_"
if hemisphere.lower() in "ipsilateral" and hem in file:
matching_files.append(file)
if hemisphere.lower() in "contralateral" and hem not in file:
matching_files.append(file)
filtered_files = matching_files
self.files = filtered_files
class DirectoryNotFoundError(Exception):
def __init__(
self,
directory: Union[Path, str],
message="Input directory was not found.",
):
self.directory = directory
self.message = message
super().__init__(self.message)
def __str__(self):
return f"{self.message} Got: {self.directory}."
class HemisphereNotSpecifiedError(Exception):
def __init__(
self,
subject,
hemispheres,
message=(
"Input ECOG hemisphere is not specified in"
" `filefinder_settings.py` for given subject."
),
) -> None:
self.subject = subject
self.hemispheres = hemispheres
self.message = message
super().__init__(self.message)
def __str__(self):
return (
f"{self.message} Unspecified subject: {self.subject}."
f" Specified hemispheres: {self.hemispheres}."
)
| true
| true
|
7902cd3008a52c8c535baaa6c0bc85a9ac49f83c
| 2,525
|
py
|
Python
|
ilxutils/ilxutils/database_client.py
|
tmsincomb/pyontutils
|
dad24e7178d8d8cd3bd60d53b9039952fa7a5a1e
|
[
"MIT"
] | 11
|
2017-05-12T08:50:03.000Z
|
2022-01-22T20:23:25.000Z
|
ilxutils/ilxutils/database_client.py
|
tmsincomb/pyontutils
|
dad24e7178d8d8cd3bd60d53b9039952fa7a5a1e
|
[
"MIT"
] | 81
|
2016-02-25T07:39:15.000Z
|
2022-02-17T20:20:27.000Z
|
ilxutils/ilxutils/database_client.py
|
tmsincomb/pyontutils
|
dad24e7178d8d8cd3bd60d53b9039952fa7a5a1e
|
[
"MIT"
] | 257
|
2017-07-18T19:32:22.000Z
|
2022-02-03T17:26:18.000Z
|
from collections import defaultdict
import pandas as pd
import pickle
from sqlalchemy import create_engine, inspect, Table, Column
from sqlalchemy.engine.url import make_url
from sys import exit
class DatabaseClient:
""" Takes care of the database pass opening to find the url and can query
the respected database.
Input:
dbpass_path path to the text file with the list of database urls
dbname database name so we know which database to query from the list
"""
def __init__(self, dbpass_path, dbname):
self.dbpass_path = dbpass_path
self.dbname = dbname
self.db_url = self.get_db_url()
self.engine = create_engine(self.db_url)
def get_db_url(self):
with open(self.dbpass_path, 'r') as infile:
db_names = []
for raw_url in infile.read().splitlines():
url_obj = make_url(raw_url)
if url_obj.database == self.dbname:
infile.close()
return raw_url
db_names.append(url_obj.database)
infile.close()
exit('database name does not exist in dbpass given:' + ', '.join(db_names))
def get_df_with_query(self, query):
""" WARNING :: Will crash if too large. If so, you should just create the df file
first via create_df_file(query=).
load example:
with open(input, 'rb') as infile:
objs = []
while True:
try:
obj = pickle.load(infile)
except EOFError:
break
...
"""
return pd.read_sql(query, self.engine)
def create_df_file_with_query(self, query, output):
""" Dumps in df in chunks to avoid crashes.
"""
chunk_size = 100000
offset = 0
data = defaultdict(lambda : defaultdict(list))
with open(output, 'wb') as outfile:
            # Keep the placeholder template separate from the formatted query; formatting
            # `query` in place strips the placeholders after the first pass, so OFFSET would
            # never advance and the same chunk would be re-read forever.
            query_template = query.replace(';', '') + " LIMIT {chunk_size} OFFSET {offset};"
            while True:
                print(offset)
                chunk_query = query_template.format(
                    chunk_size=chunk_size,
                    offset=offset
                )
                df = pd.read_sql(chunk_query, self.engine)
pickle.dump(df, outfile)
offset += chunk_size
if len(df) < chunk_size:
break
outfile.close()
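# --- Illustrative sketch, not part of the original module ---
# Reading back the chunked pickle written by create_df_file_with_query, following the
# load example sketched in the get_df_with_query docstring above. The file name
# 'dump.pkl' is a placeholder assumption.
if __name__ == "__main__":
    chunks = []
    with open('dump.pkl', 'rb') as infile:
        while True:
            try:
                chunks.append(pickle.load(infile))
            except EOFError:
                break
    df = pd.concat(chunks, ignore_index=True) if chunks else pd.DataFrame()
    print(len(df))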
| 33.666667
| 90
| 0.540594
|
from collections import defaultdict
import pandas as pd
import pickle
from sqlalchemy import create_engine, inspect, Table, Column
from sqlalchemy.engine.url import make_url
from sys import exit
class DatabaseClient:
def __init__(self, dbpass_path, dbname):
self.dbpass_path = dbpass_path
self.dbname = dbname
self.db_url = self.get_db_url()
self.engine = create_engine(self.db_url)
def get_db_url(self):
with open(self.dbpass_path, 'r') as infile:
db_names = []
for raw_url in infile.read().splitlines():
url_obj = make_url(raw_url)
if url_obj.database == self.dbname:
infile.close()
return raw_url
db_names.append(url_obj.database)
infile.close()
exit('database name does not exist in dbpass given:' + ', '.join(db_names))
def get_df_with_query(self, query):
return pd.read_sql(query, self.engine)
def create_df_file_with_query(self, query, output):
chunk_size = 100000
offset = 0
data = defaultdict(lambda : defaultdict(list))
with open(output, 'wb') as outfile:
            query_template = query.replace(';', '') + " LIMIT {chunk_size} OFFSET {offset};"
            while True:
                print(offset)
                chunk_query = query_template.format(
                    chunk_size=chunk_size,
                    offset=offset
                )
                df = pd.read_sql(chunk_query, self.engine)
pickle.dump(df, outfile)
offset += chunk_size
if len(df) < chunk_size:
break
outfile.close()
| true
| true
|
7902ce0e0f474b9d52cbbbf288f0fab89b6b4bfe
| 4,422
|
py
|
Python
|
app.py
|
skrzypak/soaf
|
f742d4b090fad72893ed1f509f4abdbb020aa99d
|
[
"MIT"
] | null | null | null |
app.py
|
skrzypak/soaf
|
f742d4b090fad72893ed1f509f4abdbb020aa99d
|
[
"MIT"
] | null | null | null |
app.py
|
skrzypak/soaf
|
f742d4b090fad72893ed1f509f4abdbb020aa99d
|
[
"MIT"
] | null | null | null |
import glob
import shutil
import subprocess
import os
import sys
import argparse
# Read and save metadata from file
def exiftool_metadata(path):
metadata = {}
exifToolPath = 'exifTool.exe'
''' use Exif tool to get the metadata '''
process = subprocess.Popen(
[
exifToolPath,
path
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True
)
''' get the tags in dict '''
for tag in process.stdout:
tag = tag.strip()
key = tag[:tag.find(':')].strip()
value = tag[tag.find(':') + 1:].strip()
metadata[key] = value
return metadata
class File:
def __init__(self, path):
self.metadata = exiftool_metadata(path)
def _get_file_metadata(self, key, no=''):
if key in self.metadata:
return self.metadata[key]
else:
return no
def copyCore(self, source, dst_dir: str, copy_duplicate=False):
logs = []
        # fallback folder name used when a metadata value is missing
no_metadata = 'none'
date = File._get_file_metadata(self, 'Date/Time Original')
if date == '':
date = File._get_file_metadata(self, 'Create Date', no_metadata)
mime_type = File._get_file_metadata(self, 'MIME Type', no_metadata)
dst_dir += f'''/{mime_type[:mime_type.find('/')]}/{date[:4]}/{date[5:7]}'''
filename = File._get_file_metadata(self, 'File Name')
f_name = filename
dst = dst_dir + '/' + filename
        # A file with the same name already exists in dst. If source and dst have the same size, 'copy_duplicate' decides whether to copy anyway.
if os.path.isfile(dst):
i = 0
f_pth = File(dst)
if_same_size: bool = f_pth._get_file_metadata("File Size") == File._get_file_metadata(self, 'File Size')
if (not if_same_size) or copy_duplicate:
while os.path.isfile(dst):
filename = f'''{f_name[:f_name.find('.')]}_D{str(i)}.{File._get_file_metadata(self, 'File Type Extension')}'''
dst = f'''{dst_dir}/{filename}'''
i = i + 1
if if_same_size:
logs.append(f"Warning: file already exists but I must copy all files"
f" [copy_duplicate={copy_duplicate}], so I try do it ...")
else:
logs.append(f"Warning: file already exists but have other size, so I try copy it ...")
else:
logs.append(f"Warning: file already duplicate [copy_exists={copy_duplicate}]."
f"\nCopy aboard: {source} -> {dst}")
return logs
try:
if not os.path.isdir(dst_dir):
os.makedirs(dst_dir)
logs.append(f"New directory created: {dst_dir}")
shutil.copy(source, dst)
logs.append(f'''Copy done: {source} -> {dst}''')
except Exception as e:
logs.append(f'''Copy error [{e}]: {source} -> {dst}''')
return logs
def main():
# Arguments from console
parser = argparse.ArgumentParser()
parser.add_argument('-s', help="Obligatory: source directory path")
parser.add_argument('-d', help="Obligatory: destination folder path")
parser.add_argument('-e', help="Obligatory: copy duplicate files (T/True/F/False)")
args = parser.parse_args(sys.argv[1:])
# Setup variable
source_dir = args.s
dst_dir = args.d
df = {
"T": True,
"TRUE": True,
"F": False,
"FALSE": False
}
try:
copy_duplicate = df.get(args.e.upper(), False)
except AttributeError:
copy_duplicate = False
print(f"app.py: error: unrecognized arguments. Use -h or --help to see options")
exit(1)
    # Log entry counter
l_lpm = 0
# source_dir = 'C:/Users'
# dst_dir = 'C:/Users'
# copy_duplicate = False
for f_inx, source in enumerate(glob.glob(source_dir + '/**/*.*', recursive=True)):
try:
f = File(source)
print("----------")
for log in f.copyCore(source, dst_dir, copy_duplicate):
l_lpm = l_lpm + 1
print(f'''{str(l_lpm)}.{f_inx + 1}) {log}''')
except Exception as e:
print(f'Copy error [{e}]: {source}')
if __name__ == '__main__':
main()
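# --- Illustrative sketch, not part of the original script ---
# exiftool prints plain "Key : Value" lines, and exiftool_metadata above splits each one
# on the first colon. This standalone helper shows the same parsing on made-up lines,
# without spawning exifTool.exe.
def _demo_parse_exiftool_lines(lines):
    metadata = {}
    for tag in lines:
        tag = tag.strip()
        key = tag[:tag.find(':')].strip()
        value = tag[tag.find(':') + 1:].strip()
        metadata[key] = value
    return metadata

# _demo_parse_exiftool_lines(["MIME Type : image/jpeg", "File Size : 2.1 MB"])
# -> {'MIME Type': 'image/jpeg', 'File Size': '2.1 MB'}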
| 32.755556
| 130
| 0.556083
|
import glob
import shutil
import subprocess
import os
import sys
import argparse
def exiftool_metadata(path):
metadata = {}
exifToolPath = 'exifTool.exe'
process = subprocess.Popen(
[
exifToolPath,
path
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True
)
for tag in process.stdout:
tag = tag.strip()
key = tag[:tag.find(':')].strip()
value = tag[tag.find(':') + 1:].strip()
metadata[key] = value
return metadata
class File:
def __init__(self, path):
self.metadata = exiftool_metadata(path)
def _get_file_metadata(self, key, no=''):
if key in self.metadata:
return self.metadata[key]
else:
return no
def copyCore(self, source, dst_dir: str, copy_duplicate=False):
logs = []
no_metadata = 'none'
date = File._get_file_metadata(self, 'Date/Time Original')
if date == '':
date = File._get_file_metadata(self, 'Create Date', no_metadata)
mime_type = File._get_file_metadata(self, 'MIME Type', no_metadata)
dst_dir += f'''/{mime_type[:mime_type.find('/')]}/{date[:4]}/{date[5:7]}'''
filename = File._get_file_metadata(self, 'File Name')
f_name = filename
dst = dst_dir + '/' + filename
if os.path.isfile(dst):
i = 0
f_pth = File(dst)
if_same_size: bool = f_pth._get_file_metadata("File Size") == File._get_file_metadata(self, 'File Size')
if (not if_same_size) or copy_duplicate:
while os.path.isfile(dst):
filename = f'''{f_name[:f_name.find('.')]}_D{str(i)}.{File._get_file_metadata(self, 'File Type Extension')}'''
dst = f'''{dst_dir}/{filename}'''
i = i + 1
if if_same_size:
logs.append(f"Warning: file already exists but I must copy all files"
f" [copy_duplicate={copy_duplicate}], so I try do it ...")
else:
logs.append(f"Warning: file already exists but have other size, so I try copy it ...")
else:
logs.append(f"Warning: file already duplicate [copy_exists={copy_duplicate}]."
f"\nCopy aboard: {source} -> {dst}")
return logs
try:
if not os.path.isdir(dst_dir):
os.makedirs(dst_dir)
logs.append(f"New directory created: {dst_dir}")
shutil.copy(source, dst)
logs.append(f'''Copy done: {source} -> {dst}''')
except Exception as e:
logs.append(f'''Copy error [{e}]: {source} -> {dst}''')
return logs
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-s', help="Obligatory: source directory path")
parser.add_argument('-d', help="Obligatory: destination folder path")
parser.add_argument('-e', help="Obligatory: copy duplicate files (T/True/F/False)")
args = parser.parse_args(sys.argv[1:])
source_dir = args.s
dst_dir = args.d
df = {
"T": True,
"TRUE": True,
"F": False,
"FALSE": False
}
try:
copy_duplicate = df.get(args.e.upper(), False)
except AttributeError:
copy_duplicate = False
print(f"app.py: error: unrecognized arguments. Use -h or --help to see options")
exit(1)
l_lpm = 0
for f_inx, source in enumerate(glob.glob(source_dir + '/**/*.*', recursive=True)):
try:
f = File(source)
print("----------")
for log in f.copyCore(source, dst_dir, copy_duplicate):
l_lpm = l_lpm + 1
print(f'''{str(l_lpm)}.{f_inx + 1}) {log}''')
except Exception as e:
print(f'Copy error [{e}]: {source}')
if __name__ == '__main__':
main()
| true
| true
|
7902cfa1585d2886bd5352c72d9a37fa9e074b40
| 7,243
|
py
|
Python
|
homeassistant/components/dyson/vacuum.py
|
FlorianLudwig/home-assistant
|
29ad3961e581d3591ce0963a7fa01672abadedf7
|
[
"Apache-2.0"
] | 2
|
2017-10-26T19:43:55.000Z
|
2017-12-30T23:29:00.000Z
|
homeassistant/components/dyson/vacuum.py
|
FlorianLudwig/home-assistant
|
29ad3961e581d3591ce0963a7fa01672abadedf7
|
[
"Apache-2.0"
] | 2
|
2019-04-15T02:43:04.000Z
|
2019-04-15T02:49:10.000Z
|
homeassistant/components/dyson/vacuum.py
|
FlorianLudwig/home-assistant
|
29ad3961e581d3591ce0963a7fa01672abadedf7
|
[
"Apache-2.0"
] | 1
|
2019-06-19T07:43:11.000Z
|
2019-06-19T07:43:11.000Z
|
"""
Support for the Dyson 360 eye vacuum cleaner robot.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/vacuum.dyson/
"""
import logging
from homeassistant.components.vacuum import (
SUPPORT_BATTERY, SUPPORT_FAN_SPEED, SUPPORT_PAUSE, SUPPORT_RETURN_HOME,
SUPPORT_STATUS, SUPPORT_STOP, SUPPORT_TURN_OFF, SUPPORT_TURN_ON,
VacuumDevice)
from homeassistant.helpers.icon import icon_for_battery_level
from . import DYSON_DEVICES
_LOGGER = logging.getLogger(__name__)
ATTR_CLEAN_ID = 'clean_id'
ATTR_FULL_CLEAN_TYPE = 'full_clean_type'
ATTR_POSITION = 'position'
DEPENDENCIES = ['dyson']
DYSON_360_EYE_DEVICES = "dyson_360_eye_devices"
SUPPORT_DYSON = SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_PAUSE | \
SUPPORT_RETURN_HOME | SUPPORT_FAN_SPEED | SUPPORT_STATUS | \
SUPPORT_BATTERY | SUPPORT_STOP
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Dyson 360 Eye robot vacuum platform."""
from libpurecoollink.dyson_360_eye import Dyson360Eye
_LOGGER.debug("Creating new Dyson 360 Eye robot vacuum")
if DYSON_360_EYE_DEVICES not in hass.data:
hass.data[DYSON_360_EYE_DEVICES] = []
# Get Dyson Devices from parent component
for device in [d for d in hass.data[DYSON_DEVICES] if
isinstance(d, Dyson360Eye)]:
dyson_entity = Dyson360EyeDevice(device)
hass.data[DYSON_360_EYE_DEVICES].append(dyson_entity)
add_entities(hass.data[DYSON_360_EYE_DEVICES])
return True
class Dyson360EyeDevice(VacuumDevice):
"""Dyson 360 Eye robot vacuum device."""
def __init__(self, device):
"""Dyson 360 Eye robot vacuum device."""
_LOGGER.debug("Creating device %s", device.name)
self._device = device
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self.hass.async_add_job(
self._device.add_message_listener, self.on_message)
def on_message(self, message):
"""Handle a new messages that was received from the vacuum."""
_LOGGER.debug("Message received for %s device: %s", self.name, message)
self.schedule_update_ha_state()
@property
def should_poll(self) -> bool:
"""Return True if entity has to be polled for state.
False if entity pushes its state to HA.
"""
return False
@property
def name(self):
"""Return the name of the device."""
return self._device.name
@property
def status(self):
"""Return the status of the vacuum cleaner."""
from libpurecoollink.const import Dyson360EyeMode
dyson_labels = {
Dyson360EyeMode.INACTIVE_CHARGING: "Stopped - Charging",
Dyson360EyeMode.INACTIVE_CHARGED: "Stopped - Charged",
Dyson360EyeMode.FULL_CLEAN_PAUSED: "Paused",
Dyson360EyeMode.FULL_CLEAN_RUNNING: "Cleaning",
Dyson360EyeMode.FULL_CLEAN_ABORTED: "Returning home",
Dyson360EyeMode.FULL_CLEAN_INITIATED: "Start cleaning",
Dyson360EyeMode.FAULT_USER_RECOVERABLE: "Error - device blocked",
Dyson360EyeMode.FAULT_REPLACE_ON_DOCK:
"Error - Replace device on dock",
Dyson360EyeMode.FULL_CLEAN_FINISHED: "Finished",
Dyson360EyeMode.FULL_CLEAN_NEEDS_CHARGE: "Need charging"
}
return dyson_labels.get(
self._device.state.state, self._device.state.state)
@property
def battery_level(self):
"""Return the battery level of the vacuum cleaner."""
return self._device.state.battery_level
@property
def fan_speed(self):
"""Return the fan speed of the vacuum cleaner."""
from libpurecoollink.const import PowerMode
speed_labels = {
PowerMode.MAX: "Max",
PowerMode.QUIET: "Quiet"
}
return speed_labels[self._device.state.power_mode]
@property
def fan_speed_list(self):
"""Get the list of available fan speed steps of the vacuum cleaner."""
return ["Quiet", "Max"]
@property
def device_state_attributes(self):
"""Return the specific state attributes of this vacuum cleaner."""
return {
ATTR_POSITION: str(self._device.state.position)
}
@property
def is_on(self) -> bool:
"""Return True if entity is on."""
from libpurecoollink.const import Dyson360EyeMode
return self._device.state.state in [
Dyson360EyeMode.FULL_CLEAN_INITIATED,
Dyson360EyeMode.FULL_CLEAN_ABORTED,
Dyson360EyeMode.FULL_CLEAN_RUNNING
]
@property
def available(self) -> bool:
"""Return True if entity is available."""
return True
@property
def supported_features(self):
"""Flag vacuum cleaner robot features that are supported."""
return SUPPORT_DYSON
@property
def battery_icon(self):
"""Return the battery icon for the vacuum cleaner."""
from libpurecoollink.const import Dyson360EyeMode
charging = self._device.state.state in [
Dyson360EyeMode.INACTIVE_CHARGING]
return icon_for_battery_level(
battery_level=self.battery_level, charging=charging)
def turn_on(self, **kwargs):
"""Turn the vacuum on."""
from libpurecoollink.const import Dyson360EyeMode
_LOGGER.debug("Turn on device %s", self.name)
if self._device.state.state in [Dyson360EyeMode.FULL_CLEAN_PAUSED]:
self._device.resume()
else:
self._device.start()
def turn_off(self, **kwargs):
"""Turn the vacuum off and return to home."""
_LOGGER.debug("Turn off device %s", self.name)
self._device.pause()
def stop(self, **kwargs):
"""Stop the vacuum cleaner."""
_LOGGER.debug("Stop device %s", self.name)
self._device.pause()
def set_fan_speed(self, fan_speed, **kwargs):
"""Set fan speed."""
from libpurecoollink.const import PowerMode
_LOGGER.debug("Set fan speed %s on device %s", fan_speed, self.name)
power_modes = {
"Quiet": PowerMode.QUIET,
"Max": PowerMode.MAX
}
self._device.set_power_mode(power_modes[fan_speed])
def start_pause(self, **kwargs):
"""Start, pause or resume the cleaning task."""
from libpurecoollink.const import Dyson360EyeMode
if self._device.state.state in [Dyson360EyeMode.FULL_CLEAN_PAUSED]:
_LOGGER.debug("Resume device %s", self.name)
self._device.resume()
elif self._device.state.state in [Dyson360EyeMode.INACTIVE_CHARGED,
Dyson360EyeMode.INACTIVE_CHARGING]:
_LOGGER.debug("Start device %s", self.name)
self._device.start()
else:
_LOGGER.debug("Pause device %s", self.name)
self._device.pause()
def return_to_base(self, **kwargs):
"""Set the vacuum cleaner to return to the dock."""
_LOGGER.debug("Return to base device %s", self.name)
self._device.abort()
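# --- Illustrative sketch, not part of the original platform ---
# SUPPORT_DYSON above is a bitwise OR of single-bit capability flags, and Home Assistant
# checks support by masking. The literal flag values below are assumptions for
# demonstration only, not the real constants.
_DEMO_TURN_ON = 1
_DEMO_PAUSE = 4
_DEMO_BATTERY = 64
_DEMO_SUPPORTED = _DEMO_TURN_ON | _DEMO_PAUSE | _DEMO_BATTERY


def _demo_supports(flag):
    # A feature is supported when its bit is set in the combined mask.
    return bool(_DEMO_SUPPORTED & flag)

# _demo_supports(_DEMO_PAUSE) -> True; _demo_supports(2) -> False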
| 34.655502
| 79
| 0.654701
|
import logging
from homeassistant.components.vacuum import (
SUPPORT_BATTERY, SUPPORT_FAN_SPEED, SUPPORT_PAUSE, SUPPORT_RETURN_HOME,
SUPPORT_STATUS, SUPPORT_STOP, SUPPORT_TURN_OFF, SUPPORT_TURN_ON,
VacuumDevice)
from homeassistant.helpers.icon import icon_for_battery_level
from . import DYSON_DEVICES
_LOGGER = logging.getLogger(__name__)
ATTR_CLEAN_ID = 'clean_id'
ATTR_FULL_CLEAN_TYPE = 'full_clean_type'
ATTR_POSITION = 'position'
DEPENDENCIES = ['dyson']
DYSON_360_EYE_DEVICES = "dyson_360_eye_devices"
SUPPORT_DYSON = SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_PAUSE | \
SUPPORT_RETURN_HOME | SUPPORT_FAN_SPEED | SUPPORT_STATUS | \
SUPPORT_BATTERY | SUPPORT_STOP
def setup_platform(hass, config, add_entities, discovery_info=None):
from libpurecoollink.dyson_360_eye import Dyson360Eye
_LOGGER.debug("Creating new Dyson 360 Eye robot vacuum")
if DYSON_360_EYE_DEVICES not in hass.data:
hass.data[DYSON_360_EYE_DEVICES] = []
for device in [d for d in hass.data[DYSON_DEVICES] if
isinstance(d, Dyson360Eye)]:
dyson_entity = Dyson360EyeDevice(device)
hass.data[DYSON_360_EYE_DEVICES].append(dyson_entity)
add_entities(hass.data[DYSON_360_EYE_DEVICES])
return True
class Dyson360EyeDevice(VacuumDevice):
def __init__(self, device):
_LOGGER.debug("Creating device %s", device.name)
self._device = device
async def async_added_to_hass(self):
self.hass.async_add_job(
self._device.add_message_listener, self.on_message)
def on_message(self, message):
_LOGGER.debug("Message received for %s device: %s", self.name, message)
self.schedule_update_ha_state()
@property
def should_poll(self) -> bool:
return False
@property
def name(self):
return self._device.name
@property
def status(self):
from libpurecoollink.const import Dyson360EyeMode
dyson_labels = {
Dyson360EyeMode.INACTIVE_CHARGING: "Stopped - Charging",
Dyson360EyeMode.INACTIVE_CHARGED: "Stopped - Charged",
Dyson360EyeMode.FULL_CLEAN_PAUSED: "Paused",
Dyson360EyeMode.FULL_CLEAN_RUNNING: "Cleaning",
Dyson360EyeMode.FULL_CLEAN_ABORTED: "Returning home",
Dyson360EyeMode.FULL_CLEAN_INITIATED: "Start cleaning",
Dyson360EyeMode.FAULT_USER_RECOVERABLE: "Error - device blocked",
Dyson360EyeMode.FAULT_REPLACE_ON_DOCK:
"Error - Replace device on dock",
Dyson360EyeMode.FULL_CLEAN_FINISHED: "Finished",
Dyson360EyeMode.FULL_CLEAN_NEEDS_CHARGE: "Need charging"
}
return dyson_labels.get(
self._device.state.state, self._device.state.state)
@property
def battery_level(self):
return self._device.state.battery_level
@property
def fan_speed(self):
from libpurecoollink.const import PowerMode
speed_labels = {
PowerMode.MAX: "Max",
PowerMode.QUIET: "Quiet"
}
return speed_labels[self._device.state.power_mode]
@property
def fan_speed_list(self):
return ["Quiet", "Max"]
@property
def device_state_attributes(self):
return {
ATTR_POSITION: str(self._device.state.position)
}
@property
def is_on(self) -> bool:
from libpurecoollink.const import Dyson360EyeMode
return self._device.state.state in [
Dyson360EyeMode.FULL_CLEAN_INITIATED,
Dyson360EyeMode.FULL_CLEAN_ABORTED,
Dyson360EyeMode.FULL_CLEAN_RUNNING
]
@property
def available(self) -> bool:
return True
@property
def supported_features(self):
return SUPPORT_DYSON
@property
def battery_icon(self):
from libpurecoollink.const import Dyson360EyeMode
charging = self._device.state.state in [
Dyson360EyeMode.INACTIVE_CHARGING]
return icon_for_battery_level(
battery_level=self.battery_level, charging=charging)
def turn_on(self, **kwargs):
from libpurecoollink.const import Dyson360EyeMode
_LOGGER.debug("Turn on device %s", self.name)
if self._device.state.state in [Dyson360EyeMode.FULL_CLEAN_PAUSED]:
self._device.resume()
else:
self._device.start()
def turn_off(self, **kwargs):
_LOGGER.debug("Turn off device %s", self.name)
self._device.pause()
def stop(self, **kwargs):
_LOGGER.debug("Stop device %s", self.name)
self._device.pause()
def set_fan_speed(self, fan_speed, **kwargs):
from libpurecoollink.const import PowerMode
_LOGGER.debug("Set fan speed %s on device %s", fan_speed, self.name)
power_modes = {
"Quiet": PowerMode.QUIET,
"Max": PowerMode.MAX
}
self._device.set_power_mode(power_modes[fan_speed])
def start_pause(self, **kwargs):
from libpurecoollink.const import Dyson360EyeMode
if self._device.state.state in [Dyson360EyeMode.FULL_CLEAN_PAUSED]:
_LOGGER.debug("Resume device %s", self.name)
self._device.resume()
elif self._device.state.state in [Dyson360EyeMode.INACTIVE_CHARGED,
Dyson360EyeMode.INACTIVE_CHARGING]:
_LOGGER.debug("Start device %s", self.name)
self._device.start()
else:
_LOGGER.debug("Pause device %s", self.name)
self._device.pause()
def return_to_base(self, **kwargs):
_LOGGER.debug("Return to base device %s", self.name)
self._device.abort()
| true
| true
|
7902cff85dd9987e371017e720b8eb7568470281
| 428
|
py
|
Python
|
output/models/nist_data/union/short_g_year/schema_instance/nistschema_sv_iv_union_short_g_year_pattern_3_xsd/nistschema_sv_iv_union_short_g_year_pattern_3.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 1
|
2021-08-14T17:59:21.000Z
|
2021-08-14T17:59:21.000Z
|
output/models/nist_data/union/short_g_year/schema_instance/nistschema_sv_iv_union_short_g_year_pattern_3_xsd/nistschema_sv_iv_union_short_g_year_pattern_3.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 4
|
2020-02-12T21:30:44.000Z
|
2020-04-15T20:06:46.000Z
|
output/models/nist_data/union/short_g_year/schema_instance/nistschema_sv_iv_union_short_g_year_pattern_3_xsd/nistschema_sv_iv_union_short_g_year_pattern_3.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass, field
__NAMESPACE__ = "NISTSchema-SV-IV-union-short-gYear-pattern-3-NS"
@dataclass
class NistschemaSvIvUnionShortGYearPattern3:
class Meta:
name = "NISTSchema-SV-IV-union-short-gYear-pattern-3"
namespace = "NISTSchema-SV-IV-union-short-gYear-pattern-3-NS"
value: str = field(
default="",
metadata={
"pattern": r"\d\d50",
}
)
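# --- Illustrative sketch, not part of the generated module ---
# The xsd pattern restriction is carried only in the field metadata above; a consumer
# could enforce it explicitly, e.g. with re.fullmatch. The candidate strings are made up.
if __name__ == "__main__":
    import re

    for candidate in ("1950", "2050", "1999"):
        print(candidate, bool(re.fullmatch(r"\d\d50", candidate)))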
| 23.777778
| 69
| 0.64486
|
from dataclasses import dataclass, field
__NAMESPACE__ = "NISTSchema-SV-IV-union-short-gYear-pattern-3-NS"
@dataclass
class NistschemaSvIvUnionShortGYearPattern3:
class Meta:
name = "NISTSchema-SV-IV-union-short-gYear-pattern-3"
namespace = "NISTSchema-SV-IV-union-short-gYear-pattern-3-NS"
value: str = field(
default="",
metadata={
"pattern": r"\d\d50",
}
)
| true
| true
|
7902d0a5a3ebe22ec9f92c39e04b99535a9f9cb4
| 3,270
|
py
|
Python
|
benchmarks/jhaskellbenchmark.py
|
hnefatl/dissertation-project
|
400152de25fd476c20730d85663da7f1e16b9b52
|
[
"BSD-3-Clause"
] | 3
|
2019-01-27T22:13:29.000Z
|
2022-03-10T20:11:47.000Z
|
benchmarks/jhaskellbenchmark.py
|
hnefatl/dissertation-project
|
400152de25fd476c20730d85663da7f1e16b9b52
|
[
"BSD-3-Clause"
] | 2
|
2019-01-27T14:07:15.000Z
|
2019-04-23T15:13:57.000Z
|
benchmarks/jhaskellbenchmark.py
|
hnefatl/dissertation-project
|
400152de25fd476c20730d85663da7f1e16b9b52
|
[
"BSD-3-Clause"
] | 1
|
2019-01-27T21:42:55.000Z
|
2019-01-27T21:42:55.000Z
|
import os
import shutil
import subprocess
import re
import string
import pathlib
import timeit
import jmhbenchmark
class JHaskellBenchmark(jmhbenchmark.JMHBenchmark):
def __init__(self, name, source_path, compiler_args=None):
if compiler_args is None:
compiler_args = []
source_path = pathlib.Path(source_path)
super().__init__(name, source_path.stem.lower(), source_path.stem.capitalize())
self._source_path = source_path
self._compiler_args = compiler_args.copy()
def __enter__(self):
ret = super().__enter__()
self._output_jar = (self._temp_dir / self._name).with_suffix(".jar")
return ret
def get_run_args(self):
return ["-jar", f"{self._name}.jar"]
def _compile(self):
self._run_jhaskell_compiler()
def _post_compile(self):
self._results["size"] = jmhbenchmark.get_jar_entry_size(
self._output_jar,
[
f"{self._package_name}/{s}.class"
for s in [self._class_name, "Data", "Function", "BoxedData", "HeapObject"]
],
)
return super()._post_compile()
def _get_classpath(self):
return [f"{self._name}.jar"]
def _run_jhaskell_compiler(self, extra_args=None):
if extra_args is None:
extra_args = []
original_dir = pathlib.Path.cwd()
# Build the source program
args = (
[
"compiler-exe",
"--build-dir",
f"{self._temp_dir / 'out'}",
"--output-jar",
str(self._output_jar),
"--output-class",
self._class_name,
"--runtime-file-dir",
str(original_dir.parent / "runtime"),
]
+ self._compiler_args
+ extra_args
+ [f"programs/{self._package_name}.hs"]
)
try:
return subprocess.check_output(args)
except subprocess.CalledProcessError as e:
print(e.stdout.decode())
raise
# For JHaskell, time for each stage of the compiler
def _benchmark_compilation(self, iterations=50):
number = 1
# Record the output of each invocation
outputs = []
def bench_func():
outputs.append(self._run_jhaskell_compiler(["--time-stages"]).decode())
overall_times = timeit.repeat(stmt=bench_func, setup=self._pre_compile, number=number, repeat=iterations)
time_data = []
data_extractor = re.compile(r"(.+): (.+)ms")
for output, overall_time in zip(outputs, overall_times):
cumulative_time = 0
this_run_data = []
for line in output.splitlines():
match = data_extractor.fullmatch(line)
if match is None:
raise RuntimeError("Invalid line from compiler: " + line)
this_time = float(match.group(2))
this_run_data.append((match.group(1), this_time))
cumulative_time += this_time
#this_run_data.append(("Other", overall_time * 1000 - cumulative_time))
time_data.append(this_run_data)
self._results["times"] = time_data
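# --- Illustrative sketch, not part of the original benchmark ---
# The per-stage timings are pulled out of the compiler output with the "(.+): (.+)ms"
# regex used above. This shows the same extraction on made-up output lines, without
# invoking compiler-exe.
if __name__ == "__main__":
    demo_extractor = re.compile(r"(.+): (.+)ms")
    for line in ["Parsing: 12.5ms", "Code generation: 40.0ms"]:
        match = demo_extractor.fullmatch(line)
        if match is not None:
            print(match.group(1), float(match.group(2)))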
| 32.376238
| 113
| 0.576758
|
import os
import shutil
import subprocess
import re
import string
import pathlib
import timeit
import jmhbenchmark
class JHaskellBenchmark(jmhbenchmark.JMHBenchmark):
def __init__(self, name, source_path, compiler_args=None):
if compiler_args is None:
compiler_args = []
source_path = pathlib.Path(source_path)
super().__init__(name, source_path.stem.lower(), source_path.stem.capitalize())
self._source_path = source_path
self._compiler_args = compiler_args.copy()
def __enter__(self):
ret = super().__enter__()
self._output_jar = (self._temp_dir / self._name).with_suffix(".jar")
return ret
def get_run_args(self):
return ["-jar", f"{self._name}.jar"]
def _compile(self):
self._run_jhaskell_compiler()
def _post_compile(self):
self._results["size"] = jmhbenchmark.get_jar_entry_size(
self._output_jar,
[
f"{self._package_name}/{s}.class"
for s in [self._class_name, "Data", "Function", "BoxedData", "HeapObject"]
],
)
return super()._post_compile()
def _get_classpath(self):
return [f"{self._name}.jar"]
def _run_jhaskell_compiler(self, extra_args=None):
if extra_args is None:
extra_args = []
original_dir = pathlib.Path.cwd()
args = (
[
"compiler-exe",
"--build-dir",
f"{self._temp_dir / 'out'}",
"--output-jar",
str(self._output_jar),
"--output-class",
self._class_name,
"--runtime-file-dir",
str(original_dir.parent / "runtime"),
]
+ self._compiler_args
+ extra_args
+ [f"programs/{self._package_name}.hs"]
)
try:
return subprocess.check_output(args)
except subprocess.CalledProcessError as e:
print(e.stdout.decode())
raise
def _benchmark_compilation(self, iterations=50):
number = 1
outputs = []
def bench_func():
outputs.append(self._run_jhaskell_compiler(["--time-stages"]).decode())
overall_times = timeit.repeat(stmt=bench_func, setup=self._pre_compile, number=number, repeat=iterations)
time_data = []
data_extractor = re.compile(r"(.+): (.+)ms")
for output, overall_time in zip(outputs, overall_times):
cumulative_time = 0
this_run_data = []
for line in output.splitlines():
match = data_extractor.fullmatch(line)
if match is None:
raise RuntimeError("Invalid line from compiler: " + line)
this_time = float(match.group(2))
this_run_data.append((match.group(1), this_time))
cumulative_time += this_time
time_data.append(this_run_data)
self._results["times"] = time_data
| true
| true
|
7902d1fca3bc1c75daba249dfaafa2560662d62f
| 1,103
|
py
|
Python
|
website-generator.d/80-not-found-page.py
|
Lyrics/lyrics-website
|
d16073e4a4abc9032942d36d29c72aee1b245ff6
|
[
"BSD-3-Clause"
] | 3
|
2020-12-04T13:40:44.000Z
|
2021-06-19T10:02:52.000Z
|
website-generator.d/80-not-found-page.py
|
Lyrics/lyrics-website
|
d16073e4a4abc9032942d36d29c72aee1b245ff6
|
[
"BSD-3-Clause"
] | 20
|
2020-02-12T06:40:16.000Z
|
2022-01-16T08:45:19.000Z
|
website-generator.d/80-not-found-page.py
|
Lyrics/lyrics-website
|
d16073e4a4abc9032942d36d29c72aee1b245ff6
|
[
"BSD-3-Clause"
] | 1
|
2020-12-10T11:29:15.000Z
|
2020-12-10T11:29:15.000Z
|
## Creates 404 page
import pystache
import utils
def main(data):
html = pystache.render(data["templates"]["page"], {
"title": "Page not found",
"description": "Error 404: page not found",
## Since we don't know the depth of this page relative to the root,
## we have to assume the db directory is located in the root of this web resource
"navigation": utils.generateTopBarNavigation("/" + data["config"].get("Site", "DbPath")),
"name": "error",
"content": pystache.render(data["templates"]["not-found-page-contents"]),
## Since we don't know the depth of this page relative to the root,
## we have to assume the search page is located in the root of this web resource
"search": "/" + data["definitions"]["filenames"]["search"],
})
notFoundFile = utils.mkfile(
data["definitions"]["runtime"]["cwd"],
data["config"].get("Filesystem", "DestinationDirPath"),
data["definitions"]["filenames"]["notfound"],
)
notFoundFile.write(html)
notFoundFile.close()
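# --- Illustrative sketch, not part of the original generator ---
# pystache.render takes a Mustache template string plus a context dict, which is all the
# page rendering above depends on. The tiny template here is made up for demonstration.
if __name__ == "__main__":
    print(pystache.render("{{title}} ({{name}})", {"title": "Page not found", "name": "error"}))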
| 40.851852
| 98
| 0.614687
|
import pystache
import utils
def main(data):
    html = pystache.render(data["templates"]["page"], {
        "title": "Page not found",
        "description": "Error 404: page not found",
        "navigation": utils.generateTopBarNavigation("/" + data["config"].get("Site", "DbPath")),
        "name": "error",
        "content": pystache.render(data["templates"]["not-found-page-contents"]),
        "search": "/" + data["definitions"]["filenames"]["search"],
    })
    notFoundFile = utils.mkfile(
        data["definitions"]["runtime"]["cwd"],
        data["config"].get("Filesystem", "DestinationDirPath"),
        data["definitions"]["filenames"]["notfound"],
    )
    notFoundFile.write(html)
    notFoundFile.close()
| true
| true
|
7902d2d6689267442854e94c42c7ba663bc01b2a
| 3,504
|
py
|
Python
|
db/dbClient.py
|
dota2heqiuzhi/proxy_pool
|
c5aa6b37799c265dd5f331db0449fee788b8cc0d
|
[
"MIT"
] | null | null | null |
db/dbClient.py
|
dota2heqiuzhi/proxy_pool
|
c5aa6b37799c265dd5f331db0449fee788b8cc0d
|
[
"MIT"
] | null | null | null |
db/dbClient.py
|
dota2heqiuzhi/proxy_pool
|
c5aa6b37799c265dd5f331db0449fee788b8cc0d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# !/usr/bin/env python
"""
-------------------------------------------------
File Name: DbClient.py
   Description :  DB factory class
Author : JHao
date: 2016/12/2
-------------------------------------------------
Change Activity:
                   2016/12/02: DB factory class
                   2020/07/03: removed raw_proxy storage
-------------------------------------------------
"""
__author__ = 'JHao'
import os
import sys
from util.six import urlparse, withMetaclass
from util.singleton import Singleton
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
class DbClient(withMetaclass(Singleton)):
"""
    DbClient DB factory class, providing the get/put/update/pop/delete/exists/getAll/clean/getCount/changeTable methods
    Abstract method definitions:
        get(): return a proxy at random;
        put(proxy): store a proxy;
        pop(): return and remove a proxy in order;
        update(proxy): update the given proxy's info;
        delete(proxy): delete the given proxy;
        exists(proxy): check whether the given proxy exists;
        getAll(): return all proxies;
        clean(): clear all proxy info;
        getCount(): return proxy statistics;
        changeTable(name): switch the target table
    All methods must be implemented by the corresponding backend class:
ssdb: ssdbClient.py
redis: redisClient.py
mongodb: mongodbClient.py
"""
def __init__(self, db_conn):
"""
init
:return:
"""
self.parseDbConn(db_conn)
self.__initDbClient()
@classmethod
def parseDbConn(cls, db_conn):
db_conf = urlparse(db_conn)
cls.db_type = db_conf.scheme.upper().strip()
cls.db_host = db_conf.hostname
cls.db_port = db_conf.port
cls.db_user = db_conf.username
cls.db_pwd = db_conf.password
cls.db_name = db_conf.path[1:]
return cls
def __initDbClient(self):
"""
init DB Client
:return:
"""
__type = None
if "SSDB" == self.db_type:
__type = "ssdbClient"
elif "REDIS" == self.db_type:
__type = "redisClient"
elif "POSTGRESQL" == self.db_type:
__type = "postgresqlClient"
else:
pass
assert __type, 'type error, Not support DB type: {}'.format(self.db_type)
self.client = getattr(__import__(__type), "%sClient" % self.db_type.title())(host=self.db_host,
port=self.db_port,
username=self.db_user,
password=self.db_pwd,
db=self.db_name)
def get(self, **kwargs):
return self.client.get(**kwargs)
def put(self, key, **kwargs):
return self.client.put(key, **kwargs)
def update(self, key, value, **kwargs):
return self.client.update(key, value, **kwargs)
def delete(self, key, **kwargs):
return self.client.delete(key, **kwargs)
def exists(self, key, **kwargs):
return self.client.exists(key, **kwargs)
def pop(self, **kwargs):
return self.client.pop(**kwargs)
def getAll(self):
return self.client.getAll()
def clear(self):
return self.client.clear()
def changeTable(self, name):
self.client.changeTable(name)
def getCount(self):
return self.client.getCount()
def test(self):
return self.client.test()
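# --- Illustrative sketch, not part of the original module ---
# parseDbConn above only splits a connection URI with urlparse; the sample URI below is
# made up to show which pieces land in which attribute.
if __name__ == "__main__":
    conf = urlparse("redis://user:secret@127.0.0.1:6379/0")
    print(conf.scheme.upper(), conf.hostname, conf.port, conf.username, conf.password, conf.path[1:])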
| 28.487805
| 107
| 0.50742
|
__author__ = 'JHao'
import os
import sys
from util.six import urlparse, withMetaclass
from util.singleton import Singleton
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
class DbClient(withMetaclass(Singleton)):
def __init__(self, db_conn):
self.parseDbConn(db_conn)
self.__initDbClient()
@classmethod
def parseDbConn(cls, db_conn):
db_conf = urlparse(db_conn)
cls.db_type = db_conf.scheme.upper().strip()
cls.db_host = db_conf.hostname
cls.db_port = db_conf.port
cls.db_user = db_conf.username
cls.db_pwd = db_conf.password
cls.db_name = db_conf.path[1:]
return cls
def __initDbClient(self):
__type = None
if "SSDB" == self.db_type:
__type = "ssdbClient"
elif "REDIS" == self.db_type:
__type = "redisClient"
elif "POSTGRESQL" == self.db_type:
__type = "postgresqlClient"
else:
pass
assert __type, 'type error, Not support DB type: {}'.format(self.db_type)
self.client = getattr(__import__(__type), "%sClient" % self.db_type.title())(host=self.db_host,
port=self.db_port,
username=self.db_user,
password=self.db_pwd,
db=self.db_name)
def get(self, **kwargs):
return self.client.get(**kwargs)
def put(self, key, **kwargs):
return self.client.put(key, **kwargs)
def update(self, key, value, **kwargs):
return self.client.update(key, value, **kwargs)
def delete(self, key, **kwargs):
return self.client.delete(key, **kwargs)
def exists(self, key, **kwargs):
return self.client.exists(key, **kwargs)
def pop(self, **kwargs):
return self.client.pop(**kwargs)
def getAll(self):
return self.client.getAll()
def clear(self):
return self.client.clear()
def changeTable(self, name):
self.client.changeTable(name)
def getCount(self):
return self.client.getCount()
def test(self):
return self.client.test()
| true
| true
|
7902d3771bc4bc4186cf4f97a4a24526e1f11130
| 27,032
|
py
|
Python
|
tests/cupy_tests/core_tests/test_ndarray_elementwise_op.py
|
Onkar627/cupy
|
8eef1ad5393c0a92c5065bc05137bf997f37044a
|
[
"MIT"
] | 1
|
2022-01-12T22:57:54.000Z
|
2022-01-12T22:57:54.000Z
|
tests/cupy_tests/core_tests/test_ndarray_elementwise_op.py
|
Onkar627/cupy
|
8eef1ad5393c0a92c5065bc05137bf997f37044a
|
[
"MIT"
] | null | null | null |
tests/cupy_tests/core_tests/test_ndarray_elementwise_op.py
|
Onkar627/cupy
|
8eef1ad5393c0a92c5065bc05137bf997f37044a
|
[
"MIT"
] | 1
|
2022-03-21T20:19:12.000Z
|
2022-03-21T20:19:12.000Z
|
import operator
import numpy
import pytest
import cupy
from cupy import testing
class TestArrayElementwiseOp:
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(rtol=1e-6, accept_error=TypeError)
def check_array_scalar_op(self, op, xp, x_type, y_type, swap=False,
no_bool=False, no_complex=False):
x_dtype = numpy.dtype(x_type)
y_dtype = numpy.dtype(y_type)
if no_bool and x_dtype == '?' and y_dtype == '?':
return xp.array(True)
if no_complex and (x_dtype.kind == 'c' or y_dtype.kind == 'c'):
return xp.array(True)
a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)
if swap:
return op(y_type(3), a)
else:
return op(a, y_type(3))
def test_add_scalar(self):
self.check_array_scalar_op(operator.add)
def test_radd_scalar(self):
self.check_array_scalar_op(operator.add, swap=True)
def test_iadd_scalar(self):
self.check_array_scalar_op(operator.iadd)
def test_sub_scalar(self):
self.check_array_scalar_op(operator.sub, no_bool=True)
def test_rsub_scalar(self):
self.check_array_scalar_op(operator.sub, swap=True, no_bool=True)
def test_isub_scalar(self):
self.check_array_scalar_op(operator.isub, no_bool=True)
def test_mul_scalar(self):
self.check_array_scalar_op(operator.mul)
def test_rmul_scalar(self):
self.check_array_scalar_op(operator.mul, swap=True)
def test_imul_scalar(self):
self.check_array_scalar_op(operator.imul)
def test_truediv_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(operator.truediv)
def test_rtruediv_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(operator.truediv, swap=True)
def test_itruediv_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(operator.itruediv)
def test_floordiv_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(operator.floordiv, no_complex=True)
def test_rfloordiv_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(operator.floordiv, swap=True,
no_complex=True)
def test_ifloordiv_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(operator.ifloordiv, no_complex=True)
def test_pow_scalar(self):
self.check_array_scalar_op(operator.pow)
def test_rpow_scalar(self):
self.check_array_scalar_op(operator.pow, swap=True)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(atol=1.0, accept_error=TypeError)
def check_ipow_scalar(self, xp, x_type, y_type):
a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)
return operator.ipow(a, y_type(3))
def test_ipow_scalar(self):
self.check_ipow_scalar()
def test_divmod0_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(lambda x, y: divmod(x, y)[0],
no_complex=True)
def test_divmod1_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(lambda x, y: divmod(x, y)[1],
no_complex=True)
def test_rdivmod0_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(lambda x, y: divmod(x, y)[0], swap=True,
no_complex=True)
def test_rdivmod1_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(lambda x, y: divmod(x, y)[1], swap=True,
no_complex=True)
def test_lt_scalar(self):
self.check_array_scalar_op(operator.lt, no_complex=False)
def test_le_scalar(self):
self.check_array_scalar_op(operator.le, no_complex=False)
def test_gt_scalar(self):
self.check_array_scalar_op(operator.gt, no_complex=False)
def test_ge_scalar(self):
self.check_array_scalar_op(operator.ge, no_complex=False)
def test_eq_scalar(self):
self.check_array_scalar_op(operator.eq)
def test_ne_scalar(self):
self.check_array_scalar_op(operator.ne)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(accept_error=TypeError)
def check_array_array_op(self, op, xp, x_type, y_type,
no_bool=False, no_complex=False):
x_dtype = numpy.dtype(x_type)
y_dtype = numpy.dtype(y_type)
if no_bool and x_dtype == '?' and y_dtype == '?':
return xp.array(True)
if no_complex and (x_dtype.kind == 'c' or y_dtype.kind == 'c'):
return xp.array(True)
a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)
b = xp.array([[6, 5, 4], [3, 2, 1]], y_type)
return op(a, b)
def test_add_array(self):
self.check_array_array_op(operator.add)
def test_iadd_array(self):
self.check_array_array_op(operator.iadd)
def test_sub_array(self):
self.check_array_array_op(operator.sub, no_bool=True)
def test_isub_array(self):
self.check_array_array_op(operator.isub, no_bool=True)
def test_mul_array(self):
self.check_array_array_op(operator.mul)
def test_imul_array(self):
self.check_array_array_op(operator.imul)
def test_truediv_array(self):
with numpy.errstate(divide='ignore'):
self.check_array_array_op(operator.truediv)
def test_itruediv_array(self):
with numpy.errstate(divide='ignore'):
self.check_array_array_op(operator.itruediv)
def test_floordiv_array(self):
with numpy.errstate(divide='ignore'):
self.check_array_array_op(operator.floordiv, no_complex=True)
def test_ifloordiv_array(self):
if '1.16.1' <= numpy.lib.NumpyVersion(numpy.__version__) < '1.18.0':
self.skipTest("NumPy Issue #12927")
with numpy.errstate(divide='ignore'):
self.check_array_array_op(operator.ifloordiv, no_complex=True)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(atol=1e-5, rtol=1e-6, accept_error=TypeError)
def check_pow_array(self, xp, x_type, y_type):
a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)
b = xp.array([[6, 5, 4], [3, 2, 1]], y_type)
return operator.pow(a, b)
def test_pow_array(self):
        # There are some precision issues in HIP that prevent
# checking with atol=0
if cupy.cuda.runtime.is_hip:
self.check_pow_array()
else:
self.check_array_array_op(operator.pow)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(atol=1.0, accept_error=TypeError)
def check_ipow_array(self, xp, x_type, y_type):
a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)
b = xp.array([[6, 5, 4], [3, 2, 1]], y_type)
return operator.ipow(a, b)
def test_ipow_array(self):
self.check_ipow_array()
def test_divmod0_array(self):
with numpy.errstate(divide='ignore'):
self.check_array_array_op(lambda x, y: divmod(x, y)[0])
def test_divmod1_array(self):
with numpy.errstate(divide='ignore'):
self.check_array_array_op(lambda x, y: divmod(x, y)[1])
def test_lt_array(self):
self.check_array_array_op(operator.lt, no_complex=True)
def test_le_array(self):
self.check_array_array_op(operator.le, no_complex=True)
def test_gt_array(self):
self.check_array_array_op(operator.gt, no_complex=True)
def test_ge_array(self):
self.check_array_array_op(operator.ge, no_complex=True)
def test_eq_array(self):
self.check_array_array_op(operator.eq)
def test_ne_array(self):
self.check_array_array_op(operator.ne)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(accept_error=TypeError)
def check_array_broadcasted_op(self, op, xp, x_type, y_type,
no_bool=False, no_complex=False):
x_dtype = numpy.dtype(x_type)
y_dtype = numpy.dtype(y_type)
if no_bool and x_dtype == '?' and y_dtype == '?':
return xp.array(True)
if no_complex and (x_dtype.kind == 'c' or y_dtype.kind == 'c'):
return xp.array(True)
a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)
b = xp.array([[1], [2]], y_type)
return op(a, b)
def test_broadcasted_add(self):
self.check_array_broadcasted_op(operator.add)
def test_broadcasted_iadd(self):
self.check_array_broadcasted_op(operator.iadd)
def test_broadcasted_sub(self):
# TODO(unno): sub for boolean array is deprecated in numpy>=1.13
self.check_array_broadcasted_op(operator.sub, no_bool=True)
def test_broadcasted_isub(self):
# TODO(unno): sub for boolean array is deprecated in numpy>=1.13
self.check_array_broadcasted_op(operator.isub, no_bool=True)
def test_broadcasted_mul(self):
self.check_array_broadcasted_op(operator.mul)
def test_broadcasted_imul(self):
self.check_array_broadcasted_op(operator.imul)
def test_broadcasted_truediv(self):
with numpy.errstate(divide='ignore'):
self.check_array_broadcasted_op(operator.truediv)
def test_broadcasted_itruediv(self):
with numpy.errstate(divide='ignore'):
self.check_array_broadcasted_op(operator.itruediv)
def test_broadcasted_floordiv(self):
with numpy.errstate(divide='ignore'):
self.check_array_broadcasted_op(operator.floordiv, no_complex=True)
def test_broadcasted_ifloordiv(self):
if '1.16.1' <= numpy.lib.NumpyVersion(numpy.__version__) < '1.18.0':
self.skipTest("NumPy Issue #12927")
with numpy.errstate(divide='ignore'):
self.check_array_broadcasted_op(operator.ifloordiv,
no_complex=True)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(atol=1e-5, rtol=1e-6, accept_error=TypeError)
def check_broadcasted_pow(self, xp, x_type, y_type):
a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)
b = xp.array([[1], [2]], y_type)
return operator.pow(a, b)
def test_broadcasted_pow(self):
        # There are some precision issues in HIP that prevent
# checking with atol=0
if cupy.cuda.runtime.is_hip:
self.check_broadcasted_pow()
else:
self.check_array_broadcasted_op(operator.pow)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(atol=1.0, accept_error=TypeError)
def check_broadcasted_ipow(self, xp, x_type, y_type):
a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)
b = xp.array([[1], [2]], y_type)
return operator.ipow(a, b)
def test_broadcasted_ipow(self):
self.check_broadcasted_ipow()
def test_broadcasted_divmod0(self):
with numpy.errstate(divide='ignore'):
self.check_array_broadcasted_op(lambda x, y: divmod(x, y)[0],
no_complex=True)
def test_broadcasted_divmod1(self):
with numpy.errstate(divide='ignore'):
self.check_array_broadcasted_op(lambda x, y: divmod(x, y)[1],
no_complex=True)
def test_broadcasted_lt(self):
self.check_array_broadcasted_op(operator.lt, no_complex=True)
def test_broadcasted_le(self):
self.check_array_broadcasted_op(operator.le, no_complex=True)
def test_broadcasted_gt(self):
self.check_array_broadcasted_op(operator.gt, no_complex=True)
def test_broadcasted_ge(self):
self.check_array_broadcasted_op(operator.ge, no_complex=True)
def test_broadcasted_eq(self):
self.check_array_broadcasted_op(operator.eq)
def test_broadcasted_ne(self):
self.check_array_broadcasted_op(operator.ne)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(rtol=1e-6)
def check_array_doubly_broadcasted_op(self, op, xp, x_type, y_type,
no_bool=False, no_complex=False):
x_dtype = numpy.dtype(x_type)
y_dtype = numpy.dtype(y_type)
if no_bool and x_dtype == '?' and y_dtype == '?':
return xp.array(True)
if no_complex and (x_dtype.kind == 'c' or y_dtype.kind == 'c'):
return xp.array(True)
a = xp.array([[[1, 2, 3]], [[4, 5, 6]]], x_type)
b = xp.array([[1], [2], [3]], y_type)
return op(a, b)
def test_doubly_broadcasted_add(self):
self.check_array_doubly_broadcasted_op(operator.add)
def test_doubly_broadcasted_sub(self):
self.check_array_doubly_broadcasted_op(operator.sub, no_bool=True)
def test_doubly_broadcasted_mul(self):
self.check_array_doubly_broadcasted_op(operator.mul)
def test_doubly_broadcasted_truediv(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_doubly_broadcasted_op(operator.truediv)
def test_doubly_broadcasted_floordiv(self):
with numpy.errstate(divide='ignore'):
self.check_array_doubly_broadcasted_op(operator.floordiv,
no_complex=True)
def test_doubly_broadcasted_pow(self):
self.check_array_doubly_broadcasted_op(operator.pow)
def test_doubly_broadcasted_divmod0(self):
with numpy.errstate(divide='ignore'):
self.check_array_doubly_broadcasted_op(
lambda x, y: divmod(x, y)[0],
no_complex=True)
def test_doubly_broadcasted_divmod1(self):
with numpy.errstate(divide='ignore'):
self.check_array_doubly_broadcasted_op(
lambda x, y: divmod(x, y)[1],
no_complex=True)
def test_doubly_broadcasted_lt(self):
self.check_array_doubly_broadcasted_op(operator.lt, no_complex=True)
def test_doubly_broadcasted_le(self):
self.check_array_doubly_broadcasted_op(operator.le, no_complex=True)
def test_doubly_broadcasted_gt(self):
self.check_array_doubly_broadcasted_op(operator.gt, no_complex=True)
def test_doubly_broadcasted_ge(self):
self.check_array_doubly_broadcasted_op(operator.ge, no_complex=True)
def test_doubly_broadcasted_eq(self):
self.check_array_doubly_broadcasted_op(operator.eq)
def test_doubly_broadcasted_ne(self):
self.check_array_doubly_broadcasted_op(operator.ne)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose()
def check_array_reversed_op(self, op, xp, x_type, y_type, no_bool=False):
if no_bool and x_type == numpy.bool_ and y_type == numpy.bool_:
return xp.array(True)
a = xp.array([1, 2, 3, 4, 5], x_type)
b = xp.array([1, 2, 3, 4, 5], y_type)
return op(a, b[::-1])
def test_array_reversed_add(self):
self.check_array_reversed_op(operator.add)
def test_array_reversed_sub(self):
self.check_array_reversed_op(operator.sub, no_bool=True)
def test_array_reversed_mul(self):
self.check_array_reversed_op(operator.mul)
@testing.for_all_dtypes(no_bool=True)
def check_typecast(self, val, dtype):
operators = [
operator.add, operator.sub, operator.mul, operator.truediv]
for op in operators:
with numpy.errstate(divide='ignore', invalid='ignore'):
a = op(val, (testing.shaped_arange((5,), numpy, dtype) - 2))
b = op(val, (testing.shaped_arange((5,), cupy, dtype) - 2))
assert a.dtype == b.dtype
def test_typecast_bool1(self):
self.check_typecast(True)
def test_typecast_bool2(self):
self.check_typecast(False)
def test_typecast_int1(self):
self.check_typecast(0)
def test_typecast_int2(self):
self.check_typecast(-127)
def test_typecast_int3(self):
self.check_typecast(255)
def test_typecast_int4(self):
self.check_typecast(-32768)
def test_typecast_int5(self):
self.check_typecast(65535)
def test_typecast_int6(self):
self.check_typecast(-2147483648)
def test_typecast_int7(self):
self.check_typecast(4294967295)
def test_typecast_float1(self):
self.check_typecast(0.0)
def test_typecast_float2(self):
self.check_typecast(100000.0)
# Skip float16 because of NumPy #19514
@testing.for_all_dtypes(name='x_type', no_float16=True)
@testing.numpy_cupy_allclose()
def check_array_boolarray_op(self, op, xp, x_type):
a = xp.array([[2, 7, 1], [8, 2, 8]], x_type)
# Cast from np.bool8 array should not read bytes
b = xp.array([[3, 1, 4], [-1, -5, -9]], numpy.int8).view(bool)
return op(a, b)
def test_add_array_boolarray(self):
self.check_array_boolarray_op(operator.add)
def test_iadd_array_boolarray(self):
self.check_array_boolarray_op(operator.iadd)
class TestArrayIntElementwiseOp:
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(accept_error=TypeError)
def check_array_scalar_op(self, op, xp, x_type, y_type, swap=False):
a = xp.array([[0, 1, 2], [1, 0, 2]], dtype=x_type)
if swap:
return op(y_type(2), a)
else:
return op(a, y_type(2))
def test_lshift_scalar(self):
self.check_array_scalar_op(operator.lshift)
def test_rlshift_scalar(self):
self.check_array_scalar_op(operator.lshift, swap=True)
def test_rshift_scalar(self):
self.check_array_scalar_op(operator.rshift)
def test_rrshift_scalar(self):
self.check_array_scalar_op(operator.rshift, swap=True)
def test_and_scalar(self):
self.check_array_scalar_op(operator.and_)
def test_rand_scalar(self):
self.check_array_scalar_op(operator.and_, swap=True)
def test_or_scalar(self):
self.check_array_scalar_op(operator.or_)
def test_ror_scalar(self):
self.check_array_scalar_op(operator.or_, swap=True)
def test_xor_scalar(self):
self.check_array_scalar_op(operator.xor)
def test_rxor_scalar(self):
self.check_array_scalar_op(operator.xor, swap=True)
def test_mod_scalar(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_scalar_op(operator.mod)
def test_rmod_scalar(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_scalar_op(operator.mod, swap=True)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(accept_error=TypeError)
def check_array_scalarzero_op(self, op, xp, x_type, y_type, swap=False):
a = xp.array([[0, 1, 2], [1, 0, 2]], dtype=x_type)
if swap:
return op(y_type(0), a)
else:
return op(a, y_type(0))
def test_lshift_scalarzero(self):
self.check_array_scalarzero_op(operator.lshift)
def test_rlshift_scalarzero(self):
self.check_array_scalarzero_op(operator.lshift, swap=True)
def test_rshift_scalarzero(self):
self.check_array_scalarzero_op(operator.rshift)
def test_rrshift_scalarzero(self):
self.check_array_scalarzero_op(operator.rshift, swap=True)
def test_and_scalarzero(self):
self.check_array_scalarzero_op(operator.and_)
def test_rand_scalarzero(self):
self.check_array_scalarzero_op(operator.and_, swap=True)
def test_or_scalarzero(self):
self.check_array_scalarzero_op(operator.or_)
def test_ror_scalarzero(self):
self.check_array_scalarzero_op(operator.or_, swap=True)
def test_xor_scalarzero(self):
self.check_array_scalarzero_op(operator.xor)
def test_rxor_scalarzero(self):
self.check_array_scalarzero_op(operator.xor, swap=True)
def test_mod_scalarzero(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_scalarzero_op(operator.mod)
def test_rmod_scalarzero(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_scalarzero_op(operator.mod, swap=True)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(accept_error=TypeError)
def check_array_array_op(self, op, xp, x_type, y_type):
a = xp.array([[0, 1, 2], [1, 0, 2]], dtype=x_type)
b = xp.array([[0, 0, 1], [0, 1, 2]], dtype=y_type)
return op(a, b)
def test_lshift_array(self):
self.check_array_array_op(operator.lshift)
def test_ilshift_array(self):
self.check_array_array_op(operator.ilshift)
def test_rshift_array(self):
self.check_array_array_op(operator.rshift)
def test_irshift_array(self):
self.check_array_array_op(operator.irshift)
def test_and_array(self):
self.check_array_array_op(operator.and_)
def test_iand_array(self):
self.check_array_array_op(operator.iand)
def test_or_array(self):
self.check_array_array_op(operator.or_)
def test_ior_array(self):
self.check_array_array_op(operator.ior)
def test_xor_array(self):
self.check_array_array_op(operator.xor)
def test_ixor_array(self):
self.check_array_array_op(operator.ixor)
def test_mod_array(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_array_op(operator.mod)
def test_imod_array(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_array_op(operator.imod)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(accept_error=TypeError)
def check_array_broadcasted_op(self, op, xp, x_type, y_type):
a = xp.array([[0, 1, 2], [1, 0, 2], [2, 1, 0]], dtype=x_type)
b = xp.array([[0, 0, 1]], dtype=y_type)
return op(a, b)
def test_broadcasted_lshift(self):
self.check_array_broadcasted_op(operator.lshift)
def test_broadcasted_ilshift(self):
self.check_array_broadcasted_op(operator.ilshift)
def test_broadcasted_rshift(self):
self.check_array_broadcasted_op(operator.rshift)
def test_broadcasted_irshift(self):
self.check_array_broadcasted_op(operator.irshift)
def test_broadcasted_and(self):
self.check_array_broadcasted_op(operator.and_)
def test_broadcasted_iand(self):
self.check_array_broadcasted_op(operator.iand)
def test_broadcasted_or(self):
self.check_array_broadcasted_op(operator.or_)
def test_broadcasted_ior(self):
self.check_array_broadcasted_op(operator.ior)
def test_broadcasted_xor(self):
self.check_array_broadcasted_op(operator.xor)
def test_broadcasted_ixor(self):
self.check_array_broadcasted_op(operator.ixor)
def test_broadcasted_mod(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_broadcasted_op(operator.mod)
def test_broadcasted_imod(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_broadcasted_op(operator.imod)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(accept_error=TypeError)
def check_array_doubly_broadcasted_op(self, op, xp, x_type, y_type):
a = xp.array([[[0, 1, 2]], [[1, 0, 2]]], dtype=x_type)
b = xp.array([[0], [0], [1]], dtype=y_type)
return op(a, b)
def test_doubly_broadcasted_lshift(self):
self.check_array_doubly_broadcasted_op(operator.lshift)
def test_doubly_broadcasted_rshift(self):
self.check_array_doubly_broadcasted_op(operator.rshift)
def test_doubly_broadcasted_and(self):
self.check_array_doubly_broadcasted_op(operator.and_)
def test_doubly_broadcasted_or(self):
self.check_array_doubly_broadcasted_op(operator.or_)
def test_doubly_broadcasted_xor(self):
self.check_array_doubly_broadcasted_op(operator.xor)
def test_doubly_broadcasted_mod(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_doubly_broadcasted_op(operator.mod)
@pytest.mark.parametrize('value', [
None,
Ellipsis,
object(),
numpy._NoValue,
])
class TestArrayObjectComparison:
@pytest.mark.parametrize('swap', [False, True])
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_eq_object(self, xp, dtype, value, swap):
a = xp.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
if swap:
return value == a
else:
return a == value
@pytest.mark.parametrize('swap', [False, True])
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_ne_object(self, xp, dtype, value, swap):
a = xp.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
if swap:
return value != a
else:
return a != value
class HasEq:
def __eq__(self, other):
return (other == 2) | (other == 4)
class HasNe:
def __ne__(self, other):
return (other == 2) | (other == 4)
class HasEqSub(HasEq):
pass
class CustomInt(int):
pass
@pytest.mark.parametrize('dtype', ['int32', 'float64'])
@pytest.mark.parametrize('value', [
HasEq(),
HasNe(), # eq test passes because `==` does not fall back to `__ne__`.
HasEqSub(),
CustomInt(3),
])
class TestArrayObjectComparisonDifficult:
# OK to raise TypeError.
# If CuPy returns a result, it should match with NumPy's result.
def test_eq_object(self, dtype, value):
expected = numpy.array([[1, 2, 3], [4, 5, 6]], dtype=dtype) == value
a = cupy.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
try:
res = a == value
except TypeError:
pytest.skip()
cupy.testing.assert_array_equal(res, expected)
def test_ne_object(self, dtype, value):
expected = numpy.array([[1, 2, 3], [4, 5, 6]], dtype=dtype) != value
a = cupy.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
try:
res = a != value
except TypeError:
pytest.skip()
cupy.testing.assert_array_equal(res, expected)
| 34.835052
| 79
| 0.660328
|
import operator
import numpy
import pytest
import cupy
from cupy import testing
class TestArrayElementwiseOp:
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(rtol=1e-6, accept_error=TypeError)
def check_array_scalar_op(self, op, xp, x_type, y_type, swap=False,
no_bool=False, no_complex=False):
x_dtype = numpy.dtype(x_type)
y_dtype = numpy.dtype(y_type)
if no_bool and x_dtype == '?' and y_dtype == '?':
return xp.array(True)
if no_complex and (x_dtype.kind == 'c' or y_dtype.kind == 'c'):
return xp.array(True)
a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)
if swap:
return op(y_type(3), a)
else:
return op(a, y_type(3))
def test_add_scalar(self):
self.check_array_scalar_op(operator.add)
def test_radd_scalar(self):
self.check_array_scalar_op(operator.add, swap=True)
def test_iadd_scalar(self):
self.check_array_scalar_op(operator.iadd)
def test_sub_scalar(self):
self.check_array_scalar_op(operator.sub, no_bool=True)
def test_rsub_scalar(self):
self.check_array_scalar_op(operator.sub, swap=True, no_bool=True)
def test_isub_scalar(self):
self.check_array_scalar_op(operator.isub, no_bool=True)
def test_mul_scalar(self):
self.check_array_scalar_op(operator.mul)
def test_rmul_scalar(self):
self.check_array_scalar_op(operator.mul, swap=True)
def test_imul_scalar(self):
self.check_array_scalar_op(operator.imul)
def test_truediv_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(operator.truediv)
def test_rtruediv_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(operator.truediv, swap=True)
def test_itruediv_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(operator.itruediv)
def test_floordiv_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(operator.floordiv, no_complex=True)
def test_rfloordiv_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(operator.floordiv, swap=True,
no_complex=True)
def test_ifloordiv_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(operator.ifloordiv, no_complex=True)
def test_pow_scalar(self):
self.check_array_scalar_op(operator.pow)
def test_rpow_scalar(self):
self.check_array_scalar_op(operator.pow, swap=True)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(atol=1.0, accept_error=TypeError)
def check_ipow_scalar(self, xp, x_type, y_type):
a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)
return operator.ipow(a, y_type(3))
def test_ipow_scalar(self):
self.check_ipow_scalar()
def test_divmod0_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(lambda x, y: divmod(x, y)[0],
no_complex=True)
def test_divmod1_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(lambda x, y: divmod(x, y)[1],
no_complex=True)
def test_rdivmod0_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(lambda x, y: divmod(x, y)[0], swap=True,
no_complex=True)
def test_rdivmod1_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(lambda x, y: divmod(x, y)[1], swap=True,
no_complex=True)
def test_lt_scalar(self):
self.check_array_scalar_op(operator.lt, no_complex=False)
def test_le_scalar(self):
self.check_array_scalar_op(operator.le, no_complex=False)
def test_gt_scalar(self):
self.check_array_scalar_op(operator.gt, no_complex=False)
def test_ge_scalar(self):
self.check_array_scalar_op(operator.ge, no_complex=False)
def test_eq_scalar(self):
self.check_array_scalar_op(operator.eq)
def test_ne_scalar(self):
self.check_array_scalar_op(operator.ne)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(accept_error=TypeError)
def check_array_array_op(self, op, xp, x_type, y_type,
no_bool=False, no_complex=False):
x_dtype = numpy.dtype(x_type)
y_dtype = numpy.dtype(y_type)
if no_bool and x_dtype == '?' and y_dtype == '?':
return xp.array(True)
if no_complex and (x_dtype.kind == 'c' or y_dtype.kind == 'c'):
return xp.array(True)
a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)
b = xp.array([[6, 5, 4], [3, 2, 1]], y_type)
return op(a, b)
def test_add_array(self):
self.check_array_array_op(operator.add)
def test_iadd_array(self):
self.check_array_array_op(operator.iadd)
def test_sub_array(self):
self.check_array_array_op(operator.sub, no_bool=True)
def test_isub_array(self):
self.check_array_array_op(operator.isub, no_bool=True)
def test_mul_array(self):
self.check_array_array_op(operator.mul)
def test_imul_array(self):
self.check_array_array_op(operator.imul)
def test_truediv_array(self):
with numpy.errstate(divide='ignore'):
self.check_array_array_op(operator.truediv)
def test_itruediv_array(self):
with numpy.errstate(divide='ignore'):
self.check_array_array_op(operator.itruediv)
def test_floordiv_array(self):
with numpy.errstate(divide='ignore'):
self.check_array_array_op(operator.floordiv, no_complex=True)
def test_ifloordiv_array(self):
if '1.16.1' <= numpy.lib.NumpyVersion(numpy.__version__) < '1.18.0':
self.skipTest("NumPy Issue #12927")
with numpy.errstate(divide='ignore'):
self.check_array_array_op(operator.ifloordiv, no_complex=True)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(atol=1e-5, rtol=1e-6, accept_error=TypeError)
def check_pow_array(self, xp, x_type, y_type):
a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)
b = xp.array([[6, 5, 4], [3, 2, 1]], y_type)
return operator.pow(a, b)
def test_pow_array(self):
if cupy.cuda.runtime.is_hip:
self.check_pow_array()
else:
self.check_array_array_op(operator.pow)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(atol=1.0, accept_error=TypeError)
def check_ipow_array(self, xp, x_type, y_type):
a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)
b = xp.array([[6, 5, 4], [3, 2, 1]], y_type)
return operator.ipow(a, b)
def test_ipow_array(self):
self.check_ipow_array()
def test_divmod0_array(self):
with numpy.errstate(divide='ignore'):
self.check_array_array_op(lambda x, y: divmod(x, y)[0])
def test_divmod1_array(self):
with numpy.errstate(divide='ignore'):
self.check_array_array_op(lambda x, y: divmod(x, y)[1])
def test_lt_array(self):
self.check_array_array_op(operator.lt, no_complex=True)
def test_le_array(self):
self.check_array_array_op(operator.le, no_complex=True)
def test_gt_array(self):
self.check_array_array_op(operator.gt, no_complex=True)
def test_ge_array(self):
self.check_array_array_op(operator.ge, no_complex=True)
def test_eq_array(self):
self.check_array_array_op(operator.eq)
def test_ne_array(self):
self.check_array_array_op(operator.ne)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(accept_error=TypeError)
def check_array_broadcasted_op(self, op, xp, x_type, y_type,
no_bool=False, no_complex=False):
x_dtype = numpy.dtype(x_type)
y_dtype = numpy.dtype(y_type)
if no_bool and x_dtype == '?' and y_dtype == '?':
return xp.array(True)
if no_complex and (x_dtype.kind == 'c' or y_dtype.kind == 'c'):
return xp.array(True)
a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)
b = xp.array([[1], [2]], y_type)
return op(a, b)
def test_broadcasted_add(self):
self.check_array_broadcasted_op(operator.add)
def test_broadcasted_iadd(self):
self.check_array_broadcasted_op(operator.iadd)
def test_broadcasted_sub(self):
self.check_array_broadcasted_op(operator.sub, no_bool=True)
def test_broadcasted_isub(self):
self.check_array_broadcasted_op(operator.isub, no_bool=True)
def test_broadcasted_mul(self):
self.check_array_broadcasted_op(operator.mul)
def test_broadcasted_imul(self):
self.check_array_broadcasted_op(operator.imul)
def test_broadcasted_truediv(self):
with numpy.errstate(divide='ignore'):
self.check_array_broadcasted_op(operator.truediv)
def test_broadcasted_itruediv(self):
with numpy.errstate(divide='ignore'):
self.check_array_broadcasted_op(operator.itruediv)
def test_broadcasted_floordiv(self):
with numpy.errstate(divide='ignore'):
self.check_array_broadcasted_op(operator.floordiv, no_complex=True)
def test_broadcasted_ifloordiv(self):
if '1.16.1' <= numpy.lib.NumpyVersion(numpy.__version__) < '1.18.0':
self.skipTest("NumPy Issue #12927")
with numpy.errstate(divide='ignore'):
self.check_array_broadcasted_op(operator.ifloordiv,
no_complex=True)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(atol=1e-5, rtol=1e-6, accept_error=TypeError)
def check_broadcasted_pow(self, xp, x_type, y_type):
a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)
b = xp.array([[1], [2]], y_type)
return operator.pow(a, b)
def test_broadcasted_pow(self):
if cupy.cuda.runtime.is_hip:
self.check_broadcasted_pow()
else:
self.check_array_broadcasted_op(operator.pow)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(atol=1.0, accept_error=TypeError)
def check_broadcasted_ipow(self, xp, x_type, y_type):
a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)
b = xp.array([[1], [2]], y_type)
return operator.ipow(a, b)
def test_broadcasted_ipow(self):
self.check_broadcasted_ipow()
def test_broadcasted_divmod0(self):
with numpy.errstate(divide='ignore'):
self.check_array_broadcasted_op(lambda x, y: divmod(x, y)[0],
no_complex=True)
def test_broadcasted_divmod1(self):
with numpy.errstate(divide='ignore'):
self.check_array_broadcasted_op(lambda x, y: divmod(x, y)[1],
no_complex=True)
def test_broadcasted_lt(self):
self.check_array_broadcasted_op(operator.lt, no_complex=True)
def test_broadcasted_le(self):
self.check_array_broadcasted_op(operator.le, no_complex=True)
def test_broadcasted_gt(self):
self.check_array_broadcasted_op(operator.gt, no_complex=True)
def test_broadcasted_ge(self):
self.check_array_broadcasted_op(operator.ge, no_complex=True)
def test_broadcasted_eq(self):
self.check_array_broadcasted_op(operator.eq)
def test_broadcasted_ne(self):
self.check_array_broadcasted_op(operator.ne)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(rtol=1e-6)
def check_array_doubly_broadcasted_op(self, op, xp, x_type, y_type,
no_bool=False, no_complex=False):
x_dtype = numpy.dtype(x_type)
y_dtype = numpy.dtype(y_type)
if no_bool and x_dtype == '?' and y_dtype == '?':
return xp.array(True)
if no_complex and (x_dtype.kind == 'c' or y_dtype.kind == 'c'):
return xp.array(True)
a = xp.array([[[1, 2, 3]], [[4, 5, 6]]], x_type)
b = xp.array([[1], [2], [3]], y_type)
return op(a, b)
def test_doubly_broadcasted_add(self):
self.check_array_doubly_broadcasted_op(operator.add)
def test_doubly_broadcasted_sub(self):
self.check_array_doubly_broadcasted_op(operator.sub, no_bool=True)
def test_doubly_broadcasted_mul(self):
self.check_array_doubly_broadcasted_op(operator.mul)
def test_doubly_broadcasted_truediv(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_doubly_broadcasted_op(operator.truediv)
def test_doubly_broadcasted_floordiv(self):
with numpy.errstate(divide='ignore'):
self.check_array_doubly_broadcasted_op(operator.floordiv,
no_complex=True)
def test_doubly_broadcasted_pow(self):
self.check_array_doubly_broadcasted_op(operator.pow)
def test_doubly_broadcasted_divmod0(self):
with numpy.errstate(divide='ignore'):
self.check_array_doubly_broadcasted_op(
lambda x, y: divmod(x, y)[0],
no_complex=True)
def test_doubly_broadcasted_divmod1(self):
with numpy.errstate(divide='ignore'):
self.check_array_doubly_broadcasted_op(
lambda x, y: divmod(x, y)[1],
no_complex=True)
def test_doubly_broadcasted_lt(self):
self.check_array_doubly_broadcasted_op(operator.lt, no_complex=True)
def test_doubly_broadcasted_le(self):
self.check_array_doubly_broadcasted_op(operator.le, no_complex=True)
def test_doubly_broadcasted_gt(self):
self.check_array_doubly_broadcasted_op(operator.gt, no_complex=True)
def test_doubly_broadcasted_ge(self):
self.check_array_doubly_broadcasted_op(operator.ge, no_complex=True)
def test_doubly_broadcasted_eq(self):
self.check_array_doubly_broadcasted_op(operator.eq)
def test_doubly_broadcasted_ne(self):
self.check_array_doubly_broadcasted_op(operator.ne)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose()
def check_array_reversed_op(self, op, xp, x_type, y_type, no_bool=False):
if no_bool and x_type == numpy.bool_ and y_type == numpy.bool_:
return xp.array(True)
a = xp.array([1, 2, 3, 4, 5], x_type)
b = xp.array([1, 2, 3, 4, 5], y_type)
return op(a, b[::-1])
def test_array_reversed_add(self):
self.check_array_reversed_op(operator.add)
def test_array_reversed_sub(self):
self.check_array_reversed_op(operator.sub, no_bool=True)
def test_array_reversed_mul(self):
self.check_array_reversed_op(operator.mul)
@testing.for_all_dtypes(no_bool=True)
def check_typecast(self, val, dtype):
operators = [
operator.add, operator.sub, operator.mul, operator.truediv]
for op in operators:
with numpy.errstate(divide='ignore', invalid='ignore'):
a = op(val, (testing.shaped_arange((5,), numpy, dtype) - 2))
b = op(val, (testing.shaped_arange((5,), cupy, dtype) - 2))
assert a.dtype == b.dtype
def test_typecast_bool1(self):
self.check_typecast(True)
def test_typecast_bool2(self):
self.check_typecast(False)
def test_typecast_int1(self):
self.check_typecast(0)
def test_typecast_int2(self):
self.check_typecast(-127)
def test_typecast_int3(self):
self.check_typecast(255)
def test_typecast_int4(self):
self.check_typecast(-32768)
def test_typecast_int5(self):
self.check_typecast(65535)
def test_typecast_int6(self):
self.check_typecast(-2147483648)
def test_typecast_int7(self):
self.check_typecast(4294967295)
def test_typecast_float1(self):
self.check_typecast(0.0)
def test_typecast_float2(self):
self.check_typecast(100000.0)
testing.for_all_dtypes(name='x_type', no_float16=True)
@testing.numpy_cupy_allclose()
def check_array_boolarray_op(self, op, xp, x_type):
a = xp.array([[2, 7, 1], [8, 2, 8]], x_type)
b = xp.array([[3, 1, 4], [-1, -5, -9]], numpy.int8).view(bool)
return op(a, b)
def test_add_array_boolarray(self):
self.check_array_boolarray_op(operator.add)
def test_iadd_array_boolarray(self):
self.check_array_boolarray_op(operator.iadd)
class TestArrayIntElementwiseOp:
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(accept_error=TypeError)
def check_array_scalar_op(self, op, xp, x_type, y_type, swap=False):
a = xp.array([[0, 1, 2], [1, 0, 2]], dtype=x_type)
if swap:
return op(y_type(2), a)
else:
return op(a, y_type(2))
def test_lshift_scalar(self):
self.check_array_scalar_op(operator.lshift)
def test_rlshift_scalar(self):
self.check_array_scalar_op(operator.lshift, swap=True)
def test_rshift_scalar(self):
self.check_array_scalar_op(operator.rshift)
def test_rrshift_scalar(self):
self.check_array_scalar_op(operator.rshift, swap=True)
def test_and_scalar(self):
self.check_array_scalar_op(operator.and_)
def test_rand_scalar(self):
self.check_array_scalar_op(operator.and_, swap=True)
def test_or_scalar(self):
self.check_array_scalar_op(operator.or_)
def test_ror_scalar(self):
self.check_array_scalar_op(operator.or_, swap=True)
def test_xor_scalar(self):
self.check_array_scalar_op(operator.xor)
def test_rxor_scalar(self):
self.check_array_scalar_op(operator.xor, swap=True)
def test_mod_scalar(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_scalar_op(operator.mod)
def test_rmod_scalar(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_scalar_op(operator.mod, swap=True)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(accept_error=TypeError)
def check_array_scalarzero_op(self, op, xp, x_type, y_type, swap=False):
a = xp.array([[0, 1, 2], [1, 0, 2]], dtype=x_type)
if swap:
return op(y_type(0), a)
else:
return op(a, y_type(0))
def test_lshift_scalarzero(self):
self.check_array_scalarzero_op(operator.lshift)
def test_rlshift_scalarzero(self):
self.check_array_scalarzero_op(operator.lshift, swap=True)
def test_rshift_scalarzero(self):
self.check_array_scalarzero_op(operator.rshift)
def test_rrshift_scalarzero(self):
self.check_array_scalarzero_op(operator.rshift, swap=True)
def test_and_scalarzero(self):
self.check_array_scalarzero_op(operator.and_)
def test_rand_scalarzero(self):
self.check_array_scalarzero_op(operator.and_, swap=True)
def test_or_scalarzero(self):
self.check_array_scalarzero_op(operator.or_)
def test_ror_scalarzero(self):
self.check_array_scalarzero_op(operator.or_, swap=True)
def test_xor_scalarzero(self):
self.check_array_scalarzero_op(operator.xor)
def test_rxor_scalarzero(self):
self.check_array_scalarzero_op(operator.xor, swap=True)
def test_mod_scalarzero(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_scalarzero_op(operator.mod)
def test_rmod_scalarzero(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_scalarzero_op(operator.mod, swap=True)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(accept_error=TypeError)
def check_array_array_op(self, op, xp, x_type, y_type):
a = xp.array([[0, 1, 2], [1, 0, 2]], dtype=x_type)
b = xp.array([[0, 0, 1], [0, 1, 2]], dtype=y_type)
return op(a, b)
def test_lshift_array(self):
self.check_array_array_op(operator.lshift)
def test_ilshift_array(self):
self.check_array_array_op(operator.ilshift)
def test_rshift_array(self):
self.check_array_array_op(operator.rshift)
def test_irshift_array(self):
self.check_array_array_op(operator.irshift)
def test_and_array(self):
self.check_array_array_op(operator.and_)
def test_iand_array(self):
self.check_array_array_op(operator.iand)
def test_or_array(self):
self.check_array_array_op(operator.or_)
def test_ior_array(self):
self.check_array_array_op(operator.ior)
def test_xor_array(self):
self.check_array_array_op(operator.xor)
def test_ixor_array(self):
self.check_array_array_op(operator.ixor)
def test_mod_array(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_array_op(operator.mod)
def test_imod_array(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_array_op(operator.imod)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(accept_error=TypeError)
def check_array_broadcasted_op(self, op, xp, x_type, y_type):
a = xp.array([[0, 1, 2], [1, 0, 2], [2, 1, 0]], dtype=x_type)
b = xp.array([[0, 0, 1]], dtype=y_type)
return op(a, b)
def test_broadcasted_lshift(self):
self.check_array_broadcasted_op(operator.lshift)
def test_broadcasted_ilshift(self):
self.check_array_broadcasted_op(operator.ilshift)
def test_broadcasted_rshift(self):
self.check_array_broadcasted_op(operator.rshift)
def test_broadcasted_irshift(self):
self.check_array_broadcasted_op(operator.irshift)
def test_broadcasted_and(self):
self.check_array_broadcasted_op(operator.and_)
def test_broadcasted_iand(self):
self.check_array_broadcasted_op(operator.iand)
def test_broadcasted_or(self):
self.check_array_broadcasted_op(operator.or_)
def test_broadcasted_ior(self):
self.check_array_broadcasted_op(operator.ior)
def test_broadcasted_xor(self):
self.check_array_broadcasted_op(operator.xor)
def test_broadcasted_ixor(self):
self.check_array_broadcasted_op(operator.ixor)
def test_broadcasted_mod(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_broadcasted_op(operator.mod)
def test_broadcasted_imod(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_broadcasted_op(operator.imod)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(accept_error=TypeError)
def check_array_doubly_broadcasted_op(self, op, xp, x_type, y_type):
a = xp.array([[[0, 1, 2]], [[1, 0, 2]]], dtype=x_type)
b = xp.array([[0], [0], [1]], dtype=y_type)
return op(a, b)
def test_doubly_broadcasted_lshift(self):
self.check_array_doubly_broadcasted_op(operator.lshift)
def test_doubly_broadcasted_rshift(self):
self.check_array_doubly_broadcasted_op(operator.rshift)
def test_doubly_broadcasted_and(self):
self.check_array_doubly_broadcasted_op(operator.and_)
def test_doubly_broadcasted_or(self):
self.check_array_doubly_broadcasted_op(operator.or_)
def test_doubly_broadcasted_xor(self):
self.check_array_doubly_broadcasted_op(operator.xor)
def test_doubly_broadcasted_mod(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_doubly_broadcasted_op(operator.mod)
@pytest.mark.parametrize('value', [
None,
Ellipsis,
object(),
numpy._NoValue,
])
class TestArrayObjectComparison:
@pytest.mark.parametrize('swap', [False, True])
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_eq_object(self, xp, dtype, value, swap):
a = xp.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
if swap:
return value == a
else:
return a == value
@pytest.mark.parametrize('swap', [False, True])
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_ne_object(self, xp, dtype, value, swap):
a = xp.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
if swap:
return value != a
else:
return a != value
class HasEq:
def __eq__(self, other):
return (other == 2) | (other == 4)
class HasNe:
def __ne__(self, other):
return (other == 2) | (other == 4)
class HasEqSub(HasEq):
pass
class CustomInt(int):
pass
@pytest.mark.parametrize('dtype', ['int32', 'float64'])
@pytest.mark.parametrize('value', [
HasEq(),
HasNe(),
HasEqSub(),
CustomInt(3),
])
class TestArrayObjectComparisonDifficult:
def test_eq_object(self, dtype, value):
expected = numpy.array([[1, 2, 3], [4, 5, 6]], dtype=dtype) == value
a = cupy.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
try:
res = a == value
except TypeError:
pytest.skip()
cupy.testing.assert_array_equal(res, expected)
def test_ne_object(self, dtype, value):
expected = numpy.array([[1, 2, 3], [4, 5, 6]], dtype=dtype) != value
a = cupy.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
try:
res = a != value
except TypeError:
pytest.skip()
cupy.testing.assert_array_equal(res, expected)
| true
| true
|
7902d3d65beb69e0425055a6b53fa94e39aa393d
| 434
|
py
|
Python
|
data/get-sensor-data.py
|
PecceG2/Home-Assistant-SNMP-Sensor-Server
|
5b8a27de098c160d2280b08109c8da270bd13730
|
[
"MIT"
] | null | null | null |
data/get-sensor-data.py
|
PecceG2/Home-Assistant-SNMP-Sensor-Server
|
5b8a27de098c160d2280b08109c8da270bd13730
|
[
"MIT"
] | null | null | null |
data/get-sensor-data.py
|
PecceG2/Home-Assistant-SNMP-Sensor-Server
|
5b8a27de098c160d2280b08109c8da270bd13730
|
[
"MIT"
] | null | null | null |
import sys, os
import json
from requests import get
sensorID = sys.argv[1]
SupervisorToken = os.environ["SUPERVISOR_TOKEN"]
url = "http://supervisor/core/api/states/"+sensorID
headers = {
"Authorization": "Bearer "+SupervisorToken,
"content-type": "application/json",
}
ha_sensor_data_request = get(url, headers=headers)
ha_sensor = json.loads(ha_sensor_data_request.text)
# Sensor state output
print(ha_sensor["state"])
| 21.7
| 51
| 0.748848
|
import sys, os
import json
from requests import get
sensorID = sys.argv[1]
SupervisorToken = os.environ["SUPERVISOR_TOKEN"]
url = "http://supervisor/core/api/states/"+sensorID
headers = {
"Authorization": "Bearer "+SupervisorToken,
"content-type": "application/json",
}
ha_sensor_data_request = get(url, headers=headers)
ha_sensor = json.loads(ha_sensor_data_request.text)
print(ha_sensor["state"])
| true
| true
|
7902d433aad59b705c0a7a17b81b4925abd64ba1
| 6,043
|
py
|
Python
|
examples/app/clustering/main.py
|
SiggyF/bokeh
|
52a2ce993b0f1102fd9e136f66036f52e91cdcc3
|
[
"BSD-3-Clause"
] | null | null | null |
examples/app/clustering/main.py
|
SiggyF/bokeh
|
52a2ce993b0f1102fd9e136f66036f52e91cdcc3
|
[
"BSD-3-Clause"
] | null | null | null |
examples/app/clustering/main.py
|
SiggyF/bokeh
|
52a2ce993b0f1102fd9e136f66036f52e91cdcc3
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
np.random.seed(0)
from bokeh.io import curdoc
from bokeh.layouts import widgetbox, row, column
from bokeh.models import ColumnDataSource, Select, Slider
from bokeh.plotting import figure
from bokeh.palettes import Spectral6
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
# define some helper functions
def clustering(X, algorithm, n_clusters):
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# Generate the new colors:
if algorithm=='MiniBatchKMeans':
model = cluster.MiniBatchKMeans(n_clusters=n_clusters)
elif algorithm=='Birch':
model = cluster.Birch(n_clusters=n_clusters)
elif algorithm=='DBSCAN':
model = cluster.DBSCAN(eps=.2)
elif algorithm=='AffinityPropagation':
model = cluster.AffinityPropagation(damping=.9,
preference=-200)
elif algorithm=='MeanShift':
model = cluster.MeanShift(bandwidth=bandwidth,
bin_seeding=True)
elif algorithm=='SpectralClustering':
model = cluster.SpectralClustering(n_clusters=n_clusters,
eigen_solver='arpack',
affinity="nearest_neighbors")
elif algorithm=='Ward':
model = cluster.AgglomerativeClustering(n_clusters=n_clusters,
linkage='ward',
connectivity=connectivity)
elif algorithm=='AgglomerativeClustering':
model = cluster.AgglomerativeClustering(linkage="average",
affinity="cityblock",
n_clusters=n_clusters,
connectivity=connectivity)
model.fit(X)
if hasattr(model, 'labels_'):
y_pred = model.labels_.astype(np.int)
else:
y_pred = model.predict(X)
return X, y_pred
def get_dataset(dataset, n_samples):
if dataset == 'Noisy Circles':
return datasets.make_circles(n_samples=n_samples,
factor=0.5,
noise=0.05)
elif dataset == 'Noisy Moons':
return datasets.make_moons(n_samples=n_samples,
noise=0.05)
elif dataset == 'Blobs':
return datasets.make_blobs(n_samples=n_samples,
random_state=8)
elif dataset == "No Structure":
return np.random.rand(n_samples, 2), None
# set up initial data
n_samples = 1500
n_clusters = 2
algorithm = 'MiniBatchKMeans'
dataset = 'Noisy Circles'
X, y = get_dataset(dataset, n_samples)
X, y_pred = clustering(X, algorithm, n_clusters)
spectral = np.hstack([Spectral6] * 20)
colors = [spectral[i] for i in y]
# set up plot (styling in theme.yaml)
plot = figure(toolbar_location=None, title=algorithm)
source = ColumnDataSource(data=dict(x=X[:, 0], y=X[:, 1], colors=colors))
plot.circle('x', 'y', fill_color='colors', line_color=None, source=source)
# set up widgets
clustering_algorithms= [
'MiniBatchKMeans',
'AffinityPropagation',
'MeanShift',
'SpectralClustering',
'Ward',
'AgglomerativeClustering',
'DBSCAN',
'Birch'
]
datasets_names = [
'Noisy Circles',
'Noisy Moons',
'Blobs',
'No Structure'
]
algorithm_select = Select(value='MiniBatchKMeans',
title='Select algorithm:',
width=200,
options=clustering_algorithms)
dataset_select = Select(value='Noisy Circles',
title='Select dataset:',
width=200,
options=datasets_names)
samples_slider = Slider(title="Number of samples",
value=1500.0,
start=1000.0,
end=3000.0,
step=100,
width=400)
clusters_slider = Slider(title="Number of clusters",
value=2.0,
start=2.0,
end=10.0,
step=1,
width=400)
# set up callbacks
def update_algorithm_or_clusters(attrname, old, new):
global X
algorithm = algorithm_select.value
n_clusters = int(clusters_slider.value)
X, y_pred = clustering(X, algorithm, n_clusters)
colors = [spectral[i] for i in y_pred]
source.data['colors'] = colors
source.data['x'] = X[:, 0]
source.data['y'] = X[:, 1]
plot.title.text = algorithm
def update_samples_or_dataset(attrname, old, new):
global X, y
dataset = dataset_select.value
algorithm = algorithm_select.value
n_clusters = int(clusters_slider.value)
n_samples = int(samples_slider.value)
X, y = get_dataset(dataset, n_samples)
X, y_pred = clustering(X, algorithm, n_clusters)
colors = [spectral[i] for i in y_pred]
source.data['x'] = X[:, 0]
source.data['y'] = X[:, 1]
source.data['colors'] = colors
algorithm_select.on_change('value', update_algorithm_or_clusters)
clusters_slider.on_change('value', update_algorithm_or_clusters)
dataset_select.on_change('value', update_samples_or_dataset)
samples_slider.on_change('value', update_samples_or_dataset)
# set up layout
selects = row(dataset_select, algorithm_select, width=420)
inputs = column(selects, widgetbox(samples_slider, clusters_slider))
# add to document
curdoc().add_root(row(inputs, plot))
curdoc().title = "Clustering"
| 31.473958
| 74
| 0.608307
|
import numpy as np
np.random.seed(0)
from bokeh.io import curdoc
from bokeh.layouts import widgetbox, row, column
from bokeh.models import ColumnDataSource, Select, Slider
from bokeh.plotting import figure
from bokeh.palettes import Spectral6
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
def clustering(X, algorithm, n_clusters):
X = StandardScaler().fit_transform(X)
bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
connectivity = 0.5 * (connectivity + connectivity.T)
if algorithm=='MiniBatchKMeans':
model = cluster.MiniBatchKMeans(n_clusters=n_clusters)
elif algorithm=='Birch':
model = cluster.Birch(n_clusters=n_clusters)
elif algorithm=='DBSCAN':
model = cluster.DBSCAN(eps=.2)
elif algorithm=='AffinityPropagation':
model = cluster.AffinityPropagation(damping=.9,
preference=-200)
elif algorithm=='MeanShift':
model = cluster.MeanShift(bandwidth=bandwidth,
bin_seeding=True)
elif algorithm=='SpectralClustering':
model = cluster.SpectralClustering(n_clusters=n_clusters,
eigen_solver='arpack',
affinity="nearest_neighbors")
elif algorithm=='Ward':
model = cluster.AgglomerativeClustering(n_clusters=n_clusters,
linkage='ward',
connectivity=connectivity)
elif algorithm=='AgglomerativeClustering':
model = cluster.AgglomerativeClustering(linkage="average",
affinity="cityblock",
n_clusters=n_clusters,
connectivity=connectivity)
model.fit(X)
if hasattr(model, 'labels_'):
y_pred = model.labels_.astype(np.int)
else:
y_pred = model.predict(X)
return X, y_pred
def get_dataset(dataset, n_samples):
if dataset == 'Noisy Circles':
return datasets.make_circles(n_samples=n_samples,
factor=0.5,
noise=0.05)
elif dataset == 'Noisy Moons':
return datasets.make_moons(n_samples=n_samples,
noise=0.05)
elif dataset == 'Blobs':
return datasets.make_blobs(n_samples=n_samples,
random_state=8)
elif dataset == "No Structure":
return np.random.rand(n_samples, 2), None
n_samples = 1500
n_clusters = 2
algorithm = 'MiniBatchKMeans'
dataset = 'Noisy Circles'
X, y = get_dataset(dataset, n_samples)
X, y_pred = clustering(X, algorithm, n_clusters)
spectral = np.hstack([Spectral6] * 20)
colors = [spectral[i] for i in y]
plot = figure(toolbar_location=None, title=algorithm)
source = ColumnDataSource(data=dict(x=X[:, 0], y=X[:, 1], colors=colors))
plot.circle('x', 'y', fill_color='colors', line_color=None, source=source)
clustering_algorithms= [
'MiniBatchKMeans',
'AffinityPropagation',
'MeanShift',
'SpectralClustering',
'Ward',
'AgglomerativeClustering',
'DBSCAN',
'Birch'
]
datasets_names = [
'Noisy Circles',
'Noisy Moons',
'Blobs',
'No Structure'
]
algorithm_select = Select(value='MiniBatchKMeans',
title='Select algorithm:',
width=200,
options=clustering_algorithms)
dataset_select = Select(value='Noisy Circles',
title='Select dataset:',
width=200,
options=datasets_names)
samples_slider = Slider(title="Number of samples",
value=1500.0,
start=1000.0,
end=3000.0,
step=100,
width=400)
clusters_slider = Slider(title="Number of clusters",
value=2.0,
start=2.0,
end=10.0,
step=1,
width=400)
def update_algorithm_or_clusters(attrname, old, new):
global X
algorithm = algorithm_select.value
n_clusters = int(clusters_slider.value)
X, y_pred = clustering(X, algorithm, n_clusters)
colors = [spectral[i] for i in y_pred]
source.data['colors'] = colors
source.data['x'] = X[:, 0]
source.data['y'] = X[:, 1]
plot.title.text = algorithm
def update_samples_or_dataset(attrname, old, new):
global X, y
dataset = dataset_select.value
algorithm = algorithm_select.value
n_clusters = int(clusters_slider.value)
n_samples = int(samples_slider.value)
X, y = get_dataset(dataset, n_samples)
X, y_pred = clustering(X, algorithm, n_clusters)
colors = [spectral[i] for i in y_pred]
source.data['x'] = X[:, 0]
source.data['y'] = X[:, 1]
source.data['colors'] = colors
algorithm_select.on_change('value', update_algorithm_or_clusters)
clusters_slider.on_change('value', update_algorithm_or_clusters)
dataset_select.on_change('value', update_samples_or_dataset)
samples_slider.on_change('value', update_samples_or_dataset)
selects = row(dataset_select, algorithm_select, width=420)
inputs = column(selects, widgetbox(samples_slider, clusters_slider))
curdoc().add_root(row(inputs, plot))
curdoc().title = "Clustering"
| true
| true
|
7902d4cda740f439190486eb8a11420759266e38
| 4,842
|
py
|
Python
|
dqc/api/loadbasis.py
|
Jaikinator/dqc
|
47c964c7d1323a35f4f69521d40476c41843810e
|
[
"Apache-2.0"
] | 39
|
2021-05-31T17:01:23.000Z
|
2022-03-23T19:20:35.000Z
|
dqc/api/loadbasis.py
|
Jaikinator/dqc
|
47c964c7d1323a35f4f69521d40476c41843810e
|
[
"Apache-2.0"
] | 14
|
2021-09-01T13:39:11.000Z
|
2022-03-13T16:45:39.000Z
|
dqc/api/loadbasis.py
|
Jaikinator/dqc
|
47c964c7d1323a35f4f69521d40476c41843810e
|
[
"Apache-2.0"
] | 6
|
2021-07-16T09:08:29.000Z
|
2022-02-24T01:13:54.000Z
|
import os
import torch
from typing import List
from dqc.utils.datastruct import CGTOBasis
__all__ = ["loadbasis"]
_dtype = torch.double
_device = torch.device("cpu")
def loadbasis(cmd: str, dtype: torch.dtype = _dtype,
device: torch.device = _device, requires_grad: bool = False) -> \
List[CGTOBasis]:
"""
Load basis from a file and return the list of CGTOBasis.
Arguments
---------
cmd: str
This can be a file path where the basis is stored or a
string in format ``"atomz:basis"``, e.g. ``"1:6-311++G**"``.
dtype: torch.dtype
Tensor data type for ``alphas`` and ``coeffs`` of the GTO basis
device: torch.device
Tensor device for ``alphas`` and ``coeffs``
requires_grad: bool
If ``True``, the ``alphas`` and ``coeffs`` tensors become differentiable
Returns
-------
list of CGTOBasis
List of GTO basis loaded from the given file
"""
res = []
if not os.path.exists(cmd):
file = _get_basis_file(cmd)
else:
file = cmd
# read the content
with open(file, "r") as f:
lines = f.read().split("\n")
# skip the header
while True:
line = lines.pop(0)
if line == "":
continue
if line.startswith("!"):
continue
break
# now it is at the orbital description
while len(lines) > 0:
line = lines.pop(0)
if line.startswith("**"):
break
desc = line.split()
nlines = int(desc[1])
if nlines == 0:
raise RuntimeError("Zero line on basis %s" % file)
# read the exponents and the coefficients
alphas = []
coeffsT = []
for i in range(nlines):
alphacoeff = [_read_float(f) for f in lines.pop(0).split()]
alphas.append(alphacoeff[0])
coeffsT.append(alphacoeff[1:])
# coeffsT: list with shape (nbasis, ncontr)
# coeffs: list with shape (ncontr, nbasis)
coeffs = list(zip(*coeffsT))
ncoeffs = len(coeffs)
angmoms = _expand_angmoms(desc[0], ncoeffs)
# convert to tensor
alpha = torch.tensor(alphas, dtype=dtype, device=device, requires_grad=requires_grad)
for i in range(ncoeffs):
coeff = torch.tensor(coeffs[i], dtype=dtype, device=device, requires_grad=requires_grad)
basis = CGTOBasis(angmom=angmoms[i], alphas=alpha, coeffs=coeff)
basis.wfnormalize_()
res.append(basis)
return res
def _read_float(s: str) -> float:
s = s.replace("D", "E")
return float(s)
def _get_basis_file(cmd: str) -> str:
# parse the string command, check if the basis has already been downloaded
# (download if not), and return the file name
# parse to get the atomz and the basisname
atomz_str, raw_basisname = cmd.split(":")
raw_basisname = raw_basisname.strip()
atomz = int(atomz_str)
# get the path to the database
basisname = _normalize_basisname(raw_basisname)
thisdir = os.path.dirname(os.path.realpath(__file__))
fname = "%02d.gaussian94" % atomz
fdir = os.path.join(thisdir, ".database", basisname)
fpath = os.path.join(fdir, fname)
# if the file does not exist, download it
if not os.path.exists(fpath):
print("The %s basis for atomz %d does not exist, but we will download it" %
(raw_basisname, atomz))
if not os.path.exists(fdir):
os.makedirs(fdir)
_download_basis(fpath, atomz, raw_basisname)
return fpath
def _normalize_basisname(basisname: str) -> str:
b = basisname.lower()
b = b.replace("+", "p")
b = b.replace("*", "s")
b = b.replace("(", "_")
b = b.replace(")", "_")
b = b.replace(",", "_")
return b
def _download_basis(fname: str, atomz: int, basisname: str) -> None:
import basis_set_exchange as bse
s = bse.get_basis(basisname, elements=[atomz], fmt="gaussian94")
with open(fname, "w") as f:
f.write(s)
print("Downloaded to %s" % fname)
def _expand_angmoms(s: str, n: int) -> List[int]:
# convert the angular momentum characters into angmom and returns a list
# of n integer containing the angular momentums
if len(s) == n:
pass
elif n % len(s) == 0:
s = s * (n // len(s))
else:
raise RuntimeError("Do not know how to read orbital %s with %d coefficient columns" %
(s, n))
s = s.lower()
spdfmap = {
"s": 0,
"p": 1,
"d": 2,
"f": 3,
"g": 4,
"h": 5,
"i": 6,
}
angmoms = [spdfmap[c] for c in s]
return angmoms
| 31.647059
| 101
| 0.564436
|
import os
import torch
from typing import List
from dqc.utils.datastruct import CGTOBasis
__all__ = ["loadbasis"]
_dtype = torch.double
_device = torch.device("cpu")
def loadbasis(cmd: str, dtype: torch.dtype = _dtype,
device: torch.device = _device, requires_grad: bool = False) -> \
List[CGTOBasis]:
res = []
if not os.path.exists(cmd):
file = _get_basis_file(cmd)
else:
file = cmd
with open(file, "r") as f:
lines = f.read().split("\n")
while True:
line = lines.pop(0)
if line == "":
continue
if line.startswith("!"):
continue
break
while len(lines) > 0:
line = lines.pop(0)
if line.startswith("**"):
break
desc = line.split()
nlines = int(desc[1])
if nlines == 0:
raise RuntimeError("Zero line on basis %s" % file)
alphas = []
coeffsT = []
for i in range(nlines):
alphacoeff = [_read_float(f) for f in lines.pop(0).split()]
alphas.append(alphacoeff[0])
coeffsT.append(alphacoeff[1:])
coeffs = list(zip(*coeffsT))
ncoeffs = len(coeffs)
angmoms = _expand_angmoms(desc[0], ncoeffs)
alpha = torch.tensor(alphas, dtype=dtype, device=device, requires_grad=requires_grad)
for i in range(ncoeffs):
coeff = torch.tensor(coeffs[i], dtype=dtype, device=device, requires_grad=requires_grad)
basis = CGTOBasis(angmom=angmoms[i], alphas=alpha, coeffs=coeff)
basis.wfnormalize_()
res.append(basis)
return res
def _read_float(s: str) -> float:
s = s.replace("D", "E")
return float(s)
def _get_basis_file(cmd: str) -> str:
atomz_str, raw_basisname = cmd.split(":")
raw_basisname = raw_basisname.strip()
atomz = int(atomz_str)
basisname = _normalize_basisname(raw_basisname)
thisdir = os.path.dirname(os.path.realpath(__file__))
fname = "%02d.gaussian94" % atomz
fdir = os.path.join(thisdir, ".database", basisname)
fpath = os.path.join(fdir, fname)
if not os.path.exists(fpath):
print("The %s basis for atomz %d does not exist, but we will download it" %
(raw_basisname, atomz))
if not os.path.exists(fdir):
os.makedirs(fdir)
_download_basis(fpath, atomz, raw_basisname)
return fpath
def _normalize_basisname(basisname: str) -> str:
b = basisname.lower()
b = b.replace("+", "p")
b = b.replace("*", "s")
b = b.replace("(", "_")
b = b.replace(")", "_")
b = b.replace(",", "_")
return b
def _download_basis(fname: str, atomz: int, basisname: str) -> None:
import basis_set_exchange as bse
s = bse.get_basis(basisname, elements=[atomz], fmt="gaussian94")
with open(fname, "w") as f:
f.write(s)
print("Downloaded to %s" % fname)
def _expand_angmoms(s: str, n: int) -> List[int]:
if len(s) == n:
pass
elif n % len(s) == 0:
s = s * (n // len(s))
else:
raise RuntimeError("Do not know how to read orbital %s with %d coefficient columns" %
(s, n))
s = s.lower()
spdfmap = {
"s": 0,
"p": 1,
"d": 2,
"f": 3,
"g": 4,
"h": 5,
"i": 6,
}
angmoms = [spdfmap[c] for c in s]
return angmoms
| true
| true
|
7902d5e6c435aa46f407cf0e82499ae2efbef440
| 598
|
py
|
Python
|
examples/mycommands.py
|
gtmanfred/irc3
|
1ade641c35a75f5944f78707722c93af34ad8e86
|
[
"MIT"
] | 178
|
2015-01-10T22:44:41.000Z
|
2022-01-27T03:35:30.000Z
|
examples/mycommands.py
|
gtmanfred/irc3
|
1ade641c35a75f5944f78707722c93af34ad8e86
|
[
"MIT"
] | 136
|
2015-01-11T09:32:01.000Z
|
2022-02-07T13:33:40.000Z
|
examples/mycommands.py
|
gtmanfred/irc3
|
1ade641c35a75f5944f78707722c93af34ad8e86
|
[
"MIT"
] | 58
|
2015-02-02T13:34:01.000Z
|
2021-09-23T16:03:18.000Z
|
# -*- coding: utf-8 -*-
from irc3.plugins.command import command
@command
def echo(bot, mask, target, args):
"""Echo command
%%echo <words>...
"""
yield ' '.join(args['<words>'])
@command(permission='admin', public=False)
def adduser(bot, mask, target, args):
"""Add a user
%%adduser <name> <password>
"""
bot.privmsg(mask.nick, 'User added')
@command(show_in_help_list=False)
def my_secret_operation(bot, mask, target, args):
"""Do something you don't want in !help all the time
%%my_secret_operation
"""
yield "I like turtles"
| 19.933333
| 56
| 0.618729
|
from irc3.plugins.command import command
@command
def echo(bot, mask, target, args):
yield ' '.join(args['<words>'])
@command(permission='admin', public=False)
def adduser(bot, mask, target, args):
bot.privmsg(mask.nick, 'User added')
@command(show_in_help_list=False)
def my_secret_operation(bot, mask, target, args):
yield "I like turtles"
| true
| true
|
7902d6fc769f65ea7a3770011a1b1d995a07eea6
| 7,788
|
py
|
Python
|
pytorch_lightning/callbacks/early_stopping.py
|
shivin7/pytorch-lightning
|
9f2b29a7cd4b56c0d6afbbc4a1e0971d49c5f1d7
|
[
"Apache-2.0"
] | null | null | null |
pytorch_lightning/callbacks/early_stopping.py
|
shivin7/pytorch-lightning
|
9f2b29a7cd4b56c0d6afbbc4a1e0971d49c5f1d7
|
[
"Apache-2.0"
] | null | null | null |
pytorch_lightning/callbacks/early_stopping.py
|
shivin7/pytorch-lightning
|
9f2b29a7cd4b56c0d6afbbc4a1e0971d49c5f1d7
|
[
"Apache-2.0"
] | null | null | null |
r"""
Early Stopping
^^^^^^^^^^^^^^
Monitor a validation metric and stop training when it stops improving.
"""
from copy import deepcopy
import numpy as np
import torch
import torch.distributed as dist
from pytorch_lightning import _logger as log
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.utilities import rank_zero_warn
torch_inf = torch.tensor(np.Inf)
try:
import torch_xla
import torch_xla.core.xla_model as xm
except ImportError:
XLA_AVAILABLE = False
else:
XLA_AVAILABLE = True
class EarlyStopping(Callback):
r"""
Args:
monitor: quantity to be monitored. Default: ``'val_loss'``.
.. note:: Has no effect when using `EvalResult` or `TrainResult`
min_delta: minimum change in the monitored quantity
to qualify as an improvement, i.e. an absolute
change of less than `min_delta`, will count as no
improvement. Default: ``0.0``.
patience: number of validation epochs with no improvement
after which training will be stopped. Default: ``3``.
verbose: verbosity mode. Default: ``False``.
mode: one of {auto, min, max}. In `min` mode,
training will stop when the quantity
monitored has stopped decreasing; in `max`
mode it will stop when the quantity
monitored has stopped increasing; in `auto`
mode, the direction is automatically inferred
from the name of the monitored quantity. Default: ``'auto'``.
strict: whether to crash the training if `monitor` is
not found in the validation metrics. Default: ``True``.
Example::
>>> from pytorch_lightning import Trainer
>>> from pytorch_lightning.callbacks import EarlyStopping
>>> early_stopping = EarlyStopping('val_loss')
>>> trainer = Trainer(early_stop_callback=early_stopping)
"""
mode_dict = {
'min': torch.lt,
'max': torch.gt,
}
def __init__(self, monitor: str = 'val_loss', min_delta: float = 0.0, patience: int = 3,
verbose: bool = False, mode: str = 'auto', strict: bool = True):
super().__init__()
self.monitor = monitor
self.patience = patience
self.verbose = verbose
self.strict = strict
self.min_delta = min_delta
self.wait_count = 0
self.stopped_epoch = 0
self.mode = mode
if mode not in self.mode_dict:
if self.verbose > 0:
log.info(f'EarlyStopping mode {mode} is unknown, fallback to auto mode.')
self.mode = 'auto'
if self.mode == 'auto':
if self.monitor == 'acc':
self.mode = 'max'
else:
self.mode = 'min'
if self.verbose > 0:
log.info(f'EarlyStopping mode set to {self.mode} for monitoring {self.monitor}.')
self.min_delta *= 1 if self.monitor_op == torch.gt else -1
self.best_score = torch_inf if self.monitor_op == torch.lt else -torch_inf
def _validate_condition_metric(self, logs):
monitor_val = logs.get(self.monitor)
error_msg = (f'Early stopping conditioned on metric `{self.monitor}`'
f' which is not available. Either add `{self.monitor}` to the return of '
f' validation_epoch end or modify your EarlyStopping callback to use any of the '
f'following: `{"`, `".join(list(logs.keys()))}`')
if monitor_val is None:
if self.strict:
raise RuntimeError(error_msg)
if self.verbose > 0:
rank_zero_warn(error_msg, RuntimeWarning)
return False
return True
@property
def monitor_op(self):
return self.mode_dict[self.mode]
def state_dict(self):
return {
'wait_count': self.wait_count,
'stopped_epoch': self.stopped_epoch,
'best_score': self.best_score,
'patience': self.patience
}
def load_state_dict(self, state_dict):
state_dict = deepcopy(state_dict)
self.wait_count = state_dict['wait_count']
self.stopped_epoch = state_dict['stopped_epoch']
self.best_score = state_dict['best_score']
self.patience = state_dict['patience']
def on_validation_end(self, trainer, pl_module):
self._run_early_stopping_check(trainer, pl_module)
def on_validation_epoch_end(self, trainer, pl_module):
val_es_key = 'val_early_stop_on'
if trainer.callback_metrics.get(val_es_key) is not None:
self.monitor = val_es_key
# disable strict checking when using structured results
if val_es_key in trainer.callback_metrics:
self.strict = False
self._validate_condition_metric(trainer.callback_metrics)
def on_train_epoch_end(self, trainer, pl_module):
# disable early stopping in train loop when there's a val loop
if self.monitor == 'val_early_stop_on':
return
# early stopping can also work in the train loop when there is no val loop and when using structured results
should_check_early_stop = False
train_es_key = 'early_stop_on'
if trainer.callback_metrics.get(train_es_key, None) is not None:
self.monitor = train_es_key
should_check_early_stop = True
if should_check_early_stop:
self._run_early_stopping_check(trainer, pl_module)
def _run_early_stopping_check(self, trainer, pl_module):
logs = trainer.callback_metrics
if not self._validate_condition_metric(logs):
return # short circuit if metric not present
current = logs.get(self.monitor)
# when in dev debugging
trainer.dev_debugger.track_early_stopping_history(current)
if not isinstance(current, torch.Tensor):
current = torch.tensor(current, device=pl_module.device)
if trainer.use_tpu and XLA_AVAILABLE:
current = current.cpu()
if self.monitor_op(current - self.min_delta, self.best_score):
self.best_score = current
self.wait_count = 0
else:
self.wait_count += 1
should_stop = self.wait_count >= self.patience
if bool(should_stop):
self.stopped_epoch = trainer.current_epoch
trainer.should_stop = True
# stop every ddp process if any world process decides to stop
self._stop_distributed_training(trainer, pl_module)
def _stop_distributed_training(self, trainer, pl_module):
# in ddp make sure all processes stop when one is flagged
if trainer.use_ddp or trainer.use_ddp2:
stop = torch.tensor(int(trainer.should_stop), device=pl_module.device)
dist.all_reduce(stop, op=dist.reduce_op.SUM)
dist.barrier()
trainer.should_stop = stop == trainer.world_size
if trainer.use_tpu:
stop = torch.tensor(int(trainer.should_stop), device=pl_module.device, dtype=torch.int32)
stop = xm.mesh_reduce("stop_signal", stop, torch.cat)
torch_xla.core.xla_model.rendezvous("pl.EarlyStoppingCallback.stop_distributed_training_check")
trainer.should_stop = int(stop.item()) == trainer.world_size
def on_train_end(self, trainer, pl_module):
if self.stopped_epoch > 0 and self.verbose > 0:
rank_zero_warn('Displayed epoch numbers by `EarlyStopping` start from "1" until v0.6.x,'
' but will start from "0" in v0.8.0.', DeprecationWarning)
log.info(f'Epoch {self.stopped_epoch + 1:05d}: early stopping triggered.')
| 37.442308
| 116
| 0.635336
|
from copy import deepcopy
import numpy as np
import torch
import torch.distributed as dist
from pytorch_lightning import _logger as log
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.utilities import rank_zero_warn
torch_inf = torch.tensor(np.Inf)
try:
import torch_xla
import torch_xla.core.xla_model as xm
except ImportError:
XLA_AVAILABLE = False
else:
XLA_AVAILABLE = True
class EarlyStopping(Callback):
mode_dict = {
'min': torch.lt,
'max': torch.gt,
}
def __init__(self, monitor: str = 'val_loss', min_delta: float = 0.0, patience: int = 3,
verbose: bool = False, mode: str = 'auto', strict: bool = True):
super().__init__()
self.monitor = monitor
self.patience = patience
self.verbose = verbose
self.strict = strict
self.min_delta = min_delta
self.wait_count = 0
self.stopped_epoch = 0
self.mode = mode
if mode not in self.mode_dict:
if self.verbose > 0:
log.info(f'EarlyStopping mode {mode} is unknown, fallback to auto mode.')
self.mode = 'auto'
if self.mode == 'auto':
if self.monitor == 'acc':
self.mode = 'max'
else:
self.mode = 'min'
if self.verbose > 0:
log.info(f'EarlyStopping mode set to {self.mode} for monitoring {self.monitor}.')
self.min_delta *= 1 if self.monitor_op == torch.gt else -1
self.best_score = torch_inf if self.monitor_op == torch.lt else -torch_inf
def _validate_condition_metric(self, logs):
monitor_val = logs.get(self.monitor)
error_msg = (f'Early stopping conditioned on metric `{self.monitor}`'
f' which is not available. Either add `{self.monitor}` to the return of '
f' validation_epoch end or modify your EarlyStopping callback to use any of the '
f'following: `{"`, `".join(list(logs.keys()))}`')
if monitor_val is None:
if self.strict:
raise RuntimeError(error_msg)
if self.verbose > 0:
rank_zero_warn(error_msg, RuntimeWarning)
return False
return True
@property
def monitor_op(self):
return self.mode_dict[self.mode]
def state_dict(self):
return {
'wait_count': self.wait_count,
'stopped_epoch': self.stopped_epoch,
'best_score': self.best_score,
'patience': self.patience
}
def load_state_dict(self, state_dict):
state_dict = deepcopy(state_dict)
self.wait_count = state_dict['wait_count']
self.stopped_epoch = state_dict['stopped_epoch']
self.best_score = state_dict['best_score']
self.patience = state_dict['patience']
def on_validation_end(self, trainer, pl_module):
self._run_early_stopping_check(trainer, pl_module)
def on_validation_epoch_end(self, trainer, pl_module):
val_es_key = 'val_early_stop_on'
if trainer.callback_metrics.get(val_es_key) is not None:
self.monitor = val_es_key
if val_es_key in trainer.callback_metrics:
self.strict = False
self._validate_condition_metric(trainer.callback_metrics)
def on_train_epoch_end(self, trainer, pl_module):
if self.monitor == 'val_early_stop_on':
return
# early stopping can also work in the train loop when there is no val loop and when using structured results
should_check_early_stop = False
train_es_key = 'early_stop_on'
if trainer.callback_metrics.get(train_es_key, None) is not None:
self.monitor = train_es_key
should_check_early_stop = True
if should_check_early_stop:
self._run_early_stopping_check(trainer, pl_module)
def _run_early_stopping_check(self, trainer, pl_module):
logs = trainer.callback_metrics
if not self._validate_condition_metric(logs):
return # short circuit if metric not present
current = logs.get(self.monitor)
# when in dev debugging
trainer.dev_debugger.track_early_stopping_history(current)
if not isinstance(current, torch.Tensor):
current = torch.tensor(current, device=pl_module.device)
if trainer.use_tpu and XLA_AVAILABLE:
current = current.cpu()
if self.monitor_op(current - self.min_delta, self.best_score):
self.best_score = current
self.wait_count = 0
else:
self.wait_count += 1
should_stop = self.wait_count >= self.patience
if bool(should_stop):
self.stopped_epoch = trainer.current_epoch
trainer.should_stop = True
# stop every ddp process if any world process decides to stop
self._stop_distributed_training(trainer, pl_module)
def _stop_distributed_training(self, trainer, pl_module):
# in ddp make sure all processes stop when one is flagged
if trainer.use_ddp or trainer.use_ddp2:
stop = torch.tensor(int(trainer.should_stop), device=pl_module.device)
dist.all_reduce(stop, op=dist.reduce_op.SUM)
dist.barrier()
trainer.should_stop = stop == trainer.world_size
if trainer.use_tpu:
stop = torch.tensor(int(trainer.should_stop), device=pl_module.device, dtype=torch.int32)
stop = xm.mesh_reduce("stop_signal", stop, torch.cat)
torch_xla.core.xla_model.rendezvous("pl.EarlyStoppingCallback.stop_distributed_training_check")
trainer.should_stop = int(stop.item()) == trainer.world_size
def on_train_end(self, trainer, pl_module):
if self.stopped_epoch > 0 and self.verbose > 0:
rank_zero_warn('Displayed epoch numbers by `EarlyStopping` start from "1" until v0.6.x,'
' but will start from "0" in v0.8.0.', DeprecationWarning)
log.info(f'Epoch {self.stopped_epoch + 1:05d}: early stopping triggered.')
| true
| true
|
7902d739fcae01a162ee9425a03e26a9514d0900
| 44
|
py
|
Python
|
arclet/edoves/main/message/__init__.py
|
ArcletProject/Edoves
|
b28423057ab765796d5299b5d8f98a0e8ce494dd
|
[
"MIT"
] | 2
|
2022-01-25T16:05:30.000Z
|
2022-02-12T18:33:39.000Z
|
arclet/edoves/main/message/__init__.py
|
ArcletProject/Edoves
|
b28423057ab765796d5299b5d8f98a0e8ce494dd
|
[
"MIT"
] | 4
|
2022-03-04T08:24:53.000Z
|
2022-03-23T15:22:26.000Z
|
arclet/edoves/main/message/__init__.py
|
ArcletProject/Edoves
|
b28423057ab765796d5299b5d8f98a0e8ce494dd
|
[
"MIT"
] | null | null | null |
from .chain import *
from .element import *
| 14.666667
| 22
| 0.727273
|
from .chain import *
from .element import *
| true
| true
|
7902d850c8928e9f903904e80cbc33fda5c7b2b3
| 884
|
py
|
Python
|
Lists/lists-beg.py
|
Peabody29/Python_Projects-ST
|
e15eec8ac00a501c09296163c9adaf25ca928f5b
|
[
"MIT"
] | null | null | null |
Lists/lists-beg.py
|
Peabody29/Python_Projects-ST
|
e15eec8ac00a501c09296163c9adaf25ca928f5b
|
[
"MIT"
] | null | null | null |
Lists/lists-beg.py
|
Peabody29/Python_Projects-ST
|
e15eec8ac00a501c09296163c9adaf25ca928f5b
|
[
"MIT"
] | null | null | null |
"""DATA STRUCTURES"""
# Algorithms are set of rules used to solve a problem
# Data structures are a way of organizing data in a computer
# colors = ['red', 'yellow', [5, 6], 'blue']
friends = ['Josh', 'Renee', 'Agnes']
# print(colors)
# print(colors[1])
# colors[2] = 'green' # mutability of lists
# print(colors)
# print(len(friends))
# print(len(colors)) # gives you the number of items in the list variable
# print(range(len(friends)))
# for i in range(len(friends)): # loops through list when you know position of items
# friend = friends[i]
# print('Happy new year,', friend)
# for friend in friends: # better for looping since you get to write less code
# print('Happy New Year, %s!' % friend)
numbers = [2, 4, 6, 8, 10]
for i in range(len(numbers)): # range can also be used as such to update elements using indices
numbers[i] = numbers[i] * 2
print(numbers)
| 35.36
| 96
| 0.676471
|
friends = ['Josh', 'Renee', 'Agnes']
| true
| true
|
7902d8816f7003d12c5ca747d93f8fe1b0722dff
| 277
|
py
|
Python
|
4/tests/test_python.py
|
microcoder/course-python-mipt
|
60be3713b1960e31beb355321170c7afe72535b0
|
[
"MIT"
] | null | null | null |
4/tests/test_python.py
|
microcoder/course-python-mipt
|
60be3713b1960e31beb355321170c7afe72535b0
|
[
"MIT"
] | null | null | null |
4/tests/test_python.py
|
microcoder/course-python-mipt
|
60be3713b1960e31beb355321170c7afe72535b0
|
[
"MIT"
] | null | null | null |
import unittest
class TestPython(unittest.TestCase):
def test_float_to_int_coercion(self):
self.assertEqual(1, int(1.0))
def test_get_empty_dict(self):
self.assertIsNone({}.get('key'))
def test_trueness(self):
self.assertTrue(bool(10))
| 19.785714
| 41
| 0.67509
|
import unittest
class TestPython(unittest.TestCase):
def test_float_to_int_coercion(self):
self.assertEqual(1, int(1.0))
def test_get_empty_dict(self):
self.assertIsNone({}.get('key'))
def test_trueness(self):
self.assertTrue(bool(10))
| true
| true
|