Dataset schema (one row per source file; ⌀ marks nullable columns):

| Column | Type | Range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 2 – 1.02M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 – 245 |
| max_stars_repo_name | string | length 6 – 130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count | int64 ⌀ | 1 – 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 4 – 245 |
| max_issues_repo_name | string | length 6 – 130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count | int64 ⌀ | 1 – 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 4 – 245 |
| max_forks_repo_name | string | length 6 – 130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count | int64 ⌀ | 1 – 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 2 – 1.02M |
| avg_line_length | float64 | 1 – 417k |
| max_line_length | int64 | 1 – 987k |
| alphanum_fraction | float64 | 0 – 1 |
| content_no_comment | string | length 0 – 1.01M |
| is_comment_constant_removed | bool | 1 class |
| is_sharp_comment_removed | bool | 1 class |
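A minimal sketch of consuming rows with this schema via the Hugging Face `datasets` library; the dataset identifier below is a placeholder, not the real name:

```python
# Stream rows matching the schema above; "org/dataset" is a placeholder id.
from datasets import load_dataset

ds = load_dataset("org/dataset", split="train", streaming=True)
for row in ds:
    # max_stars_count is nullable (⌀ in the schema), so guard against None.
    if row["lang"] == "Python" and (row["max_stars_count"] or 0) >= 10:
        print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
        break
```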
---
hexsha: 1c46640f92964d3b0eb444b0e03fd6c6ff9d1033 | size: 9,349 | ext: py | lang: Python
max_stars: platformio/commands/check.py @ xeno010/platformio-core (94f8afec38fc8d35db1055368f5fbe4e67c89e7e) | licenses: ["Apache-2.0"] | count: null | events: null
max_issues: platformio/commands/check.py @ xeno010/platformio-core (94f8afec38fc8d35db1055368f5fbe4e67c89e7e) | licenses: ["Apache-2.0"] | count: null | events: null
max_forks: platformio/commands/check.py @ xeno010/platformio-core (94f8afec38fc8d35db1055368f5fbe4e67c89e7e) | licenses: ["Apache-2.0"] | count: null | events: null

content:
# Copyright (c) 2019-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=too-many-arguments,too-many-locals,too-many-branches
# pylint: disable=redefined-builtin,too-many-statements
import os
from collections import Counter
from os.path import basename, dirname, isfile, join
from time import time
import click
from tabulate import tabulate
from platformio import exception, fs, util
from platformio.check.defect import DefectItem
from platformio.check.tools import CheckToolFactory
from platformio.compat import dump_json_to_unicode
from platformio.project.config import ProjectConfig
from platformio.project.helpers import (
find_project_dir_above,
get_project_dir,
get_project_include_dir,
get_project_src_dir,
)
@click.command("check", short_help="Run a static analysis tool on code")
@click.option("-e", "--environment", multiple=True)
@click.option(
"-d",
"--project-dir",
default=os.getcwd,
type=click.Path(
exists=True, file_okay=True, dir_okay=True, writable=True, resolve_path=True
),
)
@click.option(
"-c",
"--project-conf",
type=click.Path(
exists=True, file_okay=True, dir_okay=False, readable=True, resolve_path=True
),
)
@click.option("--filter", multiple=True, help="Pattern: +<include> -<exclude>")
@click.option("--flags", multiple=True)
@click.option(
"--severity", multiple=True, type=click.Choice(DefectItem.SEVERITY_LABELS.values())
)
@click.option("-s", "--silent", is_flag=True)
@click.option("-v", "--verbose", is_flag=True)
@click.option("--json-output", is_flag=True)
def cli(
environment,
project_dir,
project_conf,
filter,
flags,
severity,
silent,
verbose,
json_output,
):
# if a file path was given, locate the enclosing project directory
if isfile(project_dir):
project_dir = find_project_dir_above(project_dir)
results = []
with fs.cd(project_dir):
config = ProjectConfig.get_instance(
project_conf or join(project_dir, "platformio.ini")
)
config.validate(environment)
default_envs = config.default_envs()
for envname in config.envs():
skipenv = any(
[
environment and envname not in environment,
not environment and default_envs and envname not in default_envs,
]
)
env_options = config.items(env=envname, as_dict=True)
env_dump = []
for k, v in env_options.items():
if k not in ("platform", "framework", "board"):
continue
env_dump.append(
"%s: %s" % (k, ", ".join(v) if isinstance(v, list) else v)
)
default_filter = [
"+<%s/>" % basename(d)
for d in (get_project_src_dir(), get_project_include_dir())
]
tool_options = dict(
verbose=verbose,
silent=silent,
filter=filter or env_options.get("check_filter", default_filter),
flags=flags or env_options.get("check_flags"),
severity=[DefectItem.SEVERITY_LABELS[DefectItem.SEVERITY_HIGH]]
if silent
else (severity or env_options.get("check_severity")),
)
for tool in env_options.get("check_tool", ["cppcheck"]):
if skipenv:
results.append({"env": envname, "tool": tool})
continue
if not silent and not json_output:
print_processing_header(tool, envname, env_dump)
ct = CheckToolFactory.new(
tool, project_dir, config, envname, tool_options
)
result = {"env": envname, "tool": tool, "duration": time()}
rc = ct.check(
on_defect_callback=None
if (json_output or verbose)
else lambda defect: click.echo(repr(defect))
)
result["defects"] = ct.get_defects()
result["duration"] = time() - result["duration"]
result["succeeded"] = rc == 0 and not any(
d.severity == DefectItem.SEVERITY_HIGH for d in result["defects"]
)
results.append(result)
if verbose:
click.echo("\n".join(repr(d) for d in result["defects"]))
if not json_output and not silent:
if not result["defects"]:
click.echo("No defects found")
print_processing_footer(result)
if json_output:
click.echo(dump_json_to_unicode(results_to_json(results)))
elif not silent:
print_check_summary(results)
command_failed = any(r.get("succeeded") is False for r in results)
if command_failed:
raise exception.ReturnErrorCode(1)
def results_to_json(raw):
results = []
for item in raw:
item.update(
{
"ignored": item.get("succeeded") is None,
"succeeded": bool(item.get("succeeded")),
"defects": [d.to_json() for d in item.get("defects", [])],
}
)
results.append(item)
return results
def print_processing_header(tool, envname, envdump):
click.echo(
"Checking %s > %s (%s)"
% (click.style(envname, fg="cyan", bold=True), tool, "; ".join(envdump))
)
terminal_width, _ = click.get_terminal_size()
click.secho("-" * terminal_width, bold=True)
def print_processing_footer(result):
is_failed = not result.get("succeeded")
util.print_labeled_bar(
"[%s] Took %.2f seconds"
% (
(
click.style("FAILED", fg="red", bold=True)
if is_failed
else click.style("PASSED", fg="green", bold=True)
),
result["duration"],
),
is_error=is_failed,
)
def print_defects_stats(results):
components = dict()
def _append_defect(component, defect):
if not components.get(component):
components[component] = Counter()
components[component].update({DefectItem.SEVERITY_LABELS[defect.severity]: 1})
for result in results:
for defect in result.get("defects", []):
component = dirname(defect.file) or defect.file
_append_defect(component, defect)
if component.startswith(get_project_dir()):
while os.sep in component:
component = dirname(component)
_append_defect(component, defect)
if not components:
return
severity_labels = list(DefectItem.SEVERITY_LABELS.values())
severity_labels.reverse()
tabular_data = list()
for k, v in components.items():
tool_defect = [v.get(s, 0) for s in severity_labels]
tabular_data.append([k] + tool_defect)
total = ["Total"] + [sum(d) for d in list(zip(*tabular_data))[1:]]
tabular_data.sort()
tabular_data.append([])  # Empty line as delimiter
tabular_data.append(total)
headers = ["Component"]
headers.extend([l.upper() for l in severity_labels])
headers = [click.style(h, bold=True) for h in headers]
click.echo(tabulate(tabular_data, headers=headers, numalign="center"))
click.echo()
def print_check_summary(results):
click.echo()
tabular_data = []
succeeded_nums = 0
failed_nums = 0
duration = 0
print_defects_stats(results)
for result in results:
duration += result.get("duration", 0)
if result.get("succeeded") is False:
failed_nums += 1
status_str = click.style("FAILED", fg="red")
elif result.get("succeeded") is None:
status_str = "IGNORED"
else:
succeeded_nums += 1
status_str = click.style("PASSED", fg="green")
tabular_data.append(
(
click.style(result["env"], fg="cyan"),
result["tool"],
status_str,
util.humanize_duration_time(result.get("duration")),
)
)
click.echo(
tabulate(
tabular_data,
headers=[
click.style(s, bold=True)
for s in ("Environment", "Tool", "Status", "Duration")
],
),
err=failed_nums,
)
util.print_labeled_bar(
"%s%d succeeded in %s"
% (
"%d failed, " % failed_nums if failed_nums else "",
succeeded_nums,
util.humanize_duration_time(duration),
),
is_error=failed_nums,
fg="red" if failed_nums else "green",
)
avg_line_length: 31.90785 | max_line_length: 87 | alphanum_fraction: 0.587656

content_no_comment:
import os
from collections import Counter
from os.path import basename, dirname, isfile, join
from time import time
import click
from tabulate import tabulate
from platformio import exception, fs, util
from platformio.check.defect import DefectItem
from platformio.check.tools import CheckToolFactory
from platformio.compat import dump_json_to_unicode
from platformio.project.config import ProjectConfig
from platformio.project.helpers import (
find_project_dir_above,
get_project_dir,
get_project_include_dir,
get_project_src_dir,
)
@click.command("check", short_help="Run a static analysis tool on code")
@click.option("-e", "--environment", multiple=True)
@click.option(
"-d",
"--project-dir",
default=os.getcwd,
type=click.Path(
exists=True, file_okay=True, dir_okay=True, writable=True, resolve_path=True
),
)
@click.option(
"-c",
"--project-conf",
type=click.Path(
exists=True, file_okay=True, dir_okay=False, readable=True, resolve_path=True
),
)
@click.option("--filter", multiple=True, help="Pattern: +<include> -<exclude>")
@click.option("--flags", multiple=True)
@click.option(
"--severity", multiple=True, type=click.Choice(DefectItem.SEVERITY_LABELS.values())
)
@click.option("-s", "--silent", is_flag=True)
@click.option("-v", "--verbose", is_flag=True)
@click.option("--json-output", is_flag=True)
def cli(
environment,
project_dir,
project_conf,
filter,
flags,
severity,
silent,
verbose,
json_output,
):
if isfile(project_dir):
project_dir = find_project_dir_above(project_dir)
results = []
with fs.cd(project_dir):
config = ProjectConfig.get_instance(
project_conf or join(project_dir, "platformio.ini")
)
config.validate(environment)
default_envs = config.default_envs()
for envname in config.envs():
skipenv = any(
[
environment and envname not in environment,
not environment and default_envs and envname not in default_envs,
]
)
env_options = config.items(env=envname, as_dict=True)
env_dump = []
for k, v in env_options.items():
if k not in ("platform", "framework", "board"):
continue
env_dump.append(
"%s: %s" % (k, ", ".join(v) if isinstance(v, list) else v)
)
default_filter = [
"+<%s/>" % basename(d)
for d in (get_project_src_dir(), get_project_include_dir())
]
tool_options = dict(
verbose=verbose,
silent=silent,
filter=filter or env_options.get("check_filter", default_filter),
flags=flags or env_options.get("check_flags"),
severity=[DefectItem.SEVERITY_LABELS[DefectItem.SEVERITY_HIGH]]
if silent
else (severity or env_options.get("check_severity")),
)
for tool in env_options.get("check_tool", ["cppcheck"]):
if skipenv:
results.append({"env": envname, "tool": tool})
continue
if not silent and not json_output:
print_processing_header(tool, envname, env_dump)
ct = CheckToolFactory.new(
tool, project_dir, config, envname, tool_options
)
result = {"env": envname, "tool": tool, "duration": time()}
rc = ct.check(
on_defect_callback=None
if (json_output or verbose)
else lambda defect: click.echo(repr(defect))
)
result["defects"] = ct.get_defects()
result["duration"] = time() - result["duration"]
result["succeeded"] = rc == 0 and not any(
d.severity == DefectItem.SEVERITY_HIGH for d in result["defects"]
)
results.append(result)
if verbose:
click.echo("\n".join(repr(d) for d in result["defects"]))
if not json_output and not silent:
if not result["defects"]:
click.echo("No defects found")
print_processing_footer(result)
if json_output:
click.echo(dump_json_to_unicode(results_to_json(results)))
elif not silent:
print_check_summary(results)
command_failed = any(r.get("succeeded") is False for r in results)
if command_failed:
raise exception.ReturnErrorCode(1)
def results_to_json(raw):
results = []
for item in raw:
item.update(
{
"ignored": item.get("succeeded") is None,
"succeeded": bool(item.get("succeeded")),
"defects": [d.to_json() for d in item.get("defects", [])],
}
)
results.append(item)
return results
def print_processing_header(tool, envname, envdump):
click.echo(
"Checking %s > %s (%s)"
% (click.style(envname, fg="cyan", bold=True), tool, "; ".join(envdump))
)
terminal_width, _ = click.get_terminal_size()
click.secho("-" * terminal_width, bold=True)
def print_processing_footer(result):
is_failed = not result.get("succeeded")
util.print_labeled_bar(
"[%s] Took %.2f seconds"
% (
(
click.style("FAILED", fg="red", bold=True)
if is_failed
else click.style("PASSED", fg="green", bold=True)
),
result["duration"],
),
is_error=is_failed,
)
def print_defects_stats(results):
components = dict()
def _append_defect(component, defect):
if not components.get(component):
components[component] = Counter()
components[component].update({DefectItem.SEVERITY_LABELS[defect.severity]: 1})
for result in results:
for defect in result.get("defects", []):
component = dirname(defect.file) or defect.file
_append_defect(component, defect)
if component.startswith(get_project_dir()):
while os.sep in component:
component = dirname(component)
_append_defect(component, defect)
if not components:
return
severity_labels = list(DefectItem.SEVERITY_LABELS.values())
severity_labels.reverse()
tabular_data = list()
for k, v in components.items():
tool_defect = [v.get(s, 0) for s in severity_labels]
tabular_data.append([k] + tool_defect)
total = ["Total"] + [sum(d) for d in list(zip(*tabular_data))[1:]]
tabular_data.sort()
tabular_data.append([])
tabular_data.append(total)
headers = ["Component"]
headers.extend([l.upper() for l in severity_labels])
headers = [click.style(h, bold=True) for h in headers]
click.echo(tabulate(tabular_data, headers=headers, numalign="center"))
click.echo()
def print_check_summary(results):
click.echo()
tabular_data = []
succeeded_nums = 0
failed_nums = 0
duration = 0
print_defects_stats(results)
for result in results:
duration += result.get("duration", 0)
if result.get("succeeded") is False:
failed_nums += 1
status_str = click.style("FAILED", fg="red")
elif result.get("succeeded") is None:
status_str = "IGNORED"
else:
succeeded_nums += 1
status_str = click.style("PASSED", fg="green")
tabular_data.append(
(
click.style(result["env"], fg="cyan"),
result["tool"],
status_str,
util.humanize_duration_time(result.get("duration")),
)
)
click.echo(
tabulate(
tabular_data,
headers=[
click.style(s, bold=True)
for s in ("Environment", "Tool", "Status", "Duration")
],
),
err=failed_nums,
)
util.print_labeled_bar(
"%s%d succeeded in %s"
% (
"%d failed, " % failed_nums if failed_nums else "",
succeeded_nums,
util.humanize_duration_time(duration),
),
is_error=failed_nums,
fg="red" if failed_nums else "green",
)
is_comment_constant_removed: true | is_sharp_comment_removed: true
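The `cli` function in this row's file is a Click command, so it can be exercised in-process with Click's test runner; a minimal sketch, assuming it runs inside a PlatformIO project directory and using only options declared in the decorators above:

```python
# Hedged sketch: invoke the `check` command in-process via click.testing.
from click.testing import CliRunner
from platformio.commands.check import cli

runner = CliRunner()
result = runner.invoke(cli, ["--json-output"])  # must run in a project dir
print(result.exit_code)  # 0 on success; 1 if any environment failed
print(result.output)     # the JSON built by results_to_json()
```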
---
hexsha: 1c46642673cb78f0a633c5d3ee8ef83f7a8d8c9d | size: 3,761 | ext: py | lang: Python
max_stars: python/spidriver.py @ boxofrox/spidriver (4e4cf254f1ad337fb299e3c08eb105ae13fa081f) | licenses: ["BSD-3-Clause"] | count: 4 | events: 2021-04-21T21:37:57.000Z – 2022-02-10T06:56:01.000Z
max_issues: python/spidriver.py @ boxofrox/spidriver (4e4cf254f1ad337fb299e3c08eb105ae13fa081f) | licenses: ["BSD-3-Clause"] | count: null | events: null
max_forks: python/spidriver.py @ boxofrox/spidriver (4e4cf254f1ad337fb299e3c08eb105ae13fa081f) | licenses: ["BSD-3-Clause"] | count: 5 | events: 2019-09-25T15:19:48.000Z – 2021-09-08T10:33:31.000Z

content:
# coding=utf-8
import sys
import serial
__version__ = '0.0.2'
PYTHON2 = (sys.version_info < (3, 0))
class SPIDriver:
"""
SPIDriver interface.
The following variables are available:
product product code e.g. 'spidriver1'
serial serial string of SPIDriver
uptime time since SPIDriver boot, in seconds
voltage USB voltage, in V
current current used by attached device, in mA
temp temperature, in degrees C
cs state of CS pin
a state of A pin
b state of B pin
ccitt_crc CCITT-16 CRC of all transmitted and received bytes
"""
def __init__(self, port="/dev/ttyUSB0"):
self.ser = serial.Serial(port, 460800, timeout=1)
self.ser.write(b'@' * 64)
while self.ser.inWaiting():
self.ser.read(1)
for c in [0x55, 0x00, 0xff, 0xaa]:
r = self.__echo(c)
if r != c:
print('Echo test failed - not attached?')
print('Expected %r but received %r' % (c, r))
raise IOError
self.getstatus()
if PYTHON2:
def __ser_w(self, s):
if isinstance(s, list):
s = "".join([chr(c) for c in s])
self.ser.write(s)
else:
def __ser_w(self, s):
if isinstance(s, list):
s = bytes(s)
self.ser.write(s)
def __echo(self, c):
self.__ser_w([ord('e'), c])
r = self.ser.read(1)
if PYTHON2:
return ord(r[0])
else:
return r[0]
def detach(self):
""" Detach all signals """
self.ser.write(b'x')
def sel(self):
""" Select the SPI device by asserting CS """
self.ser.write(b's')
def unsel(self):
""" Unselect the SPI device by deasserting CS """
self.ser.write(b'u')
def read(self, l):
""" Read l bytes from the SPI device """
r = []
for i in range(0, l, 64):
rem = min(l - i, 64)
self.__ser_w([0x80 + rem - 1] + [0xff] * rem)
r.append(self.ser.read(rem))
return b''.join(r)
def write(self, bb):
""" Write bb to the SPI device """
for i in range(0, len(bb), 64):
sub = bb[i:i + 64]
self.__ser_w([0xc0 + len(sub) - 1])
self.__ser_w(sub)
def writeread(self, bb):
""" Write bb to the SPI device, return the read bytes """
r = []
ST = 64
for i in range(0, len(bb), ST):
sub = bb[i:i + 64]
self.__ser_w([0x80 + len(sub) - 1])
self.__ser_w(sub)
r.append(self.ser.read(len(sub)))
return b''.join(r)
def seta(self, v):
""" Set the A signal to 0 or 1 """
self.__ser_w([ord('a'), v])
def setb(self, v):
""" Set the B signal to 0 or 1 """
self.__ser_w([ord('b'), v])
def getstatus(self):
""" Update all status variables """
self.ser.write(b'?')
r = self.ser.read(80)
body = r[1:-1].decode() # remove [ and ]
(self.product,
self.serial,
uptime,
voltage,
current,
temp,
a,
b,
cs,
ccitt_crc) = body.split()
self.uptime = int(uptime)
self.voltage = float(voltage)
self.current = float(current)
self.temp = float(temp)
self.a = int(a)
self.b = int(b)
self.cs = int(cs)
self.ccitt_crc = int(ccitt_crc, 16)
def __repr__(self):
return "<%s serial=%s uptime=%d>" % (
self.product,
self.serial,
self.uptime)
avg_line_length: 26.864286 | max_line_length: 70 | alphanum_fraction: 0.488168

content_no_comment:
import sys
import serial
__version__ = '0.0.2'
PYTHON2 = (sys.version_info < (3, 0))
class SPIDriver:
def __init__(self, port="/dev/ttyUSB0"):
self.ser = serial.Serial(port, 460800, timeout=1)
self.ser.write(b'@' * 64)
while self.ser.inWaiting():
self.ser.read(1)
for c in [0x55, 0x00, 0xff, 0xaa]:
r = self.__echo(c)
if r != c:
print('Echo test failed - not attached?')
print('Expected %r but received %r' % (c, r))
raise IOError
self.getstatus()
if PYTHON2:
def __ser_w(self, s):
if isinstance(s, list):
s = "".join([chr(c) for c in s])
self.ser.write(s)
else:
def __ser_w(self, s):
if isinstance(s, list):
s = bytes(s)
self.ser.write(s)
def __echo(self, c):
self.__ser_w([ord('e'), c])
r = self.ser.read(1)
if PYTHON2:
return ord(r[0])
else:
return r[0]
def detach(self):
self.ser.write(b'x')
def sel(self):
self.ser.write(b's')
def unsel(self):
self.ser.write(b'u')
def read(self, l):
r = []
for i in range(0, l, 64):
rem = min(l - i, 64)
self.__ser_w([0x80 + rem - 1] + [0xff] * rem)
r.append(self.ser.read(rem))
return b''.join(r)
def write(self, bb):
for i in range(0, len(bb), 64):
sub = bb[i:i + 64]
self.__ser_w([0xc0 + len(sub) - 1])
self.__ser_w(sub)
def writeread(self, bb):
r = []
ST = 64
for i in range(0, len(bb), ST):
sub = bb[i:i + 64]
self.__ser_w([0x80 + len(sub) - 1])
self.__ser_w(sub)
r.append(self.ser.read(len(sub)))
return b''.join(r)
def seta(self, v):
self.__ser_w([ord('a'), v])
def setb(self, v):
self.__ser_w([ord('b'), v])
def getstatus(self):
self.ser.write(b'?')
r = self.ser.read(80)
body = r[1:-1].decode()
(self.product,
self.serial,
uptime,
voltage,
current,
temp,
a,
b,
cs,
ccitt_crc) = body.split()
self.uptime = int(uptime)
self.voltage = float(voltage)
self.current = float(current)
self.temp = float(temp)
self.a = int(a)
self.b = int(b)
self.cs = int(cs)
self.ccitt_crc = int(ccitt_crc, 16)
def __repr__(self):
return "<%s serial=%s uptime=%d>" % (
self.product,
self.serial,
self.uptime)
is_comment_constant_removed: true | is_sharp_comment_removed: true
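The `SPIDriver` class docstring lists the status attributes; a minimal usage sketch built only from the methods defined in this file (the port name and the bytes transferred are illustrative, device-dependent values):

```python
s = SPIDriver("/dev/ttyUSB0")  # opens the port and runs the echo self-test
s.sel()                        # assert CS
s.write(b"\x9f")               # illustrative command byte, device-dependent
ident = s.read(3)              # clock out 3 bytes (sends 0xff fill bytes)
s.unsel()                      # deassert CS
s.getstatus()                  # refresh uptime/voltage/current/temp/...
print(s.product, s.voltage, ident.hex())
```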
---
hexsha: 1c46646062a09d65a5e7407db23335596075f971 | size: 5,160 | ext: py | lang: Python
max_stars: docs/source/conf.py @ Zhiwei-Lu/pyvaspflow (b80eab3e8bfc52aed6a2459dd32655f1075d9058) | licenses: ["MIT"] | count: 13 | events: 2019-06-03T11:41:35.000Z – 2022-03-04T07:45:42.000Z
max_issues: docs/source/conf.py @ Zhiwei-Lu/pyvaspflow (b80eab3e8bfc52aed6a2459dd32655f1075d9058) | licenses: ["MIT"] | count: 2 | events: 2019-03-12T10:51:15.000Z – 2019-03-14T02:18:18.000Z
max_forks: docs/source/conf.py @ Zhiwei-Lu/pyvaspflow (b80eab3e8bfc52aed6a2459dd32655f1075d9058) | licenses: ["MIT"] | count: 8 | events: 2019-06-03T03:20:20.000Z – 2021-01-06T11:48:37.000Z

content:
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, as shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
autodoc_mock_imports = ['spglib', 'ase']
# -- Project information -----------------------------------------------------
project = 'pyvaspflowdoc'
copyright = '2019, ChangChun He'
author = 'ChangChun He'
# The short X.Y version
version = '0.0.1'
# The full version, including alpha/beta/rc tags
release = '0.1.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'bizstyle'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyvaspflowdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pyvaspflow.tex', 'pyvaspflow Documentation',
'ChangChun He', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pyvaspflow', 'pyvaspflow Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pyvaspflow', 'pyvaspflow Documentation',
author, 'pyvaspflow', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
avg_line_length: 30.352941 | max_line_length: 79 | alphanum_fraction: 0.645543

content_no_comment:
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
autodoc_mock_imports = ['spglib', 'ase']
project = 'pyvaspflowdoc'
copyright = '2019, ChangChun He'
author = 'ChangChun He'
version = '0.0.1'
release = '0.1.0'
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
language = None
exclude_patterns = []
pygments_style = 'sphinx'
html_theme = 'bizstyle'
html_static_path = ['_static']
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyvaspflowdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pyvaspflow.tex', 'pyvaspflow Documentation',
'ChangChun He', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pyvaspflow', 'pyvaspflow Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pyvaspflow', 'pyvaspflow Documentation',
author, 'pyvaspflow', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
is_comment_constant_removed: true | is_sharp_comment_removed: true
---
hexsha: 1c4665040f1d2e9fa827001815877d94a40df77a | size: 4,964 | ext: py | lang: Python
max_stars: pypureclient/flasharray/FA_2_7/models/policy_rule_snapshot_get_response.py @ Flav-STOR-WL/py-pure-client (03b889c997d90380ac5d6380ca5d5432792d3e89) | licenses: ["BSD-2-Clause"] | count: 14 | events: 2018-12-07T18:30:27.000Z – 2022-02-22T09:12:33.000Z
max_issues: pypureclient/flasharray/FA_2_7/models/policy_rule_snapshot_get_response.py @ Flav-STOR-WL/py-pure-client (03b889c997d90380ac5d6380ca5d5432792d3e89) | licenses: ["BSD-2-Clause"] | count: 28 | events: 2019-09-17T21:03:52.000Z – 2022-03-29T22:07:35.000Z
max_forks: pypureclient/flasharray/FA_2_7/models/policy_rule_snapshot_get_response.py @ Flav-STOR-WL/py-pure-client (03b889c997d90380ac5d6380ca5d5432792d3e89) | licenses: ["BSD-2-Clause"] | count: 15 | events: 2020-06-11T15:50:08.000Z – 2022-03-21T09:27:25.000Z

content:
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.7
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_7 import models
class PolicyRuleSnapshotGetResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'more_items_remaining': 'bool',
'total_item_count': 'int',
'continuation_token': 'str',
'items': 'list[PolicyRuleSnapshot]'
}
attribute_map = {
'more_items_remaining': 'more_items_remaining',
'total_item_count': 'total_item_count',
'continuation_token': 'continuation_token',
'items': 'items'
}
required_args = {
}
def __init__(
self,
more_items_remaining=None, # type: bool
total_item_count=None, # type: int
continuation_token=None, # type: str
items=None, # type: List[models.PolicyRuleSnapshot]
):
"""
Keyword args:
more_items_remaining (bool): Returns a value of `true` if subsequent items can be retrieved.
total_item_count (int): The total number of records after applying all filter query parameters. The `total_item_count` will be calculated if and only if the corresponding query parameter `total_item_count` is set to `true`. If this query parameter is not set or set to `false`, a value of `null` will be returned.
continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the continuation token to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The continuation token is generated if the limit is less than the remaining number of items, and the default sort is used (no sort is specified).
items (list[PolicyRuleSnapshot]): Returns a list of all items after filtering. The values are displayed for each name where meaningful.
"""
if more_items_remaining is not None:
self.more_items_remaining = more_items_remaining
if total_item_count is not None:
self.total_item_count = total_item_count
if continuation_token is not None:
self.continuation_token = continuation_token
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `PolicyRuleSnapshotGetResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PolicyRuleSnapshotGetResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PolicyRuleSnapshotGetResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
avg_line_length: 38.184615 | max_line_length: 524 | alphanum_fraction: 0.615834

content_no_comment:
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_7 import models
class PolicyRuleSnapshotGetResponse(object):
swagger_types = {
'more_items_remaining': 'bool',
'total_item_count': 'int',
'continuation_token': 'str',
'items': 'list[PolicyRuleSnapshot]'
}
attribute_map = {
'more_items_remaining': 'more_items_remaining',
'total_item_count': 'total_item_count',
'continuation_token': 'continuation_token',
'items': 'items'
}
required_args = {
}
def __init__(
self,
more_items_remaining=None,
total_item_count=None,
continuation_token=None,
items=None,
):
if more_items_remaining is not None:
self.more_items_remaining = more_items_remaining
if total_item_count is not None:
self.total_item_count = total_item_count
if continuation_token is not None:
self.continuation_token = continuation_token
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `PolicyRuleSnapshotGetResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PolicyRuleSnapshotGetResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, PolicyRuleSnapshotGetResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
is_comment_constant_removed: true | is_sharp_comment_removed: true
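The `__init__` docstring in this row's file documents the paging fields; a minimal sketch of constructing and serializing the model (the values are illustrative, and `items` is left unset since it would require `PolicyRuleSnapshot` instances):

```python
resp = PolicyRuleSnapshotGetResponse(
    more_items_remaining=True,
    total_item_count=120,
    continuation_token="abc123",  # opaque paging token, illustrative value
)
# to_dict() emits only the attributes that were actually set:
print(resp.to_dict())
# {'more_items_remaining': True, 'total_item_count': 120, 'continuation_token': 'abc123'}
```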
---
hexsha: 1c466520334bf6074bf6400dd3e06d73f8b1465a | size: 6,434 | ext: py | lang: Python
max_stars: electroncash_plugins/fusion/connection.py @ christroutner/Electron-Cash (d5217ed3e878bd56977181f022f9e5c43f449241) | licenses: ["MIT"] | count: 208 | events: 2017-07-25T19:52:15.000Z – 2018-09-21T13:44:58.000Z
max_issues: electroncash_plugins/fusion/connection.py @ christroutner/Electron-Cash (d5217ed3e878bd56977181f022f9e5c43f449241) | licenses: ["MIT"] | count: 1,478 | events: 2018-09-24T09:30:13.000Z – 2022-03-29T15:48:17.000Z
max_forks: electroncash_plugins/fusion/connection.py @ christroutner/Electron-Cash (d5217ed3e878bd56977181f022f9e5c43f449241) | licenses: ["MIT"] | count: 159 | events: 2018-09-24T12:56:47.000Z – 2022-03-28T23:52:17.000Z

content:
#!/usr/bin/env python3
#
# Electron Cash - a lightweight Bitcoin Cash client
# CashFusion - an advanced coin anonymizer
#
# Copyright (C) 2020 Mark B. Lundeberg
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Message-based communications system for CashFusion.
This only implements a framing protocol:
<8 byte magic><4 byte length (big endian) of message><message>
<8 byte magic><4 byte length (big endian) of message><message>
...
<8 byte magic><4 byte length (big endian) of message><message>
"""
import certifi
import socket
import socks
import ssl
import time
from contextlib import suppress
sslcontext = ssl.create_default_context(cafile=certifi.where())
class BadFrameError(Exception):
pass
def open_connection(host, port, conn_timeout = 5.0, default_timeout = 5.0, ssl = False, socks_opts=None):
"""Open a connection as client to the specified server.
If `socks_opts` is None, a direct connection will be made using
`socket.create_connection`. Otherwise, a proxied connection will be
made using `socks.create_connection`, including socks_opts as keyword
arguments. Within that connection, an SSL tunnel will be established
if `ssl` is True.
"""
if socks_opts is None:
bare_socket = socket.create_connection((host, port), timeout=conn_timeout)
else:
bare_socket = socks.create_connection((host, port), timeout=conn_timeout, **socks_opts)
if ssl:
try:
conn_socket = sslcontext.wrap_socket(bare_socket, server_hostname=host)
except:
bare_socket.close()
raise
else:
conn_socket = bare_socket
try:
return Connection(conn_socket, default_timeout)
except:
conn_socket.close()
raise
class Connection:
# Message length limit. Anything longer is considered to be a malicious server.
# The all-initial-commitments and all-components messages can be big (~100 kB in large fusions).
MAX_MSG_LENGTH = 200*1024
magic = bytes.fromhex("765be8b4e4396dcf")
def __init__(self, socket, timeout):
self.socket = socket
self.timeout = timeout
socket.settimeout(timeout)
self.recvbuf = bytearray()
def __enter__(self):
self.socket.__enter__()
def __exit__(self, etype, evalue, traceback):
self.socket.__exit__(etype, evalue, traceback)
def send_message(self, msg, timeout = None):
""" Sends message; if this times out, the connection should be
abandoned since it's not possible to know how much data was sent.
"""
lengthbytes = len(msg).to_bytes(4, byteorder='big')
frame = self.magic + lengthbytes + msg
if timeout is None:
timeout = self.timeout
self.socket.settimeout(timeout)
try:
self.socket.sendall(frame)
except (ssl.SSLWantWriteError, ssl.SSLWantReadError) as e:
raise socket.timeout from e
def recv_message(self, timeout = None):
""" Read message, default timeout is self.timeout.
If it times out, behaviour is well defined in that no data is lost,
and the next call will function properly.
"""
if timeout is None:
timeout = self.timeout
if timeout is None:
max_time = None
self.socket.settimeout(timeout)
else:
max_time = time.monotonic() + timeout
recvbuf = self.recvbuf
def fillbuf(n):
# read until recvbuf contains at least n bytes
while True:
if len(recvbuf) >= n:
return
if max_time is not None:
remtime = max_time - time.monotonic()
if remtime < 0:
raise socket.timeout
self.socket.settimeout(remtime)
try:
data = self.socket.recv(65536)
except (ssl.SSLWantWriteError, ssl.SSLWantReadError) as e:
# these SSL errors should be reported as a timeout
raise socket.timeout from e
if not data:
if self.recvbuf:
raise ConnectionError("Connection ended mid-message.")
else:
raise ConnectionError("Connection ended while awaiting message.")
recvbuf.extend(data)
try:
fillbuf(12)
magic = recvbuf[:8]
if magic != self.magic:
raise BadFrameError("Bad magic in frame: {}".format(magic.hex()))
message_length = int.from_bytes(recvbuf[8:12], byteorder='big')
if message_length > self.MAX_MSG_LENGTH:
raise BadFrameError("Got a frame with msg_length={} > {} (max)".format(message_length, self.MAX_MSG_LENGTH))
fillbuf(12 + message_length)
# we have a complete message
message = bytes(recvbuf[12:12 + message_length])
del recvbuf[:12 + message_length]
return message
finally:
with suppress(OSError):
self.socket.settimeout(self.timeout)
def close(self):
with suppress(OSError):
self.socket.settimeout(self.timeout)
self.socket.shutdown(socket.SHUT_RDWR)
with suppress(OSError):
self.socket.close()
avg_line_length: 35.744444 | max_line_length: 124 | alphanum_fraction: 0.639882

content_no_comment:
import certifi
import socket
import socks
import ssl
import time
from contextlib import suppress
sslcontext = ssl.create_default_context(cafile=certifi.where())
class BadFrameError(Exception):
pass
def open_connection(host, port, conn_timeout = 5.0, default_timeout = 5.0, ssl = False, socks_opts=None):
if socks_opts is None:
bare_socket = socket.create_connection((host, port), timeout=conn_timeout)
else:
bare_socket = socks.create_connection((host, port), timeout=conn_timeout, **socks_opts)
if ssl:
try:
conn_socket = sslcontext.wrap_socket(bare_socket, server_hostname=host)
except:
bare_socket.close()
raise
else:
conn_socket = bare_socket
try:
return Connection(conn_socket, default_timeout)
except:
conn_socket.close()
raise
class Connection:
MAX_MSG_LENGTH = 200*1024
magic = bytes.fromhex("765be8b4e4396dcf")
def __init__(self, socket, timeout):
self.socket = socket
self.timeout = timeout
socket.settimeout(timeout)
self.recvbuf = bytearray()
def __enter__(self):
self.socket.__enter__()
def __exit__(self, etype, evalue, traceback):
self.socket.__exit__(etype, evalue, traceback)
def send_message(self, msg, timeout = None):
lengthbytes = len(msg).to_bytes(4, byteorder='big')
frame = self.magic + lengthbytes + msg
if timeout is None:
timeout = self.timeout
self.socket.settimeout(timeout)
try:
self.socket.sendall(frame)
except (ssl.SSLWantWriteError, ssl.SSLWantReadError) as e:
raise socket.timeout from e
def recv_message(self, timeout = None):
if timeout is None:
timeout = self.timeout
if timeout is None:
max_time = None
self.socket.settimeout(timeout)
else:
max_time = time.monotonic() + timeout
recvbuf = self.recvbuf
def fillbuf(n):
while True:
if len(recvbuf) >= n:
return
if max_time is not None:
remtime = max_time - time.monotonic()
if remtime < 0:
raise socket.timeout
self.socket.settimeout(remtime)
try:
data = self.socket.recv(65536)
except (ssl.SSLWantWriteError, ssl.SSLWantReadError) as e:
raise socket.timeout from e
if not data:
if self.recvbuf:
raise ConnectionError("Connection ended mid-message.")
else:
raise ConnectionError("Connection ended while awaiting message.")
recvbuf.extend(data)
try:
fillbuf(12)
magic = recvbuf[:8]
if magic != self.magic:
raise BadFrameError("Bad magic in frame: {}".format(magic.hex()))
message_length = int.from_bytes(recvbuf[8:12], byteorder='big')
if message_length > self.MAX_MSG_LENGTH:
raise BadFrameError("Got a frame with msg_length={} > {} (max)".format(message_length, self.MAX_MSG_LENGTH))
fillbuf(12 + message_length)
message = bytes(recvbuf[12:12 + message_length])
del recvbuf[:12 + message_length]
return message
finally:
with suppress(OSError):
self.socket.settimeout(self.timeout)
def close(self):
with suppress(OSError):
self.socket.settimeout(self.timeout)
self.socket.shutdown(socket.SHUT_RDWR)
with suppress(OSError):
self.socket.close()
is_comment_constant_removed: true | is_sharp_comment_removed: true
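A minimal client-side sketch using only `open_connection` and the `Connection` methods defined in this row's file; the host, port, and payload are illustrative values:

```python
conn = open_connection("fusion.example.org", 8787, ssl=True)
try:
    conn.send_message(b"hello")  # framed as <magic><length><payload>
    reply = conn.recv_message()  # raises socket.timeout / BadFrameError on error
    print(len(reply), "bytes received")
finally:
    conn.close()
```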
---
hexsha: 1c46653ea6548a2a103e0f864db3564637b4a532 | size: 6,220 | ext: py | lang: Python
max_stars: tests/test_validators.py @ wigeria/selenium-yaml-core (9f953a24ad6f47d0a8423ec78f2e8d29babff89a) | licenses: ["Apache-2.0"] | count: 2 | events: 2020-06-28T11:08:20.000Z – 2021-12-01T13:12:11.000Z
max_issues: tests/test_validators.py @ wigeria/selenium-yaml-core (9f953a24ad6f47d0a8423ec78f2e8d29babff89a) | licenses: ["Apache-2.0"] | count: 5 | events: 2020-10-12T13:02:20.000Z – 2021-05-20T14:04:14.000Z
max_forks: tests/test_validators.py @ wigeria/selenium-yaml-core (9f953a24ad6f47d0a8423ec78f2e8d29babff89a) | licenses: ["Apache-2.0"] | count: null | events: null

content:
"""
Contains tests for the base Validators included in selenium_yaml.validators
"""
from selenium_yaml import validators
import os
class ValidationTestMixin:
""" Contains basic methods for checking whether a validation was
a success or a failure
"""
def is_successful_validation(self, validator, value):
""" Uses basic assertions to test that the validation was a success
Parameters
----------
validator : An instance of a validator derived from
selenium_yaml.validators.Validator
value : The value that should be passed on to the validator
"""
assert validator.is_valid(value) is True
assert isinstance(validator.error, str)
assert len(validator.error) == 0
def is_unsuccessful_validation(self, validator, value):
""" Uses basic assertions to test that the validation was a failure
Parameters
----------
validator : An instance of a validator derived from
selenium_yaml.validators.Validator
value : The value that should be passed on to the validator
"""
assert validator.is_valid(value) is False
assert isinstance(validator.error, list)
assert len(validator.error) > 0
class TestRequiredValidation(ValidationTestMixin):
""" Tests the RequiredValidator on null and non-null values """
def test_required_on_null(self):
""" Tests that the required validator raises an exception on a Null
value and that the error attribute gets populated
"""
validator = validators.RequiredValidator()
self.is_unsuccessful_validation(validator, None)
def test_required_on_non_null_values(self):
""" Tests that the required validator doesn't raise an exception for
valid (non-null) values and that the error attribute is set to a
blank string
"""
validator = validators.RequiredValidator()
valid_values = ["Valid Value", 100, True, [], ["Crazy"]]
for value in valid_values:
self.is_successful_validation(validator, value)
class TestMaxLengthValidation(ValidationTestMixin):
""" Tests the MaxLengthValidator on values that don't have a len, values
that exceed a max length and values that fall within the max length
"""
def test_max_length_on_no_len(self):
""" Tests that the max-length validator fails on values that don't have
a len attribute
"""
validator = validators.MaxLengthValidator(length=3)
self.is_unsuccessful_validation(validator, 0)
def test_max_length_on_greater_len(self):
""" Tests that the max-length validator fails on values that have a len
greater than the specified max-length
"""
invalid_values = ["Test", [1, 2, 3, 4]]
validator = validators.MaxLengthValidator(length=3)
for value in invalid_values:
self.is_unsuccessful_validation(validator, value)
def test_max_length_on_valid_len(self):
""" Tests that the max-length validator succeeds on values that have a
len within the given threshold
"""
valid_values = ["XYZ", [1, 2]]
validator = validators.MaxLengthValidator(length=3)
for value in valid_values:
self.is_successful_validation(validator, value)
class TestTypeValidation(ValidationTestMixin):
""" Tests that the TypeValidator only succeeds on values that are
instances of the given type
"""
def test_validator_on_non_matching_type(self):
""" Tests that the validation fails for values that aren't of a
matching type
"""
invalid_values = [1, False]
validator = validators.TypeValidator(field_type=str)
for value in invalid_values:
self.is_unsuccessful_validation(validator, value)
def test_validator_on_matching_type(self):
""" Tests that the validation succeeds on values that are of a
matching type
"""
valid_values = ["This", "Is", "Valid"]
validator = validators.TypeValidator(field_type=str)
for value in valid_values:
self.is_successful_validation(validator, value)
class TestOptionsValidation(ValidationTestMixin):
""" Tests that the options validator only succeeds if the value is a
part of the given options array
"""
def test_validator_on_non_member(self):
""" Tests that the validation fails on non-members """
options = [1, 2]
validator = validators.OptionsValidator(options=options)
self.is_unsuccessful_validation(validator, 3)
def test_validator_on_member(self):
""" Tests that the validation succeeds on a member """
options = [1, 2]
validator = validators.OptionsValidator(options=options)
self.is_successful_validation(validator, 1)
class TestFilePathValidation(ValidationTestMixin):
""" Tests that the FilePath validator is only valid when the given
value is a valid file path
"""
def test_validator_on_invalid_filepath(self):
""" Tests that the validation fails on non-existent fpaths """
value = os.path.join(os.getcwd(), "thispathshouldnotexist.txt")
validator = validators.FilePathValidator()
self.is_unsuccessful_validation(validator, value)
def test_validator_on_valid_filepath(self):
""" Tests that the validation succeeds on valid fpaths """
value = os.path.join(os.getcwd(), ".gitignore")
validator = validators.FilePathValidator()
self.is_successful_validation(validator, value)
class TestResolvedVariableValidation(ValidationTestMixin):
""" Tests that the ResolvedVariable validation is only valid on a resolved
variable or an instance of the given type
"""
def test_validator_on_resolved_var(self):
""" Tests that the validation succeeds on a valid resolved var """
value = "${resolved_var}"
validator = validators.ResolvedVariableValidator()
self.is_successful_validation(validator, value)
avg_line_length: 39.119497 | max_line_length: 79 | alphanum_fraction: 0.671383

content_no_comment:
from selenium_yaml import validators
import os
class ValidationTestMixin:
def is_successful_validation(self, validator, value):
assert validator.is_valid(value) is True
assert isinstance(validator.error, str)
assert len(validator.error) == 0
def is_unsuccessful_validation(self, validator, value):
assert validator.is_valid(value) is False
assert isinstance(validator.error, list)
assert len(validator.error) > 0
class TestRequiredValidation(ValidationTestMixin):
def test_required_on_null(self):
validator = validators.RequiredValidator()
self.is_unsuccessful_validation(validator, None)
def test_required_on_non_null_values(self):
validator = validators.RequiredValidator()
valid_values = ["Valid Value", 100, True, [], ["Crazy"]]
for value in valid_values:
self.is_successful_validation(validator, value)
class TestMaxLengthValidation(ValidationTestMixin):
def test_max_length_on_no_len(self):
validator = validators.MaxLengthValidator(length=3)
self.is_unsuccessful_validation(validator, 0)
def test_max_length_on_greater_len(self):
invalid_values = ["Test", [1, 2, 3, 4]]
validator = validators.MaxLengthValidator(length=3)
for value in invalid_values:
self.is_unsuccessful_validation(validator, value)
def test_max_length_on_valid_len(self):
valid_values = ["XYZ", [1, 2]]
validator = validators.MaxLengthValidator(length=3)
for value in valid_values:
self.is_successful_validation(validator, value)
class TestTypeValidation(ValidationTestMixin):
def test_validator_on_non_matching_type(self):
invalid_values = [1, False]
validator = validators.TypeValidator(field_type=str)
for value in invalid_values:
self.is_unsuccessful_validation(validator, value)
def test_validator_on_matching_type(self):
valid_values = ["This", "Is", "Valid"]
validator = validators.TypeValidator(field_type=str)
for value in valid_values:
self.is_successful_validation(validator, value)
class TestOptionsValidation(ValidationTestMixin):
def test_validator_on_non_member(self):
options = [1, 2]
validator = validators.OptionsValidator(options=options)
self.is_unsuccessful_validation(validator, 3)
def test_validator_on_member(self):
options = [1, 2]
validator = validators.OptionsValidator(options=options)
self.is_successful_validation(validator, 1)
class TestFilePathValidation(ValidationTestMixin):
def test_validator_on_invalid_filepath(self):
value = os.path.join(os.getcwd(), "thispathshouldnotexist.txt")
validator = validators.FilePathValidator()
self.is_unsuccessful_validation(validator, value)
def test_validator_on_valid_filepath(self):
value = os.path.join(os.getcwd(), ".gitignore")
validator = validators.FilePathValidator()
self.is_successful_validation(validator, value)
class TestResolvedVariableValidation(ValidationTestMixin):
def test_validator_on_resolved_var(self):
value = "${resolved_var}"
validator = validators.ResolvedVariableValidator()
self.is_successful_validation(validator, value)
is_comment_constant_removed: true | is_sharp_comment_removed: true
---
hexsha: 1c46658b4f19b64b4fc2645b72a26baec8f56676 | size: 1,490 | ext: py | lang: Python
max_stars: examples/python-guide/sklearn_example.py @ harunpehlivan/LightGBM (8ba65be9c93b79c095ea06e74de2cc5bf35ab169) | licenses: ["MIT"] | count: 59 | events: 2017-03-09T15:33:52.000Z – 2021-09-16T05:47:10.000Z
max_issues: examples/python-guide/sklearn_example.py @ harunpehlivan/LightGBM (8ba65be9c93b79c095ea06e74de2cc5bf35ab169) | licenses: ["MIT"] | count: 1 | events: 2017-03-09T07:43:02.000Z – 2017-04-09T19:34:06.000Z
max_forks: examples/python-guide/sklearn_example.py @ harunpehlivan/LightGBM (8ba65be9c93b79c095ea06e74de2cc5bf35ab169) | licenses: ["MIT"] | count: 17 | events: 2017-03-27T06:37:47.000Z – 2020-05-28T09:17:38.000Z

content:
# coding: utf-8
# pylint: disable = invalid-name, C0111
import lightgbm as lgb
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV
# load or create your dataset
print('Load data...')
df_train = pd.read_csv('../regression/regression.train', header=None, sep='\t')
df_test = pd.read_csv('../regression/regression.test', header=None, sep='\t')
y_train = df_train[0].values
y_test = df_test[0].values
X_train = df_train.drop(0, axis=1).values
X_test = df_test.drop(0, axis=1).values
print('Start training...')
# train
gbm = lgb.LGBMRegressor(objective='regression',
num_leaves=31,
learning_rate=0.05,
n_estimators=20)
gbm.fit(X_train, y_train,
eval_set=[(X_test, y_test)],
eval_metric='l1',
early_stopping_rounds=5)
print('Start predicting...')
# predict
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)
# eval
print('The rmse of prediction is:', mean_squared_error(y_test, y_pred) ** 0.5)
print('Calculate feature importances...')
# feature importances
print('Feature importances:', list(gbm.feature_importances_))
# other scikit-learn modules
estimator = lgb.LGBMRegressor(num_leaves=31)
param_grid = {
'learning_rate': [0.01, 0.1, 1],
'n_estimators': [20, 40]
}
gbm = GridSearchCV(estimator, param_grid)
gbm.fit(X_train, y_train)
print('Best parameters found by grid search are:', gbm.best_params_)
avg_line_length: 28.653846 | max_line_length: 79 | alphanum_fraction: 0.699329

content_no_comment:
import lightgbm as lgb
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV
print('Load data...')
df_train = pd.read_csv('../regression/regression.train', header=None, sep='\t')
df_test = pd.read_csv('../regression/regression.test', header=None, sep='\t')
y_train = df_train[0].values
y_test = df_test[0].values
X_train = df_train.drop(0, axis=1).values
X_test = df_test.drop(0, axis=1).values
print('Start training...')
gbm = lgb.LGBMRegressor(objective='regression',
num_leaves=31,
learning_rate=0.05,
n_estimators=20)
gbm.fit(X_train, y_train,
eval_set=[(X_test, y_test)],
eval_metric='l1',
early_stopping_rounds=5)
print('Start predicting...')
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)
print('The rmse of prediction is:', mean_squared_error(y_test, y_pred) ** 0.5)
print('Calculate feature importances...')
print('Feature importances:', list(gbm.feature_importances_))
estimator = lgb.LGBMRegressor(num_leaves=31)
param_grid = {
'learning_rate': [0.01, 0.1, 1],
'n_estimators': [20, 40]
}
gbm = GridSearchCV(estimator, param_grid)
gbm.fit(X_train, y_train)
print('Best parameters found by grid search are:', gbm.best_params_)
is_comment_constant_removed: true | is_sharp_comment_removed: true
---
hexsha: 1c4665f87ee43e6b0bf3543beac6d08684c8b95e | size: 1,638 | ext: py | lang: Python
max_stars: examples/01-filter/decimate.py @ sthagen/pyvista-pyvista (450db9a8d8ad2feea78e10368c47d9aa0e575e65) | licenses: ["MIT"] | count: 25 | events: 2018-12-03T18:22:58.000Z – 2019-02-26T01:30:35.000Z
max_issues: examples/01-filter/decimate.py @ sthagen/pyvista (ffba268b285925eb6103c8ff5072fcf1c0212c53) | licenses: ["MIT"] | count: 108 | events: 2019-02-27T19:52:12.000Z – 2019-05-08T02:15:21.000Z
max_forks: examples/01-filter/decimate.py @ pyvista/vista (c49a6abae7cc62d242f12ec45a6b22b524db1ec8) | licenses: ["MIT"] | count: 8 | events: 2019-03-02T13:41:48.000Z – 2019-04-22T16:57:44.000Z

content:
"""
.. _decimate_example:
Decimation
~~~~~~~~~~
Decimate a mesh
"""
# sphinx_gallery_thumbnail_number = 4
import pyvista as pv
from pyvista import examples
mesh = examples.download_face()
# Define a camera position that shows this mesh properly
cpos = [(0.4, -0.07, -0.31), (0.05, -0.13, -0.06), (-0.1, 1, 0.08)]
dargs = dict(show_edges=True, color=True)
# Preview the mesh
mesh.plot(cpos=cpos, **dargs)
###############################################################################
# Now let's define a target reduction and compare the
# :func:`pyvista.PolyData.decimate` and :func:`pyvista.PolyData.decimate_pro`
# filters.
target_reduction = 0.7
print(f"Reducing the mesh by {target_reduction * 100.0:.0f} percent")
###############################################################################
decimated = mesh.decimate(target_reduction)
decimated.plot(cpos=cpos, **dargs)
###############################################################################
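# ``decimate_pro`` can additionally preserve the surface topology while reducing.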
pro_decimated = mesh.decimate_pro(target_reduction, preserve_topology=True)
pro_decimated.plot(cpos=cpos, **dargs)
###############################################################################
# Side by side comparison:
p = pv.Plotter(shape=(1, 3))
p.add_mesh(mesh, **dargs)
p.add_text("Input mesh", font_size=24)
p.camera_position = cpos
p.reset_camera()
p.subplot(0, 1)
p.add_mesh(decimated, **dargs)
p.add_text("Decimated mesh", font_size=24)
p.camera_position = cpos
p.reset_camera()
p.subplot(0, 2)
p.add_mesh(pro_decimated, **dargs)
p.add_text("Pro Decimated mesh", font_size=24)
p.camera_position = cpos
p.reset_camera()
p.link_views()
p.show()
| 26.419355
| 79
| 0.589133
|
import pyvista as pv
from pyvista import examples
mesh = examples.download_face()
cpos = [(0.4, -0.07, -0.31), (0.05, -0.13, -0.06), (-0.1, 1, 0.08)]
dargs = dict(show_edges=True, color=True)
mesh.plot(cpos=cpos, **dargs)
| true
| true
|
1c46662dc412d1a358e2418e8c957b2eb8513589
| 1,984
|
py
|
Python
|
whitetube/migrations/0001_initial.py
|
AmanGiri007/youtube
|
b58009581378bf74cabfd791691dee65c9516685
|
[
"MIT"
] | null | null | null |
whitetube/migrations/0001_initial.py
|
AmanGiri007/youtube
|
b58009581378bf74cabfd791691dee65c9516685
|
[
"MIT"
] | null | null | null |
whitetube/migrations/0001_initial.py
|
AmanGiri007/youtube
|
b58009581378bf74cabfd791691dee65c9516685
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.7 on 2020-07-31 15:45
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Video',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=30)),
('description', models.TextField(max_length=300)),
('path', models.CharField(max_length=60)),
('datetime', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField(max_length=300)),
('datetime', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('video', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='whitetube.Video')),
],
),
migrations.CreateModel(
name='Channel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('channel_name', models.CharField(max_length=50)),
('subscribers', models.IntegerField(default=0)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 41.333333
| 118
| 0.602319
|
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Video',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=30)),
('description', models.TextField(max_length=300)),
('path', models.CharField(max_length=60)),
('datetime', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField(max_length=300)),
('datetime', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('video', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='whitetube.Video')),
],
),
migrations.CreateModel(
name='Channel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('channel_name', models.CharField(max_length=50)),
('subscribers', models.IntegerField(default=0)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| true
| true
|
1c4666b57524192be8803ca0b7c97e5673ce1bbb
| 7,276
|
py
|
Python
|
telethon/password.py
|
bb010g/Telethon
|
278f0e9e983d938589b6d541e71135ad5b6857c5
|
[
"MIT"
] | 2
|
2021-04-29T14:19:25.000Z
|
2021-09-17T07:13:49.000Z
|
telethon/password.py
|
exceloo/Telethon
|
30a0e390603072d3ec57a2f0eef0a297a9b0321b
|
[
"MIT"
] | 5
|
2021-04-30T21:14:18.000Z
|
2022-03-12T00:21:58.000Z
|
telethon/password.py
|
exceloo/Telethon
|
30a0e390603072d3ec57a2f0eef0a297a9b0321b
|
[
"MIT"
] | 1
|
2020-04-16T22:02:26.000Z
|
2020-04-16T22:02:26.000Z
|
import hashlib
import os
from .crypto import factorization
from .tl import types
def check_prime_and_good_check(prime: int, g: int):
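    # Sanity-check the DH parameters: p must be a 2048-bit safe prime and g
    # must satisfy the congruence conditions checked below.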
good_prime_bits_count = 2048
if prime < 0 or prime.bit_length() != good_prime_bits_count:
        raise ValueError('bad prime bit length {}, expected {}'
.format(prime.bit_length(), good_prime_bits_count))
# TODO This is awfully slow
if factorization.Factorization.factorize(prime)[0] != 1:
raise ValueError('given "prime" is not prime')
if g == 2:
if prime % 8 != 7:
raise ValueError('bad g {}, mod8 {}'.format(g, prime % 8))
elif g == 3:
if prime % 3 != 2:
raise ValueError('bad g {}, mod3 {}'.format(g, prime % 3))
elif g == 4:
pass
elif g == 5:
if prime % 5 not in (1, 4):
raise ValueError('bad g {}, mod5 {}'.format(g, prime % 5))
elif g == 6:
if prime % 24 not in (19, 23):
raise ValueError('bad g {}, mod24 {}'.format(g, prime % 24))
elif g == 7:
if prime % 7 not in (3, 5, 6):
raise ValueError('bad g {}, mod7 {}'.format(g, prime % 7))
else:
raise ValueError('bad g {}'.format(g))
prime_sub1_div2 = (prime - 1) // 2
if factorization.Factorization.factorize(prime_sub1_div2)[0] != 1:
raise ValueError('(prime - 1) // 2 is not prime')
# Else it's good
def check_prime_and_good(prime_bytes: bytes, g: int):
good_prime = bytes((
0xC7, 0x1C, 0xAE, 0xB9, 0xC6, 0xB1, 0xC9, 0x04, 0x8E, 0x6C, 0x52, 0x2F, 0x70, 0xF1, 0x3F, 0x73,
0x98, 0x0D, 0x40, 0x23, 0x8E, 0x3E, 0x21, 0xC1, 0x49, 0x34, 0xD0, 0x37, 0x56, 0x3D, 0x93, 0x0F,
0x48, 0x19, 0x8A, 0x0A, 0xA7, 0xC1, 0x40, 0x58, 0x22, 0x94, 0x93, 0xD2, 0x25, 0x30, 0xF4, 0xDB,
0xFA, 0x33, 0x6F, 0x6E, 0x0A, 0xC9, 0x25, 0x13, 0x95, 0x43, 0xAE, 0xD4, 0x4C, 0xCE, 0x7C, 0x37,
0x20, 0xFD, 0x51, 0xF6, 0x94, 0x58, 0x70, 0x5A, 0xC6, 0x8C, 0xD4, 0xFE, 0x6B, 0x6B, 0x13, 0xAB,
0xDC, 0x97, 0x46, 0x51, 0x29, 0x69, 0x32, 0x84, 0x54, 0xF1, 0x8F, 0xAF, 0x8C, 0x59, 0x5F, 0x64,
0x24, 0x77, 0xFE, 0x96, 0xBB, 0x2A, 0x94, 0x1D, 0x5B, 0xCD, 0x1D, 0x4A, 0xC8, 0xCC, 0x49, 0x88,
0x07, 0x08, 0xFA, 0x9B, 0x37, 0x8E, 0x3C, 0x4F, 0x3A, 0x90, 0x60, 0xBE, 0xE6, 0x7C, 0xF9, 0xA4,
0xA4, 0xA6, 0x95, 0x81, 0x10, 0x51, 0x90, 0x7E, 0x16, 0x27, 0x53, 0xB5, 0x6B, 0x0F, 0x6B, 0x41,
0x0D, 0xBA, 0x74, 0xD8, 0xA8, 0x4B, 0x2A, 0x14, 0xB3, 0x14, 0x4E, 0x0E, 0xF1, 0x28, 0x47, 0x54,
0xFD, 0x17, 0xED, 0x95, 0x0D, 0x59, 0x65, 0xB4, 0xB9, 0xDD, 0x46, 0x58, 0x2D, 0xB1, 0x17, 0x8D,
0x16, 0x9C, 0x6B, 0xC4, 0x65, 0xB0, 0xD6, 0xFF, 0x9C, 0xA3, 0x92, 0x8F, 0xEF, 0x5B, 0x9A, 0xE4,
0xE4, 0x18, 0xFC, 0x15, 0xE8, 0x3E, 0xBE, 0xA0, 0xF8, 0x7F, 0xA9, 0xFF, 0x5E, 0xED, 0x70, 0x05,
0x0D, 0xED, 0x28, 0x49, 0xF4, 0x7B, 0xF9, 0x59, 0xD9, 0x56, 0x85, 0x0C, 0xE9, 0x29, 0x85, 0x1F,
0x0D, 0x81, 0x15, 0xF6, 0x35, 0xB1, 0x05, 0xEE, 0x2E, 0x4E, 0x15, 0xD0, 0x4B, 0x24, 0x54, 0xBF,
0x6F, 0x4F, 0xAD, 0xF0, 0x34, 0xB1, 0x04, 0x03, 0x11, 0x9C, 0xD8, 0xE3, 0xB9, 0x2F, 0xCC, 0x5B))
if good_prime == prime_bytes:
if g in (3, 4, 5, 7):
return # It's good
check_prime_and_good_check(int.from_bytes(prime_bytes, 'big'), g)
def is_good_large(number: int, p: int) -> bool:
return number > 0 and p - number > 0
SIZE_FOR_HASH = 256
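# SRP values below are serialized as 256-byte (2048-bit) big-endian numbers.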
def num_bytes_for_hash(number: bytes) -> bytes:
return bytes(SIZE_FOR_HASH - len(number)) + number
def big_num_for_hash(g: int) -> bytes:
return g.to_bytes(SIZE_FOR_HASH, 'big')
def sha256(*p: bytes) -> bytes:
hash = hashlib.sha256()
for q in p:
hash.update(q)
return hash.digest()
def is_good_mod_exp_first(modexp, prime) -> bool:
diff = prime - modexp
min_diff_bits_count = 2048 - 64
max_mod_exp_size = 256
if diff < 0 or \
diff.bit_length() < min_diff_bits_count or \
modexp.bit_length() < min_diff_bits_count or \
(modexp.bit_length() + 7) // 8 > max_mod_exp_size:
return False
return True
def xor(a: bytes, b: bytes) -> bytes:
return bytes(x ^ y for x, y in zip(a, b))
def pbkdf2sha512(password: bytes, salt: bytes, iterations: int):
return hashlib.pbkdf2_hmac('sha512', password, salt, iterations)
def compute_hash(algo: types.PasswordKdfAlgoSHA256SHA256PBKDF2HMACSHA512iter100000SHA256ModPow,
password: str):
hash1 = sha256(algo.salt1, password.encode('utf-8'), algo.salt1)
hash2 = sha256(algo.salt2, hash1, algo.salt2)
hash3 = pbkdf2sha512(hash2, algo.salt1, 100000)
return sha256(algo.salt2, hash3, algo.salt2)
def compute_digest(algo: types.PasswordKdfAlgoSHA256SHA256PBKDF2HMACSHA512iter100000SHA256ModPow,
password: str):
try:
check_prime_and_good(algo.p, algo.g)
except ValueError:
raise ValueError('bad p/g in password')
value = pow(algo.g,
int.from_bytes(compute_hash(algo, password), 'big'),
int.from_bytes(algo.p, 'big'))
return big_num_for_hash(value)
# https://github.com/telegramdesktop/tdesktop/blob/18b74b90451a7db2379a9d753c9cbaf8734b4d5d/Telegram/SourceFiles/core/core_cloud_password.cpp
def compute_check(request: types.account.Password, password: str):
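    # Client side of Telegram's SRP 2FA check (see the tdesktop reference
    # above): derive x from the password, pick a random a, and build the
    # proof M1 without ever sending the password itself.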
algo = request.current_algo
if not isinstance(algo, types.PasswordKdfAlgoSHA256SHA256PBKDF2HMACSHA512iter100000SHA256ModPow):
raise ValueError('unsupported password algorithm {}'
.format(algo.__class__.__name__))
pw_hash = compute_hash(algo, password)
p = int.from_bytes(algo.p, 'big')
g = algo.g
B = int.from_bytes(request.srp_B, 'big')
try:
check_prime_and_good(algo.p, g)
except ValueError:
raise ValueError('bad p/g in password')
if not is_good_large(B, p):
raise ValueError('bad b in check')
x = int.from_bytes(pw_hash, 'big')
p_for_hash = num_bytes_for_hash(algo.p)
g_for_hash = big_num_for_hash(g)
b_for_hash = num_bytes_for_hash(request.srp_B)
g_x = pow(g, x, p)
k = int.from_bytes(sha256(p_for_hash, g_for_hash), 'big')
kg_x = (k * g_x) % p
def generate_and_check_random():
random_size = 256
import time
while True:
random = os.urandom(random_size)
a = int.from_bytes(random, 'big')
A = pow(g, a, p)
if is_good_mod_exp_first(A, p):
a_for_hash = big_num_for_hash(A)
u = int.from_bytes(sha256(a_for_hash, b_for_hash), 'big')
if u > 0:
return (a, a_for_hash, u)
print(A, 'bad for', p)
time.sleep(1)
a, a_for_hash, u = generate_and_check_random()
g_b = (B - kg_x) % p
if not is_good_mod_exp_first(g_b, p):
raise ValueError('bad g_b')
ux = u * x
a_ux = a + ux
S = pow(g_b, a_ux, p)
K = sha256(big_num_for_hash(S))
M1 = sha256(
xor(sha256(p_for_hash), sha256(g_for_hash)),
sha256(algo.salt1),
sha256(algo.salt2),
a_for_hash,
b_for_hash,
K
)
return types.InputCheckPasswordSRP(
request.srp_id, bytes(a_for_hash), bytes(M1))
| 36.562814
| 141
| 0.617097
|
import hashlib
import os
from .crypto import factorization
from .tl import types
def check_prime_and_good_check(prime: int, g: int):
good_prime_bits_count = 2048
if prime < 0 or prime.bit_length() != good_prime_bits_count:
        raise ValueError('bad prime bit length {}, expected {}'
.format(prime.bit_length(), good_prime_bits_count))
if factorization.Factorization.factorize(prime)[0] != 1:
raise ValueError('given "prime" is not prime')
if g == 2:
if prime % 8 != 7:
raise ValueError('bad g {}, mod8 {}'.format(g, prime % 8))
elif g == 3:
if prime % 3 != 2:
raise ValueError('bad g {}, mod3 {}'.format(g, prime % 3))
elif g == 4:
pass
elif g == 5:
if prime % 5 not in (1, 4):
raise ValueError('bad g {}, mod5 {}'.format(g, prime % 5))
elif g == 6:
if prime % 24 not in (19, 23):
raise ValueError('bad g {}, mod24 {}'.format(g, prime % 24))
elif g == 7:
if prime % 7 not in (3, 5, 6):
raise ValueError('bad g {}, mod7 {}'.format(g, prime % 7))
else:
raise ValueError('bad g {}'.format(g))
prime_sub1_div2 = (prime - 1) // 2
if factorization.Factorization.factorize(prime_sub1_div2)[0] != 1:
raise ValueError('(prime - 1) // 2 is not prime')
def check_prime_and_good(prime_bytes: bytes, g: int):
good_prime = bytes((
0xC7, 0x1C, 0xAE, 0xB9, 0xC6, 0xB1, 0xC9, 0x04, 0x8E, 0x6C, 0x52, 0x2F, 0x70, 0xF1, 0x3F, 0x73,
0x98, 0x0D, 0x40, 0x23, 0x8E, 0x3E, 0x21, 0xC1, 0x49, 0x34, 0xD0, 0x37, 0x56, 0x3D, 0x93, 0x0F,
0x48, 0x19, 0x8A, 0x0A, 0xA7, 0xC1, 0x40, 0x58, 0x22, 0x94, 0x93, 0xD2, 0x25, 0x30, 0xF4, 0xDB,
0xFA, 0x33, 0x6F, 0x6E, 0x0A, 0xC9, 0x25, 0x13, 0x95, 0x43, 0xAE, 0xD4, 0x4C, 0xCE, 0x7C, 0x37,
0x20, 0xFD, 0x51, 0xF6, 0x94, 0x58, 0x70, 0x5A, 0xC6, 0x8C, 0xD4, 0xFE, 0x6B, 0x6B, 0x13, 0xAB,
0xDC, 0x97, 0x46, 0x51, 0x29, 0x69, 0x32, 0x84, 0x54, 0xF1, 0x8F, 0xAF, 0x8C, 0x59, 0x5F, 0x64,
0x24, 0x77, 0xFE, 0x96, 0xBB, 0x2A, 0x94, 0x1D, 0x5B, 0xCD, 0x1D, 0x4A, 0xC8, 0xCC, 0x49, 0x88,
0x07, 0x08, 0xFA, 0x9B, 0x37, 0x8E, 0x3C, 0x4F, 0x3A, 0x90, 0x60, 0xBE, 0xE6, 0x7C, 0xF9, 0xA4,
0xA4, 0xA6, 0x95, 0x81, 0x10, 0x51, 0x90, 0x7E, 0x16, 0x27, 0x53, 0xB5, 0x6B, 0x0F, 0x6B, 0x41,
0x0D, 0xBA, 0x74, 0xD8, 0xA8, 0x4B, 0x2A, 0x14, 0xB3, 0x14, 0x4E, 0x0E, 0xF1, 0x28, 0x47, 0x54,
0xFD, 0x17, 0xED, 0x95, 0x0D, 0x59, 0x65, 0xB4, 0xB9, 0xDD, 0x46, 0x58, 0x2D, 0xB1, 0x17, 0x8D,
0x16, 0x9C, 0x6B, 0xC4, 0x65, 0xB0, 0xD6, 0xFF, 0x9C, 0xA3, 0x92, 0x8F, 0xEF, 0x5B, 0x9A, 0xE4,
0xE4, 0x18, 0xFC, 0x15, 0xE8, 0x3E, 0xBE, 0xA0, 0xF8, 0x7F, 0xA9, 0xFF, 0x5E, 0xED, 0x70, 0x05,
0x0D, 0xED, 0x28, 0x49, 0xF4, 0x7B, 0xF9, 0x59, 0xD9, 0x56, 0x85, 0x0C, 0xE9, 0x29, 0x85, 0x1F,
0x0D, 0x81, 0x15, 0xF6, 0x35, 0xB1, 0x05, 0xEE, 0x2E, 0x4E, 0x15, 0xD0, 0x4B, 0x24, 0x54, 0xBF,
0x6F, 0x4F, 0xAD, 0xF0, 0x34, 0xB1, 0x04, 0x03, 0x11, 0x9C, 0xD8, 0xE3, 0xB9, 0x2F, 0xCC, 0x5B))
if good_prime == prime_bytes:
if g in (3, 4, 5, 7):
return # It's good
check_prime_and_good_check(int.from_bytes(prime_bytes, 'big'), g)
def is_good_large(number: int, p: int) -> bool:
return number > 0 and p - number > 0
SIZE_FOR_HASH = 256
def num_bytes_for_hash(number: bytes) -> bytes:
return bytes(SIZE_FOR_HASH - len(number)) + number
def big_num_for_hash(g: int) -> bytes:
return g.to_bytes(SIZE_FOR_HASH, 'big')
def sha256(*p: bytes) -> bytes:
hash = hashlib.sha256()
for q in p:
hash.update(q)
return hash.digest()
def is_good_mod_exp_first(modexp, prime) -> bool:
diff = prime - modexp
min_diff_bits_count = 2048 - 64
max_mod_exp_size = 256
if diff < 0 or \
diff.bit_length() < min_diff_bits_count or \
modexp.bit_length() < min_diff_bits_count or \
(modexp.bit_length() + 7) // 8 > max_mod_exp_size:
return False
return True
def xor(a: bytes, b: bytes) -> bytes:
return bytes(x ^ y for x, y in zip(a, b))
def pbkdf2sha512(password: bytes, salt: bytes, iterations: int):
return hashlib.pbkdf2_hmac('sha512', password, salt, iterations)
def compute_hash(algo: types.PasswordKdfAlgoSHA256SHA256PBKDF2HMACSHA512iter100000SHA256ModPow,
password: str):
hash1 = sha256(algo.salt1, password.encode('utf-8'), algo.salt1)
hash2 = sha256(algo.salt2, hash1, algo.salt2)
hash3 = pbkdf2sha512(hash2, algo.salt1, 100000)
return sha256(algo.salt2, hash3, algo.salt2)
def compute_digest(algo: types.PasswordKdfAlgoSHA256SHA256PBKDF2HMACSHA512iter100000SHA256ModPow,
password: str):
try:
check_prime_and_good(algo.p, algo.g)
except ValueError:
raise ValueError('bad p/g in password')
value = pow(algo.g,
int.from_bytes(compute_hash(algo, password), 'big'),
int.from_bytes(algo.p, 'big'))
return big_num_for_hash(value)
def compute_check(request: types.account.Password, password: str):
algo = request.current_algo
if not isinstance(algo, types.PasswordKdfAlgoSHA256SHA256PBKDF2HMACSHA512iter100000SHA256ModPow):
raise ValueError('unsupported password algorithm {}'
.format(algo.__class__.__name__))
pw_hash = compute_hash(algo, password)
p = int.from_bytes(algo.p, 'big')
g = algo.g
B = int.from_bytes(request.srp_B, 'big')
try:
check_prime_and_good(algo.p, g)
except ValueError:
raise ValueError('bad p/g in password')
if not is_good_large(B, p):
raise ValueError('bad b in check')
x = int.from_bytes(pw_hash, 'big')
p_for_hash = num_bytes_for_hash(algo.p)
g_for_hash = big_num_for_hash(g)
b_for_hash = num_bytes_for_hash(request.srp_B)
g_x = pow(g, x, p)
k = int.from_bytes(sha256(p_for_hash, g_for_hash), 'big')
kg_x = (k * g_x) % p
def generate_and_check_random():
random_size = 256
import time
while True:
random = os.urandom(random_size)
a = int.from_bytes(random, 'big')
A = pow(g, a, p)
if is_good_mod_exp_first(A, p):
a_for_hash = big_num_for_hash(A)
u = int.from_bytes(sha256(a_for_hash, b_for_hash), 'big')
if u > 0:
return (a, a_for_hash, u)
print(A, 'bad for', p)
time.sleep(1)
a, a_for_hash, u = generate_and_check_random()
g_b = (B - kg_x) % p
if not is_good_mod_exp_first(g_b, p):
raise ValueError('bad g_b')
ux = u * x
a_ux = a + ux
S = pow(g_b, a_ux, p)
K = sha256(big_num_for_hash(S))
M1 = sha256(
xor(sha256(p_for_hash), sha256(g_for_hash)),
sha256(algo.salt1),
sha256(algo.salt2),
a_for_hash,
b_for_hash,
K
)
return types.InputCheckPasswordSRP(
request.srp_id, bytes(a_for_hash), bytes(M1))
| true
| true
|
1c46679f018a8751f25fbf73754145fb78d2528e
| 982
|
py
|
Python
|
controller/liff_controller.py
|
louis70109/LIFF-to-LIFF-Example
|
1e3d90f7989b5b69090a7b2e3a41c74b3ae3c90b
|
[
"MIT"
] | null | null | null |
controller/liff_controller.py
|
louis70109/LIFF-to-LIFF-Example
|
1e3d90f7989b5b69090a7b2e3a41c74b3ae3c90b
|
[
"MIT"
] | null | null | null |
controller/liff_controller.py
|
louis70109/LIFF-to-LIFF-Example
|
1e3d90f7989b5b69090a7b2e3a41c74b3ae3c90b
|
[
"MIT"
] | null | null | null |
import os
from flask import request, render_template, Response
from flask_restful import Resource
LIFF_A = os.getenv('LIFF_SHARE_A')
LIFF_B = os.getenv('LIFF_SHARE_B')
SHARE_A = f"https://liff.line.me/{LIFF_A}"
SHARE_B = f"https://liff.line.me/{LIFF_B}"
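# Each page links to the other one through its https://liff.line.me deep link.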
class LiffAController(Resource):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def get(self):
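        # LINE appends a 'liff.state' query parameter while routing through
        # the LIFF entry URL, so serve the client-side redirect page first.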
if request.args.get('liff.state'):
return Response(render_template('liff_redirect.html', liff_id=LIFF_A))
return Response(render_template('a.html', liff_id=LIFF_A, text='AAAAAAAAA', next=SHARE_B))
class LiffBController(Resource):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def get(self):
if request.args.get('liff.state'):
return Response(render_template('liff_redirect.html', liff_id=LIFF_B))
return Response(render_template('b.html', liff_id=LIFF_B, text='BBBBBBBBBB', next=SHARE_A))
| 33.862069
| 99
| 0.689409
|
import os
from flask import request, render_template, Response
from flask_restful import Resource
LIFF_A = os.getenv('LIFF_SHARE_A')
LIFF_B = os.getenv('LIFF_SHARE_B')
SHARE_A = f"https://liff.line.me/{LIFF_A}"
SHARE_B = f"https://liff.line.me/{LIFF_B}"
class LiffAController(Resource):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def get(self):
if request.args.get('liff.state'):
return Response(render_template('liff_redirect.html', liff_id=LIFF_A))
return Response(render_template('a.html', liff_id=LIFF_A, text='AAAAAAAAA', next=SHARE_B))
class LiffBController(Resource):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def get(self):
if request.args.get('liff.state'):
return Response(render_template('liff_redirect.html', liff_id=LIFF_B))
return Response(render_template('b.html', liff_id=LIFF_B, text='BBBBBBBBBB', next=SHARE_A))
| true
| true
|
1c4667e358c0ed6a68e36d3ec4c4fde1c83f9ab6
| 2,831
|
py
|
Python
|
members_only/views.py
|
TamasPalfi/FixedDB
|
be3e4e830b05099d33031759f4a7fc8a42f1e733
|
[
"BSD-2-Clause"
] | null | null | null |
members_only/views.py
|
TamasPalfi/FixedDB
|
be3e4e830b05099d33031759f4a7fc8a42f1e733
|
[
"BSD-2-Clause"
] | null | null | null |
members_only/views.py
|
TamasPalfi/FixedDB
|
be3e4e830b05099d33031759f4a7fc8a42f1e733
|
[
"BSD-2-Clause"
] | null | null | null |
from django.shortcuts import render
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.decorators import action
from members_only.models import User, Post, Comment, Photo, ShortLink
from members_only.serializers import UserSerializer, UserSetupSerializer, PostSerializer, CommentSerializer, PhotoSerializer, ShortLinkSerializer
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import TokenAuthentication, SessionAuthentication
# Create your views here.
# Front End Views
def index(request):
return render(request, "index.html")
def feed(request):
return render(request, "feed.html")
# Back End Views
class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all().order_by('-date_joined')
serializer_class = UserSerializer
permission_classes = [IsAuthenticated]
authentication_classes = [TokenAuthentication, SessionAuthentication]
@action(detail=False, methods=['post'], serializer_class=UserSetupSerializer, permission_classes=[])
def setup(self, request):
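        # Completes a password reset: the stored reset code must match the
        # submitted one before the new password is accepted.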
serializer = UserSetupSerializer(data=request.data)
if serializer.is_valid():
if User.objects.filter(username=serializer.data['email']).exists():
new_user = User.objects.get(username=serializer.data['email'])
if new_user.reset_code != serializer.data['reset_code'] or new_user.reset_code == "":
return Response({"message": "Incorrect reset code"})
new_user.reset_code = ""
new_user.set_password(serializer.data['password'])
                new_user.save()
                # report success so the view always returns a Response
                return Response({"message": "Password set"})
else:
return Response({"message": "User does not exist"})
else:
return Response({"message": "Invalid data"})
class PostViewSet(viewsets.ModelViewSet):
queryset = Post.objects.all().order_by('-timestamp')
serializer_class = PostSerializer
permission_classes = [IsAuthenticated]
authentication_classes = [TokenAuthentication, SessionAuthentication]
class CommentViewSet(viewsets.ModelViewSet):
queryset = Comment.objects.all().order_by('-timestamp')
serializer_class = CommentSerializer
permission_classes = [IsAuthenticated]
authentication_classes = [TokenAuthentication, SessionAuthentication]
class PhotoViewSet(viewsets.ModelViewSet):
queryset = Photo.objects.all()
serializer_class = PhotoSerializer
permission_classes = [IsAuthenticated]
authentication_classes = [TokenAuthentication, SessionAuthentication]
class ShortLinkViewSet(viewsets.ModelViewSet):
queryset = ShortLink.objects.all()
serializer_class = ShortLinkSerializer
permission_classes = [IsAuthenticated]
authentication_classes = [TokenAuthentication, SessionAuthentication]
| 37.746667
| 145
| 0.741434
|
from django.shortcuts import render
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.decorators import action
from members_only.models import User, Post, Comment, Photo, ShortLink
from members_only.serializers import UserSerializer, UserSetupSerializer, PostSerializer, CommentSerializer, PhotoSerializer, ShortLinkSerializer
from rest_framework.permissions import IsAuthenticated
from rest_framework.authentication import TokenAuthentication, SessionAuthentication
def index(request):
return render(request, "index.html")
def feed(request):
return render(request, "feed.html")
class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all().order_by('-date_joined')
serializer_class = UserSerializer
permission_classes = [IsAuthenticated]
authentication_classes = [TokenAuthentication, SessionAuthentication]
@action(detail=False, methods=['post'], serializer_class=UserSetupSerializer, permission_classes=[])
def setup(self, request):
serializer = UserSetupSerializer(data=request.data)
if serializer.is_valid():
if User.objects.filter(username=serializer.data['email']).exists():
new_user = User.objects.get(username=serializer.data['email'])
if new_user.reset_code != serializer.data['reset_code'] or new_user.reset_code == "":
return Response({"message": "Incorrect reset code"})
new_user.reset_code = ""
new_user.set_password(serializer.data['password'])
                new_user.save()
                return Response({"message": "Password set"})
else:
return Response({"message": "User does not exist"})
else:
return Response({"message": "Invalid data"})
class PostViewSet(viewsets.ModelViewSet):
queryset = Post.objects.all().order_by('-timestamp')
serializer_class = PostSerializer
permission_classes = [IsAuthenticated]
authentication_classes = [TokenAuthentication, SessionAuthentication]
class CommentViewSet(viewsets.ModelViewSet):
queryset = Comment.objects.all().order_by('-timestamp')
serializer_class = CommentSerializer
permission_classes = [IsAuthenticated]
authentication_classes = [TokenAuthentication, SessionAuthentication]
class PhotoViewSet(viewsets.ModelViewSet):
queryset = Photo.objects.all()
serializer_class = PhotoSerializer
permission_classes = [IsAuthenticated]
authentication_classes = [TokenAuthentication, SessionAuthentication]
class ShortLinkViewSet(viewsets.ModelViewSet):
queryset = ShortLink.objects.all()
serializer_class = ShortLinkSerializer
permission_classes = [IsAuthenticated]
authentication_classes = [TokenAuthentication, SessionAuthentication]
| true
| true
|
1c46683814368973d3a6672199085b7f7bf2c538
| 335
|
py
|
Python
|
challenge/D09/Point.py
|
pengfei99/AdventOfCode2021
|
7aeaf417521fc3acf6b34259b78b9534e1c9f912
|
[
"Apache-2.0"
] | 1
|
2022-03-27T09:48:34.000Z
|
2022-03-27T09:48:34.000Z
|
challenge/D09/Point.py
|
pengfei99/AdventOfCode2021
|
7aeaf417521fc3acf6b34259b78b9534e1c9f912
|
[
"Apache-2.0"
] | null | null | null |
challenge/D09/Point.py
|
pengfei99/AdventOfCode2021
|
7aeaf417521fc3acf6b34259b78b9534e1c9f912
|
[
"Apache-2.0"
] | null | null | null |
class Point:
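    # Minimal value holder with a boolean "marked" flag (Advent of Code 2021, day 9).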
def __init__(self, value):
self.value = value
self.mark = False
def __str__(self):
return f"value: {self.value}, marked: {self.mark}"
def get_value(self):
return self.value
def get_mark(self):
return self.mark
def mark_point(self):
self.mark = True
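# Usage sketch (illustrative, not part of the original module):
#   p = Point(5); p.mark_point()
#   assert p.get_mark() and p.get_value() == 5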
| 18.611111
| 58
| 0.58209
|
class Point:
def __init__(self, value):
self.value = value
self.mark = False
def __str__(self):
return f"value: {self.value}, marked: {self.mark}"
def get_value(self):
return self.value
def get_mark(self):
return self.mark
def mark_point(self):
self.mark = True
| true
| true
|
1c46686d9bf0cf7f5d046349ac7a9f5fc444fa5e
| 2,984
|
py
|
Python
|
openslides_backend/action/actions/committee/update.py
|
MJJojo97/openslides-backend
|
af0d1edb0070e352d46f285a1ba0bbe3702d49ae
|
[
"MIT"
] | null | null | null |
openslides_backend/action/actions/committee/update.py
|
MJJojo97/openslides-backend
|
af0d1edb0070e352d46f285a1ba0bbe3702d49ae
|
[
"MIT"
] | null | null | null |
openslides_backend/action/actions/committee/update.py
|
MJJojo97/openslides-backend
|
af0d1edb0070e352d46f285a1ba0bbe3702d49ae
|
[
"MIT"
] | null | null | null |
from typing import Any, Dict
from ....models.models import Committee
from ....permissions.management_levels import (
CommitteeManagementLevel,
OrganizationManagementLevel,
)
from ....permissions.permission_helper import (
has_committee_management_level,
has_organization_management_level,
)
from ....shared.exceptions import ActionException, MissingPermission
from ....shared.patterns import Collection, FullQualifiedId
from ...generics.update import UpdateAction
from ...util.default_schema import DefaultSchema
from ...util.register import register_action
from .committee_common_mixin import CommitteeCommonCreateUpdateMixin
@register_action("committee.update")
class CommitteeUpdateAction(CommitteeCommonCreateUpdateMixin, UpdateAction):
"""
Action to update a committee.
"""
model = Committee()
schema = DefaultSchema(Committee()).get_update_schema(
optional_properties=[
"name",
"description",
"default_meeting_id",
"forward_to_committee_ids",
"receive_forwardings_from_committee_ids",
"organization_tag_ids",
"user_$_management_level",
],
)
def update_instance(self, instance: Dict[str, Any]) -> Dict[str, Any]:
instance = super().update_instance(instance)
if instance.get("default_meeting_id"):
self.check_meeting_in_committee(
instance["default_meeting_id"], instance["id"]
)
return instance
def check_meeting_in_committee(self, meeting_id: int, committee_id: int) -> None:
meeting = self.datastore.get(
FullQualifiedId(Collection("meeting"), meeting_id), ["committee_id"]
)
if meeting.get("committee_id") != committee_id:
raise ActionException(
f"Meeting {meeting_id} does not belong to committee {committee_id}"
)
def check_permissions(self, instance: Dict[str, Any]) -> None:
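        # Organization-level managers may change everything; committee-level
        # managers may not touch forwarding relations or user management levels.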
if has_organization_management_level(
self.datastore,
self.user_id,
OrganizationManagementLevel.CAN_MANAGE_ORGANIZATION,
):
return
if any(
[
field in instance
for field in [
"forward_to_committee_ids",
"receive_forwardings_from_committee_ids",
"user_$_management_level",
]
]
):
raise MissingPermission(OrganizationManagementLevel.CAN_MANAGE_ORGANIZATION)
if has_committee_management_level(
self.datastore,
self.user_id,
CommitteeManagementLevel.CAN_MANAGE,
instance["id"],
):
return
raise MissingPermission(
{
OrganizationManagementLevel.CAN_MANAGE_ORGANIZATION: 1,
CommitteeManagementLevel.CAN_MANAGE: instance["id"],
}
)
| 33.155556
| 88
| 0.632373
|
from typing import Any, Dict
from ....models.models import Committee
from ....permissions.management_levels import (
CommitteeManagementLevel,
OrganizationManagementLevel,
)
from ....permissions.permission_helper import (
has_committee_management_level,
has_organization_management_level,
)
from ....shared.exceptions import ActionException, MissingPermission
from ....shared.patterns import Collection, FullQualifiedId
from ...generics.update import UpdateAction
from ...util.default_schema import DefaultSchema
from ...util.register import register_action
from .committee_common_mixin import CommitteeCommonCreateUpdateMixin
@register_action("committee.update")
class CommitteeUpdateAction(CommitteeCommonCreateUpdateMixin, UpdateAction):
model = Committee()
schema = DefaultSchema(Committee()).get_update_schema(
optional_properties=[
"name",
"description",
"default_meeting_id",
"forward_to_committee_ids",
"receive_forwardings_from_committee_ids",
"organization_tag_ids",
"user_$_management_level",
],
)
def update_instance(self, instance: Dict[str, Any]) -> Dict[str, Any]:
instance = super().update_instance(instance)
if instance.get("default_meeting_id"):
self.check_meeting_in_committee(
instance["default_meeting_id"], instance["id"]
)
return instance
def check_meeting_in_committee(self, meeting_id: int, committee_id: int) -> None:
meeting = self.datastore.get(
FullQualifiedId(Collection("meeting"), meeting_id), ["committee_id"]
)
if meeting.get("committee_id") != committee_id:
raise ActionException(
f"Meeting {meeting_id} does not belong to committee {committee_id}"
)
def check_permissions(self, instance: Dict[str, Any]) -> None:
if has_organization_management_level(
self.datastore,
self.user_id,
OrganizationManagementLevel.CAN_MANAGE_ORGANIZATION,
):
return
if any(
[
field in instance
for field in [
"forward_to_committee_ids",
"receive_forwardings_from_committee_ids",
"user_$_management_level",
]
]
):
raise MissingPermission(OrganizationManagementLevel.CAN_MANAGE_ORGANIZATION)
if has_committee_management_level(
self.datastore,
self.user_id,
CommitteeManagementLevel.CAN_MANAGE,
instance["id"],
):
return
raise MissingPermission(
{
OrganizationManagementLevel.CAN_MANAGE_ORGANIZATION: 1,
CommitteeManagementLevel.CAN_MANAGE: instance["id"],
}
)
| true
| true
|
1c4668cd01cebff276c684dd00b647ddfd8b4381
| 40,948
|
py
|
Python
|
venv/lib/python2.7/site-packages/cffi/vengine_cpy.py
|
deandunbar/html2bwml
|
32c06a93c8daf6a26c89c0de58fd39859d1ddb1e
|
[
"MIT"
] | 4
|
2017-09-17T03:27:47.000Z
|
2020-04-29T00:10:20.000Z
|
venv/lib/python2.7/site-packages/cffi/vengine_cpy.py
|
deandunbar/html2bwml
|
32c06a93c8daf6a26c89c0de58fd39859d1ddb1e
|
[
"MIT"
] | null | null | null |
venv/lib/python2.7/site-packages/cffi/vengine_cpy.py
|
deandunbar/html2bwml
|
32c06a93c8daf6a26c89c0de58fd39859d1ddb1e
|
[
"MIT"
] | 5
|
2017-09-20T08:08:43.000Z
|
2022-02-02T08:19:30.000Z
|
import sys, imp
from . import model, ffiplatform
class VCPythonEngine(object):
_class_key = 'x'
_gen_python_module = True
def __init__(self, verifier):
self.verifier = verifier
self.ffi = verifier.ffi
self._struct_pending_verification = {}
self._types_of_builtin_functions = {}
def patch_extension_kwds(self, kwds):
pass
def find_module(self, module_name, path, so_suffixes):
try:
f, filename, descr = imp.find_module(module_name, path)
except ImportError:
return None
if f is not None:
f.close()
# Note that after a setuptools installation, there are both .py
# and .so files with the same basename. The code here relies on
# imp.find_module() locating the .so in priority.
if descr[0] not in so_suffixes:
return None
return filename
def collect_types(self):
self._typesdict = {}
self._generate("collecttype")
def _prnt(self, what=''):
self._f.write(what + '\n')
def _gettypenum(self, type):
# a KeyError here is a bug. please report it! :-)
return self._typesdict[type]
def _do_collect_type(self, tp):
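        # Assign a stable index to each distinct ctype; primitive types other
        # than 'long double' are converted inline and need no slot.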
if ((not isinstance(tp, model.PrimitiveType)
or tp.name == 'long double')
and tp not in self._typesdict):
num = len(self._typesdict)
self._typesdict[tp] = num
def write_source_to_f(self):
self.collect_types()
#
# The new module will have a _cffi_setup() function that receives
# objects from the ffi world, and that calls some setup code in
# the module. This setup code is split in several independent
# functions, e.g. one per constant. The functions are "chained"
# by ending in a tail call to each other.
#
# This is further split in two chained lists, depending on if we
# can do it at import-time or if we must wait for _cffi_setup() to
# provide us with the <ctype> objects. This is needed because we
# need the values of the enum constants in order to build the
# <ctype 'enum'> that we may have to pass to _cffi_setup().
#
        # The following two 'chained_list_constants' items contain
# the head of these two chained lists, as a string that gives the
# call to do, if any.
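        # For example, the generated constant initializers end up as
        #   static int _cffi_const_FOO(PyObject *lib) { ...; return _cffi_const_BAR(lib); }
        # i.e. each one tail-calls the previous head of the chain.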
self._chained_list_constants = ['((void)lib,0)', '((void)lib,0)']
#
prnt = self._prnt
# first paste some standard set of lines that are mostly '#define'
prnt(cffimod_header)
prnt()
# then paste the C source given by the user, verbatim.
prnt(self.verifier.preamble)
prnt()
#
# call generate_cpy_xxx_decl(), for every xxx found from
# ffi._parser._declarations. This generates all the functions.
self._generate("decl")
#
# implement the function _cffi_setup_custom() as calling the
# head of the chained list.
self._generate_setup_custom()
prnt()
#
# produce the method table, including the entries for the
# generated Python->C function wrappers, which are done
# by generate_cpy_function_method().
prnt('static PyMethodDef _cffi_methods[] = {')
self._generate("method")
prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS, NULL},')
prnt(' {NULL, NULL, 0, NULL} /* Sentinel */')
prnt('};')
prnt()
#
# standard init.
modname = self.verifier.get_module_name()
constants = self._chained_list_constants[False]
prnt('#if PY_MAJOR_VERSION >= 3')
prnt()
prnt('static struct PyModuleDef _cffi_module_def = {')
prnt(' PyModuleDef_HEAD_INIT,')
prnt(' "%s",' % modname)
prnt(' NULL,')
prnt(' -1,')
prnt(' _cffi_methods,')
prnt(' NULL, NULL, NULL, NULL')
prnt('};')
prnt()
prnt('PyMODINIT_FUNC')
prnt('PyInit_%s(void)' % modname)
prnt('{')
prnt(' PyObject *lib;')
prnt(' lib = PyModule_Create(&_cffi_module_def);')
prnt(' if (lib == NULL)')
prnt(' return NULL;')
prnt(' if (%s < 0 || _cffi_init() < 0) {' % (constants,))
prnt(' Py_DECREF(lib);')
prnt(' return NULL;')
prnt(' }')
prnt(' return lib;')
prnt('}')
prnt()
prnt('#else')
prnt()
prnt('PyMODINIT_FUNC')
prnt('init%s(void)' % modname)
prnt('{')
prnt(' PyObject *lib;')
prnt(' lib = Py_InitModule("%s", _cffi_methods);' % modname)
prnt(' if (lib == NULL)')
prnt(' return;')
prnt(' if (%s < 0 || _cffi_init() < 0)' % (constants,))
prnt(' return;')
prnt(' return;')
prnt('}')
prnt()
prnt('#endif')
def load_library(self, flags=None):
# XXX review all usages of 'self' here!
# import it as a new extension module
if hasattr(sys, "getdlopenflags"):
previous_flags = sys.getdlopenflags()
try:
if hasattr(sys, "setdlopenflags") and flags is not None:
sys.setdlopenflags(flags)
module = imp.load_dynamic(self.verifier.get_module_name(),
self.verifier.modulefilename)
except ImportError as e:
error = "importing %r: %s" % (self.verifier.modulefilename, e)
raise ffiplatform.VerificationError(error)
finally:
if hasattr(sys, "setdlopenflags"):
sys.setdlopenflags(previous_flags)
#
# call loading_cpy_struct() to get the struct layout inferred by
# the C compiler
self._load(module, 'loading')
#
# the C code will need the <ctype> objects. Collect them in
# order in a list.
revmapping = dict([(value, key)
for (key, value) in self._typesdict.items()])
lst = [revmapping[i] for i in range(len(revmapping))]
lst = list(map(self.ffi._get_cached_btype, lst))
#
# build the FFILibrary class and instance and call _cffi_setup().
# this will set up some fields like '_cffi_types', and only then
# it will invoke the chained list of functions that will really
# build (notably) the constant objects, as <cdata> if they are
# pointers, and store them as attributes on the 'library' object.
class FFILibrary(object):
_cffi_python_module = module
_cffi_ffi = self.ffi
_cffi_dir = []
def __dir__(self):
return FFILibrary._cffi_dir + list(self.__dict__)
library = FFILibrary()
if module._cffi_setup(lst, ffiplatform.VerificationError, library):
import warnings
warnings.warn("reimporting %r might overwrite older definitions"
% (self.verifier.get_module_name()))
#
# finally, call the loaded_cpy_xxx() functions. This will perform
# the final adjustments, like copying the Python->C wrapper
# functions from the module to the 'library' object, and setting
# up the FFILibrary class with properties for the global C variables.
self._load(module, 'loaded', library=library)
module._cffi_original_ffi = self.ffi
module._cffi_types_of_builtin_funcs = self._types_of_builtin_functions
return library
def _get_declarations(self):
return sorted(self.ffi._parser._declarations.items())
def _generate(self, step_name):
for name, tp in self._get_declarations():
kind, realname = name.split(' ', 1)
try:
method = getattr(self, '_generate_cpy_%s_%s' % (kind,
step_name))
except AttributeError:
raise ffiplatform.VerificationError(
"not implemented in verify(): %r" % name)
try:
method(tp, realname)
except Exception as e:
model.attach_exception_info(e, name)
raise
def _load(self, module, step_name, **kwds):
for name, tp in self._get_declarations():
kind, realname = name.split(' ', 1)
method = getattr(self, '_%s_cpy_%s' % (step_name, kind))
try:
method(tp, realname, module, **kwds)
except Exception as e:
model.attach_exception_info(e, name)
raise
def _generate_nothing(self, tp, name):
pass
def _loaded_noop(self, tp, name, module, **kwds):
pass
# ----------
def _convert_funcarg_to_c(self, tp, fromvar, tovar, errcode):
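        # Emit C code converting the Python object 'fromvar' into the C
        # variable 'tovar', executing 'errcode' on conversion failure.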
extraarg = ''
if isinstance(tp, model.PrimitiveType):
if tp.is_integer_type() and tp.name != '_Bool':
converter = '_cffi_to_c_int'
extraarg = ', %s' % tp.name
else:
converter = '(%s)_cffi_to_c_%s' % (tp.get_c_name(''),
tp.name.replace(' ', '_'))
errvalue = '-1'
#
elif isinstance(tp, model.PointerType):
self._convert_funcarg_to_c_ptr_or_array(tp, fromvar,
tovar, errcode)
return
#
elif isinstance(tp, (model.StructOrUnion, model.EnumType)):
# a struct (not a struct pointer) as a function argument
self._prnt(' if (_cffi_to_c((char *)&%s, _cffi_type(%d), %s) < 0)'
% (tovar, self._gettypenum(tp), fromvar))
self._prnt(' %s;' % errcode)
return
#
elif isinstance(tp, model.FunctionPtrType):
converter = '(%s)_cffi_to_c_pointer' % tp.get_c_name('')
extraarg = ', _cffi_type(%d)' % self._gettypenum(tp)
errvalue = 'NULL'
#
else:
raise NotImplementedError(tp)
#
self._prnt(' %s = %s(%s%s);' % (tovar, converter, fromvar, extraarg))
self._prnt(' if (%s == (%s)%s && PyErr_Occurred())' % (
tovar, tp.get_c_name(''), errvalue))
self._prnt(' %s;' % errcode)
def _extra_local_variables(self, tp, localvars):
if isinstance(tp, model.PointerType):
localvars.add('Py_ssize_t datasize')
def _convert_funcarg_to_c_ptr_or_array(self, tp, fromvar, tovar, errcode):
self._prnt(' datasize = _cffi_prepare_pointer_call_argument(')
self._prnt(' _cffi_type(%d), %s, (char **)&%s);' % (
self._gettypenum(tp), fromvar, tovar))
self._prnt(' if (datasize != 0) {')
self._prnt(' if (datasize < 0)')
self._prnt(' %s;' % errcode)
self._prnt(' %s = alloca((size_t)datasize);' % (tovar,))
self._prnt(' memset((void *)%s, 0, (size_t)datasize);' % (tovar,))
self._prnt(' if (_cffi_convert_array_from_object('
'(char *)%s, _cffi_type(%d), %s) < 0)' % (
tovar, self._gettypenum(tp), fromvar))
self._prnt(' %s;' % errcode)
self._prnt(' }')
def _convert_expr_from_c(self, tp, var, context):
if isinstance(tp, model.PrimitiveType):
if tp.is_integer_type():
return '_cffi_from_c_int(%s, %s)' % (var, tp.name)
elif tp.name != 'long double':
return '_cffi_from_c_%s(%s)' % (tp.name.replace(' ', '_'), var)
else:
return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
elif isinstance(tp, (model.PointerType, model.FunctionPtrType)):
return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
elif isinstance(tp, model.ArrayType):
return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % (
var, self._gettypenum(model.PointerType(tp.item)))
elif isinstance(tp, model.StructType):
if tp.fldnames is None:
raise TypeError("'%s' is used as %s, but is opaque" % (
tp._get_c_name(), context))
return '_cffi_from_c_struct((char *)&%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
elif isinstance(tp, model.EnumType):
return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
else:
raise NotImplementedError(tp)
# ----------
# typedefs: generates no code so far
_generate_cpy_typedef_collecttype = _generate_nothing
_generate_cpy_typedef_decl = _generate_nothing
_generate_cpy_typedef_method = _generate_nothing
_loading_cpy_typedef = _loaded_noop
_loaded_cpy_typedef = _loaded_noop
# ----------
# function declarations
def _generate_cpy_function_collecttype(self, tp, name):
assert isinstance(tp, model.FunctionPtrType)
if tp.ellipsis:
self._do_collect_type(tp)
else:
# don't call _do_collect_type(tp) in this common case,
# otherwise test_autofilled_struct_as_argument fails
for type in tp.args:
self._do_collect_type(type)
self._do_collect_type(tp.result)
def _generate_cpy_function_decl(self, tp, name):
assert isinstance(tp, model.FunctionPtrType)
if tp.ellipsis:
# cannot support vararg functions better than this: check for its
# exact type (including the fixed arguments), and build it as a
# constant function pointer (no CPython wrapper)
self._generate_cpy_const(False, name, tp)
return
prnt = self._prnt
numargs = len(tp.args)
if numargs == 0:
argname = 'noarg'
elif numargs == 1:
argname = 'arg0'
else:
argname = 'args'
prnt('static PyObject *')
prnt('_cffi_f_%s(PyObject *self, PyObject *%s)' % (name, argname))
prnt('{')
#
context = 'argument of %s' % name
for i, type in enumerate(tp.args):
prnt(' %s;' % type.get_c_name(' x%d' % i, context))
#
localvars = set()
for type in tp.args:
self._extra_local_variables(type, localvars)
for decl in localvars:
prnt(' %s;' % (decl,))
#
if not isinstance(tp.result, model.VoidType):
result_code = 'result = '
context = 'result of %s' % name
prnt(' %s;' % tp.result.get_c_name(' result', context))
else:
result_code = ''
#
if len(tp.args) > 1:
rng = range(len(tp.args))
for i in rng:
prnt(' PyObject *arg%d;' % i)
prnt()
prnt(' if (!PyArg_ParseTuple(args, "%s:%s", %s))' % (
'O' * numargs, name, ', '.join(['&arg%d' % i for i in rng])))
prnt(' return NULL;')
prnt()
#
for i, type in enumerate(tp.args):
self._convert_funcarg_to_c(type, 'arg%d' % i, 'x%d' % i,
'return NULL')
prnt()
#
prnt(' Py_BEGIN_ALLOW_THREADS')
prnt(' _cffi_restore_errno();')
prnt(' { %s%s(%s); }' % (
result_code, name,
', '.join(['x%d' % i for i in range(len(tp.args))])))
prnt(' _cffi_save_errno();')
prnt(' Py_END_ALLOW_THREADS')
prnt()
#
prnt(' (void)self; /* unused */')
if numargs == 0:
prnt(' (void)noarg; /* unused */')
if result_code:
prnt(' return %s;' %
self._convert_expr_from_c(tp.result, 'result', 'result type'))
else:
prnt(' Py_INCREF(Py_None);')
prnt(' return Py_None;')
prnt('}')
prnt()
def _generate_cpy_function_method(self, tp, name):
if tp.ellipsis:
return
numargs = len(tp.args)
if numargs == 0:
meth = 'METH_NOARGS'
elif numargs == 1:
meth = 'METH_O'
else:
meth = 'METH_VARARGS'
self._prnt(' {"%s", _cffi_f_%s, %s, NULL},' % (name, name, meth))
_loading_cpy_function = _loaded_noop
def _loaded_cpy_function(self, tp, name, module, library):
if tp.ellipsis:
return
func = getattr(module, name)
setattr(library, name, func)
self._types_of_builtin_functions[func] = tp
# ----------
# named structs
_generate_cpy_struct_collecttype = _generate_nothing
def _generate_cpy_struct_decl(self, tp, name):
assert name == tp.name
self._generate_struct_or_union_decl(tp, 'struct', name)
def _generate_cpy_struct_method(self, tp, name):
self._generate_struct_or_union_method(tp, 'struct', name)
def _loading_cpy_struct(self, tp, name, module):
self._loading_struct_or_union(tp, 'struct', name, module)
def _loaded_cpy_struct(self, tp, name, module, **kwds):
self._loaded_struct_or_union(tp)
_generate_cpy_union_collecttype = _generate_nothing
def _generate_cpy_union_decl(self, tp, name):
assert name == tp.name
self._generate_struct_or_union_decl(tp, 'union', name)
def _generate_cpy_union_method(self, tp, name):
self._generate_struct_or_union_method(tp, 'union', name)
def _loading_cpy_union(self, tp, name, module):
self._loading_struct_or_union(tp, 'union', name, module)
def _loaded_cpy_union(self, tp, name, module, **kwds):
self._loaded_struct_or_union(tp)
def _generate_struct_or_union_decl(self, tp, prefix, name):
if tp.fldnames is None:
return # nothing to do with opaque structs
checkfuncname = '_cffi_check_%s_%s' % (prefix, name)
layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
cname = ('%s %s' % (prefix, name)).strip()
#
prnt = self._prnt
prnt('static void %s(%s *p)' % (checkfuncname, cname))
prnt('{')
prnt(' /* only to generate compile-time warnings or errors */')
prnt(' (void)p;')
for fname, ftype, fbitsize in tp.enumfields():
if (isinstance(ftype, model.PrimitiveType)
and ftype.is_integer_type()) or fbitsize >= 0:
# accept all integers, but complain on float or double
prnt(' (void)((p->%s) << 1);' % fname)
else:
# only accept exactly the type declared.
try:
prnt(' { %s = &p->%s; (void)tmp; }' % (
ftype.get_c_name('*tmp', 'field %r'%fname), fname))
except ffiplatform.VerificationError as e:
prnt(' /* %s */' % str(e)) # cannot verify it, ignore
prnt('}')
prnt('static PyObject *')
prnt('%s(PyObject *self, PyObject *noarg)' % (layoutfuncname,))
prnt('{')
prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname)
prnt(' static Py_ssize_t nums[] = {')
prnt(' sizeof(%s),' % cname)
prnt(' offsetof(struct _cffi_aligncheck, y),')
for fname, ftype, fbitsize in tp.enumfields():
if fbitsize >= 0:
continue # xxx ignore fbitsize for now
prnt(' offsetof(%s, %s),' % (cname, fname))
if isinstance(ftype, model.ArrayType) and ftype.length is None:
prnt(' 0, /* %s */' % ftype._get_c_name())
else:
prnt(' sizeof(((%s *)0)->%s),' % (cname, fname))
prnt(' -1')
prnt(' };')
prnt(' (void)self; /* unused */')
prnt(' (void)noarg; /* unused */')
prnt(' return _cffi_get_struct_layout(nums);')
prnt(' /* the next line is not executed, but compiled */')
prnt(' %s(0);' % (checkfuncname,))
prnt('}')
prnt()
def _generate_struct_or_union_method(self, tp, prefix, name):
if tp.fldnames is None:
return # nothing to do with opaque structs
layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
self._prnt(' {"%s", %s, METH_NOARGS, NULL},' % (layoutfuncname,
layoutfuncname))
def _loading_struct_or_union(self, tp, prefix, name, module):
if tp.fldnames is None:
return # nothing to do with opaque structs
layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
#
function = getattr(module, layoutfuncname)
layout = function()
if isinstance(tp, model.StructOrUnion) and tp.partial:
# use the function()'s sizes and offsets to guide the
# layout of the struct
totalsize = layout[0]
totalalignment = layout[1]
fieldofs = layout[2::2]
fieldsize = layout[3::2]
tp.force_flatten()
assert len(fieldofs) == len(fieldsize) == len(tp.fldnames)
tp.fixedlayout = fieldofs, fieldsize, totalsize, totalalignment
else:
cname = ('%s %s' % (prefix, name)).strip()
self._struct_pending_verification[tp] = layout, cname
def _loaded_struct_or_union(self, tp):
if tp.fldnames is None:
return # nothing to do with opaque structs
self.ffi._get_cached_btype(tp) # force 'fixedlayout' to be considered
if tp in self._struct_pending_verification:
# check that the layout sizes and offsets match the real ones
def check(realvalue, expectedvalue, msg):
if realvalue != expectedvalue:
raise ffiplatform.VerificationError(
"%s (we have %d, but C compiler says %d)"
% (msg, expectedvalue, realvalue))
ffi = self.ffi
BStruct = ffi._get_cached_btype(tp)
layout, cname = self._struct_pending_verification.pop(tp)
check(layout[0], ffi.sizeof(BStruct), "wrong total size")
check(layout[1], ffi.alignof(BStruct), "wrong total alignment")
i = 2
for fname, ftype, fbitsize in tp.enumfields():
if fbitsize >= 0:
continue # xxx ignore fbitsize for now
check(layout[i], ffi.offsetof(BStruct, fname),
"wrong offset for field %r" % (fname,))
if layout[i+1] != 0:
BField = ffi._get_cached_btype(ftype)
check(layout[i+1], ffi.sizeof(BField),
"wrong size for field %r" % (fname,))
i += 2
assert i == len(layout)
# ----------
# 'anonymous' declarations. These are produced for anonymous structs
# or unions; the 'name' is obtained by a typedef.
_generate_cpy_anonymous_collecttype = _generate_nothing
def _generate_cpy_anonymous_decl(self, tp, name):
if isinstance(tp, model.EnumType):
self._generate_cpy_enum_decl(tp, name, '')
else:
self._generate_struct_or_union_decl(tp, '', name)
def _generate_cpy_anonymous_method(self, tp, name):
if not isinstance(tp, model.EnumType):
self._generate_struct_or_union_method(tp, '', name)
def _loading_cpy_anonymous(self, tp, name, module):
if isinstance(tp, model.EnumType):
self._loading_cpy_enum(tp, name, module)
else:
self._loading_struct_or_union(tp, '', name, module)
def _loaded_cpy_anonymous(self, tp, name, module, **kwds):
if isinstance(tp, model.EnumType):
self._loaded_cpy_enum(tp, name, module, **kwds)
else:
self._loaded_struct_or_union(tp)
# ----------
# constants, likely declared with '#define'
def _generate_cpy_const(self, is_int, name, tp=None, category='const',
vartp=None, delayed=True, size_too=False,
check_value=None):
prnt = self._prnt
funcname = '_cffi_%s_%s' % (category, name)
prnt('static int %s(PyObject *lib)' % funcname)
prnt('{')
prnt(' PyObject *o;')
prnt(' int res;')
if not is_int:
prnt(' %s;' % (vartp or tp).get_c_name(' i', name))
else:
assert category == 'const'
#
if check_value is not None:
self._check_int_constant_value(name, check_value)
#
if not is_int:
if category == 'var':
realexpr = '&' + name
else:
realexpr = name
prnt(' i = (%s);' % (realexpr,))
prnt(' o = %s;' % (self._convert_expr_from_c(tp, 'i',
'variable type'),))
assert delayed
else:
prnt(' o = _cffi_from_c_int_const(%s);' % name)
prnt(' if (o == NULL)')
prnt(' return -1;')
if size_too:
prnt(' {')
prnt(' PyObject *o1 = o;')
prnt(' o = Py_BuildValue("On", o1, (Py_ssize_t)sizeof(%s));'
% (name,))
prnt(' Py_DECREF(o1);')
prnt(' if (o == NULL)')
prnt(' return -1;')
prnt(' }')
prnt(' res = PyObject_SetAttrString(lib, "%s", o);' % name)
prnt(' Py_DECREF(o);')
prnt(' if (res < 0)')
prnt(' return -1;')
prnt(' return %s;' % self._chained_list_constants[delayed])
self._chained_list_constants[delayed] = funcname + '(lib)'
prnt('}')
prnt()
def _generate_cpy_constant_collecttype(self, tp, name):
is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type()
if not is_int:
self._do_collect_type(tp)
def _generate_cpy_constant_decl(self, tp, name):
is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type()
self._generate_cpy_const(is_int, name, tp)
_generate_cpy_constant_method = _generate_nothing
_loading_cpy_constant = _loaded_noop
_loaded_cpy_constant = _loaded_noop
# ----------
# enums
def _check_int_constant_value(self, name, value, err_prefix=''):
prnt = self._prnt
if value <= 0:
prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % (
name, name, value))
else:
prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % (
name, name, value))
prnt(' char buf[64];')
prnt(' if ((%s) <= 0)' % name)
prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % name)
prnt(' else')
prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' %
name)
prnt(' PyErr_Format(_cffi_VerificationError,')
prnt(' "%s%s has the real value %s, not %s",')
prnt(' "%s", "%s", buf, "%d");' % (
err_prefix, name, value))
prnt(' return -1;')
prnt(' }')
def _enum_funcname(self, prefix, name):
# "$enum_$1" => "___D_enum____D_1"
name = name.replace('$', '___D_')
return '_cffi_e_%s_%s' % (prefix, name)
def _generate_cpy_enum_decl(self, tp, name, prefix='enum'):
if tp.partial:
for enumerator in tp.enumerators:
self._generate_cpy_const(True, enumerator, delayed=False)
return
#
funcname = self._enum_funcname(prefix, name)
prnt = self._prnt
prnt('static int %s(PyObject *lib)' % funcname)
prnt('{')
for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
self._check_int_constant_value(enumerator, enumvalue,
"enum %s: " % name)
prnt(' return %s;' % self._chained_list_constants[True])
self._chained_list_constants[True] = funcname + '(lib)'
prnt('}')
prnt()
_generate_cpy_enum_collecttype = _generate_nothing
_generate_cpy_enum_method = _generate_nothing
def _loading_cpy_enum(self, tp, name, module):
if tp.partial:
enumvalues = [getattr(module, enumerator)
for enumerator in tp.enumerators]
tp.enumvalues = tuple(enumvalues)
tp.partial_resolved = True
def _loaded_cpy_enum(self, tp, name, module, library):
for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
setattr(library, enumerator, enumvalue)
# ----------
# macros: for now only for integers
def _generate_cpy_macro_decl(self, tp, name):
if tp == '...':
check_value = None
else:
check_value = tp # an integer
self._generate_cpy_const(True, name, check_value=check_value)
_generate_cpy_macro_collecttype = _generate_nothing
_generate_cpy_macro_method = _generate_nothing
_loading_cpy_macro = _loaded_noop
_loaded_cpy_macro = _loaded_noop
# ----------
# global variables
def _generate_cpy_variable_collecttype(self, tp, name):
if isinstance(tp, model.ArrayType):
tp_ptr = model.PointerType(tp.item)
else:
tp_ptr = model.PointerType(tp)
self._do_collect_type(tp_ptr)
def _generate_cpy_variable_decl(self, tp, name):
if isinstance(tp, model.ArrayType):
tp_ptr = model.PointerType(tp.item)
self._generate_cpy_const(False, name, tp, vartp=tp_ptr,
size_too = (tp.length == '...'))
else:
tp_ptr = model.PointerType(tp)
self._generate_cpy_const(False, name, tp_ptr, category='var')
_generate_cpy_variable_method = _generate_nothing
_loading_cpy_variable = _loaded_noop
def _loaded_cpy_variable(self, tp, name, module, library):
value = getattr(library, name)
if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the
# sense that "a=..." is forbidden
if tp.length == '...':
assert isinstance(value, tuple)
(value, size) = value
BItemType = self.ffi._get_cached_btype(tp.item)
length, rest = divmod(size, self.ffi.sizeof(BItemType))
if rest != 0:
raise ffiplatform.VerificationError(
"bad size: %r does not seem to be an array of %s" %
(name, tp.item))
tp = tp.resolve_length(length)
# 'value' is a <cdata 'type *'> which we have to replace with
# a <cdata 'type[N]'> if the N is actually known
if tp.length is not None:
BArray = self.ffi._get_cached_btype(tp)
value = self.ffi.cast(BArray, value)
setattr(library, name, value)
return
# remove ptr=<cdata 'int *'> from the library instance, and replace
# it by a property on the class, which reads/writes into ptr[0].
ptr = value
delattr(library, name)
def getter(library):
return ptr[0]
def setter(library, value):
ptr[0] = value
setattr(type(library), name, property(getter, setter))
type(library)._cffi_dir.append(name)
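    # The property installed above exposes the C global through ptr[0]:
    # attribute reads and writes on the FFILibrary class are forwarded to the
    # underlying <cdata 'type *'>, so "lib.var = x" updates the real C object.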
# ----------
def _generate_setup_custom(self):
prnt = self._prnt
prnt('static int _cffi_setup_custom(PyObject *lib)')
prnt('{')
prnt(' return %s;' % self._chained_list_constants[True])
prnt('}')
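# Editor's illustration (a minimal sketch, not part of the original module):
# the generator threads every constant/enum initializer through
# _chained_list_constants, so each emitted C function tail-calls the one
# registered before it, and _cffi_setup_custom() fires the whole chain with a
# single call. A pure-Python analogue of that chaining pattern:
def _chained_setup_demo():
    chain = lambda lib: 0                # terminator, mirrors '((void)lib,0)'
    def make_setter(name, value, prev):
        def setter(lib):
            lib[name] = value            # the C code does PyObject_SetAttrString
            return prev(lib)             # then falls through to the previous setter
        return setter
    for name, value in [("ANSWER", 42), ("SIZE", 8)]:
        chain = make_setter(name, value, chain)
    lib = {}
    chain(lib)                           # one call populates every constant
    return lib                           # -> {'SIZE': 8, 'ANSWER': 42}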
cffimod_header = r'''
#include <Python.h>
#include <stddef.h>
/* this block of #ifs should be kept exactly identical between
c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */
#if defined(_MSC_VER)
# include <malloc.h> /* for alloca() */
# if _MSC_VER < 1600 /* MSVC < 2010 */
typedef __int8 int8_t;
typedef __int16 int16_t;
typedef __int32 int32_t;
typedef __int64 int64_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
typedef unsigned __int64 uint64_t;
typedef __int8 int_least8_t;
typedef __int16 int_least16_t;
typedef __int32 int_least32_t;
typedef __int64 int_least64_t;
typedef unsigned __int8 uint_least8_t;
typedef unsigned __int16 uint_least16_t;
typedef unsigned __int32 uint_least32_t;
typedef unsigned __int64 uint_least64_t;
typedef __int8 int_fast8_t;
typedef __int16 int_fast16_t;
typedef __int32 int_fast32_t;
typedef __int64 int_fast64_t;
typedef unsigned __int8 uint_fast8_t;
typedef unsigned __int16 uint_fast16_t;
typedef unsigned __int32 uint_fast32_t;
typedef unsigned __int64 uint_fast64_t;
typedef __int64 intmax_t;
typedef unsigned __int64 uintmax_t;
# else
# include <stdint.h>
# endif
# if _MSC_VER < 1800 /* MSVC < 2013 */
typedef unsigned char _Bool;
# endif
#else
# include <stdint.h>
# if (defined (__SVR4) && defined (__sun)) || defined(_AIX)
# include <alloca.h>
# endif
#endif
#if PY_MAJOR_VERSION < 3
# undef PyCapsule_CheckExact
# undef PyCapsule_GetPointer
# define PyCapsule_CheckExact(capsule) (PyCObject_Check(capsule))
# define PyCapsule_GetPointer(capsule, name) \
(PyCObject_AsVoidPtr(capsule))
#endif
#if PY_MAJOR_VERSION >= 3
# define PyInt_FromLong PyLong_FromLong
#endif
#define _cffi_from_c_double PyFloat_FromDouble
#define _cffi_from_c_float PyFloat_FromDouble
#define _cffi_from_c_long PyInt_FromLong
#define _cffi_from_c_ulong PyLong_FromUnsignedLong
#define _cffi_from_c_longlong PyLong_FromLongLong
#define _cffi_from_c_ulonglong PyLong_FromUnsignedLongLong
#define _cffi_to_c_double PyFloat_AsDouble
#define _cffi_to_c_float PyFloat_AsDouble
#define _cffi_from_c_int_const(x) \
(((x) > 0) ? \
((unsigned long long)(x) <= (unsigned long long)LONG_MAX) ? \
PyInt_FromLong((long)(x)) : \
PyLong_FromUnsignedLongLong((unsigned long long)(x)) : \
((long long)(x) >= (long long)LONG_MIN) ? \
PyInt_FromLong((long)(x)) : \
PyLong_FromLongLong((long long)(x)))
#define _cffi_from_c_int(x, type) \
(((type)-1) > 0 ? /* unsigned */ \
(sizeof(type) < sizeof(long) ? \
PyInt_FromLong((long)x) : \
sizeof(type) == sizeof(long) ? \
PyLong_FromUnsignedLong((unsigned long)x) : \
PyLong_FromUnsignedLongLong((unsigned long long)x)) : \
(sizeof(type) <= sizeof(long) ? \
PyInt_FromLong((long)x) : \
PyLong_FromLongLong((long long)x)))
#define _cffi_to_c_int(o, type) \
(sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \
: (type)_cffi_to_c_i8(o)) : \
sizeof(type) == 2 ? (((type)-1) > 0 ? (type)_cffi_to_c_u16(o) \
: (type)_cffi_to_c_i16(o)) : \
sizeof(type) == 4 ? (((type)-1) > 0 ? (type)_cffi_to_c_u32(o) \
: (type)_cffi_to_c_i32(o)) : \
sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \
: (type)_cffi_to_c_i64(o)) : \
(Py_FatalError("unsupported size for type " #type), (type)0))
#define _cffi_to_c_i8 \
((int(*)(PyObject *))_cffi_exports[1])
#define _cffi_to_c_u8 \
((int(*)(PyObject *))_cffi_exports[2])
#define _cffi_to_c_i16 \
((int(*)(PyObject *))_cffi_exports[3])
#define _cffi_to_c_u16 \
((int(*)(PyObject *))_cffi_exports[4])
#define _cffi_to_c_i32 \
((int(*)(PyObject *))_cffi_exports[5])
#define _cffi_to_c_u32 \
((unsigned int(*)(PyObject *))_cffi_exports[6])
#define _cffi_to_c_i64 \
((long long(*)(PyObject *))_cffi_exports[7])
#define _cffi_to_c_u64 \
((unsigned long long(*)(PyObject *))_cffi_exports[8])
#define _cffi_to_c_char \
((int(*)(PyObject *))_cffi_exports[9])
#define _cffi_from_c_pointer \
((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[10])
#define _cffi_to_c_pointer \
((char *(*)(PyObject *, CTypeDescrObject *))_cffi_exports[11])
#define _cffi_get_struct_layout \
((PyObject *(*)(Py_ssize_t[]))_cffi_exports[12])
#define _cffi_restore_errno \
((void(*)(void))_cffi_exports[13])
#define _cffi_save_errno \
((void(*)(void))_cffi_exports[14])
#define _cffi_from_c_char \
((PyObject *(*)(char))_cffi_exports[15])
#define _cffi_from_c_deref \
((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[16])
#define _cffi_to_c \
((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[17])
#define _cffi_from_c_struct \
((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[18])
#define _cffi_to_c_wchar_t \
((wchar_t(*)(PyObject *))_cffi_exports[19])
#define _cffi_from_c_wchar_t \
((PyObject *(*)(wchar_t))_cffi_exports[20])
#define _cffi_to_c_long_double \
((long double(*)(PyObject *))_cffi_exports[21])
#define _cffi_to_c__Bool \
((_Bool(*)(PyObject *))_cffi_exports[22])
#define _cffi_prepare_pointer_call_argument \
((Py_ssize_t(*)(CTypeDescrObject *, PyObject *, char **))_cffi_exports[23])
#define _cffi_convert_array_from_object \
((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[24])
#define _CFFI_NUM_EXPORTS 25
typedef struct _ctypedescr CTypeDescrObject;
static void *_cffi_exports[_CFFI_NUM_EXPORTS];
static PyObject *_cffi_types, *_cffi_VerificationError;
static int _cffi_setup_custom(PyObject *lib); /* forward */
static PyObject *_cffi_setup(PyObject *self, PyObject *args)
{
PyObject *library;
int was_alive = (_cffi_types != NULL);
(void)self; /* unused */
if (!PyArg_ParseTuple(args, "OOO", &_cffi_types, &_cffi_VerificationError,
&library))
return NULL;
Py_INCREF(_cffi_types);
Py_INCREF(_cffi_VerificationError);
if (_cffi_setup_custom(library) < 0)
return NULL;
return PyBool_FromLong(was_alive);
}
static int _cffi_init(void)
{
PyObject *module, *c_api_object = NULL;
module = PyImport_ImportModule("_cffi_backend");
if (module == NULL)
goto failure;
c_api_object = PyObject_GetAttrString(module, "_C_API");
if (c_api_object == NULL)
goto failure;
if (!PyCapsule_CheckExact(c_api_object)) {
PyErr_SetNone(PyExc_ImportError);
goto failure;
}
memcpy(_cffi_exports, PyCapsule_GetPointer(c_api_object, "cffi"),
_CFFI_NUM_EXPORTS * sizeof(void *));
Py_DECREF(module);
Py_DECREF(c_api_object);
return 0;
failure:
Py_XDECREF(module);
Py_XDECREF(c_api_object);
return -1;
}
#define _cffi_type(num) ((CTypeDescrObject *)PyList_GET_ITEM(_cffi_types, num))
/**********/
'''
| 40.988989
| 79
| 0.546962
|
import sys, imp
from . import model, ffiplatform
class VCPythonEngine(object):
_class_key = 'x'
_gen_python_module = True
def __init__(self, verifier):
self.verifier = verifier
self.ffi = verifier.ffi
self._struct_pending_verification = {}
self._types_of_builtin_functions = {}
def patch_extension_kwds(self, kwds):
pass
def find_module(self, module_name, path, so_suffixes):
try:
f, filename, descr = imp.find_module(module_name, path)
except ImportError:
return None
if f is not None:
f.close()
if descr[0] not in so_suffixes:
return None
return filename
def collect_types(self):
self._typesdict = {}
self._generate("collecttype")
def _prnt(self, what=''):
self._f.write(what + '\n')
def _gettypenum(self, type):
return self._typesdict[type]
def _do_collect_type(self, tp):
if ((not isinstance(tp, model.PrimitiveType)
or tp.name == 'long double')
and tp not in self._typesdict):
num = len(self._typesdict)
self._typesdict[tp] = num
def write_source_to_f(self):
self.collect_types()
self._chained_list_constants = ['((void)lib,0)', '((void)lib,0)']
prnt = self._prnt
prnt(cffimod_header)
prnt()
prnt(self.verifier.preamble)
prnt()
self._generate("decl")
self._generate_setup_custom()
prnt()
prnt('static PyMethodDef _cffi_methods[] = {')
self._generate("method")
prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS, NULL},')
prnt(' {NULL, NULL, 0, NULL} /* Sentinel */')
prnt('};')
prnt()
modname = self.verifier.get_module_name()
constants = self._chained_list_constants[False]
prnt('#if PY_MAJOR_VERSION >= 3')
prnt()
prnt('static struct PyModuleDef _cffi_module_def = {')
prnt(' PyModuleDef_HEAD_INIT,')
prnt(' "%s",' % modname)
prnt(' NULL,')
prnt(' -1,')
prnt(' _cffi_methods,')
prnt(' NULL, NULL, NULL, NULL')
prnt('};')
prnt()
prnt('PyMODINIT_FUNC')
prnt('PyInit_%s(void)' % modname)
prnt('{')
prnt(' PyObject *lib;')
prnt(' lib = PyModule_Create(&_cffi_module_def);')
prnt(' if (lib == NULL)')
prnt(' return NULL;')
prnt(' if (%s < 0 || _cffi_init() < 0) {' % (constants,))
prnt(' Py_DECREF(lib);')
prnt(' return NULL;')
prnt(' }')
prnt(' return lib;')
prnt('}')
prnt()
prnt('#else')
prnt()
prnt('PyMODINIT_FUNC')
prnt('init%s(void)' % modname)
prnt('{')
prnt(' PyObject *lib;')
prnt(' lib = Py_InitModule("%s", _cffi_methods);' % modname)
prnt(' if (lib == NULL)')
prnt(' return;')
prnt(' if (%s < 0 || _cffi_init() < 0)' % (constants,))
prnt(' return;')
prnt(' return;')
prnt('}')
prnt()
prnt('#endif')
def load_library(self, flags=None):
if hasattr(sys, "getdlopenflags"):
previous_flags = sys.getdlopenflags()
try:
if hasattr(sys, "setdlopenflags") and flags is not None:
sys.setdlopenflags(flags)
module = imp.load_dynamic(self.verifier.get_module_name(),
self.verifier.modulefilename)
except ImportError as e:
error = "importing %r: %s" % (self.verifier.modulefilename, e)
raise ffiplatform.VerificationError(error)
finally:
if hasattr(sys, "setdlopenflags"):
sys.setdlopenflags(previous_flags)
self._load(module, 'loading')
revmapping = dict([(value, key)
for (key, value) in self._typesdict.items()])
lst = [revmapping[i] for i in range(len(revmapping))]
lst = list(map(self.ffi._get_cached_btype, lst))
class FFILibrary(object):
_cffi_python_module = module
_cffi_ffi = self.ffi
_cffi_dir = []
def __dir__(self):
return FFILibrary._cffi_dir + list(self.__dict__)
library = FFILibrary()
if module._cffi_setup(lst, ffiplatform.VerificationError, library):
import warnings
warnings.warn("reimporting %r might overwrite older definitions"
% (self.verifier.get_module_name()))
self._load(module, 'loaded', library=library)
module._cffi_original_ffi = self.ffi
module._cffi_types_of_builtin_funcs = self._types_of_builtin_functions
return library
def _get_declarations(self):
return sorted(self.ffi._parser._declarations.items())
def _generate(self, step_name):
for name, tp in self._get_declarations():
kind, realname = name.split(' ', 1)
try:
method = getattr(self, '_generate_cpy_%s_%s' % (kind,
step_name))
except AttributeError:
raise ffiplatform.VerificationError(
"not implemented in verify(): %r" % name)
try:
method(tp, realname)
except Exception as e:
model.attach_exception_info(e, name)
raise
def _load(self, module, step_name, **kwds):
for name, tp in self._get_declarations():
kind, realname = name.split(' ', 1)
method = getattr(self, '_%s_cpy_%s' % (step_name, kind))
try:
method(tp, realname, module, **kwds)
except Exception as e:
model.attach_exception_info(e, name)
raise
def _generate_nothing(self, tp, name):
pass
def _loaded_noop(self, tp, name, module, **kwds):
pass
def _convert_funcarg_to_c(self, tp, fromvar, tovar, errcode):
extraarg = ''
if isinstance(tp, model.PrimitiveType):
if tp.is_integer_type() and tp.name != '_Bool':
converter = '_cffi_to_c_int'
extraarg = ', %s' % tp.name
else:
converter = '(%s)_cffi_to_c_%s' % (tp.get_c_name(''),
tp.name.replace(' ', '_'))
errvalue = '-1'
elif isinstance(tp, model.PointerType):
self._convert_funcarg_to_c_ptr_or_array(tp, fromvar,
tovar, errcode)
return
elif isinstance(tp, (model.StructOrUnion, model.EnumType)):
self._prnt(' if (_cffi_to_c((char *)&%s, _cffi_type(%d), %s) < 0)'
% (tovar, self._gettypenum(tp), fromvar))
self._prnt(' %s;' % errcode)
return
elif isinstance(tp, model.FunctionPtrType):
converter = '(%s)_cffi_to_c_pointer' % tp.get_c_name('')
extraarg = ', _cffi_type(%d)' % self._gettypenum(tp)
errvalue = 'NULL'
else:
raise NotImplementedError(tp)
self._prnt(' %s = %s(%s%s);' % (tovar, converter, fromvar, extraarg))
self._prnt(' if (%s == (%s)%s && PyErr_Occurred())' % (
tovar, tp.get_c_name(''), errvalue))
self._prnt(' %s;' % errcode)
def _extra_local_variables(self, tp, localvars):
if isinstance(tp, model.PointerType):
localvars.add('Py_ssize_t datasize')
def _convert_funcarg_to_c_ptr_or_array(self, tp, fromvar, tovar, errcode):
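        # _cffi_prepare_pointer_call_argument returns 0 when the Python object
        # converts directly to a pointer, a positive byte count when a
        # temporary buffer must be alloca()'d and filled from the object, and
        # a negative value on error (handled by the datasize < 0 branch).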
self._prnt(' datasize = _cffi_prepare_pointer_call_argument(')
self._prnt(' _cffi_type(%d), %s, (char **)&%s);' % (
self._gettypenum(tp), fromvar, tovar))
self._prnt(' if (datasize != 0) {')
self._prnt(' if (datasize < 0)')
self._prnt(' %s;' % errcode)
self._prnt(' %s = alloca((size_t)datasize);' % (tovar,))
self._prnt(' memset((void *)%s, 0, (size_t)datasize);' % (tovar,))
self._prnt(' if (_cffi_convert_array_from_object('
'(char *)%s, _cffi_type(%d), %s) < 0)' % (
tovar, self._gettypenum(tp), fromvar))
self._prnt(' %s;' % errcode)
self._prnt(' }')
def _convert_expr_from_c(self, tp, var, context):
if isinstance(tp, model.PrimitiveType):
if tp.is_integer_type():
return '_cffi_from_c_int(%s, %s)' % (var, tp.name)
elif tp.name != 'long double':
return '_cffi_from_c_%s(%s)' % (tp.name.replace(' ', '_'), var)
else:
return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
elif isinstance(tp, (model.PointerType, model.FunctionPtrType)):
return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
elif isinstance(tp, model.ArrayType):
return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % (
var, self._gettypenum(model.PointerType(tp.item)))
elif isinstance(tp, model.StructType):
if tp.fldnames is None:
raise TypeError("'%s' is used as %s, but is opaque" % (
tp._get_c_name(), context))
return '_cffi_from_c_struct((char *)&%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
elif isinstance(tp, model.EnumType):
return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % (
var, self._gettypenum(tp))
else:
raise NotImplementedError(tp)
_generate_cpy_typedef_collecttype = _generate_nothing
_generate_cpy_typedef_decl = _generate_nothing
_generate_cpy_typedef_method = _generate_nothing
_loading_cpy_typedef = _loaded_noop
_loaded_cpy_typedef = _loaded_noop
def _generate_cpy_function_collecttype(self, tp, name):
assert isinstance(tp, model.FunctionPtrType)
if tp.ellipsis:
self._do_collect_type(tp)
else:
# otherwise test_autofilled_struct_as_argument fails
for type in tp.args:
self._do_collect_type(type)
self._do_collect_type(tp.result)
def _generate_cpy_function_decl(self, tp, name):
assert isinstance(tp, model.FunctionPtrType)
if tp.ellipsis:
# cannot support vararg functions better than this: check for its
# exact type (including the fixed arguments), and build it as a
# constant function pointer (no CPython wrapper)
self._generate_cpy_const(False, name, tp)
return
prnt = self._prnt
numargs = len(tp.args)
if numargs == 0:
argname = 'noarg'
elif numargs == 1:
argname = 'arg0'
else:
argname = 'args'
prnt('static PyObject *')
prnt('_cffi_f_%s(PyObject *self, PyObject *%s)' % (name, argname))
prnt('{')
#
context = 'argument of %s' % name
for i, type in enumerate(tp.args):
prnt(' %s;' % type.get_c_name(' x%d' % i, context))
#
localvars = set()
for type in tp.args:
self._extra_local_variables(type, localvars)
for decl in localvars:
prnt(' %s;' % (decl,))
#
if not isinstance(tp.result, model.VoidType):
result_code = 'result = '
context = 'result of %s' % name
prnt(' %s;' % tp.result.get_c_name(' result', context))
else:
result_code = ''
#
if len(tp.args) > 1:
rng = range(len(tp.args))
for i in rng:
prnt(' PyObject *arg%d;' % i)
prnt()
prnt(' if (!PyArg_ParseTuple(args, "%s:%s", %s))' % (
'O' * numargs, name, ', '.join(['&arg%d' % i for i in rng])))
prnt(' return NULL;')
prnt()
#
for i, type in enumerate(tp.args):
self._convert_funcarg_to_c(type, 'arg%d' % i, 'x%d' % i,
'return NULL')
prnt()
#
prnt(' Py_BEGIN_ALLOW_THREADS')
prnt(' _cffi_restore_errno();')
prnt(' { %s%s(%s); }' % (
result_code, name,
', '.join(['x%d' % i for i in range(len(tp.args))])))
prnt(' _cffi_save_errno();')
prnt(' Py_END_ALLOW_THREADS')
prnt()
#
prnt(' (void)self; /* unused */')
if numargs == 0:
prnt(' (void)noarg; /* unused */')
if result_code:
prnt(' return %s;' %
self._convert_expr_from_c(tp.result, 'result', 'result type'))
else:
prnt(' Py_INCREF(Py_None);')
prnt(' return Py_None;')
prnt('}')
prnt()
def _generate_cpy_function_method(self, tp, name):
if tp.ellipsis:
return
numargs = len(tp.args)
if numargs == 0:
meth = 'METH_NOARGS'
elif numargs == 1:
meth = 'METH_O'
else:
meth = 'METH_VARARGS'
self._prnt(' {"%s", _cffi_f_%s, %s, NULL},' % (name, name, meth))
_loading_cpy_function = _loaded_noop
def _loaded_cpy_function(self, tp, name, module, library):
if tp.ellipsis:
return
func = getattr(module, name)
setattr(library, name, func)
self._types_of_builtin_functions[func] = tp
# ----------
# named structs
_generate_cpy_struct_collecttype = _generate_nothing
def _generate_cpy_struct_decl(self, tp, name):
assert name == tp.name
self._generate_struct_or_union_decl(tp, 'struct', name)
def _generate_cpy_struct_method(self, tp, name):
self._generate_struct_or_union_method(tp, 'struct', name)
def _loading_cpy_struct(self, tp, name, module):
self._loading_struct_or_union(tp, 'struct', name, module)
def _loaded_cpy_struct(self, tp, name, module, **kwds):
self._loaded_struct_or_union(tp)
_generate_cpy_union_collecttype = _generate_nothing
def _generate_cpy_union_decl(self, tp, name):
assert name == tp.name
self._generate_struct_or_union_decl(tp, 'union', name)
def _generate_cpy_union_method(self, tp, name):
self._generate_struct_or_union_method(tp, 'union', name)
def _loading_cpy_union(self, tp, name, module):
self._loading_struct_or_union(tp, 'union', name, module)
def _loaded_cpy_union(self, tp, name, module, **kwds):
self._loaded_struct_or_union(tp)
def _generate_struct_or_union_decl(self, tp, prefix, name):
if tp.fldnames is None:
return # nothing to do with opaque structs
checkfuncname = '_cffi_check_%s_%s' % (prefix, name)
layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
cname = ('%s %s' % (prefix, name)).strip()
#
prnt = self._prnt
prnt('static void %s(%s *p)' % (checkfuncname, cname))
prnt('{')
prnt(' /* only to generate compile-time warnings or errors */')
prnt(' (void)p;')
for fname, ftype, fbitsize in tp.enumfields():
if (isinstance(ftype, model.PrimitiveType)
and ftype.is_integer_type()) or fbitsize >= 0:
# accept all integers, but complain on float or double
prnt(' (void)((p->%s) << 1);' % fname)
else:
# only accept exactly the type declared.
try:
prnt(' { %s = &p->%s; (void)tmp; }' % (
ftype.get_c_name('*tmp', 'field %r'%fname), fname))
except ffiplatform.VerificationError as e:
prnt(' /* %s */' % str(e)) # cannot verify it, ignore
prnt('}')
prnt('static PyObject *')
prnt('%s(PyObject *self, PyObject *noarg)' % (layoutfuncname,))
prnt('{')
prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname)
prnt(' static Py_ssize_t nums[] = {')
prnt(' sizeof(%s),' % cname)
prnt(' offsetof(struct _cffi_aligncheck, y),')
for fname, ftype, fbitsize in tp.enumfields():
if fbitsize >= 0:
continue # xxx ignore fbitsize for now
prnt(' offsetof(%s, %s),' % (cname, fname))
if isinstance(ftype, model.ArrayType) and ftype.length is None:
prnt(' 0, /* %s */' % ftype._get_c_name())
else:
prnt(' sizeof(((%s *)0)->%s),' % (cname, fname))
prnt(' -1')
prnt(' };')
prnt(' (void)self; /* unused */')
prnt(' (void)noarg; /* unused */')
prnt(' return _cffi_get_struct_layout(nums);')
prnt(' /* the next line is not executed, but compiled */')
prnt(' %s(0);' % (checkfuncname,))
prnt('}')
prnt()
def _generate_struct_or_union_method(self, tp, prefix, name):
if tp.fldnames is None:
return # nothing to do with opaque structs
layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
self._prnt(' {"%s", %s, METH_NOARGS, NULL},' % (layoutfuncname,
layoutfuncname))
def _loading_struct_or_union(self, tp, prefix, name, module):
if tp.fldnames is None:
return # nothing to do with opaque structs
layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name)
#
function = getattr(module, layoutfuncname)
layout = function()
if isinstance(tp, model.StructOrUnion) and tp.partial:
            # use the function()'s sizes and offsets to guide the
            # layout of the struct
totalsize = layout[0]
totalalignment = layout[1]
fieldofs = layout[2::2]
fieldsize = layout[3::2]
tp.force_flatten()
assert len(fieldofs) == len(fieldsize) == len(tp.fldnames)
tp.fixedlayout = fieldofs, fieldsize, totalsize, totalalignment
else:
cname = ('%s %s' % (prefix, name)).strip()
self._struct_pending_verification[tp] = layout, cname
def _loaded_struct_or_union(self, tp):
if tp.fldnames is None:
return
self.ffi._get_cached_btype(tp)
if tp in self._struct_pending_verification:
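            # Cross-check the layout reported by the C compiler against the
            # one ffi computed from the cdef: total size, total alignment,
            # then each field's offset and (when non-zero) size.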
def check(realvalue, expectedvalue, msg):
if realvalue != expectedvalue:
raise ffiplatform.VerificationError(
"%s (we have %d, but C compiler says %d)"
% (msg, expectedvalue, realvalue))
ffi = self.ffi
BStruct = ffi._get_cached_btype(tp)
layout, cname = self._struct_pending_verification.pop(tp)
check(layout[0], ffi.sizeof(BStruct), "wrong total size")
check(layout[1], ffi.alignof(BStruct), "wrong total alignment")
i = 2
for fname, ftype, fbitsize in tp.enumfields():
if fbitsize >= 0:
continue
check(layout[i], ffi.offsetof(BStruct, fname),
"wrong offset for field %r" % (fname,))
if layout[i+1] != 0:
BField = ffi._get_cached_btype(ftype)
check(layout[i+1], ffi.sizeof(BField),
"wrong size for field %r" % (fname,))
i += 2
assert i == len(layout)
_generate_cpy_anonymous_collecttype = _generate_nothing
def _generate_cpy_anonymous_decl(self, tp, name):
if isinstance(tp, model.EnumType):
self._generate_cpy_enum_decl(tp, name, '')
else:
self._generate_struct_or_union_decl(tp, '', name)
def _generate_cpy_anonymous_method(self, tp, name):
if not isinstance(tp, model.EnumType):
self._generate_struct_or_union_method(tp, '', name)
def _loading_cpy_anonymous(self, tp, name, module):
if isinstance(tp, model.EnumType):
self._loading_cpy_enum(tp, name, module)
else:
self._loading_struct_or_union(tp, '', name, module)
def _loaded_cpy_anonymous(self, tp, name, module, **kwds):
if isinstance(tp, model.EnumType):
self._loaded_cpy_enum(tp, name, module, **kwds)
else:
self._loaded_struct_or_union(tp)
def _generate_cpy_const(self, is_int, name, tp=None, category='const',
vartp=None, delayed=True, size_too=False,
check_value=None):
prnt = self._prnt
funcname = '_cffi_%s_%s' % (category, name)
prnt('static int %s(PyObject *lib)' % funcname)
prnt('{')
prnt(' PyObject *o;')
prnt(' int res;')
if not is_int:
prnt(' %s;' % (vartp or tp).get_c_name(' i', name))
else:
assert category == 'const'
if check_value is not None:
self._check_int_constant_value(name, check_value)
if not is_int:
if category == 'var':
realexpr = '&' + name
else:
realexpr = name
prnt(' i = (%s);' % (realexpr,))
prnt(' o = %s;' % (self._convert_expr_from_c(tp, 'i',
'variable type'),))
assert delayed
else:
prnt(' o = _cffi_from_c_int_const(%s);' % name)
prnt(' if (o == NULL)')
prnt(' return -1;')
if size_too:
prnt(' {')
prnt(' PyObject *o1 = o;')
prnt(' o = Py_BuildValue("On", o1, (Py_ssize_t)sizeof(%s));'
% (name,))
prnt(' Py_DECREF(o1);')
prnt(' if (o == NULL)')
prnt(' return -1;')
prnt(' }')
prnt(' res = PyObject_SetAttrString(lib, "%s", o);' % name)
prnt(' Py_DECREF(o);')
prnt(' if (res < 0)')
prnt(' return -1;')
prnt(' return %s;' % self._chained_list_constants[delayed])
self._chained_list_constants[delayed] = funcname + '(lib)'
prnt('}')
prnt()
def _generate_cpy_constant_collecttype(self, tp, name):
is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type()
if not is_int:
self._do_collect_type(tp)
def _generate_cpy_constant_decl(self, tp, name):
is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type()
self._generate_cpy_const(is_int, name, tp)
_generate_cpy_constant_method = _generate_nothing
_loading_cpy_constant = _loaded_noop
_loaded_cpy_constant = _loaded_noop
def _check_int_constant_value(self, name, value, err_prefix=''):
prnt = self._prnt
if value <= 0:
prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % (
name, name, value))
else:
prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % (
name, name, value))
prnt(' char buf[64];')
prnt(' if ((%s) <= 0)' % name)
prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % name)
prnt(' else')
prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' %
name)
prnt(' PyErr_Format(_cffi_VerificationError,')
prnt(' "%s%s has the real value %s, not %s",')
prnt(' "%s", "%s", buf, "%d");' % (
err_prefix, name, value))
prnt(' return -1;')
prnt(' }')
def _enum_funcname(self, prefix, name):
name = name.replace('$', '___D_')
return '_cffi_e_%s_%s' % (prefix, name)
def _generate_cpy_enum_decl(self, tp, name, prefix='enum'):
if tp.partial:
for enumerator in tp.enumerators:
self._generate_cpy_const(True, enumerator, delayed=False)
return
funcname = self._enum_funcname(prefix, name)
prnt = self._prnt
prnt('static int %s(PyObject *lib)' % funcname)
prnt('{')
for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
self._check_int_constant_value(enumerator, enumvalue,
"enum %s: " % name)
prnt(' return %s;' % self._chained_list_constants[True])
self._chained_list_constants[True] = funcname + '(lib)'
prnt('}')
prnt()
_generate_cpy_enum_collecttype = _generate_nothing
_generate_cpy_enum_method = _generate_nothing
def _loading_cpy_enum(self, tp, name, module):
if tp.partial:
enumvalues = [getattr(module, enumerator)
for enumerator in tp.enumerators]
tp.enumvalues = tuple(enumvalues)
tp.partial_resolved = True
def _loaded_cpy_enum(self, tp, name, module, library):
for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues):
setattr(library, enumerator, enumvalue)
def _generate_cpy_macro_decl(self, tp, name):
if tp == '...':
check_value = None
else:
check_value = tp
self._generate_cpy_const(True, name, check_value=check_value)
_generate_cpy_macro_collecttype = _generate_nothing
_generate_cpy_macro_method = _generate_nothing
_loading_cpy_macro = _loaded_noop
_loaded_cpy_macro = _loaded_noop
def _generate_cpy_variable_collecttype(self, tp, name):
if isinstance(tp, model.ArrayType):
tp_ptr = model.PointerType(tp.item)
else:
tp_ptr = model.PointerType(tp)
self._do_collect_type(tp_ptr)
def _generate_cpy_variable_decl(self, tp, name):
if isinstance(tp, model.ArrayType):
tp_ptr = model.PointerType(tp.item)
self._generate_cpy_const(False, name, tp, vartp=tp_ptr,
                                     size_too=(tp.length == '...'))
else:
tp_ptr = model.PointerType(tp)
self._generate_cpy_const(False, name, tp_ptr, category='var')
_generate_cpy_variable_method = _generate_nothing
_loading_cpy_variable = _loaded_noop
def _loaded_cpy_variable(self, tp, name, module, library):
value = getattr(library, name)
if isinstance(tp, model.ArrayType):
if tp.length == '...':
assert isinstance(value, tuple)
(value, size) = value
BItemType = self.ffi._get_cached_btype(tp.item)
length, rest = divmod(size, self.ffi.sizeof(BItemType))
if rest != 0:
raise ffiplatform.VerificationError(
"bad size: %r does not seem to be an array of %s" %
(name, tp.item))
tp = tp.resolve_length(length)
if tp.length is not None:
BArray = self.ffi._get_cached_btype(tp)
value = self.ffi.cast(BArray, value)
setattr(library, name, value)
return
ptr = value
delattr(library, name)
def getter(library):
return ptr[0]
def setter(library, value):
ptr[0] = value
setattr(type(library), name, property(getter, setter))
type(library)._cffi_dir.append(name)
def _generate_setup_custom(self):
prnt = self._prnt
prnt('static int _cffi_setup_custom(PyObject *lib)')
prnt('{')
prnt(' return %s;' % self._chained_list_constants[True])
prnt('}')
cffimod_header = r'''
#include <Python.h>
#include <stddef.h>
/* this block of #ifs should be kept exactly identical between
c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py */
#if defined(_MSC_VER)
# include <malloc.h> /* for alloca() */
# if _MSC_VER < 1600 /* MSVC < 2010 */
typedef __int8 int8_t;
typedef __int16 int16_t;
typedef __int32 int32_t;
typedef __int64 int64_t;
typedef unsigned __int8 uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
typedef unsigned __int64 uint64_t;
typedef __int8 int_least8_t;
typedef __int16 int_least16_t;
typedef __int32 int_least32_t;
typedef __int64 int_least64_t;
typedef unsigned __int8 uint_least8_t;
typedef unsigned __int16 uint_least16_t;
typedef unsigned __int32 uint_least32_t;
typedef unsigned __int64 uint_least64_t;
typedef __int8 int_fast8_t;
typedef __int16 int_fast16_t;
typedef __int32 int_fast32_t;
typedef __int64 int_fast64_t;
typedef unsigned __int8 uint_fast8_t;
typedef unsigned __int16 uint_fast16_t;
typedef unsigned __int32 uint_fast32_t;
typedef unsigned __int64 uint_fast64_t;
typedef __int64 intmax_t;
typedef unsigned __int64 uintmax_t;
# else
# include <stdint.h>
# endif
# if _MSC_VER < 1800 /* MSVC < 2013 */
typedef unsigned char _Bool;
# endif
#else
# include <stdint.h>
# if (defined (__SVR4) && defined (__sun)) || defined(_AIX)
# include <alloca.h>
# endif
#endif
#if PY_MAJOR_VERSION < 3
# undef PyCapsule_CheckExact
# undef PyCapsule_GetPointer
# define PyCapsule_CheckExact(capsule) (PyCObject_Check(capsule))
# define PyCapsule_GetPointer(capsule, name) \
(PyCObject_AsVoidPtr(capsule))
#endif
#if PY_MAJOR_VERSION >= 3
# define PyInt_FromLong PyLong_FromLong
#endif
#define _cffi_from_c_double PyFloat_FromDouble
#define _cffi_from_c_float PyFloat_FromDouble
#define _cffi_from_c_long PyInt_FromLong
#define _cffi_from_c_ulong PyLong_FromUnsignedLong
#define _cffi_from_c_longlong PyLong_FromLongLong
#define _cffi_from_c_ulonglong PyLong_FromUnsignedLongLong
#define _cffi_to_c_double PyFloat_AsDouble
#define _cffi_to_c_float PyFloat_AsDouble
#define _cffi_from_c_int_const(x) \
(((x) > 0) ? \
((unsigned long long)(x) <= (unsigned long long)LONG_MAX) ? \
PyInt_FromLong((long)(x)) : \
PyLong_FromUnsignedLongLong((unsigned long long)(x)) : \
((long long)(x) >= (long long)LONG_MIN) ? \
PyInt_FromLong((long)(x)) : \
PyLong_FromLongLong((long long)(x)))
#define _cffi_from_c_int(x, type) \
(((type)-1) > 0 ? /* unsigned */ \
(sizeof(type) < sizeof(long) ? \
PyInt_FromLong((long)x) : \
sizeof(type) == sizeof(long) ? \
PyLong_FromUnsignedLong((unsigned long)x) : \
PyLong_FromUnsignedLongLong((unsigned long long)x)) : \
(sizeof(type) <= sizeof(long) ? \
PyInt_FromLong((long)x) : \
PyLong_FromLongLong((long long)x)))
#define _cffi_to_c_int(o, type) \
(sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \
: (type)_cffi_to_c_i8(o)) : \
sizeof(type) == 2 ? (((type)-1) > 0 ? (type)_cffi_to_c_u16(o) \
: (type)_cffi_to_c_i16(o)) : \
sizeof(type) == 4 ? (((type)-1) > 0 ? (type)_cffi_to_c_u32(o) \
: (type)_cffi_to_c_i32(o)) : \
sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \
: (type)_cffi_to_c_i64(o)) : \
(Py_FatalError("unsupported size for type " #type), (type)0))
#define _cffi_to_c_i8 \
((int(*)(PyObject *))_cffi_exports[1])
#define _cffi_to_c_u8 \
((int(*)(PyObject *))_cffi_exports[2])
#define _cffi_to_c_i16 \
((int(*)(PyObject *))_cffi_exports[3])
#define _cffi_to_c_u16 \
((int(*)(PyObject *))_cffi_exports[4])
#define _cffi_to_c_i32 \
((int(*)(PyObject *))_cffi_exports[5])
#define _cffi_to_c_u32 \
((unsigned int(*)(PyObject *))_cffi_exports[6])
#define _cffi_to_c_i64 \
((long long(*)(PyObject *))_cffi_exports[7])
#define _cffi_to_c_u64 \
((unsigned long long(*)(PyObject *))_cffi_exports[8])
#define _cffi_to_c_char \
((int(*)(PyObject *))_cffi_exports[9])
#define _cffi_from_c_pointer \
((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[10])
#define _cffi_to_c_pointer \
((char *(*)(PyObject *, CTypeDescrObject *))_cffi_exports[11])
#define _cffi_get_struct_layout \
((PyObject *(*)(Py_ssize_t[]))_cffi_exports[12])
#define _cffi_restore_errno \
((void(*)(void))_cffi_exports[13])
#define _cffi_save_errno \
((void(*)(void))_cffi_exports[14])
#define _cffi_from_c_char \
((PyObject *(*)(char))_cffi_exports[15])
#define _cffi_from_c_deref \
((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[16])
#define _cffi_to_c \
((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[17])
#define _cffi_from_c_struct \
((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[18])
#define _cffi_to_c_wchar_t \
((wchar_t(*)(PyObject *))_cffi_exports[19])
#define _cffi_from_c_wchar_t \
((PyObject *(*)(wchar_t))_cffi_exports[20])
#define _cffi_to_c_long_double \
((long double(*)(PyObject *))_cffi_exports[21])
#define _cffi_to_c__Bool \
((_Bool(*)(PyObject *))_cffi_exports[22])
#define _cffi_prepare_pointer_call_argument \
((Py_ssize_t(*)(CTypeDescrObject *, PyObject *, char **))_cffi_exports[23])
#define _cffi_convert_array_from_object \
((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[24])
#define _CFFI_NUM_EXPORTS 25
typedef struct _ctypedescr CTypeDescrObject;
static void *_cffi_exports[_CFFI_NUM_EXPORTS];
static PyObject *_cffi_types, *_cffi_VerificationError;
static int _cffi_setup_custom(PyObject *lib); /* forward */
static PyObject *_cffi_setup(PyObject *self, PyObject *args)
{
PyObject *library;
int was_alive = (_cffi_types != NULL);
(void)self; /* unused */
if (!PyArg_ParseTuple(args, "OOO", &_cffi_types, &_cffi_VerificationError,
&library))
return NULL;
Py_INCREF(_cffi_types);
Py_INCREF(_cffi_VerificationError);
if (_cffi_setup_custom(library) < 0)
return NULL;
return PyBool_FromLong(was_alive);
}
static int _cffi_init(void)
{
PyObject *module, *c_api_object = NULL;
module = PyImport_ImportModule("_cffi_backend");
if (module == NULL)
goto failure;
c_api_object = PyObject_GetAttrString(module, "_C_API");
if (c_api_object == NULL)
goto failure;
if (!PyCapsule_CheckExact(c_api_object)) {
PyErr_SetNone(PyExc_ImportError);
goto failure;
}
memcpy(_cffi_exports, PyCapsule_GetPointer(c_api_object, "cffi"),
_CFFI_NUM_EXPORTS * sizeof(void *));
Py_DECREF(module);
Py_DECREF(c_api_object);
return 0;
failure:
Py_XDECREF(module);
Py_XDECREF(c_api_object);
return -1;
}
#define _cffi_type(num) ((CTypeDescrObject *)PyList_GET_ITEM(_cffi_types, num))
/**********/
'''
| true
| true
|
1c46691999965d62eea11bab0f726d607f2615d6
| 5,738
|
py
|
Python
|
iot/main.py
|
jonDufty/snsrpi-device
|
0e2d8023093385e1ec457560e8880c43036c73dc
|
[
"MIT"
] | null | null | null |
iot/main.py
|
jonDufty/snsrpi-device
|
0e2d8023093385e1ec457560e8880c43036c73dc
|
[
"MIT"
] | null | null | null |
iot/main.py
|
jonDufty/snsrpi-device
|
0e2d8023093385e1ec457560e8880c43036c73dc
|
[
"MIT"
] | null | null | null |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
import argparse
from ShadowHandler import SensorShadowHandler, GlobalShadowHandler
from awscrt import io, mqtt, auth, http
from awsiot import mqtt_connection_builder, iotshadow
import sys
import os
import signal
import threading
import json
import time
from datetime import datetime, timedelta
from uuid import uuid4
from Device import Device
AWS_IOT_ENDPOINT = os.environ["AWS_IOT_ENDPOINT"]
# io.init_logging()
received_count = 0
stop_recording_event = threading.Event()
# Callback when connection is accidentally lost.
def on_connection_interrupted(connection, error, **kwargs):
"""Callback for connection interrup. AWS example
Args:
connection ([type]): [description]
error ([type]): [description]
"""
print("Connection interrupted. error: {}".format(error))
# Callback when an interrupted connection is re-established.
def on_connection_resumed(connection, return_code, session_present, **kwargs):
"""Callback for connection resume event. Standard AWS example
Args:
connection (mqqt_connection):
return_code (int): Return code
session_present (bool): If there is an existing session
"""
print("Connection resumed. return_code: {} session_present: {}".format(
return_code, session_present))
if return_code == mqtt.ConnectReturnCode.ACCEPTED and not session_present:
print("Session did not persist. Resubscribing to existing topics...")
resubscribe_future, _ = connection.resubscribe_existing_topics()
        # We cannot synchronously wait for the resubscribe result because
        # we are on the connection's event-loop thread; evaluate the result
        # with a callback instead.
resubscribe_future.add_done_callback(on_resubscribe_complete)
def on_resubscribe_complete(resubscribe_future):
"""Callback for resubscribe event. Standard AWS example
Args:
resubscribe_future ([type]): [description]
"""
resubscribe_results = resubscribe_future.result()
print("Resubscribe results: {}".format(resubscribe_results))
for topic, qos in resubscribe_results['topics']:
if qos is None:
sys.exit("Server rejected resubscribe to topic: {}".format(topic))
def signal_handler(signum, frame):
    """Signal handler for SIGINT/SIGTERM to allow a graceful shutdown
    """
    print("Terminate signal received")
stop_recording_event.set()
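# Illustrative sketch (assumed usage, not part of the original script): the
# handler above only sets a threading.Event; the main thread blocks on
# Event.wait() and performs cleanup once a signal fires. A minimal
# self-contained analogue of the same shutdown pattern:
def _graceful_shutdown_demo():
    stop = threading.Event()
    signal.signal(signal.SIGINT, lambda signum, frame: stop.set())
    signal.signal(signal.SIGTERM, lambda signum, frame: stop.set())
    stop.wait()                       # returns once a handler sets the event
    print("cleaning up resources")    # disconnect/cleanup runs after wait()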
if __name__ == '__main__':
# Initialise
AWS_IOT_ENDPOINT = os.environ["AWS_IOT_ENDPOINT"]
DEVICE_ENDPOINT = os.environ["DEVICE_ENDPOINT"]
DEVICE_NAME = os.environ["DEVICE_NAME"]
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
# Spin up resources
event_loop_group = io.EventLoopGroup(1)
host_resolver = io.DefaultHostResolver(event_loop_group)
client_bootstrap = io.ClientBootstrap(event_loop_group, host_resolver)
proxy_options = None
    # Initialise the device object; it abstracts away most of the handler functions
device = Device(DEVICE_NAME, DEVICE_ENDPOINT)
    # Initialise the MQTT connection object, which handles all communication with the broker
mqtt_connection = mqtt_connection_builder.mtls_from_path(
endpoint=AWS_IOT_ENDPOINT,
port=443,
cert_filepath=device.auth.device_cert,
pri_key_filepath=device.auth.private_key,
client_bootstrap=client_bootstrap,
ca_filepath=device.auth.root_ca_cert,
on_connection_interrupted=on_connection_interrupted,
on_connection_resumed=on_connection_resumed,
client_id=device.name,
clean_session=False,
keep_alive_secs=30,
http_proxy_options=proxy_options)
device.set_mqtt(mqtt_connection)
    # IoT shadow service client
shadow_client = iotshadow.IotShadowClient(mqtt_connection)
print(
f"Connecting to {AWS_IOT_ENDPOINT} with client ID '{device.name}'...")
connect_future = mqtt_connection.connect()
    connect_future.result()  # Wait for the connection result
print("Connected!")
    time.sleep(30)  # Wait for the other service to spin up; otherwise we risk hitting an empty endpoint
    # Conduct an initial healthcheck to initialise the global state/shadow
device.set_global_shadow(shadow_client)
device.get_healthcheck()
device.global_shadow.set_state(device.global_shadow.local_state, update_index=True)
device.global_shadow.update_state(override_desired=True)
    # Iterate over the sensors in the state and create individual shadows/states
sensors = device.global_shadow.local_state['sensors']
for s in sensors:
id = s['sensor_id']
shadow = SensorShadowHandler(
shadow_client, device.name, id, id, device.device_endpoint, device.get_healthcheck
)
shadow.set_state('active', s['active'])
result = shadow.get_or_update_sensor_settings()
if result['error']:
print(f'failed to get initial settings of sensor {id}')
        # Need to call update_state outside of the callback functions;
        # otherwise we risk creating a thread deadlock and the program hangs
shadow.update_state(override_desired=True)
device.sensor_shadows.append(shadow)
    # Enable the periodic heartbeat
device.enable_heartbeat()
    # Keep listening and block until the stop signal is received
stop_recording_event.wait()
# Disconnect
print("Gracefully exitting")
device.delete_shadows()
print("Disconnecting...")
disconnect_future = mqtt_connection.disconnect()
disconnect_future.result()
print("Disconnected!")
    sys.exit(0)
| 34.154762
| 111
| 0.729697
|
import argparse
from ShadowHandler import SensorShadowHandler, GlobalShadowHandler
from awscrt import io, mqtt, auth, http
from awsiot import mqtt_connection_builder, iotshadow
import sys
import os
import signal
import threading
import json
import time
from datetime import datetime, timedelta
from uuid import uuid4
from Device import Device
AWS_IOT_ENDPOINT = os.environ["AWS_IOT_ENDPOINT"]
received_count = 0
stop_recording_event = threading.Event()
def on_connection_interrupted(connection, error, **kwargs):
print("Connection interrupted. error: {}".format(error))
def on_connection_resumed(connection, return_code, session_present, **kwargs):
print("Connection resumed. return_code: {} session_present: {}".format(
return_code, session_present))
if return_code == mqtt.ConnectReturnCode.ACCEPTED and not session_present:
print("Session did not persist. Resubscribing to existing topics...")
resubscribe_future, _ = connection.resubscribe_existing_topics()
resubscribe_future.add_done_callback(on_resubscribe_complete)
def on_resubscribe_complete(resubscribe_future):
resubscribe_results = resubscribe_future.result()
print("Resubscribe results: {}".format(resubscribe_results))
for topic, qos in resubscribe_results['topics']:
if qos is None:
sys.exit("Server rejected resubscribe to topic: {}".format(topic))
def signal_handler(signum, frame):
    print("Terminate signal received")
stop_recording_event.set()
if __name__ == '__main__':
AWS_IOT_ENDPOINT = os.environ["AWS_IOT_ENDPOINT"]
DEVICE_ENDPOINT = os.environ["DEVICE_ENDPOINT"]
DEVICE_NAME = os.environ["DEVICE_NAME"]
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
event_loop_group = io.EventLoopGroup(1)
host_resolver = io.DefaultHostResolver(event_loop_group)
client_bootstrap = io.ClientBootstrap(event_loop_group, host_resolver)
proxy_options = None
device = Device(DEVICE_NAME, DEVICE_ENDPOINT)
mqtt_connection = mqtt_connection_builder.mtls_from_path(
endpoint=AWS_IOT_ENDPOINT,
port=443,
cert_filepath=device.auth.device_cert,
pri_key_filepath=device.auth.private_key,
client_bootstrap=client_bootstrap,
ca_filepath=device.auth.root_ca_cert,
on_connection_interrupted=on_connection_interrupted,
on_connection_resumed=on_connection_resumed,
client_id=device.name,
clean_session=False,
keep_alive_secs=30,
http_proxy_options=proxy_options)
device.set_mqtt(mqtt_connection)
shadow_client = iotshadow.IotShadowClient(mqtt_connection)
print(
f"Connecting to {AWS_IOT_ENDPOINT} with client ID '{device.name}'...")
connect_future = mqtt_connection.connect()
connect_future.result()
print("Connected!")
time.sleep(30)
device.set_global_shadow(shadow_client)
device.get_healthcheck()
device.global_shadow.set_state(device.global_shadow.local_state, update_index=True)
device.global_shadow.update_state(override_desired=True)
sensors = device.global_shadow.local_state['sensors']
for s in sensors:
id = s['sensor_id']
shadow = SensorShadowHandler(
shadow_client, device.name, id, id, device.device_endpoint, device.get_healthcheck
)
shadow.set_state('active', s['active'])
result = shadow.get_or_update_sensor_settings()
if result['error']:
print(f'failed to get initial settings of sensor {id}')
shadow.update_state(override_desired=True)
device.sensor_shadows.append(shadow)
device.enable_heartbeat()
stop_recording_event.wait()
print("Gracefully exitting")
device.delete_shadows()
print("Disconnecting...")
disconnect_future = mqtt_connection.disconnect()
disconnect_future.result()
print("Disconnected!")
    sys.exit(0)
| true
| true
|
1c466940b6dc99ba7aabaaea9b7a0414ef2ddf39
| 13,009
|
py
|
Python
|
formfactory/tests/test_base.py
|
AltusBarry/django-formfactory
|
8da378d0952bfd0eb9a08d49b17b5b95ee7e607d
|
[
"BSD-3-Clause"
] | null | null | null |
formfactory/tests/test_base.py
|
AltusBarry/django-formfactory
|
8da378d0952bfd0eb9a08d49b17b5b95ee7e607d
|
[
"BSD-3-Clause"
] | null | null | null |
formfactory/tests/test_base.py
|
AltusBarry/django-formfactory
|
8da378d0952bfd0eb9a08d49b17b5b95ee7e607d
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import shutil
import uuid
from django.conf import settings
from formfactory import models
from formfactory.tests.models import Enum, EnumItem
def cleanup_files():
test_file_dir = os.path.join(settings.MEDIA_ROOT, "uploads/test")
shutil.rmtree(test_file_dir, ignore_errors=True)
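# Illustrative usage sketch (assumed, not part of this module): tests would
# typically call load_fixtures() below with the test class so every fixture
# becomes a class attribute, and cleanup_files() after each run, e.g.:
#
#   from django.test import TestCase
#
#   class ExampleTestCase(TestCase):
#       @classmethod
#       def setUpTestData(cls):
#           load_fixtures(cls)      # attaches cls.form, cls.fieldgroup, ...
#
#       def tearDown(self):
#           cleanup_files()         # removes the uploads/test artefacts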
def load_fixtures(kls):
kls.form_data = {
"title": "Form 1",
"slug": "form-1"
}
kls.form = models.Form.objects.create(**kls.form_data)
kls.fieldchoice_data = {
"label": "Choice 1",
"value": "choice-1"
}
kls.fieldchoice = models.FieldChoice.objects.create(**kls.fieldchoice_data)
kls.enum_data = {
"title": "Enum 1"
}
kls.enum = Enum.objects.create(**kls.enum_data)
kls.enumitem_data = {
"enum": kls.enum,
"label": "Choice 2",
"value": "choice-2"
}
kls.enumitem = EnumItem.objects.create(**kls.enumitem_data)
kls.fieldgroup_data = {
"title": "Field Group 1",
"show_title": True
}
kls.fieldgroup = models.FormFieldGroup.objects.create(
**kls.fieldgroup_data
)
kls.fieldgroupformthrough_data = {
"form": kls.form,
"field_group": kls.fieldgroup,
"order": 0
}
kls.fieldgroupformthrough = models.FieldGroupFormThrough.objects.create(
**kls.fieldgroupformthrough_data
)
for count, field_type in enumerate(models.FIELD_TYPES):
data = {
"title": "Form Field %s" % count,
"slug": "form-field-%s" % count,
"field_type": field_type[0],
"label": "Form Field %s" % count,
"placeholder": "Field Placeholder %s" % count
}
        # Specialised fields with non-default options will need to have extra
        # data added.
if field_type[0] == "formfactory.fields.ParagraphField":
data["paragraph"] = "**formfactory.fields.ParagraphField**"
setattr(kls, "formfield_data_%s" % count, data)
if field_type[0] == "django.forms.fields.CharField":
getattr(kls, "formfield_data_%s" % count)["max_length"] = 100
setattr(kls, "formfield_%s" % count, models.FormField.objects.create(
**getattr(kls, "formfield_data_%s" % count)
))
if field_type[0] == "django.forms.fields.ChoiceField":
getattr(kls, "formfield_%s" % count).choices.add(kls.fieldchoice)
getattr(kls, "formfield_%s" % count).model_choices = kls.enum
setattr(kls, "fieldgroupthrough_data_%s" % count, {
"field_group": kls.fieldgroup,
"field": getattr(kls, "formfield_%s" % count),
"order": count
})
setattr(
kls, "fieldgroupthrough_%s" % count,
models.FieldGroupThrough.objects.create(
**getattr(kls, "fieldgroupthrough_data_%s" % count)
)
)
kls.simpleform_data = {
"title": "Subscribe Form",
"slug": "subscribe-form",
"success_message": "Success",
"failure_message": "Failure"
}
kls.simpleform = models.Form.objects.create(**kls.simpleform_data)
kls.simplefieldgroup_data = {
"title": "Field Group 1",
"show_title": False
}
kls.simplefieldgroup = models.FormFieldGroup.objects.create(
**kls.simplefieldgroup_data
)
kls.simplefieldgroupformthrough_data = {
"form": kls.simpleform,
"field_group": kls.simplefieldgroup,
"order": 0
}
kls.simplefieldgroupformthrough = models.FieldGroupFormThrough.objects.create(
**kls.simplefieldgroupformthrough_data
)
kls.action_data = {
"action": "formfactory.actions.store_data"
}
kls.action = models.Action.objects.create(**kls.action_data)
kls.formactionthrough_data = {
"action": kls.action,
"form": kls.simpleform,
"order": 0
}
kls.formactionthrough = models.FormActionThrough.objects.create(
**kls.formactionthrough_data
)
kls.emailaction_data = {
"action": "formfactory.actions.send_email"
}
kls.emailaction = models.Action.objects.create(**kls.emailaction_data)
kls.emailactionparam_data = [
{
"key": "from_email_field",
"value": "email-address",
"action": kls.emailaction
}, {
"key": "to_email_field",
"value": "to-email",
"action": kls.emailaction
}, {
"key": "subject_field",
"value": "subject",
"action": kls.emailaction
}
]
for param in kls.emailactionparam_data:
setattr(
kls, "emailactionparam_%s" % param["key"],
models.ActionParam.objects.create(**param)
)
kls.emailformactionthrough_data = {
"action": kls.emailaction,
"form": kls.simpleform,
"order": 1
}
kls.emailformactionthrough = models.FormActionThrough.objects.create(
**kls.emailformactionthrough_data
)
kls.fileuploadaction_data = {
"action": "formfactory.actions.file_upload"
}
kls.fileuploadaction = models.Action.objects.create(
**kls.fileuploadaction_data
)
kls.fileuploadactionparam_data = [
{
"key": "upload_path_field",
"value": "upload-to",
"action": kls.fileuploadaction
}
]
for param in kls.fileuploadactionparam_data:
setattr(
kls, "fileuploadactionparam_%s" % param["key"],
models.ActionParam.objects.create(**param)
)
kls.fileuploadformactionthrough_data = {
"action": kls.fileuploadaction,
"form": kls.simpleform,
"order": 2
}
kls.fileuploadformactionthrough = models.FormActionThrough.objects.create(
**kls.fileuploadformactionthrough_data
)
kls.simpleformfield_data = {
"salutation": {
"title": "Salutation",
"slug": "salutation",
"field_type": "django.forms.fields.ChoiceField",
"label": "Salutation",
"required": False
},
"name": {
"title": "Name",
"slug": "name",
"field_type": "django.forms.fields.CharField",
"label": "Full Name",
"required": True
},
"email_address": {
"title": "Email Address",
"slug": "email-address",
"field_type": "django.forms.fields.EmailField",
"label": "Email",
"help_text": "The email you would like info to be sent to"
},
"accept_terms": {
"title": "Accept Terms",
"slug": "accept-terms",
"field_type": "django.forms.fields.BooleanField",
"label": "Do you accept the terms and conditions",
"required": False
},
"to_email": {
"title": "To Email",
"slug": "to-email",
"field_type": "django.forms.fields.CharField",
"widget": "django.forms.widgets.HiddenInput",
"initial": "dev@praekelt.com",
"required": True
},
"id_copy": {
"title": "ID Copy",
"slug": "id-copy",
"field_type": "django.forms.fields.FileField",
"required": True
},
"upload_to": {
"title": "Upload To",
"slug": "upload-to",
"field_type": "django.forms.fields.CharField",
"widget": "django.forms.widgets.HiddenInput",
"initial": "uploads/test",
"required": True
},
"subject": {
"title": "Subject",
"slug": "subject",
"field_type": "django.forms.fields.CharField",
"widget": "django.forms.widgets.HiddenInput",
"initial": "Test Email",
"required": True
},
"paragraph": {
"title": "Paragraph",
"slug": "paragraph",
"field_type": "formfactory.fields.ParagraphField",
"paragraph": "**aaaa**"
}
}
count = 0
for key, value in kls.simpleformfield_data.items():
setattr(
kls, "simpleformfield_%s" % key,
models.FormField.objects.create(**value)
)
setattr(kls, "simplefieldgroupthrough_data_%s" % key, {
"field_group": kls.simplefieldgroup,
"field": getattr(kls, "simpleformfield_%s" % key),
"order": count
})
setattr(
kls, "simplefieldgroupthrough_%s" % key,
models.FieldGroupThrough.objects.create(
**getattr(kls, "simplefieldgroupthrough_data_%s" % key)
)
)
count += 1
for salutation in ["Mr", "Mrs", "Dr", "Prof"]:
choice = models.FieldChoice.objects.create(
label=salutation, value=salutation
)
kls.simpleformfield_salutation.choices.add(choice)
kls.loginform_data = {
"title": "Login Form",
"slug": "login-form",
"success_message": "Success",
"failure_message": "Failure",
"submit_button_text": "Login"
}
kls.loginform = models.Form.objects.create(**kls.loginform_data)
kls.loginfieldgroup_data = {
"title": "Field Group 1",
"show_title": True
}
kls.loginfieldgroup = models.FormFieldGroup.objects.create(
**kls.loginfieldgroup_data
)
kls.loginfieldgroupformthrough_data = {
"form": kls.loginform,
"field_group": kls.loginfieldgroup,
"order": 0
}
kls.loginfieldgroupformthrough = models.FieldGroupFormThrough.objects.create(
**kls.loginfieldgroupformthrough_data
)
kls.loginaction_data = {
"action": "formfactory.actions.login"
}
kls.loginaction = models.Action.objects.create(**kls.loginaction_data)
kls.loginactionparam_data = [
{
"key": "username_field",
"value": "username",
"action": kls.loginaction
}, {
"key": "password_field",
"value": "password",
"action": kls.loginaction
}
]
for param in kls.loginactionparam_data:
setattr(
kls, "loginactionparam_%s" % param["key"],
models.ActionParam.objects.create(**param)
)
kls.loginformactionthrough_data = {
"action": kls.loginaction,
"form": kls.loginform,
"order": 0
}
kls.loginformactionthrough = models.FormActionThrough.objects.create(
**kls.loginformactionthrough_data
)
kls.loginformfield_data = {
"username": {
"title": "Username",
"slug": "username",
"field_type": "django.forms.fields.CharField",
"label": "Username",
"required": True
},
"password": {
"title": "Password",
"slug": "password",
"field_type": "django.forms.fields.CharField",
"widget": "django.forms.widgets.PasswordInput",
"label": "Password",
"required": True
}
}
count = 0
for key, value in kls.loginformfield_data.items():
setattr(
kls, "loginformfield_%s" % key,
models.FormField.objects.create(**value)
)
setattr(kls, "loginfieldgroupthrough_data_%s" % key, {
"field_group": kls.loginfieldgroup,
"field": getattr(kls, "loginformfield_%s" % key),
"order": count
})
setattr(
kls, "loginfieldgroupthrough_%s" % key,
models.FieldGroupThrough.objects.create(
**getattr(kls, "loginfieldgroupthrough_data_%s" % key)
)
)
count += 1
kls.formdata_data = {
"uuid": str(uuid.uuid4()),
"form": kls.form
}
kls.formdata = models.FormData.objects.create(**kls.formdata_data)
kls.formdataitem_data = {
"form_data": kls.formdata,
"form_field": kls.formfield_1,
"value": "Form Data Item Value 1"
}
kls.formdataitem = models.FormDataItem.objects.create(
**kls.formdataitem_data
)
kls.dummy_validator = "formfactory.tests.validators.dummy_validator"
kls.dummy_action = "formfactory.tests.actions.dummy_action"
kls.wizard_data = {
"title": "Test wizard",
"slug": "test-wizard",
"success_message": "Success",
"failure_message": "Failure",
"redirect_to": "/"
}
kls.validator = models.Validator.objects.create(
validator=kls.dummy_validator
)
kls.wizard = models.Wizard.objects.create(**kls.wizard_data)
kls.wizardformthrough_simple = models.WizardFormThrough.objects.create(
wizard=kls.wizard, form=kls.simpleform, order=1
)
kls.wizardformthrough_login = models.WizardFormThrough.objects.create(
wizard=kls.wizard, form=kls.loginform, order=2
)
| 30.609412
| 82
| 0.568376
|
import os
import shutil
import uuid
from django.conf import settings
from formfactory import models
from formfactory.tests.models import Enum, EnumItem
def cleanup_files():
test_file_dir = os.path.join(settings.MEDIA_ROOT, "uploads/test")
shutil.rmtree(test_file_dir, ignore_errors=True)
def load_fixtures(kls):
kls.form_data = {
"title": "Form 1",
"slug": "form-1"
}
kls.form = models.Form.objects.create(**kls.form_data)
kls.fieldchoice_data = {
"label": "Choice 1",
"value": "choice-1"
}
kls.fieldchoice = models.FieldChoice.objects.create(**kls.fieldchoice_data)
kls.enum_data = {
"title": "Enum 1"
}
kls.enum = Enum.objects.create(**kls.enum_data)
kls.enumitem_data = {
"enum": kls.enum,
"label": "Choice 2",
"value": "choice-2"
}
kls.enumitem = EnumItem.objects.create(**kls.enumitem_data)
kls.fieldgroup_data = {
"title": "Field Group 1",
"show_title": True
}
kls.fieldgroup = models.FormFieldGroup.objects.create(
**kls.fieldgroup_data
)
kls.fieldgroupformthrough_data = {
"form": kls.form,
"field_group": kls.fieldgroup,
"order": 0
}
kls.fieldgroupformthrough = models.FieldGroupFormThrough.objects.create(
**kls.fieldgroupformthrough_data
)
for count, field_type in enumerate(models.FIELD_TYPES):
data = {
"title": "Form Field %s" % count,
"slug": "form-field-%s" % count,
"field_type": field_type[0],
"label": "Form Field %s" % count,
"placeholder": "Field Placeholder %s" % count
}
if field_type[0] == "formfactory.fields.ParagraphField":
data["paragraph"] = "**formfactory.fields.ParagraphField**"
setattr(kls, "formfield_data_%s" % count, data)
if field_type[0] == "django.forms.fields.CharField":
getattr(kls, "formfield_data_%s" % count)["max_length"] = 100
setattr(kls, "formfield_%s" % count, models.FormField.objects.create(
**getattr(kls, "formfield_data_%s" % count)
))
if field_type[0] == "django.forms.fields.ChoiceField":
getattr(kls, "formfield_%s" % count).choices.add(kls.fieldchoice)
getattr(kls, "formfield_%s" % count).model_choices = kls.enum
setattr(kls, "fieldgroupthrough_data_%s" % count, {
"field_group": kls.fieldgroup,
"field": getattr(kls, "formfield_%s" % count),
"order": count
})
setattr(
kls, "fieldgroupthrough_%s" % count,
models.FieldGroupThrough.objects.create(
**getattr(kls, "fieldgroupthrough_data_%s" % count)
)
)
kls.simpleform_data = {
"title": "Subscribe Form",
"slug": "subscribe-form",
"success_message": "Success",
"failure_message": "Failure"
}
kls.simpleform = models.Form.objects.create(**kls.simpleform_data)
kls.simplefieldgroup_data = {
"title": "Field Group 1",
"show_title": False
}
kls.simplefieldgroup = models.FormFieldGroup.objects.create(
**kls.simplefieldgroup_data
)
kls.simplefieldgroupformthrough_data = {
"form": kls.simpleform,
"field_group": kls.simplefieldgroup,
"order": 0
}
kls.simplefieldgroupformthrough = models.FieldGroupFormThrough.objects.create(
**kls.simplefieldgroupformthrough_data
)
kls.action_data = {
"action": "formfactory.actions.store_data"
}
kls.action = models.Action.objects.create(**kls.action_data)
kls.formactionthrough_data = {
"action": kls.action,
"form": kls.simpleform,
"order": 0
}
kls.formactionthrough = models.FormActionThrough.objects.create(
**kls.formactionthrough_data
)
kls.emailaction_data = {
"action": "formfactory.actions.send_email"
}
kls.emailaction = models.Action.objects.create(**kls.emailaction_data)
kls.emailactionparam_data = [
{
"key": "from_email_field",
"value": "email-address",
"action": kls.emailaction
}, {
"key": "to_email_field",
"value": "to-email",
"action": kls.emailaction
}, {
"key": "subject_field",
"value": "subject",
"action": kls.emailaction
}
]
for param in kls.emailactionparam_data:
setattr(
kls, "emailactionparam_%s" % param["key"],
models.ActionParam.objects.create(**param)
)
kls.emailformactionthrough_data = {
"action": kls.emailaction,
"form": kls.simpleform,
"order": 1
}
kls.emailformactionthrough = models.FormActionThrough.objects.create(
**kls.emailformactionthrough_data
)
kls.fileuploadaction_data = {
"action": "formfactory.actions.file_upload"
}
kls.fileuploadaction = models.Action.objects.create(
**kls.fileuploadaction_data
)
kls.fileuploadactionparam_data = [
{
"key": "upload_path_field",
"value": "upload-to",
"action": kls.fileuploadaction
}
]
for param in kls.fileuploadactionparam_data:
setattr(
kls, "fileuploadactionparam_%s" % param["key"],
models.ActionParam.objects.create(**param)
)
kls.fileuploadformactionthrough_data = {
"action": kls.fileuploadaction,
"form": kls.simpleform,
"order": 2
}
kls.fileuploadformactionthrough = models.FormActionThrough.objects.create(
**kls.fileuploadformactionthrough_data
)
kls.simpleformfield_data = {
"salutation": {
"title": "Salutation",
"slug": "salutation",
"field_type": "django.forms.fields.ChoiceField",
"label": "Salutation",
"required": False
},
"name": {
"title": "Name",
"slug": "name",
"field_type": "django.forms.fields.CharField",
"label": "Full Name",
"required": True
},
"email_address": {
"title": "Email Address",
"slug": "email-address",
"field_type": "django.forms.fields.EmailField",
"label": "Email",
"help_text": "The email you would like info to be sent to"
},
"accept_terms": {
"title": "Accept Terms",
"slug": "accept-terms",
"field_type": "django.forms.fields.BooleanField",
"label": "Do you accept the terms and conditions",
"required": False
},
"to_email": {
"title": "To Email",
"slug": "to-email",
"field_type": "django.forms.fields.CharField",
"widget": "django.forms.widgets.HiddenInput",
"initial": "dev@praekelt.com",
"required": True
},
"id_copy": {
"title": "ID Copy",
"slug": "id-copy",
"field_type": "django.forms.fields.FileField",
"required": True
},
"upload_to": {
"title": "Upload To",
"slug": "upload-to",
"field_type": "django.forms.fields.CharField",
"widget": "django.forms.widgets.HiddenInput",
"initial": "uploads/test",
"required": True
},
"subject": {
"title": "Subject",
"slug": "subject",
"field_type": "django.forms.fields.CharField",
"widget": "django.forms.widgets.HiddenInput",
"initial": "Test Email",
"required": True
},
"paragraph": {
"title": "Paragraph",
"slug": "paragraph",
"field_type": "formfactory.fields.ParagraphField",
"paragraph": "**aaaa**"
}
}
count = 0
for key, value in kls.simpleformfield_data.items():
setattr(
kls, "simpleformfield_%s" % key,
models.FormField.objects.create(**value)
)
setattr(kls, "simplefieldgroupthrough_data_%s" % key, {
"field_group": kls.simplefieldgroup,
"field": getattr(kls, "simpleformfield_%s" % key),
"order": count
})
setattr(
kls, "simplefieldgroupthrough_%s" % key,
models.FieldGroupThrough.objects.create(
**getattr(kls, "simplefieldgroupthrough_data_%s" % key)
)
)
count += 1
for salutation in ["Mr", "Mrs", "Dr", "Prof"]:
choice = models.FieldChoice.objects.create(
label=salutation, value=salutation
)
kls.simpleformfield_salutation.choices.add(choice)
kls.loginform_data = {
"title": "Login Form",
"slug": "login-form",
"success_message": "Success",
"failure_message": "Failure",
"submit_button_text": "Login"
}
kls.loginform = models.Form.objects.create(**kls.loginform_data)
kls.loginfieldgroup_data = {
"title": "Field Group 1",
"show_title": True
}
kls.loginfieldgroup = models.FormFieldGroup.objects.create(
**kls.loginfieldgroup_data
)
kls.loginfieldgroupformthrough_data = {
"form": kls.loginform,
"field_group": kls.loginfieldgroup,
"order": 0
}
kls.loginfieldgroupformthrough = models.FieldGroupFormThrough.objects.create(
**kls.loginfieldgroupformthrough_data
)
kls.loginaction_data = {
"action": "formfactory.actions.login"
}
kls.loginaction = models.Action.objects.create(**kls.loginaction_data)
kls.loginactionparam_data = [
{
"key": "username_field",
"value": "username",
"action": kls.loginaction
}, {
"key": "password_field",
"value": "password",
"action": kls.loginaction
}
]
for param in kls.loginactionparam_data:
setattr(
kls, "loginactionparam_%s" % param["key"],
models.ActionParam.objects.create(**param)
)
kls.loginformactionthrough_data = {
"action": kls.loginaction,
"form": kls.loginform,
"order": 0
}
kls.loginformactionthrough = models.FormActionThrough.objects.create(
**kls.loginformactionthrough_data
)
kls.loginformfield_data = {
"username": {
"title": "Username",
"slug": "username",
"field_type": "django.forms.fields.CharField",
"label": "Username",
"required": True
},
"password": {
"title": "Password",
"slug": "password",
"field_type": "django.forms.fields.CharField",
"widget": "django.forms.widgets.PasswordInput",
"label": "Password",
"required": True
}
}
count = 0
for key, value in kls.loginformfield_data.items():
setattr(
kls, "loginformfield_%s" % key,
models.FormField.objects.create(**value)
)
setattr(kls, "loginfieldgroupthrough_data_%s" % key, {
"field_group": kls.loginfieldgroup,
"field": getattr(kls, "loginformfield_%s" % key),
"order": count
})
setattr(
kls, "loginfieldgroupthrough_%s" % key,
models.FieldGroupThrough.objects.create(
**getattr(kls, "loginfieldgroupthrough_data_%s" % key)
)
)
count += 1
kls.formdata_data = {
"uuid": str(uuid.uuid4()),
"form": kls.form
}
kls.formdata = models.FormData.objects.create(**kls.formdata_data)
kls.formdataitem_data = {
"form_data": kls.formdata,
"form_field": kls.formfield_1,
"value": "Form Data Item Value 1"
}
kls.formdataitem = models.FormDataItem.objects.create(
**kls.formdataitem_data
)
kls.dummy_validator = "formfactory.tests.validators.dummy_validator"
kls.dummy_action = "formfactory.tests.actions.dummy_action"
kls.wizard_data = {
"title": "Test wizard",
"slug": "test-wizard",
"success_message": "Success",
"failure_message": "Failure",
"redirect_to": "/"
}
kls.validator = models.Validator.objects.create(
validator=kls.dummy_validator
)
kls.wizard = models.Wizard.objects.create(**kls.wizard_data)
kls.wizardformthrough_simple = models.WizardFormThrough.objects.create(
wizard=kls.wizard, form=kls.simpleform, order=1
)
kls.wizardformthrough_login = models.WizardFormThrough.objects.create(
wizard=kls.wizard, form=kls.loginform, order=2
)
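# A hedged usage sketch (the TestCase name below is hypothetical, not from the
# repo): load_fixtures() is meant to be called once with a test-case class,
# attaching forms, fields, actions and wizards as class attributes, while
# cleanup_files() removes any uploaded test files afterwards.
from django.test import TestCase
class ExampleFormTestCase(TestCase):
    @classmethod
    def setUpTestData(cls):
        load_fixtures(cls)  # e.g. cls.simpleform, cls.loginform, cls.wizard
    @classmethod
    def tearDownClass(cls):
        cleanup_files()
        super().tearDownClass()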
| true
| true
|
1c466974e1deea98828676448579173ed8d0bcef
| 357
|
py
|
Python
|
rusel/config.py
|
ruslan-ok/ruslan
|
fc402e53d2683581e13f4d6c69a6f21e5c2ca1f8
|
[
"MIT"
] | null | null | null |
rusel/config.py
|
ruslan-ok/ruslan
|
fc402e53d2683581e13f4d6c69a6f21e5c2ca1f8
|
[
"MIT"
] | null | null | null |
rusel/config.py
|
ruslan-ok/ruslan
|
fc402e53d2683581e13f4d6c69a6f21e5c2ca1f8
|
[
"MIT"
] | null | null | null |
from task.const import *
app_config = {
'name': APP_ALL,
'app_title': 'search results',
'icon': 'search',
'role': ROLE_SEARCH_RESULTS,
'sort': [
('name', 'name'),
('created', 'create date'),
],
'views': {
'search': {
'icon': 'search',
'title': 'search results',
},
}
}
| 19.833333
| 38
| 0.456583
|
from task.const import *
app_config = {
'name': APP_ALL,
'app_title': 'search results',
'icon': 'search',
'role': ROLE_SEARCH_RESULTS,
'sort': [
('name', 'name'),
('created', 'create date'),
],
'views': {
'search': {
'icon': 'search',
'title': 'search results',
},
}
}
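# A minimal consumption sketch (the lookup code below is an assumption, not
# part of the file): per-view metadata can fall back to the app-level title
# and icon when a key is absent.
view = app_config['views']['search']
title = view.get('title', app_config['app_title'])
icon = view.get('icon', app_config['icon'])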
| true
| true
|
1c466c2eb51ca67b46ef055458e6f5edc433953f
| 8,546
|
py
|
Python
|
handler.py
|
abizerlokhandwala/Cowin-Notification-Service
|
4fd7fd9c3cfab37502ad4135007a6127ca4cc15f
|
[
"MIT"
] | 14
|
2021-05-07T13:09:03.000Z
|
2022-01-10T23:24:42.000Z
|
handler.py
|
abizerlokhandwala/Cowin-Notification-Service
|
4fd7fd9c3cfab37502ad4135007a6127ca4cc15f
|
[
"MIT"
] | 16
|
2021-05-10T16:41:21.000Z
|
2021-06-09T14:49:03.000Z
|
handler.py
|
abizerlokhandwala/Cowin-Notification-Service
|
4fd7fd9c3cfab37502ad4135007a6127ca4cc15f
|
[
"MIT"
] | 5
|
2021-05-09T12:14:03.000Z
|
2021-06-08T13:56:55.000Z
|
import asyncio
import json
import logging
import random
from datetime import date
import boto3
from helpers.constants import ISSUE_MSG, DB_NAME
from helpers.cowin_sdk import CowinAPI
from helpers.db_handler import DBHandler, get_pin_code_location
from helpers.notificationHandler import NotifHandler
from helpers.queries import USER_PATTERN_MATCH, GET_USER_QUERY, UPDATE_USER_VERIFIED, SUBSCRIBED_DISTRICT_USERS
from helpers.utils import response_handler, get_preference_slots, send_historical_diff, get_event_loop
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def get_states(event, context):
cowin = CowinAPI()
states = cowin.get_states()
return response_handler(states, 200)
def get_districts(event, context):
cowin = CowinAPI()
state_id = event["queryStringParameters"]["state_id"]
districts = cowin.get_districts(state_id)
return response_handler(districts, 200)
def get_centers(event, context):
cowin = CowinAPI()
district_id = event["queryStringParameters"]["district_id"]
date_today = date.today().strftime("%d-%m-%Y")
centers = cowin.get_centers_7(district_id, date_today)
return response_handler(centers, 200)
def get_district_preferences(event, context):
district_id = event["queryStringParameters"]['district_id']
vaccine = event["queryStringParameters"]['vaccine']
age_group = event["queryStringParameters"]['age_group']
return response_handler(get_preference_slots(district_id, vaccine, age_group), 200)
def subscribe(event, context):
body = json.loads(event['body'])
body['email'] = body['email'].strip()
db = DBHandler.get_instance()
notif = NotifHandler()
is_verified, verification_token = db.subscribe(body)
if is_verified == -1:
return response_handler({'message': f'Email Already exists'}, 400)
elif is_verified == -2: # pincode not found
return response_handler({'message': f'Pincode is invalid'}, 400)
additional_comments = ''
if is_verified is False:
notif.send_verification_email(body['email'], True)
additional_comments = f'Please verify your email ID: {body["email"]}'
db.close()
return response_handler({'message': f'Subscribed successfully! {additional_comments}'}, 201)
def unsubscribe(event, context):
user_email = event["queryStringParameters"]["email"]
token = event["queryStringParameters"]["token"]
db = DBHandler.get_instance()
if db.unsubscribe(user_email, token):
logger.info(f'{user_email} unsubscribed')
db.close()
return response_handler({'message': f'Unsubscribed successfully!'}, 200)
else:
db.close()
return response_handler({'message': ISSUE_MSG}, status=400)
def verify_email(event, context):
user_email = event["queryStringParameters"]["email"]
token = event["queryStringParameters"]["token"]
db = DBHandler.get_instance()
user = db.query(GET_USER_QUERY, (user_email,))
if user and int(user[0][3]) == 1:
db.close()
return response_handler({'message': 'User already verified'}, status=200)
if user and user[0][2] == token:
db.insert(UPDATE_USER_VERIFIED, (user_email,))
db.close()
return response_handler({'message': 'Successful Verification'}, status=200)
db.close()
return response_handler({'message': 'Unsuccessful Verification'}, status=403)
def check_district_nums(event, context):
cowin = CowinAPI()
districts = cowin.get_all_districts()
for ind in range(0,1+max(districts)):
if ind not in districts:
print(f'Missing {ind}')
return districts
district_nums = []
def trigger_district_updates(event, context):
global district_nums
# db = DBHandler.get_instance()
# districts = db.candidate_districts()
# db.close()
if not district_nums:
cowin = CowinAPI()
district_nums = cowin.get_all_districts()
client = boto3.client('lambda', region_name='ap-south-1')
UPDATE_FUNCTION_NAME = 'cowin-notification-service-dev-update_district_slots'
batch = []
for district in district_nums:
if district:
batch.append(district)
if len(batch) >= 10:
client.invoke(FunctionName=UPDATE_FUNCTION_NAME,
InvocationType='Event', Payload=json.dumps({'districts': batch}))
batch.clear()
if len(batch) > 0:
client.invoke(FunctionName=UPDATE_FUNCTION_NAME,
InvocationType='Event', Payload=json.dumps({'districts': batch}))
return response_handler({}, 200)
def update_district_slots(event, context):
# logger.info(f"IP: {requests.get('https://api.ipify.org').text}")
district_ids = event['districts']
# district_ids = [363]
get_event_loop().run_until_complete(asyncio.gather(*[send_historical_diff(district_id) for district_id in
district_ids]))
return response_handler({'message': f'Districts {district_ids} processed'}, 200)
def notif_dispatcher(event, context):
message = event['message']
# message = {'vaccine':'covishield','age_group':'above_18','district_id':'363','pincode':'411028'}
location = get_pin_code_location(message['pincode'])
db = DBHandler.get_instance()
user_info = [(row[0], row[1]) for row in db.query(USER_PATTERN_MATCH, (
'email', message['age_group'], message['vaccine'], message['dose_1'], message['dose_2'],
message['district_id'], location))]
db.close()
# print(user_info)
# return {}
# logger.info(f'Users to send emails to: {user_info}')
message['age_group'] += '+'
message['age_group'] = message['age_group'].replace('above_', '')
client = boto3.client('lambda', region_name='ap-south-1')
SEND_EMAIL_FUNCTION_NAME = 'cowin-notification-service-dev-send_batch_email'
batch = []
for user in user_info:
if user:
batch.append(user)
if len(batch) >= 20:
client.invoke(FunctionName=SEND_EMAIL_FUNCTION_NAME,
InvocationType='Event', Payload=json.dumps({'users': batch, 'message': message}))
batch.clear()
if len(batch) > 0:
client.invoke(FunctionName=SEND_EMAIL_FUNCTION_NAME,
InvocationType='Event', Payload=json.dumps({'users': batch, 'message': message}))
return response_handler({},200)
def send_batch_email(event, context):
notif = NotifHandler()
users = event['users']
message = event['message']
notif.send_template_emails(users, message)
return response_handler({'message': f'All notifs processed'}, 200)
def test_email(event, context):
notif = NotifHandler()
notif.send_template_emails(
[('abizerL123@gmail.com', 'abc'), ('sharangpai123@gmail.com', 'abc'), ('pujan.iceman@gmail.com', 'abc'),
('shloksingh10@gmail.com', 'abc'), ('arsenal.arpit11@gmail.com', 'abc')], {
'center_name': 'test_center',
'slots': '[1-2]',
'district_name': 'test_district',
'date': '1-1-1',
'age_group': '45',
'vaccine': 'covishield',
'address': 'abc, pqr, xyz',
'pincode': '411040',
'capacity': '40',
'capacity_dose_1': '20',
'capacity_dose_2': '20',
'fee_amount': '₹200'
})
return
def notify_pincode_email(event, context):
db = DBHandler.get_instance()
user_info = [(row[0], row[1]) for row in db.query(SUBSCRIBED_DISTRICT_USERS, (
        'email',))]
db.close()
notif = NotifHandler()
notif.send_pincode_one_time_email(user_info)
return
def test_email_pincode(event, context):
notif = NotifHandler()
notif.send_pincode_one_time_email(
[('abizerL123@gmail.com', 'abc'), ('sharangpai123@gmail.com', 'abc'), ('pujan.iceman@gmail.com', 'abc'),
('shloksingh10@gmail.com', 'abc'), ('arsenal.arpit11@gmail.com', 'abc')])
return
def send_verification_email_manual(event, context):
db = DBHandler.get_instance()
users = db.query(f"SELECT email FROM {DB_NAME}.users where id>=%s and is_verified = 0",(4923,))
db.close()
notif = NotifHandler()
for user in users:
notif.send_verification_email(user[0], False)
return response_handler({'message': f'Sent'}, 200)
def poller_service_endpoint(event, context):
body = event['body']
logger.info(body)
return response_handler({'message': 'success'},200)
| 38.845455
| 118
| 0.662064
|
import asyncio
import json
import logging
import random
from datetime import date
import boto3
from helpers.constants import ISSUE_MSG, DB_NAME
from helpers.cowin_sdk import CowinAPI
from helpers.db_handler import DBHandler, get_pin_code_location
from helpers.notificationHandler import NotifHandler
from helpers.queries import USER_PATTERN_MATCH, GET_USER_QUERY, UPDATE_USER_VERIFIED, SUBSCRIBED_DISTRICT_USERS
from helpers.utils import response_handler, get_preference_slots, send_historical_diff, get_event_loop
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def get_states(event, context):
cowin = CowinAPI()
states = cowin.get_states()
return response_handler(states, 200)
def get_districts(event, context):
cowin = CowinAPI()
state_id = event["queryStringParameters"]["state_id"]
districts = cowin.get_districts(state_id)
return response_handler(districts, 200)
def get_centers(event, context):
cowin = CowinAPI()
district_id = event["queryStringParameters"]["district_id"]
date_today = date.today().strftime("%d-%m-%Y")
centers = cowin.get_centers_7(district_id, date_today)
return response_handler(centers, 200)
def get_district_preferences(event, context):
district_id = event["queryStringParameters"]['district_id']
vaccine = event["queryStringParameters"]['vaccine']
age_group = event["queryStringParameters"]['age_group']
return response_handler(get_preference_slots(district_id, vaccine, age_group), 200)
def subscribe(event, context):
body = json.loads(event['body'])
body['email'] = body['email'].strip()
db = DBHandler.get_instance()
notif = NotifHandler()
is_verified, verification_token = db.subscribe(body)
if is_verified == -1:
return response_handler({'message': f'Email Already exists'}, 400)
elif is_verified == -2:
return response_handler({'message': f'Pincode is invalid'}, 400)
additional_comments = ''
if is_verified is False:
notif.send_verification_email(body['email'], True)
additional_comments = f'Please verify your email ID: {body["email"]}'
db.close()
return response_handler({'message': f'Subscribed successfully! {additional_comments}'}, 201)
def unsubscribe(event, context):
user_email = event["queryStringParameters"]["email"]
token = event["queryStringParameters"]["token"]
db = DBHandler.get_instance()
if db.unsubscribe(user_email, token):
logger.info(f'{user_email} unsubscribed')
db.close()
return response_handler({'message': f'Unsubscribed successfully!'}, 200)
else:
db.close()
return response_handler({'message': ISSUE_MSG}, status=400)
def verify_email(event, context):
user_email = event["queryStringParameters"]["email"]
token = event["queryStringParameters"]["token"]
db = DBHandler.get_instance()
user = db.query(GET_USER_QUERY, (user_email,))
if user and int(user[0][3]) == 1:
db.close()
return response_handler({'message': 'User already verified'}, status=200)
if user and user[0][2] == token:
db.insert(UPDATE_USER_VERIFIED, (user_email,))
db.close()
return response_handler({'message': 'Successful Verification'}, status=200)
db.close()
return response_handler({'message': 'Unsuccessful Verification'}, status=403)
def check_district_nums(event, context):
cowin = CowinAPI()
districts = cowin.get_all_districts()
for ind in range(0,1+max(districts)):
if ind not in districts:
print(f'Missing {ind}')
return districts
district_nums = []
def trigger_district_updates(event, context):
global district_nums
if not district_nums:
cowin = CowinAPI()
district_nums = cowin.get_all_districts()
client = boto3.client('lambda', region_name='ap-south-1')
UPDATE_FUNCTION_NAME = 'cowin-notification-service-dev-update_district_slots'
batch = []
for district in district_nums:
if district:
batch.append(district)
if len(batch) >= 10:
client.invoke(FunctionName=UPDATE_FUNCTION_NAME,
InvocationType='Event', Payload=json.dumps({'districts': batch}))
batch.clear()
if len(batch) > 0:
client.invoke(FunctionName=UPDATE_FUNCTION_NAME,
InvocationType='Event', Payload=json.dumps({'districts': batch}))
return response_handler({}, 200)
def update_district_slots(event, context):
district_ids = event['districts']
get_event_loop().run_until_complete(asyncio.gather(*[send_historical_diff(district_id) for district_id in
district_ids]))
return response_handler({'message': f'Districts {district_ids} processed'}, 200)
def notif_dispatcher(event, context):
message = event['message']
location = get_pin_code_location(message['pincode'])
db = DBHandler.get_instance()
user_info = [(row[0], row[1]) for row in db.query(USER_PATTERN_MATCH, (
'email', message['age_group'], message['vaccine'], message['dose_1'], message['dose_2'],
message['district_id'], location))]
db.close()
message['age_group'] += '+'
message['age_group'] = message['age_group'].replace('above_', '')
client = boto3.client('lambda', region_name='ap-south-1')
SEND_EMAIL_FUNCTION_NAME = 'cowin-notification-service-dev-send_batch_email'
batch = []
for user in user_info:
if user:
batch.append(user)
if len(batch) >= 20:
client.invoke(FunctionName=SEND_EMAIL_FUNCTION_NAME,
InvocationType='Event', Payload=json.dumps({'users': batch, 'message': message}))
batch.clear()
if len(batch) > 0:
client.invoke(FunctionName=SEND_EMAIL_FUNCTION_NAME,
InvocationType='Event', Payload=json.dumps({'users': batch, 'message': message}))
return response_handler({},200)
def send_batch_email(event, context):
notif = NotifHandler()
users = event['users']
message = event['message']
notif.send_template_emails(users, message)
return response_handler({'message': f'All notifs processed'}, 200)
def test_email(event, context):
notif = NotifHandler()
notif.send_template_emails(
[('abizerL123@gmail.com', 'abc'), ('sharangpai123@gmail.com', 'abc'), ('pujan.iceman@gmail.com', 'abc'),
('shloksingh10@gmail.com', 'abc'), ('arsenal.arpit11@gmail.com', 'abc')], {
'center_name': 'test_center',
'slots': '[1-2]',
'district_name': 'test_district',
'date': '1-1-1',
'age_group': '45',
'vaccine': 'covishield',
'address': 'abc, pqr, xyz',
'pincode': '411040',
'capacity': '40',
'capacity_dose_1': '20',
'capacity_dose_2': '20',
'fee_amount': '₹200'
})
return
def notify_pincode_email(event, context):
db = DBHandler.get_instance()
user_info = [(row[0], row[1]) for row in db.query(SUBSCRIBED_DISTRICT_USERS, (
        'email',))]
db.close()
notif = NotifHandler()
notif.send_pincode_one_time_email(user_info)
return
def test_email_pincode(event, context):
notif = NotifHandler()
notif.send_pincode_one_time_email(
[('abizerL123@gmail.com', 'abc'), ('sharangpai123@gmail.com', 'abc'), ('pujan.iceman@gmail.com', 'abc'),
('shloksingh10@gmail.com', 'abc'), ('arsenal.arpit11@gmail.com', 'abc')])
return
def send_verification_email_manual(event, context):
db = DBHandler.get_instance()
users = db.query(f"SELECT email FROM {DB_NAME}.users where id>=%s and is_verified = 0",(4923,))
db.close()
notif = NotifHandler()
for user in users:
notif.send_verification_email(user[0], False)
return response_handler({'message': f'Sent'}, 200)
def poller_service_endpoint(event, context):
body = event['body']
logger.info(body)
return response_handler({'message': 'success'},200)
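# A hedged generalisation of the fan-out pattern used by
# trigger_district_updates() and notif_dispatcher() above: buffer items into
# fixed-size batches and invoke a worker Lambda asynchronously per batch.
# fan_out() and its parameters are illustrative, not part of the service.
import json
import boto3
def fan_out(items, function_name, payload_key, batch_size):
    client = boto3.client('lambda', region_name='ap-south-1')
    batch = []
    for item in items:
        if not item:
            continue
        batch.append(item)
        if len(batch) >= batch_size:  # flush a full batch
            client.invoke(FunctionName=function_name, InvocationType='Event',
                          Payload=json.dumps({payload_key: batch}))
            batch = []
    if batch:  # flush the remainder
        client.invoke(FunctionName=function_name, InvocationType='Event',
                      Payload=json.dumps({payload_key: batch}))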
| true
| true
|
1c466c5148c6b829d6eaaf73cbd026659824bb69
| 1,098
|
py
|
Python
|
2021/25/day25.py
|
AlbertVeli/AdventOfCode
|
3d3473695318a0686fac720a1a21dd3629f09e33
|
[
"Unlicense"
] | null | null | null |
2021/25/day25.py
|
AlbertVeli/AdventOfCode
|
3d3473695318a0686fac720a1a21dd3629f09e33
|
[
"Unlicense"
] | null | null | null |
2021/25/day25.py
|
AlbertVeli/AdventOfCode
|
3d3473695318a0686fac720a1a21dd3629f09e33
|
[
"Unlicense"
] | 1
|
2021-12-04T10:37:09.000Z
|
2021-12-04T10:37:09.000Z
|
#!/usr/bin/env python3
import sys
import numpy as np
if len(sys.argv) != 2:
print('Usage:', sys.argv[0], '<input.txt>')
sys.exit(1)
a = []
for line in open(sys.argv[1]):
a.append(list(line.rstrip()))
a = np.array(a)
sy, sx = a.shape
# Could probably move without copying a
# but logic is simpler if keeping a copy
def move(a):
moved = False
# Move right
a_org = np.array(a)
for y in range(sy):
ny = y
for x in range(sx):
if a_org[y][x] != '>':
continue
nx = (x + 1) % sx
if a_org[ny][nx] == '.':
a[ny][nx] = '>'
a[y][x] = '.'
moved = True
# Move down
a_org = np.array(a)
for y in range(sy):
ny = (y + 1) % sy
for x in range(sx):
nx = x
if a_org[y][x] != 'v':
continue
if a_org[ny][nx] == '.':
a[ny][nx] = 'v'
a[y][x] = '.'
moved = True
return moved
moves = 0
while move(a):
moves += 1
#print(a)
print(moves + 1)
| 21.115385
| 47
| 0.441712
|
import sys
import numpy as np
if len(sys.argv) != 2:
print('Usage:', sys.argv[0], '<input.txt>')
sys.exit(1)
a = []
for line in open(sys.argv[1]):
a.append(list(line.rstrip()))
a = np.array(a)
sy, sx = a.shape
def move(a):
moved = False
a_org = np.array(a)
for y in range(sy):
ny = y
for x in range(sx):
if a_org[y][x] != '>':
continue
nx = (x + 1) % sx
if a_org[ny][nx] == '.':
a[ny][nx] = '>'
a[y][x] = '.'
moved = True
a_org = np.array(a)
for y in range(sy):
ny = (y + 1) % sy
for x in range(sx):
nx = x
if a_org[y][x] != 'v':
continue
if a_org[ny][nx] == '.':
a[ny][nx] = 'v'
a[y][x] = '.'
moved = True
return moved
moves = 0
while move(a):
moves += 1
print(moves + 1)
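# A self-contained restatement of one step (step_once() is an assumed helper,
# not part of the solution): each herd moves against a snapshot so the whole
# herd decides simultaneously, and positions wrap via modulo arithmetic.
import numpy as np
def step_once(grid):
    sy, sx = grid.shape
    for herd, dy, dx in (('>', 0, 1), ('v', 1, 0)):  # east herd, then south
        snap = grid.copy()
        for y in range(sy):
            for x in range(sx):
                if snap[y][x] != herd:
                    continue
                ny, nx = (y + dy) % sy, (x + dx) % sx
                if snap[ny][nx] == '.':
                    grid[ny][nx] = herd
                    grid[y][x] = '.'
    return grid
g = np.array([list('.>'), list('v.')])
step_once(g)
# The east cucumber wraps to column 0; the south cucumber is then blocked.
assert ''.join(g[0]) == '>.' and ''.join(g[1]) == 'v.'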
| true
| true
|
1c466c8c0bd43fad411e356a53e99baf9f31c048
| 987
|
py
|
Python
|
2019/25_Josepus_Survivor/my_solution.py
|
erik-kristofer-anderson/codewars
|
fda780f40d1a2d8c5210cfd6ccf81148444bc9e8
|
[
"MIT"
] | null | null | null |
2019/25_Josepus_Survivor/my_solution.py
|
erik-kristofer-anderson/codewars
|
fda780f40d1a2d8c5210cfd6ccf81148444bc9e8
|
[
"MIT"
] | 1
|
2019-07-27T15:42:25.000Z
|
2019-07-27T15:42:25.000Z
|
2019/25_Josepus_Survivor/my_solution.py
|
erik-kristofer-anderson/Codewars
|
fda780f40d1a2d8c5210cfd6ccf81148444bc9e8
|
[
"MIT"
] | null | null | null |
def josephus_survivor(n,k):
my_array = list(range(1, n+1))
# print(my_array)
i = 0
while len(my_array) > 1:
length = len(my_array)
# print(my_array)
# print(length)
# print('i', i)
while not i < length:
i -= length
i += k - 1
while not i < length:
i -= length
# print(my_array)
# print('length', length)
# print('i', i)
_ = my_array.pop(i)
# print('pop out', _)
# print(my_array)
# print()
# if i < length:
# i += k
# if i >= length:
# i -= length
# result = my_array.pop(i)
# print(result)
# print(my_array)
# if i > length -1:
# i -= length
# # print('result', result)
return my_array[0]
# n, k = (7,3) # 4 expected
# print (josephus_survivor(n, k))
n, k = (11,19) # 10 expected
print (josephus_survivor(n, k))
| 22.953488
| 38
| 0.449848
|
def josephus_survivor(n,k):
my_array = list(range(1, n+1))
i = 0
while len(my_array) > 1:
length = len(my_array)
while not i < length:
i -= length
i += k - 1
while not i < length:
i -= length
_ = my_array.pop(i)
    return my_array[0]
n, k = (11,19)
print (josephus_survivor(n, k))
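# An alternative closed-form cross-check (josephus_survivor_rec() is an
# assumed helper, not part of the kata solution): the standard Josephus
# recurrence J(1) = 0, J(m) = (J(m - 1) + k) % m, converted to 1-based.
def josephus_survivor_rec(n, k):
    pos = 0
    for m in range(2, n + 1):
        pos = (pos + k) % m
    return pos + 1
assert josephus_survivor_rec(7, 3) == 4     # the kata's first expected value
assert josephus_survivor_rec(11, 19) == 10  # matches the expected value above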
| true
| true
|
1c466d37b86970653faf38b62fdf0da523eb0c8b
| 284
|
py
|
Python
|
src/Distiller/textbrewer/distillers.py
|
haroldNLP/Distiller
|
f3ab5f94a9092fca1e2bdb9f486e66fd0b24bcfd
|
[
"MIT"
] | 2
|
2022-03-21T08:02:02.000Z
|
2022-03-21T08:29:07.000Z
|
src/Distiller/textbrewer/distillers.py
|
haroldNLP/Distiller
|
f3ab5f94a9092fca1e2bdb9f486e66fd0b24bcfd
|
[
"MIT"
] | null | null | null |
src/Distiller/textbrewer/distillers.py
|
haroldNLP/Distiller
|
f3ab5f94a9092fca1e2bdb9f486e66fd0b24bcfd
|
[
"MIT"
] | null | null | null |
from .distiller_train import BasicTrainer
from .distiller_basic import BasicDistiller
from .distiller_general import GeneralDistiller
from .distiller_multitask import MultiTaskDistiller
from .distiller_multiteacher import MultiTeacherDistiller
from .distiller_emd import EMDDistiller
| 40.571429
| 57
| 0.894366
|
from .distiller_train import BasicTrainer
from .distiller_basic import BasicDistiller
from .distiller_general import GeneralDistiller
from .distiller_multitask import MultiTaskDistiller
from .distiller_multiteacher import MultiTeacherDistiller
from .distiller_emd import EMDDistiller
| true
| true
|
1c466db29b0f06ae975488369e9165135775e8e7
| 4,842
|
py
|
Python
|
src/models/sp_classifier_model_test.py
|
bartek-wojcik/graph_agregations
|
12305e4ffdf4db60da041689f04d96b48e9e72e5
|
[
"MIT"
] | null | null | null |
src/models/sp_classifier_model_test.py
|
bartek-wojcik/graph_agregations
|
12305e4ffdf4db60da041689f04d96b48e9e72e5
|
[
"MIT"
] | null | null | null |
src/models/sp_classifier_model_test.py
|
bartek-wojcik/graph_agregations
|
12305e4ffdf4db60da041689f04d96b48e9e72e5
|
[
"MIT"
] | null | null | null |
from typing import Any, List
import torch
from pytorch_lightning import LightningModule
from pytorch_lightning.metrics.classification import Accuracy
from src.models.modules import gat, jumping_knowledge, graph_sage, gcn, vector_sage_module, jumping_knowledge_test
class SuperpixelClassifierModelTest(LightningModule):
"""LightningModule for image classification from superpixels."""
def __init__(
self,
architecture: str = "GraphSAGE",
aggregation_method: str = "concat",
num_node_features: int = 1,
add_pos_to_features: bool = False,
num_conv_layers: int = 3,
conv_size: int = 128,
lin_size: int = 128,
output_size: int = 10,
lr: float = 0.001,
weight_decay: float = 0,
**kwargs
):
super().__init__()
self.save_hyperparameters()
self.add_pos_to_features = add_pos_to_features
# init network architecture
if self.hparams.architecture == "GraphSAGE":
self.model = graph_sage.GraphSage(hparams=self.hparams)
elif self.hparams.architecture == "GAT":
self.model = gat.GAT(hparams=self.hparams)
elif self.hparams.architecture == "JumpingKnowledge":
self.model = jumping_knowledge_test.JK(hparams=self.hparams)
elif self.hparams.architecture == "GCN":
self.model = gcn.GCN(hparams=self.hparams)
elif self.hparams.architecture == "VectorSAGE":
self.model = vector_sage_module.VectorSAGEModule(hparams=self.hparams)
else:
raise Exception("Incorrect architecture name!")
# loss function
self.criterion = torch.nn.CrossEntropyLoss()
# use separate metric instance for train, val and test step
# to ensure a proper reduction over the epoch
self.train_accuracy = Accuracy()
self.val_accuracy = Accuracy()
self.test_accuracy = Accuracy()
self.metric_hist = {
"train/acc": [],
"val/acc": [],
"train/loss": [],
"val/loss": [],
}
def forward(self, x, edge_index, batch, pos):
return self.model(x, edge_index, batch, pos)
def step(self, data: Any):
x, edge_index, batch, pos, y = data.x, data.edge_index, data.batch, data.pos, data.y
if self.add_pos_to_features:
x = torch.cat((x, pos), 1)
logits = self.forward(x, edge_index, batch, pos)
loss = self.criterion(logits, y)
preds = torch.argmax(logits, dim=1)
return loss, preds, y
def training_step(self, batch: Any, batch_idx: int):
loss, preds, targets = self.step(batch)
acc = self.train_accuracy(preds, targets)
self.log("train/loss", loss, on_step=False, on_epoch=True, prog_bar=False)
self.log("train/acc", acc, on_step=False, on_epoch=True, prog_bar=True)
return {"loss": loss, "preds": preds, "targets": targets}
def training_epoch_end(self, outputs: List[Any]):
# log best so far train acc and train loss
self.metric_hist["train/acc"].append(self.trainer.callback_metrics["train/acc"])
self.metric_hist["train/loss"].append(self.trainer.callback_metrics["train/loss"])
self.log("train/acc_best", max(self.metric_hist["train/acc"]), prog_bar=False)
self.log("train/loss_best", min(self.metric_hist["train/loss"]), prog_bar=False)
def validation_step(self, batch: Any, batch_idx: int):
loss, preds, targets = self.step(batch)
acc = self.val_accuracy(preds, targets)
self.log("val/loss", loss, on_step=False, on_epoch=True, prog_bar=False)
self.log("val/acc", acc, on_step=False, on_epoch=True, prog_bar=True)
return {"loss": loss, "preds": preds, "targets": targets}
def validation_epoch_end(self, outputs: List[Any]):
# log best so far val acc and val loss
self.metric_hist["val/acc"].append(self.trainer.callback_metrics["val/acc"])
self.metric_hist["val/loss"].append(self.trainer.callback_metrics["val/loss"])
self.log("val/acc_best", max(self.metric_hist["val/acc"]), prog_bar=False)
self.log("val/loss_best", min(self.metric_hist["val/loss"]), prog_bar=False)
def test_step(self, batch: Any, batch_idx: int):
loss, preds, targets = self.step(batch)
acc = self.test_accuracy(preds, targets)
self.log("test/loss", loss, on_step=False, on_epoch=True)
self.log("test/acc", acc, on_step=False, on_epoch=True)
return {"loss": loss, "preds": preds, "targets": targets}
def test_epoch_end(self, outputs: List[Any]):
pass
def configure_optimizers(self):
return torch.optim.Adam(
params=self.parameters(), lr=self.hparams.lr, weight_decay=self.hparams.weight_decay,
)
| 42.104348
| 114
| 0.644981
|
from typing import Any, List
import torch
from pytorch_lightning import LightningModule
from pytorch_lightning.metrics.classification import Accuracy
from src.models.modules import gat, jumping_knowledge, graph_sage, gcn, vector_sage_module, jumping_knowledge_test
class SuperpixelClassifierModelTest(LightningModule):
def __init__(
self,
architecture: str = "GraphSAGE",
aggregation_method: str = "concat",
num_node_features: int = 1,
add_pos_to_features: bool = False,
num_conv_layers: int = 3,
conv_size: int = 128,
lin_size: int = 128,
output_size: int = 10,
lr: float = 0.001,
weight_decay: float = 0,
**kwargs
):
super().__init__()
self.save_hyperparameters()
self.add_pos_to_features = add_pos_to_features
if self.hparams.architecture == "GraphSAGE":
self.model = graph_sage.GraphSage(hparams=self.hparams)
elif self.hparams.architecture == "GAT":
self.model = gat.GAT(hparams=self.hparams)
elif self.hparams.architecture == "JumpingKnowledge":
self.model = jumping_knowledge_test.JK(hparams=self.hparams)
elif self.hparams.architecture == "GCN":
self.model = gcn.GCN(hparams=self.hparams)
elif self.hparams.architecture == "VectorSAGE":
self.model = vector_sage_module.VectorSAGEModule(hparams=self.hparams)
else:
raise Exception("Incorrect architecture name!")
self.criterion = torch.nn.CrossEntropyLoss()
self.train_accuracy = Accuracy()
self.val_accuracy = Accuracy()
self.test_accuracy = Accuracy()
self.metric_hist = {
"train/acc": [],
"val/acc": [],
"train/loss": [],
"val/loss": [],
}
def forward(self, x, edge_index, batch, pos):
return self.model(x, edge_index, batch, pos)
def step(self, data: Any):
x, edge_index, batch, pos, y = data.x, data.edge_index, data.batch, data.pos, data.y
if self.add_pos_to_features:
x = torch.cat((x, pos), 1)
logits = self.forward(x, edge_index, batch, pos)
loss = self.criterion(logits, y)
preds = torch.argmax(logits, dim=1)
return loss, preds, y
def training_step(self, batch: Any, batch_idx: int):
loss, preds, targets = self.step(batch)
acc = self.train_accuracy(preds, targets)
self.log("train/loss", loss, on_step=False, on_epoch=True, prog_bar=False)
self.log("train/acc", acc, on_step=False, on_epoch=True, prog_bar=True)
return {"loss": loss, "preds": preds, "targets": targets}
def training_epoch_end(self, outputs: List[Any]):
self.metric_hist["train/acc"].append(self.trainer.callback_metrics["train/acc"])
self.metric_hist["train/loss"].append(self.trainer.callback_metrics["train/loss"])
self.log("train/acc_best", max(self.metric_hist["train/acc"]), prog_bar=False)
self.log("train/loss_best", min(self.metric_hist["train/loss"]), prog_bar=False)
def validation_step(self, batch: Any, batch_idx: int):
loss, preds, targets = self.step(batch)
acc = self.val_accuracy(preds, targets)
self.log("val/loss", loss, on_step=False, on_epoch=True, prog_bar=False)
self.log("val/acc", acc, on_step=False, on_epoch=True, prog_bar=True)
return {"loss": loss, "preds": preds, "targets": targets}
def validation_epoch_end(self, outputs: List[Any]):
self.metric_hist["val/acc"].append(self.trainer.callback_metrics["val/acc"])
self.metric_hist["val/loss"].append(self.trainer.callback_metrics["val/loss"])
self.log("val/acc_best", max(self.metric_hist["val/acc"]), prog_bar=False)
self.log("val/loss_best", min(self.metric_hist["val/loss"]), prog_bar=False)
def test_step(self, batch: Any, batch_idx: int):
loss, preds, targets = self.step(batch)
acc = self.test_accuracy(preds, targets)
self.log("test/loss", loss, on_step=False, on_epoch=True)
self.log("test/acc", acc, on_step=False, on_epoch=True)
return {"loss": loss, "preds": preds, "targets": targets}
def test_epoch_end(self, outputs: List[Any]):
pass
def configure_optimizers(self):
return torch.optim.Adam(
params=self.parameters(), lr=self.hparams.lr, weight_decay=self.hparams.weight_decay,
)
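# A hedged instantiation sketch (hyperparameter values are illustrative, not
# taken from the repo's configs): the backbone is selected by the
# `architecture` string, after which the module trains like any LightningModule.
model = SuperpixelClassifierModelTest(
    architecture="GCN",
    num_node_features=1,
    conv_size=64,
    lin_size=64,
    lr=1e-3,
)
# A pytorch_lightning.Trainer and a datamodule yielding PyG batches with
# x, edge_index, batch, pos and y attributes are assumed for fitting.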
| true
| true
|
1c466dc07772bb7c04ea801073ca12ed12884e52
| 7,794
|
py
|
Python
|
torchvision/datasets/celeba.py
|
kirilkoroves/torchvision-0.3.0
|
39f46d141f6a7ac2b094545c33936ad4500d3c7d
|
[
"BSD-3-Clause"
] | 125
|
2020-06-17T19:58:56.000Z
|
2022-03-28T12:54:43.000Z
|
datasets/celeba.py
|
ANLGBOY/ddim
|
34d640e5180cc5ab378f84af6ed596cb0c810e6c
|
[
"MIT"
] | 6
|
2021-03-19T15:30:28.000Z
|
2022-03-12T00:51:16.000Z
|
datasets/celeba.py
|
ANLGBOY/ddim
|
34d640e5180cc5ab378f84af6ed596cb0c810e6c
|
[
"MIT"
] | 29
|
2020-06-18T19:24:04.000Z
|
2022-03-11T11:20:47.000Z
|
import torch
import os
import PIL
from .vision import VisionDataset
from .utils import download_file_from_google_drive, check_integrity
class CelebA(VisionDataset):
"""`Large-scale CelebFaces Attributes (CelebA) Dataset <http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html>`_ Dataset.
Args:
root (string): Root directory where images are downloaded to.
split (string): One of {'train', 'valid', 'test'}.
Accordingly dataset is selected.
target_type (string or list, optional): Type of target to use, ``attr``, ``identity``, ``bbox``,
or ``landmarks``. Can also be a list to output a tuple with all specified target types.
The targets represent:
``attr`` (np.array shape=(40,) dtype=int): binary (0, 1) labels for attributes
``identity`` (int): label for each person (data points with the same identity are the same person)
``bbox`` (np.array shape=(4,) dtype=int): bounding box (x, y, width, height)
``landmarks`` (np.array shape=(10,) dtype=int): landmark points (lefteye_x, lefteye_y, righteye_x,
righteye_y, nose_x, nose_y, leftmouth_x, leftmouth_y, rightmouth_x, rightmouth_y)
Defaults to ``attr``.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.ToTensor``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
base_folder = "celeba"
    # There currently does not appear to be an easy way to extract 7z in python (without introducing additional
# dependencies). The "in-the-wild" (not aligned+cropped) images are only in 7z, so they are not available
# right now.
file_list = [
# File ID MD5 Hash Filename
("0B7EVK8r0v71pZjFTYXZWM3FlRnM", "00d2c5bc6d35e252742224ab0c1e8fcb", "img_align_celeba.zip"),
# ("0B7EVK8r0v71pbWNEUjJKdDQ3dGc", "b6cd7e93bc7a96c2dc33f819aa3ac651", "img_align_celeba_png.7z"),
# ("0B7EVK8r0v71peklHb0pGdDl6R28", "b6cd7e93bc7a96c2dc33f819aa3ac651", "img_celeba.7z"),
("0B7EVK8r0v71pblRyaVFSWGxPY0U", "75e246fa4810816ffd6ee81facbd244c", "list_attr_celeba.txt"),
("1_ee_0u7vcNLOfNLegJRHmolfH5ICW-XS", "32bd1bd63d3c78cd57e08160ec5ed1e2", "identity_CelebA.txt"),
("0B7EVK8r0v71pbThiMVRxWXZ4dU0", "00566efa6fedff7a56946cd1c10f1c16", "list_bbox_celeba.txt"),
("0B7EVK8r0v71pd0FJY3Blby1HUTQ", "cc24ecafdb5b50baae59b03474781f8c", "list_landmarks_align_celeba.txt"),
# ("0B7EVK8r0v71pTzJIdlJWdHczRlU", "063ee6ddb681f96bc9ca28c6febb9d1a", "list_landmarks_celeba.txt"),
("0B7EVK8r0v71pY0NSMzRuSXJEVkk", "d32c9cbf5e040fd4025c592c306e6668", "list_eval_partition.txt"),
]
def __init__(self, root,
split="train",
target_type="attr",
transform=None, target_transform=None,
download=False):
import pandas
super(CelebA, self).__init__(root)
self.split = split
if isinstance(target_type, list):
self.target_type = target_type
else:
self.target_type = [target_type]
self.transform = transform
self.target_transform = target_transform
if download:
self.download()
if not self._check_integrity():
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
self.transform = transform
self.target_transform = target_transform
if split.lower() == "train":
split = 0
elif split.lower() == "valid":
split = 1
elif split.lower() == "test":
split = 2
else:
raise ValueError('Wrong split entered! Please use split="train" '
'or split="valid" or split="test"')
with open(os.path.join(self.root, self.base_folder, "list_eval_partition.txt"), "r") as f:
splits = pandas.read_csv(f, delim_whitespace=True, header=None, index_col=0)
with open(os.path.join(self.root, self.base_folder, "identity_CelebA.txt"), "r") as f:
self.identity = pandas.read_csv(f, delim_whitespace=True, header=None, index_col=0)
with open(os.path.join(self.root, self.base_folder, "list_bbox_celeba.txt"), "r") as f:
self.bbox = pandas.read_csv(f, delim_whitespace=True, header=1, index_col=0)
with open(os.path.join(self.root, self.base_folder, "list_landmarks_align_celeba.txt"), "r") as f:
self.landmarks_align = pandas.read_csv(f, delim_whitespace=True, header=1)
with open(os.path.join(self.root, self.base_folder, "list_attr_celeba.txt"), "r") as f:
self.attr = pandas.read_csv(f, delim_whitespace=True, header=1)
mask = (splits[1] == split)
self.filename = splits[mask].index.values
self.identity = torch.as_tensor(self.identity[mask].values)
self.bbox = torch.as_tensor(self.bbox[mask].values)
self.landmarks_align = torch.as_tensor(self.landmarks_align[mask].values)
self.attr = torch.as_tensor(self.attr[mask].values)
self.attr = (self.attr + 1) // 2 # map from {-1, 1} to {0, 1}
def _check_integrity(self):
for (_, md5, filename) in self.file_list:
fpath = os.path.join(self.root, self.base_folder, filename)
_, ext = os.path.splitext(filename)
# Allow original archive to be deleted (zip and 7z)
# Only need the extracted images
if ext not in [".zip", ".7z"] and not check_integrity(fpath, md5):
return False
# Should check a hash of the images
return os.path.isdir(os.path.join(self.root, self.base_folder, "img_align_celeba"))
def download(self):
import zipfile
if self._check_integrity():
print('Files already downloaded and verified')
return
for (file_id, md5, filename) in self.file_list:
download_file_from_google_drive(file_id, os.path.join(self.root, self.base_folder), filename, md5)
with zipfile.ZipFile(os.path.join(self.root, self.base_folder, "img_align_celeba.zip"), "r") as f:
f.extractall(os.path.join(self.root, self.base_folder))
def __getitem__(self, index):
X = PIL.Image.open(os.path.join(self.root, self.base_folder, "img_align_celeba", self.filename[index]))
target = []
for t in self.target_type:
if t == "attr":
target.append(self.attr[index, :])
elif t == "identity":
target.append(self.identity[index, 0])
elif t == "bbox":
target.append(self.bbox[index, :])
elif t == "landmarks":
target.append(self.landmarks_align[index, :])
else:
raise ValueError("Target type \"{}\" is not recognized.".format(t))
target = tuple(target) if len(target) > 1 else target[0]
if self.transform is not None:
X = self.transform(X)
if self.target_transform is not None:
target = self.target_transform(target)
return X, target
def __len__(self):
return len(self.attr)
def extra_repr(self):
lines = ["Target type: {target_type}", "Split: {split}"]
return '\n'.join(lines).format(**self.__dict__)
| 47.52439
| 120
| 0.628689
|
import torch
import os
import PIL
from .vision import VisionDataset
from .utils import download_file_from_google_drive, check_integrity
class CelebA(VisionDataset):
base_folder = "celeba"
file_list = [
("0B7EVK8r0v71pZjFTYXZWM3FlRnM", "00d2c5bc6d35e252742224ab0c1e8fcb", "img_align_celeba.zip"),
("0B7EVK8r0v71pblRyaVFSWGxPY0U", "75e246fa4810816ffd6ee81facbd244c", "list_attr_celeba.txt"),
("1_ee_0u7vcNLOfNLegJRHmolfH5ICW-XS", "32bd1bd63d3c78cd57e08160ec5ed1e2", "identity_CelebA.txt"),
("0B7EVK8r0v71pbThiMVRxWXZ4dU0", "00566efa6fedff7a56946cd1c10f1c16", "list_bbox_celeba.txt"),
("0B7EVK8r0v71pd0FJY3Blby1HUTQ", "cc24ecafdb5b50baae59b03474781f8c", "list_landmarks_align_celeba.txt"),
("0B7EVK8r0v71pY0NSMzRuSXJEVkk", "d32c9cbf5e040fd4025c592c306e6668", "list_eval_partition.txt"),
]
def __init__(self, root,
split="train",
target_type="attr",
transform=None, target_transform=None,
download=False):
import pandas
super(CelebA, self).__init__(root)
self.split = split
if isinstance(target_type, list):
self.target_type = target_type
else:
self.target_type = [target_type]
self.transform = transform
self.target_transform = target_transform
if download:
self.download()
if not self._check_integrity():
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
self.transform = transform
self.target_transform = target_transform
if split.lower() == "train":
split = 0
elif split.lower() == "valid":
split = 1
elif split.lower() == "test":
split = 2
else:
raise ValueError('Wrong split entered! Please use split="train" '
'or split="valid" or split="test"')
with open(os.path.join(self.root, self.base_folder, "list_eval_partition.txt"), "r") as f:
splits = pandas.read_csv(f, delim_whitespace=True, header=None, index_col=0)
with open(os.path.join(self.root, self.base_folder, "identity_CelebA.txt"), "r") as f:
self.identity = pandas.read_csv(f, delim_whitespace=True, header=None, index_col=0)
with open(os.path.join(self.root, self.base_folder, "list_bbox_celeba.txt"), "r") as f:
self.bbox = pandas.read_csv(f, delim_whitespace=True, header=1, index_col=0)
with open(os.path.join(self.root, self.base_folder, "list_landmarks_align_celeba.txt"), "r") as f:
self.landmarks_align = pandas.read_csv(f, delim_whitespace=True, header=1)
with open(os.path.join(self.root, self.base_folder, "list_attr_celeba.txt"), "r") as f:
self.attr = pandas.read_csv(f, delim_whitespace=True, header=1)
mask = (splits[1] == split)
self.filename = splits[mask].index.values
self.identity = torch.as_tensor(self.identity[mask].values)
self.bbox = torch.as_tensor(self.bbox[mask].values)
self.landmarks_align = torch.as_tensor(self.landmarks_align[mask].values)
self.attr = torch.as_tensor(self.attr[mask].values)
self.attr = (self.attr + 1) // 2
def _check_integrity(self):
for (_, md5, filename) in self.file_list:
fpath = os.path.join(self.root, self.base_folder, filename)
_, ext = os.path.splitext(filename)
if ext not in [".zip", ".7z"] and not check_integrity(fpath, md5):
return False
return os.path.isdir(os.path.join(self.root, self.base_folder, "img_align_celeba"))
def download(self):
import zipfile
if self._check_integrity():
print('Files already downloaded and verified')
return
for (file_id, md5, filename) in self.file_list:
download_file_from_google_drive(file_id, os.path.join(self.root, self.base_folder), filename, md5)
with zipfile.ZipFile(os.path.join(self.root, self.base_folder, "img_align_celeba.zip"), "r") as f:
f.extractall(os.path.join(self.root, self.base_folder))
def __getitem__(self, index):
X = PIL.Image.open(os.path.join(self.root, self.base_folder, "img_align_celeba", self.filename[index]))
target = []
for t in self.target_type:
if t == "attr":
target.append(self.attr[index, :])
elif t == "identity":
target.append(self.identity[index, 0])
elif t == "bbox":
target.append(self.bbox[index, :])
elif t == "landmarks":
target.append(self.landmarks_align[index, :])
else:
raise ValueError("Target type \"{}\" is not recognized.".format(t))
target = tuple(target) if len(target) > 1 else target[0]
if self.transform is not None:
X = self.transform(X)
if self.target_transform is not None:
target = self.target_transform(target)
return X, target
def __len__(self):
return len(self.attr)
def extra_repr(self):
lines = ["Target type: {target_type}", "Split: {split}"]
return '\n'.join(lines).format(**self.__dict__)
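# A minimal usage sketch (the root path and transform are placeholders, not
# part of the module): load the aligned training split and read one sample.
from torchvision import transforms
celeba = CelebA(root="./data", split="train", target_type="attr",
                transform=transforms.ToTensor(), download=True)
image, attr = celeba[0]  # attr is a length-40 tensor of {0, 1} labels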
| true
| true
|
1c466ddb0d67ce19e3f6d28c4f2f173c575e35e2
| 16,649
|
py
|
Python
|
netapp/santricity/api/symbol/p_api.py
|
NetApp/santricity-webapi-pythonsdk
|
1d3df4a00561192f4cdcdd1890f4d27547ed2de2
|
[
"BSD-3-Clause-Clear"
] | 5
|
2016-08-23T17:52:22.000Z
|
2019-05-16T08:45:30.000Z
|
netapp/santricity/api/symbol/p_api.py
|
NetApp/santricity-webapi-pythonsdk
|
1d3df4a00561192f4cdcdd1890f4d27547ed2de2
|
[
"BSD-3-Clause-Clear"
] | 2
|
2016-11-10T05:30:21.000Z
|
2019-04-05T15:03:37.000Z
|
netapp/santricity/api/symbol/p_api.py
|
NetApp/santricity-webapi-pythonsdk
|
1d3df4a00561192f4cdcdd1890f4d27547ed2de2
|
[
"BSD-3-Clause-Clear"
] | 7
|
2016-08-25T16:11:44.000Z
|
2021-02-22T05:31:25.000Z
|
#!/usr/bin/env python
# coding: utf-8
"""
PApi.py
The Clear BSD License
Copyright (c) – 2016, NetApp, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of NetApp, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import absolute_import
import sys
import os
# python 2 and python 3 compatibility library
from six import iteritems
from ....santricity.configuration import Configuration
from ....santricity.api_client import ApiClient
class PApi(object):
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient(context_path='/devmgr/v2')
self.api_client = config.api_client
def symbol_ping_controller(self, system_id, **kwargs):
"""
This procedure simply verifies that the controller is responsive and is operating properly.
Documented return codes: ok.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.symbol_ping_controller(system_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str system_id: The unique identifier of the storage-system. This may be the id or the WWN. (required)
:param str controller: Controller selection
:param bool verbose_error_response:
:return: str
If the method is called asynchronously,
returns the request thread.
:raises: ValueError
If the required params are not provided or if the response data format is unknown.
TypeError:
When the data type of response data is different from what we are expecting
ApiException:
Occurs when we get a HTTP error code (422 and above).
"""
all_params = ['system_id', 'controller', 'verbose_error_response']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method symbol_ping_controller" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'system_id' is set
if ('system_id' not in params) or (params['system_id'] is None):
raise ValueError("Missing the required parameter `system_id` when calling `symbol_ping_controller`")
resource_path = '/storage-systems/{system-id}/symbol/pingController'.replace('{format}', 'json')
path_params = {}
if 'system_id' in params:
path_params['system-id'] = params['system_id']
query_params = {}
if 'controller' in params:
query_params['controller'] = params['controller']
if 'verbose_error_response' in params:
query_params['verboseErrorResponse'] = params['verbose_error_response']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def symbol_power_cycle_physical_drive(self, system_id, body, **kwargs):
"""
This procedure is used to power cycle an individual physical drive.
Documented return codes: ok, driveNotUnassigned, volumeReconfiguring, volumeNotOptimal, downloadInProgress, parityScanInProgress, volumeGroupNotComplete, dpcVolumeGroupNotRedundant, dpcVolumeNotInitialized, dpcExclusiveOperationActive, dpcFormatActive, dpcUnreadableSectorsPresent, dpcPowerCycleAlreadyInProgress, dpcEnclosureHardwareUnsupported, dpcEnclosureFwDownlevel.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.symbol_power_cycle_physical_drive(system_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str system_id: The unique identifier of the storage-system. This may be the id or the WWN. (required)
:param PowerCyclePhysicalDriveDescriptor body: The descriptor for the drive to be power cycled. (required)
:param str controller: Controller selection
:param bool verbose_error_response:
:return: PowerCyclePhysicalDriveDataReturn
If the method is called asynchronously,
returns the request thread.
:raises: ValueError
If the required params are not provided or if the response data format is unknown.
TypeError:
When the data type of response data is different from what we are expecting
ApiException:
Occurs when we get a HTTP error code (422 and above).
"""
all_params = ['system_id', 'body', 'controller', 'verbose_error_response']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method symbol_power_cycle_physical_drive" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'system_id' is set
if ('system_id' not in params) or (params['system_id'] is None):
raise ValueError("Missing the required parameter `system_id` when calling `symbol_power_cycle_physical_drive`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `symbol_power_cycle_physical_drive`")
resource_path = '/storage-systems/{system-id}/symbol/powerCyclePhysicalDrive'.replace('{format}', 'json')
path_params = {}
if 'system_id' in params:
path_params['system-id'] = params['system_id']
query_params = {}
if 'controller' in params:
query_params['controller'] = params['controller']
if 'verbose_error_response' in params:
query_params['verboseErrorResponse'] = params['verbose_error_response']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PowerCyclePhysicalDriveDataReturn',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def symbol_power_down_array(self, system_id, **kwargs):
"""
        This command provides a programmatic means of powering off a storage array. The operation is performed as gracefully as possible. Once this command is received, all open sockets, except for those that have in-process commands, are closed, meaning that any new SYMbol commands attempted will receive an RPC error. SYMbol commands that are in-process when this command is received are allowed to continue execution. In-process SYMbol commands in the \"active\" category are guaranteed to complete; in-process commands in the \"passive\" category may complete, but there is no guarantee. This command returns and reports status just prior to the actual power down event. Authentication is required for this command.
Documented return codes: ok, noHeap, background, cacheSyncFailure, quiescenceFailed, controllerInServiceMode.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.symbol_power_down_array(system_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str system_id: The unique identifier of the storage-system. This may be the id or the WWN. (required)
:param str controller: Controller selection
:param bool verbose_error_response:
:return: str
If the method is called asynchronously,
returns the request thread.
:raises: ValueError
If the required params are not provided or if the response data format is unknown.
TypeError:
When the data type of response data is different from what we are expecting
ApiException:
Occurs when we get a HTTP error code (422 and above).
"""
all_params = ['system_id', 'controller', 'verbose_error_response']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method symbol_power_down_array" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'system_id' is set
if ('system_id' not in params) or (params['system_id'] is None):
raise ValueError("Missing the required parameter `system_id` when calling `symbol_power_down_array`")
resource_path = '/storage-systems/{system-id}/symbol/powerDownArray'.replace('{format}', 'json')
path_params = {}
if 'system_id' in params:
path_params['system-id'] = params['system_id']
query_params = {}
if 'controller' in params:
query_params['controller'] = params['controller']
if 'verbose_error_response' in params:
query_params['verboseErrorResponse'] = params['verbose_error_response']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
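# ---------------------------------------------------------------------------
# Editor's sketch, not part of the generated SDK file: how the endpoint
# methods above are typically driven. The system id and controller letter are
# placeholders, and the calls only succeed against a live SANtricity web
# services proxy, so the sketch stays commented out.
#
#     api = PApi()  # builds a default ApiClient against '/devmgr/v2'
#
#     # synchronous: blocks until the controller answers
#     result = api.symbol_ping_controller('example-system-id', controller='a')
#
#     # asynchronous: returns the request thread and hands the parsed
#     # response to the callback when it arrives
#     thread = api.symbol_power_down_array('example-system-id',
#                                          callback=lambda resp: print(resp))
#     thread.join()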
| 41.210396
| 845
| 0.568983
|
from __future__ import absolute_import
import sys
import os
from six import iteritems
from ....santricity.configuration import Configuration
from ....santricity.api_client import ApiClient
class PApi(object):
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient(context_path='/devmgr/v2')
self.api_client = config.api_client
def symbol_ping_controller(self, system_id, **kwargs):
all_params = ['system_id', 'controller', 'verbose_error_response']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method symbol_ping_controller" % key
)
params[key] = val
del params['kwargs']
if ('system_id' not in params) or (params['system_id'] is None):
raise ValueError("Missing the required parameter `system_id` when calling `symbol_ping_controller`")
resource_path = '/storage-systems/{system-id}/symbol/pingController'.replace('{format}', 'json')
path_params = {}
if 'system_id' in params:
path_params['system-id'] = params['system_id']
query_params = {}
if 'controller' in params:
query_params['controller'] = params['controller']
if 'verbose_error_response' in params:
query_params['verboseErrorResponse'] = params['verbose_error_response']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def symbol_power_cycle_physical_drive(self, system_id, body, **kwargs):
all_params = ['system_id', 'body', 'controller', 'verbose_error_response']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method symbol_power_cycle_physical_drive" % key
)
params[key] = val
del params['kwargs']
if ('system_id' not in params) or (params['system_id'] is None):
raise ValueError("Missing the required parameter `system_id` when calling `symbol_power_cycle_physical_drive`")
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `symbol_power_cycle_physical_drive`")
resource_path = '/storage-systems/{system-id}/symbol/powerCyclePhysicalDrive'.replace('{format}', 'json')
path_params = {}
if 'system_id' in params:
path_params['system-id'] = params['system_id']
query_params = {}
if 'controller' in params:
query_params['controller'] = params['controller']
if 'verbose_error_response' in params:
query_params['verboseErrorResponse'] = params['verbose_error_response']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PowerCyclePhysicalDriveDataReturn',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
def symbol_power_down_array(self, system_id, **kwargs):
all_params = ['system_id', 'controller', 'verbose_error_response']
all_params.append('callback')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method symbol_power_down_array" % key
)
params[key] = val
del params['kwargs']
if ('system_id' not in params) or (params['system_id'] is None):
raise ValueError("Missing the required parameter `system_id` when calling `symbol_power_down_array`")
resource_path = '/storage-systems/{system-id}/symbol/powerDownArray'.replace('{format}', 'json')
path_params = {}
if 'system_id' in params:
path_params['system-id'] = params['system_id']
query_params = {}
if 'controller' in params:
query_params['controller'] = params['controller']
if 'verbose_error_response' in params:
query_params['verboseErrorResponse'] = params['verbose_error_response']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
auth_settings = ['basicAuth']
response = self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
| true
| true
|
1c466e24cb956d06875072696560d66b6ce8e400
| 4,071
|
py
|
Python
|
alipay/aop/api/request/KoubeiMarketingCampaignItemBatchqueryRequest.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/request/KoubeiMarketingCampaignItemBatchqueryRequest.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/request/KoubeiMarketingCampaignItemBatchqueryRequest.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.KoubeiMarketingCampaignItemBatchqueryModel import KoubeiMarketingCampaignItemBatchqueryModel
class KoubeiMarketingCampaignItemBatchqueryRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, KoubeiMarketingCampaignItemBatchqueryModel):
self._biz_content = value
else:
self._biz_content = KoubeiMarketingCampaignItemBatchqueryModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'koubei.marketing.campaign.item.batchquery'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
| 28.075862
| 166
| 0.651437
|
import simplejson as json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.KoubeiMarketingCampaignItemBatchqueryModel import KoubeiMarketingCampaignItemBatchqueryModel
class KoubeiMarketingCampaignItemBatchqueryRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, KoubeiMarketingCampaignItemBatchqueryModel):
self._biz_content = value
else:
self._biz_content = KoubeiMarketingCampaignItemBatchqueryModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'koubei.marketing.campaign.item.batchquery'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
| true
| true
|
1c466e5cad5ca16ff031b3575687a75e615161ff
| 1,926
|
py
|
Python
|
tests/terraform/checks/resource/aws/test_S3MFADelete.py
|
cclauss/checkov
|
60a385fcaff1499cf00c2d0018575fe5ab71f556
|
[
"Apache-2.0"
] | 5
|
2021-07-29T18:08:40.000Z
|
2022-03-21T04:39:32.000Z
|
tests/terraform/checks/resource/aws/test_S3MFADelete.py
|
cclauss/checkov
|
60a385fcaff1499cf00c2d0018575fe5ab71f556
|
[
"Apache-2.0"
] | 16
|
2021-03-09T07:38:38.000Z
|
2021-06-09T03:53:55.000Z
|
tests/terraform/checks/resource/aws/test_S3MFADelete.py
|
cclauss/checkov
|
60a385fcaff1499cf00c2d0018575fe5ab71f556
|
[
"Apache-2.0"
] | 2
|
2021-08-23T13:25:36.000Z
|
2021-11-05T21:44:52.000Z
|
import unittest
from checkov.terraform.checks.resource.aws.S3MFADelete import scanner
from checkov.common.models.enums import CheckResult
class TestS3MFADelete(unittest.TestCase):
def test_failure(self):
resource_conf = {"region": ["us-west-2"],
"bucket": ["my_bucket"],
"acl": ["public-read"],
"force_destroy": [True],
"tags": [{"Name": "my-bucket"}]}
scan_result = scanner.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_failure_versioning_enabled(self):
resource_conf = {"region": ["us-west-2"],
"bucket": ["my_bucket"],
"acl": ["public-read"],
"force_destroy": [True],
"tags": [{"Name": "my-bucket"}],
"versioning": [{"enabled": [True]}]}
scan_result = scanner.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_success(self):
resource_conf = {"region": ["us-west-2"],
"bucket": ["my_bucket"],
"acl": ["public-read"],
"force_destroy": [True],
"tags": [{"Name": "my-bucket"}],
"logging": [{"target_bucket": "logging-bucket",
"target_prefix": "log/"
}],
"versioning": [
{"enabled": [True]},
{"mfa_delete": [True]}
]
}
scan_result = scanner.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
if __name__ == '__main__':
unittest.main()
| 40.125
| 72
| 0.475597
|
import unittest
from checkov.terraform.checks.resource.aws.S3MFADelete import scanner
from checkov.common.models.enums import CheckResult
class TestS3MFADelete(unittest.TestCase):
def test_failure(self):
resource_conf = {"region": ["us-west-2"],
"bucket": ["my_bucket"],
"acl": ["public-read"],
"force_destroy": [True],
"tags": [{"Name": "my-bucket"}]}
scan_result = scanner.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_failure_versioning_enabled(self):
resource_conf = {"region": ["us-west-2"],
"bucket": ["my_bucket"],
"acl": ["public-read"],
"force_destroy": [True],
"tags": [{"Name": "my-bucket"}],
"versioning": [{"enabled": [True]}]}
scan_result = scanner.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_success(self):
resource_conf = {"region": ["us-west-2"],
"bucket": ["my_bucket"],
"acl": ["public-read"],
"force_destroy": [True],
"tags": [{"Name": "my-bucket"}],
"logging": [{"target_bucket": "logging-bucket",
"target_prefix": "log/"
}],
"versioning": [
{"enabled": [True]},
{"mfa_delete": [True]}
]
}
scan_result = scanner.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
if __name__ == '__main__':
unittest.main()
| true
| true
|
1c46706d423b7f39a9afbea8fb6279b3a302d7b9
| 3,384
|
py
|
Python
|
test/functional/test_framework/coverage.py
|
AlvaStudio/vtl2
|
0d0eeeaeb45d841086a9fabaeb77d3cad14c5f87
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/coverage.py
|
AlvaStudio/vtl2
|
0d0eeeaeb45d841086a9fabaeb77d3cad14c5f87
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/coverage.py
|
AlvaStudio/vtl2
|
0d0eeeaeb45d841086a9fabaeb77d3cad14c5f87
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for doing coverage analysis on the RPC interface.
Provides a way to track which RPC commands are exercised during
testing.
"""
import os
REFERENCE_FILENAME = 'rpc_interface.txt'
class AuthServiceProxyWrapper():
"""
An object that wraps AuthServiceProxy to record specific RPC calls.
"""
def __init__(self, auth_service_proxy_instance, coverage_logfile=None):
"""
Kwargs:
auth_service_proxy_instance (AuthServiceProxy): the instance
being wrapped.
coverage_logfile (str): if specified, write each service_name
out to a file when called.
"""
self.auth_service_proxy_instance = auth_service_proxy_instance
self.coverage_logfile = coverage_logfile
def __getattr__(self, name):
return_val = getattr(self.auth_service_proxy_instance, name)
if not isinstance(return_val, type(self.auth_service_proxy_instance)):
# If proxy getattr returned an unwrapped value, do the same here.
return return_val
return AuthServiceProxyWrapper(return_val, self.coverage_logfile)
def __call__(self, *args, **kwargs):
"""
Delegates to AuthServiceProxy, then writes the particular RPC method
called to a file.
"""
return_val = self.auth_service_proxy_instance.__call__(*args, **kwargs)
self._log_call()
return return_val
def _log_call(self):
rpc_method = self.auth_service_proxy_instance._service_name
if self.coverage_logfile:
with open(self.coverage_logfile, 'a+', encoding='utf8') as f:
f.write("%s\n" % rpc_method)
def __truediv__(self, relative_uri):
return AuthServiceProxyWrapper(self.auth_service_proxy_instance / relative_uri,
self.coverage_logfile)
def get_request(self, *args, **kwargs):
self._log_call()
return self.auth_service_proxy_instance.get_request(*args, **kwargs)
def get_filename(dirname, n_node):
"""
Get a filename unique to the test process ID and node.
This file will contain a list of RPC commands covered.
"""
pid = str(os.getpid())
return os.path.join(
dirname, "coverage.pid%s.node%s.txt" % (pid, str(n_node)))
def write_all_rpc_commands(dirname, node):
"""
Write out a list of all RPC functions available in `vitalium-cli` for
coverage comparison. This will only happen once per coverage
directory.
Args:
dirname (str): temporary test dir
node (AuthServiceProxy): client
Returns:
bool. if the RPC interface file was written.
"""
filename = os.path.join(dirname, REFERENCE_FILENAME)
if os.path.isfile(filename):
return False
help_output = node.help().split('\n')
commands = set()
for line in help_output:
line = line.strip()
# Ignore blanks and headers
if line and not line.startswith('='):
commands.add("%s\n" % line.split()[0])
with open(filename, 'w', encoding='utf8') as f:
f.writelines(list(commands))
return True
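# ---------------------------------------------------------------------------
# Editor's sketch, not part of the upstream file: the intended wiring of the
# pieces above. 'proxy' stands in for a connected AuthServiceProxy instance,
# which this sketch does not construct, so it stays commented out.
#
#     logfile = get_filename('/tmp/coverage', n_node=0)
#     wrapped = AuthServiceProxyWrapper(proxy, coverage_logfile=logfile)
#     wrapped.getblockcount()          # delegated to proxy; method name logged
#     write_all_rpc_commands('/tmp/coverage', wrapped)   # reference list, once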
| 30.763636
| 87
| 0.661052
|
import os
REFERENCE_FILENAME = 'rpc_interface.txt'
class AuthServiceProxyWrapper():
def __init__(self, auth_service_proxy_instance, coverage_logfile=None):
self.auth_service_proxy_instance = auth_service_proxy_instance
self.coverage_logfile = coverage_logfile
def __getattr__(self, name):
return_val = getattr(self.auth_service_proxy_instance, name)
if not isinstance(return_val, type(self.auth_service_proxy_instance)):
return return_val
return AuthServiceProxyWrapper(return_val, self.coverage_logfile)
def __call__(self, *args, **kwargs):
return_val = self.auth_service_proxy_instance.__call__(*args, **kwargs)
self._log_call()
return return_val
def _log_call(self):
rpc_method = self.auth_service_proxy_instance._service_name
if self.coverage_logfile:
with open(self.coverage_logfile, 'a+', encoding='utf8') as f:
f.write("%s\n" % rpc_method)
def __truediv__(self, relative_uri):
return AuthServiceProxyWrapper(self.auth_service_proxy_instance / relative_uri,
self.coverage_logfile)
def get_request(self, *args, **kwargs):
self._log_call()
return self.auth_service_proxy_instance.get_request(*args, **kwargs)
def get_filename(dirname, n_node):
pid = str(os.getpid())
return os.path.join(
dirname, "coverage.pid%s.node%s.txt" % (pid, str(n_node)))
def write_all_rpc_commands(dirname, node):
filename = os.path.join(dirname, REFERENCE_FILENAME)
if os.path.isfile(filename):
return False
help_output = node.help().split('\n')
commands = set()
for line in help_output:
line = line.strip()
if line and not line.startswith('='):
commands.add("%s\n" % line.split()[0])
with open(filename, 'w', encoding='utf8') as f:
f.writelines(list(commands))
return True
| true
| true
|
1c467098f9187f933e5f2190a330b0d788c96cba
| 524
|
py
|
Python
|
lib/gensim/summarization/commons.py
|
duyetdev/api.duyetdev.com
|
4c33cc2cfb43ad6c4089873230e7b657659bff15
|
[
"MIT"
] | 4
|
2018-11-27T01:35:30.000Z
|
2022-01-27T01:17:11.000Z
|
lib/gensim/summarization/commons.py
|
duyetdev/api.duyetdev.com
|
4c33cc2cfb43ad6c4089873230e7b657659bff15
|
[
"MIT"
] | 12
|
2020-07-11T01:42:51.000Z
|
2020-08-12T17:17:35.000Z
|
lib/gensim/summarization/commons.py
|
duyetdev/api.duyetdev.com
|
4c33cc2cfb43ad6c4089873230e7b657659bff15
|
[
"MIT"
] | 1
|
2018-11-27T01:35:33.000Z
|
2018-11-27T01:35:33.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
from gensim.summarization.graph import Graph
def build_graph(sequence):
graph = Graph()
for item in sequence:
if not graph.has_node(item):
graph.add_node(item)
return graph
def remove_unreachable_nodes(graph):
for node in graph.nodes():
if sum(graph.edge_weight((node, other)) for other in graph.neighbors(node)) == 0:
graph.del_node(node)
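# ---------------------------------------------------------------------------
# Editor's sketch, not part of the upstream module: build_graph() adds one
# node per unique item and no edges, so every node starts isolated; after
# edges are wired in, remove_unreachable_nodes() drops nodes whose total
# edge weight is zero. Kept commented since it needs a gensim install.
#
#     g = build_graph(['a', 'b', 'c'])
#     g.add_edge(('a', 'b'), wt=1.0)
#     remove_unreachable_nodes(g)   # 'c' carries no edge weight -> removed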
| 24.952381
| 89
| 0.658397
|
from gensim.summarization.graph import Graph
def build_graph(sequence):
graph = Graph()
for item in sequence:
if not graph.has_node(item):
graph.add_node(item)
return graph
def remove_unreachable_nodes(graph):
for node in graph.nodes():
if sum(graph.edge_weight((node, other)) for other in graph.neighbors(node)) == 0:
graph.del_node(node)
| true
| true
|
1c467153e4dd6d941d4fa3ba61730d6360584541
| 271
|
py
|
Python
|
lab_6.py
|
goni21-meet/meet2019y1lab6
|
c75fab3b544a13d1ad8bef13e5675ff5ea165a0d
|
[
"MIT"
] | null | null | null |
lab_6.py
|
goni21-meet/meet2019y1lab6
|
c75fab3b544a13d1ad8bef13e5675ff5ea165a0d
|
[
"MIT"
] | null | null | null |
lab_6.py
|
goni21-meet/meet2019y1lab6
|
c75fab3b544a13d1ad8bef13e5675ff5ea165a0d
|
[
"MIT"
] | null | null | null |
#import turtle
#x = 0
#while x<300:
# y = x**2/300
# turtle.goto(x,y)
# print( turtle.pos())
#x = x+100
#turtle.mainloop()
import turtle
num_pts = 5
for i in range(num_pts):
turtle.left(360/num_pts)
turtle.forward(100)
turtle.mainloop()
| 11.291667
| 28
| 0.597786
|
import turtle
num_pts = 5
for i in range(num_pts):
turtle.left(360/num_pts)
turtle.forward(100)
turtle.mainloop()
| true
| true
|
1c46723f0a2b6f567ce353e54cc92039a142acf3
| 119
|
py
|
Python
|
h/groups/__init__.py
|
ssin122/test-h
|
c10062ae23b690afaac0ab4af7b9a5a5e4b686a9
|
[
"MIT"
] | 2
|
2021-11-07T23:14:54.000Z
|
2021-11-17T10:11:55.000Z
|
h/groups/__init__.py
|
ssin122/test-h
|
c10062ae23b690afaac0ab4af7b9a5a5e4b686a9
|
[
"MIT"
] | null | null | null |
h/groups/__init__.py
|
ssin122/test-h
|
c10062ae23b690afaac0ab4af7b9a5a5e4b686a9
|
[
"MIT"
] | 1
|
2017-03-12T00:18:33.000Z
|
2017-03-12T00:18:33.000Z
|
# -*- coding: utf-8 -*-
def includeme(config):
config.memex_add_search_filter('h.groups.search.GroupAuthFilter')
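# ---------------------------------------------------------------------------
# Editor's note, not part of the upstream package: includeme() is the hook
# Pyramid runs when this package is pulled into the configurator, e.g.
#
#     config.include('h.groups')   # invokes includeme(config) above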
| 19.833333
| 69
| 0.714286
|
def includeme(config):
config.memex_add_search_filter('h.groups.search.GroupAuthFilter')
| true
| true
|
1c467277bdb5e4e30d2dce15c741769d571a9408
| 3,454
|
py
|
Python
|
lines/Segment.py
|
AlexTaguchi/lines
|
d091d52350d0bedc3c8af0aa5438b6a1da95151d
|
[
"MIT"
] | null | null | null |
lines/Segment.py
|
AlexTaguchi/lines
|
d091d52350d0bedc3c8af0aa5438b6a1da95151d
|
[
"MIT"
] | null | null | null |
lines/Segment.py
|
AlexTaguchi/lines
|
d091d52350d0bedc3c8af0aa5438b6a1da95151d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
class EvalPointError(ValueError):
pass
class Segment:
def __init__(self):
self.a = 0.0 # y = a.x + b
self.b = 0.0
self._points = []
self._rss = 0.0 # Residual sum of squares
self._wixiyi = 0.0
self._wixi = 0.0
self._wiyi = 0.0
self._wixi2 = 0.0
self._wi = 0.0
def getLength(self):
return len(self._points)
def appendPoint(self, p):
"""append point to the segment, update parameters
for the line function
"""
self._points.append(p)
self._wixiyi += p.w * p.x * p.y
self._wixi += p.w * p.x
self._wiyi += p.w * p.y
self._wixi2 += p.w * p.x * p.x
self._wi += p.w
if len(self._points) > 1:
# if points are aligned with exactly the same x, line is vertical
# a and b would be infinity
if self._wixi2 == self._wixi**2/self._wi:
self.a = float("inf")
self.b = float("inf")
else:
self.a = (self._wixiyi - self._wixi*self._wiyi/self._wi)\
/ (self._wixi2 - self._wixi**2/self._wi)
self.b = (self._wiyi - self.a * self._wixi)/self._wi
self._rss = self.calcRSS(self.a, self.b)
def calcRSS(self, a, b):
""" calculate Residual Sum of Squares
Args:
a (float): slope
b (float): intercept
Returns:
float: residual sum of squares
"""
rss = 0.0
if len(self._points) < 2:
return rss
if self.a == float("inf") or self.b == float("inf"):
# in case when y is a vertical line, independent of x
avg_x = 0.0
for p in self._points:
avg_x += p.x * p.w
            avg_x /= float(len(self._points))  # note: divides by point count, not by total weight
for p in self._points:
rss += p.w * (p.x - avg_x)**2
return rss
for p in self._points:
rss += p.w * (p.y - a * p.x - b)**2
return rss
def evalPoint(self, p):
"""calculate a and b of y=a.x+b before including in the point p
Args:
p (Point): a point not in Segment
Returns:
(a,b): a tuple for slope and intercept for fitted line y=a.x+b
"""
if not self._points:
raise EvalPointError("""Cannot calculate slope
and intercept with a single point.
""")
x = p.x
y = p.y
w = float(p.w)
wixiyi = self._wixiyi + w*x*y
wixi = self._wixi + w*x
wiyi = self._wiyi + w*y
wixi2 = self._wixi2 + w*x*x
wi = self._wi + w
if wixi2 == wixi**2/wi:
a = float("inf")
b = float("inf")
return (a, b)
a = (wixiyi - wixi*wiyi/wi)/(wixi2 - wixi**2/wi)
b = (wiyi - a * wixi)/wi
return (a, b)
def evalRSS(self, p):
""" evaluate Residual Sum of Squares before including the point """
if len(self._points) < 2:
return self._rss
new_a, new_b = self.evalPoint(p)
rss = self.calcRSS(new_a, new_b)
rss += p.w * (p.y - self.a * p.x - self.b)**2
return rss
def getPoints(self):
""" return a list of Point stored inside the Segment """
return self._points
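# ---------------------------------------------------------------------------
# Editor's sketch, not part of the upstream file: Segment only needs point
# objects exposing x, y and a weight w, so the namedtuple below is a
# hypothetical stand-in for whatever point type the caller supplies.
if __name__ == '__main__':
    from collections import namedtuple
    Point = namedtuple('Point', 'x y w')  # hypothetical point type

    seg = Segment()
    for pt in (Point(0.0, 1.0, 1.0), Point(1.0, 3.0, 1.0)):
        seg.appendPoint(pt)
    # two points fit the line exactly: y = 2x + 1
    candidate = Point(2.0, 5.0, 1.0)
    print(seg.evalPoint(candidate))  # -> (2.0, 1.0): slope and intercept
    print(seg.evalRSS(candidate))    # -> 0.0: the candidate lies on the line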
| 29.271186
| 77
| 0.486972
|
class EvalPointError(ValueError):
pass
class Segment:
def __init__(self):
self.a = 0.0
self.b = 0.0
self._points = []
self._rss = 0.0
self._wixiyi = 0.0
self._wixi = 0.0
self._wiyi = 0.0
self._wixi2 = 0.0
self._wi = 0.0
def getLength(self):
return len(self._points)
def appendPoint(self, p):
self._points.append(p)
self._wixiyi += p.w * p.x * p.y
self._wixi += p.w * p.x
self._wiyi += p.w * p.y
self._wixi2 += p.w * p.x * p.x
self._wi += p.w
if len(self._points) > 1:
if self._wixi2 == self._wixi**2/self._wi:
self.a = float("inf")
self.b = float("inf")
else:
self.a = (self._wixiyi - self._wixi*self._wiyi/self._wi)\
/ (self._wixi2 - self._wixi**2/self._wi)
self.b = (self._wiyi - self.a * self._wixi)/self._wi
self._rss = self.calcRSS(self.a, self.b)
def calcRSS(self, a, b):
rss = 0.0
if len(self._points) < 2:
return rss
if self.a == float("inf") or self.b == float("inf"):
avg_x = 0.0
for p in self._points:
avg_x += p.x * p.w
avg_x /= float(len(self._points))
for p in self._points:
rss += p.w * (p.x - avg_x)**2
return rss
for p in self._points:
rss += p.w * (p.y - a * p.x - b)**2
return rss
def evalPoint(self, p):
if not self._points:
raise EvalPointError("""Cannot calculate slope
and intercept with a single point.
""")
x = p.x
y = p.y
w = float(p.w)
wixiyi = self._wixiyi + w*x*y
wixi = self._wixi + w*x
wiyi = self._wiyi + w*y
wixi2 = self._wixi2 + w*x*x
wi = self._wi + w
if wixi2 == wixi**2/wi:
a = float("inf")
b = float("inf")
return (a, b)
a = (wixiyi - wixi*wiyi/wi)/(wixi2 - wixi**2/wi)
b = (wiyi - a * wixi)/wi
return (a, b)
def evalRSS(self, p):
if len(self._points) < 2:
return self._rss
new_a, new_b = self.evalPoint(p)
rss = self.calcRSS(new_a, new_b)
rss += p.w * (p.y - self.a * p.x - self.b)**2
return rss
def getPoints(self):
return self._points
| true
| true
|
1c46739728bc3decdb8284725d1278b660ceff72
| 826
|
py
|
Python
|
pirates/creature/Monstrous.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | 3
|
2021-02-25T06:38:13.000Z
|
2022-03-22T07:00:15.000Z
|
pirates/creature/Monstrous.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | null | null | null |
pirates/creature/Monstrous.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | 1
|
2021-02-25T06:38:17.000Z
|
2021-02-25T06:38:17.000Z
|
# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.creature.Monstrous
class Monstrous:
__module__ = __name__
def initializeMonstrousTags(self, rootNodePath):
from pirates.piratesbase import PiratesGlobals
rootNodePath.setPythonTag('MonstrousObject', self)
self.setPythonTag('MonstrousObject', self)
rootNodePath.setTag('objType', str(PiratesGlobals.COLL_MONSTROUS))
self.setTag('objType', str(PiratesGlobals.COLL_MONSTROUS))
def cleanupMontstrousTags(self, rootNodePath):
rootNodePath.clearPythonTag('MonstrousObject')
self.clearPythonTag('MonstrousObject')
def initializeBattleCollisions(self):
pass
| 37.545455
| 104
| 0.728814
|
class Monstrous:
__module__ = __name__
def initializeMonstrousTags(self, rootNodePath):
from pirates.piratesbase import PiratesGlobals
rootNodePath.setPythonTag('MonstrousObject', self)
self.setPythonTag('MonstrousObject', self)
rootNodePath.setTag('objType', str(PiratesGlobals.COLL_MONSTROUS))
self.setTag('objType', str(PiratesGlobals.COLL_MONSTROUS))
def cleanupMontstrousTags(self, rootNodePath):
rootNodePath.clearPythonTag('MonstrousObject')
self.clearPythonTag('MonstrousObject')
def initializeBattleCollisions(self):
pass
| true
| true
|
1c4674db514b2fd6be335133809c409cc899a512
| 6,171
|
py
|
Python
|
gpgrouper/containers.py
|
malovannaya-lab/gpgrouper
|
45cb948bfa9ed256e450ad8f257ec24324f786ca
|
[
"BSD-3-Clause"
] | 5
|
2018-08-10T17:10:08.000Z
|
2020-05-21T08:09:45.000Z
|
gpgrouper/containers.py
|
malovannaya-lab/gpgrouper
|
45cb948bfa9ed256e450ad8f257ec24324f786ca
|
[
"BSD-3-Clause"
] | null | null | null |
gpgrouper/containers.py
|
malovannaya-lab/gpgrouper
|
45cb948bfa9ed256e450ad8f257ec24324f786ca
|
[
"BSD-3-Clause"
] | null | null | null |
"""Container for each experiment, has a dataframe and metadata"""
import os
import re
from datetime import datetime
import traceback
import pandas as pd
from . import _version
class UserData:
def __init__(self, recno=None, datafile=None, runno=1, searchno=1, no_taxa_redistrib=0,
addedby='', indir = '.', outdir='.', rawfiledir='.',
labeltype='none', quant_source=None, phospho=False,
searchdb=None, taxonid=None, miscuts=2):
if recno is None:
raise ValueError('Must supply record number (recno)')
self.recno = recno
self.runno = runno
self.searchno = searchno
self.taxonid = taxonid
self.added_by = addedby
self.labeltype = labeltype
self.no_taxa_redistrib = no_taxa_redistrib
self.filtervalues = dict()
self.indir = indir
self.outdir = outdir
self.rawfiledir = rawfiledir
self.searchdb = searchdb # file name for refseq
self.datafile = datafile
self.df = pd.DataFrame()
self.pipeline = None
self.original_columns = None
rrs = '{}_{}_{}_'.format(recno, runno, searchno)
basename = os.path.splitext(os.path.basename(datafile))[0]
self.basename = basename.split(rrs)[-1]
self.LOGFILE = os.path.join(outdir, self.output_name(ext='log'))
self._LOGSTACK = list()
self.EXIT_CODE = 0
self.ERROR = None
self.taxon_ratio_totals = dict()
self.miscuts = miscuts
self.phospho = phospho
with open(self.LOGFILE, 'w') as f:
f.write('{} PyGrouper {}'.format(datetime.now(), _version.__version__))
@property
def taxon_miscut_id(self):
return hash(self.taxonid) + hash(self.miscuts)
def __repr__(self):
return '{}_{}_{}'.format(self.recno, self.runno, self.searchno)
def __bool__(self):
if self.datafile is not None and self.recno is not None:
return True
return False
def to_log(self, message):
        if self._LOGSTACK:  # flush any queued messages along with this one
            messages = tuple(self._LOGSTACK) + (message,)
            self._LOGSTACK = list()
        else:
            messages = (message,)
        with open(self.LOGFILE, 'a') as f:
            for message in messages:
                f.write(message)
                f.write('\n')
def to_logq(self, message):
self._LOGSTACK.append(message+'\n')
return self
def flush_log(self):
if self._LOGSTACK:
stack, self._LOGSTACK = self._LOGSTACK, list()
self.to_log('\n'.join(stack))
return self
def full_path(self, in_or_out='in'):
"""returns data file with given path"""
if in_or_out == 'in':
mydir = self.indir
elif in_or_out == 'out':
mydir = self.outdir
else:
mydir = '.'
return os.path.join(mydir, self.datafile or '')
def read_csv(self, *args, **kwargs):
"""Uses pandas read_csv function to read an input file
args and kwargs are passed to this function"""
try:
self.df = pd.read_csv(self.full_path(), *args, **kwargs)
self.original_columns = self.df.columns.values
except Exception as e:
# self.to_log(''.join(traceback.format_exc()))
self.to_log(traceback.format_exc())
self.ERROR = traceback.format_exc()
self.EXIT_CODE = 1
return 1
if len(self.df) == 0:
self.EXIT_CODE = 1
return 2
return 0
def output_name(self, suffix=None, ext='tab'):
"""generate an appropriate output file name
returns rec_run_search_labeltype_filetype.tab"""
# suffix = '_'.join([str(ix) for ix in suffix])
return '{!r}_{}_{}{}.{}'.format(self,
self.labeltype,
self.basename,
'_' + suffix if suffix else '',
ext
)
def populate_base_data(self):
"""Populate dataframe with base data prior to grouping"""
self.categorical_assign('EXPRecNo', self.recno)
self.categorical_assign('EXPRunNo', self.runno)
self.categorical_assign('EXPSearchNo', self.searchno)
        self.categorical_assign('CreationTS', datetime.now().strftime("%m/%d/%Y %H:%M:%S"))
self.categorical_assign('AddedBy', self.added_by)
# self.categorical_assign('metadatainfo', '') # not sure if this is okay
# self.df['EXPRecNo'] = self._categorical_assign(self.recno)
# self.df['EXPRunNo'] = self._categorical_assign(self.runno)
# self.df['EXPSearchNo'] = self._categorical_assign(self.searchno)
# self.df['CreationTS'] = self._categorical_assign(datetime.now().strftime("%m/%d/%Y) %H:%M:%S"))
# self.df['AddedBy'] = self._categorical_assign(self.added_by)
# self.df['psm_EXPTechRepNo'] = self.techrepno
# self.df['psm_TaxonID'] = self.taxonid
#self.df['psm_GeneList'] = ''
#self.df['psm_ProteinList'] = ''
#self.df['psm_GeneCount'] = 0
#self.df['psm_ProteinCount'] = 0
#self.df['psm_HomologeneID'] = ''
#self.df['psm_ProteinCapacity'] = ''
# self.df['metadatainfo'] = [tuple()] * len(self.df)
self.df['metadatainfo'] = ''
        if 'ion_score_bins' not in self.filtervalues:
self.filtervalues['ion_score_bins'] = (10, 20, 30)
return self
@property
def filterstamp(self):
s = 'is{ion_score}_qv{qvalue}_pep{pep}_idg{idg}_z{zmin}to{zmax}_mo{modi}_is_bins{ion_score_bins}'.format(**self.filtervalues)
if self.phospho:
s += '_phospho_only'
return s
def categorical_assign(self, name, value, **kwargs):
"""
Assign a static value to a new column.
Saves memory by using pandas Categorical dtype.
:kwargs: passed to pd.Series.astype
"""
self.df[name] = value
self.df[name] = self.df[name].astype('category', **kwargs)
return self
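# ---------------------------------------------------------------------------
# Editor's sketch, not part of the upstream module: typical construction and
# read flow. Record number, file names and directories are placeholders; note
# that __init__ immediately creates the log file under outdir, so this stays
# commented out.
#
#     usrdata = UserData(recno=12345, datafile='12345_1_1_psms.tab',
#                        indir='/data/in', outdir='/tmp', addedby='editor')
#     if usrdata.read_csv(sep='\t') == 0:          # 0 on success, 1/2 on error
#         usrdata.populate_base_data()
#         print(usrdata.output_name(suffix='e2g'))  # 12345_1_1_none_psms_e2g.tab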
| 36.087719
| 133
| 0.577702
|
import os
import re
from datetime import datetime
import traceback
import pandas as pd
from . import _version
class UserData:
def __init__(self, recno=None, datafile=None, runno=1, searchno=1, no_taxa_redistrib=0,
addedby='', indir = '.', outdir='.', rawfiledir='.',
labeltype='none', quant_source=None, phospho=False,
searchdb=None, taxonid=None, miscuts=2):
if recno is None:
raise ValueError('Must supply record number (recno)')
self.recno = recno
self.runno = runno
self.searchno = searchno
self.taxonid = taxonid
self.added_by = addedby
self.labeltype = labeltype
self.no_taxa_redistrib = no_taxa_redistrib
self.filtervalues = dict()
self.indir = indir
self.outdir = outdir
self.rawfiledir = rawfiledir
self.searchdb = searchdb
self.datafile = datafile
self.df = pd.DataFrame()
self.pipeline = None
self.original_columns = None
rrs = '{}_{}_{}_'.format(recno, runno, searchno)
basename = os.path.splitext(os.path.basename(datafile))[0]
self.basename = basename.split(rrs)[-1]
self.LOGFILE = os.path.join(outdir, self.output_name(ext='log'))
self._LOGSTACK = list()
self.EXIT_CODE = 0
self.ERROR = None
self.taxon_ratio_totals = dict()
self.miscuts = miscuts
self.phospho = phospho
with open(self.LOGFILE, 'w') as f:
f.write('{} PyGrouper {}'.format(datetime.now(), _version.__version__))
@property
def taxon_miscut_id(self):
return hash(self.taxonid) + hash(self.miscuts)
def __repr__(self):
return '{}_{}_{}'.format(self.recno, self.runno, self.searchno)
def __bool__(self):
if self.datafile is not None and self.recno is not None:
return True
return False
def to_log(self, message):
        if self._LOGSTACK:
            messages = tuple(self._LOGSTACK) + (message,)
            self._LOGSTACK = list()
        else:
            messages = (message,)
        with open(self.LOGFILE, 'a') as f:
for message in messages:
f.write(message)
f.write('\n')
def to_logq(self, message):
self._LOGSTACK.append(message+'\n')
return self
def flush_log(self):
if self._LOGSTACK:
stack, self._LOGSTACK = self._LOGSTACK, list()
self.to_log('\n'.join(stack))
return self
def full_path(self, in_or_out='in'):
if in_or_out == 'in':
mydir = self.indir
elif in_or_out == 'out':
mydir = self.outdir
else:
mydir = '.'
return os.path.join(mydir, self.datafile or '')
def read_csv(self, *args, **kwargs):
try:
self.df = pd.read_csv(self.full_path(), *args, **kwargs)
self.original_columns = self.df.columns.values
except Exception as e:
self.to_log(traceback.format_exc())
self.ERROR = traceback.format_exc()
self.EXIT_CODE = 1
return 1
if len(self.df) == 0:
self.EXIT_CODE = 1
return 2
return 0
def output_name(self, suffix=None, ext='tab'):
return '{!r}_{}_{}{}.{}'.format(self,
self.labeltype,
self.basename,
'_' + suffix if suffix else '',
ext
)
def populate_base_data(self):
self.categorical_assign('EXPRecNo', self.recno)
self.categorical_assign('EXPRunNo', self.runno)
self.categorical_assign('EXPSearchNo', self.searchno)
        self.categorical_assign('CreationTS', datetime.now().strftime("%m/%d/%Y %H:%M:%S"))
self.categorical_assign('AddedBy', self.added_by)
self.df['metadatainfo'] = ''
        if 'ion_score_bins' not in self.filtervalues:
self.filtervalues['ion_score_bins'] = (10, 20, 30)
return self
@property
def filterstamp(self):
s = 'is{ion_score}_qv{qvalue}_pep{pep}_idg{idg}_z{zmin}to{zmax}_mo{modi}_is_bins{ion_score_bins}'.format(**self.filtervalues)
if self.phospho:
s += '_phospho_only'
return s
def categorical_assign(self, name, value, **kwargs):
self.df[name] = value
self.df[name] = self.df[name].astype('category', **kwargs)
return self
| true
| true
|
1c4675f61a8598c7f8758a842b5965aa6ce62daf
| 1,319
|
py
|
Python
|
Python Programs/The-Imvisible-Man/opcv.py
|
Chibi-Shem/Hacktoberfest2020-Expert
|
324843464aec039e130e85a16e74b76d310f1497
|
[
"MIT"
] | 77
|
2020-10-01T10:06:59.000Z
|
2021-11-08T08:57:18.000Z
|
Python Programs/The-Imvisible-Man/opcv.py
|
Chibi-Shem/Hacktoberfest2020-Expert
|
324843464aec039e130e85a16e74b76d310f1497
|
[
"MIT"
] | 46
|
2020-09-27T04:55:36.000Z
|
2021-05-14T18:49:06.000Z
|
Python Programs/The-Imvisible-Man/opcv.py
|
Chibi-Shem/Hacktoberfest2020-Expert
|
324843464aec039e130e85a16e74b76d310f1497
|
[
"MIT"
] | 327
|
2020-09-26T17:06:03.000Z
|
2021-10-09T06:04:39.000Z
|
import numpy as np
import cv2
import time
cap = cv2.VideoCapture(0)
time.sleep(2)
background=0
#capture the background
for i in range(30):
ret,background = cap.read()
while(cap.isOpened()):
ret , img = cap.read()
if not ret:
break
hsv = cv2.cvtColor(img , cv2.COLOR_BGR2HSV)
lower_red = np.array([0,120,70])
upper_red = np.array([10, 255, 255])
    mask1 = cv2.inRange(hsv , lower_red , upper_red) # separating the cloak part (lower red hue range)
    lower_red = np.array([170, 120, 70])
    upper_red = np.array([180, 255, 255])
    mask2 = cv2.inRange(hsv, lower_red, upper_red) # separating the cloak part (upper red hue range)
    mask1 = mask1+mask2 # combine both red-hue masks
    mask1 = cv2.morphologyEx(mask1, cv2.MORPH_OPEN,
                                np.ones((3,3),np.uint8),iterations=2) # noise removal
    mask1 = cv2.morphologyEx(mask1, cv2.MORPH_DILATE,
                            np.ones((3,3), np.uint8), iterations=1) # smoothing the mask
    mask2 = cv2.bitwise_not(mask1) # everything except the cloak
res1=cv2.bitwise_and(background , background , mask = mask1)
res2 = cv2.bitwise_and(img , img , mask = mask2)
final_output = cv2.addWeighted(res1,1,res2,1,0)
cv2.imshow("Hey invisible..!", final_output)
k = cv2.waitKey(10)
if k == ord('s'):
break
cap.release()
cv2.destroyAllWindows()
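# ---------------------------------------------------------------------------
# Editor's note, not part of the original script: red wraps around both ends
# of OpenCV's 0-180 hue axis, which is why two inRange() masks are built and
# combined above. A quick standalone check of that wrap-around:
#
#     px = np.uint8([[[178, 200, 200]]])            # high-hue red pixel (HSV)
#     lo = cv2.inRange(px, np.array([0, 120, 70]), np.array([10, 255, 255]))
#     hi = cv2.inRange(px, np.array([170, 120, 70]), np.array([180, 255, 255]))
#     assert lo[0][0] == 0 and hi[0][0] == 255      # only the second range hits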
| 26.918367
| 88
| 0.634572
|
import numpy as np
import cv2
import time
cap = cv2.VideoCapture(0)
time.sleep(2)
background=0
for i in range(30):
ret,background = cap.read()
while(cap.isOpened()):
ret , img = cap.read()
if not ret:
break
hsv = cv2.cvtColor(img , cv2.COLOR_BGR2HSV)
lower_red = np.array([0,120,70])
upper_red = np.array([10, 255, 255])
mask1 = cv2.inRange(hsv , lower_red , upper_red)
lower_red = np.array([170, 120, 70])
upper_red = np.array([180, 255, 255])
mask2 = cv2.inRange(hsv, lower_red, upper_red)
mask1 = mask1+mask2
mask1 = cv2.morphologyEx(mask1, cv2.MORPH_OPEN,
np.ones((3,3),np.uint8),iterations=2)
mask1 = cv2.morphologyEx(mask1, cv2.MORPH_DILATE,
np.ones((3,3), np.uint8), iterations=1)
mask2 = cv2.bitwise_not(mask1)
res1=cv2.bitwise_and(background , background , mask = mask1)
res2 = cv2.bitwise_and(img , img , mask = mask2)
final_output = cv2.addWeighted(res1,1,res2,1,0)
cv2.imshow("Hey invisible..!", final_output)
k = cv2.waitKey(10)
if k == ord('s'):
break
cap.release()
cv2.destroyAllWindows()
| true
| true
|
1c4676525bfb0f4f935ed4c6103fd5be72db5498
| 9,215
|
py
|
Python
|
salt/modules/s3.py
|
herlo/salt
|
10ffb8315559c0cfbc10b4adc26cd62ebc462851
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/s3.py
|
herlo/salt
|
10ffb8315559c0cfbc10b4adc26cd62ebc462851
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/s3.py
|
herlo/salt
|
10ffb8315559c0cfbc10b4adc26cd62ebc462851
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Connection module for Amazon S3
:configuration: This module accepts explicit s3 credentials but can also utilize
    IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More Information available at::
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file::
s3.keyid: GKTADJGHEIQSXMKKRBJ08H
s3.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
A service_url may also be specified in the configuration::
s3.service_url: s3.amazonaws.com
A role_arn may also be specified in the configuration::
s3.role_arn: arn:aws:iam::111111111111:role/my-role-to-assume
If a service_url is not specified, the default is s3.amazonaws.com. This
may appear in various documentation as an "endpoint". A comprehensive list
for Amazon S3 may be found at::
http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
The service_url will form the basis for the final endpoint that is used to
query the service.
SSL verification may also be turned off in the configuration::
s3.verify_ssl: False
This is required if using S3 bucket names that contain a period, as
these will not match Amazon's S3 wildcard certificates. Certificate
verification is enabled by default.
AWS region may be specified in the configuration::
s3.location: eu-central-1
Default is us-east-1.
This module should be usable to query other S3-like services, such as
Eucalyptus.
:depends: requests
'''
from __future__ import absolute_import
# Import Python libs
import logging
# Import Salt libs
import salt.utils
import salt.utils.s3
log = logging.getLogger(__name__)
def __virtual__():
'''
Should work on any modern Python installation
'''
return True
def delete(bucket, path=None, action=None, key=None, keyid=None,
service_url=None, verify_ssl=None, kms_keyid=None, location=None,
role_arn=None):
'''
Delete a bucket, or delete an object from a bucket.
CLI Example to delete a bucket::
salt myminion s3.delete mybucket
CLI Example to delete an object from a bucket::
salt myminion s3.delete mybucket remoteobject
'''
key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn = _get_key(
key,
keyid,
service_url,
verify_ssl,
kms_keyid,
location,
role_arn,
)
return salt.utils.s3.query(method='DELETE',
bucket=bucket,
path=path,
action=action,
key=key,
keyid=keyid,
kms_keyid=kms_keyid,
service_url=service_url,
verify_ssl=verify_ssl,
location=location,
role_arn=role_arn)
def get(bucket=None, path=None, return_bin=False, action=None,
local_file=None, key=None, keyid=None, service_url=None,
verify_ssl=None, kms_keyid=None, location=None, role_arn=None):
'''
List the contents of a bucket, or return an object from a bucket. Set
return_bin to True in order to retrieve an object wholesale. Otherwise,
Salt will attempt to parse an XML response.
CLI Example to list buckets:
.. code-block:: bash
salt myminion s3.get
CLI Example to list the contents of a bucket:
.. code-block:: bash
salt myminion s3.get mybucket
CLI Example to return the binary contents of an object:
.. code-block:: bash
salt myminion s3.get mybucket myfile.png return_bin=True
CLI Example to save the binary contents of an object to a local file:
.. code-block:: bash
salt myminion s3.get mybucket myfile.png local_file=/tmp/myfile.png
It is also possible to perform an action on a bucket. Currently, S3
supports the following actions::
acl
cors
lifecycle
policy
location
logging
notification
tagging
versions
requestPayment
versioning
website
To perform an action on a bucket:
.. code-block:: bash
salt myminion s3.get mybucket myfile.png action=acl
'''
key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn = _get_key(
key,
keyid,
service_url,
verify_ssl,
kms_keyid,
location,
role_arn,
)
return salt.utils.s3.query(method='GET',
bucket=bucket,
path=path,
return_bin=return_bin,
local_file=local_file,
action=action,
key=key,
keyid=keyid,
kms_keyid=kms_keyid,
service_url=service_url,
verify_ssl=verify_ssl,
location=location,
role_arn=role_arn)
def head(bucket, path=None, key=None, keyid=None, service_url=None,
verify_ssl=None, kms_keyid=None, location=None, role_arn=None):
'''
Return the metadata for a bucket, or an object in a bucket.
CLI Examples:
.. code-block:: bash
salt myminion s3.head mybucket
salt myminion s3.head mybucket myfile.png
'''
key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn = _get_key(
key,
keyid,
service_url,
verify_ssl,
kms_keyid,
location,
role_arn,
)
return salt.utils.s3.query(method='HEAD',
bucket=bucket,
path=path,
key=key,
keyid=keyid,
kms_keyid=kms_keyid,
service_url=service_url,
verify_ssl=verify_ssl,
location=location,
full_headers=True,
role_arn=role_arn)
def put(bucket, path=None, return_bin=False, action=None, local_file=None,
key=None, keyid=None, service_url=None, verify_ssl=None,
kms_keyid=None, location=None, role_arn=None):
'''
Create a new bucket, or upload an object to a bucket.
CLI Example to create a bucket:
.. code-block:: bash
salt myminion s3.put mybucket
CLI Example to upload an object to a bucket:
.. code-block:: bash
salt myminion s3.put mybucket remotepath local_file=/path/to/file
'''
key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn = _get_key(
key,
keyid,
service_url,
verify_ssl,
kms_keyid,
location,
role_arn,
)
return salt.utils.s3.query(method='PUT',
bucket=bucket,
path=path,
return_bin=return_bin,
local_file=local_file,
action=action,
key=key,
keyid=keyid,
kms_keyid=kms_keyid,
service_url=service_url,
verify_ssl=verify_ssl,
location=location,
role_arn=role_arn)
def _get_key(key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn):
'''
Examine the keys, and populate as necessary
'''
if not key and __salt__['config.option']('s3.key'):
key = __salt__['config.option']('s3.key')
if not keyid and __salt__['config.option']('s3.keyid'):
keyid = __salt__['config.option']('s3.keyid')
if not kms_keyid and __salt__['config.option']('aws.kms.keyid'):
kms_keyid = __salt__['config.option']('aws.kms.keyid')
if not service_url and __salt__['config.option']('s3.service_url'):
service_url = __salt__['config.option']('s3.service_url')
if not service_url:
service_url = 's3.amazonaws.com'
if verify_ssl is None and __salt__['config.option']('s3.verify_ssl') is not None:
verify_ssl = __salt__['config.option']('s3.verify_ssl')
if verify_ssl is None:
verify_ssl = True
if location is None and __salt__['config.option']('s3.location') is not None:
location = __salt__['config.option']('s3.location')
if role_arn is None and __salt__['config.option']('s3.role_arn') is not None:
role_arn = __salt__['config.option']('s3.role_arn')
return key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn
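# ---------------------------------------------------------------------------
# Editor's sketch, not part of the upstream module: _get_key() only consults
# minion config for values the caller left unset, so explicit arguments always
# win. With an empty config (and __salt__ injected by the salt loader, which
# is why this cannot run standalone), the defaults resolve as:
#
#     key, keyid, url, ssl, kms, loc, arn = _get_key(
#         None, None, None, None, None, None, None)
#     # url == 's3.amazonaws.com', ssl is True, everything else stays None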
| 30.819398
| 87
| 0.58166
|
from __future__ import absolute_import
import logging
import salt.utils
import salt.utils.s3
log = logging.getLogger(__name__)
def __virtual__():
return True
def delete(bucket, path=None, action=None, key=None, keyid=None,
service_url=None, verify_ssl=None, kms_keyid=None, location=None,
role_arn=None):
key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn = _get_key(
key,
keyid,
service_url,
verify_ssl,
kms_keyid,
location,
role_arn,
)
return salt.utils.s3.query(method='DELETE',
bucket=bucket,
path=path,
action=action,
key=key,
keyid=keyid,
kms_keyid=kms_keyid,
service_url=service_url,
verify_ssl=verify_ssl,
location=location,
role_arn=role_arn)
def get(bucket=None, path=None, return_bin=False, action=None,
local_file=None, key=None, keyid=None, service_url=None,
verify_ssl=None, kms_keyid=None, location=None, role_arn=None):
key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn = _get_key(
key,
keyid,
service_url,
verify_ssl,
kms_keyid,
location,
role_arn,
)
return salt.utils.s3.query(method='GET',
bucket=bucket,
path=path,
return_bin=return_bin,
local_file=local_file,
action=action,
key=key,
keyid=keyid,
kms_keyid=kms_keyid,
service_url=service_url,
verify_ssl=verify_ssl,
location=location,
role_arn=role_arn)
def head(bucket, path=None, key=None, keyid=None, service_url=None,
verify_ssl=None, kms_keyid=None, location=None, role_arn=None):
key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn = _get_key(
key,
keyid,
service_url,
verify_ssl,
kms_keyid,
location,
role_arn,
)
return salt.utils.s3.query(method='HEAD',
bucket=bucket,
path=path,
key=key,
keyid=keyid,
kms_keyid=kms_keyid,
service_url=service_url,
verify_ssl=verify_ssl,
location=location,
full_headers=True,
role_arn=role_arn)
def put(bucket, path=None, return_bin=False, action=None, local_file=None,
key=None, keyid=None, service_url=None, verify_ssl=None,
kms_keyid=None, location=None, role_arn=None):
key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn = _get_key(
key,
keyid,
service_url,
verify_ssl,
kms_keyid,
location,
role_arn,
)
return salt.utils.s3.query(method='PUT',
bucket=bucket,
path=path,
return_bin=return_bin,
local_file=local_file,
action=action,
key=key,
keyid=keyid,
kms_keyid=kms_keyid,
service_url=service_url,
verify_ssl=verify_ssl,
location=location,
role_arn=role_arn)
def _get_key(key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn):
if not key and __salt__['config.option']('s3.key'):
key = __salt__['config.option']('s3.key')
if not keyid and __salt__['config.option']('s3.keyid'):
keyid = __salt__['config.option']('s3.keyid')
if not kms_keyid and __salt__['config.option']('aws.kms.keyid'):
kms_keyid = __salt__['config.option']('aws.kms.keyid')
if not service_url and __salt__['config.option']('s3.service_url'):
service_url = __salt__['config.option']('s3.service_url')
if not service_url:
service_url = 's3.amazonaws.com'
if verify_ssl is None and __salt__['config.option']('s3.verify_ssl') is not None:
verify_ssl = __salt__['config.option']('s3.verify_ssl')
if verify_ssl is None:
verify_ssl = True
if location is None and __salt__['config.option']('s3.location') is not None:
location = __salt__['config.option']('s3.location')
if role_arn is None and __salt__['config.option']('s3.role_arn') is not None:
role_arn = __salt__['config.option']('s3.role_arn')
return key, keyid, service_url, verify_ssl, kms_keyid, location, role_arn
| true
| true
|
1c4676e95a1de5936f6b8382b6bdae4774211922
| 3,598
|
py
|
Python
|
yt_dlp/extractor/joj.py
|
YuanHsing/yt-dlp
|
38d86f4d45cf2b764f79141c602356fbb426a4b6
|
[
"Unlicense"
] | 1
|
2021-12-13T14:12:47.000Z
|
2021-12-13T14:12:47.000Z
|
yt_dlp/extractor/joj.py
|
YuanHsing/yt-dlp
|
38d86f4d45cf2b764f79141c602356fbb426a4b6
|
[
"Unlicense"
] | null | null | null |
yt_dlp/extractor/joj.py
|
YuanHsing/yt-dlp
|
38d86f4d45cf2b764f79141c602356fbb426a4b6
|
[
"Unlicense"
] | null | null | null |
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
format_field,
int_or_none,
js_to_json,
try_get,
)
class JojIE(InfoExtractor):
_VALID_URL = r'''(?x)
(?:
joj:|
https?://media\.joj\.sk/embed/
)
(?P<id>[^/?#^]+)
'''
_TESTS = [{
'url': 'https://media.joj.sk/embed/a388ec4c-6019-4a4a-9312-b1bee194e932',
'info_dict': {
'id': 'a388ec4c-6019-4a4a-9312-b1bee194e932',
'ext': 'mp4',
'title': 'NOVÉ BÝVANIE',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 3118,
}
}, {
'url': 'https://media.joj.sk/embed/9i1cxv',
'only_matching': True,
}, {
'url': 'joj:a388ec4c-6019-4a4a-9312-b1bee194e932',
'only_matching': True,
}, {
'url': 'joj:9i1cxv',
'only_matching': True,
}]
@staticmethod
def _extract_urls(webpage):
return [
mobj.group('url')
for mobj in re.finditer(
r'<iframe\b[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//media\.joj\.sk/embed/(?:(?!\1).)+)\1',
webpage)]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(
'https://media.joj.sk/embed/%s' % video_id, video_id)
title = self._search_regex(
(r'videoTitle\s*:\s*(["\'])(?P<title>(?:(?!\1).)+)\1',
r'<title>(?P<title>[^<]+)'), webpage, 'title',
default=None, group='title') or self._og_search_title(webpage)
bitrates = self._parse_json(
self._search_regex(
r'(?s)(?:src|bitrates)\s*=\s*({.+?});', webpage, 'bitrates',
default='{}'),
video_id, transform_source=js_to_json, fatal=False)
formats = []
for format_url in try_get(bitrates, lambda x: x['mp4'], list) or []:
if isinstance(format_url, compat_str):
height = self._search_regex(
r'(\d+)[pP]\.', format_url, 'height', default=None)
formats.append({
'url': format_url,
'format_id': format_field(height, None, '%sp'),
                'height': int_or_none(height),  # height may be None if the regex missed
})
if not formats:
playlist = self._download_xml(
'https://media.joj.sk/services/Video.php?clip=%s' % video_id,
video_id)
for file_el in playlist.findall('./files/file'):
path = file_el.get('path')
if not path:
continue
format_id = file_el.get('id') or file_el.get('label')
formats.append({
'url': 'http://n16.joj.sk/storage/%s' % path.replace(
'dat/', '', 1),
'format_id': format_id,
'height': int_or_none(self._search_regex(
r'(\d+)[pP]', format_id or path, 'height',
default=None)),
})
self._sort_formats(formats)
thumbnail = self._og_search_thumbnail(webpage)
duration = int_or_none(self._search_regex(
r'videoDuration\s*:\s*(\d+)', webpage, 'duration', fatal=False))
return {
'id': video_id,
'title': title,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
}
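# Illustrative only (made-up embed id): given a page containing
#   <iframe src="https://media.joj.sk/embed/abc-123"></iframe>
# JojIE._extract_urls(webpage) would return
#   ['https://media.joj.sk/embed/abc-123']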
| 33.626168
| 105
| 0.472763
|
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
format_field,
int_or_none,
js_to_json,
try_get,
)
class JojIE(InfoExtractor):
_VALID_URL = r'''(?x)
(?:
joj:|
https?://media\.joj\.sk/embed/
)
(?P<id>[^/?#^]+)
'''
_TESTS = [{
'url': 'https://media.joj.sk/embed/a388ec4c-6019-4a4a-9312-b1bee194e932',
'info_dict': {
'id': 'a388ec4c-6019-4a4a-9312-b1bee194e932',
'ext': 'mp4',
'title': 'NOVÉ BÝVANIE',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 3118,
}
}, {
'url': 'https://media.joj.sk/embed/9i1cxv',
'only_matching': True,
}, {
'url': 'joj:a388ec4c-6019-4a4a-9312-b1bee194e932',
'only_matching': True,
}, {
'url': 'joj:9i1cxv',
'only_matching': True,
}]
@staticmethod
def _extract_urls(webpage):
return [
mobj.group('url')
for mobj in re.finditer(
r'<iframe\b[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//media\.joj\.sk/embed/(?:(?!\1).)+)\1',
webpage)]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(
'https://media.joj.sk/embed/%s' % video_id, video_id)
title = self._search_regex(
(r'videoTitle\s*:\s*(["\'])(?P<title>(?:(?!\1).)+)\1',
r'<title>(?P<title>[^<]+)'), webpage, 'title',
default=None, group='title') or self._og_search_title(webpage)
bitrates = self._parse_json(
self._search_regex(
r'(?s)(?:src|bitrates)\s*=\s*({.+?});', webpage, 'bitrates',
default='{}'),
video_id, transform_source=js_to_json, fatal=False)
formats = []
for format_url in try_get(bitrates, lambda x: x['mp4'], list) or []:
if isinstance(format_url, compat_str):
height = self._search_regex(
r'(\d+)[pP]\.', format_url, 'height', default=None)
formats.append({
'url': format_url,
'format_id': format_field(height, None, '%sp'),
                'height': int_or_none(height),
})
if not formats:
playlist = self._download_xml(
'https://media.joj.sk/services/Video.php?clip=%s' % video_id,
video_id)
for file_el in playlist.findall('./files/file'):
path = file_el.get('path')
if not path:
continue
format_id = file_el.get('id') or file_el.get('label')
formats.append({
'url': 'http://n16.joj.sk/storage/%s' % path.replace(
'dat/', '', 1),
'format_id': format_id,
'height': int_or_none(self._search_regex(
r'(\d+)[pP]', format_id or path, 'height',
default=None)),
})
self._sort_formats(formats)
thumbnail = self._og_search_thumbnail(webpage)
duration = int_or_none(self._search_regex(
r'videoDuration\s*:\s*(\d+)', webpage, 'duration', fatal=False))
return {
'id': video_id,
'title': title,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
}
| true
| true
|
1c46778f48f486ad8a9dd24ae5c1193ee12576be
| 246
|
py
|
Python
|
manage.py
|
fndomariano/prototipo-rbc
|
72b48a2cdc5d5072d09b20cb7311df50ea5161bb
|
[
"MIT"
] | null | null | null |
manage.py
|
fndomariano/prototipo-rbc
|
72b48a2cdc5d5072d09b20cb7311df50ea5161bb
|
[
"MIT"
] | 6
|
2021-03-19T01:32:29.000Z
|
2021-09-22T18:50:40.000Z
|
manage.py
|
fndomariano/prototype-cbr
|
72b48a2cdc5d5072d09b20cb7311df50ea5161bb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tcc.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
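# Standard Django management entry point; typical invocations (for reference):
#   python manage.py migrate
#   python manage.py runserver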
| 22.363636
| 67
| 0.768293
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tcc.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| true
| true
|
1c467887b365f7e063283d64aeac5d205b2dbc04
| 905
|
py
|
Python
|
services/python/app/lib/modules/detections/keybase.py
|
seanmcfeely/eventsentry
|
afa4f7c3797a5b3cd96511064f58eb375ca73848
|
[
"Apache-2.0"
] | 4
|
2018-08-17T16:51:46.000Z
|
2020-05-05T21:27:18.000Z
|
services/python/app/lib/modules/detections/keybase.py
|
seanmcfeely/eventsentry
|
afa4f7c3797a5b3cd96511064f58eb375ca73848
|
[
"Apache-2.0"
] | 6
|
2018-08-06T20:40:22.000Z
|
2019-01-17T15:04:31.000Z
|
services/python/app/lib/modules/detections/keybase.py
|
seanmcfeely/eventsentry
|
afa4f7c3797a5b3cd96511064f58eb375ca73848
|
[
"Apache-2.0"
] | 4
|
2018-08-06T14:59:09.000Z
|
2019-08-30T18:03:45.000Z
|
from lib.modules.DetectionModule import *
class Module(DetectionModule):
def __init__(self, name, event_json):
super().__init__(name=name, event_json=event_json)
def run(self):
self.logger.debug('Running the {} detection module'.format(self.name))
# Loop over each sandboxed sample in the event.
for sample in self.event_json['sandbox']:
# Loop over all of the process trees.
trees = sample['process_trees'] + sample['process_trees_decoded']
for tree in trees:
tree = tree.lower()
strings = ['C:\\ProgramData\\Mails.txt', 'C:\\ProgramData\\Browsers.txt']
if all(string.lower() in tree for string in strings):
self.detections.append('Detected KeyBase by the process tree: {}'.format(' AND '.join(strings)))
self.tags.append('keybase')
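# Sketch of the event_json shape this module expects, inferred from the loop
# above (field values are illustrative, not from a real event):
#   event_json = {'sandbox': [{
#       'process_trees': ['... C:\\ProgramData\\Mails.txt ... C:\\ProgramData\\Browsers.txt ...'],
#       'process_trees_decoded': [],
#   }]}
# A detection fires only when both marker paths appear in the same tree.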
| 37.708333
| 116
| 0.609945
|
from lib.modules.DetectionModule import *
class Module(DetectionModule):
def __init__(self, name, event_json):
super().__init__(name=name, event_json=event_json)
def run(self):
self.logger.debug('Running the {} detection module'.format(self.name))
for sample in self.event_json['sandbox']:
trees = sample['process_trees'] + sample['process_trees_decoded']
for tree in trees:
tree = tree.lower()
strings = ['C:\\ProgramData\\Mails.txt', 'C:\\ProgramData\\Browsers.txt']
if all(string.lower() in tree for string in strings):
self.detections.append('Detected KeyBase by the process tree: {}'.format(' AND '.join(strings)))
self.tags.append('keybase')
| true
| true
|
1c46789dc107becd64aab237d7fbcb37ef2c7d33
| 3,658
|
py
|
Python
|
assets/pinyin-python-server/server.py
|
admpub/AreaCity-JsSpider-StatsGov
|
c677db485d33a479ffb2bb28da8fc377a56d9ec9
|
[
"MIT"
] | 3,332
|
2018-11-28T07:11:37.000Z
|
2022-03-31T18:54:17.000Z
|
assets/pinyin-python-server/server.py
|
xlehehe/AreaCity-JsSpider-StatsGov
|
c36f95ed88bc24469a10fca7959d0350117e57dc
|
[
"MIT"
] | 21
|
2019-03-26T06:54:26.000Z
|
2022-02-12T05:03:36.000Z
|
assets/pinyin-python-server/server.py
|
xlehehe/AreaCity-JsSpider-StatsGov
|
c36f95ed88bc24469a10fca7959d0350117e57dc
|
[
"MIT"
] | 663
|
2018-11-28T10:32:56.000Z
|
2022-03-29T15:00:00.000Z
|
# -*- coding:utf-8 -*-
"""
GitHub: https://github.com/xiangyuecn/AreaCity-JsSpider-StatsGov/assets/pinyin-python-server
使用的HanLP (https://github.com/hankcs/HanLP) 语言处理库
【1】安装Miniconda
conda版本随意
【2】安装pyhanlp
https://github.com/hankcs/pyhanlp/wiki/Windows
测试发现python3.7.1 windows下ssl有问题无法安装,conda切换成python 3.6.4测试安装正常
安装好后运行一下hanlp命令,会提示下载,看第3步
【3】下载字典和jar
参考半自动配置: https://github.com/hankcs/pyhanlp/wiki/%E6%89%8B%E5%8A%A8%E9%85%8D%E7%BD%AE
字典和jar存放目录一般在Miniconda3[\envs\py36]\Lib\site-packages\pyhanlp\static
jar直接下载最新releases
字典最好直接clone仓库/data目录最新版本(用svn下载速度快很多,无需model数据),一样的在存储目录内放一个data文件夹,releases对bug处理稍微滞后一点。
另外需要修改hanlp.properties,给root赋值为当前目录完整路径。
svn: https://github.com/hankcs/HanLP/trunk/data
【4】运行
python server.py
【5】浏览器访问
http://127.0.0.1:9527/pinyin?txt=要拼的文字
"拼音。m" 返回结果 {c:0,m:"",v:["pin","yin","F。","Fm"]},c=0时代表正常,其他代表出错,m为错误原因,拼音如果是字母符号会用F打头
"""
import sys
if sys.version_info.major < 3:
print("Require python3 environment!")
exit(1)
from pyhanlp import *
import traceback
import time
import json
import urllib
from http.server import HTTPServer, BaseHTTPRequestHandler
class HttpHandler(BaseHTTPRequestHandler):
def _response(self, path, args):
startTime=time.time()
code=200
rtv={'c':0,'m':'','v':''}
try:
if args:
args=urllib.parse.parse_qs(args).items()
args=dict([(k,v[0]) for k,v in args])
else:
args={}
# ****************************************
            # ************** page routes begin ******
# ****************************************
# ==>
if path=="/":
rtv["v"]="服务器已准备好"
# ==>
elif path=="/pinyin":
txt=args.get("txt","")
pinyin_list = HanLP.convertToPinyinList(txt)
list=[]
Pinyin=JClass("com.hankcs.hanlp.dictionary.py.Pinyin")
for i in range(pinyin_list.size()):
pinyin=pinyin_list[i]
if pinyin==Pinyin.none5:
list.append('F'+txt[i])
else:
list.append(pinyin.getPinyinWithoutTone())
rtv["v"]=list
# ****************************************
            # *************** page routes end *******
# ****************************************
else:
code=404
rtv["c"]=404
rtv["m"]="路径"+path+"不存在"
except Exception as e:
rtv["c"]=1
rtv["m"]='服务器错误:'+str(e)+"\n"+traceback.format_exc()
rtv["T"]=int(startTime*1000)
rtv["D"]=int((time.time()-startTime)*1000)
try:
rtv=json.dumps(rtv,ensure_ascii=False)
except Exception as e:
rtv={'c':2,'m':'服务器返回数据错误:'+str(e)+"\n"+traceback.format_exc(),'v':''}
rtv=json.dumps(rtv,ensure_ascii=False)
self.send_response(code)
self.send_header('Content-type', 'text/json; charset=utf-8')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(rtv.encode())
def do_GET(self):
        # urllib.parse.splitquery is deprecated; urlsplit is the supported API
        parts = urllib.parse.urlsplit(self.path)
        self._response(parts.path, parts.query)
def do_POST(self):
args = self.rfile.read(int(self.headers['content-length'])).decode("utf-8")
self._response(self.path, args)
httpd = HTTPServer(('127.0.0.1', 9527), HttpHandler)
httpd.serve_forever()
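# Minimal client sketch (assumes the server above is running on 127.0.0.1:9527):
#   import json, urllib.parse, urllib.request
#   url = 'http://127.0.0.1:9527/pinyin?txt=' + urllib.parse.quote('拼音')
#   with urllib.request.urlopen(url) as resp:
#       print(json.loads(resp.read().decode('utf-8')))
#   # e.g. {'c': 0, 'm': '', 'v': ['pin', 'yin'], 'T': ..., 'D': ...}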
| 30.483333
| 92
| 0.52269
|
import sys
if sys.version_info.major < 3:
print("Require python3 environment!")
exit(1)
from pyhanlp import *
import traceback
import time
import json
import urllib
from http.server import HTTPServer, BaseHTTPRequestHandler
class HttpHandler(BaseHTTPRequestHandler):
def _response(self, path, args):
startTime=time.time()
code=200
rtv={'c':0,'m':'','v':''}
try:
if args:
args=urllib.parse.parse_qs(args).items()
args=dict([(k,v[0]) for k,v in args])
else:
args={}
if path=="/":
rtv["v"]="服务器已准备好"
elif path=="/pinyin":
txt=args.get("txt","")
pinyin_list = HanLP.convertToPinyinList(txt)
list=[]
Pinyin=JClass("com.hankcs.hanlp.dictionary.py.Pinyin")
for i in range(pinyin_list.size()):
pinyin=pinyin_list[i]
if pinyin==Pinyin.none5:
list.append('F'+txt[i])
else:
list.append(pinyin.getPinyinWithoutTone())
rtv["v"]=list
else:
code=404
rtv["c"]=404
rtv["m"]="路径"+path+"不存在"
except Exception as e:
rtv["c"]=1
rtv["m"]='服务器错误:'+str(e)+"\n"+traceback.format_exc()
rtv["T"]=int(startTime*1000)
rtv["D"]=int((time.time()-startTime)*1000)
try:
rtv=json.dumps(rtv,ensure_ascii=False)
except Exception as e:
rtv={'c':2,'m':'服务器返回数据错误:'+str(e)+"\n"+traceback.format_exc(),'v':''}
rtv=json.dumps(rtv,ensure_ascii=False)
self.send_response(code)
self.send_header('Content-type', 'text/json; charset=utf-8')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(rtv.encode())
def do_GET(self):
        parts = urllib.parse.urlsplit(self.path)
        self._response(parts.path, parts.query)
def do_POST(self):
args = self.rfile.read(int(self.headers['content-length'])).decode("utf-8")
self._response(self.path, args)
httpd = HTTPServer(('127.0.0.1', 9527), HttpHandler)
httpd.serve_forever()
| true
| true
|
1c467a246f902e6c48bf43554502bd3ba0323cb2
| 7,868
|
py
|
Python
|
supervisor/bootstrap.py
|
pssc/supervisor
|
2f09ee05c52e255fbdd8b795451d39b24278ef35
|
[
"Apache-2.0"
] | 24
|
2020-03-08T21:13:00.000Z
|
2020-03-11T06:18:43.000Z
|
supervisor/bootstrap.py
|
pssc/supervisor
|
2f09ee05c52e255fbdd8b795451d39b24278ef35
|
[
"Apache-2.0"
] | null | null | null |
supervisor/bootstrap.py
|
pssc/supervisor
|
2f09ee05c52e255fbdd8b795451d39b24278ef35
|
[
"Apache-2.0"
] | null | null | null |
"""Bootstrap Supervisor."""
import logging
import os
from pathlib import Path
import shutil
import signal
from colorlog import ColoredFormatter
from .addons import AddonManager
from .api import RestAPI
from .arch import CpuArch
from .auth import Auth
from .audio import Audio
from .const import SOCKET_DOCKER, UpdateChannels
from .core import Core
from .coresys import CoreSys
from .dbus import DBusManager
from .discovery import Discovery
from .dns import CoreDNS
from .hassos import HassOS
from .homeassistant import HomeAssistant
from .host import HostManager
from .hwmon import HwMonitor
from .ingress import Ingress
from .services import ServiceManager
from .snapshots import SnapshotManager
from .store import StoreManager
from .supervisor import Supervisor
from .tasks import Tasks
from .updater import Updater
from .secrets import SecretsManager
from .utils.dt import fetch_timezone
_LOGGER: logging.Logger = logging.getLogger(__name__)
ENV_SHARE = "SUPERVISOR_SHARE"
ENV_NAME = "SUPERVISOR_NAME"
ENV_REPO = "HOMEASSISTANT_REPOSITORY"
MACHINE_ID = Path("/etc/machine-id")
async def initialize_coresys():
"""Initialize supervisor coresys/objects."""
coresys = CoreSys()
# Initialize core objects
coresys.core = Core(coresys)
coresys.dns = CoreDNS(coresys)
coresys.arch = CpuArch(coresys)
coresys.audio = Audio(coresys)
coresys.auth = Auth(coresys)
coresys.updater = Updater(coresys)
coresys.api = RestAPI(coresys)
coresys.supervisor = Supervisor(coresys)
coresys.homeassistant = HomeAssistant(coresys)
coresys.addons = AddonManager(coresys)
coresys.snapshots = SnapshotManager(coresys)
coresys.host = HostManager(coresys)
coresys.hwmonitor = HwMonitor(coresys)
coresys.ingress = Ingress(coresys)
coresys.tasks = Tasks(coresys)
coresys.services = ServiceManager(coresys)
coresys.store = StoreManager(coresys)
coresys.discovery = Discovery(coresys)
coresys.dbus = DBusManager(coresys)
coresys.hassos = HassOS(coresys)
coresys.secrets = SecretsManager(coresys)
# bootstrap config
initialize_system_data(coresys)
# Set Machine/Host ID
if MACHINE_ID.exists():
coresys.machine_id = MACHINE_ID.read_text().strip()
# Init TimeZone
if coresys.config.timezone == "UTC":
coresys.config.timezone = await fetch_timezone(coresys.websession)
return coresys
def initialize_system_data(coresys: CoreSys):
"""Set up the default configuration and create folders."""
config = coresys.config
# Home Assistant configuration folder
if not config.path_homeassistant.is_dir():
_LOGGER.info(
"Create Home Assistant configuration folder %s", config.path_homeassistant
)
config.path_homeassistant.mkdir()
# Supervisor ssl folder
if not config.path_ssl.is_dir():
_LOGGER.info("Create Supervisor SSL/TLS folder %s", config.path_ssl)
config.path_ssl.mkdir()
# Supervisor addon data folder
if not config.path_addons_data.is_dir():
_LOGGER.info("Create Supervisor Add-on data folder %s", config.path_addons_data)
config.path_addons_data.mkdir(parents=True)
if not config.path_addons_local.is_dir():
_LOGGER.info(
"Create Supervisor Add-on local repository folder %s",
config.path_addons_local,
)
config.path_addons_local.mkdir(parents=True)
if not config.path_addons_git.is_dir():
_LOGGER.info(
"Create Supervisor Add-on git repositories folder %s",
config.path_addons_git,
)
config.path_addons_git.mkdir(parents=True)
# Supervisor tmp folder
if not config.path_tmp.is_dir():
_LOGGER.info("Create Supervisor temp folder %s", config.path_tmp)
config.path_tmp.mkdir(parents=True)
# Supervisor backup folder
if not config.path_backup.is_dir():
_LOGGER.info("Create Supervisor backup folder %s", config.path_backup)
config.path_backup.mkdir()
# Share folder
if not config.path_share.is_dir():
_LOGGER.info("Create Supervisor share folder %s", config.path_share)
config.path_share.mkdir()
# Apparmor folder
if not config.path_apparmor.is_dir():
_LOGGER.info("Create Supervisor Apparmor folder %s", config.path_apparmor)
config.path_apparmor.mkdir()
# DNS folder
if not config.path_dns.is_dir():
_LOGGER.info("Create Supervisor DNS folder %s", config.path_dns)
config.path_dns.mkdir()
# Audio folder
if not config.path_audio.is_dir():
_LOGGER.info("Create Supervisor audio folder %s", config.path_audio)
config.path_audio.mkdir()
# Update log level
coresys.config.modify_log_level()
# Check if ENV is in development mode
if bool(os.environ.get("SUPERVISOR_DEV", 0)):
_LOGGER.warning("SUPERVISOR_DEV is set")
coresys.updater.channel = UpdateChannels.DEV
coresys.config.logging = "debug"
coresys.config.debug = True
def migrate_system_env(coresys: CoreSys):
"""Cleanup some stuff after update."""
config = coresys.config
# hass.io 0.37 -> 0.38
old_build = Path(config.path_hassio, "addons/build")
if old_build.is_dir():
try:
old_build.rmdir()
except OSError:
_LOGGER.warning("Can't cleanup old Add-on build directory")
def initialize_logging():
"""Setup the logging."""
logging.basicConfig(level=logging.INFO)
fmt = "%(asctime)s %(levelname)s (%(threadName)s) [%(name)s] %(message)s"
colorfmt = f"%(log_color)s{fmt}%(reset)s"
datefmt = "%y-%m-%d %H:%M:%S"
# suppress overly verbose logs from libraries that aren't helpful
logging.getLogger("aiohttp.access").setLevel(logging.WARNING)
logging.getLogger().handlers[0].setFormatter(
ColoredFormatter(
colorfmt,
datefmt=datefmt,
reset=True,
log_colors={
"DEBUG": "cyan",
"INFO": "green",
"WARNING": "yellow",
"ERROR": "red",
"CRITICAL": "red",
},
)
)
def check_environment() -> None:
"""Check if all environment are exists."""
# check environment variables
for key in (ENV_SHARE, ENV_NAME, ENV_REPO):
try:
os.environ[key]
except KeyError:
_LOGGER.fatal("Can't find %s in env!", key)
# check docker socket
if not SOCKET_DOCKER.is_socket():
_LOGGER.fatal("Can't find Docker socket!")
# check socat exec
if not shutil.which("socat"):
_LOGGER.fatal("Can't find socat!")
    # check gdbus exec
if not shutil.which("gdbus"):
_LOGGER.fatal("Can't find gdbus!")
def reg_signal(loop):
"""Register SIGTERM and SIGKILL to stop system."""
try:
loop.add_signal_handler(signal.SIGTERM, lambda: loop.call_soon(loop.stop))
except (ValueError, RuntimeError):
_LOGGER.warning("Could not bind to SIGTERM")
try:
loop.add_signal_handler(signal.SIGHUP, lambda: loop.call_soon(loop.stop))
except (ValueError, RuntimeError):
_LOGGER.warning("Could not bind to SIGHUP")
try:
loop.add_signal_handler(signal.SIGINT, lambda: loop.call_soon(loop.stop))
except (ValueError, RuntimeError):
_LOGGER.warning("Could not bind to SIGINT")
def supervisor_debugger(coresys: CoreSys) -> None:
"""Setup debugger if needed."""
if not coresys.config.debug:
return
# pylint: disable=import-outside-toplevel
import ptvsd
_LOGGER.info("Initialize Supervisor debugger")
ptvsd.enable_attach(address=("0.0.0.0", 33333), redirect_output=True)
if coresys.config.debug_block:
_LOGGER.info("Wait until debugger is attached")
ptvsd.wait_for_attach()
| 31.098814
| 88
| 0.680859
|
import logging
import os
from pathlib import Path
import shutil
import signal
from colorlog import ColoredFormatter
from .addons import AddonManager
from .api import RestAPI
from .arch import CpuArch
from .auth import Auth
from .audio import Audio
from .const import SOCKET_DOCKER, UpdateChannels
from .core import Core
from .coresys import CoreSys
from .dbus import DBusManager
from .discovery import Discovery
from .dns import CoreDNS
from .hassos import HassOS
from .homeassistant import HomeAssistant
from .host import HostManager
from .hwmon import HwMonitor
from .ingress import Ingress
from .services import ServiceManager
from .snapshots import SnapshotManager
from .store import StoreManager
from .supervisor import Supervisor
from .tasks import Tasks
from .updater import Updater
from .secrets import SecretsManager
from .utils.dt import fetch_timezone
_LOGGER: logging.Logger = logging.getLogger(__name__)
ENV_SHARE = "SUPERVISOR_SHARE"
ENV_NAME = "SUPERVISOR_NAME"
ENV_REPO = "HOMEASSISTANT_REPOSITORY"
MACHINE_ID = Path("/etc/machine-id")
async def initialize_coresys():
coresys = CoreSys()
coresys.core = Core(coresys)
coresys.dns = CoreDNS(coresys)
coresys.arch = CpuArch(coresys)
coresys.audio = Audio(coresys)
coresys.auth = Auth(coresys)
coresys.updater = Updater(coresys)
coresys.api = RestAPI(coresys)
coresys.supervisor = Supervisor(coresys)
coresys.homeassistant = HomeAssistant(coresys)
coresys.addons = AddonManager(coresys)
coresys.snapshots = SnapshotManager(coresys)
coresys.host = HostManager(coresys)
coresys.hwmonitor = HwMonitor(coresys)
coresys.ingress = Ingress(coresys)
coresys.tasks = Tasks(coresys)
coresys.services = ServiceManager(coresys)
coresys.store = StoreManager(coresys)
coresys.discovery = Discovery(coresys)
coresys.dbus = DBusManager(coresys)
coresys.hassos = HassOS(coresys)
coresys.secrets = SecretsManager(coresys)
initialize_system_data(coresys)
if MACHINE_ID.exists():
coresys.machine_id = MACHINE_ID.read_text().strip()
if coresys.config.timezone == "UTC":
coresys.config.timezone = await fetch_timezone(coresys.websession)
return coresys
def initialize_system_data(coresys: CoreSys):
config = coresys.config
if not config.path_homeassistant.is_dir():
_LOGGER.info(
"Create Home Assistant configuration folder %s", config.path_homeassistant
)
config.path_homeassistant.mkdir()
if not config.path_ssl.is_dir():
_LOGGER.info("Create Supervisor SSL/TLS folder %s", config.path_ssl)
config.path_ssl.mkdir()
if not config.path_addons_data.is_dir():
_LOGGER.info("Create Supervisor Add-on data folder %s", config.path_addons_data)
config.path_addons_data.mkdir(parents=True)
if not config.path_addons_local.is_dir():
_LOGGER.info(
"Create Supervisor Add-on local repository folder %s",
config.path_addons_local,
)
config.path_addons_local.mkdir(parents=True)
if not config.path_addons_git.is_dir():
_LOGGER.info(
"Create Supervisor Add-on git repositories folder %s",
config.path_addons_git,
)
config.path_addons_git.mkdir(parents=True)
if not config.path_tmp.is_dir():
_LOGGER.info("Create Supervisor temp folder %s", config.path_tmp)
config.path_tmp.mkdir(parents=True)
if not config.path_backup.is_dir():
_LOGGER.info("Create Supervisor backup folder %s", config.path_backup)
config.path_backup.mkdir()
if not config.path_share.is_dir():
_LOGGER.info("Create Supervisor share folder %s", config.path_share)
config.path_share.mkdir()
if not config.path_apparmor.is_dir():
_LOGGER.info("Create Supervisor Apparmor folder %s", config.path_apparmor)
config.path_apparmor.mkdir()
if not config.path_dns.is_dir():
_LOGGER.info("Create Supervisor DNS folder %s", config.path_dns)
config.path_dns.mkdir()
if not config.path_audio.is_dir():
_LOGGER.info("Create Supervisor audio folder %s", config.path_audio)
config.path_audio.mkdir()
coresys.config.modify_log_level()
if bool(os.environ.get("SUPERVISOR_DEV", 0)):
_LOGGER.warning("SUPERVISOR_DEV is set")
coresys.updater.channel = UpdateChannels.DEV
coresys.config.logging = "debug"
coresys.config.debug = True
def migrate_system_env(coresys: CoreSys):
config = coresys.config
old_build = Path(config.path_hassio, "addons/build")
if old_build.is_dir():
try:
old_build.rmdir()
except OSError:
_LOGGER.warning("Can't cleanup old Add-on build directory")
def initialize_logging():
logging.basicConfig(level=logging.INFO)
fmt = "%(asctime)s %(levelname)s (%(threadName)s) [%(name)s] %(message)s"
colorfmt = f"%(log_color)s{fmt}%(reset)s"
datefmt = "%y-%m-%d %H:%M:%S"
# suppress overly verbose logs from libraries that aren't helpful
logging.getLogger("aiohttp.access").setLevel(logging.WARNING)
logging.getLogger().handlers[0].setFormatter(
ColoredFormatter(
colorfmt,
datefmt=datefmt,
reset=True,
log_colors={
"DEBUG": "cyan",
"INFO": "green",
"WARNING": "yellow",
"ERROR": "red",
"CRITICAL": "red",
},
)
)
def check_environment() -> None:
for key in (ENV_SHARE, ENV_NAME, ENV_REPO):
try:
os.environ[key]
except KeyError:
_LOGGER.fatal("Can't find %s in env!", key)
# check docker socket
if not SOCKET_DOCKER.is_socket():
_LOGGER.fatal("Can't find Docker socket!")
if not shutil.which("socat"):
_LOGGER.fatal("Can't find socat!")
    # check gdbus exec
if not shutil.which("gdbus"):
_LOGGER.fatal("Can't find gdbus!")
def reg_signal(loop):
try:
loop.add_signal_handler(signal.SIGTERM, lambda: loop.call_soon(loop.stop))
except (ValueError, RuntimeError):
_LOGGER.warning("Could not bind to SIGTERM")
try:
loop.add_signal_handler(signal.SIGHUP, lambda: loop.call_soon(loop.stop))
except (ValueError, RuntimeError):
_LOGGER.warning("Could not bind to SIGHUP")
try:
loop.add_signal_handler(signal.SIGINT, lambda: loop.call_soon(loop.stop))
except (ValueError, RuntimeError):
_LOGGER.warning("Could not bind to SIGINT")
def supervisor_debugger(coresys: CoreSys) -> None:
if not coresys.config.debug:
return
import ptvsd
_LOGGER.info("Initialize Supervisor debugger")
ptvsd.enable_attach(address=("0.0.0.0", 33333), redirect_output=True)
if coresys.config.debug_block:
_LOGGER.info("Wait until debugger is attached")
ptvsd.wait_for_attach()
| true
| true
|
1c467a33edb77eed8083f7b4d9221ac4d6a5fbee
| 6,264
|
py
|
Python
|
marshmallow_oneofschema/one_of_schema.py
|
misterflop/marshmallow-oneofschema
|
64f836d7c332b515e3e4c956683366fa11d17966
|
[
"MIT"
] | null | null | null |
marshmallow_oneofschema/one_of_schema.py
|
misterflop/marshmallow-oneofschema
|
64f836d7c332b515e3e4c956683366fa11d17966
|
[
"MIT"
] | null | null | null |
marshmallow_oneofschema/one_of_schema.py
|
misterflop/marshmallow-oneofschema
|
64f836d7c332b515e3e4c956683366fa11d17966
|
[
"MIT"
] | null | null | null |
from marshmallow import Schema, ValidationError
class OneOfSchema(Schema):
"""
    This is a special kind of schema that multiplexes other schemas based on
    object type. When serializing values, it uses the get_obj_type() method to
    get the object type name, then looks that name up in the `type_schemas`
    name-to-Schema mapping, serializes the object using the resulting schema,
    and adds an extra "type" field with the name of the object type.
    Deserialization is the reverse.
Example:
class Foo(object):
def __init__(self, foo):
self.foo = foo
class Bar(object):
def __init__(self, bar):
self.bar = bar
class FooSchema(marshmallow.Schema):
foo = marshmallow.fields.String(required=True)
@marshmallow.post_load
def make_foo(self, data, **kwargs):
return Foo(**data)
class BarSchema(marshmallow.Schema):
bar = marshmallow.fields.Integer(required=True)
@marshmallow.post_load
def make_bar(self, data, **kwargs):
return Bar(**data)
class MyUberSchema(marshmallow.OneOfSchema):
type_schemas = {
'foo': FooSchema,
'bar': BarSchema,
}
def get_obj_type(self, obj):
if isinstance(obj, Foo):
return 'foo'
elif isinstance(obj, Bar):
return 'bar'
else:
raise Exception('Unknown object type: %s' % repr(obj))
MyUberSchema().dump([Foo(foo='hello'), Bar(bar=123)], many=True)
# => [{'type': 'foo', 'foo': 'hello'}, {'type': 'bar', 'bar': 123}]
    You can control the name of the type field added to the serialized
    representation by setting the `type_field` class property.
"""
type_field = "type"
type_field_remove = True
type_schemas = []
def get_obj_type(self, obj):
"""Returns name of object schema"""
return obj.__class__.__name__
def dump(self, obj, *, many=None, **kwargs):
errors = {}
result_data = []
result_errors = {}
many = self.many if many is None else bool(many)
if not many:
result = result_data = self._dump(obj, **kwargs)
else:
for idx, o in enumerate(obj):
try:
result = self._dump(o, **kwargs)
result_data.append(result)
except ValidationError as error:
result_errors[idx] = error.normalized_messages()
result_data.append(error.valid_data)
result = result_data
errors = result_errors
if not errors:
return result
else:
exc = ValidationError(errors, data=obj, valid_data=result)
raise exc
def _dump(self, obj, *, update_fields=True, **kwargs):
obj_type = self.get_obj_type(obj)
if not obj_type:
return (
None,
{"_schema": "Unknown object class: %s" % obj.__class__.__name__},
)
type_schema = self.type_schemas.get(obj_type)
if not type_schema:
return None, {"_schema": "Unsupported object type: %s" % obj_type}
schema = type_schema if isinstance(type_schema, Schema) else type_schema()
schema.context.update(getattr(self, "context", {}))
result = schema.dump(obj, many=False, **kwargs)
if result is not None:
result[self.type_field] = obj_type
return result
def load(self, data, *, many=None, partial=None, unknown=None, **kwargs):
errors = {}
result_data = []
result_errors = {}
many = self.many if many is None else bool(many)
if partial is None:
partial = self.partial
        if not many:
            try:
                result = result_data = self._load(
                    data, partial=partial, unknown=unknown, **kwargs
                )
            except ValidationError as error:
                # mirror the many-items path so ``result`` and ``errors``
                # are defined when we fall through to the checks below
                result_errors = error.normalized_messages()
                result = error.valid_data
            errors = result_errors
        else:
            for idx, item in enumerate(data):
                try:
                    result = self._load(
                        item, partial=partial, unknown=unknown, **kwargs
                    )
                    result_data.append(result)
                except ValidationError as error:
                    result_errors[idx] = error.normalized_messages()
                    result_data.append(error.valid_data)
            result = result_data
            errors = result_errors
if not errors:
return result
else:
exc = ValidationError(errors, data=data, valid_data=result)
raise exc
def _load(self, data, *, partial=None, unknown=None, **kwargs):
if not isinstance(data, dict):
raise ValidationError({"_schema": "Invalid data type: %s" % data})
data = dict(data)
unknown = unknown or self.unknown
data_type = data.get(self.type_field)
if self.type_field in data and self.type_field_remove:
data.pop(self.type_field)
if not data_type:
raise ValidationError(
{self.type_field: ["Missing data for required field."]}
)
try:
type_schema = self.type_schemas.get(data_type)
except TypeError:
# data_type could be unhashable
raise ValidationError({self.type_field: ["Invalid value: %s" % data_type]})
if not type_schema:
raise ValidationError(
{self.type_field: ["Unsupported value: %s" % data_type]}
)
schema = type_schema if isinstance(type_schema, Schema) else type_schema()
schema.context.update(getattr(self, "context", {}))
return schema.load(data, many=False, partial=partial, unknown=unknown, **kwargs)
def validate(self, data, *, many=None, partial=None):
try:
self.load(data, many=many, partial=partial)
except ValidationError as ve:
return ve.messages
return {}
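# Quick round trip using the FooSchema/BarSchema example from the class
# docstring above (the schema names come from that example, not this module):
#   schema = MyUberSchema()
#   schema.dump(Foo(foo='hello'))                 # {'type': 'foo', 'foo': 'hello'}
#   schema.load({'type': 'foo', 'foo': 'hello'})  # Foo instance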
| 36.208092
| 88
| 0.566252
|
from marshmallow import Schema, ValidationError
class OneOfSchema(Schema):
type_field = "type"
type_field_remove = True
type_schemas = []
def get_obj_type(self, obj):
return obj.__class__.__name__
def dump(self, obj, *, many=None, **kwargs):
errors = {}
result_data = []
result_errors = {}
many = self.many if many is None else bool(many)
if not many:
result = result_data = self._dump(obj, **kwargs)
else:
for idx, o in enumerate(obj):
try:
result = self._dump(o, **kwargs)
result_data.append(result)
except ValidationError as error:
result_errors[idx] = error.normalized_messages()
result_data.append(error.valid_data)
result = result_data
errors = result_errors
if not errors:
return result
else:
exc = ValidationError(errors, data=obj, valid_data=result)
raise exc
def _dump(self, obj, *, update_fields=True, **kwargs):
obj_type = self.get_obj_type(obj)
if not obj_type:
return (
None,
{"_schema": "Unknown object class: %s" % obj.__class__.__name__},
)
type_schema = self.type_schemas.get(obj_type)
if not type_schema:
return None, {"_schema": "Unsupported object type: %s" % obj_type}
schema = type_schema if isinstance(type_schema, Schema) else type_schema()
schema.context.update(getattr(self, "context", {}))
result = schema.dump(obj, many=False, **kwargs)
if result is not None:
result[self.type_field] = obj_type
return result
def load(self, data, *, many=None, partial=None, unknown=None, **kwargs):
errors = {}
result_data = []
result_errors = {}
many = self.many if many is None else bool(many)
if partial is None:
partial = self.partial
        if not many:
            try:
                result = result_data = self._load(
                    data, partial=partial, unknown=unknown, **kwargs
                )
            except ValidationError as error:
                result_errors = error.normalized_messages()
                result = error.valid_data
            errors = result_errors
        else:
            for idx, item in enumerate(data):
                try:
                    result = self._load(
                        item, partial=partial, unknown=unknown, **kwargs
                    )
                    result_data.append(result)
                except ValidationError as error:
                    result_errors[idx] = error.normalized_messages()
                    result_data.append(error.valid_data)
            result = result_data
            errors = result_errors
if not errors:
return result
else:
exc = ValidationError(errors, data=data, valid_data=result)
raise exc
def _load(self, data, *, partial=None, unknown=None, **kwargs):
if not isinstance(data, dict):
raise ValidationError({"_schema": "Invalid data type: %s" % data})
data = dict(data)
unknown = unknown or self.unknown
data_type = data.get(self.type_field)
if self.type_field in data and self.type_field_remove:
data.pop(self.type_field)
if not data_type:
raise ValidationError(
{self.type_field: ["Missing data for required field."]}
)
try:
type_schema = self.type_schemas.get(data_type)
except TypeError:
raise ValidationError({self.type_field: ["Invalid value: %s" % data_type]})
if not type_schema:
raise ValidationError(
{self.type_field: ["Unsupported value: %s" % data_type]}
)
schema = type_schema if isinstance(type_schema, Schema) else type_schema()
schema.context.update(getattr(self, "context", {}))
return schema.load(data, many=False, partial=partial, unknown=unknown, **kwargs)
def validate(self, data, *, many=None, partial=None):
try:
self.load(data, many=many, partial=partial)
except ValidationError as ve:
return ve.messages
return {}
| true
| true
|
1c467bde363eec1247fd8544df7541fa713b274f
| 197
|
py
|
Python
|
code/example1.py
|
ThomasHuke/knowPython
|
0a9d3c224b36416b7eaa9d664e145bc0f2a63df2
|
[
"MIT"
] | 4
|
2017-06-20T10:42:45.000Z
|
2017-10-24T09:19:27.000Z
|
code/example1.py
|
ThomasHuke/knowPython
|
0a9d3c224b36416b7eaa9d664e145bc0f2a63df2
|
[
"MIT"
] | null | null | null |
code/example1.py
|
ThomasHuke/knowPython
|
0a9d3c224b36416b7eaa9d664e145bc0f2a63df2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
def file():
for x, y in [[1, 3], [1, 4]]:
print('x , y ', x, y)
file()
# x , y 1 3
# x , y 1 4
print([x + x for x in range(1, 12)])
| 14.071429
| 36
| 0.436548
|
def file():
for x, y in [[1, 3], [1, 4]]:
print('x , y ', x, y)
file()
print([x + x for x in range(1, 12)])
| true
| true
|
1c467e126304bcb3bd6860b417e50dc19d57f51d
| 12,032
|
py
|
Python
|
dpmModule/jobs/cannonshooter.py
|
Jeongwoo-KGI/maplestory_dpm_calc
|
c474419146e377a05a724e9975a047649b7effa7
|
[
"MIT"
] | 2
|
2020-12-18T17:02:21.000Z
|
2021-02-01T04:16:33.000Z
|
dpmModule/jobs/cannonshooter.py
|
Jeongwoo-KGI/maplestory_dpm_calc
|
c474419146e377a05a724e9975a047649b7effa7
|
[
"MIT"
] | null | null | null |
dpmModule/jobs/cannonshooter.py
|
Jeongwoo-KGI/maplestory_dpm_calc
|
c474419146e377a05a724e9975a047649b7effa7
|
[
"MIT"
] | null | null | null |
from ..kernel import core
from ..character import characterKernel as ck
from ..status.ability import Ability_tool
from ..execution.rules import RuleSet, ConditionRule
from . import globalSkill
from .jobbranch import pirates
from .jobclass import adventurer
from . import jobutils
from math import ceil
from typing import Any, Dict
class JobGenerator(ck.JobGenerator):
def __init__(self):
super(JobGenerator, self).__init__()
self.jobtype = "str"
self.jobname = "캐논슈터"
self.vEnhanceNum = 16
self.ability_list = Ability_tool.get_ability_set(
"boss_pdamage", "crit", "reuse"
)
self.preEmptiveSkills = 2
def get_ruleset(self):
def cannonball_rule(soul_contract):
if soul_contract.is_active():
return True
if soul_contract.is_cooltime_left(50000, -1):
return False
return True
ruleset = RuleSet()
ruleset.add_rule(
ConditionRule("빅 휴즈 기간틱 캐논볼", "소울 컨트랙트", cannonball_rule),
RuleSet.BASE,
)
return ruleset
def get_modifier_optimization_hint(self):
return core.CharacterModifier(pdamage=66, crit_damage=6, armor_ignore=30)
def get_passive_skill_list(
self, vEhc, chtr: ck.AbstractCharacter, options: Dict[str, Any]
):
passive_level = chtr.get_base_modifier().passive_level + self.combat
BuildupCannon = core.InformedCharacterModifier("빌드업 캐논", att=20)
CriticalFire = core.InformedCharacterModifier(
"크리티컬 파이어", crit=20, crit_damage=5
)
PirateTraining = core.InformedCharacterModifier(
"파이렛 트레이닝", stat_main=30, stat_sub=30
)
MonkeyWavePassive = core.InformedCharacterModifier("몽키 웨이브(패시브)", crit=20)
OakRuletPassive = core.InformedCharacterModifier(
"오크통 룰렛(패시브)", pdamage_indep=10
)
ReinforceCannon = core.InformedCharacterModifier("리인포스 캐논", att=40)
PirateSpirit = core.InformedCharacterModifier(
"파이렛 스피릿", boss_pdamage=40 + self.combat
)
OverburningCannon = core.InformedCharacterModifier(
"오버버닝 캐논",
pdamage_indep=30 + passive_level,
armor_ignore=20 + passive_level // 2,
)
LoadedDicePassive = pirates.LoadedDicePassiveWrapper(vEhc, 3, 4)
return [
BuildupCannon,
CriticalFire,
PirateTraining,
MonkeyWavePassive,
OakRuletPassive,
ReinforceCannon,
PirateSpirit,
OverburningCannon,
LoadedDicePassive,
]
def get_not_implied_skill_list(
self, vEhc, chtr: ck.AbstractCharacter, options: Dict[str, Any]
):
passive_level = chtr.get_base_modifier().passive_level + self.combat
WeaponConstant = core.InformedCharacterModifier("무기상수", pdamage_indep=50)
Mastery = core.InformedCharacterModifier(
"숙련도", pdamage_indep=-7.5 + 0.5 * ceil(passive_level / 2)
)
return [WeaponConstant, Mastery]
def generate(self, vEhc, chtr: ck.AbstractCharacter, options: Dict[str, Any]):
"""
하이퍼 : 몽키트윈스-스플릿, 인핸스, 캐논버스터 - 리인포스, 보너스 어택.
롤링캐논레인보우 26타
코코볼 6초
이씨밤 5타
코강 순서:
버스터-서포트-다수기-롤캐
"""
COCOBALLHIT = options.get("cocoball_hit", 27)
ICBMHIT = 6
passive_level = chtr.get_base_modifier().passive_level + self.combat
# Buff skills
Booster = core.BuffSkill("부스터", 0, 200 * 1000).wrap(core.BuffSkillWrapper)
Buckshot = core.BuffSkill("벅 샷", 0, 180000).wrap(core.BuffSkillWrapper)
LuckyDice = (
core.BuffSkill(
"로디드 다이스",
delay=0,
remain=180 * 1000,
                pdamage=20  # Loaded Dice damage is fixed
+ 10 / 6
+ 10 / 6 * (5 / 6 + 1 / 11) * (10 * (5 + passive_level) * 0.01),
)
.isV(vEhc, 3, 4)
.wrap(core.BuffSkillWrapper)
)
MonkeyWave = core.DamageSkill(
"몽키 웨이브",
delay=810,
damage=860,
hit=1,
cooltime=30 * 1000,
).wrap(core.DamageSkillWrapper)
MonkeyWaveBuff = core.BuffSkill(
"몽키 웨이브(버프)",
delay=0,
remain=30000,
cooltime=-1,
crit_damage=5,
).wrap(core.BuffSkillWrapper)
MonkeyFurious = core.DamageSkill(
"몽키 퓨리어스",
delay=720,
damage=180,
hit=3,
cooltime=30 * 1000,
).wrap(core.DamageSkillWrapper)
MonkeyFuriousBuff = core.BuffSkill(
"몽키 퓨리어스(버프)",
delay=0,
remain=30000,
cooltime=-1,
pdamage=40,
).wrap(core.BuffSkillWrapper)
MonkeyFuriousDot = core.DotSkill(
"몽키 퓨리어스(도트)",
summondelay=0,
delay=1000,
damage=200,
hit=1,
remain=30000,
cooltime=-1,
).wrap(core.DotSkillWrapper)
OakRoulette = core.BuffSkill(
"오크통 룰렛",
delay=840,
remain=180000,
rem=True,
cooltime=180000,
crit_damage=1.25,
).wrap(core.BuffSkillWrapper)
OakRuletDOT = core.DotSkill(
"오크통 룰렛(도트)",
summondelay=0,
delay=1000,
damage=50,
hit=1,
remain=5000,
cooltime=-1,
).wrap(core.DotSkillWrapper)
MonkeyMagic = core.BuffSkill(
"하이퍼 몽키 스펠",
delay=0,
remain=180000,
rem=True,
stat_main=60 + passive_level,
stat_sub=60 + passive_level,
).wrap(core.BuffSkillWrapper)
# Damage Skills
CannonBuster = (
core.DamageSkill(
"캐논 버스터",
delay=690,
damage=(750 + 5 * self.combat) * 0.45, # BuckShot
hit=3 * (4 + 1),
modifier=core.CharacterModifier(
crit=15 + ceil(self.combat / 2),
armor_ignore=20 + self.combat // 2,
pdamage=20,
),
)
.setV(vEhc, 0, 2, True)
.wrap(core.DamageSkillWrapper)
)
# Summon Skills
SupportMonkeyTwins = (
core.SummonSkill(
"서포트 몽키 트윈스",
summondelay=720,
delay=930,
damage=(295 + 8 * self.combat) * 0.6, # Split Damage
hit=(1 + 1) * (2 + 1), # Split Damage, Enhance
remain=60000 + 2000 * self.combat,
rem=True,
)
.setV(vEhc, 1, 2, False)
.wrap(core.SummonSkillWrapper)
)
# Hyper
RollingCannonRainbow = (
core.SummonSkill(
"롤링 캐논 레인보우",
summondelay=480,
delay=12000 / 26,
damage=600,
hit=3,
remain=12000,
cooltime=90000,
)
.setV(vEhc, 3, 2, True)
.wrap(core.SummonSkillWrapper)
)
EpicAdventure = core.BuffSkill(
"에픽 어드벤처",
delay=0,
remain=60000,
cooltime=120000,
pdamage=10,
).wrap(core.BuffSkillWrapper)
# V skills
WEAPON_ATT = jobutils.get_weapon_att(chtr)
Overdrive = pirates.OverdriveWrapper(vEhc, 5, 5, WEAPON_ATT)
PirateFlag = adventurer.PirateFlagWrapper(vEhc, 4, 3, chtr.level)
MirrorBreak, MirrorSpider = globalSkill.SpiderInMirrorBuilder(vEhc, 0, 0)
        # Used on every cooldown
        # 27 hits against a training dummy target
BFGCannonball = core.StackableSummonSkillWrapper(
core.SummonSkill(
"빅 휴즈 기간틱 캐논볼",
summondelay=600,
delay=210,
damage=(450 + 15 * vEhc.getV(0, 0)) * 0.45, # BuckShot
hit=4 * 3,
remain=210 * COCOBALLHIT,
cooltime=25000,
).isV(vEhc, 0, 0),
max_stack=3,
)
ICBM = (
core.DamageSkill(
"ICBM",
delay=1140,
damage=(800 + 32 * vEhc.getV(1, 1)) * 0.45, # BuckShot
hit=5 * ICBMHIT * 3,
cooltime=30000,
red=True,
)
.isV(vEhc, 1, 1)
.wrap(core.DamageSkillWrapper)
)
ICBMDOT = (
core.SummonSkill(
"ICBM(장판)",
summondelay=0,
                delay=15000 / 27,  # 27 hits
damage=(500 + 20 * vEhc.getV(1, 1)) * 0.45, # BuckShot
hit=1 * 3,
remain=15000,
cooltime=-1,
)
.isV(vEhc, 1, 1)
.wrap(core.SummonSkillWrapper)
)
SpecialMonkeyEscort_Cannon = (
core.SummonSkill(
"스페셜 몽키 에스코트",
summondelay=780,
delay=1500,
damage=300 + 12 * vEhc.getV(2, 2),
hit=4 * 3,
remain=(30 + vEhc.getV(2, 2) // 2) * 1000,
cooltime=120000,
red=True,
)
.isV(vEhc, 2, 2)
.wrap(core.SummonSkillWrapper)
)
SpecialMonkeyEscort_Bomb = (
core.SummonSkill(
"스페셜 몽키 에스코트(폭탄)",
summondelay=0,
delay=5000,
damage=450 + 18 * vEhc.getV(2, 2),
hit=7 * 3,
remain=(30 + vEhc.getV(2, 2) // 2) * 1000,
cooltime=-1,
modifier=core.CharacterModifier(armor_ignore=100),
)
.isV(vEhc, 2, 2)
.wrap(core.SummonSkillWrapper)
)
FullMaker = (
core.SummonSkill(
"풀 메이커",
summondelay=720,
delay=360,
damage=(700 + 28 * vEhc.getV(0, 0)) * 0.45, # BuckShot
hit=3 * 3,
remain=360 * 20 - 1,
cooltime=60000,
red=True,
)
.isV(vEhc, 0, 0)
.wrap(core.SummonSkillWrapper)
)
### build graph relationships
MonkeyWave.onAfter(MonkeyWaveBuff)
MonkeyFurious.onAfters([MonkeyFuriousBuff, MonkeyFuriousDot])
CannonBuster.onAfter(OakRuletDOT)
BFGCannonball.onAfter(OakRuletDOT)
ICBM.onAfter(OakRuletDOT)
ICBM.onAfter(ICBMDOT)
SpecialMonkeyEscort_Cannon.onJustAfter(SpecialMonkeyEscort_Bomb)
return (
CannonBuster,
[
globalSkill.maple_heros(chtr.level, combat_level=self.combat),
globalSkill.useful_sharp_eyes(),
globalSkill.useful_combat_orders(),
globalSkill.useful_wind_booster(),
Booster,
OakRoulette,
Buckshot,
MonkeyMagic,
LuckyDice,
globalSkill.MapleHeroes2Wrapper(vEhc, 0, 0, chtr.level, self.combat),
EpicAdventure,
Overdrive,
PirateFlag,
globalSkill.soul_contract(),
]
+ [
SpecialMonkeyEscort_Cannon,
BFGCannonball,
FullMaker,
RollingCannonRainbow,
SupportMonkeyTwins,
]
+ [MonkeyWave, MonkeyFurious, ICBM, MirrorBreak]
+ [
SpecialMonkeyEscort_Bomb,
MirrorSpider,
OakRuletDOT,
MonkeyFuriousDot,
MonkeyWaveBuff,
MonkeyFuriousBuff,
ICBMDOT,
] # Not used from scheduler
+ [CannonBuster],
)
| 31.170984
| 85
| 0.499668
|
from ..kernel import core
from ..character import characterKernel as ck
from ..status.ability import Ability_tool
from ..execution.rules import RuleSet, ConditionRule
from . import globalSkill
from .jobbranch import pirates
from .jobclass import adventurer
from . import jobutils
from math import ceil
from typing import Any, Dict
class JobGenerator(ck.JobGenerator):
def __init__(self):
super(JobGenerator, self).__init__()
self.jobtype = "str"
self.jobname = "캐논슈터"
self.vEnhanceNum = 16
self.ability_list = Ability_tool.get_ability_set(
"boss_pdamage", "crit", "reuse"
)
self.preEmptiveSkills = 2
def get_ruleset(self):
def cannonball_rule(soul_contract):
if soul_contract.is_active():
return True
if soul_contract.is_cooltime_left(50000, -1):
return False
return True
ruleset = RuleSet()
ruleset.add_rule(
ConditionRule("빅 휴즈 기간틱 캐논볼", "소울 컨트랙트", cannonball_rule),
RuleSet.BASE,
)
return ruleset
def get_modifier_optimization_hint(self):
return core.CharacterModifier(pdamage=66, crit_damage=6, armor_ignore=30)
def get_passive_skill_list(
self, vEhc, chtr: ck.AbstractCharacter, options: Dict[str, Any]
):
passive_level = chtr.get_base_modifier().passive_level + self.combat
BuildupCannon = core.InformedCharacterModifier("빌드업 캐논", att=20)
CriticalFire = core.InformedCharacterModifier(
"크리티컬 파이어", crit=20, crit_damage=5
)
PirateTraining = core.InformedCharacterModifier(
"파이렛 트레이닝", stat_main=30, stat_sub=30
)
MonkeyWavePassive = core.InformedCharacterModifier("몽키 웨이브(패시브)", crit=20)
OakRuletPassive = core.InformedCharacterModifier(
"오크통 룰렛(패시브)", pdamage_indep=10
)
ReinforceCannon = core.InformedCharacterModifier("리인포스 캐논", att=40)
PirateSpirit = core.InformedCharacterModifier(
"파이렛 스피릿", boss_pdamage=40 + self.combat
)
OverburningCannon = core.InformedCharacterModifier(
"오버버닝 캐논",
pdamage_indep=30 + passive_level,
armor_ignore=20 + passive_level // 2,
)
LoadedDicePassive = pirates.LoadedDicePassiveWrapper(vEhc, 3, 4)
return [
BuildupCannon,
CriticalFire,
PirateTraining,
MonkeyWavePassive,
OakRuletPassive,
ReinforceCannon,
PirateSpirit,
OverburningCannon,
LoadedDicePassive,
]
def get_not_implied_skill_list(
self, vEhc, chtr: ck.AbstractCharacter, options: Dict[str, Any]
):
passive_level = chtr.get_base_modifier().passive_level + self.combat
WeaponConstant = core.InformedCharacterModifier("무기상수", pdamage_indep=50)
Mastery = core.InformedCharacterModifier(
"숙련도", pdamage_indep=-7.5 + 0.5 * ceil(passive_level / 2)
)
return [WeaponConstant, Mastery]
def generate(self, vEhc, chtr: ck.AbstractCharacter, options: Dict[str, Any]):
COCOBALLHIT = options.get("cocoball_hit", 27)
ICBMHIT = 6
passive_level = chtr.get_base_modifier().passive_level + self.combat
Booster = core.BuffSkill("부스터", 0, 200 * 1000).wrap(core.BuffSkillWrapper)
Buckshot = core.BuffSkill("벅 샷", 0, 180000).wrap(core.BuffSkillWrapper)
LuckyDice = (
core.BuffSkill(
"로디드 다이스",
delay=0,
remain=180 * 1000,
pdamage=20
+ 10 / 6
+ 10 / 6 * (5 / 6 + 1 / 11) * (10 * (5 + passive_level) * 0.01),
)
.isV(vEhc, 3, 4)
.wrap(core.BuffSkillWrapper)
)
MonkeyWave = core.DamageSkill(
"몽키 웨이브",
delay=810,
damage=860,
hit=1,
cooltime=30 * 1000,
).wrap(core.DamageSkillWrapper)
MonkeyWaveBuff = core.BuffSkill(
"몽키 웨이브(버프)",
delay=0,
remain=30000,
cooltime=-1,
crit_damage=5,
).wrap(core.BuffSkillWrapper)
MonkeyFurious = core.DamageSkill(
"몽키 퓨리어스",
delay=720,
damage=180,
hit=3,
cooltime=30 * 1000,
).wrap(core.DamageSkillWrapper)
MonkeyFuriousBuff = core.BuffSkill(
"몽키 퓨리어스(버프)",
delay=0,
remain=30000,
cooltime=-1,
pdamage=40,
).wrap(core.BuffSkillWrapper)
MonkeyFuriousDot = core.DotSkill(
"몽키 퓨리어스(도트)",
summondelay=0,
delay=1000,
damage=200,
hit=1,
remain=30000,
cooltime=-1,
).wrap(core.DotSkillWrapper)
OakRoulette = core.BuffSkill(
"오크통 룰렛",
delay=840,
remain=180000,
rem=True,
cooltime=180000,
crit_damage=1.25,
).wrap(core.BuffSkillWrapper)
OakRuletDOT = core.DotSkill(
"오크통 룰렛(도트)",
summondelay=0,
delay=1000,
damage=50,
hit=1,
remain=5000,
cooltime=-1,
).wrap(core.DotSkillWrapper)
MonkeyMagic = core.BuffSkill(
"하이퍼 몽키 스펠",
delay=0,
remain=180000,
rem=True,
stat_main=60 + passive_level,
stat_sub=60 + passive_level,
).wrap(core.BuffSkillWrapper)
CannonBuster = (
core.DamageSkill(
"캐논 버스터",
delay=690,
damage=(750 + 5 * self.combat) * 0.45,
hit=3 * (4 + 1),
modifier=core.CharacterModifier(
crit=15 + ceil(self.combat / 2),
armor_ignore=20 + self.combat // 2,
pdamage=20,
),
)
.setV(vEhc, 0, 2, True)
.wrap(core.DamageSkillWrapper)
)
SupportMonkeyTwins = (
core.SummonSkill(
"서포트 몽키 트윈스",
summondelay=720,
delay=930,
damage=(295 + 8 * self.combat) * 0.6,
hit=(1 + 1) * (2 + 1),
remain=60000 + 2000 * self.combat,
rem=True,
)
.setV(vEhc, 1, 2, False)
.wrap(core.SummonSkillWrapper)
)
RollingCannonRainbow = (
core.SummonSkill(
"롤링 캐논 레인보우",
summondelay=480,
delay=12000 / 26,
damage=600,
hit=3,
remain=12000,
cooltime=90000,
)
.setV(vEhc, 3, 2, True)
.wrap(core.SummonSkillWrapper)
)
EpicAdventure = core.BuffSkill(
"에픽 어드벤처",
delay=0,
remain=60000,
cooltime=120000,
pdamage=10,
).wrap(core.BuffSkillWrapper)
WEAPON_ATT = jobutils.get_weapon_att(chtr)
Overdrive = pirates.OverdriveWrapper(vEhc, 5, 5, WEAPON_ATT)
PirateFlag = adventurer.PirateFlagWrapper(vEhc, 4, 3, chtr.level)
MirrorBreak, MirrorSpider = globalSkill.SpiderInMirrorBuilder(vEhc, 0, 0)
BFGCannonball = core.StackableSummonSkillWrapper(
core.SummonSkill(
"빅 휴즈 기간틱 캐논볼",
summondelay=600,
delay=210,
damage=(450 + 15 * vEhc.getV(0, 0)) * 0.45,
hit=4 * 3,
remain=210 * COCOBALLHIT,
cooltime=25000,
).isV(vEhc, 0, 0),
max_stack=3,
)
ICBM = (
core.DamageSkill(
"ICBM",
delay=1140,
damage=(800 + 32 * vEhc.getV(1, 1)) * 0.45,
hit=5 * ICBMHIT * 3,
cooltime=30000,
red=True,
)
.isV(vEhc, 1, 1)
.wrap(core.DamageSkillWrapper)
)
ICBMDOT = (
core.SummonSkill(
"ICBM(장판)",
summondelay=0,
delay=15000 / 27,
damage=(500 + 20 * vEhc.getV(1, 1)) * 0.45,
hit=1 * 3,
remain=15000,
cooltime=-1,
)
.isV(vEhc, 1, 1)
.wrap(core.SummonSkillWrapper)
)
SpecialMonkeyEscort_Cannon = (
core.SummonSkill(
"스페셜 몽키 에스코트",
summondelay=780,
delay=1500,
damage=300 + 12 * vEhc.getV(2, 2),
hit=4 * 3,
remain=(30 + vEhc.getV(2, 2) // 2) * 1000,
cooltime=120000,
red=True,
)
.isV(vEhc, 2, 2)
.wrap(core.SummonSkillWrapper)
)
SpecialMonkeyEscort_Bomb = (
core.SummonSkill(
"스페셜 몽키 에스코트(폭탄)",
summondelay=0,
delay=5000,
damage=450 + 18 * vEhc.getV(2, 2),
hit=7 * 3,
remain=(30 + vEhc.getV(2, 2) // 2) * 1000,
cooltime=-1,
modifier=core.CharacterModifier(armor_ignore=100),
)
.isV(vEhc, 2, 2)
.wrap(core.SummonSkillWrapper)
)
FullMaker = (
core.SummonSkill(
"풀 메이커",
summondelay=720,
delay=360,
damage=(700 + 28 * vEhc.getV(0, 0)) * 0.45,
hit=3 * 3,
remain=360 * 20 - 1,
cooltime=60000,
red=True,
)
.isV(vEhc, 0, 0)
.wrap(core.SummonSkillWrapper)
)
        MonkeyWave.onAfter(MonkeyWaveBuff)
        MonkeyFurious.onAfters([MonkeyFuriousBuff, MonkeyFuriousDot])
CannonBuster.onAfter(OakRuletDOT)
BFGCannonball.onAfter(OakRuletDOT)
ICBM.onAfter(OakRuletDOT)
ICBM.onAfter(ICBMDOT)
SpecialMonkeyEscort_Cannon.onJustAfter(SpecialMonkeyEscort_Bomb)
return (
CannonBuster,
[
globalSkill.maple_heros(chtr.level, combat_level=self.combat),
globalSkill.useful_sharp_eyes(),
globalSkill.useful_combat_orders(),
globalSkill.useful_wind_booster(),
Booster,
OakRoulette,
Buckshot,
MonkeyMagic,
LuckyDice,
globalSkill.MapleHeroes2Wrapper(vEhc, 0, 0, chtr.level, self.combat),
EpicAdventure,
Overdrive,
PirateFlag,
globalSkill.soul_contract(),
]
+ [
SpecialMonkeyEscort_Cannon,
BFGCannonball,
FullMaker,
RollingCannonRainbow,
SupportMonkeyTwins,
]
+ [MonkeyWave, MonkeyFurious, ICBM, MirrorBreak]
+ [
SpecialMonkeyEscort_Bomb,
MirrorSpider,
OakRuletDOT,
MonkeyFuriousDot,
MonkeyWaveBuff,
MonkeyFuriousBuff,
ICBMDOT,
]
+ [CannonBuster],
)
| true
| true
|
1c4680a5dd30a51494fef98f8426f5bccd4e47a7
| 1,264
|
py
|
Python
|
frontend/tool/a.py
|
yinzhangyue/PDF_tool
|
ff1c689478e0d40370724ad88da78ef8bd0bf3d1
|
[
"MIT"
] | 3
|
2021-12-07T06:19:12.000Z
|
2022-03-30T13:45:34.000Z
|
frontend/tool/a.py
|
yinzhangyue/PDF_tool
|
ff1c689478e0d40370724ad88da78ef8bd0bf3d1
|
[
"MIT"
] | null | null | null |
frontend/tool/a.py
|
yinzhangyue/PDF_tool
|
ff1c689478e0d40370724ad88da78ef8bd0bf3d1
|
[
"MIT"
] | 2
|
2022-02-27T16:15:05.000Z
|
2022-03-19T07:35:38.000Z
|
# -*- coding: utf-8 -*-
import numpy as np
import cv2
from numpy import float32
if __name__ == "__main__":
# Read image
img = cv2.imread("./c73.png")
a = np.array([[
1458.4429931640625, 145.316650390625, 1554.5313720703125,
176.924560546875, 1
]],
dtype=float32)
b = np.array([[
1734.0457763671875, 191.89208984375, 1829.681640625, 222.283935546875,
1
]],
dtype=float32)
# Draw rectangle
j = 0
for i in a:
if i[4] > 0.85:
cv2.rectangle(img, (int(i[0]), int(i[1])), (int(i[2]), int(i[3])),
(50, 205, 50), 4)
# cut = img[int(i[0]):int(i[2]), int(i[1]):int(i[3])]
# cv2.imwrite('./pic/' + str(j) + '.png', cut)
# j += 1
for i in b:
if i[4] > 0.85:
cv2.rectangle(img, (int(i[0]), int(i[1])), (int(i[2]), int(i[3])),
(254, 67, 101), 4)
    # compute a quarter-size view for display
width = int(img.shape[1] / 4)
height = int(img.shape[0] / 4)
dim = (width, height)
# resize image
resized = cv2.resize(img, dim)
    # show the resized image
    cv2.imshow("Image", resized)
    cv2.waitKey(0)
    # save the annotated image at full resolution
    cv2.imwrite('./c73_.png', img)
| 28.727273
| 78
| 0.492089
|
import numpy as np
import cv2
from numpy import float32
if __name__ == "__main__":
img = cv2.imread("./c73.png")
a = np.array([[
1458.4429931640625, 145.316650390625, 1554.5313720703125,
176.924560546875, 1
]],
dtype=float32)
b = np.array([[
1734.0457763671875, 191.89208984375, 1829.681640625, 222.283935546875,
1
]],
dtype=float32)
j = 0
for i in a:
if i[4] > 0.85:
cv2.rectangle(img, (int(i[0]), int(i[1])), (int(i[2]), int(i[3])),
(50, 205, 50), 4)
for i in b:
if i[4] > 0.85:
cv2.rectangle(img, (int(i[0]), int(i[1])), (int(i[2]), int(i[3])),
(254, 67, 101), 4)
width = int(img.shape[1] / 4)
height = int(img.shape[0] / 4)
dim = (width, height)
resized = cv2.resize(img, dim)
cv2.imshow("Image", resized)
cv2.waitKey(0)
cv2.imwrite('./c73_.png', img)
| true
| true
|
1c4680c58421b1abc945fead1a676d7387f98adb
| 3,191
|
py
|
Python
|
tests/test_roi_pooling.py
|
scilicet64/keras-spp
|
23da20561fe92c585208af9bf3e0ef8f51bc5dcc
|
[
"MIT"
] | null | null | null |
tests/test_roi_pooling.py
|
scilicet64/keras-spp
|
23da20561fe92c585208af9bf3e0ef8f51bc5dcc
|
[
"MIT"
] | null | null | null |
tests/test_roi_pooling.py
|
scilicet64/keras-spp
|
23da20561fe92c585208af9bf3e0ef8f51bc5dcc
|
[
"MIT"
] | null | null | null |
import keras.backend as K
import numpy as np
from keras.layers import Input
from keras.models import Model
from spp.RoiPooling import RoiPooling
dim_ordering = K.image_data_format()
assert dim_ordering in {'channels_last','channels_first'}, 'dim_ordering must be in {channels_last,channels_first}'
pooling_regions = [1, 2, 4]
num_rois = 2
num_channels = 3
if dim_ordering == 'channels_last':
in_img = Input(shape=(None, None, num_channels))
elif dim_ordering == 'channels_first':
in_img = Input(shape=(num_channels, None, None))
in_roi = Input(shape=(num_rois, 4))
out_roi_pool = RoiPooling(pooling_regions, num_rois)([in_img, in_roi])
model = Model([in_img, in_roi], out_roi_pool)
model.summary()
model.compile(loss='mse', optimizer='sgd')
for img_size in [8, 16, 32]:
if dim_ordering == 'channels_first':
X_img = np.random.rand(1, num_channels, img_size, img_size)
row_length = [float(X_img.shape[2]) / i for i in pooling_regions]
col_length = [float(X_img.shape[3]) / i for i in pooling_regions]
elif dim_ordering == 'channels_last':
X_img = np.random.rand(1, img_size, img_size, num_channels)
row_length = [float(X_img.shape[1]) / i for i in pooling_regions]
col_length = [float(X_img.shape[2]) / i for i in pooling_regions]
X_roi = np.array([[0, 0, img_size / 1, img_size / 1],
[0, 0, img_size / 2, img_size / 2]])
X_roi = np.reshape(X_roi, (1, num_rois, 4)).astype(int)
Y = model.predict([X_img, X_roi])
for roi in range(num_rois):
if dim_ordering == 'channels_first':
X_curr = X_img[0, :, X_roi[0, roi, 0]:X_roi[0, roi, 2], X_roi[0, roi, 1]:X_roi[0, roi, 3]]
row_length = [float(X_curr.shape[1]) / i for i in pooling_regions]
col_length = [float(X_curr.shape[2]) / i for i in pooling_regions]
elif dim_ordering == 'channels_last':
X_curr = X_img[0, X_roi[0, roi, 0]:X_roi[0, roi, 2], X_roi[0, roi, 1]:X_roi[0, roi, 3], :]
row_length = [float(X_curr.shape[0]) / i for i in pooling_regions]
col_length = [float(X_curr.shape[1]) / i for i in pooling_regions]
        idx = 0  # flat index into the 63-value pooled output for this ROI
for pool_num, num_pool_regions in enumerate(pooling_regions):
for ix in range(num_pool_regions):
for jy in range(num_pool_regions):
for cn in range(num_channels):
x1 = int(round(ix * col_length[pool_num]))
x2 = int(round(ix * col_length[pool_num] + col_length[pool_num]))
y1 = int(round(jy * row_length[pool_num]))
y2 = int(round(jy * row_length[pool_num] + row_length[pool_num]))
if dim_ordering == 'channels_first':
m_val = np.max(X_curr[cn, y1:y2, x1:x2])
elif dim_ordering == 'channels_last':
m_val = np.max(X_curr[y1:y2, x1:x2, cn])
np.testing.assert_almost_equal(
m_val, Y[0, roi, idx], decimal=6)
idx += 1
print('Passed roi pooling test')
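
# Note (added): the checks above live at module level, so this file runs its
# test at import time; under pytest it executes during collection rather than
# as a named test function.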
| 40.392405
| 115
| 0.596365
|
import keras.backend as K
import numpy as np
from keras.layers import Input
from keras.models import Model
from spp.RoiPooling import RoiPooling
dim_ordering = K.image_data_format()
assert dim_ordering in {'channels_last', 'channels_first'}, 'dim_ordering must be in {channels_last, channels_first}'
pooling_regions = [1, 2, 4]
num_rois = 2
num_channels = 3
if dim_ordering == 'channels_last':
in_img = Input(shape=(None, None, num_channels))
elif dim_ordering == 'channels_first':
in_img = Input(shape=(num_channels, None, None))
in_roi = Input(shape=(num_rois, 4))
out_roi_pool = RoiPooling(pooling_regions, num_rois)([in_img, in_roi])
model = Model([in_img, in_roi], out_roi_pool)
model.summary()
model.compile(loss='mse', optimizer='sgd')
for img_size in [8, 16, 32]:
if dim_ordering == 'channels_first':
X_img = np.random.rand(1, num_channels, img_size, img_size)
row_length = [float(X_img.shape[2]) / i for i in pooling_regions]
col_length = [float(X_img.shape[3]) / i for i in pooling_regions]
elif dim_ordering == 'channels_last':
X_img = np.random.rand(1, img_size, img_size, num_channels)
row_length = [float(X_img.shape[1]) / i for i in pooling_regions]
col_length = [float(X_img.shape[2]) / i for i in pooling_regions]
X_roi = np.array([[0, 0, img_size / 1, img_size / 1],
[0, 0, img_size / 2, img_size / 2]])
X_roi = np.reshape(X_roi, (1, num_rois, 4)).astype(int)
Y = model.predict([X_img, X_roi])
for roi in range(num_rois):
if dim_ordering == 'channels_first':
X_curr = X_img[0, :, X_roi[0, roi, 0]:X_roi[0, roi, 2], X_roi[0, roi, 1]:X_roi[0, roi, 3]]
row_length = [float(X_curr.shape[1]) / i for i in pooling_regions]
col_length = [float(X_curr.shape[2]) / i for i in pooling_regions]
elif dim_ordering == 'channels_last':
X_curr = X_img[0, X_roi[0, roi, 0]:X_roi[0, roi, 2], X_roi[0, roi, 1]:X_roi[0, roi, 3], :]
row_length = [float(X_curr.shape[0]) / i for i in pooling_regions]
col_length = [float(X_curr.shape[1]) / i for i in pooling_regions]
idx = 0
for pool_num, num_pool_regions in enumerate(pooling_regions):
for ix in range(num_pool_regions):
for jy in range(num_pool_regions):
for cn in range(num_channels):
x1 = int(round(ix * col_length[pool_num]))
x2 = int(round(ix * col_length[pool_num] + col_length[pool_num]))
y1 = int(round(jy * row_length[pool_num]))
y2 = int(round(jy * row_length[pool_num] + row_length[pool_num]))
if dim_ordering == 'channels_first':
m_val = np.max(X_curr[cn, y1:y2, x1:x2])
elif dim_ordering == 'channels_last':
m_val = np.max(X_curr[y1:y2, x1:x2, cn])
np.testing.assert_almost_equal(
m_val, Y[0, roi, idx], decimal=6)
idx += 1
print('Passed roi pooling test')
| true
| true
|
1c46810f9df9a321d810630eb9522ae322eae9a2
| 2,102
|
py
|
Python
|
tests/testproject/testapp/models.py
|
allenling/django-easy-fixtures
|
cf6e0abff83565e5bf106e388922a31feb288ee9
|
[
"MIT"
] | 2
|
2016-09-19T12:53:44.000Z
|
2016-09-25T05:14:15.000Z
|
tests/testproject/testapp/models.py
|
allenling/django-easy-fixture
|
cf6e0abff83565e5bf106e388922a31feb288ee9
|
[
"MIT"
] | null | null | null |
tests/testproject/testapp/models.py
|
allenling/django-easy-fixture
|
cf6e0abff83565e5bf106e388922a31feb288ee9
|
[
"MIT"
] | null | null | null |
from django.db import models
class TestAbstractModel(models.Model):
    positive_integer = models.PositiveIntegerField()
    positive_small_integer = models.PositiveSmallIntegerField()
file_path_field = models.FilePathField()
float_field = models.FloatField()
ip = models.GenericIPAddressField()
slug_field = models.SlugField()
small_integer = models.SmallIntegerField()
text_field = models.TextField()
time_field = models.TimeField()
biginteger_field = models.BigIntegerField()
boolean_field = models.BooleanField()
    non_boolean_field = models.NullBooleanField()  # NullBooleanField is deprecated since Django 3.1; BooleanField(null=True) is the modern form
    # decimal_field = models.DecimalField()  # disabled: DecimalField requires max_digits and decimal_places
duration_field = models.DurationField()
email_field = models.EmailField()
char_field = models.CharField(max_length=20)
integer_field = models.IntegerField()
    date_field = models.DateField()
datetime_field = models.DateTimeField()
url = models.URLField()
bin = models.BinaryField()
uuid = models.UUIDField()
default_field = models.CharField(max_length=20, default='')
unique_field = models.CharField(max_length=20)
unique_together_field_a = models.CharField(max_length=20)
unique_together_field_b = models.CharField(max_length=20)
class Meta:
abstract = True
class OtherModel(TestAbstractModel):
pass
class FixtureForeignModel(TestAbstractModel):
foreign_field = models.ForeignKey(OtherModel, on_delete=models.CASCADE)
class Meta(object):
unique_together = (('char_field', ), ('unique_together_field_a', 'unique_together_field_b'))
class FixtureManyToManyModel(TestAbstractModel):
class Meta(object):
unique_together = (('float_field', ), ('integer_field', ), ('unique_together_field_a', 'unique_together_field_b'))
class FixtureModel(TestAbstractModel):
foreign_field = models.ForeignKey(FixtureForeignModel, on_delete=models.CASCADE)
many_to_many = models.ManyToManyField(FixtureManyToManyModel)
class Meta(object):
unique_together = (('char_field', 'foreign_field'), ('unique_together_field_a', 'unique_together_field_b'))
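
# Usage sketch (added, hypothetical values): FixtureModel sits at the end of a
# FK chain, so creating one needs its parents first, e.g.
#   other = OtherModel.objects.create(char_field='x', ...)
#   fk = FixtureForeignModel.objects.create(foreign_field=other, char_field='y', ...)
#   m = FixtureModel.objects.create(foreign_field=fk, ...)
#   m.many_to_many.add(FixtureManyToManyModel.objects.create(...))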
| 33.903226
| 122
| 0.744053
|
from django.db import models
class TestAbstractModel(models.Model):
    positive_integer = models.PositiveIntegerField()
    positive_small_integer = models.PositiveSmallIntegerField()
file_path_field = models.FilePathField()
float_field = models.FloatField()
ip = models.GenericIPAddressField()
slug_field = models.SlugField()
small_integer = models.SmallIntegerField()
text_field = models.TextField()
time_field = models.TimeField()
biginteger_field = models.BigIntegerField()
boolean_field = models.BooleanField()
non_boolean_field = models.NullBooleanField()
duration_field = models.DurationField()
email_field = models.EmailField()
char_field = models.CharField(max_length=20)
integer_field = models.IntegerField()
    date_field = models.DateField()
datetime_field = models.DateTimeField()
url = models.URLField()
bin = models.BinaryField()
uuid = models.UUIDField()
default_field = models.CharField(max_length=20, default='')
unique_field = models.CharField(max_length=20)
unique_together_field_a = models.CharField(max_length=20)
unique_together_field_b = models.CharField(max_length=20)
class Meta:
abstract = True
class OtherModel(TestAbstractModel):
pass
class FixtureForeignModel(TestAbstractModel):
foreign_field = models.ForeignKey(OtherModel, on_delete=models.CASCADE)
class Meta(object):
unique_together = (('char_field', ), ('unique_together_field_a', 'unique_together_field_b'))
class FixtureManyToManyModel(TestAbstractModel):
class Meta(object):
unique_together = (('float_field', ), ('integer_field', ), ('unique_together_field_a', 'unique_together_field_b'))
class FixtureModel(TestAbstractModel):
foreign_field = models.ForeignKey(FixtureForeignModel, on_delete=models.CASCADE)
many_to_many = models.ManyToManyField(FixtureManyToManyModel)
class Meta(object):
unique_together = (('char_field', 'foreign_field'), ('unique_together_field_a', 'unique_together_field_b'))
| true
| true
|
1c4681245bdc3eedb5787d6a299414db6b6e6a58
| 4,237
|
py
|
Python
|
ch05/timsort.py
|
laszlokiraly/LearningAlgorithms
|
032a3cc409546619cf41220821d081cde54bbcce
|
[
"MIT"
] | 74
|
2021-05-06T22:03:18.000Z
|
2022-03-25T04:37:51.000Z
|
ch05/timsort.py
|
laszlokiraly/LearningAlgorithms
|
032a3cc409546619cf41220821d081cde54bbcce
|
[
"MIT"
] | null | null | null |
ch05/timsort.py
|
laszlokiraly/LearningAlgorithms
|
032a3cc409546619cf41220821d081cde54bbcce
|
[
"MIT"
] | 19
|
2021-07-16T11:42:00.000Z
|
2022-03-22T00:25:49.000Z
|
"""
Simplistic non-optimized, native Python implementation showing the mechanics
of TimSort.
This code is designed to show how TimSort uses Insertion Sort and Merge Sort
as its constituent building blocks. It is not the actual sorting algorithm,
because of extra complexities that optimize this base algorithm even further.
Full details on the sorting algorithm are in the actual CPython code base,
but Tim Peters has provided documentation explaining reasons behind many
of the choices in Tim Sort.
https://hg.python.org/cpython/file/tip/Objects/listsort.txt
"""
import timeit
from algs.table import DataTable
def merge(A, lo, mid, hi, aux):
"""Merge two (consecutive) runs together."""
aux[lo:hi+1] = A[lo:hi+1]
left = lo
right = mid + 1
for i in range(lo, hi+1):
if left > mid:
A[i] = aux[right]
right += 1
elif right > hi:
A[i] = aux[left]
left += 1
elif aux[right] < aux[left]:
A[i] = aux[right]
right += 1
else:
A[i] = aux[left]
left += 1
# https://hg.python.org/cpython/file/tip/Objects/listsort.txt
# Instead we pick a minrun in range(32, 65) such that N/minrun is exactly a
# power of 2, or if that isn't possible, is close to, but strictly less than,
# a power of 2. This is easier to do than it may sound: take the first 6
# bits of N, and add 1 if any of the remaining bits are set. In fact, that
# rule covers every case in this section, including small N and exact powers
# of 2; merge_compute_minrun() is a deceptively simple function.
def compute_min_run(n):
"""Compute min_run to use when sorting n total values."""
# Used to add 1 if any remaining bits are set
r = 0
while n >= 64:
r |= n & 1
n >>= 1
return n + r
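
# Worked example (added): compute_min_run keeps the top six bits of n and adds
# 1 if any lower bit was set, so:
#   compute_min_run(63)   == 63  (n < 64 is returned unchanged)
#   compute_min_run(64)   == 32  (0b1000000 -> top bits 32, no leftover bits)
#   compute_min_run(65)   == 33  (leftover bit set, so 32 + 1)
#   compute_min_run(2048) == 32  (exact power of two)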
def insertion_sort(A, lo, hi):
"""Sort A[lo .. hi] using Insertion Sort. Stable sort demands Ai <= Aj. """
for i in range(lo+1,hi+1):
for j in range(i,lo,-1):
if A[j-1] <= A[j]:
break
A[j],A[j-1] = A[j-1],A[j]
def tim_sort(A):
"""Apply simplistic Tim Sort implementation on A."""
# Small arrays are sorted using insertion sort
N = len(A)
if N < 64:
insertion_sort(A,0,N-1)
return
# Insertion sort in strips of 'size'
size = compute_min_run(N)
for lo in range(0, N, size):
insertion_sort(A, lo, min(lo+size-1, N-1))
aux = [None]*N
while size < N:
# Merge all doubled ranges, taking care with last one
for lo in range(0, N, 2*size):
mid = min(lo + size - 1, N-1)
hi = min(lo + 2*size - 1, N-1)
merge(A, lo, mid, hi, aux)
size = 2 * size
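
# Quick sanity check (added sketch):
#   import random
#   data = [random.randint(0, 10**6) for _ in range(10_000)]
#   tim_sort(data)
#   assert data == sorted(data)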
def timing_nlogn_sorting_real_world(max_k=18, output=True):
"""
Confirm N Log N performance of Merge Sort, Heap Sort and Python's built-in sort
for n in 2**k for k up to (but not including) max_k=18.
Represents real-world case where Tim Sort shines, namely, where you are
adding random data to an already sorted set.
"""
# Build model
tbl = DataTable([12,10,10,10,10],['N','MergeSort', 'Quicksort', 'TimSort', 'PythonSort'],
output=output)
for n in [2**k for k in range(8, max_k)]:
t_ms = min(timeit.repeat(stmt='merge_sort(A)', setup='''
import random
from ch05.merge import merge_sort
A=list(range(int({0}*.8)))
B=list(range({0}-len(A)))
random.shuffle(B)
A.extend(B)'''.format(n), repeat=10, number=1))
t_qs = min(timeit.repeat(stmt='quick_sort(A)', setup='''
import random
from ch05.sorting import quick_sort
A=list(range(int({0}*.8)))
B=list(range({0}-len(A)))
random.shuffle(B)
A.extend(B)'''.format(n), repeat=10, number=1))
t_ps = min(timeit.repeat(stmt='A.sort()', setup='''
import random
A=list(range(int({0}*.8)))
B=list(range({0}-len(A)))
random.shuffle(B)
A.extend(B)'''.format(n), repeat=10, number=1))
t_ts = min(timeit.repeat(stmt='tim_sort(A)', setup='''
import random
from ch05.timsort import tim_sort
A=list(range(int({0}*.8)))
B=list(range({0}-len(A)))
random.shuffle(B)
A.extend(B)'''.format(n), repeat=10, number=1))
tbl.row([n, t_ms, t_qs, t_ts, t_ps])
return tbl
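
# Note (added): min() over timeit.repeat runs reports the least-noisy
# measurement, the usual convention for micro-benchmarks.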
| 32.098485
| 93
| 0.621194
|
import timeit
from algs.table import DataTable
def merge(A, lo, mid, hi, aux):
aux[lo:hi+1] = A[lo:hi+1]
left = lo
right = mid + 1
for i in range(lo, hi+1):
if left > mid:
A[i] = aux[right]
right += 1
elif right > hi:
A[i] = aux[left]
left += 1
elif aux[right] < aux[left]:
A[i] = aux[right]
right += 1
else:
A[i] = aux[left]
left += 1
# a power of 2. This is easier to do than it may sound: take the first 6
# bits of N, and add 1 if any of the remaining bits are set. In fact, that
# rule covers every case in this section, including small N and exact powers
# of 2; merge_compute_minrun() is a deceptively simple function.
def compute_min_run(n):
# Used to add 1 if any remaining bits are set
r = 0
while n >= 64:
r |= n & 1
n >>= 1
return n + r
def insertion_sort(A, lo, hi):
for i in range(lo+1,hi+1):
for j in range(i,lo,-1):
if A[j-1] <= A[j]:
break
A[j],A[j-1] = A[j-1],A[j]
def tim_sort(A):
# Small arrays are sorted using insertion sort
N = len(A)
if N < 64:
insertion_sort(A,0,N-1)
return
# Insertion sort in strips of 'size'
size = compute_min_run(N)
for lo in range(0, N, size):
insertion_sort(A, lo, min(lo+size-1, N-1))
aux = [None]*N
while size < N:
# Merge all doubled ranges, taking care with last one
for lo in range(0, N, 2*size):
mid = min(lo + size - 1, N-1)
hi = min(lo + 2*size - 1, N-1)
merge(A, lo, mid, hi, aux)
size = 2 * size
def timing_nlogn_sorting_real_world(max_k=18, output=True):
# Build model
tbl = DataTable([12,10,10,10,10],['N','MergeSort', 'Quicksort', 'TimSort', 'PythonSort'],
output=output)
for n in [2**k for k in range(8, max_k)]:
t_ms = min(timeit.repeat(stmt='merge_sort(A)', setup='''
import random
from ch05.merge import merge_sort
A=list(range(int({0}*.8)))
B=list(range({0}-len(A)))
random.shuffle(B)
A.extend(B)'''.format(n), repeat=10, number=1))
t_qs = min(timeit.repeat(stmt='quick_sort(A)', setup='''
import random
from ch05.sorting import quick_sort
A=list(range(int({0}*.8)))
B=list(range({0}-len(A)))
random.shuffle(B)
A.extend(B)'''.format(n), repeat=10, number=1))
t_ps = min(timeit.repeat(stmt='A.sort()', setup='''
import random
A=list(range(int({0}*.8)))
B=list(range({0}-len(A)))
random.shuffle(B)
A.extend(B)'''.format(n), repeat=10, number=1))
t_ts = min(timeit.repeat(stmt='tim_sort(A)', setup='''
import random
from ch05.timsort import tim_sort
A=list(range(int({0}*.8)))
B=list(range({0}-len(A)))
random.shuffle(B)
A.extend(B)'''.format(n), repeat=10, number=1))
tbl.row([n, t_ms, t_qs, t_ts, t_ps])
return tbl
| true
| true
|
1c46821305f403db023274e47c9b4a630caf9c77
| 4,807
|
py
|
Python
|
src/azure-cli/azure/cli/command_modules/search/tests/latest/test_shared_private_link_resource.py
|
ZengTaoxu/azure-cli
|
6be96de450da5ac9f07aafb22dd69880bea04792
|
[
"MIT"
] | 4
|
2022-01-25T07:33:15.000Z
|
2022-03-24T05:15:13.000Z
|
src/azure-cli/azure/cli/command_modules/search/tests/latest/test_shared_private_link_resource.py
|
ZengTaoxu/azure-cli
|
6be96de450da5ac9f07aafb22dd69880bea04792
|
[
"MIT"
] | null | null | null |
src/azure-cli/azure/cli/command_modules/search/tests/latest/test_shared_private_link_resource.py
|
ZengTaoxu/azure-cli
|
6be96de450da5ac9f07aafb22dd69880bea04792
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.testsdk import ScenarioTest, ResourceGroupPreparer, StorageAccountPreparer
import unittest
class AzureSearchServicesTests(ScenarioTest):
@ResourceGroupPreparer(name_prefix='azure_search_cli_test')
@StorageAccountPreparer(name_prefix='satest', kind='StorageV2')
def test_shared_private_link_resource_crud(self, resource_group, storage_account):
self.kwargs.update({
'sku_name': 'basic',
'search_service_name': self.create_random_name(prefix='azstest', length=24),
'public_network_access': 'Disabled',
'shared_private_link_resource_name': self.create_random_name(prefix='spltest', length=24),
'storage_account_name': storage_account,
'shared_private_link_resource_group_id': 'blob',
'shared_private_link_resource_request_provisioning_state_default': 'Succeeded',
'shared_private_link_resource_request_status_default': 'Pending',
'shared_private_link_resource_request_message_default': 'Please approve',
'shared_private_link_resource_request_message': 'Please approve again'
})
self.cmd(
'az search service create -n {search_service_name} -g {rg} --sku {sku_name} --public-network-access {public_network_access}',
checks=[self.check('name', '{search_service_name}'),
self.check('sku.name', '{sku_name}'),
self.check('publicNetworkAccess', '{public_network_access}')])
_account_resource_id = "/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}".format(self.get_subscription_id(), resource_group, storage_account)
self.kwargs.update({'_account_resource_id': _account_resource_id})
# create shared private link resource
_tpe_resource = self.cmd('az search shared-private-link-resource create --service-name {search_service_name} -g {rg} --resource-id {_account_resource_id} --name {shared_private_link_resource_name} --group-id {shared_private_link_resource_group_id}',
checks=[self.check('name', '{shared_private_link_resource_name}'),
self.check('properties.provisioningState', '{shared_private_link_resource_request_provisioning_state_default}'),
self.check('properties.requestMessage', '{shared_private_link_resource_request_message_default}'),
self.check('properties.status', '{shared_private_link_resource_request_status_default}')]).get_output_in_json()
# update shared private link resource
self.cmd('az search shared-private-link-resource update --service-name {search_service_name} -g {rg} --resource-id {_account_resource_id} --name {shared_private_link_resource_name} --group-id {shared_private_link_resource_group_id} --request-message "{shared_private_link_resource_request_message}"',
checks=[self.check('properties.requestMessage', '{shared_private_link_resource_request_message}')])
# list shared private link resources
_tpe_resources = self.cmd('az search shared-private-link-resource list --service-name {search_service_name} -g {rg}').get_output_in_json()
self.assertTrue(len(_tpe_resources) == 1)
# get shared private link resource
_tpe_resource = self.cmd('az search shared-private-link-resource show --service-name {search_service_name} -g {rg} --name {shared_private_link_resource_name}').get_output_in_json()
self.assertTrue(_tpe_resource['properties']['privateLinkResourceId'] == _account_resource_id)
# delete shared private link resource
self.cmd('az search shared-private-link-resource delete --service-name {search_service_name} -g {rg} --name {shared_private_link_resource_name} -y')
# list shared private link resources
_tpe_resources = self.cmd('az search shared-private-link-resource list --service-name {search_service_name} -g {rg}').get_output_in_json()
self.assertTrue(len(_tpe_resources) == 0)
# get shared private link resource
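        # a show on the deleted resource must now fail; exit code 3 is the
        # Azure CLI convention for a missing resource (HTTP 404)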
with self.assertRaises(SystemExit) as ex:
self.cmd('az search shared-private-link-resource show --service-name {search_service_name} -g {rg} --name {shared_private_link_resource_name}')
self.assertEqual(ex.exception.code, 3)
if __name__ == '__main__':
unittest.main()
| 66.763889
| 308
| 0.68317
|
from azure.cli.testsdk import ScenarioTest, ResourceGroupPreparer, StorageAccountPreparer
import unittest
class AzureSearchServicesTests(ScenarioTest):
@ResourceGroupPreparer(name_prefix='azure_search_cli_test')
@StorageAccountPreparer(name_prefix='satest', kind='StorageV2')
def test_shared_private_link_resource_crud(self, resource_group, storage_account):
self.kwargs.update({
'sku_name': 'basic',
'search_service_name': self.create_random_name(prefix='azstest', length=24),
'public_network_access': 'Disabled',
'shared_private_link_resource_name': self.create_random_name(prefix='spltest', length=24),
'storage_account_name': storage_account,
'shared_private_link_resource_group_id': 'blob',
'shared_private_link_resource_request_provisioning_state_default': 'Succeeded',
'shared_private_link_resource_request_status_default': 'Pending',
'shared_private_link_resource_request_message_default': 'Please approve',
'shared_private_link_resource_request_message': 'Please approve again'
})
self.cmd(
'az search service create -n {search_service_name} -g {rg} --sku {sku_name} --public-network-access {public_network_access}',
checks=[self.check('name', '{search_service_name}'),
self.check('sku.name', '{sku_name}'),
self.check('publicNetworkAccess', '{public_network_access}')])
_account_resource_id = "/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Storage/storageAccounts/{}".format(self.get_subscription_id(), resource_group, storage_account)
self.kwargs.update({'_account_resource_id': _account_resource_id})
_tpe_resource = self.cmd('az search shared-private-link-resource create --service-name {search_service_name} -g {rg} --resource-id {_account_resource_id} --name {shared_private_link_resource_name} --group-id {shared_private_link_resource_group_id}',
checks=[self.check('name', '{shared_private_link_resource_name}'),
self.check('properties.provisioningState', '{shared_private_link_resource_request_provisioning_state_default}'),
self.check('properties.requestMessage', '{shared_private_link_resource_request_message_default}'),
self.check('properties.status', '{shared_private_link_resource_request_status_default}')]).get_output_in_json()
self.cmd('az search shared-private-link-resource update --service-name {search_service_name} -g {rg} --resource-id {_account_resource_id} --name {shared_private_link_resource_name} --group-id {shared_private_link_resource_group_id} --request-message "{shared_private_link_resource_request_message}"',
checks=[self.check('properties.requestMessage', '{shared_private_link_resource_request_message}')])
_tpe_resources = self.cmd('az search shared-private-link-resource list --service-name {search_service_name} -g {rg}').get_output_in_json()
self.assertTrue(len(_tpe_resources) == 1)
_tpe_resource = self.cmd('az search shared-private-link-resource show --service-name {search_service_name} -g {rg} --name {shared_private_link_resource_name}').get_output_in_json()
self.assertTrue(_tpe_resource['properties']['privateLinkResourceId'] == _account_resource_id)
self.cmd('az search shared-private-link-resource delete --service-name {search_service_name} -g {rg} --name {shared_private_link_resource_name} -y')
_tpe_resources = self.cmd('az search shared-private-link-resource list --service-name {search_service_name} -g {rg}').get_output_in_json()
self.assertTrue(len(_tpe_resources) == 0)
with self.assertRaises(SystemExit) as ex:
self.cmd('az search shared-private-link-resource show --service-name {search_service_name} -g {rg} --name {shared_private_link_resource_name}')
self.assertEqual(ex.exception.code, 3)
if __name__ == '__main__':
unittest.main()
| true
| true
|
1c4682ad9a9b67315be210907787728e1c713657
| 61,319
|
py
|
Python
|
tests/integration_tests/core_tests.py
|
akashkj/superset
|
8a157d8446780e4e71550405cbedde8a4d64d92a
|
[
"Apache-2.0"
] | 1
|
2022-01-23T17:08:13.000Z
|
2022-01-23T17:08:13.000Z
|
tests/integration_tests/core_tests.py
|
akashkj/superset
|
8a157d8446780e4e71550405cbedde8a4d64d92a
|
[
"Apache-2.0"
] | 19
|
2022-01-29T03:16:22.000Z
|
2022-03-25T23:50:16.000Z
|
tests/integration_tests/core_tests.py
|
akashkj/superset
|
8a157d8446780e4e71550405cbedde8a4d64d92a
|
[
"Apache-2.0"
] | 1
|
2022-02-02T19:59:50.000Z
|
2022-02-02T19:59:50.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# isort:skip_file
"""Unit tests for Superset"""
import csv
import datetime
import doctest
import html
import io
import json
import logging
from typing import Dict, List
from urllib.parse import quote
import superset.utils.database
from tests.integration_tests.fixtures.birth_names_dashboard import (
load_birth_names_dashboard_with_slices,
load_birth_names_data,
)
import pytest
import pytz
import random
import re
import unittest
from unittest import mock
import pandas as pd
import sqlalchemy as sqla
from sqlalchemy.exc import SQLAlchemyError
from superset.models.cache import CacheKey
from superset.utils.database import get_example_database
from tests.integration_tests.conftest import with_feature_flags
from tests.integration_tests.fixtures.energy_dashboard import (
load_energy_table_with_slice,
load_energy_table_data,
)
from tests.integration_tests.test_app import app
import superset.views.utils
from superset import (
dataframe,
db,
security_manager,
sql_lab,
)
from superset.common.db_query_status import QueryStatus
from superset.connectors.sqla.models import SqlaTable
from superset.db_engine_specs.base import BaseEngineSpec
from superset.db_engine_specs.mssql import MssqlEngineSpec
from superset.exceptions import SupersetException
from superset.extensions import async_query_manager
from superset.models import core as models
from superset.models.annotations import Annotation, AnnotationLayer
from superset.models.dashboard import Dashboard
from superset.models.datasource_access_request import DatasourceAccessRequest
from superset.models.slice import Slice
from superset.models.sql_lab import Query
from superset.result_set import SupersetResultSet
from superset.utils import core as utils
from superset.views import core as views
from superset.views.database.views import DatabaseView
from .base_tests import SupersetTestCase
from tests.integration_tests.fixtures.world_bank_dashboard import (
load_world_bank_dashboard_with_slices,
load_world_bank_data,
)
logger = logging.getLogger(__name__)
class TestCore(SupersetTestCase):
def setUp(self):
db.session.query(Query).delete()
db.session.query(DatasourceAccessRequest).delete()
db.session.query(models.Log).delete()
self.table_ids = {
tbl.table_name: tbl.id for tbl in (db.session.query(SqlaTable).all())
}
self.original_unsafe_db_setting = app.config["PREVENT_UNSAFE_DB_CONNECTIONS"]
def tearDown(self):
db.session.query(Query).delete()
app.config["PREVENT_UNSAFE_DB_CONNECTIONS"] = self.original_unsafe_db_setting
def test_login(self):
resp = self.get_resp("/login/", data=dict(username="admin", password="general"))
self.assertNotIn("User confirmation needed", resp)
resp = self.get_resp("/logout/", follow_redirects=True)
self.assertIn("User confirmation needed", resp)
resp = self.get_resp(
"/login/", data=dict(username="admin", password="wrongPassword")
)
self.assertIn("User confirmation needed", resp)
def test_dashboard_endpoint(self):
self.login()
resp = self.client.get("/superset/dashboard/-1/")
assert resp.status_code == 404
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_slice_endpoint(self):
self.login(username="admin")
slc = self.get_slice("Girls", db.session)
resp = self.get_resp("/superset/slice/{}/".format(slc.id))
assert "Original value" in resp
assert "List Roles" in resp
# Testing overrides
resp = self.get_resp("/superset/slice/{}/?standalone=true".format(slc.id))
assert '<div class="navbar' not in resp
resp = self.client.get("/superset/slice/-1/")
assert resp.status_code == 404
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_viz_cache_key(self):
self.login(username="admin")
slc = self.get_slice("Girls", db.session)
viz = slc.viz
qobj = viz.query_obj()
cache_key = viz.cache_key(qobj)
qobj["groupby"] = []
cache_key_with_groupby = viz.cache_key(qobj)
self.assertNotEqual(cache_key, cache_key_with_groupby)
self.assertNotEqual(
viz.cache_key(qobj), viz.cache_key(qobj, time_compare="12 weeks")
)
self.assertNotEqual(
viz.cache_key(qobj, time_compare="28 days"),
viz.cache_key(qobj, time_compare="12 weeks"),
)
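        # inner_from_dttm is deliberately excluded from the cache key, so
        # changing it must leave the key unchanged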
qobj["inner_from_dttm"] = datetime.datetime(1901, 1, 1)
self.assertEqual(cache_key_with_groupby, viz.cache_key(qobj))
def test_get_superset_tables_not_allowed(self):
example_db = superset.utils.database.get_example_database()
schema_name = self.default_schema_backend_map[example_db.backend]
self.login(username="gamma")
uri = f"superset/tables/{example_db.id}/{schema_name}/undefined/"
rv = self.client.get(uri)
self.assertEqual(rv.status_code, 404)
def test_get_superset_tables_substr(self):
example_db = superset.utils.database.get_example_database()
if example_db.backend in {"presto", "hive"}:
            # TODO: change "ab_role" to a real table from the examples database.
return
self.login(username="admin")
schema_name = self.default_schema_backend_map[example_db.backend]
uri = f"superset/tables/{example_db.id}/{schema_name}/ab_role/"
rv = self.client.get(uri)
response = json.loads(rv.data.decode("utf-8"))
self.assertEqual(rv.status_code, 200)
expected_response = {
"options": [
{
"label": "ab_role",
"schema": schema_name,
"title": "ab_role",
"type": "table",
"value": "ab_role",
"extra": None,
}
],
"tableLength": 1,
}
self.assertEqual(response, expected_response)
def test_get_superset_tables_not_found(self):
self.login(username="admin")
uri = f"superset/tables/invalid/public/undefined/"
rv = self.client.get(uri)
self.assertEqual(rv.status_code, 404)
def test_annotation_json_endpoint(self):
# Set up an annotation layer and annotation
layer = AnnotationLayer(name="foo", descr="bar")
db.session.add(layer)
db.session.commit()
annotation = Annotation(
layer_id=layer.id,
short_descr="my_annotation",
start_dttm=datetime.datetime(2020, 5, 20, 18, 21, 51),
end_dttm=datetime.datetime(2020, 5, 20, 18, 31, 51),
)
db.session.add(annotation)
db.session.commit()
self.login()
resp_annotations = json.loads(
self.get_resp("annotationlayermodelview/api/read")
)
# the UI needs id and name to function
self.assertIn("id", resp_annotations["result"][0])
self.assertIn("name", resp_annotations["result"][0])
response = self.get_resp(
f"/superset/annotation_json/{layer.id}?form_data="
+ quote(json.dumps({"time_range": "100 years ago : now"}))
)
assert "my_annotation" in response
# Rollback changes
db.session.delete(annotation)
db.session.delete(layer)
db.session.commit()
def test_admin_only_permissions(self):
def assert_admin_permission_in(role_name, assert_func):
role = security_manager.find_role(role_name)
permissions = [p.permission.name for p in role.permissions]
assert_func("can_sync_druid_source", permissions)
assert_func("can_approve", permissions)
assert_admin_permission_in("Admin", self.assertIn)
assert_admin_permission_in("Alpha", self.assertNotIn)
assert_admin_permission_in("Gamma", self.assertNotIn)
def test_admin_only_menu_views(self):
def assert_admin_view_menus_in(role_name, assert_func):
role = security_manager.find_role(role_name)
view_menus = [p.view_menu.name for p in role.permissions]
assert_func("ResetPasswordView", view_menus)
assert_func("RoleModelView", view_menus)
assert_func("Security", view_menus)
assert_func("SQL Lab", view_menus)
assert_admin_view_menus_in("Admin", self.assertIn)
assert_admin_view_menus_in("Alpha", self.assertNotIn)
assert_admin_view_menus_in("Gamma", self.assertNotIn)
@pytest.mark.usefixtures("load_energy_table_with_slice")
def test_save_slice(self):
self.login(username="admin")
slice_name = f"Energy Sankey"
slice_id = self.get_slice(slice_name, db.session).id
copy_name_prefix = "Test Sankey"
copy_name = f"{copy_name_prefix}[save]{random.random()}"
tbl_id = self.table_ids.get("energy_usage")
new_slice_name = f"{copy_name_prefix}[overwrite]{random.random()}"
url = (
"/superset/explore/table/{}/?slice_name={}&"
"action={}&datasource_name=energy_usage"
)
form_data = {
"adhoc_filters": [],
"viz_type": "sankey",
"groupby": ["target"],
"metric": "sum__value",
"row_limit": 5000,
"slice_id": slice_id,
"time_range_endpoints": ["inclusive", "exclusive"],
}
# Changing name and save as a new slice
resp = self.client.post(
url.format(tbl_id, copy_name, "saveas"),
data={"form_data": json.dumps(form_data)},
)
db.session.expunge_all()
new_slice_id = resp.json["form_data"]["slice_id"]
slc = db.session.query(Slice).filter_by(id=new_slice_id).one()
self.assertEqual(slc.slice_name, copy_name)
form_data.pop("slice_id") # We don't save the slice id when saving as
self.assertEqual(slc.viz.form_data, form_data)
form_data = {
"adhoc_filters": [],
"viz_type": "sankey",
"groupby": ["source"],
"metric": "sum__value",
"row_limit": 5000,
"slice_id": new_slice_id,
"time_range": "now",
"time_range_endpoints": ["inclusive", "exclusive"],
}
# Setting the name back to its original name by overwriting new slice
self.client.post(
url.format(tbl_id, new_slice_name, "overwrite"),
data={"form_data": json.dumps(form_data)},
)
db.session.expunge_all()
slc = db.session.query(Slice).filter_by(id=new_slice_id).one()
self.assertEqual(slc.slice_name, new_slice_name)
self.assertEqual(slc.viz.form_data, form_data)
# Cleanup
slices = (
db.session.query(Slice)
.filter(Slice.slice_name.like(copy_name_prefix + "%"))
.all()
)
for slc in slices:
db.session.delete(slc)
db.session.commit()
@pytest.mark.usefixtures("load_energy_table_with_slice")
def test_filter_endpoint(self):
self.login(username="admin")
slice_name = "Energy Sankey"
slice_id = self.get_slice(slice_name, db.session).id
db.session.commit()
tbl_id = self.table_ids.get("energy_usage")
        table = db.session.query(SqlaTable).filter(SqlaTable.id == tbl_id).one()
        table.filter_select_enabled = True  # set on the model instance, not on the Query object
url = (
"/superset/filter/table/{}/target/?viz_type=sankey&groupby=source"
"&metric=sum__value&flt_col_0=source&flt_op_0=in&flt_eq_0=&"
"slice_id={}&datasource_name=energy_usage&"
"datasource_id=1&datasource_type=table"
)
# Changing name
resp = self.get_resp(url.format(tbl_id, slice_id))
assert len(resp) > 0
assert "energy_target0" in resp
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_slice_data(self):
# slice data should have some required attributes
self.login(username="admin")
slc = self.get_slice(
slice_name="Girls", session=db.session, expunge_from_session=False
)
slc_data_attributes = slc.data.keys()
assert "changed_on" in slc_data_attributes
assert "modified" in slc_data_attributes
assert "owners" in slc_data_attributes
@pytest.mark.usefixtures("load_energy_table_with_slice")
def test_slices(self):
# Testing by hitting the two supported end points for all slices
self.login(username="admin")
Slc = Slice
urls = []
for slc in db.session.query(Slc).all():
urls += [
(slc.slice_name, "explore", slc.slice_url),
]
for name, method, url in urls:
logger.info(f"[{name}]/[{method}]: {url}")
print(f"[{name}]/[{method}]: {url}")
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
def test_tablemodelview_list(self):
self.login(username="admin")
url = "/tablemodelview/list/"
resp = self.get_resp(url)
# assert that a table is listed
table = db.session.query(SqlaTable).first()
assert table.name in resp
assert "/superset/explore/table/{}".format(table.id) in resp
def test_add_slice(self):
self.login(username="admin")
# assert that /chart/add responds with 200
url = "/chart/add"
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_get_user_slices_for_owners(self):
self.login(username="alpha")
user = security_manager.find_user("alpha")
slice_name = "Girls"
# ensure user is not owner of any slices
url = f"/superset/user_slices/{user.id}/"
resp = self.client.get(url)
data = json.loads(resp.data)
self.assertEqual(data, [])
# make user owner of slice and verify that endpoint returns said slice
slc = self.get_slice(
slice_name=slice_name, session=db.session, expunge_from_session=False
)
slc.owners = [user]
db.session.merge(slc)
db.session.commit()
url = f"/superset/user_slices/{user.id}/"
resp = self.client.get(url)
data = json.loads(resp.data)
self.assertEqual(len(data), 1)
self.assertEqual(data[0]["title"], slice_name)
# remove ownership and ensure user no longer gets slice
slc = self.get_slice(
slice_name=slice_name, session=db.session, expunge_from_session=False
)
slc.owners = []
db.session.merge(slc)
db.session.commit()
url = f"/superset/user_slices/{user.id}/"
resp = self.client.get(url)
data = json.loads(resp.data)
self.assertEqual(data, [])
def test_get_user_slices(self):
self.login(username="admin")
userid = security_manager.find_user("admin").id
url = f"/sliceasync/api/read?_flt_0_created_by={userid}"
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
@pytest.mark.usefixtures("load_energy_table_with_slice")
def test_slices_V2(self):
# Add explore-v2-beta role to admin user
        # Test all slice urls as a user with the explore-v2-beta role
security_manager.add_role("explore-v2-beta")
security_manager.add_user(
"explore_beta",
"explore_beta",
" user",
"explore_beta@airbnb.com",
security_manager.find_role("explore-v2-beta"),
password="general",
)
self.login(username="explore_beta", password="general")
Slc = Slice
urls = []
for slc in db.session.query(Slc).all():
urls += [(slc.slice_name, "slice_url", slc.slice_url)]
for name, method, url in urls:
print(f"[{name}]/[{method}]: {url}")
self.client.get(url)
def test_doctests(self):
modules = [utils, models, sql_lab]
for mod in modules:
failed, tests = doctest.testmod(mod)
if failed:
raise Exception("Failed a doctest")
def test_misc(self):
assert self.get_resp("/health") == "OK"
assert self.get_resp("/healthcheck") == "OK"
assert self.get_resp("/ping") == "OK"
def test_testconn(self, username="admin"):
# need to temporarily allow sqlite dbs, teardown will undo this
app.config["PREVENT_UNSAFE_DB_CONNECTIONS"] = False
self.login(username=username)
database = superset.utils.database.get_example_database()
# validate that the endpoint works with the password-masked sqlalchemy uri
data = json.dumps(
{
"uri": database.safe_sqlalchemy_uri(),
"name": "examples",
"impersonate_user": False,
}
)
response = self.client.post(
"/superset/testconn", data=data, content_type="application/json"
)
assert response.status_code == 200
assert response.headers["Content-Type"] == "application/json"
# validate that the endpoint works with the decrypted sqlalchemy uri
data = json.dumps(
{
"uri": database.sqlalchemy_uri_decrypted,
"name": "examples",
"impersonate_user": False,
}
)
response = self.client.post(
"/superset/testconn", data=data, content_type="application/json"
)
assert response.status_code == 200
assert response.headers["Content-Type"] == "application/json"
def test_testconn_failed_conn(self, username="admin"):
self.login(username=username)
data = json.dumps(
{"uri": "broken://url", "name": "examples", "impersonate_user": False}
)
response = self.client.post(
"/superset/testconn", data=data, content_type="application/json"
)
assert response.status_code == 400
assert response.headers["Content-Type"] == "application/json"
response_body = json.loads(response.data.decode("utf-8"))
expected_body = {"error": "Could not load database driver: broken"}
assert response_body == expected_body, "%s != %s" % (
response_body,
expected_body,
)
data = json.dumps(
{
"uri": "mssql+pymssql://url",
"name": "examples",
"impersonate_user": False,
}
)
response = self.client.post(
"/superset/testconn", data=data, content_type="application/json"
)
assert response.status_code == 400
assert response.headers["Content-Type"] == "application/json"
response_body = json.loads(response.data.decode("utf-8"))
expected_body = {"error": "Could not load database driver: mssql+pymssql"}
assert response_body == expected_body, "%s != %s" % (
response_body,
expected_body,
)
def test_testconn_unsafe_uri(self, username="admin"):
self.login(username=username)
app.config["PREVENT_UNSAFE_DB_CONNECTIONS"] = True
response = self.client.post(
"/superset/testconn",
data=json.dumps(
{
"uri": "sqlite:///home/superset/unsafe.db",
"name": "unsafe",
"impersonate_user": False,
}
),
content_type="application/json",
)
self.assertEqual(400, response.status_code)
response_body = json.loads(response.data.decode("utf-8"))
expected_body = {
"error": "SQLiteDialect_pysqlite cannot be used as a data source for security reasons."
}
self.assertEqual(expected_body, response_body)
def test_custom_password_store(self):
database = superset.utils.database.get_example_database()
conn_pre = sqla.engine.url.make_url(database.sqlalchemy_uri_decrypted)
def custom_password_store(uri):
return "password_store_test"
models.custom_password_store = custom_password_store
conn = sqla.engine.url.make_url(database.sqlalchemy_uri_decrypted)
if conn_pre.password:
assert conn.password == "password_store_test"
assert conn.password != conn_pre.password
# Disable for password store for later tests
models.custom_password_store = None
def test_databaseview_edit(self, username="admin"):
# validate that sending a password-masked uri does not over-write the decrypted
# uri
self.login(username=username)
database = superset.utils.database.get_example_database()
sqlalchemy_uri_decrypted = database.sqlalchemy_uri_decrypted
url = "databaseview/edit/{}".format(database.id)
        data = {k: getattr(database, k) for k in DatabaseView.add_columns}
data["sqlalchemy_uri"] = database.safe_sqlalchemy_uri()
self.client.post(url, data=data)
database = superset.utils.database.get_example_database()
self.assertEqual(sqlalchemy_uri_decrypted, database.sqlalchemy_uri_decrypted)
# Need to clean up after ourselves
database.impersonate_user = False
database.allow_dml = False
database.allow_run_async = False
db.session.commit()
@pytest.mark.usefixtures(
"load_energy_table_with_slice", "load_birth_names_dashboard_with_slices"
)
def test_warm_up_cache(self):
self.login()
slc = self.get_slice("Girls", db.session)
data = self.get_json_resp("/superset/warm_up_cache?slice_id={}".format(slc.id))
self.assertEqual(
data, [{"slice_id": slc.id, "viz_error": None, "viz_status": "success"}]
)
data = self.get_json_resp(
"/superset/warm_up_cache?table_name=energy_usage&db_name=main"
)
assert len(data) > 0
dashboard = self.get_dash_by_slug("births")
assert self.get_json_resp(
f"/superset/warm_up_cache?dashboard_id={dashboard.id}&slice_id={slc.id}"
) == [{"slice_id": slc.id, "viz_error": None, "viz_status": "success"}]
assert self.get_json_resp(
f"/superset/warm_up_cache?dashboard_id={dashboard.id}&slice_id={slc.id}&extra_filters="
+ quote(json.dumps([{"col": "name", "op": "in", "val": ["Jennifer"]}]))
) == [{"slice_id": slc.id, "viz_error": None, "viz_status": "success"}]
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_cache_logging(self):
self.login("admin")
store_cache_keys = app.config["STORE_CACHE_KEYS_IN_METADATA_DB"]
app.config["STORE_CACHE_KEYS_IN_METADATA_DB"] = True
girls_slice = self.get_slice("Girls", db.session)
self.get_json_resp("/superset/warm_up_cache?slice_id={}".format(girls_slice.id))
ck = db.session.query(CacheKey).order_by(CacheKey.id.desc()).first()
assert ck.datasource_uid == f"{girls_slice.table.id}__table"
app.config["STORE_CACHE_KEYS_IN_METADATA_DB"] = store_cache_keys
def test_shortner(self):
self.login(username="admin")
data = (
"//superset/explore/table/1/?viz_type=sankey&groupby=source&"
"groupby=target&metric=sum__value&row_limit=5000&where=&having=&"
"flt_col_0=source&flt_op_0=in&flt_eq_0=&slice_id=78&slice_name="
"Energy+Sankey&collapsed_fieldsets=&action=&datasource_name="
"energy_usage&datasource_id=1&datasource_type=table&"
"previous_viz_type=sankey"
)
resp = self.client.post("/r/shortner/", data=dict(data=data))
assert re.search(r"\/r\/[0-9]+", resp.data.decode("utf-8"))
def test_shortner_invalid(self):
self.login(username="admin")
invalid_urls = [
"hhttp://invalid.com",
"hhttps://invalid.com",
"www.invalid.com",
]
for invalid_url in invalid_urls:
resp = self.client.post("/r/shortner/", data=dict(data=invalid_url))
assert resp.status_code == 400
def test_redirect_invalid(self):
model_url = models.Url(url="hhttp://invalid.com")
db.session.add(model_url)
db.session.commit()
self.login(username="admin")
response = self.client.get(f"/r/{model_url.id}")
assert response.headers["Location"] == "http://localhost/"
db.session.delete(model_url)
db.session.commit()
@with_feature_flags(KV_STORE=False)
def test_kv_disabled(self):
self.login(username="admin")
resp = self.client.get("/kv/10001/")
self.assertEqual(404, resp.status_code)
value = json.dumps({"data": "this is a test"})
resp = self.client.post("/kv/store/", data=dict(data=value))
self.assertEqual(resp.status_code, 404)
@with_feature_flags(KV_STORE=True)
def test_kv_enabled(self):
self.login(username="admin")
resp = self.client.get("/kv/10001/")
self.assertEqual(404, resp.status_code)
value = json.dumps({"data": "this is a test"})
resp = self.client.post("/kv/store/", data=dict(data=value))
self.assertEqual(resp.status_code, 200)
kv = db.session.query(models.KeyValue).first()
kv_value = kv.value
self.assertEqual(json.loads(value), json.loads(kv_value))
resp = self.client.get("/kv/{}/".format(kv.id))
self.assertEqual(resp.status_code, 200)
self.assertEqual(json.loads(value), json.loads(resp.data.decode("utf-8")))
def test_gamma(self):
self.login(username="gamma")
assert "Charts" in self.get_resp("/chart/list/")
assert "Dashboards" in self.get_resp("/dashboard/list/")
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_csv_endpoint(self):
self.login()
client_id = "{}".format(random.getrandbits(64))[:10]
get_name_sql = """
SELECT name
FROM birth_names
LIMIT 1
"""
resp = self.run_sql(get_name_sql, client_id, raise_on_error=True)
name = resp["data"][0]["name"]
sql = f"""
SELECT name
FROM birth_names
WHERE name = '{name}'
LIMIT 1
"""
client_id = "{}".format(random.getrandbits(64))[:10]
self.run_sql(sql, client_id, raise_on_error=True)
resp = self.get_resp("/superset/csv/{}".format(client_id))
data = csv.reader(io.StringIO(resp))
expected_data = csv.reader(io.StringIO(f"name\n{name}\n"))
client_id = "{}".format(random.getrandbits(64))[:10]
self.run_sql(sql, client_id, raise_on_error=True)
resp = self.get_resp("/superset/csv/{}".format(client_id))
data = csv.reader(io.StringIO(resp))
expected_data = csv.reader(io.StringIO(f"name\n{name}\n"))
self.assertEqual(list(expected_data), list(data))
self.logout()
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_extra_table_metadata(self):
self.login()
example_db = superset.utils.database.get_example_database()
schema = "default" if example_db.backend in {"presto", "hive"} else "superset"
self.get_json_resp(
f"/superset/extra_table_metadata/{example_db.id}/birth_names/{schema}/"
)
def test_templated_sql_json(self):
if superset.utils.database.get_example_database().backend == "presto":
# TODO: make it work for presto
return
self.login()
sql = "SELECT '{{ 1+1 }}' as test"
data = self.run_sql(sql, "fdaklj3ws")
self.assertEqual(data["data"][0]["test"], "2")
@mock.patch(
"tests.integration_tests.superset_test_custom_template_processors.datetime"
)
@mock.patch("superset.views.core.get_sql_results")
def test_custom_templated_sql_json(self, sql_lab_mock, mock_dt) -> None:
"""Test sqllab receives macros expanded query."""
mock_dt.utcnow = mock.Mock(return_value=datetime.datetime(1970, 1, 1))
self.login()
sql = "SELECT '$DATE()' as test"
resp = {
"status": QueryStatus.SUCCESS,
"query": {"rows": 1},
"data": [{"test": "'1970-01-01'"}],
}
sql_lab_mock.return_value = resp
dbobj = self.create_fake_db_for_macros()
json_payload = dict(database_id=dbobj.id, sql=sql)
self.get_json_resp(
"/superset/sql_json/", raise_on_error=False, json_=json_payload
)
assert sql_lab_mock.called
self.assertEqual(sql_lab_mock.call_args[0][1], "SELECT '1970-01-01' as test")
self.delete_fake_db_for_macros()
def test_fetch_datasource_metadata(self):
self.login(username="admin")
url = "/superset/fetch_datasource_metadata?" "datasourceKey=1__table"
resp = self.get_json_resp(url)
keys = [
"name",
"type",
"order_by_choices",
"granularity_sqla",
"time_grain_sqla",
"id",
]
for k in keys:
self.assertIn(k, resp.keys())
@staticmethod
def _get_user_activity_endpoints(user: str):
userid = security_manager.find_user(user).id
return (
f"/superset/recent_activity/{userid}/",
f"/superset/created_slices/{userid}/",
f"/superset/created_dashboards/{userid}/",
f"/superset/fave_slices/{userid}/",
f"/superset/fave_dashboards/{userid}/",
f"/superset/user_slices/{userid}/",
f"/superset/fave_dashboards_by_username/{user}/",
)
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_user_profile(self, username="admin"):
self.login(username=username)
slc = self.get_slice("Girls", db.session)
# Setting some faves
url = f"/superset/favstar/Slice/{slc.id}/select/"
resp = self.get_json_resp(url)
self.assertEqual(resp["count"], 1)
dash = db.session.query(Dashboard).filter_by(slug="births").first()
url = f"/superset/favstar/Dashboard/{dash.id}/select/"
resp = self.get_json_resp(url)
self.assertEqual(resp["count"], 1)
resp = self.get_resp(f"/superset/profile/{username}/")
self.assertIn('"app"', resp)
for endpoint in self._get_user_activity_endpoints(username):
data = self.get_json_resp(endpoint)
self.assertNotIn("message", data)
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_user_activity_access(self, username="gamma"):
self.login(username=username)
# accessing own and other users' activity is allowed by default
for user in ("admin", "gamma"):
for endpoint in self._get_user_activity_endpoints(user):
resp = self.client.get(endpoint)
assert resp.status_code == 200
# disabling flag will block access to other users' activity data
access_flag = app.config["ENABLE_BROAD_ACTIVITY_ACCESS"]
app.config["ENABLE_BROAD_ACTIVITY_ACCESS"] = False
for user in ("admin", "gamma"):
for endpoint in self._get_user_activity_endpoints(user):
resp = self.client.get(endpoint)
expected_status_code = 200 if user == username else 403
assert resp.status_code == expected_status_code
# restore flag
app.config["ENABLE_BROAD_ACTIVITY_ACCESS"] = access_flag
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_slice_id_is_always_logged_correctly_on_web_request(self):
# superset/explore case
self.login("admin")
slc = db.session.query(Slice).filter_by(slice_name="Girls").one()
qry = db.session.query(models.Log).filter_by(slice_id=slc.id)
self.get_resp(slc.slice_url, {"form_data": json.dumps(slc.form_data)})
self.assertEqual(1, qry.count())
def create_sample_csvfile(self, filename: str, content: List[str]) -> None:
with open(filename, "w+") as test_file:
            for line in content:
                test_file.write(f"{line}\n")
def create_sample_excelfile(self, filename: str, content: Dict[str, str]) -> None:
pd.DataFrame(content).to_excel(filename)
def enable_csv_upload(self, database: models.Database) -> None:
"""Enables csv upload in the given database."""
database.allow_file_upload = True
db.session.commit()
add_datasource_page = self.get_resp("/databaseview/list/")
self.assertIn("Upload a CSV", add_datasource_page)
form_get = self.get_resp("/csvtodatabaseview/form")
self.assertIn("CSV to Database configuration", form_get)
def test_dataframe_timezone(self):
tz = pytz.FixedOffset(60)
data = [
(datetime.datetime(2017, 11, 18, 21, 53, 0, 219225, tzinfo=tz),),
(datetime.datetime(2017, 11, 18, 22, 6, 30, tzinfo=tz),),
]
results = SupersetResultSet(list(data), [["data"]], BaseEngineSpec)
df = results.to_pandas_df()
data = dataframe.df_to_records(df)
json_str = json.dumps(data, default=utils.pessimistic_json_iso_dttm_ser)
self.assertDictEqual(
data[0], {"data": pd.Timestamp("2017-11-18 21:53:00.219225+0100", tz=tz)}
)
self.assertDictEqual(
data[1], {"data": pd.Timestamp("2017-11-18 22:06:30+0100", tz=tz)}
)
self.assertEqual(
json_str,
'[{"data": "2017-11-18T21:53:00.219225+01:00"}, {"data": "2017-11-18T22:06:30+01:00"}]',
)
def test_mssql_engine_spec_pymssql(self):
# Test for case when tuple is returned (pymssql)
data = [
(1, 1, datetime.datetime(2017, 10, 19, 23, 39, 16, 660000)),
(2, 2, datetime.datetime(2018, 10, 19, 23, 39, 16, 660000)),
]
results = SupersetResultSet(
list(data), [["col1"], ["col2"], ["col3"]], MssqlEngineSpec
)
df = results.to_pandas_df()
data = dataframe.df_to_records(df)
self.assertEqual(len(data), 2)
self.assertEqual(
data[0],
{"col1": 1, "col2": 1, "col3": pd.Timestamp("2017-10-19 23:39:16.660000")},
)
def test_comments_in_sqlatable_query(self):
clean_query = "SELECT '/* val 1 */' as c1, '-- val 2' as c2 FROM tbl"
commented_query = "/* comment 1 */" + clean_query + "-- comment 2"
table = SqlaTable(
table_name="test_comments_in_sqlatable_query_table",
sql=commented_query,
database=get_example_database(),
)
rendered_query = str(table.get_from_clause())
self.assertEqual(clean_query, rendered_query)
def test_slice_payload_no_datasource(self):
self.login(username="admin")
data = self.get_json_resp("/superset/explore_json/", raise_on_error=False)
self.assertEqual(
data["errors"][0]["message"],
"The dataset associated with this chart no longer exists",
)
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_explore_json(self):
tbl_id = self.table_ids.get("birth_names")
form_data = {
"datasource": f"{tbl_id}__table",
"viz_type": "dist_bar",
"time_range_endpoints": ["inclusive", "exclusive"],
"granularity_sqla": "ds",
"time_range": "No filter",
"metrics": ["count"],
"adhoc_filters": [],
"groupby": ["gender"],
"row_limit": 100,
}
self.login(username="admin")
rv = self.client.post(
"/superset/explore_json/", data={"form_data": json.dumps(form_data)},
)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(rv.status_code, 200)
self.assertEqual(data["rowcount"], 2)
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_explore_json_dist_bar_order(self):
tbl_id = self.table_ids.get("birth_names")
form_data = {
"datasource": f"{tbl_id}__table",
"viz_type": "dist_bar",
"url_params": {},
"time_range_endpoints": ["inclusive", "exclusive"],
"granularity_sqla": "ds",
"time_range": 'DATEADD(DATETIME("2021-01-22T00:00:00"), -100, year) : 2021-01-22T00:00:00',
"metrics": [
{
"expressionType": "SIMPLE",
"column": {
"id": 334,
"column_name": "name",
"verbose_name": "null",
"description": "null",
"expression": "",
"filterable": True,
"groupby": True,
"is_dttm": False,
"type": "VARCHAR(255)",
"python_date_format": "null",
},
"aggregate": "COUNT",
"sqlExpression": "null",
"isNew": False,
"hasCustomLabel": False,
"label": "COUNT(name)",
"optionName": "metric_xdzsijn42f9_khi4h3v3vci",
},
{
"expressionType": "SIMPLE",
"column": {
"id": 332,
"column_name": "ds",
"verbose_name": "null",
"description": "null",
"expression": "",
"filterable": True,
"groupby": True,
"is_dttm": True,
"type": "TIMESTAMP WITHOUT TIME ZONE",
"python_date_format": "null",
},
"aggregate": "COUNT",
"sqlExpression": "null",
"isNew": False,
"hasCustomLabel": False,
"label": "COUNT(ds)",
"optionName": "metric_80g1qb9b6o7_ci5vquydcbe",
},
],
"order_desc": True,
"adhoc_filters": [],
"groupby": ["name"],
"columns": [],
"row_limit": 10,
"color_scheme": "supersetColors",
"label_colors": {},
"show_legend": True,
"y_axis_format": "SMART_NUMBER",
"bottom_margin": "auto",
"x_ticks_layout": "auto",
}
self.login(username="admin")
rv = self.client.post(
"/superset/explore_json/", data={"form_data": json.dumps(form_data)},
)
data = json.loads(rv.data.decode("utf-8"))
resp = self.run_sql(
"""
SELECT count(name) AS count_name, count(ds) AS count_ds
FROM birth_names
WHERE ds >= '1921-01-22 00:00:00.000000' AND ds < '2021-01-22 00:00:00.000000'
GROUP BY name
ORDER BY count_name DESC
LIMIT 10;
""",
client_id="client_id_1",
user_name="admin",
)
count_ds = []
count_name = []
for series in data["data"]:
if series["key"] == "COUNT(ds)":
count_ds = series["values"]
if series["key"] == "COUNT(name)":
count_name = series["values"]
for expected, actual_ds, actual_name in zip(resp["data"], count_ds, count_name):
assert expected["count_name"] == actual_name["y"]
assert expected["count_ds"] == actual_ds["y"]
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
@mock.patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
GLOBAL_ASYNC_QUERIES=True,
)
def test_explore_json_async(self):
tbl_id = self.table_ids.get("birth_names")
form_data = {
"datasource": f"{tbl_id}__table",
"viz_type": "dist_bar",
"time_range_endpoints": ["inclusive", "exclusive"],
"granularity_sqla": "ds",
"time_range": "No filter",
"metrics": ["count"],
"adhoc_filters": [],
"groupby": ["gender"],
"row_limit": 100,
}
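        # with GLOBAL_ASYNC_QUERIES enabled, the endpoint returns 202 Accepted
        # with job metadata instead of the chart payload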
async_query_manager.init_app(app)
self.login(username="admin")
rv = self.client.post(
"/superset/explore_json/", data={"form_data": json.dumps(form_data)},
)
data = json.loads(rv.data.decode("utf-8"))
keys = list(data.keys())
self.assertEqual(rv.status_code, 202)
self.assertCountEqual(
keys, ["channel_id", "job_id", "user_id", "status", "errors", "result_url"]
)
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
@mock.patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
GLOBAL_ASYNC_QUERIES=True,
)
def test_explore_json_async_results_format(self):
tbl_id = self.table_ids.get("birth_names")
form_data = {
"datasource": f"{tbl_id}__table",
"viz_type": "dist_bar",
"time_range_endpoints": ["inclusive", "exclusive"],
"granularity_sqla": "ds",
"time_range": "No filter",
"metrics": ["count"],
"adhoc_filters": [],
"groupby": ["gender"],
"row_limit": 100,
}
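        # ?results=true requests a synchronous response even in async mode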
async_query_manager.init_app(app)
self.login(username="admin")
rv = self.client.post(
"/superset/explore_json/?results=true",
data={"form_data": json.dumps(form_data)},
)
self.assertEqual(rv.status_code, 200)
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
@mock.patch(
"superset.utils.cache_manager.CacheManager.cache",
new_callable=mock.PropertyMock,
)
@mock.patch("superset.viz.BaseViz.force_cached", new_callable=mock.PropertyMock)
def test_explore_json_data(self, mock_force_cached, mock_cache):
tbl_id = self.table_ids.get("birth_names")
form_data = dict(
{
"form_data": {
"datasource": f"{tbl_id}__table",
"viz_type": "dist_bar",
"time_range_endpoints": ["inclusive", "exclusive"],
"granularity_sqla": "ds",
"time_range": "No filter",
"metrics": ["count"],
"adhoc_filters": [],
"groupby": ["gender"],
"row_limit": 100,
}
}
)
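        # minimal cache stub that returns the stored form_data for any key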
class MockCache:
def get(self, key):
return form_data
def set(self):
return None
mock_cache.return_value = MockCache()
mock_force_cached.return_value = False
self.login(username="admin")
rv = self.client.get("/superset/explore_json/data/valid-cache-key")
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(rv.status_code, 200)
self.assertEqual(data["rowcount"], 2)
@mock.patch(
"superset.utils.cache_manager.CacheManager.cache",
new_callable=mock.PropertyMock,
)
def test_explore_json_data_no_login(self, mock_cache):
tbl_id = self.table_ids.get("birth_names")
form_data = dict(
{
"form_data": {
"datasource": f"{tbl_id}__table",
"viz_type": "dist_bar",
"time_range_endpoints": ["inclusive", "exclusive"],
"granularity_sqla": "ds",
"time_range": "No filter",
"metrics": ["count"],
"adhoc_filters": [],
"groupby": ["gender"],
"row_limit": 100,
}
}
)
class MockCache:
def get(self, key):
return form_data
def set(self):
return None
mock_cache.return_value = MockCache()
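        # no login is performed, so the endpoint must reject with 401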
rv = self.client.get("/superset/explore_json/data/valid-cache-key")
self.assertEqual(rv.status_code, 401)
def test_explore_json_data_invalid_cache_key(self):
self.login(username="admin")
cache_key = "invalid-cache-key"
rv = self.client.get(f"/superset/explore_json/data/{cache_key}")
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(rv.status_code, 404)
self.assertEqual(data["error"], "Cached data not found")
@mock.patch(
"superset.security.SupersetSecurityManager.get_schemas_accessible_by_user"
)
@mock.patch("superset.security.SupersetSecurityManager.can_access_database")
@mock.patch("superset.security.SupersetSecurityManager.can_access_all_datasources")
def test_schemas_access_for_csv_upload_endpoint(
self,
mock_can_access_all_datasources,
mock_can_access_database,
mock_schemas_accessible,
):
self.login(username="admin")
dbobj = self.create_fake_db()
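        # deny broad access so that only schemas explicitly returned by the
        # security manager are offered for file upload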
mock_can_access_all_datasources.return_value = False
mock_can_access_database.return_value = False
mock_schemas_accessible.return_value = ["this_schema_is_allowed_too"]
data = self.get_json_resp(
url="/superset/schemas_access_for_file_upload?db_id={db_id}".format(
db_id=dbobj.id
)
)
assert data == ["this_schema_is_allowed_too"]
self.delete_fake_db()
@pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
def test_select_star(self):
self.login(username="admin")
examples_db = superset.utils.database.get_example_database()
resp = self.get_resp(f"/superset/select_star/{examples_db.id}/birth_names")
self.assertIn("gender", resp)
def test_get_select_star_not_allowed(self):
"""
Database API: Test get select star not allowed
"""
self.login(username="gamma")
example_db = superset.utils.database.get_example_database()
resp = self.client.get(f"/superset/select_star/{example_db.id}/birth_names")
self.assertEqual(resp.status_code, 403)
@mock.patch("superset.views.core.results_backend_use_msgpack", False)
@mock.patch("superset.views.core.results_backend")
def test_display_limit(self, mock_results_backend):
self.login()
data = [{"col_0": i} for i in range(100)]
payload = {
"status": QueryStatus.SUCCESS,
"query": {"rows": 100},
"data": data,
}
        # expected payloads: the full result set, and the set limited to 1 row
expected_key = {"status": "success", "query": {"rows": 100}, "data": data}
limited_data = data[:1]
expected_limited = {
"status": "success",
"query": {"rows": 100},
"data": limited_data,
"displayLimitReached": True,
}
query_mock = mock.Mock()
query_mock.sql = "SELECT *"
query_mock.database = 1
query_mock.schema = "superset"
# do not apply msgpack serialization
use_msgpack = app.config["RESULTS_BACKEND_USE_MSGPACK"]
app.config["RESULTS_BACKEND_USE_MSGPACK"] = False
serialized_payload = sql_lab._serialize_payload(payload, False)
compressed = utils.zlib_compress(serialized_payload)
mock_results_backend.get.return_value = compressed
with mock.patch("superset.views.core.db") as mock_superset_db:
mock_superset_db.session.query().filter_by().one_or_none.return_value = (
query_mock
)
# get all results
result_key = json.loads(self.get_resp("/superset/results/key/"))
result_limited = json.loads(self.get_resp("/superset/results/key/?rows=1"))
self.assertEqual(result_key, expected_key)
self.assertEqual(result_limited, expected_limited)
app.config["RESULTS_BACKEND_USE_MSGPACK"] = use_msgpack
def test_results_default_deserialization(self):
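        # default (JSON) path: the serialized payload is a str, and
        # deserialization does not need the query's DB engine spec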
use_new_deserialization = False
data = [("a", 4, 4.0, "2019-08-18T16:39:16.660000")]
cursor_descr = (
("a", "string"),
("b", "int"),
("c", "float"),
("d", "datetime"),
)
db_engine_spec = BaseEngineSpec()
results = SupersetResultSet(data, cursor_descr, db_engine_spec)
query = {
"database_id": 1,
"sql": "SELECT * FROM birth_names LIMIT 100",
"status": QueryStatus.PENDING,
}
(
serialized_data,
selected_columns,
all_columns,
expanded_columns,
) = sql_lab._serialize_and_expand_data(
results, db_engine_spec, use_new_deserialization
)
payload = {
"query_id": 1,
"status": QueryStatus.SUCCESS,
"state": QueryStatus.SUCCESS,
"data": serialized_data,
"columns": all_columns,
"selected_columns": selected_columns,
"expanded_columns": expanded_columns,
"query": query,
}
serialized_payload = sql_lab._serialize_payload(
payload, use_new_deserialization
)
self.assertIsInstance(serialized_payload, str)
query_mock = mock.Mock()
deserialized_payload = superset.views.utils._deserialize_results_payload(
serialized_payload, query_mock, use_new_deserialization
)
self.assertDictEqual(deserialized_payload, payload)
query_mock.assert_not_called()
def test_results_msgpack_deserialization(self):
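        # msgpack path: the serialized payload is bytes, and deserialization
        # must call expand_data on the query's DB engine spec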
use_new_deserialization = True
data = [("a", 4, 4.0, "2019-08-18T16:39:16.660000")]
cursor_descr = (
("a", "string"),
("b", "int"),
("c", "float"),
("d", "datetime"),
)
db_engine_spec = BaseEngineSpec()
results = SupersetResultSet(data, cursor_descr, db_engine_spec)
query = {
"database_id": 1,
"sql": "SELECT * FROM birth_names LIMIT 100",
"status": QueryStatus.PENDING,
}
(
serialized_data,
selected_columns,
all_columns,
expanded_columns,
) = sql_lab._serialize_and_expand_data(
results, db_engine_spec, use_new_deserialization
)
payload = {
"query_id": 1,
"status": QueryStatus.SUCCESS,
"state": QueryStatus.SUCCESS,
"data": serialized_data,
"columns": all_columns,
"selected_columns": selected_columns,
"expanded_columns": expanded_columns,
"query": query,
}
serialized_payload = sql_lab._serialize_payload(
payload, use_new_deserialization
)
self.assertIsInstance(serialized_payload, bytes)
with mock.patch.object(
db_engine_spec, "expand_data", wraps=db_engine_spec.expand_data
) as expand_data:
query_mock = mock.Mock()
query_mock.database.db_engine_spec.expand_data = expand_data
deserialized_payload = superset.views.utils._deserialize_results_payload(
serialized_payload, query_mock, use_new_deserialization
)
df = results.to_pandas_df()
payload["data"] = dataframe.df_to_records(df)
self.assertDictEqual(deserialized_payload, payload)
expand_data.assert_called_once()
@mock.patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
{"FOO": lambda x: 1},
clear=True,
)
@pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
def test_feature_flag_serialization(self):
"""
Functions in feature flags don't break bootstrap data serialization.
"""
self.login()
encoded = json.dumps(
{"FOO": lambda x: 1, "super": "set"},
default=utils.pessimistic_json_iso_dttm_ser,
)
html_string = (
html.escape(encoded, quote=False)
.replace("'", "'")
.replace('"', """)
)
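        # the escaped payload must appear in the bootstrap data of each page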
dash_id = db.session.query(Dashboard.id).first()[0]
tbl_id = self.table_ids.get("wb_health_population")
urls = [
"/superset/sqllab",
"/superset/welcome",
f"/superset/dashboard/{dash_id}/",
"/superset/profile/admin/",
f"/superset/explore/table/{tbl_id}",
]
for url in urls:
data = self.get_resp(url)
self.assertTrue(html_string in data)
@mock.patch.dict(
"superset.extensions.feature_flag_manager._feature_flags",
{"SQLLAB_BACKEND_PERSISTENCE": True},
clear=True,
)
def test_sqllab_backend_persistence_payload(self):
username = "admin"
self.login(username)
user_id = security_manager.find_user(username).id
# create a tab
data = {
"queryEditor": json.dumps(
{
"title": "Untitled Query 1",
"dbId": 1,
"schema": None,
"autorun": False,
"sql": "SELECT ...",
"queryLimit": 1000,
}
)
}
resp = self.get_json_resp("/tabstateview/", data=data)
tab_state_id = resp["id"]
# run a query in the created tab
self.run_sql(
"SELECT name FROM birth_names",
"client_id_1",
user_name=username,
raise_on_error=True,
sql_editor_id=str(tab_state_id),
)
# run an orphan query (no tab)
self.run_sql(
"SELECT name FROM birth_names",
"client_id_2",
user_name=username,
raise_on_error=True,
)
# we should have only 1 query returned, since the second one is not
# associated with any tabs
payload = views.Superset._get_sqllab_tabs(user_id=user_id)
self.assertEqual(len(payload["queries"]), 1)
def test_virtual_table_explore_visibility(self):
        # test that default visibility is set to True
database = superset.utils.database.get_example_database()
self.assertEqual(database.allows_virtual_table_explore, True)
# test that visibility is disabled when extra is set to False
extra = database.get_extra()
extra["allows_virtual_table_explore"] = False
database.extra = json.dumps(extra)
self.assertEqual(database.allows_virtual_table_explore, False)
# test that visibility is enabled when extra is set to True
extra = database.get_extra()
extra["allows_virtual_table_explore"] = True
database.extra = json.dumps(extra)
self.assertEqual(database.allows_virtual_table_explore, True)
# test that visibility is not broken with bad values
extra = database.get_extra()
extra["allows_virtual_table_explore"] = "trash value"
database.extra = json.dumps(extra)
self.assertEqual(database.allows_virtual_table_explore, True)
def test_explore_database_id(self):
database = superset.utils.database.get_example_database()
explore_database = superset.utils.database.get_example_database()
# test that explore_database_id is the regular database
# id if none is set in the extra
self.assertEqual(database.explore_database_id, database.id)
# test that explore_database_id is correct if the extra is set
extra = database.get_extra()
extra["explore_database_id"] = explore_database.id
database.extra = json.dumps(extra)
self.assertEqual(database.explore_database_id, explore_database.id)
def test_get_column_names_from_metric(self):
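        # SIMPLE adhoc metrics reference a physical column; SQL-expression
        # metrics do not map to one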
simple_metric = {
"expressionType": utils.AdhocMetricExpressionType.SIMPLE.value,
"column": {"column_name": "my_col"},
"aggregate": "SUM",
"label": "My Simple Label",
}
assert utils.get_column_name_from_metric(simple_metric) == "my_col"
sql_metric = {
"expressionType": utils.AdhocMetricExpressionType.SQL.value,
"sqlExpression": "SUM(my_label)",
"label": "My SQL Label",
}
assert utils.get_column_name_from_metric(sql_metric) is None
assert utils.get_column_names_from_metrics([simple_metric, sql_metric]) == [
"my_col"
]
@pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
@mock.patch("superset.models.core.DB_CONNECTION_MUTATOR")
def test_explore_injected_exceptions(self, mock_db_connection_mutator):
"""
Handle injected exceptions from the db mutator
"""
# Assert we can handle a custom exception at the mutator level
exception = SupersetException("Error message")
mock_db_connection_mutator.side_effect = exception
        slc = db.session.query(Slice).first()
        url = f"/superset/explore/?form_data=%7B%22slice_id%22%3A%20{slc.id}%7D"
self.login()
data = self.get_resp(url)
self.assertIn("Error message", data)
# Assert we can handle a driver exception at the mutator level
exception = SQLAlchemyError("Error message")
mock_db_connection_mutator.side_effect = exception
        slc = db.session.query(Slice).first()
        url = f"/superset/explore/?form_data=%7B%22slice_id%22%3A%20{slc.id}%7D"
self.login()
data = self.get_resp(url)
self.assertIn("Error message", data)
@pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
@mock.patch("superset.models.core.DB_CONNECTION_MUTATOR")
def test_dashboard_injected_exceptions(self, mock_db_connection_mutator):
"""
Handle injected exceptions from the db mutator
"""
        # Assert we can handle a custom exception at the mutator level
exception = SupersetException("Error message")
mock_db_connection_mutator.side_effect = exception
dash = db.session.query(Dashboard).first()
url = f"/superset/dashboard/{dash.id}/"
self.login()
data = self.get_resp(url)
self.assertIn("Error message", data)
# Assert we can handle a driver exception at the mutator level
exception = SQLAlchemyError("Error message")
mock_db_connection_mutator.side_effect = exception
dash = db.session.query(Dashboard).first()
url = f"/superset/dashboard/{dash.id}/"
self.login()
data = self.get_resp(url)
self.assertIn("Error message", data)
@mock.patch("superset.sql_lab.cancel_query")
@mock.patch("superset.views.core.db.session")
def test_stop_query_not_implemented(
self, mock_superset_db_session, mock_sql_lab_cancel_query
):
"""
Handles stop query when the DB engine spec does not
have a cancel query method.
"""
form_data = {"client_id": "foo"}
query_mock = mock.Mock()
query_mock.client_id = "foo"
query_mock.status = QueryStatus.RUNNING
self.login(username="admin")
mock_superset_db_session.query().filter_by().one().return_value = query_mock
mock_sql_lab_cancel_query.return_value = False
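        # cancel_query returning False means the engine spec cannot cancel,
        # so the endpoint should respond with 422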
rv = self.client.post(
"/superset/stop_query/", data={"form_data": json.dumps(form_data)},
)
assert rv.status_code == 422
if __name__ == "__main__":
unittest.main()
self.login()
data = self.get_resp(url)
self.assertIn("Error message", data)
exception = SQLAlchemyError("Error message")
mock_db_connection_mutator.side_effect = exception
dash = db.session.query(Dashboard).first()
url = f"/superset/dashboard/{dash.id}/"
self.login()
data = self.get_resp(url)
self.assertIn("Error message", data)
@mock.patch("superset.sql_lab.cancel_query")
@mock.patch("superset.views.core.db.session")
def test_stop_query_not_implemented(
self, mock_superset_db_session, mock_sql_lab_cancel_query
):
form_data = {"client_id": "foo"}
query_mock = mock.Mock()
query_mock.client_id = "foo"
query_mock.status = QueryStatus.RUNNING
self.login(username="admin")
mock_superset_db_session.query().filter_by().one().return_value = query_mock
mock_sql_lab_cancel_query.return_value = False
rv = self.client.post(
"/superset/stop_query/", data={"form_data": json.dumps(form_data)},
)
assert rv.status_code == 422
if __name__ == "__main__":
unittest.main()
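# A minimal, hedged sketch (not part of the suite above) of the msgpack
# round-trip that test_results_msgpack_deserialization exercises. Superset's
# actual serializer is internal; the direct use of the `msgpack` package and
# the payload values here are illustrative assumptions only.
import msgpack

_payload = {"query_id": 1, "status": "success", "data": [["a", 4]]}
_packed = msgpack.packb(_payload, use_bin_type=True)    # dict -> bytes
assert isinstance(_packed, bytes)
assert msgpack.unpackb(_packed, raw=False) == _payload  # bytes -> dict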
| true
| true
|
1c4683a2b04793b6517763584a3577a7502613a2
| 620
|
py
|
Python
|
adv/pia.py
|
mattkw/dl
|
45bfc28ad9ff827045a3734730deb893a2436c09
|
[
"Apache-2.0"
] | null | null | null |
adv/pia.py
|
mattkw/dl
|
45bfc28ad9ff827045a3734730deb893a2436c09
|
[
"Apache-2.0"
] | null | null | null |
adv/pia.py
|
mattkw/dl
|
45bfc28ad9ff827045a3734730deb893a2436c09
|
[
"Apache-2.0"
] | null | null | null |
import adv_test
from adv import *
from module import energy
def module():
return Pia
class Pia(Adv):
def pre(this):
if this.condition('energy'):
this.init = this.c_init
def init(this):
this.conf['acl'] = """
`s1, seq=5 and cancel
`s3, seq=5 and cancel
"""
energy.Energy(this,{},{})
def c_init(this):
energy.Energy(this,{'s2':1},{'s2':1})
if __name__ == '__main__':
conf = {}
conf['acl'] = """
`s1
`s2
`s3
`fs, seq=5
"""
adv_test.test(module(), conf, verbose=0)
| 16.756757
| 45
| 0.485484
|
import adv_test
from adv import *
from module import energy
def module():
return Pia
class Pia(Adv):
def pre(this):
if this.condition('energy'):
this.init = this.c_init
def init(this):
this.conf['acl'] = """
`s1, seq=5 and cancel
`s3, seq=5 and cancel
"""
energy.Energy(this,{},{})
def c_init(this):
energy.Energy(this,{'s2':1},{'s2':1})
if __name__ == '__main__':
conf = {}
conf['acl'] = """
`s1
`s2
`s3
`fs, seq=5
"""
adv_test.test(module(), conf, verbose=0)
| true
| true
|
1c4683a648d2c1ac36ba542e426f889e72d0b473
| 1,925
|
py
|
Python
|
medexbot/spiders/dosage_form_spider.py
|
ahmedshahriar/bd-medicine-scraper
|
ea97d929fc9cdcbdde2602827cdc3d12709e2ca9
|
[
"Apache-2.0"
] | 1
|
2022-03-17T03:02:49.000Z
|
2022-03-17T03:02:49.000Z
|
medexbot/spiders/dosage_form_spider.py
|
ahmedshahriar/bd-medicine-scraper
|
ea97d929fc9cdcbdde2602827cdc3d12709e2ca9
|
[
"Apache-2.0"
] | null | null | null |
medexbot/spiders/dosage_form_spider.py
|
ahmedshahriar/bd-medicine-scraper
|
ea97d929fc9cdcbdde2602827cdc3d12709e2ca9
|
[
"Apache-2.0"
] | null | null | null |
import re
import scrapy
from django.utils.text import slugify
from medexbot.items import DosageFormItem
class DosageFormSpider(scrapy.Spider):
name = "dosage"
allowed_domains = ['medex.com.bd']
start_urls = ['https://medex.com.bd/dosage-forms']
def parse(self, response, **kwargs):
for dosage_form_info in response.css('a.hoverable-block'):
dosage_form_link = dosage_form_info.css('a.hoverable-block ::attr("href") ').get()
dosage_form_id = re.findall("dosage-forms/(\S*)/", dosage_form_link)[0]
dosage_form_name = dosage_form_info.css('div.data-row-top img ::attr("title") ').get()
brand_names_counter = dosage_form_info.css('div.data-row-company ::text').re(r"(\d+)")
brand_names_count = 0 if len(brand_names_counter) == 0 else brand_names_counter[0]
yield from response.follow_all(dosage_form_info.css('a.hoverable-block ::attr("href") '),
self.parse_dosage_form,
meta={"dosage_form_id": dosage_form_id, "dosage_form_name": dosage_form_name,
"brand_names_count": brand_names_count})
def parse_dosage_form(self, response):
item = DosageFormItem()
item["dosage_form_id"] = response.request.meta['dosage_form_id']
item['dosage_form_name'] = response.request.meta['dosage_form_name']
item['brand_names_count'] = response.request.meta['brand_names_count']
item['slug'] = slugify(item['dosage_form_name'] + '-' + item['dosage_form_id'],
allow_unicode=True)
# todo brand ids mapping
# brand_name_links = response.css('a.hoverable-block ::attr(href)').extract()
# brand_name_ids = [re.findall("brands/(\S*)/", brand_name_link)[0] for brand_name_link in brand_name_links]
yield item
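# A small, hedged check (not project code) of the ID-extraction regex used in
# parse() above; the sample href is an assumption based on the site's URL
# scheme, not a scraped value.
import re

_sample_link = "/dosage-forms/tablet/"
assert re.findall(r"dosage-forms/(\S*)/", _sample_link)[0] == "tablet"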
| 48.125
| 120
| 0.628571
|
import re
import scrapy
from django.utils.text import slugify
from medexbot.items import DosageFormItem
class DosageFormSpider(scrapy.Spider):
name = "dosage"
allowed_domains = ['medex.com.bd']
start_urls = ['https://medex.com.bd/dosage-forms']
def parse(self, response, **kwargs):
for dosage_form_info in response.css('a.hoverable-block'):
dosage_form_link = dosage_form_info.css('a.hoverable-block ::attr("href") ').get()
dosage_form_id = re.findall("dosage-forms/(\S*)/", dosage_form_link)[0]
dosage_form_name = dosage_form_info.css('div.data-row-top img ::attr("title") ').get()
brand_names_counter = dosage_form_info.css('div.data-row-company ::text').re(r"(\d+)")
brand_names_count = 0 if len(brand_names_counter) == 0 else brand_names_counter[0]
yield from response.follow_all(dosage_form_info.css('a.hoverable-block ::attr("href") '),
self.parse_dosage_form,
meta={"dosage_form_id": dosage_form_id, "dosage_form_name": dosage_form_name,
"brand_names_count": brand_names_count})
def parse_dosage_form(self, response):
item = DosageFormItem()
item["dosage_form_id"] = response.request.meta['dosage_form_id']
item['dosage_form_name'] = response.request.meta['dosage_form_name']
item['brand_names_count'] = response.request.meta['brand_names_count']
item['slug'] = slugify(item['dosage_form_name'] + '-' + item['dosage_form_id'],
allow_unicode=True)
yield item
| true
| true
|
1c4684004d467ca7f02daba2bde3f7b30970341c
| 2,502
|
py
|
Python
|
velkoz_web_packages/objects_base/db_orm_models_base.py
|
MatthewTe/velkoz-web-data-extraction-library
|
d6acb8bd86106a6ab754be99488436eb37037e54
|
[
"MIT"
] | null | null | null |
velkoz_web_packages/objects_base/db_orm_models_base.py
|
MatthewTe/velkoz-web-data-extraction-library
|
d6acb8bd86106a6ab754be99488436eb37037e54
|
[
"MIT"
] | 2
|
2021-03-31T20:12:25.000Z
|
2021-12-13T20:48:22.000Z
|
velkoz_web_packages/objects_base/db_orm_models_base.py
|
MatthewTe/velkoz-web-data-extraction-library
|
d6acb8bd86106a6ab754be99488436eb37037e54
|
[
"MIT"
] | null | null | null |
# Importing the database orm management packages:
from sqlalchemy import Column, Integer, String, Text, DateTime, LargeBinary
from sqlalchemy.ext.declarative import declarative_base
# Creating the declarative base object used to create base database orm models:
Base = declarative_base()
class BaseWebPageResponseModel(Base):
"""This is the database model that represents the database table for the
BaseWebPageResponse object.
This is the orm model that the BaseDataIngestionEngine uses to write base data
extracted from each BaseWebPageResponse object into a database. The data model
is designed to represent the following data from the BaseWebPageResponse object:
* The time the BaseWebPageResponse object was initialized.
* The http response code retrieved from the url of the BaseWebPageResponse object_url.
* The url used to initialize the BaseWebPageResponse object.
* The raw html content scraped from the site by the BaseWebPageResponse object.
It is expected that WebObjects and Ingestion Engines that inherit from their
base objects use a custom response model another method of writing data to
a database.
Attributes:
__tablename__ (str): A metadata attribute that determines the name of the table
created by the engine.
date (sqlalchemy.Column): The Datetime that the BaseWebPageResponse
object being ingested was created.
response_code (sqlalchemy.Column): The http response Integer the BaseWebPageResponse
object got from the url it was initialized with.
url (sqlalchemy.Column): The url string that was used to initialize the
BaseWebPageResponse object stored as Text.
html_content (sqlalchemy.Column): The LargeBinary element used to store the
raw html data scraped from the webpage by the BaseWebPageResponse object.
"""
# Declaring table metadata attributes:
__tablename__ = "default_web_obj_tbl"
# Declaring table column attributes:
date = Column(
"date_initialized",
DateTime,
primary_key = True
)
response_code = Column(
"response_code",
Integer
)
url = Column(
"url",
Text
)
html_content = Column(
"html_content",
LargeBinary,
nullable = True
)
# __dunder methods:
def __repr__(self):
return f"BaseWebPageResponse Model({self.url}_{self.date}_{self.response_code})"
| 36.26087
| 92
| 0.71303
|
from sqlalchemy import Column, Integer, String, Text, DateTime, LargeBinary
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class BaseWebPageResponseModel(Base):
__tablename__ = "default_web_obj_tbl"
date = Column(
"date_initialized",
DateTime,
primary_key = True
)
response_code = Column(
"response_code",
Integer
)
url = Column(
"url",
Text
)
html_content = Column(
"html_content",
LargeBinary,
nullable = True
)
def __repr__(self):
return f"BaseWebPageResponse Model({self.url}_{self.date}_{self.response_code})"
| true
| true
|
1c46857cccde346eb4a78768c8061b11e696ca88
| 4,379
|
py
|
Python
|
contrib/seeds/generate-seeds.py
|
behi11/vectorium-plus
|
fb9c68db8e2450c949d75bff9e737562f125be27
|
[
"MIT"
] | 5
|
2019-07-09T02:06:22.000Z
|
2021-08-08T18:48:03.000Z
|
contrib/seeds/generate-seeds.py
|
behi11/vectorium-plus
|
fb9c68db8e2450c949d75bff9e737562f125be27
|
[
"MIT"
] | null | null | null |
contrib/seeds/generate-seeds.py
|
behi11/vectorium-plus
|
fb9c68db8e2450c949d75bff9e737562f125be27
|
[
"MIT"
] | 6
|
2019-07-09T02:02:14.000Z
|
2021-08-06T16:01:01.000Z
|
#!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the bitcoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 18884)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 28884)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
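# Hedged illustration (not part of the script) of what parse_spec() yields for
# two of the input formats listed in the module docstring; the addresses are
# placeholders and the default port matches the main-net value used in main().
_host4, _port4 = parse_spec("1.2.3.4", 18884)
assert _port4 == 18884 and _host4[-4:] == bytearray([1, 2, 3, 4])
_host6, _port6 = parse_spec("[2001:db8::1]:8333", 18884)
assert _port6 == 8333 and len(_host6) == 16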
| 31.503597
| 98
| 0.582553
|
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr:
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr:
sub = [[], []]
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1):
continue
x += 1
assert(x < 2)
else:
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'):
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match:
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1:
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the bitcoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 18884)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 28884)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| true
| true
|
1c46866d970d2c1b41f7f0473196477642241b48
| 5,200
|
py
|
Python
|
src/python/dxpy/scripts/dx_reads_to_fastq.py
|
psung/dx-toolkit
|
f3a430c5e24184215eb4a9883a179edf07bfa08b
|
[
"Apache-2.0"
] | null | null | null |
src/python/dxpy/scripts/dx_reads_to_fastq.py
|
psung/dx-toolkit
|
f3a430c5e24184215eb4a9883a179edf07bfa08b
|
[
"Apache-2.0"
] | null | null | null |
src/python/dxpy/scripts/dx_reads_to_fastq.py
|
psung/dx-toolkit
|
f3a430c5e24184215eb4a9883a179edf07bfa08b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (C) 2013-2014 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys, argparse
import dxpy
arg_parser = argparse.ArgumentParser(description="Download a reads table into a FASTQ file")
arg_parser.add_argument("reads_table", help="ID of the reads GTable object")
arg_parser.add_argument("--output", help="Name of the output file", required=True)
arg_parser.add_argument("--output2", help="Name of the second output file (for paired reads)")
arg_parser.add_argument("--discard_names", help="Discard read names", type=bool, default=False)
arg_parser.add_argument("--output_FASTA", help="Output FASTA instead of FASTQ", type=bool, default=False)
arg_parser.add_argument("-s", "--start_row", help="Start at this table row", type=int, default=0)
arg_parser.add_argument("-e", "--end_row", help="End at this table row", type=int, default=None)
def main(**kwargs):
if len(kwargs) == 0:
kwargs = vars(arg_parser.parse_args(sys.argv[1:]))
if "end_row" not in kwargs:
kwargs["end_row"] = None
if kwargs["end_row"] is not None and kwargs["end_row"] <= kwargs["start_row"]:
arg_parser.error("End row %d must be greater than start row %d" % (kwargs["end_row"], kwargs["start_row"]))
try:
table = dxpy.DXGTable(kwargs['reads_table'])
except:
raise dxpy.AppError("Failed to open table for export")
existCols = table.get_col_names()
### sort out columns to download
col = []
col2 = []
# if there's a second sequence, it's paired
if "sequence2" in existCols:
isPaired = True
else:
isPaired = False
if "name" in existCols and kwargs['discard_names'] != True:
hasName = True
col.append("name")
if isPaired == True:
col2.append("name2")
else:
hasName = False
col.append("sequence")
if isPaired == True:
col2.append("sequence2")
if "quality" in existCols:
hasQual = True
col.append("quality")
if isPaired == True:
col2.append("quality2")
else:
hasQual = False
# if we don't have quals we must output FASTA instead
kwargs['output_FASTA'] = True
if kwargs['output'] is None:
raise dxpy.AppError("output parameter is required")
with open(kwargs['output'], 'wb') as out_fh:
exportToFile(columns=col, table=table, output_file=out_fh, hasName=hasName, hasQual=hasQual, FASTA=kwargs['output_FASTA'], start_row=kwargs['start_row'], end_row=kwargs['end_row'])
if isPaired == True:
if kwargs['output2'] is None:
raise dxpy.AppError("output2 parameter is required for paired reads")
with open(kwargs['output2'], 'wb') as out_fh2:
exportToFile(columns=col2, table=table, output_file=out_fh2, hasName=hasName, hasQual=hasQual, FASTA=kwargs['output_FASTA'], start_row=kwargs['start_row'], end_row=kwargs['end_row'])
def exportToFile(columns, table, output_file, hasName = True, hasQual = True, FASTA = False, start_row = 0, end_row = None):
for row in table.iterate_rows(start=start_row, end=end_row, columns=columns):
if FASTA == True:
if hasName == True:
# change comment character for FASTA
if row[0][0] == '@':
row[0] = u'>' + row[0][1:]
# if already has comment character (>)
if row[0][0] == ">":
output_file.write('\n'.join([ row[0], row[1] ]))
# otherwise, add it
else:
output_file.write('\n'.join([">" + row[0], row[1] ]))
else:
output_file.write('\n'.join([">", row[0]]))
#output FASTQ
else:
if hasName == True:
                # if already has comment character (@)
if row[0][0] == "@":
output_file.write('\n'.join([ row[0], row[1] ]))
# otherwise, add it
else:
output_file.write('\n'.join(["@" + row[0], row[1] ]))
# add qualities if they exist
if hasQual == True:
output_file.write('\n'.join(["\n+", row[2] ]))
# else add without name
else:
output_file.write('\n'.join(["@", row[0]]))
if hasQual == True:
output_file.write('\n'.join(['', "+", row[1] ]))
# end of current record
output_file.write('\n')
output_file.close()
return output_file.name
if __name__ == '__main__':
main()
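# Hedged note (not part of the script): argparse's type=bool, used above for
# --discard_names and --output_FASTA, treats any non-empty string as True, so
# "--discard_names False" still discards names. A minimal self-check:
import argparse as _argparse

_p = _argparse.ArgumentParser()
_p.add_argument("--flag", type=bool, default=False)
assert _p.parse_args(["--flag", "False"]).flag is True  # bool("False") is True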
| 38.518519
| 194
| 0.603846
|
import sys, argparse
import dxpy
arg_parser = argparse.ArgumentParser(description="Download a reads table into a FASTQ file")
arg_parser.add_argument("reads_table", help="ID of the reads GTable object")
arg_parser.add_argument("--output", help="Name of the output file", required=True)
arg_parser.add_argument("--output2", help="Name of the second output file (for paired reads)")
arg_parser.add_argument("--discard_names", help="Discard read names", type=bool, default=False)
arg_parser.add_argument("--output_FASTA", help="Output FASTA instead of FASTQ", type=bool, default=False)
arg_parser.add_argument("-s", "--start_row", help="Start at this table row", type=int, default=0)
arg_parser.add_argument("-e", "--end_row", help="End at this table row", type=int, default=None)
def main(**kwargs):
if len(kwargs) == 0:
kwargs = vars(arg_parser.parse_args(sys.argv[1:]))
if "end_row" not in kwargs:
kwargs["end_row"] = None
if kwargs["end_row"] is not None and kwargs["end_row"] <= kwargs["start_row"]:
arg_parser.error("End row %d must be greater than start row %d" % (kwargs["end_row"], kwargs["start_row"]))
try:
table = dxpy.DXGTable(kwargs['reads_table'])
except:
raise dxpy.AppError("Failed to open table for export")
existCols = table.get_col_names()
    col = []
    col2 = []
    if "sequence2" in existCols:
isPaired = True
else:
isPaired = False
if "name" in existCols and kwargs['discard_names'] != True:
hasName = True
col.append("name")
if isPaired == True:
col2.append("name2")
else:
hasName = False
col.append("sequence")
if isPaired == True:
col2.append("sequence2")
if "quality" in existCols:
hasQual = True
col.append("quality")
if isPaired == True:
col2.append("quality2")
else:
hasQual = False
kwargs['output_FASTA'] = True
if kwargs['output'] is None:
raise dxpy.AppError("output parameter is required")
with open(kwargs['output'], 'wb') as out_fh:
exportToFile(columns=col, table=table, output_file=out_fh, hasName=hasName, hasQual=hasQual, FASTA=kwargs['output_FASTA'], start_row=kwargs['start_row'], end_row=kwargs['end_row'])
if isPaired == True:
if kwargs['output2'] is None:
raise dxpy.AppError("output2 parameter is required for paired reads")
with open(kwargs['output2'], 'wb') as out_fh2:
exportToFile(columns=col2, table=table, output_file=out_fh2, hasName=hasName, hasQual=hasQual, FASTA=kwargs['output_FASTA'], start_row=kwargs['start_row'], end_row=kwargs['end_row'])
def exportToFile(columns, table, output_file, hasName = True, hasQual = True, FASTA = False, start_row = 0, end_row = None):
for row in table.iterate_rows(start=start_row, end=end_row, columns=columns):
if FASTA == True:
if hasName == True:
# change comment character for FASTA
if row[0][0] == '@':
row[0] = u'>' + row[0][1:]
# if already has comment character (>)
if row[0][0] == ">":
output_file.write('\n'.join([ row[0], row[1] ]))
# otherwise, add it
else:
output_file.write('\n'.join([">" + row[0], row[1] ]))
else:
output_file.write('\n'.join([">", row[0]]))
#output FASTQ
else:
if hasName == True:
                # if already has comment character (@)
if row[0][0] == "@":
output_file.write('\n'.join([ row[0], row[1] ]))
# otherwise, add it
else:
output_file.write('\n'.join(["@" + row[0], row[1] ]))
# add qualities if they exist
if hasQual == True:
output_file.write('\n'.join(["\n+", row[2] ]))
# else add without name
else:
output_file.write('\n'.join(["@", row[0]]))
if hasQual == True:
output_file.write('\n'.join(['', "+", row[1] ]))
# end of current record
output_file.write('\n')
output_file.close()
return output_file.name
if __name__ == '__main__':
main()
| true
| true
|
1c468a5f559d58aac8a33d2176a44891f8e19041
| 88
|
py
|
Python
|
hibiapi/api/tieba/net.py
|
cleoold/HibiAPI
|
d997c5a2bf3cdbccc758d7036447e443c6b6f0ff
|
[
"Apache-2.0"
] | 394
|
2020-12-19T05:51:02.000Z
|
2022-03-30T07:44:42.000Z
|
hibiapi/api/tieba/net.py
|
cleoold/HibiAPI
|
d997c5a2bf3cdbccc758d7036447e443c6b6f0ff
|
[
"Apache-2.0"
] | 208
|
2020-12-20T14:47:31.000Z
|
2022-03-31T11:11:00.000Z
|
hibiapi/api/tieba/net.py
|
cleoold/HibiAPI
|
d997c5a2bf3cdbccc758d7036447e443c6b6f0ff
|
[
"Apache-2.0"
] | 93
|
2020-12-29T08:19:04.000Z
|
2022-03-30T06:08:16.000Z
|
from hibiapi.utils.net import BaseNetClient
class NetRequest(BaseNetClient):
pass
| 14.666667
| 43
| 0.795455
|
from hibiapi.utils.net import BaseNetClient
class NetRequest(BaseNetClient):
pass
| true
| true
|
1c468a5f5e92b0751d052266b0d99570a56f5837
| 2,539
|
py
|
Python
|
Normal_Equation/prep.py
|
Globe-Eater/Geographic-Duplicate-Detection-
|
ec467fc41cb456959da87fd913465dc9daa27d80
|
[
"MIT"
] | null | null | null |
Normal_Equation/prep.py
|
Globe-Eater/Geographic-Duplicate-Detection-
|
ec467fc41cb456959da87fd913465dc9daa27d80
|
[
"MIT"
] | null | null | null |
Normal_Equation/prep.py
|
Globe-Eater/Geographic-Duplicate-Detection-
|
ec467fc41cb456959da87fd913465dc9daa27d80
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
def start():
'''This method is designed to read input in from the user.'''
df = pd.read_excel("datasets/unprepared_data/" + input("Please enter the path for the data:"))
return df
def fill_empty(df):
    '''fill_empty takes the argument df (dataframe)
This should be used with apply.
For example:
df = fill_empty(df)'''
df = df.replace(r'^\s*$', np.nan, regex=True)
df = df.fillna(value="No Data")
return df
def check_numbers(lat, long):
'''This method is to make sure that the lats and longs are within the state of Oklahoma.
inputs: df['Lat', 'long']]
output "Everything should be within Oklahoma.
output There is a value that is outside the range of Oklahoma.'''
    # assert # are the numbers within Oklahoma? Need to look this up and implement it.
pass
def prep(df):
'''prep is used for vectorizing the data so that it can be used
in a machine learning model.
Dev Notes:
Order of OPS:
Convert fields like duplicate_check from text to
1 or 0.'''
df = df[['OBJECTID', 'PROPNAME', 'ADDRESS', 'RESNAME', 'Lat', 'Long', 'duplicate_check']]
return df
def labels(x):
'''This method is to be applied to the dataframe df, that has a column duplicate_check in it.
    This is being used to convert pos_dup to 1 and good to 0. Later this will be expanded to cover
anything else within the dataset.'''
if x == 'pos_dup':
return 1
elif x == 'good':
return 0
elif x == 'No Data':
return 0
else:
return 0
def saver(df):
'''This method is designed to ask the user if they want to save and if so where.
The arguments are for asking the user if they want to save, and the dataframe to
be saved.'''
user_input = input("Would you like to save y/n?: ")
question = True
while question:
if user_input == 'n':
break
elif user_input == 'y':
path = input('Please input a valid path and filename such as /path/to/file/.xlsx : ')
try:
df.to_excel(path)
print("File successfully saved.")
question = False
except FileNotFoundError:
print("Path was not found please try again")
if __name__ == '__main__':
dataframe = start()
dataframe = fill_empty(dataframe)
dataframe = prep(dataframe)
dataframe['duplicate_check'] = dataframe['duplicate_check'].apply(labels)
dataframe.head()
saver(dataframe)
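# Small, hedged illustration (not project code) of the labels() mapping applied
# over a duplicate_check column, mirroring the .apply(labels) call above:
_demo = pd.Series(["pos_dup", "good", "No Data", "anything else"])
assert list(_demo.apply(labels)) == [1, 0, 0, 0]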
| 34.310811
| 98
| 0.633714
|
import numpy as np
import pandas as pd
def start():
df = pd.read_excel("datasets/unprepared_data/" + input("Please enter the path for the data:"))
return df
def fill_empty(df):
df = df.replace(r'^\s*$', np.nan, regex=True)
df = df.fillna(value="No Data")
return df
def check_numbers(lat, long):
    pass
def prep(df):
    df = df[['OBJECTID', 'PROPNAME', 'ADDRESS', 'RESNAME', 'Lat', 'Long', 'duplicate_check']]
return df
def labels(x):
if x == 'pos_dup':
return 1
elif x == 'good':
return 0
elif x == 'No Data':
return 0
else:
return 0
def saver(df):
user_input = input("Would you like to save y/n?: ")
question = True
while question:
if user_input == 'n':
break
elif user_input == 'y':
path = input('Please input a valid path and filename such as /path/to/file/.xlsx : ')
try:
df.to_excel(path)
print("File successfully saved.")
question = False
except FileNotFoundError:
print("Path was not found please try again")
if __name__ == '__main__':
dataframe = start()
dataframe = fill_empty(dataframe)
dataframe = prep(dataframe)
dataframe['duplicate_check'] = dataframe['duplicate_check'].apply(labels)
dataframe.head()
saver(dataframe)
| true
| true
|
1c468cbad038e07db05af79f02c42510983d4d81
| 5,341
|
py
|
Python
|
sdk/python/pulumi_kubernetes/rbac/v1/ClusterRole.py
|
Carlangueitor/pulumi-kubernetes
|
859ccaaeb8291de49128dbc202fbac1358b2a25a
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_kubernetes/rbac/v1/ClusterRole.py
|
Carlangueitor/pulumi-kubernetes
|
859ccaaeb8291de49128dbc202fbac1358b2a25a
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_kubernetes/rbac/v1/ClusterRole.py
|
Carlangueitor/pulumi-kubernetes
|
859ccaaeb8291de49128dbc202fbac1358b2a25a
|
[
"Apache-2.0"
] | null | null | null |
# *** WARNING: this file was generated by the Pulumi Kubernetes codegen tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
from typing import Optional
import pulumi
import pulumi.runtime
from pulumi import Input, ResourceOptions
from ... import tables, version
class ClusterRole(pulumi.CustomResource):
"""
ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit
by a RoleBinding or ClusterRoleBinding.
"""
apiVersion: pulumi.Output[str]
"""
APIVersion defines the versioned schema of this representation of an object. Servers should
convert recognized schemas to the latest internal value, and may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
"""
kind: pulumi.Output[str]
"""
Kind is a string value representing the REST resource this object represents. Servers may infer
this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More
info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
"""
aggregation_rule: pulumi.Output[dict]
"""
AggregationRule is an optional field that describes how to build the Rules for this ClusterRole.
If AggregationRule is set, then the Rules are controller managed and direct changes to Rules
will be stomped by the controller.
"""
metadata: pulumi.Output[dict]
"""
Standard object's metadata.
"""
rules: pulumi.Output[list]
"""
Rules holds all the PolicyRules for this ClusterRole
"""
def __init__(self, resource_name, opts=None, aggregation_rule=None, metadata=None, rules=None, __name__=None, __opts__=None):
"""
Create a ClusterRole resource with the given unique name, arguments, and options.
:param str resource_name: The _unique_ name of the resource.
:param pulumi.ResourceOptions opts: A bag of options that control this resource's behavior.
:param pulumi.Input[dict] aggregation_rule: AggregationRule is an optional field that describes how to build the Rules for this
ClusterRole. If AggregationRule is set, then the Rules are controller managed and
direct changes to Rules will be stomped by the controller.
:param pulumi.Input[dict] metadata: Standard object's metadata.
:param pulumi.Input[list] rules: Rules holds all the PolicyRules for this ClusterRole
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if not resource_name:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(resource_name, str):
raise TypeError('Expected resource name to be a string')
if opts and not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
__props__['apiVersion'] = 'rbac.authorization.k8s.io/v1'
__props__['kind'] = 'ClusterRole'
__props__['aggregationRule'] = aggregation_rule
__props__['metadata'] = metadata
__props__['rules'] = rules
__props__['status'] = None
parent = opts.parent if opts and opts.parent else None
aliases = [
pulumi.Alias(type_="kubernetes:rbac.authorization.k8s.io/v1alpha1:ClusterRole"),
pulumi.Alias(type_="kubernetes:rbac.authorization.k8s.io/v1beta1:ClusterRole"),
]
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(
version=version.get_version(),
aliases=aliases,
))
super(ClusterRole, self).__init__(
"kubernetes:rbac.authorization.k8s.io/v1:ClusterRole",
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None):
"""
Get the state of an existing `ClusterRole` resource, as identified by `id`.
The ID is of the form `[namespace]/[name]`; if `[namespace]` is omitted,
then (per Kubernetes convention) the ID becomes `default/[name]`.
Pulumi will keep track of this resource using `resource_name` as the Pulumi ID.
:param str resource_name: _Unique_ name used to register this resource with Pulumi.
:param pulumi.Input[str] id: An ID for the Kubernetes resource to retrieve.
Takes the form `[namespace]/[name]` or `[name]`.
:param Optional[pulumi.ResourceOptions] opts: A bag of options that control this
resource's behavior.
"""
opts = ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
return ClusterRole(resource_name, opts)
def translate_output_property(self, prop: str) -> str:
return tables._CASING_FORWARD_TABLE.get(prop) or prop
def translate_input_property(self, prop: str) -> str:
return tables._CASING_BACKWARD_TABLE.get(prop) or prop
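# Hedged usage sketch (not generated SDK code): declaring a ClusterRole from a
# Pulumi program; the role name and rule contents below are illustrative
# assumptions, not values taken from this SDK.
def _example_pod_reader() -> 'ClusterRole':
    # Call this inside a Pulumi program (i.e. under `pulumi up`).
    return ClusterRole(
        "pod-reader",
        metadata={"name": "pod-reader"},
        rules=[{
            "apiGroups": [""],
            "resources": ["pods"],
            "verbs": ["get", "list", "watch"],
        }],
    )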
| 42.728
| 135
| 0.683767
|
import warnings
from typing import Optional
import pulumi
import pulumi.runtime
from pulumi import Input, ResourceOptions
from ... import tables, version
class ClusterRole(pulumi.CustomResource):
apiVersion: pulumi.Output[str]
kind: pulumi.Output[str]
aggregation_rule: pulumi.Output[dict]
metadata: pulumi.Output[dict]
rules: pulumi.Output[list]
def __init__(self, resource_name, opts=None, aggregation_rule=None, metadata=None, rules=None, __name__=None, __opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if not resource_name:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(resource_name, str):
raise TypeError('Expected resource name to be a string')
if opts and not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
__props__['apiVersion'] = 'rbac.authorization.k8s.io/v1'
__props__['kind'] = 'ClusterRole'
__props__['aggregationRule'] = aggregation_rule
__props__['metadata'] = metadata
__props__['rules'] = rules
__props__['status'] = None
parent = opts.parent if opts and opts.parent else None
aliases = [
pulumi.Alias(type_="kubernetes:rbac.authorization.k8s.io/v1alpha1:ClusterRole"),
pulumi.Alias(type_="kubernetes:rbac.authorization.k8s.io/v1beta1:ClusterRole"),
]
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(
version=version.get_version(),
aliases=aliases,
))
super(ClusterRole, self).__init__(
"kubernetes:rbac.authorization.k8s.io/v1:ClusterRole",
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None):
opts = ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
return ClusterRole(resource_name, opts)
def translate_output_property(self, prop: str) -> str:
return tables._CASING_FORWARD_TABLE.get(prop) or prop
def translate_input_property(self, prop: str) -> str:
return tables._CASING_BACKWARD_TABLE.get(prop) or prop
| true
| true
|
1c468d4a46a3aba4597155860cda3864d9364bfc
| 3,145
|
py
|
Python
|
test/functional/mining_getblocktemplate_longpoll.py
|
denofdevscrypto/Topaz2.0
|
34ca0e644a6b5d9524a06156568fc11c89dcffed
|
[
"MIT"
] | null | null | null |
test/functional/mining_getblocktemplate_longpoll.py
|
denofdevscrypto/Topaz2.0
|
34ca0e644a6b5d9524a06156568fc11c89dcffed
|
[
"MIT"
] | null | null | null |
test/functional/mining_getblocktemplate_longpoll.py
|
denofdevscrypto/Topaz2.0
|
34ca0e644a6b5d9524a06156568fc11c89dcffed
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test longpolling with getblocktemplate."""
from decimal import Decimal
from test_framework.test_framework import TOPAZTestFramework
from test_framework.util import get_rpc_proxy, random_transaction
import threading
class LongpollThread(threading.Thread):
def __init__(self, node):
threading.Thread.__init__(self)
# query current longpollid
templat = node.getblocktemplate()
self.longpollid = templat['longpollid']
# create a new connection to the node, we can't use the same
# connection from two threads
self.node = get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
def run(self):
self.node.getblocktemplate({'longpollid':self.longpollid})
class GetBlockTemplateLPTest(TOPAZTestFramework):
def set_test_params(self):
self.num_nodes = 2
def run_test(self):
self.log.info("Warning: this test will take about 70 seconds in the best case. Be patient.")
self.nodes[0].generate(10)
templat = self.nodes[0].getblocktemplate()
longpollid = templat['longpollid']
# longpollid should not change between successive invocations if nothing else happens
templat2 = self.nodes[0].getblocktemplate()
assert(templat2['longpollid'] == longpollid)
# Test 1: test that the longpolling wait if we do nothing
thr = LongpollThread(self.nodes[0])
thr.start()
# check that thread still lives
thr.join(5) # wait 5 seconds or until thread exits
assert(thr.is_alive())
# Test 2: test that longpoll will terminate if another node generates a block
self.nodes[1].generate(1) # generate a block on another node
# check that thread will exit now that new transaction entered mempool
thr.join(5) # wait 5 seconds or until thread exits
assert(not thr.is_alive())
# Test 3: test that longpoll will terminate if we generate a block ourselves
thr = LongpollThread(self.nodes[0])
thr.start()
self.nodes[0].generate(1) # generate a block on another node
thr.join(5) # wait 5 seconds or until thread exits
assert(not thr.is_alive())
# Test 4: test that introducing a new transaction into the mempool will terminate the longpoll
thr = LongpollThread(self.nodes[0])
thr.start()
# generate a random transaction and submit it
min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"]
# min_relay_fee is fee per 1000 bytes, which should be more than enough.
(txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), min_relay_fee, Decimal("0.001"), 20)
# after one minute, every 10 seconds the mempool is probed, so in 80 seconds it should have returned
thr.join(60 + 20)
assert(not thr.is_alive())
if __name__ == '__main__':
GetBlockTemplateLPTest().main()
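# Hedged sketch (not part of the test) of the client-side longpoll pattern the
# test exercises, expressed as raw JSON-RPC over HTTP; the URL, credentials and
# use of `requests` are placeholder assumptions.
import requests

def wait_for_new_template(url="http://user:pass@127.0.0.1:8332"):
    first = requests.post(url, json={"method": "getblocktemplate", "params": []}).json()["result"]
    # The second call blocks server-side until the template changes (new block
    # or new mempool transaction), then returns the fresh template.
    return requests.post(url, json={
        "method": "getblocktemplate",
        "params": [{"longpollid": first["longpollid"]}],
    }).json()["result"]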
| 42.5
| 112
| 0.686169
|
from decimal import Decimal
from test_framework.test_framework import TOPAZTestFramework
from test_framework.util import get_rpc_proxy, random_transaction
import threading
class LongpollThread(threading.Thread):
def __init__(self, node):
threading.Thread.__init__(self)
templat = node.getblocktemplate()
self.longpollid = templat['longpollid']
# connection from two threads
self.node = get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
def run(self):
self.node.getblocktemplate({'longpollid':self.longpollid})
class GetBlockTemplateLPTest(TOPAZTestFramework):
def set_test_params(self):
self.num_nodes = 2
def run_test(self):
self.log.info("Warning: this test will take about 70 seconds in the best case. Be patient.")
self.nodes[0].generate(10)
templat = self.nodes[0].getblocktemplate()
longpollid = templat['longpollid']
# longpollid should not change between successive invocations if nothing else happens
templat2 = self.nodes[0].getblocktemplate()
assert(templat2['longpollid'] == longpollid)
# Test 1: test that the longpolling wait if we do nothing
thr = LongpollThread(self.nodes[0])
thr.start()
# check that thread still lives
thr.join(5) # wait 5 seconds or until thread exits
assert(thr.is_alive())
# Test 2: test that longpoll will terminate if another node generates a block
self.nodes[1].generate(1) # generate a block on another node
        # check that thread will exit now that a new block was found
thr.join(5) # wait 5 seconds or until thread exits
assert(not thr.is_alive())
# Test 3: test that longpoll will terminate if we generate a block ourselves
thr = LongpollThread(self.nodes[0])
thr.start()
        self.nodes[0].generate(1) # generate a block on this node
thr.join(5) # wait 5 seconds or until thread exits
assert(not thr.is_alive())
# Test 4: test that introducing a new transaction into the mempool will terminate the longpoll
thr = LongpollThread(self.nodes[0])
thr.start()
# generate a random transaction and submit it
min_relay_fee = self.nodes[0].getnetworkinfo()["relayfee"]
# min_relay_fee is fee per 1000 bytes, which should be more than enough.
(txid, txhex, fee) = random_transaction(self.nodes, Decimal("1.1"), min_relay_fee, Decimal("0.001"), 20)
# after one minute, every 10 seconds the mempool is probed, so in 80 seconds it should have returned
thr.join(60 + 20)
assert(not thr.is_alive())
if __name__ == '__main__':
GetBlockTemplateLPTest().main()
| true
| true
|
1c468d8b2783656346e7be58ba7b95f781ed035f
| 4,896
|
py
|
Python
|
2019_noe_deep_boltzmann_tfv2/deep_boltzmann/networks/noninvertible.py
|
zierenberg/machine_learning_muca
|
6fcca12ccda7680ea4cb0e1f10bb53a68b6b0a02
|
[
"CC0-1.0"
] | null | null | null |
2019_noe_deep_boltzmann_tfv2/deep_boltzmann/networks/noninvertible.py
|
zierenberg/machine_learning_muca
|
6fcca12ccda7680ea4cb0e1f10bb53a68b6b0a02
|
[
"CC0-1.0"
] | null | null | null |
2019_noe_deep_boltzmann_tfv2/deep_boltzmann/networks/noninvertible.py
|
zierenberg/machine_learning_muca
|
6fcca12ccda7680ea4cb0e1f10bb53a68b6b0a02
|
[
"CC0-1.0"
] | null | null | null |
import keras
import tensorflow as tf
import numpy as np
from deep_boltzmann.networks import nonlinear_transform
from deep_boltzmann.networks import connect as _connect
class NormalTransformer(object):
def __init__(self, mu_layers, sigma_layers):
self.mu_layers = mu_layers
self.sigma_layers = sigma_layers
def _compute_x1(self, mu, log_sigma, w1):
return mu + tf.exp(log_sigma) * w1
def _compute_log_p1(self, mu, log_sigma, x1):
return -tf.reduce_sum(input_tensor=log_sigma, axis=1) - 0.5 * tf.reduce_sum(input_tensor=((x1 - mu)/(tf.exp(log_sigma)))**2, axis=1)
def connect(self, x0, w1):
# evaluate mu and sigma
mu = _connect(x0, self.mu_layers)
log_sigma = _connect(x0, self.sigma_layers)
# transform x
#x1 = mu + sigma * w0
self.x1 = keras.layers.Lambda(lambda args: self._compute_x1(args[0], args[1], args[2]))([mu, log_sigma, w1])
# compute density
#log_p1 = -tf.reduce_sum(sigma, axis=0) - 0.5 * tf.reduce_sum((self.x1 - mu)/sigma, axis=0)
self.log_p1 = keras.layers.Lambda(lambda args: self._compute_log_p1(args[0], args[1], args[2]))([mu, log_sigma, self.x1])
# return variable and density
return self.x1, self.log_p1
class NormalResidualTransformer(object):
def __init__(self, mu_layers, sigma_layers):
self.mu_layers = mu_layers
self.sigma_layers = sigma_layers
def _compute_x1(self, x0, mu, log_sigma, w1):
return x0 + mu + tf.exp(log_sigma) * w1
def _compute_log_p1(self, x0, mu, log_sigma, x1):
return -tf.reduce_sum(input_tensor=log_sigma, axis=1) - 0.5 * tf.reduce_sum(input_tensor=((x1 - x0 - mu)/(tf.exp(log_sigma)))**2, axis=1)
def connect(self, x0, w1):
# evaluate mu and sigma
mu = _connect(x0, self.mu_layers)
log_sigma = _connect(x0, self.sigma_layers)
# transform x
#x1 = mu + sigma * w0
self.x1 = keras.layers.Lambda(lambda args: self._compute_x1(args[0], args[1], args[2], args[3]))([x0, mu, log_sigma, w1])
# compute density
#log_p1 = -tf.reduce_sum(sigma, axis=0) - 0.5 * tf.reduce_sum((self.x1 - mu)/sigma, axis=0)
self.log_p1 = keras.layers.Lambda(lambda args: self._compute_log_p1(args[0], args[1], args[2], args[3]))([x0, mu, log_sigma, self.x1])
# return variable and density
return self.x1, self.log_p1
class NoninvNet(object):
def __init__(self, dim, layers):
self.dim = dim
self.layers = layers
self.log_p_total = None
def connect(self):
# x0 = 0
self.x0 = keras.layers.Input(shape=(self.dim,)) # current noise input
x_last = self.x0
self.xs = []
self.ws = []
self.log_ps = []
for layer in self.layers:
# noise input
w = keras.layers.Input(shape=(self.dim,)) # current noise input
self.ws.append(w)
# compute x and probability
x, log_p = layer.connect(x_last, w)
self.xs.append(x) # new state
self.log_ps.append(log_p) # conditional generation probability
# update x_last
x_last = x
# output
self.x_out = self.xs[-1]
# total probability
self.log_p_total = keras.layers.Lambda(lambda arg: tf.reduce_sum(input_tensor=arg, axis=0))(self.log_ps)
def log_probability(self):
""" Computes the total log probability of the current sample"""
return tf.reduce_sum(input_tensor=self.log_ps, axis=0)
def normal_transnet(dim, nlayers, mu_shape=(100, 100), mu_activation='relu',
sigma_shape=(100, 100), sigma_activation='tanh', residual=False,
**layer_args):
"""
dim : int
Dimension of variables
nlayers : int
Number of layers in the transformer
mu_shape : int
Number of hidden units in each nonlinear layer
mu_activation : str
Hidden-neuron activation functions used in the nonlinear layers
sigma_shape : int
Number of hidden units in each nonlinear layer
sigma_activation : str
Hidden-neuron activation functions used in the nonlinear layers
"""
layers = []
for l in range(nlayers):
mu_net = nonlinear_transform(dim, nlayers=len(mu_shape)+1, nhidden=mu_shape,
activation=mu_activation, **layer_args)
sigma_net = nonlinear_transform(dim, nlayers=len(sigma_shape)+1, nhidden=sigma_shape,
activation=sigma_activation, init_outputs=0, **layer_args)
if residual:
layer = NormalResidualTransformer(mu_net, sigma_net)
else:
layer = NormalTransformer(mu_net, sigma_net)
layers.append(layer)
ninvnet = NoninvNet(dim, layers)
ninvnet.connect()
return ninvnet
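# Hedged usage sketch (not part of the module): assembling a small stack with
# normal_transnet(); the dimensions and layer count are arbitrary choices.
if __name__ == "__main__":
    net = normal_transnet(dim=2, nlayers=3, residual=True)
    assert len(net.ws) == 3          # one noise input per transformer layer
# Side note: _compute_log_p1 above is the diagonal-Gaussian log-density up to
# the constant -0.5 * dim * log(2*pi), which cancels in likelihood ratios.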
| 39.168
| 145
| 0.62643
|
import keras
import tensorflow as tf
import numpy as np
from deep_boltzmann.networks import nonlinear_transform
from deep_boltzmann.networks import connect as _connect
class NormalTransformer(object):
def __init__(self, mu_layers, sigma_layers):
self.mu_layers = mu_layers
self.sigma_layers = sigma_layers
def _compute_x1(self, mu, log_sigma, w1):
return mu + tf.exp(log_sigma) * w1
def _compute_log_p1(self, mu, log_sigma, x1):
return -tf.reduce_sum(input_tensor=log_sigma, axis=1) - 0.5 * tf.reduce_sum(input_tensor=((x1 - mu)/(tf.exp(log_sigma)))**2, axis=1)
def connect(self, x0, w1):
mu = _connect(x0, self.mu_layers)
log_sigma = _connect(x0, self.sigma_layers)
self.x1 = keras.layers.Lambda(lambda args: self._compute_x1(args[0], args[1], args[2]))([mu, log_sigma, w1])
self.log_p1 = keras.layers.Lambda(lambda args: self._compute_log_p1(args[0], args[1], args[2]))([mu, log_sigma, self.x1])
return self.x1, self.log_p1
class NormalResidualTransformer(object):
def __init__(self, mu_layers, sigma_layers):
self.mu_layers = mu_layers
self.sigma_layers = sigma_layers
def _compute_x1(self, x0, mu, log_sigma, w1):
return x0 + mu + tf.exp(log_sigma) * w1
def _compute_log_p1(self, x0, mu, log_sigma, x1):
return -tf.reduce_sum(input_tensor=log_sigma, axis=1) - 0.5 * tf.reduce_sum(input_tensor=((x1 - x0 - mu)/(tf.exp(log_sigma)))**2, axis=1)
def connect(self, x0, w1):
mu = _connect(x0, self.mu_layers)
log_sigma = _connect(x0, self.sigma_layers)
self.x1 = keras.layers.Lambda(lambda args: self._compute_x1(args[0], args[1], args[2], args[3]))([x0, mu, log_sigma, w1])
self.log_p1 = keras.layers.Lambda(lambda args: self._compute_log_p1(args[0], args[1], args[2], args[3]))([x0, mu, log_sigma, self.x1])
return self.x1, self.log_p1
class NoninvNet(object):
def __init__(self, dim, layers):
self.dim = dim
self.layers = layers
self.log_p_total = None
def connect(self):
self.x0 = keras.layers.Input(shape=(self.dim,))
x_last = self.x0
self.xs = []
self.ws = []
self.log_ps = []
for layer in self.layers:
w = keras.layers.Input(shape=(self.dim,))
self.ws.append(w)
x, log_p = layer.connect(x_last, w)
self.xs.append(x)
self.log_ps.append(log_p)
x_last = x
self.x_out = self.xs[-1]
self.log_p_total = keras.layers.Lambda(lambda arg: tf.reduce_sum(input_tensor=arg, axis=0))(self.log_ps)
def log_probability(self):
return tf.reduce_sum(input_tensor=self.log_ps, axis=0)
def normal_transnet(dim, nlayers, mu_shape=(100, 100), mu_activation='relu',
sigma_shape=(100, 100), sigma_activation='tanh', residual=False,
**layer_args):
layers = []
for l in range(nlayers):
mu_net = nonlinear_transform(dim, nlayers=len(mu_shape)+1, nhidden=mu_shape,
activation=mu_activation, **layer_args)
sigma_net = nonlinear_transform(dim, nlayers=len(sigma_shape)+1, nhidden=sigma_shape,
activation=sigma_activation, init_outputs=0, **layer_args)
if residual:
layer = NormalResidualTransformer(mu_net, sigma_net)
else:
layer = NormalTransformer(mu_net, sigma_net)
layers.append(layer)
ninvnet = NoninvNet(dim, layers)
ninvnet.connect()
return ninvnet
| true
| true
|
1c468d98816f3a6edf4390c586235b73703529fd
| 2,298
|
py
|
Python
|
src/07 - Blurring And Smoothing/01-img_analysis.py
|
hritik5102/Awesome-Computer-Vision-Guide
|
005cd96f6d6c7dacdf1b9b5f5bf56cae3d6cea18
|
[
"MIT"
] | null | null | null |
src/07 - Blurring And Smoothing/01-img_analysis.py
|
hritik5102/Awesome-Computer-Vision-Guide
|
005cd96f6d6c7dacdf1b9b5f5bf56cae3d6cea18
|
[
"MIT"
] | null | null | null |
src/07 - Blurring And Smoothing/01-img_analysis.py
|
hritik5102/Awesome-Computer-Vision-Guide
|
005cd96f6d6c7dacdf1b9b5f5bf56cae3d6cea18
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
def nothing(x):
pass
#img = cv2.imread('img.jpeg',-1)
cap=cv2.VideoCapture(0)
cv2.namedWindow('image')
cv2.resizeWindow('image',600,350)
#Creating trackbar
cv2.createTrackbar('lh','image',0,255,nothing)
cv2.createTrackbar('uh','image',0,255,nothing)
cv2.createTrackbar('ls','image',0,255,nothing)
cv2.createTrackbar('us','image',0,255,nothing)
cv2.createTrackbar('lv','image',0,255,nothing)
cv2.createTrackbar('uv','image',0,255,nothing)
#cv2.createTrackbar('switch','image',0,1,nothing)
#set track bar
cv2.setTrackbarPos('lh','image',0)
cv2.setTrackbarPos('uh','image',58)
cv2.setTrackbarPos('ls','image',45)
cv2.setTrackbarPos('us','image',255)
cv2.setTrackbarPos('lv','image',54)
cv2.setTrackbarPos('uv','image',168)
while True:
_,img=cap.read()
hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
#while 1 :
#reading trackbar
lh=cv2.getTrackbarPos('lh','image')
uh=cv2.getTrackbarPos('uh','image')
ls=cv2.getTrackbarPos('ls','image')
us=cv2.getTrackbarPos('us','image')
lv=cv2.getTrackbarPos('lv','image')
uv=cv2.getTrackbarPos('uv','image')
#switch = cv2.getTrackbarPos('switch','image')
l_r=np.array([lh,ls,lv])
u_r=np.array([uh,us,uv])
mask = cv2.inRange(hsv,l_r,u_r)
res=cv2.bitwise_and(img,img,mask=mask)
#blur
k=np.ones((15,15),np.float32)/225
s= cv2.filter2D(res,-1,k)
b= cv2.GaussianBlur(res,(15,15),0)
m= cv2.medianBlur(res,15)
bb =cv2.bilateralFilter(res , 15 , 75, 75)#useless
#morphology
k2= np.ones((5,5) , np.uint8)
e=cv2.erode(mask,k2,1)
d=cv2.dilate(mask,k2,1)
o=cv2.morphologyEx(mask,cv2.MORPH_OPEN,k2)
c=cv2.morphologyEx(mask,cv2.MORPH_CLOSE,k2)
oc=cv2.morphologyEx(o,cv2.MORPH_CLOSE,k2)#same as close+open
#output
#cv2.imshow('img',img)
#cv2.imshow('mask',mask)
#cv2.waitKey(1000)
cv2.imshow('res',res)
#cv2.imshow('blur',s)
#cv2.imshow('Gblur',b)
#cv2.imshow('medblur',m)
#cv2.imshow('bilateralblur',bb)
#cv2.imshow('erode',e)
#cv2.imshow('dillate',d)
#cv2.imshow('openM',o)
#cv2.imshow('closeM',c)
#cv2.imshow('OnC_M',oc)
if cv2.waitKey(1) & 0xFF==ord('q'):
break
#cv2.waitKey(0)
cap.release()
cv2.destroyAllWindows()
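# Hedged side notes (not part of the script): np.ones((15, 15)) has 225
# entries, so dividing by 225 makes the filter2D kernel a normalized 15x15 box
# (mean) blur; and inRange maps in-range pixels to 255. Two tiny checks:
assert np.ones((15, 15), np.float32).sum() == 225.0
_px = np.uint8([[[30, 128, 100]]])  # one HSV pixel inside the preset bounds
assert cv2.inRange(_px, np.array([0, 45, 54]), np.array([58, 255, 168]))[0, 0] == 255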
| 21.679245
| 64
| 0.64839
|
import cv2
import numpy as np
def nothing(x):
pass
cap=cv2.VideoCapture(0)
cv2.namedWindow('image')
cv2.resizeWindow('image',600,350)
cv2.createTrackbar('lh','image',0,255,nothing)
cv2.createTrackbar('uh','image',0,255,nothing)
cv2.createTrackbar('ls','image',0,255,nothing)
cv2.createTrackbar('us','image',0,255,nothing)
cv2.createTrackbar('lv','image',0,255,nothing)
cv2.createTrackbar('uv','image',0,255,nothing)
cv2.setTrackbarPos('lh','image',0)
cv2.setTrackbarPos('uh','image',58)
cv2.setTrackbarPos('ls','image',45)
cv2.setTrackbarPos('us','image',255)
cv2.setTrackbarPos('lv','image',54)
cv2.setTrackbarPos('uv','image',168)
while True:
_,img=cap.read()
hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
lh=cv2.getTrackbarPos('lh','image')
uh=cv2.getTrackbarPos('uh','image')
ls=cv2.getTrackbarPos('ls','image')
us=cv2.getTrackbarPos('us','image')
lv=cv2.getTrackbarPos('lv','image')
uv=cv2.getTrackbarPos('uv','image')
l_r=np.array([lh,ls,lv])
u_r=np.array([uh,us,uv])
mask = cv2.inRange(hsv,l_r,u_r)
res=cv2.bitwise_and(img,img,mask=mask)
k=np.ones((15,15),np.float32)/225
s= cv2.filter2D(res,-1,k)
b= cv2.GaussianBlur(res,(15,15),0)
m= cv2.medianBlur(res,15)
bb =cv2.bilateralFilter(res , 15 , 75, 75)
k2= np.ones((5,5) , np.uint8)
    e = cv2.erode(mask, k2, iterations=1)
    d = cv2.dilate(mask, k2, iterations=1)
o=cv2.morphologyEx(mask,cv2.MORPH_OPEN,k2)
c=cv2.morphologyEx(mask,cv2.MORPH_CLOSE,k2)
oc=cv2.morphologyEx(o,cv2.MORPH_CLOSE,k2)
cv2.imshow('res',res)
if cv2.waitKey(1) & 0xFF==ord('q'):
break
cap.release()
cv2.destroyAllWindows()
| true
| true
|
1c468dca7335ecf4d19068d904ca06e6efdee798
| 18,712
|
py
|
Python
|
leanerp/helpdesk/management/commands/get_email.py
|
seLain/Leaf
|
f02e15576071429a29f76a06328d024b58a2d69e
|
[
"Apache-2.0"
] | null | null | null |
leanerp/helpdesk/management/commands/get_email.py
|
seLain/Leaf
|
f02e15576071429a29f76a06328d024b58a2d69e
|
[
"Apache-2.0"
] | 6
|
2018-02-20T13:59:07.000Z
|
2018-03-06T17:35:41.000Z
|
leanerp/helpdesk/management/commands/get_email.py
|
seLain/Leaf
|
f02e15576071429a29f76a06328d024b58a2d69e
|
[
"Apache-2.0"
] | 1
|
2018-03-06T17:28:07.000Z
|
2018-03-06T17:28:07.000Z
|
#!/usr/bin/python
"""
Jutda Helpdesk - A Django powered ticket tracker for small enterprise.
(c) Copyright 2008 Jutda. All Rights Reserved. See LICENSE for details.
scripts/get_email.py - Designed to be run from cron, this script checks the
POP and IMAP boxes, or a local mailbox directory,
defined for the queues within a
helpdesk, creating tickets from the new messages (or
adding to existing tickets if needed)
"""
from __future__ import unicode_literals
from datetime import timedelta
import email
import imaplib
import mimetypes
from os import listdir, unlink
from os.path import isfile, join
import poplib
import re
import socket
from time import ctime
from email_reply_parser import EmailReplyParser
from django.core.files.base import ContentFile
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.management.base import BaseCommand
from django.db.models import Q
from django.utils.translation import ugettext as _
from django.utils import encoding, six, timezone
from helpdesk import settings
from helpdesk.lib import send_templated_mail, safe_template_context, process_attachments
from helpdesk.models import Queue, Ticket, TicketCC, FollowUp, IgnoreEmail
from django.contrib.auth.models import User
import logging
STRIPPED_SUBJECT_STRINGS = [
"Re: ",
"Fw: ",
"RE: ",
"FW: ",
"Automatic reply: ",
]
class Command(BaseCommand):
def __init__(self):
BaseCommand.__init__(self)
help = 'Process django-helpdesk queues and process e-mails via POP3/IMAP or ' \
'from a local mailbox directory as required, feeding them into the helpdesk.'
def add_arguments(self, parser):
parser.add_argument(
'--quiet',
action='store_true',
dest='quiet',
default=False,
help='Hide details about each queue/message as they are processed',
)
def handle(self, *args, **options):
quiet = options.get('quiet', False)
process_email(quiet=quiet)
def process_email(quiet=False):
for q in Queue.objects.filter(
email_box_type__isnull=False,
allow_email_submission=True):
logger = logging.getLogger('django.helpdesk.queue.' + q.slug)
if not q.logging_type or q.logging_type == 'none':
logging.disable(logging.CRITICAL) # disable all messages
elif q.logging_type == 'info':
logger.setLevel(logging.INFO)
elif q.logging_type == 'warn':
logger.setLevel(logging.WARN)
elif q.logging_type == 'error':
logger.setLevel(logging.ERROR)
elif q.logging_type == 'crit':
logger.setLevel(logging.CRITICAL)
elif q.logging_type == 'debug':
logger.setLevel(logging.DEBUG)
if quiet:
logger.propagate = False # do not propagate to root logger that would log to console
logdir = q.logging_dir or '/var/log/helpdesk/'
handler = logging.FileHandler(join(logdir, q.slug + '_get_email.log'))
logger.addHandler(handler)
if not q.email_box_last_check:
q.email_box_last_check = timezone.now() - timedelta(minutes=30)
queue_time_delta = timedelta(minutes=q.email_box_interval or 0)
if (q.email_box_last_check + queue_time_delta) < timezone.now():
process_queue(q, logger=logger)
q.email_box_last_check = timezone.now()
q.save()
def process_queue(q, logger):
logger.info("***** %s: Begin processing mail for django-helpdesk" % ctime())
if q.socks_proxy_type and q.socks_proxy_host and q.socks_proxy_port:
try:
import socks
except ImportError:
no_socks_msg = "Queue has been configured with proxy settings, " \
"but no socks library was installed. Try to " \
"install PySocks via PyPI."
logger.error(no_socks_msg)
raise ImportError(no_socks_msg)
proxy_type = {
'socks4': socks.SOCKS4,
'socks5': socks.SOCKS5,
}.get(q.socks_proxy_type)
socks.set_default_proxy(proxy_type=proxy_type,
addr=q.socks_proxy_host,
port=q.socks_proxy_port)
socket.socket = socks.socksocket
elif six.PY2:
socket.socket = socket._socketobject
email_box_type = settings.QUEUE_EMAIL_BOX_TYPE or q.email_box_type
if email_box_type == 'pop3':
if q.email_box_ssl or settings.QUEUE_EMAIL_BOX_SSL:
if not q.email_box_port:
q.email_box_port = 995
server = poplib.POP3_SSL(q.email_box_host or
settings.QUEUE_EMAIL_BOX_HOST,
int(q.email_box_port))
else:
if not q.email_box_port:
q.email_box_port = 110
server = poplib.POP3(q.email_box_host or
settings.QUEUE_EMAIL_BOX_HOST,
int(q.email_box_port))
logger.info("Attempting POP3 server login")
server.getwelcome()
server.user(q.email_box_user or settings.QUEUE_EMAIL_BOX_USER)
server.pass_(q.email_box_pass or settings.QUEUE_EMAIL_BOX_PASSWORD)
messagesInfo = server.list()[1]
logger.info("Received %d messages from POP3 server" % len(messagesInfo))
for msg in messagesInfo:
msgNum = msg.split(" ")[0]
logger.info("Processing message %s" % msgNum)
full_message = encoding.force_text("\n".join(server.retr(msgNum)[1]), errors='replace')
ticket = ticket_from_message(message=full_message, queue=q, logger=logger)
if ticket:
server.dele(msgNum)
logger.info("Successfully processed message %s, deleted from POP3 server" % msgNum)
else:
logger.warn("Message %s was not successfully processed, and will be left on POP3 server" % msgNum)
server.quit()
elif email_box_type == 'imap':
if q.email_box_ssl or settings.QUEUE_EMAIL_BOX_SSL:
if not q.email_box_port:
q.email_box_port = 993
server = imaplib.IMAP4_SSL(q.email_box_host or
settings.QUEUE_EMAIL_BOX_HOST,
int(q.email_box_port))
else:
if not q.email_box_port:
q.email_box_port = 143
server = imaplib.IMAP4(q.email_box_host or
settings.QUEUE_EMAIL_BOX_HOST,
int(q.email_box_port))
logger.info("Attempting IMAP server login")
server.login(q.email_box_user or
settings.QUEUE_EMAIL_BOX_USER,
q.email_box_pass or
settings.QUEUE_EMAIL_BOX_PASSWORD)
server.select(q.email_box_imap_folder)
status, data = server.search(None, 'NOT', 'DELETED')
if data:
msgnums = data[0].split()
logger.info("Received %d messages from IMAP server" % len(msgnums))
for num in msgnums:
logger.info("Processing message %s" % num)
status, data = server.fetch(num, '(RFC822)')
full_message = encoding.force_text(data[0][1], errors='replace')
ticket = ticket_from_message(message=full_message, queue=q, logger=logger)
if ticket:
server.store(num, '+FLAGS', '\\Deleted')
logger.info("Successfully processed message %s, deleted from IMAP server" % num)
else:
logger.warn("Message %s was not successfully processed, and will be left on IMAP server" % num)
server.expunge()
server.close()
server.logout()
elif email_box_type == 'local':
mail_dir = q.email_box_local_dir or '/var/lib/mail/helpdesk/'
mail = [join(mail_dir, f) for f in listdir(mail_dir) if isfile(join(mail_dir, f))]
logger.info("Found %d messages in local mailbox directory" % len(mail))
logger.info("Found %d messages in local mailbox directory" % len(mail))
for i, m in enumerate(mail, 1):
logger.info("Processing message %d" % i)
with open(m, 'r') as f:
full_message = encoding.force_text(f.read(), errors='replace')
ticket = ticket_from_message(message=full_message, queue=q, logger=logger)
if ticket:
logger.info("Successfully processed message %d, ticket/comment created." % i)
try:
unlink(m) # delete message file if ticket was successful
except:
logger.error("Unable to delete message %d." % i)
else:
logger.info("Successfully deleted message %d." % i)
else:
logger.warn("Message %d was not successfully processed, and will be left in local directory" % i)
def decodeUnknown(charset, string):
if six.PY2:
if not charset:
try:
return string.decode('utf-8', 'replace')
except:
return string.decode('iso8859-1', 'replace')
return unicode(string, charset)
elif six.PY3:
if type(string) is not str:
if not charset:
try:
return str(string, encoding='utf-8', errors='replace')
except:
return str(string, encoding='iso8859-1', errors='replace')
return str(string, encoding=charset, errors='replace')
return string
def decode_mail_headers(string):
decoded = email.header.decode_header(string) if six.PY3 else email.header.decode_header(string.encode('utf-8'))
if six.PY2:
return u' '.join([unicode(msg, charset or 'utf-8') for msg, charset in decoded])
elif six.PY3:
return u' '.join([str(msg, encoding=charset, errors='replace') if charset else str(msg) for msg, charset in decoded])
def ticket_from_message(message, queue, logger):
# 'message' must be an RFC822 formatted message.
message = email.message_from_string(message) if six.PY3 else email.message_from_string(message.encode('utf-8'))
subject = message.get('subject', _('Comment from e-mail'))
subject = decode_mail_headers(decodeUnknown(message.get_charset(), subject))
for affix in STRIPPED_SUBJECT_STRINGS:
subject = subject.replace(affix, "")
subject = subject.strip()
sender = message.get('from', _('Unknown Sender'))
sender = decode_mail_headers(decodeUnknown(message.get_charset(), sender))
sender_email = email.utils.parseaddr(sender)[1]
cc = message.get_all('cc', None)
if cc:
# first, fixup the encoding if necessary
cc = [decode_mail_headers(decodeUnknown(message.get_charset(), x)) for x in cc]
        # get_all collects every CC header, but each header may itself contain comma-separated addresses
tempcc = []
for hdr in cc:
tempcc.extend(hdr.split(','))
# use a set to ensure no duplicates
cc = set([x.strip() for x in tempcc])
for ignore in IgnoreEmail.objects.filter(Q(queues=queue) | Q(queues__isnull=True)):
if ignore.test(sender_email):
if ignore.keep_in_mailbox:
                # Returning False keeps the message in the mailbox;
                # returning True causes it to be deleted.
return False
return True
matchobj = re.match(r".*\[" + queue.slug + "-(?P<id>\d+)\]", subject)
if matchobj:
# This is a reply or forward.
ticket = matchobj.group('id')
logger.info("Matched tracking ID %s-%s" % (queue.slug, ticket))
else:
logger.info("No tracking ID matched.")
ticket = None
body = None
counter = 0
files = []
for part in message.walk():
if part.get_content_maintype() == 'multipart':
continue
name = part.get_param("name")
if name:
name = email.utils.collapse_rfc2231_value(name)
if part.get_content_maintype() == 'text' and name is None:
if part.get_content_subtype() == 'plain':
body = EmailReplyParser.parse_reply(
decodeUnknown(part.get_content_charset(), part.get_payload(decode=True))
)
# workaround to get unicode text out rather than escaped text
body = body.encode('ascii').decode('unicode_escape') if six.PY3 else body.encode('utf-8')
logger.debug("Discovered plain text MIME part")
else:
files.append(
SimpleUploadedFile(_("email_html_body.html"), encoding.smart_bytes(part.get_payload()), 'text/html')
)
logger.debug("Discovered HTML MIME part")
else:
if not name:
ext = mimetypes.guess_extension(part.get_content_type())
name = "part-%i%s" % (counter, ext)
files.append(SimpleUploadedFile(name, encoding.smart_bytes(part.get_payload()), part.get_content_type()))
logger.debug("Found MIME attachment %s" % name)
counter += 1
if not body:
body = _('No plain-text email body available. Please see attachment "email_html_body.html".')
if ticket:
try:
t = Ticket.objects.get(id=ticket)
except Ticket.DoesNotExist:
logger.info("Tracking ID %s-%s not associated with existing ticket. Creating new ticket." % (queue.slug, ticket))
ticket = None
else:
logger.info("Found existing ticket with Tracking ID %s-%s" % (t.queue.slug, t.id))
if t.status == Ticket.CLOSED_STATUS:
t.status = Ticket.REOPENED_STATUS
t.save()
new = False
smtp_priority = message.get('priority', '')
smtp_importance = message.get('importance', '')
high_priority_types = {'high', 'important', '1', 'urgent'}
priority = 2 if high_priority_types & {smtp_priority, smtp_importance} else 3
if ticket is None:
new = True
t = Ticket.objects.create(
title=subject,
queue=queue,
submitter_email=sender_email,
created=timezone.now(),
description=body,
priority=priority,
)
logger.debug("Created new ticket %s-%s" % (t.queue.slug, t.id))
if cc:
# get list of currently CC'd emails
current_cc = TicketCC.objects.filter(ticket=ticket)
current_cc_emails = [x.email for x in current_cc]
# get emails of any Users CC'd to email
current_cc_users = [x.user.email for x in current_cc]
# ensure submitter, assigned user, queue email not added
other_emails = [queue.email_address]
if t.submitter_email:
other_emails.append(t.submitter_email)
if t.assigned_to:
other_emails.append(t.assigned_to.email)
current_cc = set(current_cc_emails + current_cc_users + other_emails)
# first, add any User not previously CC'd (as identified by User's email)
all_users = User.objects.all()
all_user_emails = set([x.email for x in all_users])
users_not_currently_ccd = all_user_emails.difference(set(current_cc))
users_to_cc = cc.intersection(users_not_currently_ccd)
for user in users_to_cc:
tcc = TicketCC.objects.create(
ticket=t,
user=User.objects.get(email=user),
can_view=True,
can_update=False
)
tcc.save()
# then add remaining emails alphabetically, makes testing easy
new_cc = cc.difference(current_cc).difference(all_user_emails)
new_cc = sorted(list(new_cc))
for ccemail in new_cc:
tcc = TicketCC.objects.create(
ticket=t,
email=ccemail,
can_view=True,
can_update=False
)
tcc.save()
f = FollowUp(
ticket=t,
title=_('E-Mail Received from %(sender_email)s' % {'sender_email': sender_email}),
date=timezone.now(),
public=True,
comment=body,
)
if t.status == Ticket.REOPENED_STATUS:
f.new_status = Ticket.REOPENED_STATUS
f.title = _('Ticket Re-Opened by E-Mail Received from %(sender_email)s' % {'sender_email': sender_email})
f.save()
logger.debug("Created new FollowUp for Ticket")
if six.PY2:
logger.info(("[%s-%s] %s" % (t.queue.slug, t.id, t.title,)).encode('ascii', 'replace'))
elif six.PY3:
logger.info("[%s-%s] %s" % (t.queue.slug, t.id, t.title,))
attached = process_attachments(f, files)
for att_file in attached:
logger.info("Attachment '%s' successfully added to ticket from email." % att_file[0])
context = safe_template_context(t)
if new:
if sender_email:
send_templated_mail(
'newticket_submitter',
context,
recipients=sender_email,
sender=queue.from_address,
fail_silently=True,
)
if queue.new_ticket_cc:
send_templated_mail(
'newticket_cc',
context,
recipients=queue.new_ticket_cc,
sender=queue.from_address,
fail_silently=True,
)
if queue.updated_ticket_cc and queue.updated_ticket_cc != queue.new_ticket_cc:
send_templated_mail(
'newticket_cc',
context,
recipients=queue.updated_ticket_cc,
sender=queue.from_address,
fail_silently=True,
)
else:
context.update(comment=f.comment)
if t.assigned_to:
send_templated_mail(
'updated_owner',
context,
recipients=t.assigned_to.email,
sender=queue.from_address,
fail_silently=True,
)
if queue.updated_ticket_cc:
send_templated_mail(
'updated_cc',
context,
recipients=queue.updated_ticket_cc,
sender=queue.from_address,
fail_silently=True,
)
return t
if __name__ == '__main__':
process_email()
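The tracking-ID parsing inside ticket_from_message is easy to check in isolation. A minimal sketch with a hypothetical queue slug and subject line:

import re

subject = "Re: Printer is down [support-42]"
match = re.match(r".*\[support-(?P<id>\d+)\]", subject)
print(match.group("id") if match else None)   # -> 42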
| 38.502058
| 125
| 0.595981
|
from __future__ import unicode_literals
from datetime import timedelta
import email
import imaplib
import mimetypes
from os import listdir, unlink
from os.path import isfile, join
import poplib
import re
import socket
from time import ctime
from email_reply_parser import EmailReplyParser
from django.core.files.base import ContentFile
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.management.base import BaseCommand
from django.db.models import Q
from django.utils.translation import ugettext as _
from django.utils import encoding, six, timezone
from helpdesk import settings
from helpdesk.lib import send_templated_mail, safe_template_context, process_attachments
from helpdesk.models import Queue, Ticket, TicketCC, FollowUp, IgnoreEmail
from django.contrib.auth.models import User
import logging
STRIPPED_SUBJECT_STRINGS = [
"Re: ",
"Fw: ",
"RE: ",
"FW: ",
"Automatic reply: ",
]
class Command(BaseCommand):
def __init__(self):
BaseCommand.__init__(self)
help = 'Process django-helpdesk queues and process e-mails via POP3/IMAP or ' \
'from a local mailbox directory as required, feeding them into the helpdesk.'
def add_arguments(self, parser):
parser.add_argument(
'--quiet',
action='store_true',
dest='quiet',
default=False,
help='Hide details about each queue/message as they are processed',
)
def handle(self, *args, **options):
quiet = options.get('quiet', False)
process_email(quiet=quiet)
def process_email(quiet=False):
for q in Queue.objects.filter(
email_box_type__isnull=False,
allow_email_submission=True):
logger = logging.getLogger('django.helpdesk.queue.' + q.slug)
if not q.logging_type or q.logging_type == 'none':
logging.disable(logging.CRITICAL)
elif q.logging_type == 'info':
logger.setLevel(logging.INFO)
elif q.logging_type == 'warn':
logger.setLevel(logging.WARN)
elif q.logging_type == 'error':
logger.setLevel(logging.ERROR)
elif q.logging_type == 'crit':
logger.setLevel(logging.CRITICAL)
elif q.logging_type == 'debug':
logger.setLevel(logging.DEBUG)
if quiet:
logger.propagate = False
logdir = q.logging_dir or '/var/log/helpdesk/'
handler = logging.FileHandler(join(logdir, q.slug + '_get_email.log'))
logger.addHandler(handler)
if not q.email_box_last_check:
q.email_box_last_check = timezone.now() - timedelta(minutes=30)
queue_time_delta = timedelta(minutes=q.email_box_interval or 0)
if (q.email_box_last_check + queue_time_delta) < timezone.now():
process_queue(q, logger=logger)
q.email_box_last_check = timezone.now()
q.save()
def process_queue(q, logger):
logger.info("***** %s: Begin processing mail for django-helpdesk" % ctime())
if q.socks_proxy_type and q.socks_proxy_host and q.socks_proxy_port:
try:
import socks
except ImportError:
no_socks_msg = "Queue has been configured with proxy settings, " \
"but no socks library was installed. Try to " \
"install PySocks via PyPI."
logger.error(no_socks_msg)
raise ImportError(no_socks_msg)
proxy_type = {
'socks4': socks.SOCKS4,
'socks5': socks.SOCKS5,
}.get(q.socks_proxy_type)
socks.set_default_proxy(proxy_type=proxy_type,
addr=q.socks_proxy_host,
port=q.socks_proxy_port)
socket.socket = socks.socksocket
elif six.PY2:
socket.socket = socket._socketobject
email_box_type = settings.QUEUE_EMAIL_BOX_TYPE or q.email_box_type
if email_box_type == 'pop3':
if q.email_box_ssl or settings.QUEUE_EMAIL_BOX_SSL:
if not q.email_box_port:
q.email_box_port = 995
server = poplib.POP3_SSL(q.email_box_host or
settings.QUEUE_EMAIL_BOX_HOST,
int(q.email_box_port))
else:
if not q.email_box_port:
q.email_box_port = 110
server = poplib.POP3(q.email_box_host or
settings.QUEUE_EMAIL_BOX_HOST,
int(q.email_box_port))
logger.info("Attempting POP3 server login")
server.getwelcome()
server.user(q.email_box_user or settings.QUEUE_EMAIL_BOX_USER)
server.pass_(q.email_box_pass or settings.QUEUE_EMAIL_BOX_PASSWORD)
messagesInfo = server.list()[1]
logger.info("Received %d messages from POP3 server" % len(messagesInfo))
for msg in messagesInfo:
msgNum = msg.split(" ")[0]
logger.info("Processing message %s" % msgNum)
full_message = encoding.force_text("\n".join(server.retr(msgNum)[1]), errors='replace')
ticket = ticket_from_message(message=full_message, queue=q, logger=logger)
if ticket:
server.dele(msgNum)
logger.info("Successfully processed message %s, deleted from POP3 server" % msgNum)
else:
logger.warn("Message %s was not successfully processed, and will be left on POP3 server" % msgNum)
server.quit()
elif email_box_type == 'imap':
if q.email_box_ssl or settings.QUEUE_EMAIL_BOX_SSL:
if not q.email_box_port:
q.email_box_port = 993
server = imaplib.IMAP4_SSL(q.email_box_host or
settings.QUEUE_EMAIL_BOX_HOST,
int(q.email_box_port))
else:
if not q.email_box_port:
q.email_box_port = 143
server = imaplib.IMAP4(q.email_box_host or
settings.QUEUE_EMAIL_BOX_HOST,
int(q.email_box_port))
logger.info("Attempting IMAP server login")
server.login(q.email_box_user or
settings.QUEUE_EMAIL_BOX_USER,
q.email_box_pass or
settings.QUEUE_EMAIL_BOX_PASSWORD)
server.select(q.email_box_imap_folder)
status, data = server.search(None, 'NOT', 'DELETED')
if data:
msgnums = data[0].split()
logger.info("Received %d messages from IMAP server" % len(msgnums))
for num in msgnums:
logger.info("Processing message %s" % num)
status, data = server.fetch(num, '(RFC822)')
full_message = encoding.force_text(data[0][1], errors='replace')
ticket = ticket_from_message(message=full_message, queue=q, logger=logger)
if ticket:
server.store(num, '+FLAGS', '\\Deleted')
logger.info("Successfully processed message %s, deleted from IMAP server" % num)
else:
logger.warn("Message %s was not successfully processed, and will be left on IMAP server" % num)
server.expunge()
server.close()
server.logout()
elif email_box_type == 'local':
mail_dir = q.email_box_local_dir or '/var/lib/mail/helpdesk/'
mail = [join(mail_dir, f) for f in listdir(mail_dir) if isfile(join(mail_dir, f))]
logger.info("Found %d messages in local mailbox directory" % len(mail))
logger.info("Found %d messages in local mailbox directory" % len(mail))
for i, m in enumerate(mail, 1):
logger.info("Processing message %d" % i)
with open(m, 'r') as f:
full_message = encoding.force_text(f.read(), errors='replace')
ticket = ticket_from_message(message=full_message, queue=q, logger=logger)
if ticket:
logger.info("Successfully processed message %d, ticket/comment created." % i)
try:
unlink(m)
except:
logger.error("Unable to delete message %d." % i)
else:
logger.info("Successfully deleted message %d." % i)
else:
logger.warn("Message %d was not successfully processed, and will be left in local directory" % i)
def decodeUnknown(charset, string):
if six.PY2:
if not charset:
try:
return string.decode('utf-8', 'replace')
except:
return string.decode('iso8859-1', 'replace')
return unicode(string, charset)
elif six.PY3:
if type(string) is not str:
if not charset:
try:
return str(string, encoding='utf-8', errors='replace')
except:
return str(string, encoding='iso8859-1', errors='replace')
return str(string, encoding=charset, errors='replace')
return string
def decode_mail_headers(string):
decoded = email.header.decode_header(string) if six.PY3 else email.header.decode_header(string.encode('utf-8'))
if six.PY2:
return u' '.join([unicode(msg, charset or 'utf-8') for msg, charset in decoded])
elif six.PY3:
return u' '.join([str(msg, encoding=charset, errors='replace') if charset else str(msg) for msg, charset in decoded])
def ticket_from_message(message, queue, logger):
message = email.message_from_string(message) if six.PY3 else email.message_from_string(message.encode('utf-8'))
subject = message.get('subject', _('Comment from e-mail'))
subject = decode_mail_headers(decodeUnknown(message.get_charset(), subject))
for affix in STRIPPED_SUBJECT_STRINGS:
subject = subject.replace(affix, "")
subject = subject.strip()
sender = message.get('from', _('Unknown Sender'))
sender = decode_mail_headers(decodeUnknown(message.get_charset(), sender))
sender_email = email.utils.parseaddr(sender)[1]
cc = message.get_all('cc', None)
if cc:
cc = [decode_mail_headers(decodeUnknown(message.get_charset(), x)) for x in cc]
tempcc = []
for hdr in cc:
tempcc.extend(hdr.split(','))
cc = set([x.strip() for x in tempcc])
for ignore in IgnoreEmail.objects.filter(Q(queues=queue) | Q(queues__isnull=True)):
if ignore.test(sender_email):
if ignore.keep_in_mailbox:
return False
return True
matchobj = re.match(r".*\[" + queue.slug + "-(?P<id>\d+)\]", subject)
if matchobj:
ticket = matchobj.group('id')
logger.info("Matched tracking ID %s-%s" % (queue.slug, ticket))
else:
logger.info("No tracking ID matched.")
ticket = None
body = None
counter = 0
files = []
for part in message.walk():
if part.get_content_maintype() == 'multipart':
continue
name = part.get_param("name")
if name:
name = email.utils.collapse_rfc2231_value(name)
if part.get_content_maintype() == 'text' and name is None:
if part.get_content_subtype() == 'plain':
body = EmailReplyParser.parse_reply(
decodeUnknown(part.get_content_charset(), part.get_payload(decode=True))
)
body = body.encode('ascii').decode('unicode_escape') if six.PY3 else body.encode('utf-8')
logger.debug("Discovered plain text MIME part")
else:
files.append(
SimpleUploadedFile(_("email_html_body.html"), encoding.smart_bytes(part.get_payload()), 'text/html')
)
logger.debug("Discovered HTML MIME part")
else:
if not name:
ext = mimetypes.guess_extension(part.get_content_type())
name = "part-%i%s" % (counter, ext)
files.append(SimpleUploadedFile(name, encoding.smart_bytes(part.get_payload()), part.get_content_type()))
logger.debug("Found MIME attachment %s" % name)
counter += 1
if not body:
body = _('No plain-text email body available. Please see attachment "email_html_body.html".')
if ticket:
try:
t = Ticket.objects.get(id=ticket)
except Ticket.DoesNotExist:
logger.info("Tracking ID %s-%s not associated with existing ticket. Creating new ticket." % (queue.slug, ticket))
ticket = None
else:
logger.info("Found existing ticket with Tracking ID %s-%s" % (t.queue.slug, t.id))
if t.status == Ticket.CLOSED_STATUS:
t.status = Ticket.REOPENED_STATUS
t.save()
new = False
smtp_priority = message.get('priority', '')
smtp_importance = message.get('importance', '')
high_priority_types = {'high', 'important', '1', 'urgent'}
priority = 2 if high_priority_types & {smtp_priority, smtp_importance} else 3
if ticket is None:
new = True
t = Ticket.objects.create(
title=subject,
queue=queue,
submitter_email=sender_email,
created=timezone.now(),
description=body,
priority=priority,
)
logger.debug("Created new ticket %s-%s" % (t.queue.slug, t.id))
if cc:
current_cc = TicketCC.objects.filter(ticket=ticket)
current_cc_emails = [x.email for x in current_cc]
# get emails of any Users CC'd to email
current_cc_users = [x.user.email for x in current_cc]
other_emails = [queue.email_address]
if t.submitter_email:
other_emails.append(t.submitter_email)
if t.assigned_to:
other_emails.append(t.assigned_to.email)
current_cc = set(current_cc_emails + current_cc_users + other_emails)
all_users = User.objects.all()
all_user_emails = set([x.email for x in all_users])
users_not_currently_ccd = all_user_emails.difference(set(current_cc))
users_to_cc = cc.intersection(users_not_currently_ccd)
for user in users_to_cc:
tcc = TicketCC.objects.create(
ticket=t,
user=User.objects.get(email=user),
can_view=True,
can_update=False
)
tcc.save()
new_cc = cc.difference(current_cc).difference(all_user_emails)
new_cc = sorted(list(new_cc))
for ccemail in new_cc:
tcc = TicketCC.objects.create(
ticket=t,
email=ccemail,
can_view=True,
can_update=False
)
tcc.save()
f = FollowUp(
ticket=t,
title=_('E-Mail Received from %(sender_email)s' % {'sender_email': sender_email}),
date=timezone.now(),
public=True,
comment=body,
)
if t.status == Ticket.REOPENED_STATUS:
f.new_status = Ticket.REOPENED_STATUS
f.title = _('Ticket Re-Opened by E-Mail Received from %(sender_email)s' % {'sender_email': sender_email})
f.save()
logger.debug("Created new FollowUp for Ticket")
if six.PY2:
logger.info(("[%s-%s] %s" % (t.queue.slug, t.id, t.title,)).encode('ascii', 'replace'))
elif six.PY3:
logger.info("[%s-%s] %s" % (t.queue.slug, t.id, t.title,))
attached = process_attachments(f, files)
for att_file in attached:
logger.info("Attachment '%s' successfully added to ticket from email." % att_file[0])
context = safe_template_context(t)
if new:
if sender_email:
send_templated_mail(
'newticket_submitter',
context,
recipients=sender_email,
sender=queue.from_address,
fail_silently=True,
)
if queue.new_ticket_cc:
send_templated_mail(
'newticket_cc',
context,
recipients=queue.new_ticket_cc,
sender=queue.from_address,
fail_silently=True,
)
if queue.updated_ticket_cc and queue.updated_ticket_cc != queue.new_ticket_cc:
send_templated_mail(
'newticket_cc',
context,
recipients=queue.updated_ticket_cc,
sender=queue.from_address,
fail_silently=True,
)
else:
context.update(comment=f.comment)
if t.assigned_to:
send_templated_mail(
'updated_owner',
context,
recipients=t.assigned_to.email,
sender=queue.from_address,
fail_silently=True,
)
if queue.updated_ticket_cc:
send_templated_mail(
'updated_cc',
context,
recipients=queue.updated_ticket_cc,
sender=queue.from_address,
fail_silently=True,
)
return t
if __name__ == '__main__':
process_email()
| true
| true
|
1c468e275b37e8f12a979296a79fb4db9882c0cd
| 838
|
py
|
Python
|
lib/colors.py
|
Saveurian/Cleartext_Scanner
|
c54828fc6321b8549245f7914b4749b9114df7e0
|
[
"Unlicense"
] | null | null | null |
lib/colors.py
|
Saveurian/Cleartext_Scanner
|
c54828fc6321b8549245f7914b4749b9114df7e0
|
[
"Unlicense"
] | null | null | null |
lib/colors.py
|
Saveurian/Cleartext_Scanner
|
c54828fc6321b8549245f7914b4749b9114df7e0
|
[
"Unlicense"
] | null | null | null |
class Colors:
"""
    Provides ANSI terminal colors to help the eye identify
    potential clear-text passwords.
"""
NONE = "\033[0m"
RED = "\033[31m"
GREEN = "\033[32m"
LIGHT_GRAY = "\033[37m"
LIGHT_BLUE = "\033[34m"
YELLOW = "\033[33m"
def __init__(self):
return
# Red terminal color
def red(self, text):
return self.RED + text + self.NONE
# Green terminal color
def green(self, text):
return self.GREEN + text + self.NONE
# Light gray terminal color
def light_gray(self, text):
return self.LIGHT_GRAY + text + self.NONE
# Light blue terminal color
def light_blue(self, text):
return self.LIGHT_BLUE + text + self.NONE
# Yellow terminal color
def yellow(self, text):
return self.YELLOW + text + self.NONE
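A usage sketch for the class above; the scanned line is illustrative:

colors = Colors()
line = "password=hunter2"                     # illustrative scanner hit
print(colors.red(line), colors.light_gray("(clear-text candidate)"))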
| 23.942857
| 59
| 0.608592
|
class Colors:
NONE = "\033[0m"
RED = "\033[31m"
GREEN = "\033[32m"
LIGHT_GRAY = "\033[37m"
LIGHT_BLUE = "\033[34m"
YELLOW = "\033[33m"
def __init__(self):
return
def red(self, text):
return self.RED + text + self.NONE
def green(self, text):
return self.GREEN + text + self.NONE
def light_gray(self, text):
return self.LIGHT_GRAY + text + self.NONE
def light_blue(self, text):
return self.LIGHT_BLUE + text + self.NONE
def yellow(self, text):
return self.YELLOW + text + self.NONE
| true
| true
|
1c468f73f1203314d42997ed056bf5f884a64d3b
| 2,065
|
py
|
Python
|
pytorch/poly/polygonize.py
|
IUResearchApplications/BuildingFootprints
|
97dc2ba9303bb5fdfd1c357c94b9e1e903a52ebe
|
[
"MIT"
] | 2
|
2020-05-01T15:41:14.000Z
|
2020-05-27T20:49:09.000Z
|
pytorch/poly/polygonize.py
|
IUResearchApplications/BuildingFootprints
|
97dc2ba9303bb5fdfd1c357c94b9e1e903a52ebe
|
[
"MIT"
] | null | null | null |
pytorch/poly/polygonize.py
|
IUResearchApplications/BuildingFootprints
|
97dc2ba9303bb5fdfd1c357c94b9e1e903a52ebe
|
[
"MIT"
] | 1
|
2020-05-01T15:41:15.000Z
|
2020-05-01T15:41:15.000Z
|
import sys
import subprocess
import os
import ogr
import glob
from setup import setup_run
def call_gdal_polygonize(input_file, output_file):
    # If the file already exists, delete it first, or additional polygons will be
    # appended to the file if this is run more than once.
if os.path.isfile(output_file):
os.remove(output_file)
# Call gdal_polygonize.py
subprocess.call(['gdal_polygonize.py', input_file, '-b', '1', '-q', '-f','GeoJSON',
output_file])
# Open the image with OGR
src = ogr.Open(output_file)
# If the GeoTIFF has no shapes to polygonize then gdal_polygonize outputs a GeoJSON in an
# incorrect format, so delete it.
layer = src.GetLayer(0)
# The GeoJSON that needs to be deleted will have no features
count = layer.GetFeatureCount()
if count == 0:
os.remove(output_file)
print ('Removed ' + os.path.basename(output_file))
def run_polygonize(main_path):
# Set up required file paths to the images
poly_fp = glob.glob(os.path.join(main_path, '*.tif'))
# Set up the main file path to where the original GeoJSONs will be saved
geojson_path = os.path.join(main_path, 'original_geojson')
# If the directory to save the GeoJSONs to does not exist then create it
if not os.path.isdir(geojson_path):
os.mkdir(geojson_path)
print ("Created folder 'original_geojson'")
print ('Polygonizing the predictions...')
for tif_fp in poly_fp:
# Switch the file extension from .tif to .geojson
file_name = os.path.splitext(os.path.basename(tif_fp))[0]
geojson_name = os.path.join(geojson_path, file_name + '.geojson')
# Set up the file path and name of the new GeoJSON file
json_fp = os.path.join(main_path, geojson_name)
# Polygonize the predictions
call_gdal_polygonize(tif_fp, json_fp)
print ('Done.')
return geojson_path
def main():
main_path = setup_run('polygonize')
run_polygonize(main_path)
if __name__ == '__main__':
main()
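A minimal driver sketch that bypasses setup_run; the directory is hypothetical and must already contain the predicted GeoTIFFs:

geojson_dir = run_polygonize('/data/predictions')   # hypothetical input directory
print('GeoJSONs written to', geojson_dir)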
| 30.820896
| 94
| 0.678935
|
import sys
import subprocess
import os
import ogr
import glob
from setup import setup_run
def call_gdal_polygonize(input_file, output_file):
if os.path.isfile(output_file):
os.remove(output_file)
subprocess.call(['gdal_polygonize.py', input_file, '-b', '1', '-q', '-f','GeoJSON',
output_file])
src = ogr.Open(output_file)
layer = src.GetLayer(0)
count = layer.GetFeatureCount()
if count == 0:
os.remove(output_file)
print ('Removed ' + os.path.basename(output_file))
def run_polygonize(main_path):
poly_fp = glob.glob(os.path.join(main_path, '*.tif'))
geojson_path = os.path.join(main_path, 'original_geojson')
if not os.path.isdir(geojson_path):
os.mkdir(geojson_path)
print ("Created folder 'original_geojson'")
print ('Polygonizing the predictions...')
for tif_fp in poly_fp:
file_name = os.path.splitext(os.path.basename(tif_fp))[0]
geojson_name = os.path.join(geojson_path, file_name + '.geojson')
json_fp = os.path.join(main_path, geojson_name)
call_gdal_polygonize(tif_fp, json_fp)
print ('Done.')
return geojson_path
def main():
main_path = setup_run('polygonize')
run_polygonize(main_path)
if __name__ == '__main__':
main()
| true
| true
|
1c46914d8a6cc3062ee889cf79e110b85a8762f3
| 938
|
py
|
Python
|
pycrypt.py
|
o0void0o/pycrypt
|
46ea779f2de983a9d6caa974a3b932590af5c156
|
[
"MIT"
] | null | null | null |
pycrypt.py
|
o0void0o/pycrypt
|
46ea779f2de983a9d6caa974a3b932590af5c156
|
[
"MIT"
] | null | null | null |
pycrypt.py
|
o0void0o/pycrypt
|
46ea779f2de983a9d6caa974a3b932590af5c156
|
[
"MIT"
] | null | null | null |
from Crypto.Random import get_random_bytes
from Crypto.Cipher import AES
key = get_random_bytes(32) # 32 bytes * 8 = 256 bits (1 byte = 8 bits)
print(key)
output_file = 'enc/encrypted.bin'
file = open("supernoooichFile.adoc", "rb")
data = file.read(-1)
cipher = AES.new(key, AES.MODE_CFB) # CFB mode
ciphered_data = cipher.encrypt(data) # Only need to encrypt the data, no padding required for this mode
file_out = open(output_file, "wb")
file_out.write(cipher.iv)
file_out.write(ciphered_data)
file_out.close()
input_file= 'enc/encrypted.bin'
file_in = open(input_file, 'rb')
iv = file_in.read(16)
ciphered_data = file_in.read()
file_in.close()
cipher = AES.new(key, AES.MODE_CFB, iv=iv)
original_data = cipher.decrypt(ciphered_data) # No need to un-pad
print(original_data)
f = open('dec/lkjl.adoc', 'wb')
f.write(original_data)
f.close()
#https://nitratine.net/blog/post/python-encryption-and-decryption-with-pycryptodome/
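The same AES-CFB construction also works as an in-memory round trip. A minimal sketch (for real use, an authenticated mode such as EAX or GCM is preferable, since CFB alone gives no integrity protection):

from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes

key = get_random_bytes(32)
enc = AES.new(key, AES.MODE_CFB)              # a fresh random IV is generated
ct = enc.encrypt(b"attack at dawn")
dec = AES.new(key, AES.MODE_CFB, iv=enc.iv)   # reuse the IV for decryption
assert dec.decrypt(ct) == b"attack at dawn"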
| 24.684211
| 103
| 0.744136
|
from Crypto.Random import get_random_bytes
from Crypto.Cipher import AES
key = get_random_bytes(32)
print(key)
output_file = 'enc/encrypted.bin'
file = open("supernoooichFile.adoc", "rb")
data = file.read(-1)
cipher = AES.new(key, AES.MODE_CFB)
ciphered_data = cipher.encrypt(data)
file_out = open(output_file, "wb")
file_out.write(cipher.iv)
file_out.write(ciphered_data)
file_out.close()
input_file= 'enc/encrypted.bin'
file_in = open(input_file, 'rb')
iv = file_in.read(16)
ciphered_data = file_in.read()
file_in.close()
cipher = AES.new(key, AES.MODE_CFB, iv=iv)
original_data = cipher.decrypt(ciphered_data)
print(original_data)
f = open('dec/lkjl.adoc', 'wb')
f.write(original_data)
f.close()
| true
| true
|
1c4691903b60395ec59f186203e784ee1996ad0a
| 1,966
|
py
|
Python
|
ex35.py
|
Zinmarlwin711/python-exercises
|
361cb426a8bc03760906e25b6cb6a4a458260bfc
|
[
"MIT"
] | null | null | null |
ex35.py
|
Zinmarlwin711/python-exercises
|
361cb426a8bc03760906e25b6cb6a4a458260bfc
|
[
"MIT"
] | null | null | null |
ex35.py
|
Zinmarlwin711/python-exercises
|
361cb426a8bc03760906e25b6cb6a4a458260bfc
|
[
"MIT"
] | null | null | null |
from sys import exit
def gold_room():
print("This room is full of gold. How much do you take?")
choice = input("> ")
if "0" in choice or "1" in choice:
how_much = int (choice)
else:
dead("Man, learn to type a number.")
if how_much < 50:
print("Nice, you're not greedy, you win!")
exit(0)
else:
dead("You greedy bastard!")
def bear_room():
print("There is a bear here.")
print("The bear has bunch of honey.")
print("The fat bear is in front of another door.")
print("How are you going to move the bear?")
bear_moved = False
while True:
choice = input("> ")
if choice == "take honey":
dead ("The bear looks at you then slaps your face.")
elif choice == "taunt bear" and not bear_moved:
print("The bear has moved from the door.")
print("You can go through it now.")
bear_moved = True
elif choice == "taunt bear" and bear_moved:
dead("The bear gets pissed off and chews your legs.")
elif choice == "open door" and bear_moved:
gold_room()
else:
print("I got no idea what that means.")
def cthulhu_room():
print("Here you see the great evil Cthulhu.")
print("He, it, whatever stares at you and you go insane.")
print("Do you flee for your life or eat your head?")
choice = input("> ")
if "flee" in choice:
start()
elif "head" in choice:
dead ("Well that was tasty!")
else:
cthulhu_room()
def dead(why):
print(why, "Good job!")
exit(0)
def start():
print("You are in a dark room.")
print("There is a door to your right and left.")
print("Which one do you take?")
choice = input("> ")
if choice == "left":
bear_room()
elif choice == "right":
cthulhu_room()
else:
dead("You stumble around the room until you starve.")
start()
| 26.931507
| 65
| 0.571719
|
from sys import exit
def gold_room():
print("This room is full of gold. How much do you take?")
choice = input("> ")
if "0" in choice or "1" in choice:
how_much = int (choice)
else:
dead("Man, learn to type a number.")
if how_much < 50:
print("Nice, you're not greedy, you win!")
exit(0)
else:
dead("You greedy bastard!")
def bear_room():
print("There is a bear here.")
print("The bear has bunch of honey.")
print("The fat bear is in front of another door.")
print("How are you going to move the bear?")
bear_moved = False
while True:
choice = input("> ")
if choice == "take honey":
dead ("The bear looks at you then slaps your face.")
elif choice == "taunt bear" and not bear_moved:
print("The bear has moved from the door.")
print("You can go through it now.")
bear_moved = True
elif choice == "taunt bear" and bear_moved:
dead("The bear gets pissed off and chews your legs.")
elif choice == "open door" and bear_moved:
gold_room()
else:
print("I got no idea what that means.")
def cthulhu_room():
print("Here you see the great evil Cthulhu.")
print("He, it, whatever stares at you and you go insane.")
print("Do you flee for your life or eat your head?")
choice = input("> ")
if "flee" in choice:
start()
elif "head" in choice:
dead ("Well that was tasty!")
else:
cthulhu_room()
def dead(why):
print(why, "Good job!")
exit(0)
def start():
print("You are in a dark room.")
print("There is a door to your right and left.")
print("Which one do you take?")
choice = input("> ")
if choice == "left":
bear_room()
elif choice == "right":
cthulhu_room()
else:
dead("You stumble around the room until you starve.")
start()
| true
| true
|
1c4691faed7347e61b1a3bcb8447db2c3d16ec2e
| 801
|
py
|
Python
|
DataMining/Stats/coord_bounds.py
|
CKPalk/SeattleCrime_DM
|
0bfbf597ef7c4e87a4030e1c03f62b2f4c9f3c5b
|
[
"MIT"
] | null | null | null |
DataMining/Stats/coord_bounds.py
|
CKPalk/SeattleCrime_DM
|
0bfbf597ef7c4e87a4030e1c03f62b2f4c9f3c5b
|
[
"MIT"
] | null | null | null |
DataMining/Stats/coord_bounds.py
|
CKPalk/SeattleCrime_DM
|
0bfbf597ef7c4e87a4030e1c03f62b2f4c9f3c5b
|
[
"MIT"
] | null | null | null |
''' Work of Cameron Palk '''
import sys
import pandas as pd
def main( argv ):
try:
csv_filepath = argv[ 0 ]
output_filepath = argv[ 1 ]
except IndexError:
print( "Error, usage: \"python3 coord_bounds.py <CSV> <output_file>\"" )
return
training_data = pd.read_csv( csv_filepath )
training_data[ 'clean_Latitude' ] = training_data[ training_data.Latitude > 47 ].Latitude
training_data[ 'clean_Longitude' ] = training_data[ training_data.Longitude < -122 ].Longitude
	training_data = training_data.dropna()  # dropna returns a new frame; keep it
print( training_data[ 'clean_Latitude' ] )
for axis in [ 'clean_Longitude', 'clean_Latitude' ]:
print( "{:16} min: {:16} max: {:16}".format(
axis,
min( training_data[ axis ] ),
max( training_data[ axis ] )
) )
#
if __name__=='__main__':
main( sys.argv[ 1: ] )
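The same bounds fall out of a single pandas aggregation. A minimal sketch reusing the cleaned columns created above:

# Equivalent min/max bounds via DataFrame.agg (assumes the cleaned columns exist)
bounds = training_data[['clean_Longitude', 'clean_Latitude']].agg(['min', 'max'])
print(bounds)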
| 23.558824
| 95
| 0.66417
|
import sys
import pandas as pd
def main( argv ):
try:
csv_filepath = argv[ 0 ]
output_filepath = argv[ 1 ]
except IndexError:
print( "Error, usage: \"python3 coord_bounds.py <CSV> <output_file>\"" )
return
training_data = pd.read_csv( csv_filepath )
training_data[ 'clean_Latitude' ] = training_data[ training_data.Latitude > 47 ].Latitude
training_data[ 'clean_Longitude' ] = training_data[ training_data.Longitude < -122 ].Longitude
	training_data = training_data.dropna()
print( training_data[ 'clean_Latitude' ] )
for axis in [ 'clean_Longitude', 'clean_Latitude' ]:
print( "{:16} min: {:16} max: {:16}".format(
axis,
min( training_data[ axis ] ),
max( training_data[ axis ] )
) )
if __name__=='__main__':
main( sys.argv[ 1: ] )
| true
| true
|
1c4691ff67ef169df40aa3167b4cbb97f94211d2
| 1,425
|
py
|
Python
|
dufi/gui/boxes/ttkstyles.py
|
Shura1oplot/dufi
|
c9c25524020e57d3670c298acca305900b6490e7
|
[
"MIT"
] | null | null | null |
dufi/gui/boxes/ttkstyles.py
|
Shura1oplot/dufi
|
c9c25524020e57d3670c298acca305900b6490e7
|
[
"MIT"
] | null | null | null |
dufi/gui/boxes/ttkstyles.py
|
Shura1oplot/dufi
|
c9c25524020e57d3670c298acca305900b6490e7
|
[
"MIT"
] | null | null | null |
# [SublimeLinter @python:3]
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, print_function, absolute_import
try:
from tkinter import ttk
from tkinter import font as tk_font
except ImportError:
import ttk
import tkFont as tk_font
_ttk_styles = set()
_ttk_style_customizers = set()
def create_ttk_styles():
for func in _ttk_style_customizers:
func()
def _ttk_style_customizer(func):
_ttk_style_customizers.add(func)
def wrapper():
if func.__name__ not in _ttk_styles:
func()
_ttk_styles.add(func.__name__)
return wrapper
@_ttk_style_customizer
def create_ttk_style_white_frame():
ttk.Style().configure("WhiteFrame.TFrame", background="White")
ttk.Style().configure("WhiteLabel.TLabel", background="White")
ttk.Style().configure("WhiteCheckbutton.TCheckbutton", background="White")
@_ttk_style_customizer
def create_ttk_style_plain_notebook():
s = ttk.Style()
s.configure("Plain.TNotebook", borderwidth=0)
s.layout("Plain.TNotebook.Tab", [])
@_ttk_style_customizer
def create_ttk_style_bold_label():
# bold_font = tk_font.nametofont("TkDefaultFont").copy()
# bold_font.config(weight=tk_font.BOLD)
bold_font = ("Segoe UI", 9, "bold")
ttk.Style().configure("Bold.TLabel", font=bold_font)
ttk.Style().configure("WhiteLabelBold.TLabel", font=bold_font, background="White")
| 25.909091
| 86
| 0.716491
|
from __future__ import unicode_literals, division, print_function, absolute_import
try:
from tkinter import ttk
from tkinter import font as tk_font
except ImportError:
import ttk
import tkFont as tk_font
_ttk_styles = set()
_ttk_style_customizers = set()
def create_ttk_styles():
for func in _ttk_style_customizers:
func()
def _ttk_style_customizer(func):
_ttk_style_customizers.add(func)
def wrapper():
if func.__name__ not in _ttk_styles:
func()
_ttk_styles.add(func.__name__)
return wrapper
@_ttk_style_customizer
def create_ttk_style_white_frame():
ttk.Style().configure("WhiteFrame.TFrame", background="White")
ttk.Style().configure("WhiteLabel.TLabel", background="White")
ttk.Style().configure("WhiteCheckbutton.TCheckbutton", background="White")
@_ttk_style_customizer
def create_ttk_style_plain_notebook():
s = ttk.Style()
s.configure("Plain.TNotebook", borderwidth=0)
s.layout("Plain.TNotebook.Tab", [])
@_ttk_style_customizer
def create_ttk_style_bold_label():
bold_font = ("Segoe UI", 9, "bold")
ttk.Style().configure("Bold.TLabel", font=bold_font)
ttk.Style().configure("WhiteLabelBold.TLabel", font=bold_font, background="White")
| true
| true
|
1c4692d5cb27e6291a7c4ae6d099c67074bc40ee
| 5,735
|
py
|
Python
|
src/web-application/app.py
|
hitesh009911/thrain
|
2535cddb8908772cbac3ba9fed194623aa9334d6
|
[
"MIT"
] | null | null | null |
src/web-application/app.py
|
hitesh009911/thrain
|
2535cddb8908772cbac3ba9fed194623aa9334d6
|
[
"MIT"
] | null | null | null |
src/web-application/app.py
|
hitesh009911/thrain
|
2535cddb8908772cbac3ba9fed194623aa9334d6
|
[
"MIT"
] | null | null | null |
import os
import os.path
from flask import Flask, flash, request, redirect, url_for, render_template, session, send_from_directory, send_file
from werkzeug.utils import secure_filename
import DH
import pickle
import random
UPLOAD_FOLDER = './media/text-files/'
UPLOAD_KEY = './media/public-keys/'
ALLOWED_EXTENSIONS = set(['txt'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
'''
-----------------------------------------------------------
PAGE REDIRECTS
-----------------------------------------------------------
'''
def post_upload_redirect():
return render_template('post-upload.html')
@app.route('/register')
def call_page_register_user():
return render_template('register.html')
@app.route('/home')
def back_home():
return render_template('index.html')
@app.route('/')
def index():
return render_template('index.html')
@app.route('/upload-file')
def call_page_upload():
return render_template('upload.html')
'''
-----------------------------------------------------------
DOWNLOAD KEY-FILE
-----------------------------------------------------------
'''
@app.route('/public-key-directory/retrieve/key/<username>')
def download_public_key(username):
for root,dirs,files in os.walk('./media/public-keys/'):
for file in files:
list = file.split('-')
if list[0] == username:
filename = UPLOAD_KEY+file
return send_file(filename, attachment_filename='publicKey.pem',as_attachment=True)
@app.route('/file-directory/retrieve/file/<filename>')
def download_file(filename):
filepath = UPLOAD_FOLDER+filename
if(os.path.isfile(filepath)):
return send_file(filepath, attachment_filename='fileMessage-thrainSecurity.txt',as_attachment=True)
else:
return render_template('file-list.html',msg='An issue encountered, our team is working on that')
'''
-----------------------------------------------------------
BUILD - DISPLAY FILE - KEY DIRECTORY
-----------------------------------------------------------
'''
# Build public key directory
@app.route('/public-key-directory/')
def downloads_pk():
username = []
if(os.path.isfile("./media/database/database_1.pickle")):
pickleObj = open("./media/database/database_1.pickle","rb")
username = pickle.load(pickleObj)
pickleObj.close()
if len(username) == 0:
return render_template('public-key-list.html',msg='Aww snap! No public key found in the database')
else:
return render_template('public-key-list.html',msg='',itr = 0, length = len(username),directory=username)
# Build file directory
@app.route('/file-directory/')
def download_f():
for root,dirs,files in os.walk(UPLOAD_FOLDER):
if(len(files) == 0):
return render_template('file-list.html',msg='Aww snap! No file found in directory')
else:
return render_template('file-list.html',msg='',itr=0,length=len(files),list=files)
'''
-----------------------------------------------------------
UPLOAD ENCRYPTED FILE
-----------------------------------------------------------
'''
@app.route('/data', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
        # if the user does not select a file, the browser
        # submits an empty part without a filename
if file.filename == '':
flash('No selected file')
return 'NO FILE SELECTED'
if file:
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], file.filename))
return post_upload_redirect()
return 'Invalid File Format !'
'''
-----------------------------------------------------------
REGISTER UNIQUE USERNAME AND GENERATE PUBLIC KEY WITH FILE
-----------------------------------------------------------
'''
@app.route('/register-new-user', methods = ['GET', 'POST'])
def register_user():
files = []
privatekeylist = []
usernamelist = []
# Import pickle file to maintain uniqueness of the keys
if(os.path.isfile("./media/database/database.pickle")):
pickleObj = open("./media/database/database.pickle","rb")
privatekeylist = pickle.load(pickleObj)
pickleObj.close()
if(os.path.isfile("./media/database/database_1.pickle")):
pickleObj = open("./media/database/database_1.pickle","rb")
usernamelist = pickle.load(pickleObj)
pickleObj.close()
    # The username must be unique across all registered users
if request.form['username'] in usernamelist:
return render_template('register.html', name='Username already exists')
username = request.form['username']
firstname = request.form['first-name']
secondname = request.form['last-name']
pin = int(random.randint(1,128))
pin = pin % 64
#Generating a unique private key
privatekey = DH.generate_private_key(pin)
while privatekey in privatekeylist:
privatekey = DH.generate_private_key(pin)
privatekeylist.append(str(privatekey))
usernamelist.append(username)
#Save/update pickle
pickleObj = open("./media/database/database.pickle","wb")
pickle.dump(privatekeylist,pickleObj)
pickleObj.close()
pickleObj = open("./media/database/database_1.pickle","wb")
pickle.dump(usernamelist,pickleObj)
pickleObj.close()
    # Build the public-key filename for the new user
filename = UPLOAD_KEY+username+'-'+secondname.upper()+firstname.lower()+'-PublicKey.pem'
# Generate public key and save it in the file generated
publickey = DH.generate_public_key(privatekey)
    fileObject = open(filename,"w")
    fileObject.write(str(publickey))
    fileObject.close()  # flush so the key file can be downloaded immediately
return render_template('key-display.html',privatekey=str(privatekey))
if __name__ == '__main__':
app.run(host="0.0.0.0", port=80)
#app.run(debug=True)
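A smoke-test sketch using Flask's built-in test client; the form values are illustrative, and the DH module plus the media/ directories must exist:

with app.test_client() as client:
    resp = client.post('/register-new-user', data={
        'username': 'alice',
        'first-name': 'Alice',
        'last-name': 'Liddell',
    })
    print(resp.status_code)                   # 200 when registration succeeds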
| 33.735294
| 109
| 0.6551
|
import os
import os.path
from flask import Flask, flash, request, redirect, url_for, render_template, session, send_from_directory, send_file
from werkzeug.utils import secure_filename
import DH
import pickle
import random
UPLOAD_FOLDER = './media/text-files/'
UPLOAD_KEY = './media/public-keys/'
ALLOWED_EXTENSIONS = set(['txt'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def post_upload_redirect():
return render_template('post-upload.html')
@app.route('/register')
def call_page_register_user():
return render_template('register.html')
@app.route('/home')
def back_home():
return render_template('index.html')
@app.route('/')
def index():
return render_template('index.html')
@app.route('/upload-file')
def call_page_upload():
return render_template('upload.html')
@app.route('/public-key-directory/retrieve/key/<username>')
def download_public_key(username):
for root,dirs,files in os.walk('./media/public-keys/'):
for file in files:
list = file.split('-')
if list[0] == username:
filename = UPLOAD_KEY+file
return send_file(filename, attachment_filename='publicKey.pem',as_attachment=True)
@app.route('/file-directory/retrieve/file/<filename>')
def download_file(filename):
filepath = UPLOAD_FOLDER+filename
if(os.path.isfile(filepath)):
return send_file(filepath, attachment_filename='fileMessage-thrainSecurity.txt',as_attachment=True)
else:
return render_template('file-list.html',msg='An issue encountered, our team is working on that')
@app.route('/public-key-directory/')
def downloads_pk():
username = []
if(os.path.isfile("./media/database/database_1.pickle")):
pickleObj = open("./media/database/database_1.pickle","rb")
username = pickle.load(pickleObj)
pickleObj.close()
if len(username) == 0:
return render_template('public-key-list.html',msg='Aww snap! No public key found in the database')
else:
return render_template('public-key-list.html',msg='',itr = 0, length = len(username),directory=username)
@app.route('/file-directory/')
def download_f():
for root,dirs,files in os.walk(UPLOAD_FOLDER):
if(len(files) == 0):
return render_template('file-list.html',msg='Aww snap! No file found in directory')
else:
return render_template('file-list.html',msg='',itr=0,length=len(files),list=files)
@app.route('/data', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
if file.filename == '':
flash('No selected file')
return 'NO FILE SELECTED'
if file:
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], file.filename))
return post_upload_redirect()
return 'Invalid File Format !'
@app.route('/register-new-user', methods = ['GET', 'POST'])
def register_user():
files = []
privatekeylist = []
usernamelist = []
if(os.path.isfile("./media/database/database.pickle")):
pickleObj = open("./media/database/database.pickle","rb")
privatekeylist = pickle.load(pickleObj)
pickleObj.close()
if(os.path.isfile("./media/database/database_1.pickle")):
pickleObj = open("./media/database/database_1.pickle","rb")
usernamelist = pickle.load(pickleObj)
pickleObj.close()
if request.form['username'] in usernamelist:
return render_template('register.html', name='Username already exists')
username = request.form['username']
firstname = request.form['first-name']
secondname = request.form['last-name']
pin = int(random.randint(1,128))
pin = pin % 64
privatekey = DH.generate_private_key(pin)
while privatekey in privatekeylist:
privatekey = DH.generate_private_key(pin)
privatekeylist.append(str(privatekey))
usernamelist.append(username)
pickleObj = open("./media/database/database.pickle","wb")
pickle.dump(privatekeylist,pickleObj)
pickleObj.close()
pickleObj = open("./media/database/database_1.pickle","wb")
pickle.dump(usernamelist,pickleObj)
pickleObj.close()
filename = UPLOAD_KEY+username+'-'+secondname.upper()+firstname.lower()+'-PublicKey.pem'
publickey = DH.generate_public_key(privatekey)
    fileObject = open(filename,"w")
    fileObject.write(str(publickey))
    fileObject.close()
return render_template('key-display.html',privatekey=str(privatekey))
if __name__ == '__main__':
app.run(host="0.0.0.0", port=80)
| true
| true
|
1c4694493a461d612c26de35ae341c0d3a012bea
| 4,594
|
py
|
Python
|
plasmapy/utils/roman/tests/test_roman.py
|
KhalilBryant/PlasmaPy
|
05f7cb60348c7048fb3b8fbaf25985f2fba47fb7
|
[
"MIT",
"BSD-2-Clause-Patent",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1
|
2020-02-14T16:35:02.000Z
|
2020-02-14T16:35:02.000Z
|
plasmapy/utils/roman/tests/test_roman.py
|
KhalilBryant/PlasmaPy
|
05f7cb60348c7048fb3b8fbaf25985f2fba47fb7
|
[
"MIT",
"BSD-2-Clause-Patent",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
plasmapy/utils/roman/tests/test_roman.py
|
KhalilBryant/PlasmaPy
|
05f7cb60348c7048fb3b8fbaf25985f2fba47fb7
|
[
"MIT",
"BSD-2-Clause-Patent",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
import pytest
import numpy as np
import plasmapy.utils.roman as roman
from plasmapy.utils.pytest_helpers import run_test
ints_and_roman_numerals = [
(1, "I"),
(2, "II"),
(3, "III"),
(4, "IV"),
(5, "V"),
(6, "VI"),
(7, "VII"),
(8, "VIII"),
(9, "IX"),
(10, "X"),
(11, "XI"),
(12, "XII"),
(13, "XIII"),
(14, "XIV"),
(15, "XV"),
(16, "XVI"),
(17, "XVII"),
(18, "XVIII"),
(19, "XIX"),
(20, "XX"),
(21, "XXI"),
(22, "XXII"),
(23, "XXIII"),
(24, "XXIV"),
(25, "XXV"),
(26, "XXVI"),
(27, "XXVII"),
(28, "XXVIII"),
(29, "XXIX"),
(30, "XXX"),
(31, "XXXI"),
(32, "XXXII"),
(33, "XXXIII"),
(34, "XXXIV"),
(35, "XXXV"),
(36, "XXXVI"),
(37, "XXXVII"),
(38, "XXXVIII"),
(39, "XXXIX"),
(40, "XL"),
(41, "XLI"),
(42, "XLII"),
(43, "XLIII"),
(44, "XLIV"),
(45, "XLV"),
(46, "XLVI"),
(47, "XLVII"),
(48, "XLVIII"),
(49, "XLIX"),
(50, "L"),
(51, "LI"),
(52, "LII"),
(53, "LIII"),
(54, "LIV"),
(55, "LV"),
(56, "LVI"),
(57, "LVII"),
(58, "LVIII"),
(59, "LIX"),
(60, "LX"),
(61, "LXI"),
(62, "LXII"),
(63, "LXIII"),
(64, "LXIV"),
(65, "LXV"),
(66, "LXVI"),
(67, "LXVII"),
(68, "LXVIII"),
(69, "LXIX"),
(70, "LXX"),
(71, "LXXI"),
(72, "LXXII"),
(73, "LXXIII"),
(74, "LXXIV"),
(75, "LXXV"),
(76, "LXXVI"),
(77, "LXXVII"),
(78, "LXXVIII"),
(79, "LXXIX"),
(80, "LXXX"),
(81, "LXXXI"),
(82, "LXXXII"),
(83, "LXXXIII"),
(84, "LXXXIV"),
(85, "LXXXV"),
(86, "LXXXVI"),
(87, "LXXXVII"),
(88, "LXXXVIII"),
(89, "LXXXIX"),
(90, "XC"),
(91, "XCI"),
(92, "XCII"),
(93, "XCIII"),
(94, "XCIV"),
(95, "XCV"),
(96, "XCVI"),
(97, "XCVII"),
(98, "XCVIII"),
(99, "XCIX"),
(100, "C"),
(101, "CI"),
(102, "CII"),
(103, "CIII"),
(104, "CIV"),
(105, "CV"),
(106, "CVI"),
(107, "CVII"),
(108, "CVIII"),
(109, "CIX"),
(110, "CX"),
(111, "CXI"),
(112, "CXII"),
(113, "CXIII"),
(114, "CXIV"),
(115, "CXV"),
(116, "CXVI"),
(117, "CXVII"),
(118, "CXVIII"),
(119, "CXIX"),
(120, "CXX"),
(121, "CXXI"),
(122, "CXXII"),
(188, "CLXXXVIII"),
(189, "CLXXXIX"),
(198, "CXCVIII"),
(199, "CXCIX"),
(200, "CC"),
    (np.int_(9), "IX"),  # np.int_ rather than the removed np.int alias
(np.int16(10), "X"),
(np.int32(11), "XI"),
(np.int64(14), "XIV"),
]
toRoman_exceptions_table = [
("X", TypeError),
(-1, roman.OutOfRangeError),
(0, roman.OutOfRangeError),
(5000, roman.OutOfRangeError),
]
fromRoman_exceptions_table = [
("asdfasd", roman.InvalidRomanNumeralError),
(1, TypeError),
("xi", roman.InvalidRomanNumeralError),
]
@pytest.mark.parametrize("integer, roman_numeral", ints_and_roman_numerals)
def test_to_roman(integer, roman_numeral):
"""
Test that `~plasmapy.utils.roman.to_roman` correctly converts
integers to Roman numerals.
"""
run_test(func=roman.to_roman, args=integer, expected_outcome=roman_numeral)
@pytest.mark.parametrize("integer, roman_numeral", ints_and_roman_numerals)
def test_from_roman(integer, roman_numeral):
"""
Test that `~plasmapy.utils.roman.from_roman` correctly converts
Roman numerals to integers.
"""
run_test(func=roman.from_roman, args=roman_numeral, expected_outcome=int(integer))
@pytest.mark.parametrize("input, expected_exception", toRoman_exceptions_table)
def test_to_roman_exceptions(input, expected_exception):
"""
Test that `~plasmapy.utils.roman.to_roman` raises the correct
exceptions when necessary.
"""
run_test(func=roman.to_roman, args=input, expected_outcome=expected_exception)
@pytest.mark.parametrize("input, expected_exception", fromRoman_exceptions_table)
def test_from_roman_exceptions(input, expected_exception):
"""
Test that `~plasmapy.utils.roman.from_roman` raises the correct
exceptions when necessary.
"""
run_test(func=roman.from_roman, args=input, expected_outcome=expected_exception)
test_is_roman_numeral_table = [
("I", True),
("i", False),
("CLXXXVIII", True),
(1, TypeError),
("khjfda", False),
("VIIII", False),
("IXX", False),
(("I", "II"), TypeError),
]
@pytest.mark.parametrize("input, expected", test_is_roman_numeral_table)
def test_is_roman_numeral(input, expected):
run_test(func=roman.is_roman_numeral, args=input, expected_outcome=expected)
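# For reference, the conversion these tables exercise fits in a greedy walk
# over a value table. Illustrative sketch only -- not PlasmaPy's to_roman;
# the 1..4999 range and the accepted integer types are inferred from the
# parameter and exception tables above.
import numbers

_NUMERALS = [
    (1000, "M"), (900, "CM"), (500, "D"), (400, "CD"),
    (100, "C"), (90, "XC"), (50, "L"), (40, "XL"),
    (10, "X"), (9, "IX"), (5, "V"), (4, "IV"), (1, "I"),
]

def to_roman_sketch(n):
    if not isinstance(n, numbers.Integral):  # accepts np.int16/32/64 too
        raise TypeError("expected an integer")
    if not 0 < n < 5000:                     # stands in for OutOfRangeError
        raise ValueError("0 < n < 5000 required")
    parts = []
    for value, numeral in _NUMERALS:
        count, n = divmod(n, value)
        parts.append(numeral * count)
    return "".join(parts)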
| 22.300971
| 86
| 0.529822
|
| true
| true
|
1c46946534565ded99af86b7fdb56b8971f0ef04
| 3,778
|
py
|
Python
|
apps/Todo/tests/TodoModelTests.py
|
Eduardo-RFarias/DjangoReactBackend
|
b8183ea4b24be5c0aa557ffbc79fc23e0777b8ad
|
[
"MIT"
] | null | null | null |
apps/Todo/tests/TodoModelTests.py
|
Eduardo-RFarias/DjangoReactBackend
|
b8183ea4b24be5c0aa557ffbc79fc23e0777b8ad
|
[
"MIT"
] | null | null | null |
apps/Todo/tests/TodoModelTests.py
|
Eduardo-RFarias/DjangoReactBackend
|
b8183ea4b24be5c0aa557ffbc79fc23e0777b8ad
|
[
"MIT"
] | null | null | null |
from apps.User.models import User
from django.test import RequestFactory
from django.utils import timezone
from rest_framework import status
from ..models import Todo
from ..serializers import TodoSerializer
from .BaseAuthenticatedTest import BaseAuthenticatedTest
name = "Test todo"
description = "This is an auto generated Todo"
class TodoModelTests(BaseAuthenticatedTest):
def setUp(self) -> None:
loginResponse = self.login_and_set("03699132137", "123456")
self.userJson = loginResponse.get("user")
self.user = User.objects.get(cpf=self.userJson.get("cpf"))
def test_todo_model(self):
todo = Todo.objects.create(name=name, description=description)
self.assertGreater(todo.id, 0)
self.assertEqual(todo.name, name)
self.assertEqual(todo.description, description)
self.assertFalse(todo.done)
self.assertLessEqual(todo.created_at, timezone.now())
self.assertIsNone(todo.owner)
todo.delete()
def test_todo_list(self):
todo = Todo.objects.create(name=name, description=description, owner=self.user)
request = RequestFactory().post("/")
todoJson = TodoSerializer(todo, context={"request": request}).data
response = self.client.get("/api/todo/")
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.data[0].get("name"),
todoJson.get("name"),
)
self.assertEqual(
response.data[0].get("description"),
todoJson.get("description"),
)
self.assertEqual(
response.data[0].get("owner"),
todoJson.get("owner"),
)
def test_todo_detail(self):
todo = Todo.objects.create(name=name, description=description, owner=self.user)
request = RequestFactory().post("/")
todoJson = TodoSerializer(todo, context={"request": request}).data
response = self.client.get(f"/api/todo/{todo.pk}/")
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
response.data.get("name"),
todoJson.get("name"),
)
self.assertEqual(
response.data.get("description"),
todoJson.get("description"),
)
self.assertEqual(
response.data.get("owner"),
todoJson.get("owner"),
)
def test_todo_create(self):
response = self.client.post(
"/api/todo/",
{"name": name, "description": description},
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data.get("name"), name)
self.assertEqual(response.data.get("description"), description)
self.assertEqual(response.data.get("owner"), self.userJson.get("url"))
def test_todo_update(self):
todo = Todo.objects.create(name=name, description=description, owner=self.user)
request = RequestFactory().post("/")
todoJson = TodoSerializer(todo, context={"request": request}).data
new_description = "Updated description for testing"
todoJson["description"] = new_description
response = self.client.put(f"/api/todo/{todo.pk}/", todoJson)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get("description"), new_description)
def test_todo_delete(self):
todo = Todo.objects.create(name=name, description=description, owner=self.user)
response = self.client.delete(f"/api/todo/{todo.pk}/")
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
with self.assertRaises(Todo.DoesNotExist):
            Todo.objects.get(pk=todo.pk)
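# The assertions above pin down the model's shape; a Todo definition
# consistent with them would look roughly like the sketch below. This is
# reconstructed from the tests, not copied from apps/Todo/models.py.
from django.conf import settings
from django.db import models

class TodoSketch(models.Model):
    name = models.CharField(max_length=255)
    description = models.TextField(blank=True)
    done = models.BooleanField(default=False)              # assertFalse(todo.done)
    created_at = models.DateTimeField(auto_now_add=True)   # assertLessEqual(..., timezone.now())
    owner = models.ForeignKey(                             # nullable: assertIsNone(todo.owner)
        settings.AUTH_USER_MODEL, null=True, blank=True, on_delete=models.CASCADE,
    )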
| 34.345455
| 87
| 0.647697
|
| true
| true
|
1c469501b5b017b977afe49a3917b281287e2777
| 1,611
|
py
|
Python
|
src/web/monitorforms/heroku-dyno-status-single/__init__.py
|
anderson-attilio/runbook
|
7b68622f75ef09f654046f0394540025f3ee7445
|
[
"Apache-2.0"
] | 155
|
2015-07-15T14:06:06.000Z
|
2021-03-31T01:41:44.000Z
|
src/web/monitorforms/heroku-dyno-status-single/__init__.py
|
anderson-attilio/runbook
|
7b68622f75ef09f654046f0394540025f3ee7445
|
[
"Apache-2.0"
] | 78
|
2015-01-01T05:49:20.000Z
|
2015-07-12T01:48:44.000Z
|
src/web/monitorforms/heroku-dyno-status-single/__init__.py
|
Runbook/runbook
|
7b68622f75ef09f654046f0394540025f3ee7445
|
[
"Apache-2.0"
] | 36
|
2015-07-20T22:42:23.000Z
|
2021-12-05T10:00:44.000Z
|
######################################################################
# Cloud Routes Web Application
# -------------------------------------------------------------------
# Health Check - Forms Class
######################################################################
from wtforms import TextField
from wtforms.validators import DataRequired
from ..datacenter import DatacenterCheckForm
class CheckForm(DatacenterCheckForm):
''' Creates a wtforms form object for monitors '''
title = "Heroku: Dyno Status"
description = """
This monitor will query the status of a specified Dyno within the specified Application. If the Dyno is not in an "up" or "idle" state this monitor will return False. If the Dyno is in a healthy status this monitor will return True.
"""
    placeholders = dict(DatacenterCheckForm.placeholders)  # copy, so the base class dict is not mutated
    placeholders.update({
        'appname': 'Application Name',
        'dynoname': 'web.1',
    })
apikey = TextField(
"API Key",
description=DatacenterCheckForm.descriptions['apikey'],
validators=[DataRequired(
message='API Key is a required field')])
appname = TextField(
"Application Name",
description=DatacenterCheckForm.descriptions['heroku']['appname'],
validators=[DataRequired(
message='Application Name is a required field')])
dynoname = TextField(
"Dyno Name",
description=DatacenterCheckForm.descriptions['heroku']['dynoname'],
validators=[DataRequired(
message='Dyno Name is a required field')])
if __name__ == '__main__':
pass
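# Sketch of the check this form configures, for orientation only; it is not
# the project's monitor implementation. The endpoint shape follows Heroku's
# public Platform API v3 documentation -- treat names and fields here as
# assumptions.
import requests

def dyno_is_healthy(apikey, appname, dynoname):
    resp = requests.get(
        'https://api.heroku.com/apps/{0}/dynos/{1}'.format(appname, dynoname),
        headers={
            'Accept': 'application/vnd.heroku+json; version=3',
            'Authorization': 'Bearer ' + apikey,
        },
        timeout=10,
    )
    # A dyno counts as healthy when it reports an "up" or "idle" state,
    # mirroring the monitor description above.
    return resp.status_code == 200 and resp.json().get('state') in ('up', 'idle')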
| 38.357143
| 236
| 0.590317
| true
| true
|
|
1c469515024b2146bc7f69544490c4e70ded4e10
| 15,622
|
py
|
Python
|
tests/admin_checks/tests.py
|
PirosB3/django
|
9b729ddd8f2040722971ccfb3b12f7d8162633d1
|
[
"BSD-3-Clause"
] | 1
|
2015-05-16T13:13:06.000Z
|
2015-05-16T13:13:06.000Z
|
tests/admin_checks/tests.py
|
PirosB3/django
|
9b729ddd8f2040722971ccfb3b12f7d8162633d1
|
[
"BSD-3-Clause"
] | null | null | null |
tests/admin_checks/tests.py
|
PirosB3/django
|
9b729ddd8f2040722971ccfb3b12f7d8162633d1
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import unicode_literals
import warnings
from django import forms
from django.contrib import admin
from django.core import checks
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from .models import Song, Book, Album, TwoAlbumFKAndAnE, City, State
class SongForm(forms.ModelForm):
pass
class ValidFields(admin.ModelAdmin):
form = SongForm
fields = ['title']
class ValidFormFieldsets(admin.ModelAdmin):
def get_form(self, request, obj=None, **kwargs):
class ExtraFieldForm(SongForm):
name = forms.CharField(max_length=50)
return ExtraFieldForm
fieldsets = (
(None, {
'fields': ('name',),
}),
)
class SystemChecksTestCase(TestCase):
def test_checks_are_performed(self):
class MyAdmin(admin.ModelAdmin):
            @classmethod
            def check(cls, model, **kwargs):
                return ['error!']
admin.site.register(Song, MyAdmin)
try:
errors = checks.run_checks()
expected = ['error!']
self.assertEqual(errors, expected)
finally:
admin.site.unregister(Song)
def test_readonly_and_editable(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ["original_release"]
list_display = ["pk", "original_release"]
list_editable = ["original_release"]
fieldsets = [
(None, {
"fields": ["title", "original_release"],
}),
]
errors = SongAdmin.check(model=Song)
expected = [
checks.Error(
("The value of 'list_editable[0]' refers to 'original_release', "
"which is not editable through the admin."),
hint=None,
obj=SongAdmin,
id='admin.E125',
)
]
self.assertEqual(errors, expected)
def test_editable(self):
class SongAdmin(admin.ModelAdmin):
list_display = ["pk", "title"]
list_editable = ["title"]
fieldsets = [
(None, {
"fields": ["title", "original_release"],
}),
]
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_custom_modelforms_with_fields_fieldsets(self):
"""
# Regression test for #8027: custom ModelForms with fields/fieldsets
"""
errors = ValidFields.check(model=Song)
self.assertEqual(errors, [])
def test_custom_get_form_with_fieldsets(self):
"""
Ensure that the fieldsets checks are skipped when the ModelAdmin.get_form() method
is overridden.
Refs #19445.
"""
errors = ValidFormFieldsets.check(model=Song)
self.assertEqual(errors, [])
def test_exclude_values(self):
"""
Tests for basic system checks of 'exclude' option values (#12689)
"""
class ExcludedFields1(admin.ModelAdmin):
exclude = 'foo'
errors = ExcludedFields1.check(model=Book)
expected = [
checks.Error(
"The value of 'exclude' must be a list or tuple.",
hint=None,
obj=ExcludedFields1,
id='admin.E014',
)
]
self.assertEqual(errors, expected)
def test_exclude_duplicate_values(self):
class ExcludedFields2(admin.ModelAdmin):
exclude = ('name', 'name')
errors = ExcludedFields2.check(model=Book)
expected = [
checks.Error(
"The value of 'exclude' contains duplicate field(s).",
hint=None,
obj=ExcludedFields2,
id='admin.E015',
)
]
self.assertEqual(errors, expected)
def test_exclude_in_inline(self):
class ExcludedFieldsInline(admin.TabularInline):
model = Song
exclude = 'foo'
class ExcludedFieldsAlbumAdmin(admin.ModelAdmin):
model = Album
inlines = [ExcludedFieldsInline]
errors = ExcludedFieldsAlbumAdmin.check(model=Album)
expected = [
checks.Error(
"The value of 'exclude' must be a list or tuple.",
hint=None,
obj=ExcludedFieldsInline,
id='admin.E014',
)
]
self.assertEqual(errors, expected)
def test_exclude_inline_model_admin(self):
"""
Regression test for #9932 - exclude in InlineModelAdmin should not
contain the ForeignKey field used in ModelAdmin.model
"""
class SongInline(admin.StackedInline):
model = Song
exclude = ['album']
class AlbumAdmin(admin.ModelAdmin):
model = Album
inlines = [SongInline]
errors = AlbumAdmin.check(model=Album)
expected = [
checks.Error(
("Cannot exclude the field 'album', because it is the foreign key "
"to the parent model 'admin_checks.Album'."),
hint=None,
obj=SongInline,
id='admin.E201',
)
]
self.assertEqual(errors, expected)
def test_app_label_in_admin_checks(self):
"""
Regression test for #15669 - Include app label in admin system check messages
"""
class RawIdNonexistingAdmin(admin.ModelAdmin):
raw_id_fields = ('nonexisting',)
errors = RawIdNonexistingAdmin.check(model=Album)
expected = [
checks.Error(
("The value of 'raw_id_fields[0]' refers to 'nonexisting', which is "
"not an attribute of 'admin_checks.Album'."),
hint=None,
obj=RawIdNonexistingAdmin,
id='admin.E002',
)
]
self.assertEqual(errors, expected)
def test_fk_exclusion(self):
"""
Regression test for #11709 - when testing for fk excluding (when exclude is
given) make sure fk_name is honored or things blow up when there is more
than one fk to the parent model.
"""
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
exclude = ("e",)
fk_name = "album1"
class MyAdmin(admin.ModelAdmin):
inlines = [TwoAlbumFKAndAnEInline]
errors = MyAdmin.check(model=Album)
self.assertEqual(errors, [])
def test_inline_self_check(self):
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
class MyAdmin(admin.ModelAdmin):
inlines = [TwoAlbumFKAndAnEInline]
errors = MyAdmin.check(model=Album)
expected = [
checks.Error(
"'admin_checks.TwoAlbumFKAndAnE' has more than one ForeignKey to 'admin_checks.Album'.",
hint=None,
obj=TwoAlbumFKAndAnEInline,
id='admin.E202',
)
]
self.assertEqual(errors, expected)
def test_inline_with_specified(self):
class TwoAlbumFKAndAnEInline(admin.TabularInline):
model = TwoAlbumFKAndAnE
fk_name = "album1"
class MyAdmin(admin.ModelAdmin):
inlines = [TwoAlbumFKAndAnEInline]
errors = MyAdmin.check(model=Album)
self.assertEqual(errors, [])
def test_readonly(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("title",)
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_readonly_on_method(self):
def my_function(obj):
pass
class SongAdmin(admin.ModelAdmin):
readonly_fields = (my_function,)
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_readonly_on_modeladmin(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("readonly_method_on_modeladmin",)
def readonly_method_on_modeladmin(self, obj):
pass
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_readonly_method_on_model(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("readonly_method_on_model",)
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_nonexistant_field(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = ("title", "nonexistant")
errors = SongAdmin.check(model=Song)
expected = [
checks.Error(
("The value of 'readonly_fields[1]' is not a callable, an attribute "
"of 'SongAdmin', or an attribute of 'admin_checks.Song'."),
hint=None,
obj=SongAdmin,
id='admin.E035',
)
]
self.assertEqual(errors, expected)
def test_nonexistant_field_on_inline(self):
class CityInline(admin.TabularInline):
model = City
readonly_fields = ['i_dont_exist'] # Missing attribute
errors = CityInline.check(State)
expected = [
checks.Error(
("The value of 'readonly_fields[0]' is not a callable, an attribute "
"of 'CityInline', or an attribute of 'admin_checks.City'."),
hint=None,
obj=CityInline,
id='admin.E035',
)
]
self.assertEqual(errors, expected)
def test_extra(self):
class SongAdmin(admin.ModelAdmin):
def awesome_song(self, instance):
if instance.title == "Born to Run":
return "Best Ever!"
return "Status unknown."
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_readonly_lambda(self):
class SongAdmin(admin.ModelAdmin):
readonly_fields = (lambda obj: "test",)
errors = SongAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_graceful_m2m_fail(self):
"""
Regression test for #12203/#12237 - Fail more gracefully when a M2M field that
specifies the 'through' option is included in the 'fields' or the 'fieldsets'
ModelAdmin options.
"""
class BookAdmin(admin.ModelAdmin):
fields = ['authors']
errors = BookAdmin.check(model=Book)
expected = [
checks.Error(
("The value of 'fields' cannot include the ManyToManyField 'authors', "
"because that field manually specifies a relationship model."),
hint=None,
obj=BookAdmin,
id='admin.E013',
)
]
self.assertEqual(errors, expected)
def test_cannot_include_through(self):
class FieldsetBookAdmin(admin.ModelAdmin):
fieldsets = (
('Header 1', {'fields': ('name',)}),
('Header 2', {'fields': ('authors',)}),
)
errors = FieldsetBookAdmin.check(model=Book)
expected = [
checks.Error(
("The value of 'fieldsets[1][1][\"fields\"]' cannot include the ManyToManyField "
"'authors', because that field manually specifies a relationship model."),
hint=None,
obj=FieldsetBookAdmin,
id='admin.E013',
)
]
self.assertEqual(errors, expected)
def test_nested_fields(self):
class NestedFieldsAdmin(admin.ModelAdmin):
fields = ('price', ('name', 'subtitle'))
errors = NestedFieldsAdmin.check(model=Book)
self.assertEqual(errors, [])
def test_nested_fieldsets(self):
class NestedFieldsetAdmin(admin.ModelAdmin):
fieldsets = (
('Main', {'fields': ('price', ('name', 'subtitle'))}),
)
errors = NestedFieldsetAdmin.check(model=Book)
self.assertEqual(errors, [])
def test_explicit_through_override(self):
"""
Regression test for #12209 -- If the explicitly provided through model
is specified as a string, the admin should still be able use
Model.m2m_field.through
"""
class AuthorsInline(admin.TabularInline):
model = Book.authors.through
class BookAdmin(admin.ModelAdmin):
inlines = [AuthorsInline]
errors = BookAdmin.check(model=Book)
self.assertEqual(errors, [])
def test_non_model_fields(self):
"""
Regression for ensuring ModelAdmin.fields can contain non-model fields
that broke with r11737
"""
class SongForm(forms.ModelForm):
extra_data = forms.CharField()
class FieldsOnFormOnlyAdmin(admin.ModelAdmin):
form = SongForm
fields = ['title', 'extra_data']
errors = FieldsOnFormOnlyAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_non_model_first_field(self):
"""
Regression for ensuring ModelAdmin.field can handle first elem being a
non-model field (test fix for UnboundLocalError introduced with r16225).
"""
class SongForm(forms.ModelForm):
extra_data = forms.CharField()
class Meta:
model = Song
fields = '__all__'
class FieldsOnFormOnlyAdmin(admin.ModelAdmin):
form = SongForm
fields = ['extra_data', 'title']
errors = FieldsOnFormOnlyAdmin.check(model=Song)
self.assertEqual(errors, [])
def test_validator_compatibility(self):
class MyValidator(object):
def validate(self, cls, model):
raise ImproperlyConfigured("error!")
class MyModelAdmin(admin.ModelAdmin):
validator_class = MyValidator
with warnings.catch_warnings(record=True):
warnings.filterwarnings('ignore', module='django.contrib.admin.options')
errors = MyModelAdmin.check(model=Song)
expected = [
checks.Error(
'error!',
hint=None,
obj=MyModelAdmin,
)
]
self.assertEqual(errors, expected)
def test_check_sublists_for_duplicates(self):
class MyModelAdmin(admin.ModelAdmin):
fields = ['state', ['state']]
errors = MyModelAdmin.check(model=Song)
expected = [
checks.Error(
"The value of 'fields' contains duplicate field(s).",
hint=None,
obj=MyModelAdmin,
id='admin.E006'
)
]
self.assertEqual(errors, expected)
def test_check_fieldset_sublists_for_duplicates(self):
class MyModelAdmin(admin.ModelAdmin):
fieldsets = [
(None, {
'fields': ['title', 'album', ('title', 'album')]
}),
]
errors = MyModelAdmin.check(model=Song)
expected = [
checks.Error(
"There are duplicate field(s) in 'fieldsets[0][1]'.",
hint=None,
obj=MyModelAdmin,
id='admin.E012'
)
]
self.assertEqual(errors, expected)
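# Every test above reduces to the same motion: define a (mis)configured
# ModelAdmin, call its check() classmethod, and compare the returned list
# of checks.Error objects. Condensed illustration reusing this module's
# admin/Song imports:
class BrokenSongAdmin(admin.ModelAdmin):
    exclude = 'title'  # not a list/tuple, so check() reports admin.E014

assert [e.id for e in BrokenSongAdmin.check(model=Song)] == ['admin.E014']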
| 31.244
| 104
| 0.563692
|
| true
| true
|
1c4695708c7b41b299a783d8c3a65e327eb9a2a5
| 2,240
|
py
|
Python
|
cekit/test/behave_tester.py
|
stephengaito/cekit
|
d6d254af6bb6820e5c725680bd77b6c195636cf6
|
[
"MIT"
] | 59
|
2018-03-01T14:32:17.000Z
|
2022-03-31T12:18:05.000Z
|
cekit/test/behave_tester.py
|
stephengaito/cekit
|
d6d254af6bb6820e5c725680bd77b6c195636cf6
|
[
"MIT"
] | 446
|
2018-03-02T08:20:49.000Z
|
2022-03-20T10:10:42.000Z
|
cekit/test/behave_tester.py
|
stephengaito/cekit
|
d6d254af6bb6820e5c725680bd77b6c195636cf6
|
[
"MIT"
] | 29
|
2018-03-01T13:27:55.000Z
|
2022-02-08T08:15:39.000Z
|
import logging
import os
from cekit.builder import Command
from cekit.generator.base import Generator
from cekit.test.collector import BehaveTestCollector
from cekit.test.behave_runner import BehaveTestRunner
LOGGER = logging.getLogger('cekit')
class BehaveTester(Command):
"""
Tester implementation for the Behave framework
"""
def __init__(self, params):
super(BehaveTester, self).__init__('behave', Command.TYPE_TESTER)
self.params = params
self.collected = False
self.test_collector = BehaveTestCollector(os.path.dirname(self.params.descriptor), self.params.target)
self.test_runner = BehaveTestRunner(self.params.target)
self.generator = None
def prepare(self):
self.generator = Generator(self.params.descriptor,
self.params.target,
self.params.overrides)
# Handle dependencies for selected generator, if any
LOGGER.debug("Checking CEKit generate dependencies...")
self.dependency_handler.handle(self.generator, self.params)
self.generator.init()
# TODO: investigate if we can improve handling different schema versions
self.collected = self.test_collector.collect(
self.generator.image.get('schema_version'), self.params.steps_url)
if self.collected:
# Handle test dependencies, if any
LOGGER.debug("Checking CEKit test collector dependencies...")
self.dependency_handler.handle(self.test_collector, self.params)
LOGGER.debug("Checking CEKit test runner dependencies...")
self.dependency_handler.handle(self.test_runner, self.params)
def run(self):
if not self.collected:
LOGGER.warning("No test collected, test can't be run.")
return
test_tags = [self.generator.get_tags()[0]]
# If wip is specified set tags to @wip
if self.params.wip:
test_tags = ['@wip']
image = self.params.image
if not image:
image = self.generator.get_tags()[0]
self.test_runner.run(image, test_tags,
test_names=self.params.names)
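# Hypothetical driver for the class above; in the real CLI `params` comes
# from argument parsing, so every attribute value below is a placeholder.
from argparse import Namespace

params = Namespace(
    descriptor='image.yaml',
    target='target',
    overrides=[],
    steps_url='https://github.com/cekit/behave-test-steps.git',  # assumed URL
    wip=False,
    image=None,
    names=[],
)

tester = BehaveTester(params)
tester.prepare()
tester.run()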
| 32.941176
| 110
| 0.647768
|
| true
| true
|
1c4696b02290e4a4e51e7a76e7e5bf7ddffbc1f9
| 2,567
|
py
|
Python
|
tests/test_io.py
|
nthndy/BayesianTracker
|
443f984ce830373e140f744a27179debdf34ae58
|
[
"MIT"
] | null | null | null |
tests/test_io.py
|
nthndy/BayesianTracker
|
443f984ce830373e140f744a27179debdf34ae58
|
[
"MIT"
] | null | null | null |
tests/test_io.py
|
nthndy/BayesianTracker
|
443f984ce830373e140f744a27179debdf34ae58
|
[
"MIT"
] | null | null | null |
import os
from pathlib import Path
import numpy as np
import pytest
from _utils import (
create_test_object,
create_test_properties,
simple_tracker_example,
)
import btrack
def test_hdf5_write(tmp_path):
"""Test writing an HDF5 file with some objects."""
fn = os.path.join(tmp_path, "test.h5")
objects = []
for i in range(10):
obj, _ = create_test_object(id=i)
objects.append(obj)
with btrack.dataio.HDF5FileHandler(fn, "w") as h:
h.write_objects(objects)
# now try to read those objects and compare with those used to write
with btrack.dataio.HDF5FileHandler(fn, "r") as h:
objects_from_file = h.objects
properties = ["x", "y", "z", "t", "label", "ID"]
for orig, read in zip(objects, objects_from_file):
for p in properties:
            # use allclose, since the h5 file stores float32 by default
np.testing.assert_allclose(getattr(orig, p), getattr(read, p))
def test_hdf5_write_with_properties(tmp_path):
"""Test writing an HDF5 file with some objects with additional properties."""
fn = os.path.join(tmp_path, "test.h5")
objects = []
for i in range(10):
obj, _ = create_test_object(id=i)
obj.properties = create_test_properties()
objects.append(obj)
with btrack.dataio.HDF5FileHandler(fn, "w") as h:
h.write_objects(objects)
# now try to read those objects and compare with those used to write
with btrack.dataio.HDF5FileHandler(fn, "r") as h:
objects_from_file = h.objects
extra_props = list(create_test_properties().keys())
properties = ["x", "y", "z", "t", "label", "ID"]
for orig, read in zip(objects, objects_from_file):
for p in properties:
            # use allclose, since the h5 file stores float32 by default
np.testing.assert_allclose(getattr(orig, p), getattr(read, p))
for p in extra_props:
np.testing.assert_allclose(orig.properties[p], read.properties[p])
@pytest.mark.parametrize("export_format", ["", ".csv", ".h5"])
def test_tracker_export(tmp_path, export_format):
"""Test that file export works using the `export_delegator`."""
tracker, _ = simple_tracker_example()
export_filename = f"test{export_format}"
# string type path
fn = os.path.join(tmp_path, export_filename)
tracker.export(fn, obj_type="obj_type_1")
# Pathlib type path
fn = Path(tmp_path) / export_filename
tracker.export(fn, obj_type="obj_type_1")
if export_format:
assert os.path.exists(fn)
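# Why the tests use assert_allclose rather than exact equality: the HDF5
# writer stores float32 by default, which drops low-order bits. Minimal
# demonstration (reuses the numpy import at the top of this file):
x = np.float64(0.1234567890123)
assert np.float32(x) != x                                # precision was lost
np.testing.assert_allclose(np.float32(x), x, rtol=1e-6)  # but it is close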
| 29.848837
| 81
| 0.66342
|
| true
| true
|
1c46971c776696ac88b0afdf08de5e2e9aa0b53e
| 4,999
|
py
|
Python
|
modules/tools/rosbag/extract_images.py
|
seeclong/apollo
|
99c8afb5ebcae2a3c9359a156a957ff03944b27b
|
[
"Apache-2.0"
] | 27
|
2019-04-06T02:27:14.000Z
|
2021-11-27T13:47:06.000Z
|
modules/tools/rosbag/extract_images.py
|
seeclong/apollo
|
99c8afb5ebcae2a3c9359a156a957ff03944b27b
|
[
"Apache-2.0"
] | 5
|
2021-10-06T22:57:52.000Z
|
2022-02-27T14:04:05.000Z
|
modules/tools/rosbag/extract_images.py
|
seeclong/apollo
|
99c8afb5ebcae2a3c9359a156a957ff03944b27b
|
[
"Apache-2.0"
] | 38
|
2019-04-15T10:58:37.000Z
|
2022-01-27T08:52:39.000Z
|
#!/usr/bin/env python
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
Extract images from a recorded bag.
Usage:
extract_images.py --input_bag=a.bag
See the gflags for more optional args.
"""
import os
import sys
import cv2
import cv_bridge
import gflags
import glog
import rosbag
import yaml
# Required flags.
gflags.DEFINE_string('input_bag', None, 'Input bag path.')
# Optional flags.
gflags.DEFINE_string('output_path', './', 'Output path.')
gflags.DEFINE_string('weather', 'CLEAR', 'Options: CLEAR, SUNNY, RAINY.')
gflags.DEFINE_string('scene', 'CITY', 'Options: CITY, HIGHWAY.')
gflags.DEFINE_string('time_interval', 'DAYTIME', 'Options: DAYTIME, NIGHT.')
gflags.DEFINE_float('extract_rate', 3, 'Minimum interval between extracted images, in seconds.')
# Stable flags which rarely change.
gflags.DEFINE_string('topic', '/apollo/sensor/camera/obstacle/front_6mm',
'Source topic.')
gflags.DEFINE_integer('sensor_id', 436, 'Source sensor ID.')
gflags.DEFINE_string('capture_place', 'Multiple', 'E.g.: Multiple, Sunnyvale.')
def extract_meta_info(bag):
"""Extract information from a bag file, return an info dict."""
# Extract from bag info.
    info_dict = yaml.safe_load(bag._get_yaml_info())
meta_info = {
'car_id': 'MKZ056',
'driver': 'UNKNOWN',
'start': int(info_dict['start']),
'end': int(info_dict['end']),
}
# Extract from bag message.
kStaticInfoTopic = '/apollo/monitor/static_info'
static_info = next(
(msg for _, msg, _ in bag.read_messages(topics=[kStaticInfoTopic])),
None)
if static_info is not None:
if static_info.vehicle.name:
meta_info['car_id'] = static_info.vehicle.name.upper()
if static_info.user.driver:
meta_info['driver'] = static_info.user.driver
return meta_info
def extract_images(bag, dst_dir, args):
"""Extract images to the destination dir."""
time_nsecs = []
pre_time_sec = 0
bridge = cv_bridge.CvBridge()
seq = 0
for _, msg, t in bag.read_messages(topics=args.topic):
# Check timestamp.
cur_time_sec = msg.header.stamp.to_sec()
if cur_time_sec - pre_time_sec < args.extract_rate:
continue
pre_time_sec = cur_time_sec
time_nsecs.append(msg.header.stamp.to_nsec())
# Save image.
seq += 1
msg.encoding = 'yuv422'
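        # cv_bridge hands back the packed two-channel YUYV buffer; OpenCV
        # then unpacks it to BGR so it can be written out as JPEG.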
img = bridge.imgmsg_to_cv2(msg, 'yuv422')
img = cv2.cvtColor(img, cv2.COLOR_YUV2BGR_YUYV)
img_file = os.path.join(dst_dir, '{}.jpg'.format(seq))
cv2.imwrite(img_file, img)
glog.info('#{}: header.seq={}, header.stamp={}, saved as {}'.format(
seq, msg.header.seq, cur_time_sec, img_file))
return time_nsecs
def process_bag(bag, args):
"""Process a bag."""
meta_info = extract_meta_info(bag)
dst_dir_name = '{}_{}_{}_{}'.format(meta_info['car_id'], args.sensor_id,
meta_info['start'], meta_info['end'])
dst_dir = os.path.join(args.output_path, dst_dir_name)
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
# Generate meta file.
meta_file = os.path.join(dst_dir, dst_dir_name + '.meta')
with open(meta_file, 'w') as meta_w:
meta_w.write('car_id:{}\n'.format(meta_info['car_id']))
meta_w.write('driver:{}\n'.format(meta_info['driver']))
meta_w.write('capture_place:{}\n'.format(args.capture_place))
meta_w.write('weather:{}\n'.format(args.weather))
meta_w.write('topic:{}\n'.format(args.topic))
meta_w.write('scene:{}\n'.format(args.scene))
meta_w.write('time_interval:{}\n'.format(args.time_interval))
# Generate images.
time_nsecs = extract_images(bag, dst_dir, args)
# Generate timestamp sequence.
timestamp_file = os.path.join(dst_dir, 'timestamp.txt')
with open(timestamp_file, 'w') as timestamp_w:
timestamp_w.write('seq\ttimestamp_ns\n')
for seq, timestamp_ns in enumerate(time_nsecs, start=1):
timestamp_w.write('{}\t{}\n'.format(seq, timestamp_ns))
def main():
"""Entry point."""
gflags.FLAGS(sys.argv)
with rosbag.Bag(gflags.FLAGS.input_bag) as bag:
process_bag(bag, gflags.FLAGS)
if __name__ == '__main__':
main()
| 34.475862
| 79
| 0.640928
| true
| true
|
|
1c46987bff6123b37edc08ade402cc724f07010b
| 9,073
|
py
|
Python
|
weechat/python/grep_filter.py
|
TyranicMoron/dotfiles
|
277b85c84cc2d0ed542175db218fc6313b3d85c0
|
[
"MIT"
] | 1
|
2017-04-18T20:05:22.000Z
|
2017-04-18T20:05:22.000Z
|
weechat/python/grep_filter.py
|
TyranicMoron/dotfiles
|
277b85c84cc2d0ed542175db218fc6313b3d85c0
|
[
"MIT"
] | 2
|
2015-06-26T10:53:57.000Z
|
2015-06-26T11:22:56.000Z
|
weechat/python/grep_filter.py
|
MatthewCox/dotfiles
|
277b85c84cc2d0ed542175db218fc6313b3d85c0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 by Simmo Saan <simmo.saan@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# History:
#
# 2019-06-07, Trygve Aaberge <trygveaa@gmail.com>
# version 0.10: remove newlines from command completion
# 2015-10-04, Simmo Saan <simmo.saan@gmail.com>
# version 0.9: fix text search imitation in filter
# 2015-08-27, Simmo Saan <simmo.saan@gmail.com>
# version 0.8: add documentation
# 2015-08-25, Simmo Saan <simmo.saan@gmail.com>
# version 0.7: mute filter add/del
# 2015-08-25, Simmo Saan <simmo.saan@gmail.com>
# version 0.6: imitate search settings in filter
# 2015-08-25, Simmo Saan <simmo.saan@gmail.com>
# version 0.5: option for bar item text
# 2015-08-25, Simmo Saan <simmo.saan@gmail.com>
# version 0.4: option for default state
# 2015-08-25, Simmo Saan <simmo.saan@gmail.com>
# version 0.3: allow toggling during search
# 2015-08-25, Simmo Saan <simmo.saan@gmail.com>
# version 0.2: add bar item for indication
# 2015-08-25, Simmo Saan <simmo.saan@gmail.com>
# version 0.1: initial script
#
"""
Filter buffers automatically while searching them
"""
from __future__ import print_function
SCRIPT_NAME = "grep_filter"
SCRIPT_AUTHOR = "Simmo Saan <simmo.saan@gmail.com>"
SCRIPT_VERSION = "0.10"
SCRIPT_LICENSE = "GPL3"
SCRIPT_DESC = "Filter buffers automatically while searching them"
SCRIPT_REPO = "https://github.com/sim642/grep_filter"
SCRIPT_COMMAND = SCRIPT_NAME
SCRIPT_BAR_ITEM = SCRIPT_NAME
SCRIPT_LOCALVAR = SCRIPT_NAME
IMPORT_OK = True
try:
import weechat
except ImportError:
print("This script must be run under WeeChat.")
print("Get WeeChat now at: http://www.weechat.org/")
IMPORT_OK = False
import re # re.escape
SETTINGS = {
"enable": (
"off",
"enable automatically start filtering when searching"),
"bar_item": (
"grep",
"text to show in bar item when filtering")
}
KEYS = {
"ctrl-G": "/%s toggle" % SCRIPT_COMMAND
}
def get_merged_buffers(ptr):
"""
Get a list of buffers which are merged with "ptr".
"""
hdata = weechat.hdata_get("buffer")
buffers = weechat.hdata_get_list(hdata, "gui_buffers")
buffer = weechat.hdata_search(hdata, buffers, "${buffer.number} == %i" % weechat.hdata_integer(hdata, ptr, "number"), 1)
nbuffer = weechat.hdata_move(hdata, buffer, 1)
ret = []
while buffer:
ret.append(weechat.hdata_string(hdata, buffer, "full_name"))
if (weechat.hdata_integer(hdata, buffer, "number") == weechat.hdata_integer(hdata, nbuffer, "number")):
buffer = nbuffer
nbuffer = weechat.hdata_move(hdata, nbuffer, 1)
else:
buffer = None
return ret
def filter_exists(name):
"""
Check whether a filter named "name" exists.
"""
hdata = weechat.hdata_get("filter")
filters = weechat.hdata_get_list(hdata, "gui_filters")
filter = weechat.hdata_search(hdata, filters, "${filter.name} == %s" % name, 1)
return bool(filter)
def filter_del(name):
"""
Delete a filter named "name".
"""
weechat.command(weechat.buffer_search_main(), "/mute filter del %s" % name)
def filter_addreplace(name, buffers, tags, regex):
"""
    Add (or replace if it already exists) a filter named "name" with the specified arguments.
"""
if filter_exists(name):
filter_del(name)
weechat.command(weechat.buffer_search_main(), "/mute filter add %s %s %s %s" % (name, buffers, tags, regex))
def buffer_searching(buffer):
"""
Check whether "buffer" is in search mode.
"""
hdata = weechat.hdata_get("buffer")
return bool(weechat.hdata_integer(hdata, buffer, "text_search"))
def buffer_filtering(buffer):
"""
Check whether "buffer" should be filtered.
"""
local = weechat.buffer_get_string(buffer, "localvar_%s" % SCRIPT_LOCALVAR)
return {"": None, "0": False, "1": True}[local]
def buffer_build_regex(buffer):
"""
Build a regex according to "buffer"'s search settings.
"""
hdata = weechat.hdata_get("buffer")
input = weechat.hdata_string(hdata, buffer, "input_buffer")
exact = weechat.hdata_integer(hdata, buffer, "text_search_exact")
where = weechat.hdata_integer(hdata, buffer, "text_search_where")
regex = weechat.hdata_integer(hdata, buffer, "text_search_regex")
if not regex:
input = re.escape(input)
if exact:
input = "(?-i)%s" % input
filter_regex = None
if where == 1: # message
filter_regex = input
elif where == 2: # prefix
filter_regex = "%s\\t" % input
else: # prefix | message
filter_regex = input # TODO: impossible with current filter regex
return "!%s" % filter_regex
def buffer_update(buffer):
"""
    Refresh filtering in "buffer" by updating (or removing) the filter, then update the bar item.
"""
hdata = weechat.hdata_get("buffer")
buffers = ",".join(get_merged_buffers(buffer))
name = "%s_%s" % (SCRIPT_NAME, buffers)
if buffer_searching(buffer):
if buffer_filtering(buffer):
filter_addreplace(name, buffers, "*", buffer_build_regex(buffer))
elif not buffer_filtering(buffer) and filter_exists(name):
filter_del(name)
elif filter_exists(name):
filter_del(name)
where = weechat.hdata_integer(hdata, buffer, "text_search_where")
weechat.buffer_set(buffer, "localvar_set_%s_warn" % SCRIPT_LOCALVAR, "1" if where == 3 else "0") # warn about incorrect filter
weechat.bar_item_update(SCRIPT_BAR_ITEM)
def input_search_cb(data, signal, buffer):
"""
Handle "input_search" signal.
"""
if buffer_searching(buffer) and buffer_filtering(buffer) is None:
enable = weechat.config_string_to_boolean(weechat.config_get_plugin("enable"))
weechat.buffer_set(buffer, "localvar_set_%s" % SCRIPT_LOCALVAR, "1" if enable else "0")
weechat.buffer_set(buffer, "localvar_set_%s_warn" % SCRIPT_LOCALVAR, "0")
elif not buffer_searching(buffer):
weechat.buffer_set(buffer, "localvar_del_%s" % SCRIPT_LOCALVAR, "")
weechat.buffer_set(buffer, "localvar_del_%s_warn" % SCRIPT_LOCALVAR, "")
buffer_update(buffer)
return weechat.WEECHAT_RC_OK
def input_text_changed_cb(data, signal, buffer):
"""
Handle "input_text_changed" signal.
"""
if buffer_searching(buffer) and buffer_filtering(buffer):
buffers = ",".join(get_merged_buffers(buffer))
name = "%s_%s" % (SCRIPT_NAME, buffers)
filter_addreplace(name, buffers, "*", buffer_build_regex(buffer))
return weechat.WEECHAT_RC_OK
def command_cb(data, buffer, args):
"""
Handle command.
"""
if args == "enable":
weechat.buffer_set(buffer, "localvar_set_%s" % SCRIPT_LOCALVAR, "1")
elif args == "disable":
weechat.buffer_set(buffer, "localvar_set_%s" % SCRIPT_LOCALVAR, "0")
elif args == "toggle":
weechat.buffer_set(buffer, "localvar_set_%s" % SCRIPT_LOCALVAR, "0" if buffer_filtering(buffer) else "1")
else:
pass
buffer_update(buffer)
return weechat.WEECHAT_RC_OK
def bar_item_cb(data, item, window, buffer, extra_info):
"""
Build the bar item's content for "buffer".
"""
buffers = ",".join(get_merged_buffers(buffer))
name = "%s_%s" % (SCRIPT_NAME, buffers)
if filter_exists(name):
warn = int(weechat.buffer_get_string(buffer, "localvar_%s_warn" % SCRIPT_LOCALVAR))
return "%s%s%s" % (
weechat.color("input_text_not_found" if warn else "bar_fg"),
weechat.config_get_plugin("bar_item"),
weechat.color("reset"))
else:
return ""
if __name__ == "__main__" and IMPORT_OK:
if weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE, SCRIPT_DESC, "", ""):
weechat.hook_signal("input_search", "input_search_cb", "")
weechat.hook_signal("input_text_changed", "input_text_changed_cb", "")
weechat.hook_command(SCRIPT_COMMAND, SCRIPT_DESC,
"""enable || disable || toggle""",
""" enable: enable {0} in current buffer
disable: disable {0} in current buffer
toggle: toggle {0} in current buffer
By default a key binding in "search" context is added to toggle with "ctrl-G".
To see the {0} status during search, add the "{1}" item to some bar. With the default configuration you can do that with:
/set weechat.bar.input.items "[input_prompt]+(away),[{1}],[input_search],[input_paste],input_text"
Due to technical limitations of /filter, it is not possible for {0} to filter exactly in "pre|msg" search mode, thus the bar item is shown in a warning color.""".format(SCRIPT_NAME, SCRIPT_BAR_ITEM),
"""enable || disable || toggle""",
"command_cb", "")
weechat.bar_item_new("(extra)%s" % SCRIPT_BAR_ITEM, "bar_item_cb", "")
for option, value in SETTINGS.items():
if not weechat.config_is_set_plugin(option):
weechat.config_set_plugin(option, value[0])
weechat.config_set_desc_plugin(option, "%s (default: \"%s\")" % (value[1], value[0]))
weechat.key_bind("search", KEYS)
| 30.548822
| 183
| 0.721922
|
from __future__ import print_function
SCRIPT_NAME = "grep_filter"
SCRIPT_AUTHOR = "Simmo Saan <simmo.saan@gmail.com>"
SCRIPT_VERSION = "0.10"
SCRIPT_LICENSE = "GPL3"
SCRIPT_DESC = "Filter buffers automatically while searching them"
SCRIPT_REPO = "https://github.com/sim642/grep_filter"
SCRIPT_COMMAND = SCRIPT_NAME
SCRIPT_BAR_ITEM = SCRIPT_NAME
SCRIPT_LOCALVAR = SCRIPT_NAME
IMPORT_OK = True
try:
import weechat
except ImportError:
print("This script must be run under WeeChat.")
print("Get WeeChat now at: http://www.weechat.org/")
IMPORT_OK = False
import re
SETTINGS = {
"enable": (
"off",
"enable automatically start filtering when searching"),
"bar_item": (
"grep",
"text to show in bar item when filtering")
}
KEYS = {
"ctrl-G": "/%s toggle" % SCRIPT_COMMAND
}
def get_merged_buffers(ptr):
hdata = weechat.hdata_get("buffer")
buffers = weechat.hdata_get_list(hdata, "gui_buffers")
buffer = weechat.hdata_search(hdata, buffers, "${buffer.number} == %i" % weechat.hdata_integer(hdata, ptr, "number"), 1)
nbuffer = weechat.hdata_move(hdata, buffer, 1)
ret = []
while buffer:
ret.append(weechat.hdata_string(hdata, buffer, "full_name"))
if (weechat.hdata_integer(hdata, buffer, "number") == weechat.hdata_integer(hdata, nbuffer, "number")):
buffer = nbuffer
nbuffer = weechat.hdata_move(hdata, nbuffer, 1)
else:
buffer = None
return ret
def filter_exists(name):
hdata = weechat.hdata_get("filter")
filters = weechat.hdata_get_list(hdata, "gui_filters")
filter = weechat.hdata_search(hdata, filters, "${filter.name} == %s" % name, 1)
return bool(filter)
def filter_del(name):
weechat.command(weechat.buffer_search_main(), "/mute filter del %s" % name)
def filter_addreplace(name, buffers, tags, regex):
if filter_exists(name):
filter_del(name)
weechat.command(weechat.buffer_search_main(), "/mute filter add %s %s %s %s" % (name, buffers, tags, regex))
def buffer_searching(buffer):
hdata = weechat.hdata_get("buffer")
return bool(weechat.hdata_integer(hdata, buffer, "text_search"))
def buffer_filtering(buffer):
local = weechat.buffer_get_string(buffer, "localvar_%s" % SCRIPT_LOCALVAR)
return {"": None, "0": False, "1": True}[local]
def buffer_build_regex(buffer):
hdata = weechat.hdata_get("buffer")
input = weechat.hdata_string(hdata, buffer, "input_buffer")
exact = weechat.hdata_integer(hdata, buffer, "text_search_exact")
where = weechat.hdata_integer(hdata, buffer, "text_search_where")
regex = weechat.hdata_integer(hdata, buffer, "text_search_regex")
if not regex:
input = re.escape(input)
if exact:
input = "(?-i)%s" % input
filter_regex = None
if where == 1:
filter_regex = input
elif where == 2:
filter_regex = "%s\\t" % input
else:
filter_regex = input
return "!%s" % filter_regex
def buffer_update(buffer):
hdata = weechat.hdata_get("buffer")
buffers = ",".join(get_merged_buffers(buffer))
name = "%s_%s" % (SCRIPT_NAME, buffers)
if buffer_searching(buffer):
if buffer_filtering(buffer):
filter_addreplace(name, buffers, "*", buffer_build_regex(buffer))
elif not buffer_filtering(buffer) and filter_exists(name):
filter_del(name)
elif filter_exists(name):
filter_del(name)
where = weechat.hdata_integer(hdata, buffer, "text_search_where")
weechat.buffer_set(buffer, "localvar_set_%s_warn" % SCRIPT_LOCALVAR, "1" if where == 3 else "0")
weechat.bar_item_update(SCRIPT_BAR_ITEM)
def input_search_cb(data, signal, buffer):
if buffer_searching(buffer) and buffer_filtering(buffer) is None:
enable = weechat.config_string_to_boolean(weechat.config_get_plugin("enable"))
weechat.buffer_set(buffer, "localvar_set_%s" % SCRIPT_LOCALVAR, "1" if enable else "0")
weechat.buffer_set(buffer, "localvar_set_%s_warn" % SCRIPT_LOCALVAR, "0")
elif not buffer_searching(buffer):
weechat.buffer_set(buffer, "localvar_del_%s" % SCRIPT_LOCALVAR, "")
weechat.buffer_set(buffer, "localvar_del_%s_warn" % SCRIPT_LOCALVAR, "")
buffer_update(buffer)
return weechat.WEECHAT_RC_OK
def input_text_changed_cb(data, signal, buffer):
if buffer_searching(buffer) and buffer_filtering(buffer):
buffers = ",".join(get_merged_buffers(buffer))
name = "%s_%s" % (SCRIPT_NAME, buffers)
filter_addreplace(name, buffers, "*", buffer_build_regex(buffer))
return weechat.WEECHAT_RC_OK
def command_cb(data, buffer, args):
if args == "enable":
weechat.buffer_set(buffer, "localvar_set_%s" % SCRIPT_LOCALVAR, "1")
elif args == "disable":
weechat.buffer_set(buffer, "localvar_set_%s" % SCRIPT_LOCALVAR, "0")
elif args == "toggle":
weechat.buffer_set(buffer, "localvar_set_%s" % SCRIPT_LOCALVAR, "0" if buffer_filtering(buffer) else "1")
else:
pass
buffer_update(buffer)
return weechat.WEECHAT_RC_OK
def bar_item_cb(data, item, window, buffer, extra_info):
buffers = ",".join(get_merged_buffers(buffer))
name = "%s_%s" % (SCRIPT_NAME, buffers)
if filter_exists(name):
warn = int(weechat.buffer_get_string(buffer, "localvar_%s_warn" % SCRIPT_LOCALVAR))
return "%s%s%s" % (
weechat.color("input_text_not_found" if warn else "bar_fg"),
weechat.config_get_plugin("bar_item"),
weechat.color("reset"))
else:
return ""
if __name__ == "__main__" and IMPORT_OK:
if weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE, SCRIPT_DESC, "", ""):
weechat.hook_signal("input_search", "input_search_cb", "")
weechat.hook_signal("input_text_changed", "input_text_changed_cb", "")
weechat.hook_command(SCRIPT_COMMAND, SCRIPT_DESC,
"""enable || disable || toggle""",
""" enable: enable {0} in current buffer
disable: disable {0} in current buffer
toggle: toggle {0} in current buffer
By default a key binding in "search" context is added to toggle with "ctrl-G".
To see the {0} status during search, add the "{1}" item to some bar. With the default configuration you can do that with:
/set weechat.bar.input.items "[input_prompt]+(away),[{1}],[input_search],[input_paste],input_text"
Due to technical limitations of /filter, it is not possible for {0} to filter exactly in "pre|msg" search mode, thus the bar item is shown in a warning color.""".format(SCRIPT_NAME, SCRIPT_BAR_ITEM),
"""enable || disable || toggle""",
"command_cb", "")
weechat.bar_item_new("(extra)%s" % SCRIPT_BAR_ITEM, "bar_item_cb", "")
for option, value in SETTINGS.items():
if not weechat.config_is_set_plugin(option):
weechat.config_set_plugin(option, value[0])
weechat.config_set_desc_plugin(option, "%s (default: \"%s\")" % (value[1], value[0]))
weechat.key_bind("search", KEYS)
| true
| true
|
1c46996f63d32c290afa2e3cc34a753d12d8719d
| 3,601
|
py
|
Python
|
tricks/lsh_pp_pretaining.py
|
yanzhoupan/dlrm_ssm
|
49ca1e4487ff0e148065c0a133acb078835a9b86
|
[
"MIT"
] | 3
|
2021-03-16T03:33:44.000Z
|
2022-03-14T08:48:01.000Z
|
tricks/lsh_pp_pretaining.py
|
yanzhoupan/dlrm_ssm
|
49ca1e4487ff0e148065c0a133acb078835a9b86
|
[
"MIT"
] | 2
|
2021-03-25T08:19:25.000Z
|
2021-04-10T16:43:45.000Z
|
tricks/lsh_pp_pretaining.py
|
yanzhoupan/dlrm_ssm
|
49ca1e4487ff0e148065c0a133acb078835a9b86
|
[
"MIT"
] | 1
|
2021-09-08T21:47:06.000Z
|
2021-09-08T21:47:06.000Z
|
# data preprocessing for LSH embedding
import numpy as np
import torch
from min_hash_generator import SparseBitVectorMinHashGenerator
from collections import defaultdict
# import multiprocessing
from tqdm import tqdm
import time
import random
import concurrent.futures
import pdb
seed = 123
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# use partial data set to get minhash table.
min_hash_gen = None
val_indices = None
import sys
if len(sys.argv) < 4:  # all three positional arguments are required
    print("Usage: <script> embedding hash num_pt")
    sys.exit(1)
EMBEDDING = int(sys.argv[1])
NUM_HASH = int(sys.argv[2])
NUM_PT = int(sys.argv[3])
print("EMB:",EMBEDDING, "NUMH",NUM_HASH, "NUM_PT",NUM_PT)
def compute(start, end):
global min_hash_table
p_min_hash_table = np.zeros((end-start, EMBEDDING))
for val_id in range(start, end):
p_min_hash_table[val_id-start] = min_hash_gen.generate(val_indices[val_id])
return start,end ,p_min_hash_table
def getBigMinHashTable():
global min_hash_gen, min_hash_table, val_indices
data = np.load('./input/kaggleAdDisplayChallenge_processed.npz')
data_num, cat_num = data["X_cat"].shape # (45840617, 26) for criteo
partial_idx = np.random.choice(np.arange(data_num), size=NUM_PT, replace=False)
partial_cat_data = data['X_cat'][partial_idx]
print(partial_cat_data.shape)
start_time = time.time()
np.savez(r'./cat_counts.npz', cat_counts = data['counts'])
base = 0
val_indices = defaultdict(lambda:[])
    # generate signature matrix for category values (partial data)
for fea_id in tqdm(range(cat_num)):
cat_fea = partial_cat_data[:, fea_id]
for doc_id in range(len(cat_fea)): # loop over docs
val_indices[cat_fea[doc_id] + base].append(doc_id)
for val in range(data['counts'][fea_id]):
if val_indices[val+base] == []:
val_indices[val+base] = [random.randint(0, data_num+1)] # set val_indices to a random place if never seen it
base += data['counts'][fea_id]
embedding_dim = EMBEDDING
min_hash_table = np.zeros((len(val_indices), embedding_dim))
input_size = len(cat_fea) # number of the data items
min_hash_gen = SparseBitVectorMinHashGenerator(input_size, embedding_dim, NUM_HASH)
batch_size=1000
with concurrent.futures.ProcessPoolExecutor(50) as executor:
print("submitting jobs")
futures = []
print ("total", len(val_indices))
total = len(val_indices)
num_batches = int(np.ceil(len(val_indices) / batch_size))
for i in tqdm(range(num_batches)):
start = i * batch_size
end = min(total, start + batch_size)
if end > start:
futures.append(executor.submit(compute, start, end))
#compute(start, end)
ip = 0
for res in tqdm(concurrent.futures.as_completed(futures), total = num_batches):
st,ed,output = res.result()
ip = ip + 1
min_hash_table[st:ed,:] = output
#print(st, ed, np.sum(min_hash_table[st:ed]))
np.savez(r'./input/bigMinHashTable_H'+ str(NUM_HASH) + '_E' + str(EMBEDDING)+ '_P' + str(NUM_PT) + '.npz', big_min_hash_table = min_hash_table.astype(int))
end_time = time.time()
print(end_time - start_time)
if __name__ == "__main__":
# getMinHashTable()
getBigMinHashTable()
# bigMinHashTable = np.load('./input/bigMinHashTable.npz')
# minHashTables = np.load('./input/minHashTables.npz')
# print(len(minHashTables['arr_0'][:, 0]))
# print(len(bigMinHashTable['big_min_hash_table'][:, 0]))
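# --- illustration, not from the repo ---
# Toy view of the val_indices construction above, with counts = [2, 3]:
# global value ids are 0..1 for feature 0 and 2..4 for feature 1 (offset by
# `base`), and each id maps to the row numbers of the sampled docs using it:
#   X_cat = [[0, 1],   -> val_indices[0] = [0], val_indices[2 + 1] = [0]
#            [1, 0]]   -> val_indices[1] = [1], val_indices[2 + 0] = [1]
# Values never seen in the sample get one random doc id, so every row of the
# minhash table still has a non-empty index list to hash.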
| 35.303922
| 159
| 0.67759
|
import numpy as np
import torch
from min_hash_generator import SparseBitVectorMinHashGenerator
from collections import defaultdict
from tqdm import tqdm
import time
import random
import concurrent.futures
import pdb
seed = 123
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
min_hash_gen = None
val_indices = None
import sys
if len(sys.argv) < 4:
    print("Usage: <script> embedding hash num_pt")
    sys.exit(1)
EMBEDDING = int(sys.argv[1])
NUM_HASH = int(sys.argv[2])
NUM_PT = int(sys.argv[3])
print("EMB:",EMBEDDING, "NUMH",NUM_HASH, "NUM_PT",NUM_PT)
def compute(start, end):
global min_hash_table
p_min_hash_table = np.zeros((end-start, EMBEDDING))
for val_id in range(start, end):
p_min_hash_table[val_id-start] = min_hash_gen.generate(val_indices[val_id])
return start,end ,p_min_hash_table
def getBigMinHashTable():
global min_hash_gen, min_hash_table, val_indices
data = np.load('./input/kaggleAdDisplayChallenge_processed.npz')
data_num, cat_num = data["X_cat"].shape
partial_idx = np.random.choice(np.arange(data_num), size=NUM_PT, replace=False)
partial_cat_data = data['X_cat'][partial_idx]
print(partial_cat_data.shape)
start_time = time.time()
np.savez(r'./cat_counts.npz', cat_counts = data['counts'])
base = 0
val_indices = defaultdict(lambda:[])
for fea_id in tqdm(range(cat_num)):
cat_fea = partial_cat_data[:, fea_id]
for doc_id in range(len(cat_fea)):
val_indices[cat_fea[doc_id] + base].append(doc_id)
for val in range(data['counts'][fea_id]):
if val_indices[val+base] == []:
val_indices[val+base] = [random.randint(0, data_num+1)]
base += data['counts'][fea_id]
embedding_dim = EMBEDDING
min_hash_table = np.zeros((len(val_indices), embedding_dim))
input_size = len(cat_fea)
min_hash_gen = SparseBitVectorMinHashGenerator(input_size, embedding_dim, NUM_HASH)
batch_size=1000
with concurrent.futures.ProcessPoolExecutor(50) as executor:
print("submitting jobs")
futures = []
print ("total", len(val_indices))
total = len(val_indices)
num_batches = int(np.ceil(len(val_indices) / batch_size))
for i in tqdm(range(num_batches)):
start = i * batch_size
end = min(total, start + batch_size)
if end > start:
futures.append(executor.submit(compute, start, end))
ip = 0
for res in tqdm(concurrent.futures.as_completed(futures), total = num_batches):
st,ed,output = res.result()
ip = ip + 1
min_hash_table[st:ed,:] = output
np.savez(r'./input/bigMinHashTable_H'+ str(NUM_HASH) + '_E' + str(EMBEDDING)+ '_P' + str(NUM_PT) + '.npz', big_min_hash_table = min_hash_table.astype(int))
end_time = time.time()
print(end_time - start_time)
if __name__ == "__main__":
getBigMinHashTable()
| true
| true
|
1c469984e0ce27e7f993f3a5fceabf990b93bb2c
| 1,686
|
py
|
Python
|
eggs/bx_python-0.7.1_7b95ff194725-py2.7-linux-i686-ucs4.egg/EGG-INFO/scripts/maf_split_by_src.py
|
bopopescu/phyG
|
023f505b705ab953f502cbc55e90612047867583
|
[
"CC-BY-3.0"
] | null | null | null |
eggs/bx_python-0.7.1_7b95ff194725-py2.7-linux-i686-ucs4.egg/EGG-INFO/scripts/maf_split_by_src.py
|
bopopescu/phyG
|
023f505b705ab953f502cbc55e90612047867583
|
[
"CC-BY-3.0"
] | null | null | null |
eggs/bx_python-0.7.1_7b95ff194725-py2.7-linux-i686-ucs4.egg/EGG-INFO/scripts/maf_split_by_src.py
|
bopopescu/phyG
|
023f505b705ab953f502cbc55e90612047867583
|
[
"CC-BY-3.0"
] | 1
|
2020-07-25T21:03:18.000Z
|
2020-07-25T21:03:18.000Z
|
#!/afs/bx.psu.edu/project/pythons/linux-i686-ucs4/bin/python2.7
"""
Read a MAF from stdin and break into several mafs based on the source of
each block. If the `component` option is provided then only that component
will be used to determine the new file for each block, otherwise the src
for *all* components will be used.
TODO: Should be able to specify component by species/prefix?
usage: %prog [options] < maf
-o, --outprefix: prepend this to the name of each generated maf
-c, --component: use only this component (by index!) to split
"""
import sys, string
import bx.align.maf
from optparse import OptionParser
import psyco_full
INF="inf"
def __main__():
# Parse command line arguments
parser = OptionParser()
parser.add_option( "-o", "--outprefix", action="store", default="" )
parser.add_option( "-c", "--component", action="store", default=None )
( options, args ) = parser.parse_args()
out_prefix = options.outprefix
comp = options.component
if comp is not None:
comp = int( comp )
maf_reader = bx.align.maf.Reader( sys.stdin )
writers = {}
for m in maf_reader:
if comp is None:
writer_key = string.join( [ c.src for c in m.components ], '_' )
else:
writer_key = m.components[ comp ].src
if not writers.has_key( writer_key ):
writer = bx.align.maf.Writer( file( "%s%s.maf" % ( out_prefix, writer_key ), "w" ) )
writers[ writer_key ] = writer
else:
writer = writers[ writer_key ]
writer.write( m )
for key in writers:
writers[ key ].close()
if __name__ == "__main__": __main__()
| 27.639344
| 96
| 0.640569
|
import sys, string
import bx.align.maf
from optparse import OptionParser
import psyco_full
INF="inf"
def __main__():
parser = OptionParser()
parser.add_option( "-o", "--outprefix", action="store", default="" )
parser.add_option( "-c", "--component", action="store", default=None )
( options, args ) = parser.parse_args()
out_prefix = options.outprefix
comp = options.component
if comp is not None:
comp = int( comp )
maf_reader = bx.align.maf.Reader( sys.stdin )
writers = {}
for m in maf_reader:
if comp is None:
writer_key = string.join( [ c.src for c in m.components ], '_' )
else:
writer_key = m.components[ comp ].src
if not writers.has_key( writer_key ):
writer = bx.align.maf.Writer( file( "%s%s.maf" % ( out_prefix, writer_key ), "w" ) )
writers[ writer_key ] = writer
else:
writer = writers[ writer_key ]
writer.write( m )
for key in writers:
writers[ key ].close()
if __name__ == "__main__": __main__()
| true
| true
|
1c469a53f45b615fde86d33a3918d754e428abba
| 1,474
|
py
|
Python
|
src/simgnn.py
|
pulkit1joshi/SimGNN
|
199b6014482a1dc8719394de4fc17f03c1b7192c
|
[
"MIT"
] | 22
|
2020-10-09T13:36:57.000Z
|
2022-02-10T04:07:54.000Z
|
src/simgnn.py
|
kartiklucky9n/SimGNN
|
199b6014482a1dc8719394de4fc17f03c1b7192c
|
[
"MIT"
] | 8
|
2020-10-10T11:02:39.000Z
|
2021-12-29T17:45:05.000Z
|
src/simgnn.py
|
kartiklucky9n/SimGNN
|
199b6014482a1dc8719394de4fc17f03c1b7192c
|
[
"MIT"
] | 11
|
2020-10-11T03:58:36.000Z
|
2022-03-30T09:54:55.000Z
|
from tensorflow import keras
from tensorflow.keras import layers
from keras_gcn import GraphConv
from keras.models import Model
from keras.layers import Input
from custom_layers import Attention, NeuralTensorLayer
"""
Main model : Node-to-Node interaction not implemented.
Functional API :
Shared layers are shared_gcn1, shared_gcn2, shared_gcn3, shared_attention
"""
def simgnn(parser):
inputA = Input(shape=(None,16))
GinputA = Input(shape=(None,None))
inputB = Input(shape=(None,16))
GinputB = Input(shape=(None,None))
shared_gcn1 = GraphConv(units=parser.filters_1,step_num=3, activation="relu")
shared_gcn2 = GraphConv(units=parser.filters_2,step_num=3, activation="relu")
shared_gcn3 = GraphConv(units=parser.filters_3,step_num=3, activation="relu")
shared_attention = Attention(parser)
x = shared_gcn1([inputA, GinputA])
x = shared_gcn2([x, GinputA])
x = shared_gcn3([x, GinputA])
x = shared_attention(x[0])
y = shared_gcn1([inputB, GinputB])
y = shared_gcn2([y, GinputB])
y = shared_gcn3([y, GinputB])
y = shared_attention(y[0])
z = NeuralTensorLayer(output_dim=16, input_dim=16)([x, y])
z = keras.layers.Dense(16, activation="relu")(z)
z = keras.layers.Dense(8, activation="relu")(z)
z = keras.layers.Dense(4, activation="relu")(z)
z = keras.layers.Dense(1)(z)
z = keras.activations.sigmoid(z)
return Model(inputs=[inputA, GinputA, inputB, GinputB], outputs=z)
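# --- usage sketch; the hyper-parameter names/values below are assumptions ---
# from argparse import Namespace
# args = Namespace(filters_1=64, filters_2=32, filters_3=16)
# model = simgnn(args)
# model.compile(optimizer="adam", loss="mse")
# model.summary()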
| 36.85
| 82
| 0.705563
|
from tensorflow import keras
from tensorflow.keras import layers
from keras_gcn import GraphConv
from keras.models import Model
from keras.layers import Input
from custom_layers import Attention, NeuralTensorLayer
def simgnn(parser):
inputA = Input(shape=(None,16))
GinputA = Input(shape=(None,None))
inputB = Input(shape=(None,16))
GinputB = Input(shape=(None,None))
shared_gcn1 = GraphConv(units=parser.filters_1,step_num=3, activation="relu")
shared_gcn2 = GraphConv(units=parser.filters_2,step_num=3, activation="relu")
shared_gcn3 = GraphConv(units=parser.filters_3,step_num=3, activation="relu")
shared_attention = Attention(parser)
x = shared_gcn1([inputA, GinputA])
x = shared_gcn2([x, GinputA])
x = shared_gcn3([x, GinputA])
x = shared_attention(x[0])
y = shared_gcn1([inputB, GinputB])
y = shared_gcn2([y, GinputB])
y = shared_gcn3([y, GinputB])
y = shared_attention(y[0])
z = NeuralTensorLayer(output_dim=16, input_dim=16)([x, y])
z = keras.layers.Dense(16, activation="relu")(z)
z = keras.layers.Dense(8, activation="relu")(z)
z = keras.layers.Dense(4, activation="relu")(z)
z = keras.layers.Dense(1)(z)
z = keras.activations.sigmoid(z)
return Model(inputs=[inputA, GinputA, inputB, GinputB], outputs=z)
| true
| true
|
1c469b82d550b35023a31690c11f6d79e68fe635
| 3,452
|
py
|
Python
|
testing/modules/args.py
|
marshallmidden/m4
|
8ff1cb050efdefe6963c6d7f459fd6f3d25eea94
|
[
"BSD-2-Clause"
] | null | null | null |
testing/modules/args.py
|
marshallmidden/m4
|
8ff1cb050efdefe6963c6d7f459fd6f3d25eea94
|
[
"BSD-2-Clause"
] | null | null | null |
testing/modules/args.py
|
marshallmidden/m4
|
8ff1cb050efdefe6963c6d7f459fd6f3d25eea94
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/python3
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
#-----------------------------------------------------------------------------
import argparse
import sys
#-----------------------------------------------------------------------------
# Global option variables follow.
#-----------------------------------------------------------------------------
def parse_args(values):
global args, initiator, target, qla2xxx
init = ('i', 'in', 'ini', 'init', 'initi', 'initia', 'initiat', 'initiato', 'initiator', 'initiators')
targ = ('t', 'ta', 'tar', 'targ', 'targe', 'target', 'targets')
qla2 = ('q', 'ql', 'qla', 'qla2', 'qla2x', 'qla2xx', 'qla2xxx')
initiator = False
target = False
qla2xxx = False
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog= "List Linux host numbers, Fibre Channel WWPNs, Card type and Firmware versions,\n" +
"PCI slot numbers, Speed and Supported Speeds, and Link States.\n" +
"May be useful for setting up the file: /etc/modprobe.d/qla2xxx.conf\n")
parser.add_argument('--verbose', '-v', action='store_true',
help = 'Print each line of qla2xxx.conf file.')
parser.add_argument('--vv', '-vv', action='store_true',
help = 'Print each Fibre line of \'lspci -vmmD\' output.')
parser.add_argument('--seen', '-s', action='store_true',
help = 'Print rports seen on target ports. (Positional arguments should not be present. See (nothing) above.)')
parser.add_argument('rest', nargs='*',
metavar="initiator|target|qla2xxx",
help='Optional output or format limiting.')
args = parser.parse_args(values)
error = False
for a in args.rest:
if a in init:
initiator = True
elif a in targ:
target = True
elif a in qla2:
qla2xxx = True
else:
if not error:
print('-' * 78, file=sys.stderr)
print("ERROR - unrecognized argument '{}'".format(a), file=sys.stderr)
error = True
if error:
print('-' * 78, file=sys.stderr)
parser.print_help()
print('-' * 78, file=sys.stderr)
print("ERROR - read line(s) above help message!", file=sys.stderr)
exit(1)
# End of parse_args
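# Example (illustrative): parse_args(['-v', 't', 'qla2xxx']) sets
# args.verbose=True, target=True and qla2xxx=True; initiator stays False.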
#-----------------------------------------------------------------------------
def print_args():
global args, initiator, target, qla2xxx
print(type(args.verbose), "args.verbose=", args.verbose)
print(type(args.vv), "args.vv=", args.vv)
print(type(args.seen), "args.seen=", args.seen)
print(type(args.rest), "args.rest=", args.rest)
print(type(initiator), "initiator=", initiator)
print(type(target), "target=", target)
print(type(qla2xxx), "qla2xxx=", qla2xxx)
# End of print_args
#-----------------------------------------------------------------------------
# Main script processing.
def main(values):
print("values=", values)
parse_args(values)
print_args()
# End of main
#-----------------------------------------------------------------------------
# Execute the main routine.
if __name__ == "__main__":
main(sys.argv[1:])
exit(0)
#-----------------------------------------------------------------------------
# End of file args.py
| 41.095238
| 135
| 0.503476
|
import argparse
import sys
def parse_args(values):
global args, initiator, target, qla2xxx
init = ('i', 'in', 'ini', 'init', 'initi', 'initia', 'initiat', 'initiato', 'initiator', 'initiators')
targ = ('t', 'ta', 'tar', 'targ', 'targe', 'target', 'targets')
qla2 = ('q', 'ql', 'qla', 'qla2', 'qla2x', 'qla2xx', 'qla2xxx')
initiator = False
target = False
qla2xxx = False
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog= "List Linux host numbers, Fibre Channel WWPNs, Card type and Firmware versions,\n" +
"PCI slot numbers, Speed and Supported Speeds, and Link States.\n" +
"May be useful for setting up the file: /etc/modprobe.d/qla2xxx.conf\n")
parser.add_argument('--verbose', '-v', action='store_true',
help = 'Print each line of qla2xxx.conf file.')
parser.add_argument('--vv', '-vv', action='store_true',
help = 'Print each Fibre line of \'lspci -vmmD\' output.')
parser.add_argument('--seen', '-s', action='store_true',
help = 'Print rports seen on target ports. (Positional arguments should not be present. See (nothing) above.)')
parser.add_argument('rest', nargs='*',
metavar="initiator|target|qla2xxx",
help='Optional output or format limiting.')
args = parser.parse_args(values)
error = False
for a in args.rest:
if a in init:
initiator = True
elif a in targ:
target = True
elif a in qla2:
qla2xxx = True
else:
if not error:
print('-' * 78, file=sys.stderr)
print("ERROR - unrecognized argument '{}'".format(a), file=sys.stderr)
error = True
if error:
print('-' * 78, file=sys.stderr)
parser.print_help()
print('-' * 78, file=sys.stderr)
print("ERROR - read line(s) above help message!", file=sys.stderr)
exit(1)
def print_args():
global args, initiator, target, qla2xxx
print(type(args.verbose), "args.verbose=", args.verbose)
print(type(args.vv), "args.vv=", args.vv)
print(type(args.seen), "args.seen=", args.seen)
print(type(args.rest), "args.rest=", args.rest)
print(type(initiator), "initiator=", initiator)
print(type(target), "target=", target)
print(type(qla2xxx), "qla2xxx=", qla2xxx)
def main(values):
print("values=", values)
parse_args(values)
print_args()
if __name__ == "__main__":
main(sys.argv[1:])
exit(0)
| true
| true
|
1c469ce9ba3bc866b5d62e0d55297ba6728a69f8
| 275
|
py
|
Python
|
flaskblog/App/models/__init__.py
|
riverstation/project-all
|
c56f1879e1303d561e95a3ff3a70f94fb5fa2191
|
[
"Apache-2.0"
] | null | null | null |
flaskblog/App/models/__init__.py
|
riverstation/project-all
|
c56f1879e1303d561e95a3ff3a70f94fb5fa2191
|
[
"Apache-2.0"
] | null | null | null |
flaskblog/App/models/__init__.py
|
riverstation/project-all
|
c56f1879e1303d561e95a3ff3a70f94fb5fa2191
|
[
"Apache-2.0"
] | null | null | null |
from .user import User
from .posts import Posts
from App.extensions import db
# Association table linking users and posts: stores the ids for the many-to-many relationship
collections = db.Table('collections',
db.Column('user_id',db.Integer,db.ForeignKey('user.id')),
db.Column('posts_id',db.Integer,db.ForeignKey('posts.id'))
)
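# A hedged sketch of how this association table is typically wired into a
# relationship (the attribute names below are assumptions, not from this repo):
# class User(db.Model):
#     ...
#     collected_posts = db.relationship(
#         'Posts', secondary=collections,
#         backref=db.backref('collectors', lazy='dynamic'))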
| 30.555556
| 62
| 0.745455
|
from .user import User
from .posts import Posts
from App.extensions import db
collections = db.Table('collections',
db.Column('user_id',db.Integer,db.ForeignKey('user.id')),
db.Column('posts_id',db.Integer,db.ForeignKey('posts.id'))
)
| true
| true
|
1c469d46abeed2c741846f66801fcc1ae85fbd0c
| 1,407
|
py
|
Python
|
pyproc/views/message.py
|
cmin764/pyproc
|
be69b5a35fbe3818accea472735effec0825f17c
|
[
"MIT"
] | null | null | null |
pyproc/views/message.py
|
cmin764/pyproc
|
be69b5a35fbe3818accea472735effec0825f17c
|
[
"MIT"
] | null | null | null |
pyproc/views/message.py
|
cmin764/pyproc
|
be69b5a35fbe3818accea472735effec0825f17c
|
[
"MIT"
] | null | null | null |
"""Handle /message page."""
from flask import (
abort,
request,
)
from pyproc import app, tasks
from pyproc.views.base import responsify
@app.route("/message", methods=["POST"])
@responsify
def message():
"""Messaging page available for users/clients submitting tasks."""
# Retrieve JSON parameters data.
data = request.get_json() or {}
data.update(dict(request.values))
msg = data.get("msg")
if not msg:
raise abort(400, "missing 'msg' data")
    # Defer the message as a task.
result = tasks.process_message.delay(msg, delta=10)
task_id = result.task_id
if not task_id or result.failed():
raise abort(400, "task failed")
# Then check and return ID.
return {
"task_id": result.id
}
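# Illustrative client call (host/port are assumptions):
#   curl -X POST http://localhost:5000/message \
#        -H "Content-Type: application/json" -d '{"msg": "hello"}'
# -> {"task_id": "<celery task id>"}; poll /result?tid=<task_id> afterwards.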
@app.route("/result", methods=["GET"])
@responsify
def result():
"""Get results when ready regarding a previously submitted task."""
# Retrieve JSON parameters data.
data = request.get_json() or {}
data.update(dict(request.values))
tid = data.get("tid")
if not tid:
raise abort(400, "missing 'tid' data")
# Get the result (if exists and finished).
result = tasks.process_message.AsyncResult(tid)
# Return status and result if available.
resp = {
"status": result.status,
"result": None,
}
if result.ready():
resp["result"] = result.get()
return resp
| 25.125
| 71
| 0.633262
|
from flask import (
abort,
request,
)
from pyproc import app, tasks
from pyproc.views.base import responsify
@app.route("/message", methods=["POST"])
@responsify
def message():
data = request.get_json() or {}
data.update(dict(request.values))
msg = data.get("msg")
if not msg:
raise abort(400, "missing 'msg' data")
result = tasks.process_message.delay(msg, delta=10)
task_id = result.task_id
if not task_id or result.failed():
raise abort(400, "task failed")
return {
"task_id": result.id
}
@app.route("/result", methods=["GET"])
@responsify
def result():
data = request.get_json() or {}
data.update(dict(request.values))
tid = data.get("tid")
if not tid:
raise abort(400, "missing 'tid' data")
result = tasks.process_message.AsyncResult(tid)
resp = {
"status": result.status,
"result": None,
}
if result.ready():
resp["result"] = result.get()
return resp
| true
| true
|
1c469feaecafc257a8d704b752cfef5883265ac6
| 291
|
py
|
Python
|
random/agent.py
|
iejMac/TTTArena
|
056636f064769c3251fb2448e7487b4fa8394733
|
[
"MIT"
] | 3
|
2021-05-23T23:55:03.000Z
|
2021-07-09T16:01:10.000Z
|
random/agent.py
|
iejMac/TTTArena
|
056636f064769c3251fb2448e7487b4fa8394733
|
[
"MIT"
] | null | null | null |
random/agent.py
|
iejMac/TTTArena
|
056636f064769c3251fb2448e7487b4fa8394733
|
[
"MIT"
] | 2
|
2021-07-09T11:44:09.000Z
|
2021-07-11T12:32:58.000Z
|
from agent import Agent
from numpy.random import randint
class RandomAgent(Agent):
def __init__(self, name):
super().__init__(name)
def make_action(self, state):
movex = randint(0, state.shape[1])
movey = randint(0, state.shape[0])
return (movey, movex)
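# Minimal usage sketch (assumes a 2-D numpy board; Agent comes from the repo):
# import numpy as np
# agent = RandomAgent("rand")
# row, col = agent.make_action(np.zeros((3, 3)))  # uniform move on a 3x3 board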
| 20.785714
| 39
| 0.659794
|
from agent import Agent
from numpy.random import randint
class RandomAgent(Agent):
def __init__(self, name):
super().__init__(name)
def make_action(self, state):
movex = randint(0, state.shape[1])
movey = randint(0, state.shape[0])
return (movey, movex)
| true
| true
|
1c46a09640f355fa068c98b298841bc96a9474b0
| 1,533
|
py
|
Python
|
count_zeroavg.py
|
Renata1995/Topic-Distance-and-Coherence
|
d567d5b3ef71ea5654f214aa3736add7f3ac94bc
|
[
"Apache-2.0"
] | 5
|
2018-08-25T07:16:31.000Z
|
2020-11-12T00:36:15.000Z
|
count_zeroavg.py
|
Renata1995/Topic-Distance-and-Coherence
|
d567d5b3ef71ea5654f214aa3736add7f3ac94bc
|
[
"Apache-2.0"
] | 1
|
2018-09-24T16:17:47.000Z
|
2018-09-24T16:17:47.000Z
|
count_zeroavg.py
|
Renata1995/Topic-Distance-and-Coherence
|
d567d5b3ef71ea5654f214aa3736add7f3ac94bc
|
[
"Apache-2.0"
] | 4
|
2018-05-07T07:52:10.000Z
|
2020-11-12T00:36:18.000Z
|
import sys
import utils.name_convention as name
import numpy as np
if len(sys.argv) <= 1:
corpus_type = "bow"
else:
if sys.argv[1] == "t":
corpus_type = "tfidf"
elif sys.argv[1] == "b":
corpus_type = "binary"
else:
corpus_type = "bow"
if len(sys.argv) <= 2:
topics_count = 3
else:
topics_count = int(sys.argv[2])
if len(sys.argv) <= 3:
src = "pp_reuters"
else:
src = sys.argv[3]
if len(sys.argv) <= 4:
tc = "path"
else:
tc = sys.argv[4]
if len(sys.argv) <= 5:
words_count = 150
else:
words_count = int(sys.argv[5])
word_pairs = words_count*(words_count - 1)/2
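# word_pairs is C(n, 2): with the default 150 words, 150*149/2 == 11175 unordered pairs per topic.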
ofile = open("wn_zeros_summary.txt", "w")
for tc in "path wup lch lin res jcn".split():
ofile.write(tc + ": ")
avgwn_list = []
avgdist_list = []
for corpus_type in ["tfidf", "bow","binary"]:
for topics_count in [5,10,15,20]:
dname = name.get_output_dir(corpus_type, topics_count, src)
zfile = open(dname + "/zeros_sum_" + tc + "_w" + str(words_count) + ".txt")
not_in_wn = int(zfile.readline().split(":")[1])
no_distance = int(zfile.readline().split(":")[1])
avg_wn = float(not_in_wn)/(topics_count * word_pairs)
avgwn_list.append(avg_wn)
avg_dis = float(no_distance)/(topics_count * word_pairs)
avgdist_list.append(avg_dis)
ofile.write("not in wn: " + str(np.average(avgwn_list))+ " no distance: " + str(np.average(avgdist_list))+"\n")
| 27.375
| 116
| 0.580561
|
import sys
import utils.name_convention as name
import numpy as np
if len(sys.argv) <= 1:
corpus_type = "bow"
else:
if sys.argv[1] == "t":
corpus_type = "tfidf"
elif sys.argv[1] == "b":
corpus_type = "binary"
else:
corpus_type = "bow"
if len(sys.argv) <= 2:
topics_count = 3
else:
topics_count = int(sys.argv[2])
if len(sys.argv) <= 3:
src = "pp_reuters"
else:
src = sys.argv[3]
if len(sys.argv) <= 4:
tc = "path"
else:
tc = sys.argv[4]
if len(sys.argv) <= 5:
words_count = 150
else:
words_count = int(sys.argv[5])
word_pairs = words_count*(words_count - 1)/2
ofile = open("wn_zeros_summary.txt", "w")
for tc in "path wup lch lin res jcn".split():
ofile.write(tc + ": ")
avgwn_list = []
avgdist_list = []
for corpus_type in ["tfidf", "bow","binary"]:
for topics_count in [5,10,15,20]:
dname = name.get_output_dir(corpus_type, topics_count, src)
zfile = open(dname + "/zeros_sum_" + tc + "_w" + str(words_count) + ".txt")
not_in_wn = int(zfile.readline().split(":")[1])
no_distance = int(zfile.readline().split(":")[1])
avg_wn = float(not_in_wn)/(topics_count * word_pairs)
avgwn_list.append(avg_wn)
avg_dis = float(no_distance)/(topics_count * word_pairs)
avgdist_list.append(avg_dis)
ofile.write("not in wn: " + str(np.average(avgwn_list))+ " no distance: " + str(np.average(avgdist_list))+"\n")
| true
| true
|
1c46a1195f1820aed9bcbc13e7c2b5fa70a3462e
| 2,788
|
py
|
Python
|
06-About_json/qq_geci.py
|
jiaxiaochu/Crawler
|
bb54d515dc217c27574b36124e16fd5b993775bd
|
[
"MIT"
] | null | null | null |
06-About_json/qq_geci.py
|
jiaxiaochu/Crawler
|
bb54d515dc217c27574b36124e16fd5b993775bd
|
[
"MIT"
] | 1
|
2020-08-27T10:25:38.000Z
|
2020-08-27T10:25:38.000Z
|
06-About_json/qq_geci.py
|
jiaxiaochu/Crawler
|
bb54d515dc217c27574b36124e16fd5b993775bd
|
[
"MIT"
] | null | null | null |
#!/Library/Frameworks/Python.framework/Versions/3.7/bin/python3
# @blog : www.jiazhixiang.xyz
# @Author : Jiazhixiang
# -*- coding:utf-8 -*-
import requests
url_song = 'https://c.y.qq.com/soso/fcgi-bin/client_search_cp'
for x in range(1, 4):
params_song = {
'ct': '24',
'qqmusic_ver': '1298',
'new_json': '1',
'remoteplace': 'sizer.yqq.song_next',
'searchid': '64405487069162918',
't': '0',
'aggr': '1',
'cr': '1',
'catZhida': '1',
'lossless': '0',
'flag_qc': '0',
'p': x,
'n': '10',
'w': '五月天',
'g_tk': '5381',
'loginUin': '0',
'hostUin': '0',
'format': 'json',
'inCharset': 'utf8',
'outCharset': 'utf-8',
'notice': '0',
'platform': 'yqq.json',
'needNewCode': '0'
}
    # The request parameters are wrapped in a dict
    headers_song = {
        'origin': 'https://y.qq.com',
        # Request origin
        'referer': 'https://y.qq.com/n/yqq/song/004Z8Ihr0JIu5s.html',
        # Request referer
        'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36',
        # Identifies the device and browser the request comes from
    }
    res_music = requests.get(url_song, params=params_song, headers=headers_song)
    # Call get() to download the search results
    json_music = res_music.json()
    # Use json() to convert the response object into a list/dict
    list_music = json_music['data']['song']['list']
    # Walk the nested dicts to get the song list
url_lyric = 'https://c.y.qq.com/lyric/fcgi-bin/fcg_query_lyric_yqq.fcg'
for music in list_music:
# print(music)
# print(music['id'])
        name = music['name']
        # Look up the song title under the 'name' key and assign it to name
params_lyric = {
'nobase64': '1',
'musicid': str(music['id']),
'-': 'jsonp1',
'g_tk': '5381',
'loginUin': '0',
'hostUin': '0',
'format': 'json',
'inCharset': 'utf8',
'outCharset': 'utf-8',
'notice': '0',
'platform': 'yqq.json',
'needNewCode': '0'
}
# print(params_lyric["musicid"])
        headers_lyric = {
            'origin': 'https://y.qq.com',
            # Request origin
            'referer': 'https://y.qq.com/n/yqq/song/{0}.html'.format(music['mid']),
            # Request referer
            'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36',
            # Identifies the device and browser the request comes from
        }
        res_lyric = requests.get(url_lyric, params=params_lyric, headers=headers_lyric)
        # Call get() to download the lyric data
        json_lyric = res_lyric.json()
        # Use json() to convert the response object into a dict
        lyric = json_lyric['lyric']
        # Extract the lyric text and assign it to lyric
print(name, lyric)
| 33.190476
| 142
| 0.51901
|
import requests
url_song = 'https://c.y.qq.com/soso/fcgi-bin/client_search_cp'
for x in range(1, 4):
params_song = {
'ct': '24',
'qqmusic_ver': '1298',
'new_json': '1',
'remoteplace': 'sizer.yqq.song_next',
'searchid': '64405487069162918',
't': '0',
'aggr': '1',
'cr': '1',
'catZhida': '1',
'lossless': '0',
'flag_qc': '0',
'p': x,
'n': '10',
'w': '五月天',
'g_tk': '5381',
'loginUin': '0',
'hostUin': '0',
'format': 'json',
'inCharset': 'utf8',
'outCharset': 'utf-8',
'notice': '0',
'platform': 'yqq.json',
'needNewCode': '0'
}
headers_song = {
'origin': 'https://y.qq.com',
'referer': 'https://y.qq.com/n/yqq/song/004Z8Ihr0JIu5s.html',
'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36',
}
res_music = requests.get(url_song, params=params_song, headers=headers_song)
json_music = res_music.json()
list_music = json_music['data']['song']['list']
url_lyric = 'https://c.y.qq.com/lyric/fcgi-bin/fcg_query_lyric_yqq.fcg'
for music in list_music:
name = music['name']
params_lyric = {
'nobase64': '1',
'musicid': str(music['id']),
'-': 'jsonp1',
'g_tk': '5381',
'loginUin': '0',
'hostUin': '0',
'format': 'json',
'inCharset': 'utf8',
'outCharset': 'utf-8',
'notice': '0',
'platform': 'yqq.json',
'needNewCode': '0'
}
headers_lyric = {
'origin': 'https://y.qq.com',
'referer': 'https://y.qq.com/n/yqq/song/{0}.html'.format(music['mid']),
'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36',
}
res_lyric = requests.get(url_lyric, params=params_lyric, headers=headers_lyric)
json_lyric = res_lyric.json()
lyric = json_lyric['lyric']
print(name, lyric)
| true
| true
|
1c46a149c4c1484cee731d07530a6c9bf4e29a18
| 2,633
|
py
|
Python
|
dstream_excel/tracker/files.py
|
nickderobertis/datastream-excel-downloader-py
|
3407decdf27da117758ce5ecc538d9f65c6ad5f6
|
[
"MIT"
] | 1
|
2019-10-14T10:36:18.000Z
|
2019-10-14T10:36:18.000Z
|
dstream_excel/tracker/files.py
|
whoopnip/datastream-excel-downloader-py
|
3407decdf27da117758ce5ecc538d9f65c6ad5f6
|
[
"MIT"
] | 4
|
2020-03-24T17:45:15.000Z
|
2021-06-02T00:20:24.000Z
|
dstream_excel/tracker/files.py
|
whoopnip/datastream-excel-downloader-py
|
3407decdf27da117758ce5ecc538d9f65c6ad5f6
|
[
"MIT"
] | null | null | null |
import ast
import os
import time
from dstream_excel.tracker.timing import TimeTracker
class FileProcessTracker:
def __init__(self, folder=None, restart=False, file_types=('csv',)):
if folder is None:
self.folder = os.getcwd()
else:
self.folder = os.path.abspath(folder)
self.completed_list_path = os.path.join(self.folder, 'completed.txt')
if restart:
self.delete_completed_files()
self.restart = restart
self.load_completed_files()
self.load_process_files(file_types=file_types)
def file_generator(self):
timer = TimeTracker(self.folder, restart=self.restart)
num_items = len(self.process_list)
for file in self.process_list:
yield os.path.join(self.folder, file)
self.add_file_to_completed(file)
timer.time_estimate(num_items)
def add_file_to_completed(self, file):
self.completed_list.extend([file])
_update_completed_files(self.completed_list_path, self.completed_list)
def load_completed_files(self):
self.completed_list = _load_completed_files(self.completed_list_path)
def load_process_files(self, file_types):
self.process_list = _load_to_process_files(self.folder, self.completed_list, file_types)
def delete_completed_files(self):
_delete_completed_files(self.completed_list_path)
def _load_to_process_files(folder, completed_list, file_types):
files = _load_initial_file_list(folder, file_types)
return [file for file in files if file not in completed_list]
def _update_completed_files(completed_list_path, completed_list):
_write_to_file_with_retries(completed_list_path, completed_list)
def _write_to_file_with_retries(*args, retries_remaining=10, **kwargs):
    try:
        with open(args[0], 'w') as f:
            f.write(f'{args[1]}')
    except (OSError, PermissionError):
        # Without this guard the recursion never terminated on a persistent
        # failure; re-raise once the retry budget is exhausted.
        if retries_remaining <= 0:
            raise
        time.sleep(.1)
        _write_to_file_with_retries(*args, retries_remaining=retries_remaining-1, **kwargs)
def _load_completed_files(completed_list_path):
# Not started yet, none completed
if not os.path.exists(completed_list_path):
return []
with open(completed_list_path, 'r') as f:
completed_list = ast.literal_eval(f.read())
return completed_list
def _load_initial_file_list(folder, file_types):
return [file for file in next(os.walk(folder))[2] if any([file.endswith(ending) for ending in file_types])]
def _delete_completed_files(completed_list_path):
if os.path.exists(completed_list_path):
os.remove(completed_list_path)
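# Minimal usage sketch (the folder name is illustrative):
# tracker = FileProcessTracker(folder='downloads', file_types=('csv',))
# for path in tracker.file_generator():
#     handle(path)  # hypothetical per-file processing; progress is persisted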
| 30.976471
| 111
| 0.713635
|
import ast
import os
import time
from dstream_excel.tracker.timing import TimeTracker
class FileProcessTracker:
def __init__(self, folder=None, restart=False, file_types=('csv',)):
if folder is None:
self.folder = os.getcwd()
else:
self.folder = os.path.abspath(folder)
self.completed_list_path = os.path.join(self.folder, 'completed.txt')
if restart:
self.delete_completed_files()
self.restart = restart
self.load_completed_files()
self.load_process_files(file_types=file_types)
def file_generator(self):
timer = TimeTracker(self.folder, restart=self.restart)
num_items = len(self.process_list)
for file in self.process_list:
yield os.path.join(self.folder, file)
self.add_file_to_completed(file)
timer.time_estimate(num_items)
def add_file_to_completed(self, file):
self.completed_list.extend([file])
_update_completed_files(self.completed_list_path, self.completed_list)
def load_completed_files(self):
self.completed_list = _load_completed_files(self.completed_list_path)
def load_process_files(self, file_types):
self.process_list = _load_to_process_files(self.folder, self.completed_list, file_types)
def delete_completed_files(self):
_delete_completed_files(self.completed_list_path)
def _load_to_process_files(folder, completed_list, file_types):
files = _load_initial_file_list(folder, file_types)
return [file for file in files if file not in completed_list]
def _update_completed_files(completed_list_path, completed_list):
_write_to_file_with_retries(completed_list_path, completed_list)
def _write_to_file_with_retries(*args, retries_remaining=10, **kwargs):
    try:
        with open(args[0], 'w') as f:
            f.write(f'{args[1]}')
    except (OSError, PermissionError):
        if retries_remaining <= 0:
            raise
        time.sleep(.1)
        _write_to_file_with_retries(*args, retries_remaining=retries_remaining-1, **kwargs)
def _load_completed_files(completed_list_path):
if not os.path.exists(completed_list_path):
return []
with open(completed_list_path, 'r') as f:
completed_list = ast.literal_eval(f.read())
return completed_list
def _load_initial_file_list(folder, file_types):
return [file for file in next(os.walk(folder))[2] if any([file.endswith(ending) for ending in file_types])]
def _delete_completed_files(completed_list_path):
if os.path.exists(completed_list_path):
os.remove(completed_list_path)
| true
| true
|
1c46a17bd0d5d84ec22deeb07d1811a1fdd110c1
| 37,607
|
py
|
Python
|
msgraph-cli-extensions/v1_0/notes_v1_0/azext_notes_v1_0/vendored_sdks/notes/aio/operations/_sites_onenote_sections_parent_section_group_parent_notebook_operations.py
|
thewahome/msgraph-cli
|
33127d9efa23a0e5f5303c93242fbdbb73348671
|
[
"MIT"
] | null | null | null |
msgraph-cli-extensions/v1_0/notes_v1_0/azext_notes_v1_0/vendored_sdks/notes/aio/operations/_sites_onenote_sections_parent_section_group_parent_notebook_operations.py
|
thewahome/msgraph-cli
|
33127d9efa23a0e5f5303c93242fbdbb73348671
|
[
"MIT"
] | 22
|
2022-03-29T22:54:37.000Z
|
2022-03-29T22:55:27.000Z
|
msgraph-cli-extensions/v1_0/notes_v1_0/azext_notes_v1_0/vendored_sdks/notes/aio/operations/_sites_onenote_sections_parent_section_group_parent_notebook_operations.py
|
thewahome/msgraph-cli
|
33127d9efa23a0e5f5303c93242fbdbb73348671
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, List, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SitesOnenoteSectionsParentSectionGroupParentNotebookOperations:
"""SitesOnenoteSectionsParentSectionGroupParentNotebookOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~notes.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_section_groups(
self,
site_id: str,
onenote_section_id: str,
orderby: Optional[List[Union[str, "models.Enum708"]]] = None,
select: Optional[List[Union[str, "models.Enum709"]]] = None,
expand: Optional[List[Union[str, "models.Enum710"]]] = None,
**kwargs
) -> AsyncIterable["models.CollectionOfSectionGroup39"]:
"""Get sectionGroups from sites.
Get sectionGroups from sites.
:param site_id: key: id of site.
:type site_id: str
:param onenote_section_id: key: id of onenoteSection.
:type onenote_section_id: str
:param orderby: Order items by property values.
:type orderby: list[str or ~notes.models.Enum708]
:param select: Select properties to be returned.
:type select: list[str or ~notes.models.Enum709]
:param expand: Expand related entities.
:type expand: list[str or ~notes.models.Enum710]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CollectionOfSectionGroup39 or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~notes.models.CollectionOfSectionGroup39]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CollectionOfSectionGroup39"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_section_groups.metadata['url'] # type: ignore
path_format_arguments = {
'site-id': self._serialize.url("site_id", site_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if self._config.top is not None:
query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
if self._config.skip is not None:
query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
if self._config.search is not None:
query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
if self._config.filter is not None:
query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
if self._config.count is not None:
query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('CollectionOfSectionGroup39', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.odata_next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.OdataError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_section_groups.metadata = {'url': '/sites/{site-id}/onenote/sections/{onenoteSection-id}/parentSectionGroup/parentNotebook/sectionGroups'} # type: ignore
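    # Hedged usage sketch (client construction elided; the operations-group
    # attribute name below is an assumption derived from this class name):
    # async for group in client.sites_onenote_sections_parent_section_group_parent_notebook.list_section_groups(
    #         site_id=site_id, onenote_section_id=section_id):
    #     print(group.id)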
async def create_section_groups(
self,
site_id: str,
onenote_section_id: str,
body: "models.MicrosoftGraphSectionGroup",
**kwargs
) -> "models.MicrosoftGraphSectionGroup":
"""Create new navigation property to sectionGroups for sites.
Create new navigation property to sectionGroups for sites.
:param site_id: key: id of site.
:type site_id: str
:param onenote_section_id: key: id of onenoteSection.
:type onenote_section_id: str
:param body: New navigation property.
:type body: ~notes.models.MicrosoftGraphSectionGroup
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphSectionGroup, or the result of cls(response)
:rtype: ~notes.models.MicrosoftGraphSectionGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphSectionGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_section_groups.metadata['url'] # type: ignore
path_format_arguments = {
'site-id': self._serialize.url("site_id", site_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphSectionGroup')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphSectionGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_section_groups.metadata = {'url': '/sites/{site-id}/onenote/sections/{onenoteSection-id}/parentSectionGroup/parentNotebook/sectionGroups'} # type: ignore
async def get_section_groups(
self,
site_id: str,
onenote_section_id: str,
section_group_id: str,
select: Optional[List[Union[str, "models.Enum711"]]] = None,
expand: Optional[List[Union[str, "models.Enum712"]]] = None,
**kwargs
) -> "models.MicrosoftGraphSectionGroup":
"""Get sectionGroups from sites.
Get sectionGroups from sites.
:param site_id: key: id of site.
:type site_id: str
:param onenote_section_id: key: id of onenoteSection.
:type onenote_section_id: str
:param section_group_id: key: id of sectionGroup.
:type section_group_id: str
:param select: Select properties to be returned.
:type select: list[str or ~notes.models.Enum711]
:param expand: Expand related entities.
:type expand: list[str or ~notes.models.Enum712]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphSectionGroup, or the result of cls(response)
:rtype: ~notes.models.MicrosoftGraphSectionGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphSectionGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_section_groups.metadata['url'] # type: ignore
path_format_arguments = {
'site-id': self._serialize.url("site_id", site_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
'sectionGroup-id': self._serialize.url("section_group_id", section_group_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphSectionGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_section_groups.metadata = {'url': '/sites/{site-id}/onenote/sections/{onenoteSection-id}/parentSectionGroup/parentNotebook/sectionGroups/{sectionGroup-id}'} # type: ignore
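    # Usage sketch (hypothetical IDs): `select`/`expand` are serialized into
    # comma-separated `$select`/`$expand` query parameters, and plain strings
    # are accepted in place of the Enum711/Enum712 members:
    #
    #     group = await ops.get_section_groups(
    #         site_id=site_id,
    #         onenote_section_id=section_id,
    #         section_group_id=group_id,
    #         select=["id", "displayName"],
    #     )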
async def update_section_groups(
self,
site_id: str,
onenote_section_id: str,
section_group_id: str,
body: "models.MicrosoftGraphSectionGroup",
**kwargs
) -> None:
"""Update the navigation property sectionGroups in sites.
Update the navigation property sectionGroups in sites.
:param site_id: key: id of site.
:type site_id: str
:param onenote_section_id: key: id of onenoteSection.
:type onenote_section_id: str
:param section_group_id: key: id of sectionGroup.
:type section_group_id: str
:param body: New navigation property values.
:type body: ~notes.models.MicrosoftGraphSectionGroup
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_section_groups.metadata['url'] # type: ignore
path_format_arguments = {
'site-id': self._serialize.url("site_id", site_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
'sectionGroup-id': self._serialize.url("section_group_id", section_group_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphSectionGroup')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
update_section_groups.metadata = {'url': '/sites/{site-id}/onenote/sections/{onenoteSection-id}/parentSectionGroup/parentNotebook/sectionGroups/{sectionGroup-id}'} # type: ignore
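    # Usage sketch (hypothetical): the PATCH returns no body, so the coroutine
    # resolves to None once the service answers 204:
    #
    #     await ops.update_section_groups(
    #         site_id=site_id,
    #         onenote_section_id=section_id,
    #         section_group_id=group_id,
    #         body=models.MicrosoftGraphSectionGroup(display_name="Renamed"),
    #     )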
async def delete_section_groups(
self,
site_id: str,
onenote_section_id: str,
section_group_id: str,
if_match: Optional[str] = None,
**kwargs
) -> None:
"""Delete navigation property sectionGroups for sites.
Delete navigation property sectionGroups for sites.
:param site_id: key: id of site.
:type site_id: str
:param onenote_section_id: key: id of onenoteSection.
:type onenote_section_id: str
:param section_group_id: key: id of sectionGroup.
:type section_group_id: str
:param if_match: ETag.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.delete_section_groups.metadata['url'] # type: ignore
path_format_arguments = {
'site-id': self._serialize.url("site_id", site_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
'sectionGroup-id': self._serialize.url("section_group_id", section_group_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_section_groups.metadata = {'url': '/sites/{site-id}/onenote/sections/{onenoteSection-id}/parentSectionGroup/parentNotebook/sectionGroups/{sectionGroup-id}'} # type: ignore
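    # Usage sketch (hypothetical ETag value): passing `if_match` emits an
    # `If-Match` header, so the service performs the delete only if the
    # resource still matches that ETag:
    #
    #     await ops.delete_section_groups(
    #         site_id=site_id,
    #         onenote_section_id=section_id,
    #         section_group_id=group_id,
    #         if_match='W/"etag-value"',
    #     )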
def list_sections(
self,
site_id: str,
onenote_section_id: str,
orderby: Optional[List[Union[str, "models.Enum713"]]] = None,
select: Optional[List[Union[str, "models.Enum714"]]] = None,
expand: Optional[List[Union[str, "models.Enum715"]]] = None,
**kwargs
) -> AsyncIterable["models.CollectionOfOnenoteSection39"]:
"""Get sections from sites.
Get sections from sites.
:param site_id: key: id of site.
:type site_id: str
:param onenote_section_id: key: id of onenoteSection.
:type onenote_section_id: str
:param orderby: Order items by property values.
:type orderby: list[str or ~notes.models.Enum713]
:param select: Select properties to be returned.
:type select: list[str or ~notes.models.Enum714]
:param expand: Expand related entities.
:type expand: list[str or ~notes.models.Enum715]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CollectionOfOnenoteSection39 or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~notes.models.CollectionOfOnenoteSection39]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CollectionOfOnenoteSection39"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_sections.metadata['url'] # type: ignore
path_format_arguments = {
'site-id': self._serialize.url("site_id", site_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if self._config.top is not None:
query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
if self._config.skip is not None:
query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
if self._config.search is not None:
query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
if self._config.filter is not None:
query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
if self._config.count is not None:
query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('CollectionOfOnenoteSection39', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.odata_next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.OdataError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_sections.metadata = {'url': '/sites/{site-id}/onenote/sections/{onenoteSection-id}/parentSectionGroup/parentNotebook/sections'} # type: ignore
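    # Usage sketch: `list_sections` is synchronous and returns an
    # AsyncItemPaged that follows `@odata.nextLink` continuation links on
    # demand, so callers only iterate (names here are hypothetical):
    #
    #     async for section in ops.list_sections(
    #         site_id=site_id,
    #         onenote_section_id=section_id,
    #         orderby=["displayName"],
    #     ):
    #         print(section.display_name)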
async def create_sections(
self,
site_id: str,
onenote_section_id: str,
body: "models.MicrosoftGraphOnenoteSection",
**kwargs
) -> "models.MicrosoftGraphOnenoteSection":
"""Create new navigation property to sections for sites.
Create new navigation property to sections for sites.
:param site_id: key: id of site.
:type site_id: str
:param onenote_section_id: key: id of onenoteSection.
:type onenote_section_id: str
:param body: New navigation property.
:type body: ~notes.models.MicrosoftGraphOnenoteSection
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphOnenoteSection, or the result of cls(response)
:rtype: ~notes.models.MicrosoftGraphOnenoteSection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphOnenoteSection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_sections.metadata['url'] # type: ignore
path_format_arguments = {
'site-id': self._serialize.url("site_id", site_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphOnenoteSection')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphOnenoteSection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_sections.metadata = {'url': '/sites/{site-id}/onenote/sections/{onenoteSection-id}/parentSectionGroup/parentNotebook/sections'} # type: ignore
async def get_sections(
self,
site_id: str,
onenote_section_id: str,
onenote_section_id1: str,
select: Optional[List[Union[str, "models.Enum716"]]] = None,
expand: Optional[List[Union[str, "models.Enum717"]]] = None,
**kwargs
) -> "models.MicrosoftGraphOnenoteSection":
"""Get sections from sites.
Get sections from sites.
:param site_id: key: id of site.
:type site_id: str
:param onenote_section_id: key: id of onenoteSection.
:type onenote_section_id: str
:param onenote_section_id1: key: id of onenoteSection.
:type onenote_section_id1: str
:param select: Select properties to be returned.
:type select: list[str or ~notes.models.Enum716]
:param expand: Expand related entities.
:type expand: list[str or ~notes.models.Enum717]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphOnenoteSection, or the result of cls(response)
:rtype: ~notes.models.MicrosoftGraphOnenoteSection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphOnenoteSection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_sections.metadata['url'] # type: ignore
path_format_arguments = {
'site-id': self._serialize.url("site_id", site_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
'onenoteSection-id1': self._serialize.url("onenote_section_id1", onenote_section_id1, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphOnenoteSection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_sections.metadata = {'url': '/sites/{site-id}/onenote/sections/{onenoteSection-id}/parentSectionGroup/parentNotebook/sections/{onenoteSection-id1}'} # type: ignore
async def update_sections(
self,
site_id: str,
onenote_section_id: str,
onenote_section_id1: str,
body: "models.MicrosoftGraphOnenoteSection",
**kwargs
) -> None:
"""Update the navigation property sections in sites.
Update the navigation property sections in sites.
:param site_id: key: id of site.
:type site_id: str
:param onenote_section_id: key: id of onenoteSection.
:type onenote_section_id: str
:param onenote_section_id1: key: id of onenoteSection.
:type onenote_section_id1: str
:param body: New navigation property values.
:type body: ~notes.models.MicrosoftGraphOnenoteSection
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_sections.metadata['url'] # type: ignore
path_format_arguments = {
'site-id': self._serialize.url("site_id", site_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
'onenoteSection-id1': self._serialize.url("onenote_section_id1", onenote_section_id1, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphOnenoteSection')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
update_sections.metadata = {'url': '/sites/{site-id}/onenote/sections/{onenoteSection-id}/parentSectionGroup/parentNotebook/sections/{onenoteSection-id1}'} # type: ignore
async def delete_sections(
self,
site_id: str,
onenote_section_id: str,
onenote_section_id1: str,
if_match: Optional[str] = None,
**kwargs
) -> None:
"""Delete navigation property sections for sites.
Delete navigation property sections for sites.
:param site_id: key: id of site.
:type site_id: str
:param onenote_section_id: key: id of onenoteSection.
:type onenote_section_id: str
:param onenote_section_id1: key: id of onenoteSection.
:type onenote_section_id1: str
:param if_match: ETag.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.delete_sections.metadata['url'] # type: ignore
path_format_arguments = {
'site-id': self._serialize.url("site_id", site_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
'onenoteSection-id1': self._serialize.url("onenote_section_id1", onenote_section_id1, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_sections.metadata = {'url': '/sites/{site-id}/onenote/sections/{onenoteSection-id}/parentSectionGroup/parentNotebook/sections/{onenoteSection-id1}'} # type: ignore
# --- docs/source/conf.py (zeromake/restful-model, MIT) ----------------------
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'restful_model'
copyright = '2018, zeromake <a390720046@gmail.com>'
author = 'zeromake <a390720046@gmail.com>'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.imgmath',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'zh'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'restful_modeldoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'restful_model.tex', 'restful\\_model Documentation',
'zeromake \\textless{}a390720046@gmail.com\\textgreater{}', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'restful_model', 'restful_model Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'restful_model', 'restful_model Documentation',
author, 'restful_model', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
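# Note: the bare-URL key above is the older mapping form; current Sphinx
# releases prefer named entries with an explicit (target, inventory) tuple,
# e.g. (equivalent sketch):
#
#     intersphinx_mapping = {'python': ('https://docs.python.org/3', None)}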
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todolist` produce output, else they produce nothing.
todo_include_todos = True
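# With the flag enabled, ``.. todo::`` admonitions are rendered where they
# appear and a ``.. todolist::`` directive collects them in one place; set it
# to False for release builds to silence both.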
# --- usaspending_api/spending_explorer/tests/integration/test_spending_explorer.py (g4brielvs/usaspending-api, CC0-1.0) ---
import copy
import json
import pytest
from datetime import datetime, timezone
from model_mommy import mommy
from rest_framework import status
from usaspending_api.awards.models import FinancialAccountsByAwards
from usaspending_api.financial_activities.models import (
FinancialAccountsByProgramActivityObjectClass,
SubmissionAttributes,
TreasuryAppropriationAccount,
)
from usaspending_api.accounts.models import FederalAccount
from usaspending_api.references.models import Agency, GTASSF133Balances, ToptierAgency, ObjectClass
from usaspending_api.submissions.models import DABSSubmissionWindowSchedule
ENDPOINT_URL = "/api/v2/spending/"
CONTENT_TYPE = "application/json"
GLOBAL_MOCK_DICT = [
{
"model": DABSSubmissionWindowSchedule,
"id": 1600031,
"period_end_date": datetime(1599, 12, 31, tzinfo=timezone.utc),
"submission_fiscal_year": 1600,
"submission_fiscal_quarter": 1,
"submission_fiscal_month": 3,
"submission_reveal_date": datetime(1600, 1, 28, tzinfo=timezone.utc),
"is_quarter": True,
},
{"model": ObjectClass, "id": 1},
{"model": GTASSF133Balances, "fiscal_year": 1600, "fiscal_period": 3, "obligations_incurred_total_cpe": -10},
{
"model": SubmissionAttributes,
"submission_id": -1,
"reporting_fiscal_year": 1600,
"reporting_fiscal_period": 3,
"reporting_fiscal_quarter": 1,
},
{
"model": ToptierAgency,
"toptier_agency_id": -1,
"name": "random_funding_name_1",
"toptier_code": "random_funding_code_1",
},
{
"model": ToptierAgency,
"toptier_agency_id": -2,
"name": "random_funding_name_2",
"toptier_code": "random_funding_code_2",
},
{"model": Agency, "toptier_agency_id": -1, "toptier_flag": True},
{"model": Agency, "toptier_agency_id": -2, "toptier_flag": True},
{
"model": TreasuryAppropriationAccount,
"treasury_account_identifier": -1,
"funding_toptier_agency_id": -1,
"federal_account_id": 1,
},
{
"model": FederalAccount,
"id": 1,
"account_title": "Tommy Two-Tone",
"agency_identifier": "867",
"main_account_code": "5309",
"federal_account_code": "867-5309",
},
{
"model": TreasuryAppropriationAccount,
"treasury_account_identifier": -2,
"funding_toptier_agency_id": -2,
"federal_account_id": 1,
},
{
"model": FinancialAccountsByProgramActivityObjectClass,
"financial_accounts_by_program_activity_object_class_id": -1,
"submission_id": -1,
"treasury_account_id": -1,
"obligations_incurred_by_program_object_class_cpe": -5,
},
{
"model": FinancialAccountsByProgramActivityObjectClass,
"financial_accounts_by_program_activity_object_class_id": -2,
"submission_id": -1,
"treasury_account_id": -1,
"obligations_incurred_by_program_object_class_cpe": -10,
},
{
"model": FinancialAccountsByProgramActivityObjectClass,
"financial_accounts_by_program_activity_object_class_id": -3,
"submission_id": -1,
"treasury_account_id": -2,
"obligations_incurred_by_program_object_class_cpe": -1,
"object_class_id": 1,
},
]
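# Each entry above is a model_mommy recipe: the "model" key names the Django
# model class and the remaining keys become field values, consumed in the
# tests below as
#     mommy.make(entry.pop("model"), **entry)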
@pytest.mark.django_db
def test_unreported_data_actual_value_file_b(client):
models = copy.deepcopy(GLOBAL_MOCK_DICT)
for entry in models:
mommy.make(entry.pop("model"), **entry)
json_request = {"type": "agency", "filters": {"fy": "1600", "quarter": "1"}}
response = client.post(path=ENDPOINT_URL, content_type=CONTENT_TYPE, data=json.dumps(json_request))
assert response.status_code == status.HTTP_200_OK
json_response = response.json()
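    # Reported File B amounts: agency 1 books -5 + -10 = -15, agency 2 books
    # -1 (sum -16). Against the GTAS grand total of -10 this leaves an
    # "Unreported Data" bucket of -10 - (-16) = +6.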
expected_results = {
"total": -10,
"agencies": ["Unreported Data", "random_funding_name_2", "random_funding_name_1"],
"amounts": [6, -1, -15],
}
actual_results = {
"total": json_response["total"],
"agencies": [entry["name"] for entry in json_response["results"]],
"amounts": [entry["amount"] for entry in json_response["results"]],
}
assert expected_results == actual_results
@pytest.mark.django_db
def test_unreported_data_actual_value_file_c(client):
models_to_mock = [
{
"model": DABSSubmissionWindowSchedule,
"id": 1600031,
"period_end_date": datetime(1599, 12, 31, tzinfo=timezone.utc),
"submission_fiscal_year": 1600,
"submission_fiscal_quarter": 1,
"submission_fiscal_month": 3,
"submission_reveal_date": datetime(1600, 1, 28, tzinfo=timezone.utc),
"is_quarter": True,
},
{"model": GTASSF133Balances, "fiscal_year": 1600, "fiscal_period": 3, "obligations_incurred_total_cpe": -10},
{
"model": SubmissionAttributes,
"submission_id": -1,
"reporting_fiscal_year": 1600,
"reporting_fiscal_quarter": 1,
"reporting_fiscal_period": 3,
},
{
"model": ToptierAgency,
"toptier_agency_id": -1,
"name": "random_funding_name_1",
"toptier_code": "random_funding_code_1",
},
{
"model": ToptierAgency,
"toptier_agency_id": -2,
"name": "random_funding_name_2",
"toptier_code": "random_funding_code_2",
},
{"model": Agency, "id": -1, "toptier_agency_id": -1, "toptier_flag": True},
{"model": Agency, "id": -2, "toptier_agency_id": -2, "toptier_flag": True},
{"model": TreasuryAppropriationAccount, "treasury_account_identifier": -1, "funding_toptier_agency_id": -1},
{"model": TreasuryAppropriationAccount, "treasury_account_identifier": -2, "funding_toptier_agency_id": -2},
{
"model": FinancialAccountsByAwards,
"financial_accounts_by_awards_id": -1,
"submission_id": -1,
"award__latest_transaction__assistance_data__awardee_or_recipient_legal": "random_recipient_name_1",
"treasury_account_id": -1,
"transaction_obligated_amount": -2,
},
{
"model": FinancialAccountsByAwards,
"financial_accounts_by_awards_id": -2,
"submission_id": -1,
"award__latest_transaction__assistance_data__awardee_or_recipient_legal": "random_recipient_name_2",
"treasury_account_id": -1,
"transaction_obligated_amount": -3,
},
{
"model": FinancialAccountsByAwards,
"financial_accounts_by_awards_id": -3,
"submission_id": -1,
"award__latest_transaction__assistance_data__awardee_or_recipient_legal": "random_recipient_name_1",
"treasury_account_id": -2,
"transaction_obligated_amount": -5,
},
{
"model": FinancialAccountsByAwards,
"financial_accounts_by_awards_id": -4,
"submission_id": -1,
"award__latest_transaction__contract_data__awardee_or_recipient_legal": "random_recipient_name_1",
"treasury_account_id": -1,
"transaction_obligated_amount": -7,
},
{
"model": FinancialAccountsByAwards,
"financial_accounts_by_awards_id": -5,
"submission_id": -1,
"award__latest_transaction__contract_data__awardee_or_recipient_legal": "random_recipient_name_4",
"treasury_account_id": -2,
"transaction_obligated_amount": -11,
},
]
for entry in models_to_mock:
mommy.make(entry.pop("model"), **entry)
json_request = {"type": "recipient", "filters": {"agency": "-1", "fy": "1600", "quarter": "1"}}
response = client.post(path=ENDPOINT_URL, content_type=CONTENT_TYPE, data=json.dumps(json_request))
assert response.status_code == status.HTTP_200_OK
json_response = response.json()
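    # Expected arithmetic: only agency -1's account (treasury_account_id -1)
    # is in scope, so random_recipient_name_1 gets -2 + -7 = -9 and
    # random_recipient_name_2 gets -3, for a total of -12.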
expected_results = {
"total": -12,
"agencies": ["random_recipient_name_2", "random_recipient_name_1"],
"amounts": [-3, -9],
}
actual_results = {
"total": json_response["total"],
"agencies": [entry["name"] for entry in json_response["results"]],
"amounts": [entry["amount"] for entry in json_response["results"]],
}
assert expected_results == actual_results
@pytest.mark.django_db
def test_unreported_data_no_data_available(client):
json_request = {"type": "agency", "filters": {"fy": "1700", "quarter": "1"}}
response = client.post(path=ENDPOINT_URL, content_type=CONTENT_TYPE, data=json.dumps(json_request))
assert response.status_code == status.HTTP_200_OK
json_response = response.json()
expected_results = {"total": None}
actual_results = {"total": json_response["total"]}
assert expected_results == actual_results
@pytest.mark.django_db
def test_federal_account_linkage(client):
models = copy.deepcopy(GLOBAL_MOCK_DICT)
for entry in models:
mommy.make(entry.pop("model"), **entry)
json_request = {"type": "federal_account", "filters": {"fy": "1600", "quarter": "1"}}
response = client.post(path=ENDPOINT_URL, content_type=CONTENT_TYPE, data=json.dumps(json_request))
json_response = response.json()
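    # "867-5309" is the federal_account_code mocked above, i.e.
    # agency_identifier "867" joined with main_account_code "5309".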
assert json_response["results"][0]["account_number"] == "867-5309"
@pytest.mark.django_db
def test_budget_function_filter_success(client):
# Test for Budget Function Results
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps({"type": "budget_function", "filters": {"fy": "2017", "quarter": 1}}),
)
assert resp.status_code == status.HTTP_200_OK
    # Test for Federal Account Results filtered by budget function
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps({"type": "federal_account", "filters": {"fy": "2017", "quarter": 1, "budget_function": "050"}}),
)
assert resp.status_code == status.HTTP_200_OK
    # Test for Federal Account Results filtered by budget subfunction
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps(
{
"type": "federal_account",
"filters": {"fy": "2017", "quarter": 1, "budget_function": "050", "budget_subfunction": "053"},
}
),
)
assert resp.status_code == status.HTTP_200_OK
# Test for Program Activity Results
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps(
{
"type": "program_activity",
"filters": {
"fy": "2017",
"quarter": 1,
"budget_function": "050",
"budget_subfunction": "053",
"federal_account": 2715,
},
}
),
)
assert resp.status_code == status.HTTP_200_OK
# Test for Object Class Results
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps(
{
"type": "object_class",
"filters": {
"fy": "2017",
"quarter": 1,
"budget_function": "050",
"budget_subfunction": "053",
"federal_account": 2715,
"program_activity": 17863,
},
}
),
)
assert resp.status_code == status.HTTP_200_OK
# Test for Recipient Results
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps(
{
"type": "recipient",
"filters": {
"fy": "2017",
"quarter": 1,
"budget_function": "050",
"budget_subfunction": "053",
"federal_account": 2715,
"program_activity": 17863,
"object_class": "20",
},
}
),
)
assert resp.status_code == status.HTTP_200_OK
# Test for Award Results
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps(
{
"type": "award",
"filters": {
"fy": "2017",
"quarter": 1,
"budget_function": "050",
"budget_subfunction": "053",
"federal_account": 2715,
"program_activity": 17863,
"object_class": "20",
"recipient": 13916,
},
}
),
)
assert resp.status_code == status.HTTP_200_OK
@pytest.mark.django_db
def test_budget_function_failure(client):
"""Verify error on bad autocomplete request for budget function."""
resp = client.post("/api/v2/search/spending_over_time/", content_type="application/json", data=json.dumps({}))
assert resp.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
@pytest.mark.django_db
def test_object_class_filter_success(client):
# Test for Object Class Results
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps({"type": "object_class", "filters": {"fy": "2017", "quarter": 1}}),
)
assert resp.status_code == status.HTTP_200_OK
# Test for Agency Results
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps({"type": "agency", "filters": {"fy": "2017", "quarter": 1, "object_class": "20"}}),
)
assert resp.status_code == status.HTTP_200_OK
# Test for Federal Account Results
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps({"type": "federal_account", "filters": {"fy": "2017", "quarter": 1, "object_class": "20"}}),
)
assert resp.status_code == status.HTTP_200_OK
# Test for Program Activity Results
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps(
{
"type": "program_activity",
"filters": {"fy": "2017", "quarter": 1, "object_class": "20", "federal_account": 2358},
}
),
)
assert resp.status_code == status.HTTP_200_OK
# Test for Recipient Results
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps(
{
"type": "recipient",
"filters": {
"fy": "2017",
"quarter": 1,
"object_class": "20",
"federal_account": 2358,
"program_activity": 15103,
},
}
),
)
assert resp.status_code == status.HTTP_200_OK
# Test for Award Results
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps(
{
"type": "award",
"filters": {
"fy": "2017",
"quarter": 1,
"object_class": "20",
"federal_account": 2358,
"program_activity": 15103,
"recipient": 301773,
},
}
),
)
assert resp.status_code == status.HTTP_200_OK
@pytest.mark.django_db
def test_object_class_failure(client):
"""Verify error on bad autocomplete request for budget function."""
resp = client.post("/api/v2/search/spending_over_time/", content_type="application/json", data=json.dumps({}))
assert resp.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
@pytest.mark.django_db
def test_agency_filter_success(client):
    # Test for Agency Results
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps({"type": "agency", "filters": {"fy": "2017", "quarter": 1}}),
)
assert resp.status_code == status.HTTP_200_OK
    # Test for Agency Results with a string quarter value
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps({"type": "agency", "filters": {"fy": "2017", "quarter": "3"}}),
)
assert resp.status_code == status.HTTP_200_OK
    # Test for Federal Account Results
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps({"type": "federal_account", "filters": {"fy": "2017", "quarter": 1}}),
)
assert resp.status_code == status.HTTP_200_OK
    # Test for Program Activity Results
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps({"type": "program_activity", "filters": {"fy": "2017", "quarter": 1, "federal_account": 1500}}),
)
assert resp.status_code == status.HTTP_200_OK
    # Test for Object Class Results
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps(
{
"type": "object_class",
"filters": {"fy": "2017", "quarter": 1, "federal_account": 1500, "program_activity": 12697},
}
),
)
assert resp.status_code == status.HTTP_200_OK
# Test for Recipient Results
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps(
{
"type": "recipient",
"filters": {
"fy": "2017",
"quarter": 1,
"federal_account": 1500,
"program_activity": 12697,
"object_class": "40",
},
}
),
)
assert resp.status_code == status.HTTP_200_OK
# Test for Award Results
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps(
{
"type": "award",
"filters": {
"fy": "2017",
"quarter": 1,
"federal_account": 1500,
"program_activity": 12697,
"object_class": "40",
"recipient": 792917,
},
}
),
)
assert resp.status_code == status.HTTP_200_OK
@pytest.mark.django_db
def test_agency_failure(client):
"""Verify error on bad autocomplete request for budget function."""
resp = client.post("/api/v2/search/spending_over_time/", content_type="application/json", data=json.dumps({}))
assert resp.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
    # An out-of-range fiscal year should fail validation
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps({"type": "agency", "filters": {"fy": "23", "quarter": "3"}}),
)
assert resp.status_code == status.HTTP_400_BAD_REQUEST
@pytest.mark.django_db
def test_object_budget_match(client):
models = copy.deepcopy(GLOBAL_MOCK_DICT)
for entry in models:
mommy.make(entry.pop("model"), **entry)
mommy.make(
FinancialAccountsByProgramActivityObjectClass,
**{
"financial_accounts_by_program_activity_object_class_id": -4,
"submission_id": -1,
"treasury_account_id": -1,
"obligations_incurred_by_program_object_class_cpe": -5,
"object_class_id": 1,
},
)
json_request = {"type": "budget_function", "filters": {"fy": "1600", "quarter": "1"}}
response = client.post(path=ENDPOINT_URL, content_type=CONTENT_TYPE, data=json.dumps(json_request))
assert response.status_code == status.HTTP_200_OK
json_response_1 = response.json()
json_request = {"type": "object_class", "filters": {"fy": "1600", "quarter": "1"}}
response = client.post(path=ENDPOINT_URL, content_type=CONTENT_TYPE, data=json.dumps(json_request))
json_response_2 = response.json()
assert json_response_1["results"][0]["amount"] == json_response_2["results"][0]["amount"]
@pytest.mark.django_db
def test_period(client):
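    # A fiscal quarter closes with its third period (period = 3 * quarter),
    # so quarter 1 should return the same payload as period 3, and quarter "3"
    # the same as period "9"; each request pair below asserts that equality.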
    # Agency results: quarter 1 should match period 3
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps({"type": "agency", "filters": {"fy": "2017", "quarter": 1}}),
)
assert resp.status_code == status.HTTP_200_OK
resp2 = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps({"type": "agency", "filters": {"fy": "2017", "period": 3}}),
)
assert resp2.status_code == status.HTTP_200_OK
assert resp.json() == resp2.json()
    # Agency results with string values: quarter "3" should match period "9"
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps({"type": "agency", "filters": {"fy": "2017", "quarter": "3"}}),
)
assert resp.status_code == status.HTTP_200_OK
resp2 = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps({"type": "agency", "filters": {"fy": "2017", "period": "9"}}),
)
assert resp2.status_code == status.HTTP_200_OK
assert resp.json() == resp2.json()
    # Federal Account results: quarter 1 should match period 3
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps({"type": "federal_account", "filters": {"fy": "2017", "quarter": 1}}),
)
assert resp.status_code == status.HTTP_200_OK
resp2 = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps({"type": "federal_account", "filters": {"fy": "2017", "period": 3}}),
)
assert resp2.status_code == status.HTTP_200_OK
assert resp.json() == resp2.json()
    # Program Activity results: quarter 1 should match period 3
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps({"type": "program_activity", "filters": {"fy": "2017", "quarter": 1, "federal_account": 1500}}),
)
assert resp.status_code == status.HTTP_200_OK
resp2 = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps({"type": "program_activity", "filters": {"fy": "2017", "period": 3, "federal_account": 1500}}),
)
assert resp2.status_code == status.HTTP_200_OK
assert resp.json() == resp2.json()
    # Object Class results: quarter 1 should match period 3
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps(
{
"type": "object_class",
"filters": {"fy": "2017", "quarter": 1, "federal_account": 1500, "program_activity": 12697},
}
),
)
assert resp.status_code == status.HTTP_200_OK
resp2 = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps(
{
"type": "object_class",
"filters": {"fy": "2017", "period": 3, "federal_account": 1500, "program_activity": 12697},
}
),
)
assert resp2.status_code == status.HTTP_200_OK
assert resp.json() == resp2.json()
# Test for Recipient Results
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps(
{
"type": "recipient",
"filters": {
"fy": "2017",
"quarter": 1,
"federal_account": 1500,
"program_activity": 12697,
"object_class": "40",
},
}
),
)
assert resp.status_code == status.HTTP_200_OK
resp2 = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps(
{
"type": "recipient",
"filters": {
"fy": "2017",
"period": 3,
"federal_account": 1500,
"program_activity": 12697,
"object_class": "40",
},
}
),
)
assert resp2.status_code == status.HTTP_200_OK
assert resp.json() == resp2.json()
# Test for Award Results
resp = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps(
{
"type": "award",
"filters": {
"fy": "2017",
"quarter": 1,
"federal_account": 1500,
"program_activity": 12697,
"object_class": "40",
"recipient": 792917,
},
}
),
)
assert resp.status_code == status.HTTP_200_OK
resp2 = client.post(
"/api/v2/spending/",
content_type="application/json",
data=json.dumps(
{
"type": "award",
"filters": {
"fy": "2017",
"period": 3,
"federal_account": 1500,
"program_activity": 12697,
"object_class": "40",
"recipient": 792917,
},
}
),
)
assert resp2.status_code == status.HTTP_200_OK
assert resp.json() == resp2.json()
@pytest.mark.django_db
def test_unreported_file_c(client):
models_to_mock = [
{
"model": DABSSubmissionWindowSchedule,
"id": 1600031,
"period_end_date": datetime(1599, 12, 31, tzinfo=timezone.utc),
"submission_fiscal_year": 1600,
"submission_fiscal_quarter": 1,
"submission_fiscal_month": 3,
"submission_reveal_date": datetime(1600, 1, 28, tzinfo=timezone.utc),
"is_quarter": True,
},
{"model": GTASSF133Balances, "fiscal_year": 1600, "fiscal_period": 3, "obligations_incurred_total_cpe": -10},
{
"model": SubmissionAttributes,
"submission_id": -1,
"reporting_fiscal_year": 1600,
"reporting_fiscal_quarter": 1,
"reporting_fiscal_period": 3,
},
{
"model": ToptierAgency,
"toptier_agency_id": -1,
"name": "random_funding_name_1",
"toptier_code": "random_funding_code_1",
},
{
"model": ToptierAgency,
"toptier_agency_id": -2,
"name": "random_funding_name_2",
"toptier_code": "random_funding_code_2",
},
{"model": Agency, "id": -1, "toptier_agency_id": -1, "toptier_flag": True},
{"model": Agency, "id": -2, "toptier_agency_id": -2, "toptier_flag": True},
{"model": TreasuryAppropriationAccount, "treasury_account_identifier": -1, "funding_toptier_agency_id": -1},
{"model": TreasuryAppropriationAccount, "treasury_account_identifier": -2, "funding_toptier_agency_id": -2},
{
"model": FinancialAccountsByProgramActivityObjectClass,
"financial_accounts_by_program_activity_object_class_id": -1,
"submission_id": -1,
"treasury_account_id": -1,
"obligations_incurred_by_program_object_class_cpe": -5,
},
{
"model": FinancialAccountsByProgramActivityObjectClass,
"financial_accounts_by_program_activity_object_class_id": -2,
"submission_id": -1,
"treasury_account_id": -1,
"obligations_incurred_by_program_object_class_cpe": -5,
},
{
"model": FinancialAccountsByProgramActivityObjectClass,
"financial_accounts_by_program_activity_object_class_id": -3,
"submission_id": -1,
"treasury_account_id": -1,
"obligations_incurred_by_program_object_class_cpe": -5,
},
{
"model": FinancialAccountsByAwards,
"financial_accounts_by_awards_id": -1,
"submission_id": -1,
"award__latest_transaction__assistance_data__awardee_or_recipient_legal": "random_recipient_name_1",
"treasury_account_id": -1,
"transaction_obligated_amount": -2,
},
{
"model": FinancialAccountsByAwards,
"financial_accounts_by_awards_id": -2,
"submission_id": -1,
"award__latest_transaction__assistance_data__awardee_or_recipient_legal": "random_recipient_name_2",
"treasury_account_id": -1,
"transaction_obligated_amount": -3,
},
{
"model": FinancialAccountsByAwards,
"financial_accounts_by_awards_id": -3,
"submission_id": -1,
"award__latest_transaction__assistance_data__awardee_or_recipient_legal": "random_recipient_name_1",
"treasury_account_id": -2,
"transaction_obligated_amount": -5,
},
{
"model": FinancialAccountsByAwards,
"financial_accounts_by_awards_id": -4,
"submission_id": -1,
"award__latest_transaction__contract_data__awardee_or_recipient_legal": "random_recipient_name_1",
"treasury_account_id": -1,
"transaction_obligated_amount": -7,
},
{
"model": FinancialAccountsByAwards,
"financial_accounts_by_awards_id": -5,
"submission_id": -1,
"award__latest_transaction__contract_data__awardee_or_recipient_legal": "random_recipient_name_4",
"treasury_account_id": -2,
"transaction_obligated_amount": -11,
},
]
for entry in models_to_mock:
mommy.make(entry.pop("model"), **entry)
json_request = {"type": "recipient", "filters": {"agency": "-1", "fy": "1600", "quarter": "1"}}
resp = client.post("/api/v2/spending/", content_type="application/json", data=json_request)
json_request2 = {"type": "object_class", "filters": {"agency": "-1", "fy": "1600", "quarter": "1"}}
resp2 = client.post("/api/v2/spending/", content_type="application/json", data=json_request2)
assert resp.status_code == status.HTTP_200_OK
assert resp2.status_code == status.HTTP_200_OK
response = resp.json()
response2 = resp2.json()
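    # Expected arithmetic: File C awards for agency -1 give recipient_1
    # -2 + -7 = -9 and recipient_2 -3 (total -12), while File B reports
    # 3 * -5 = -15 for the same account, so the File B/File C difference
    # -15 - (-12) = -3 shows up as the "Non-Award Spending" row.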
expected_results = {
"total": -15,
"agencies": ["random_recipient_name_2", "Non-Award Spending", "random_recipient_name_1"],
"amounts": [-3, -3, -9],
}
actual_results = {
"total": response["total"],
"agencies": [entry["name"] for entry in response["results"]],
"amounts": [entry["amount"] for entry in response["results"]],
}
assert expected_results == actual_results
assert response["total"] == response2["total"]
hexsha: 1c46a526cd71a1a644839ed9cdd68a4e1e211d57 | size: 15561 | ext: py | lang: Python
path: examples/tsunami/eta_init_force_dry/setrun.py | repo: AsianHam/geoclaw @ b5f9ee8cd6e64d107ba8bba1e6d588aa7bf6d417 | licenses: ["BSD-3-Clause"]
stars: 51 (2015-07-01 to 2022-03-07) | issues: 274 (2015-02-20 to 2022-03-09) | forks: 66 (2015-01-10 to 2022-02-24)
"""
Module to set up run time parameters for Clawpack.
The values set in the function setrun are then written out to data files
that will be read in by the Fortran code.
"""
from __future__ import absolute_import
from __future__ import print_function
import os, sys
import numpy as np
try:
    CLAW = os.environ['CLAW']
except KeyError:
    raise Exception("*** Must first set CLAW environment variable")
from clawpack.geoclaw.data import ForceDry
from clawpack.amrclaw.data import FlagRegion
#------------------------------
def setrun(claw_pkg='geoclaw'):
#------------------------------
"""
Define the parameters used for running Clawpack.
INPUT:
claw_pkg expected to be "geoclaw" for this setrun.
OUTPUT:
rundata - object of class ClawRunData
"""
from clawpack.clawutil import data
assert claw_pkg.lower() == 'geoclaw', "Expected claw_pkg = 'geoclaw'"
num_dim = 2
rundata = data.ClawRunData(claw_pkg, num_dim)
#------------------------------------------------------------------
# Problem-specific parameters to be written to setprob.data:
#------------------------------------------------------------------
#probdata = rundata.new_UserData(name='probdata',fname='setprob.data')
#probdata.add_param('variable_eta_init', True) # now in qinit info
#------------------------------------------------------------------
# Standard Clawpack parameters to be written to claw.data:
# (or to amr2ez.data for AMR)
#------------------------------------------------------------------
clawdata = rundata.clawdata # initialized when rundata instantiated
# Set single grid parameters first.
# See below for AMR parameters.
# ---------------
# Spatial domain:
# ---------------
# Number of space dimensions:
clawdata.num_dim = num_dim
# Lower and upper edge of computational domain:
    # x values should be integer multiples of 1/3"
    # y values should be integer multiples of 1/3"
# Note: always satisfied if limits are multiples of 0.01 degree
arcsec16 = 1./(6*3600.)
# choose domain and offset edges by half a 1/3" cell so
# cell centers are exactly at DEM grid points:
clawdata.lower[0] = -1.9 - arcsec16 # west longitude
clawdata.upper[0] = 0.1 - arcsec16 # east longitude
clawdata.lower[1] = -1.9 - arcsec16 # south latitude
clawdata.upper[1] = 1.9 - arcsec16 # north latitude
# choose mx and my so coarsest grid has 2 minute resolution:
clawdata.num_cells[0] = 60
clawdata.num_cells[1] = 114
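    # Quick check of the numbers above: (0.1 - (-1.9)) = 2.0 degrees over 60
    # cells and (1.9 - (-1.9)) = 3.8 degrees over 114 cells both give
    # 1/30 degree = 2 arcminute cells, and arcsec16 = 1/(6*3600) degree is half
    # of a 1/3" cell, so the finest-level cell centers land on DEM grid points.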
# ---------------
# Size of system:
# ---------------
# Number of equations in the system:
clawdata.num_eqn = 3
# Number of auxiliary variables in the aux array (initialized in setaux)
clawdata.num_aux = 3
# Index of aux array corresponding to capacity function, if there is one:
clawdata.capa_index = 2
# -------------
# Initial time:
# -------------
clawdata.t0 = 0.0
# Restart from checkpoint file of a previous run?
# If restarting, t0 above should be from original run, and the
# restart_file 'fort.chkNNNNN' specified below should be in
# the OUTDIR indicated in Makefile.
clawdata.restart = False # True to restart from prior results
clawdata.restart_file = ''
# -------------
# Output times:
#--------------
# Specify at what times the results should be written to fort.q files.
# Note that the time integration stops after the final output time.
# The solution at initial time t0 is always written in addition.
clawdata.output_style = 1
if clawdata.output_style==1:
# Output nout frames at equally spaced times up to tfinal:
clawdata.num_output_times = 15
clawdata.tfinal = 30*60.
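        # i.e. one output frame every 1800 s / 15 = 120 s of simulated time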
clawdata.output_t0 = True # output at initial (or restart) time?
elif clawdata.output_style == 2:
# Specify a list of output times.
clawdata.output_times = [0.5, 1.0]
elif clawdata.output_style == 3:
# Output every iout timesteps with a total of ntot time steps:
clawdata.output_step_interval = 1
clawdata.total_steps = 20
clawdata.output_t0 = True
clawdata.output_format = 'binary'
clawdata.output_q_components = 'all' # need all
clawdata.output_aux_components = 'none' # eta=h+B is in q
clawdata.output_aux_onlyonce = False # output aux arrays each frame
# ---------------------------------------------------
# Verbosity of messages to screen during integration:
# ---------------------------------------------------
# The current t, dt, and cfl will be printed every time step
# at AMR levels <= verbosity. Set verbosity = 0 for no printing.
# (E.g. verbosity == 2 means print only on levels 1 and 2.)
clawdata.verbosity = 1
# --------------
# Time stepping:
# --------------
# if dt_variable==1: variable time steps used based on cfl_desired,
# if dt_variable==0: fixed time steps dt = dt_initial will always be used.
clawdata.dt_variable = True
# Initial time step for variable dt.
# If dt_variable==0 then dt=dt_initial for all steps:
clawdata.dt_initial = 0.2
# Max time step to be allowed if variable dt used:
clawdata.dt_max = 1e+99
# Desired Courant number if variable dt used, and max to allow without
# retaking step with a smaller dt:
clawdata.cfl_desired = 0.8
clawdata.cfl_max = 1.0
# Maximum number of time steps to allow between output times:
clawdata.steps_max = 5000
# ------------------
# Method to be used:
# ------------------
# Order of accuracy: 1 => Godunov, 2 => Lax-Wendroff plus limiters
clawdata.order = 2
# Use dimensional splitting? (not yet available for AMR)
clawdata.dimensional_split = 'unsplit'
# For unsplit method, transverse_waves can be
# 0 or 'none' ==> donor cell (only normal solver used)
# 1 or 'increment' ==> corner transport of waves
# 2 or 'all' ==> corner transport of 2nd order corrections too
clawdata.transverse_waves = 2
# Number of waves in the Riemann solution:
clawdata.num_waves = 3
# List of limiters to use for each wave family:
# Required: len(limiter) == num_waves
# Some options:
# 0 or 'none' ==> no limiter (Lax-Wendroff)
# 1 or 'minmod' ==> minmod
# 2 or 'superbee' ==> superbee
# 3 or 'mc' ==> MC limiter
# 4 or 'vanleer' ==> van Leer
clawdata.limiter = ['mc', 'mc', 'mc']
clawdata.use_fwaves = True # True ==> use f-wave version of algorithms
# Source terms splitting:
# src_split == 0 or 'none' ==> no source term (src routine never called)
# src_split == 1 or 'godunov' ==> Godunov (1st order) splitting used,
# src_split == 2 or 'strang' ==> Strang (2nd order) splitting used, not recommended.
clawdata.source_split = 'godunov'
# --------------------
# Boundary conditions:
# --------------------
# Number of ghost cells (usually 2)
clawdata.num_ghost = 2
# Choice of BCs at xlower and xupper:
# 0 => user specified (must modify bcN.f to use this option)
# 1 => extrapolation (non-reflecting outflow)
# 2 => periodic (must specify this at both boundaries)
# 3 => solid wall for systems where q(2) is normal velocity
clawdata.bc_lower[0] = 'extrap'
clawdata.bc_upper[0] = 'extrap'
clawdata.bc_lower[1] = 'extrap'
clawdata.bc_upper[1] = 'extrap'
# --------------
# Checkpointing:
# --------------
# Specify when checkpoint files should be created that can be
# used to restart a computation.
# negative checkpoint_style means alternate between aaaaa and bbbbb files
# so that at most 2 checkpoint files exist at any time, useful when
# doing frequent checkpoints of large problems.
clawdata.checkpt_style = 0
if clawdata.checkpt_style == 0:
# Do not checkpoint at all
pass
elif clawdata.checkpt_style == 1:
# Checkpoint only at tfinal.
pass
elif abs(clawdata.checkpt_style) == 2:
# Specify a list of checkpoint times.
clawdata.checkpt_times = 3600.*np.arange(1,16,1)
elif abs(clawdata.checkpt_style) == 3:
# Checkpoint every checkpt_interval timesteps (on Level 1)
# and at the final time.
clawdata.checkpt_interval = 5
# ---------------
# AMR parameters:
# ---------------
amrdata = rundata.amrdata
# max number of refinement levels:
amrdata.amr_levels_max = 4
# List of refinement ratios at each level (length at least mxnest-1)
# dx = dy = 2', 10", 2", 1/3":
amrdata.refinement_ratios_x = [12,5,6]
amrdata.refinement_ratios_y = [12,5,6]
amrdata.refinement_ratios_t = [12,5,6]
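    # i.e. 2' = 120" at level 1, then 120/12 = 10", 10/5 = 2", 2/6 = 1/3"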
# Specify type of each aux variable in amrdata.auxtype.
# This must be a list of length maux, each element of which is one of:
# 'center', 'capacity', 'xleft', or 'yleft' (see documentation).
amrdata.aux_type = ['center','capacity','yleft']
# Flag using refinement routine flag2refine rather than richardson error
amrdata.flag_richardson = False # use Richardson?
amrdata.flag2refine = True
# steps to take on each level L between regriddings of level L+1:
amrdata.regrid_interval = 3
# width of buffer zone around flagged points:
# (typically the same as regrid_interval so waves don't escape):
amrdata.regrid_buffer_width = 2
# clustering alg. cutoff for (# flagged pts) / (total # of cells refined)
# (closer to 1.0 => more small grids may be needed to cover flagged cells)
amrdata.clustering_cutoff = 0.700000
# print info about each regridding up to this level:
amrdata.verbosity_regrid = 1
# ---------------
# Regions:
# ---------------
#rundata.regiondata.regions = []
# to specify regions of refinement append lines of the form
# [minlevel,maxlevel,t1,t2,x1,x2,y1,y2]
# NO OLD STYLE REGIONS USED HERE
# ---------------
# NEW flagregions
# ---------------
flagregions = rundata.flagregiondata.flagregions # initialized to []
# now append as many flagregions as desired to this list:
# The entire domain restricted to level 1 for illustration:
# Note that this is a rectangle specified in the new way:
# (other regions below will force/allow more refinement)
flagregion = FlagRegion(num_dim=2)
flagregion.name = 'Region_domain'
flagregion.minlevel = 1
flagregion.maxlevel = 1
flagregion.t1 = 0.
flagregion.t2 = 1e9
flagregion.spatial_region_type = 1 # Rectangle
# domain plus a bit so kml files look nicer:
flagregion.spatial_region = [clawdata.lower[0] - 0.1,
clawdata.upper[0] + 0.1,
clawdata.lower[1] - 0.1,
clawdata.upper[1] + 0.1]
flagregions.append(flagregion)
# force 2 levels around dtopo source region for short time:
flagregion = FlagRegion(num_dim=2)
flagregion.name = 'Region_level2_dtopo'
flagregion.minlevel = 2
flagregion.maxlevel = 2
flagregion.t1 = 0.
flagregion.t2 = 2.
flagregion.spatial_region_type = 1 # Rectangle
flagregion.spatial_region = [-2,1,-1,1]
flagregions.append(flagregion)
# allow 3 levels around coastal region for all times:
flagregion = FlagRegion(num_dim=2)
flagregion.name = 'Region_level3'
flagregion.minlevel = 1
flagregion.maxlevel = 3
flagregion.t1 = 0.
flagregion.t2 = 1e9
flagregion.spatial_region_type = 1 # Rectangle
flagregion.spatial_region = [-0.01,0.01,-0.01,0.01]
flagregions.append(flagregion)
# force 4 levels around coastal region starting at 5 minutes:
flagregion = FlagRegion(num_dim=2)
flagregion.name = 'Region_level4'
flagregion.minlevel = 4
flagregion.maxlevel = 4
flagregion.t1 = 5*60.
flagregion.t2 = 1e9
flagregion.spatial_region_type = 1 # Rectangle
flagregion.spatial_region = [-0.005, 0.01, -0.011, 0.011]
flagregions.append(flagregion)
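    # Net effect of the four flagregions above: the full domain stays at the
    # 2' level 1; the dtopo source is held at level 2 (10") for the first two
    # seconds; the coastal box may refine to level 3 (2") at any time and is
    # forced to level 4 (1/3") from t = 5 minutes onward.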
# ---------------
# Gauges:
# ---------------
# for gauges append lines of the form [gaugeno, x, y, t1, t2]
rundata.gaugedata.gauges = []
# Set GeoClaw specific runtime parameters.
    try:
        geo_data = rundata.geo_data
    except AttributeError:
        print("*** Error, this rundata has no geo_data attribute")
        raise
# == Physics ==
geo_data.gravity = 9.81
geo_data.coordinate_system = 2
geo_data.earth_radius = 6367.5e3
# == Forcing Options
geo_data.coriolis_forcing = False
# == Algorithm and Initial Conditions ==
geo_data.sea_level = 0.0
geo_data.dry_tolerance = 1.e-3
geo_data.friction_forcing = True
    geo_data.manning_coefficient = 0.025
geo_data.friction_depth = 1e6
# Refinement settings
refinement_data = rundata.refinement_data
refinement_data.variable_dt_refinement_ratios = True
refinement_data.wave_tolerance = 0.2
# == settopo.data values ==
topofiles = rundata.topo_data.topofiles
# for topography, append lines of the form
# [topotype, fname]
topodir = 'input_files'
topofiles.append([3, topodir + '/topo_ocean.tt3'])
topofiles.append([3, topodir + '/topo_shore.tt3'])
# == setdtopo.data values ==
dtopo_data = rundata.dtopo_data
# for moving topography, append lines of the form : (<= 1 allowed for now!)
# [topotype, fname]
dtopodir = 'input_files'
dtopo_data.dtopofiles.append([3, dtopodir + '/dtopo_test.tt3'])
dtopo_data.dt_max_dtopo = 1.0
# == setqinit.data values ==
rundata.qinit_data.qinit_type = 0
rundata.qinit_data.qinitfiles = []
# for qinit perturbations, append lines of the form: (<= 1 allowed for now!)
# [fname]
# NEW feature to adjust sea level by dtopo:
rundata.qinit_data.variable_eta_init = True
# NEW feature to force dry land some locations below sea level:
force_dry = ForceDry()
force_dry.tend = 7*60.
force_dry.fname = 'input_files/force_dry_init.tt3'
rundata.qinit_data.force_dry_list.append(force_dry)
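    # The force_dry mask (presumably a tt3 grid of 0/1 flags) keeps the marked
    # cells dry at initialization even where topography lies below the adjusted
    # sea level; tend = 7*60 means this only applies to grids created during
    # the first 7 minutes, after which normal wet/dry initialization resumes.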
# == fgmax.data values ==
#fgmax_files = rundata.fgmax_data.fgmax_files
# for fixed grids append to this list names of any fgmax input files
# ----- For developers -----
# Toggle debugging print statements:
amrdata.dprint = False # print domain flags
amrdata.eprint = False # print err est flags
amrdata.edebug = False # even more err est flags
amrdata.gprint = False # grid bisection/clustering
amrdata.nprint = False # proper nesting output
amrdata.pprint = False # proj. of tagged points
amrdata.rprint = False # print regridding summary
amrdata.sprint = False # space/memory output
amrdata.tprint = False # time step reporting each level
amrdata.uprint = False # update/upbnd reporting
# More AMR parameters can be set -- see the defaults in pyclaw/data.py
return rundata
# end of function setrun
# ----------------------
if __name__ == '__main__':
# Set up run-time parameters and write all data files.
import sys
rundata = setrun(*sys.argv[1:])
rundata.write()
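A minimal sketch of driving the module above by hand, assuming the file is saved as setrun.py on the Python path (the module name is an assumption):

from setrun import setrun

rundata = setrun('geoclaw')
print(rundata.clawdata.num_cells)  # [60, 114] per the settings above
rundata.write()  # writes the *.data files that the GeoClaw code reads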
| 30.998008
| 92
| 0.620911
|
from __future__ import absolute_import
from __future__ import print_function
import os, sys
import numpy as np
try:
CLAW = os.environ['CLAW']
except KeyError:
    raise Exception("*** Must first set CLAW environment variable")
from clawpack.geoclaw.data import ForceDry
from clawpack.amrclaw.data import FlagRegion
def setrun(claw_pkg='geoclaw'):
from clawpack.clawutil import data
assert claw_pkg.lower() == 'geoclaw', "Expected claw_pkg = 'geoclaw'"
num_dim = 2
rundata = data.ClawRunData(claw_pkg, num_dim)
clawdata = rundata.clawdata
clawdata.num_dim = num_dim
# y values should be integer multiples of 1/3"
arcsec16 = 1./(6*3600.)
# cell centers are exactly at DEM grid points:
clawdata.lower[0] = -1.9 - arcsec16 # west longitude
clawdata.upper[0] = 0.1 - arcsec16 # east longitude
clawdata.lower[1] = -1.9 - arcsec16 # south latitude
clawdata.upper[1] = 1.9 - arcsec16 # north latitude
# choose mx and my so coarsest grid has 2 minute resolution:
clawdata.num_cells[0] = 60
clawdata.num_cells[1] = 114
# ---------------
# Size of system:
# ---------------
# Number of equations in the system:
clawdata.num_eqn = 3
# Number of auxiliary variables in the aux array (initialized in setaux)
clawdata.num_aux = 3
# Index of aux array corresponding to capacity function, if there is one:
clawdata.capa_index = 2
# -------------
# Initial time:
# -------------
clawdata.t0 = 0.0
# Restart from checkpoint file of a previous run?
# If restarting, t0 above should be from original run, and the
# restart_file 'fort.chkNNNNN' specified below should be in
# the OUTDIR indicated in Makefile.
clawdata.restart = False # True to restart from prior results
clawdata.restart_file = ''
# -------------
# Output times:
#--------------
# Specify at what times the results should be written to fort.q files.
# Note that the time integration stops after the final output time.
# The solution at initial time t0 is always written in addition.
clawdata.output_style = 1
if clawdata.output_style==1:
# Output nout frames at equally spaced times up to tfinal:
clawdata.num_output_times = 15
clawdata.tfinal = 30*60.
clawdata.output_t0 = True # output at initial (or restart) time?
elif clawdata.output_style == 2:
# Specify a list of output times.
clawdata.output_times = [0.5, 1.0]
elif clawdata.output_style == 3:
# Output every iout timesteps with a total of ntot time steps:
clawdata.output_step_interval = 1
clawdata.total_steps = 20
clawdata.output_t0 = True
clawdata.output_format = 'binary'
clawdata.output_q_components = 'all' # need all
clawdata.output_aux_components = 'none' # eta=h+B is in q
clawdata.output_aux_onlyonce = False # output aux arrays each frame
# ---------------------------------------------------
# Verbosity of messages to screen during integration:
# ---------------------------------------------------
# The current t, dt, and cfl will be printed every time step
# at AMR levels <= verbosity. Set verbosity = 0 for no printing.
# (E.g. verbosity == 2 means print only on levels 1 and 2.)
clawdata.verbosity = 1
# --------------
# Time stepping:
# --------------
# if dt_variable==1: variable time steps used based on cfl_desired,
# if dt_variable==0: fixed time steps dt = dt_initial will always be used.
clawdata.dt_variable = True
# Initial time step for variable dt.
# If dt_variable==0 then dt=dt_initial for all steps:
clawdata.dt_initial = 0.2
# Max time step to be allowed if variable dt used:
clawdata.dt_max = 1e+99
# Desired Courant number if variable dt used, and max to allow without
# retaking step with a smaller dt:
clawdata.cfl_desired = 0.8
clawdata.cfl_max = 1.0
# Maximum number of time steps to allow between output times:
clawdata.steps_max = 5000
# ------------------
# Method to be used:
# ------------------
# Order of accuracy: 1 => Godunov, 2 => Lax-Wendroff plus limiters
clawdata.order = 2
# Use dimensional splitting? (not yet available for AMR)
clawdata.dimensional_split = 'unsplit'
# For unsplit method, transverse_waves can be
# 0 or 'none' ==> donor cell (only normal solver used)
# 1 or 'increment' ==> corner transport of waves
# 2 or 'all' ==> corner transport of 2nd order corrections too
clawdata.transverse_waves = 2
# Number of waves in the Riemann solution:
clawdata.num_waves = 3
# List of limiters to use for each wave family:
# Required: len(limiter) == num_waves
# Some options:
# 0 or 'none' ==> no limiter (Lax-Wendroff)
# 1 or 'minmod' ==> minmod
# 2 or 'superbee' ==> superbee
# 3 or 'mc' ==> MC limiter
# 4 or 'vanleer' ==> van Leer
clawdata.limiter = ['mc', 'mc', 'mc']
clawdata.use_fwaves = True # True ==> use f-wave version of algorithms
# Source terms splitting:
# src_split == 0 or 'none' ==> no source term (src routine never called)
# src_split == 1 or 'godunov' ==> Godunov (1st order) splitting used,
# src_split == 2 or 'strang' ==> Strang (2nd order) splitting used, not recommended.
clawdata.source_split = 'godunov'
# --------------------
# Boundary conditions:
# --------------------
# Number of ghost cells (usually 2)
clawdata.num_ghost = 2
# Choice of BCs at xlower and xupper:
# 0 => user specified (must modify bcN.f to use this option)
# 1 => extrapolation (non-reflecting outflow)
# 2 => periodic (must specify this at both boundaries)
# 3 => solid wall for systems where q(2) is normal velocity
clawdata.bc_lower[0] = 'extrap'
clawdata.bc_upper[0] = 'extrap'
clawdata.bc_lower[1] = 'extrap'
clawdata.bc_upper[1] = 'extrap'
# --------------
# Checkpointing:
# --------------
# Specify when checkpoint files should be created that can be
# used to restart a computation.
# negative checkpoint_style means alternate between aaaaa and bbbbb files
# so that at most 2 checkpoint files exist at any time, useful when
# doing frequent checkpoints of large problems.
clawdata.checkpt_style = 0
if clawdata.checkpt_style == 0:
# Do not checkpoint at all
pass
elif clawdata.checkpt_style == 1:
# Checkpoint only at tfinal.
pass
elif abs(clawdata.checkpt_style) == 2:
# Specify a list of checkpoint times.
clawdata.checkpt_times = 3600.*np.arange(1,16,1)
elif abs(clawdata.checkpt_style) == 3:
# Checkpoint every checkpt_interval timesteps (on Level 1)
# and at the final time.
clawdata.checkpt_interval = 5
# ---------------
# AMR parameters:
# ---------------
amrdata = rundata.amrdata
# max number of refinement levels:
amrdata.amr_levels_max = 4
# List of refinement ratios at each level (length at least mxnest-1)
# dx = dy = 2', 10", 2", 1/3":
amrdata.refinement_ratios_x = [12,5,6]
amrdata.refinement_ratios_y = [12,5,6]
amrdata.refinement_ratios_t = [12,5,6]
# Specify type of each aux variable in amrdata.auxtype.
# This must be a list of length maux, each element of which is one of:
# 'center', 'capacity', 'xleft', or 'yleft' (see documentation).
amrdata.aux_type = ['center','capacity','yleft']
# Flag using refinement routine flag2refine rather than richardson error
amrdata.flag_richardson = False # use Richardson?
amrdata.flag2refine = True
# steps to take on each level L between regriddings of level L+1:
amrdata.regrid_interval = 3
# width of buffer zone around flagged points:
# (typically the same as regrid_interval so waves don't escape):
amrdata.regrid_buffer_width = 2
amrdata.verbosity_regrid = 1
flagregions = rundata.flagregiondata.flagregions
flagregion = FlagRegion(num_dim=2)
flagregion.name = 'Region_domain'
flagregion.minlevel = 1
flagregion.maxlevel = 1
flagregion.t1 = 0.
flagregion.t2 = 1e9
flagregion.spatial_region_type = 1
flagregion.spatial_region = [clawdata.lower[0] - 0.1,
clawdata.upper[0] + 0.1,
clawdata.lower[1] - 0.1,
clawdata.upper[1] + 0.1]
flagregions.append(flagregion)
flagregion = FlagRegion(num_dim=2)
flagregion.name = 'Region_level2_dtopo'
flagregion.minlevel = 2
flagregion.maxlevel = 2
flagregion.t1 = 0.
flagregion.t2 = 2.
flagregion.spatial_region_type = 1
flagregion.spatial_region = [-2,1,-1,1]
flagregions.append(flagregion)
flagregion = FlagRegion(num_dim=2)
flagregion.name = 'Region_level3'
flagregion.minlevel = 1
flagregion.maxlevel = 3
flagregion.t1 = 0.
flagregion.t2 = 1e9
flagregion.spatial_region_type = 1
flagregion.spatial_region = [-0.01,0.01,-0.01,0.01]
flagregions.append(flagregion)
flagregion = FlagRegion(num_dim=2)
flagregion.name = 'Region_level4'
flagregion.minlevel = 4
flagregion.maxlevel = 4
flagregion.t1 = 5*60.
flagregion.t2 = 1e9
flagregion.spatial_region_type = 1
flagregion.spatial_region = [-0.005, 0.01, -0.011, 0.011]
flagregions.append(flagregion)
rundata.gaugedata.gauges = []
try:
geo_data = rundata.geo_data
except AttributeError:
print("*** Error, this rundata has no geo_data attribute")
raise AttributeError("Missing geo_data attribute")
geo_data.gravity = 9.81
geo_data.coordinate_system = 2
geo_data.earth_radius = 6367.5e3
geo_data.coriolis_forcing = False
geo_data.sea_level = 0.0
geo_data.dry_tolerance = 1.e-3
geo_data.friction_forcing = True
geo_data.manning_coefficient = 0.025
geo_data.friction_depth = 1e6
refinement_data = rundata.refinement_data
refinement_data.variable_dt_refinement_ratios = True
refinement_data.wave_tolerance = 0.2
topofiles = rundata.topo_data.topofiles
topodir = 'input_files'
topofiles.append([3, topodir + '/topo_ocean.tt3'])
topofiles.append([3, topodir + '/topo_shore.tt3'])
dtopo_data = rundata.dtopo_data
dtopodir = 'input_files'
dtopo_data.dtopofiles.append([3, dtopodir + '/dtopo_test.tt3'])
dtopo_data.dt_max_dtopo = 1.0
rundata.qinit_data.qinit_type = 0
rundata.qinit_data.qinitfiles = []
rundata.qinit_data.variable_eta_init = True
force_dry = ForceDry()
force_dry.tend = 7*60.
force_dry.fname = 'input_files/force_dry_init.tt3'
rundata.qinit_data.force_dry_list.append(force_dry)
amrdata.dprint = False
amrdata.eprint = False
amrdata.edebug = False
amrdata.gprint = False
amrdata.nprint = False
amrdata.pprint = False
amrdata.rprint = False
amrdata.sprint = False
amrdata.tprint = False
amrdata.uprint = False
return rundata
if __name__ == '__main__':
import sys
rundata = setrun(*sys.argv[1:])
rundata.write()
| true
| true
|
1c46a52971174cbf30b7942c93c8d2e79189a054
| 4037
|
py
|
Python
|
django_opentracing/tracer.py
|
dudymas/opentracing-django
|
3ad67f5c28f7eb36df558dc9e5e171e960afd9cb
|
[
"BSD-3-Clause"
] | null | null | null |
django_opentracing/tracer.py
|
dudymas/opentracing-django
|
3ad67f5c28f7eb36df558dc9e5e171e960afd9cb
|
[
"BSD-3-Clause"
] | null | null | null |
django_opentracing/tracer.py
|
dudymas/opentracing-django
|
3ad67f5c28f7eb36df558dc9e5e171e960afd9cb
|
[
"BSD-3-Clause"
] | null | null | null |
from django.conf import settings
import opentracing
try:
from threading import local
except ImportError:
from django.utils._threading_local import local
_thread_locals = local()
django_tracer = None
def get_tracer():
return opentracing.tracer
def get_current_span(request=None):
if request is None:
request = getattr(_thread_locals, "request", None)
# this lets Django REST framework work seamlessly, since it wraps the request
if hasattr(request, '_request'):
request = request._request
if django_tracer is not None:
return django_tracer.get_span(request)
else:
return None
class DjangoTracer(object):
'''
@param tracer the OpenTracing tracer to be used
to trace requests using this DjangoTracer
'''
def __init__(self, tracer):
global django_tracer
django_tracer = self
self._tracer = tracer
self._current_spans = {}
if not hasattr(settings, 'OPENTRACING_TRACE_ALL'):
self._trace_all = False
elif not getattr(settings, 'OPENTRACING_TRACE_ALL'):
self._trace_all = False
else:
self._trace_all = True
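        # e.g. a hypothetical settings.py entry: OPENTRACING_TRACE_ALL = True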
def get_span(self, request):
'''
@param request
Returns the span tracing this request
'''
return self._current_spans.get(request, None)
def trace(self, *attributes):
'''
Function decorator that traces functions
NOTE: Must be the innermost decorator on the view function
@param attributes any number of Django request attributes
(strings) to be set as tags on the created span
'''
def decorator(view_func):
# TODO: do we want to provide an option of overriding trace_all_requests so that
# callers can trace certain attributes of the request for just this view? (this would
# require reinstating the name-mangling with a trace identifier, and another settings key)
if self._trace_all:
return view_func
# otherwise, execute decorator
def wrapper(request):
span = self._apply_tracing(request, view_func, list(attributes))
r = view_func(request)
self._finish_tracing(request)
return r
return wrapper
return decorator
def _apply_tracing(self, request, view_func, attributes):
'''
Helper function to avoid rewriting for middleware and decorator.
Returns a new span from the request with logged attributes and
correct operation name from the view_func.
'''
setattr(_thread_locals, 'request', request)
# strip headers for trace info
headers = {}
for k,v in request.META.items():
k = k.lower().replace('_','-')
if k.startswith('http-'):
k = k[5:]
headers[k] = v
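        # e.g. META key 'HTTP_X_B3_TRACEID' ends up as header name 'x-b3-traceid'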
# start new span from trace info
span = None
operation_name = view_func.__name__
try:
span_ctx = self._tracer.extract(opentracing.Format.HTTP_HEADERS, headers)
span = self._tracer.start_span(operation_name=operation_name, child_of=span_ctx)
except (opentracing.InvalidCarrierException, opentracing.SpanContextCorruptedException):
span = self._tracer.start_span(operation_name=operation_name)
if span is None:
span = self._tracer.start_span(operation_name=operation_name)
# add span to current spans
self._current_spans[request] = span
# log any traced attributes
for attr in attributes:
if hasattr(request, attr):
payload = str(getattr(request, attr))
if payload:
span.set_tag(attr, payload)
return span
def _finish_tracing(self, request):
span = self._current_spans.pop(request, None)
if span is not None:
span.finish()
setattr(_thread_locals, 'request', None)
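A minimal usage sketch, not part of the original module, assuming a configured Django project; the view is hypothetical, the no-op opentracing.Tracer stands in for a real client, and 'method' and 'path' are genuine attributes of Django's request object:

import opentracing
from django.http import HttpResponse
from django_opentracing.tracer import DjangoTracer

tracer = DjangoTracer(opentracing.Tracer())  # swap in e.g. a Jaeger tracer in practice

@tracer.trace('method', 'path')  # tags the span with request.method and request.path
def my_view(request):
    return HttpResponse('traced')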
| 35.104348
| 101
| 0.628685
|
from django.conf import settings
import opentracing
try:
from threading import local
except ImportError:
from django.utils._threading_local import local
_thread_locals = local()
django_tracer = None
def get_tracer():
return opentracing.tracer
def get_current_span(request=None):
if request is None:
request = getattr(_thread_locals, "request", None)
if hasattr(request, '_request'):
request = request._request
if django_tracer is not None:
return django_tracer.get_span(request)
else:
return None
class DjangoTracer(object):
def __init__(self, tracer):
global django_tracer
django_tracer = self
self._tracer = tracer
self._current_spans = {}
if not hasattr(settings, 'OPENTRACING_TRACE_ALL'):
self._trace_all = False
elif not getattr(settings, 'OPENTRACING_TRACE_ALL'):
self._trace_all = False
else:
self._trace_all = True
def get_span(self, request):
return self._current_spans.get(request, None)
def trace(self, *attributes):
def decorator(view_func):
if self._trace_all:
return view_func
def wrapper(request):
span = self._apply_tracing(request, view_func, list(attributes))
r = view_func(request)
self._finish_tracing(request)
return r
return wrapper
return decorator
def _apply_tracing(self, request, view_func, attributes):
setattr(_thread_locals, 'request', request)
headers = {}
for k,v in request.META.items():
k = k.lower().replace('_','-')
if k.startswith('http-'):
k = k[5:]
headers[k] = v
span = None
operation_name = view_func.__name__
try:
span_ctx = self._tracer.extract(opentracing.Format.HTTP_HEADERS, headers)
span = self._tracer.start_span(operation_name=operation_name, child_of=span_ctx)
except (opentracing.InvalidCarrierException, opentracing.SpanContextCorruptedException):
span = self._tracer.start_span(operation_name=operation_name)
if span is None:
span = self._tracer.start_span(operation_name=operation_name)
self._current_spans[request] = span
for attr in attributes:
if hasattr(request, attr):
payload = str(getattr(request, attr))
if payload:
span.set_tag(attr, payload)
return span
def _finish_tracing(self, request):
span = self._current_spans.pop(request, None)
if span is not None:
span.finish()
setattr(_thread_locals, 'request', None)
| true
| true
|
1c46a5e42c962596d622b69453575d9dba1e629a
| 291
|
py
|
Python
|
manage.py
|
cron-ooo/django-influxdb-metrics
|
7cecc315e12219897d941a6c02eac8ffc182b645
|
[
"MIT"
] | 54
|
2016-11-25T10:00:23.000Z
|
2022-03-17T09:27:49.000Z
|
manage.py
|
cron-ooo/django-influxdb-metrics
|
7cecc315e12219897d941a6c02eac8ffc182b645
|
[
"MIT"
] | 27
|
2016-12-01T17:35:37.000Z
|
2021-03-30T16:37:49.000Z
|
manage.py
|
cron-ooo/django-influxdb-metrics
|
7cecc315e12219897d941a6c02eac8ffc182b645
|
[
"MIT"
] | 23
|
2016-11-22T09:26:28.000Z
|
2022-03-14T11:34:33.000Z
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault('DJANGO_SETTINGS_MODULE',
'influxdb_metrics.tests.settings')
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 24.25
| 64
| 0.71134
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault('DJANGO_SETTINGS_MODULE',
'influxdb_metrics.tests.settings')
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| true
| true
|
1c46a757bacabb1d955e01d6573c009ce4f99f02
| 1,247
|
py
|
Python
|
airflow/utils/types.py
|
shashijangra/airflow-1
|
c3e340584bf1892c4f73aa9e7495b5823dab0c40
|
[
"Apache-2.0"
] | 2
|
2021-07-30T17:25:56.000Z
|
2021-08-03T13:51:09.000Z
|
airflow/utils/types.py
|
shashijangra/airflow-1
|
c3e340584bf1892c4f73aa9e7495b5823dab0c40
|
[
"Apache-2.0"
] | 14
|
2019-12-03T02:54:42.000Z
|
2020-02-27T16:08:10.000Z
|
airflow/utils/types.py
|
shashijangra/airflow-1
|
c3e340584bf1892c4f73aa9e7495b5823dab0c40
|
[
"Apache-2.0"
] | 1
|
2021-07-02T04:23:18.000Z
|
2021-07-02T04:23:18.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import enum
class DagRunType(enum.Enum):
"""Class with DagRun types"""
BACKFILL_JOB = "backfill"
SCHEDULED = "scheduled"
MANUAL = "manual"
@staticmethod
def from_run_id(run_id: str) -> "DagRunType":
"""
Resolved DagRun type from run_id.
"""
for run_type in DagRunType:
if run_id and run_id.startswith(f"{run_type.value}__"):
return run_type
return DagRunType.MANUAL
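A quick illustration of the prefix convention that from_run_id relies on; the run ids below are made up:

assert DagRunType.from_run_id("scheduled__2021-01-01T00:00:00") is DagRunType.SCHEDULED
assert DagRunType.from_run_id("backfill__2021-01-01") is DagRunType.BACKFILL_JOB
assert DagRunType.from_run_id("my-custom-run") is DagRunType.MANUAL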
| 34.638889
| 67
| 0.709703
|
import enum
class DagRunType(enum.Enum):
BACKFILL_JOB = "backfill"
SCHEDULED = "scheduled"
MANUAL = "manual"
@staticmethod
def from_run_id(run_id: str) -> "DagRunType":
for run_type in DagRunType:
if run_id and run_id.startswith(f"{run_type.value}__"):
return run_type
return DagRunType.MANUAL
| true
| true
|
1c46a76ba0a8d816e3975d24a0ab719d4db28b66
| 1,038
|
py
|
Python
|
Download/PythonExercicios/ex028.py
|
r-luis/Python-CursoemVideo
|
f978b2f4ab8444ebb746b4c85bd6db6d7775cbb4
|
[
"MIT"
] | null | null | null |
Download/PythonExercicios/ex028.py
|
r-luis/Python-CursoemVideo
|
f978b2f4ab8444ebb746b4c85bd6db6d7775cbb4
|
[
"MIT"
] | null | null | null |
Download/PythonExercicios/ex028.py
|
r-luis/Python-CursoemVideo
|
f978b2f4ab8444ebb746b4c85bd6db6d7775cbb4
|
[
"MIT"
] | null | null | null |
'''Write a program that makes the computer "think" of an integer between 0 and 5 and asks
the user to try to guess the number the computer chose.
The program should print whether the user won or lost.'''
from random import randint
from time import sleep
comp = randint(0, 5)  # the computer picks a number
cores = {'limpa':'\033[m',
'azul':'\033[34m',
'vermelho':'\033[31m',
'roxo':'\033[1;35m'}
print('{}-*{}'.format(cores['azul'], cores['limpa'])*12)
txt = '{}GUESSING GAME v1.0{}'.format(cores['vermelho'], cores['limpa'])
print('{:^30}'.format(txt))
print('{}*-{}'.format(cores['azul'], cores['limpa'])*12)
usu = int(input('''Type a number between 0 and 5 and try to guess
whether it is the same number chosen by the computer: '''))  # the user picks a number
print('{}PROCESSING...{}'.format(cores['roxo'], cores['limpa']))
sleep(2)
if comp == usu:
print('CONGRATULATIONS, YOU WON!')
else:
print('You lost; the number the PC chose was {}, not {}.'.format(comp, usu))
| 43.25
| 98
| 0.662813
|
from random import randint
from time import sleep
comp = randint(0, 5)
cores = {'limpa':'\033[m',
'azul':'\033[34m',
'vermelho':'\033[31m',
'roxo':'\033[1;35m'}
print('{}-*{}'.format(cores['azul'], cores['limpa'])*12)
txt = '{}GUESSING GAME v1.0{}'.format(cores['vermelho'], cores['limpa'])
print('{:^30}'.format(txt))
print('{}*-{}'.format(cores['azul'], cores['limpa'])*12)
usu = int(input('''Type a number between 0 and 5 and try to guess
whether it is the same number chosen by the computer: '''))
print('{}PROCESSING...{}'.format(cores['roxo'], cores['limpa']))
sleep(2)
if comp == usu:
print('CONGRATULATIONS, YOU WON!')
else:
print('You lost; the number the PC chose was {}, not {}.'.format(comp, usu))
| true
| true
|
1c46a7ad97f59fed8090c80f1a79ff7c63808989
| 1,479
|
py
|
Python
|
test/tests/16_view-status/hooks.py
|
airtower-luna/mod_gnutls
|
b6ce8add210095b2b983f63a818f5d157aff9f89
|
[
"Apache-2.0"
] | 2
|
2015-04-06T11:28:40.000Z
|
2021-07-11T12:34:53.000Z
|
test/tests/16_view-status/hooks.py
|
airtower-luna/mod_gnutls
|
b6ce8add210095b2b983f63a818f5d157aff9f89
|
[
"Apache-2.0"
] | 4
|
2020-09-07T19:27:20.000Z
|
2021-07-29T19:29:40.000Z
|
test/tests/16_view-status/hooks.py
|
airtower-luna/mod_gnutls
|
b6ce8add210095b2b983f63a818f5d157aff9f89
|
[
"Apache-2.0"
] | 1
|
2022-01-26T12:17:17.000Z
|
2022-01-26T12:17:17.000Z
|
from mgstest import require_match, TestExpectationFailed
import re
def post_check(conn_log, response_log):
"""Compare the TLS session information reported by gnutls-cli and the
mod_gnutls status listing."""
# Group 1 is the TLS version, group 2 the ciphers. The certificate
# type that may be enclosed in the same brackets as the TLS
# version is ignored.
re_session = r'\((TLS[\d\.]+).*?\)-(.*)'
# Prefix for gnutls-cli output
re_cli = re.compile(r'(?<=^-\sDescription:\s)' + re_session + '$')
# Prefix in mod_status output provided by mod_gnutls
re_status = re.compile(r'(?<=^Current TLS session:\s)' + re_session + '$')
cli_suite = require_match(re_cli, conn_log,
'Client cipher suite information is missing!')
status_suite = require_match(re_status, response_log,
'Server cipher suite information is missing!')
print(f'Client session info: {cli_suite.group(0)}')
print(f'Server session info: {status_suite.group(0)}')
if cli_suite.group(1) != status_suite.group(1):
raise TestExpectationFailed(
f'Client ({cli_suite.group(1)}) and server '
f'({status_suite.group(1)}) report different protocols!')
if cli_suite.group(2) != status_suite.group(2):
raise TestExpectationFailed(
f'Client ({cli_suite.group(2)}) and server '
f'({status_suite.group(2)}) report different ciphers!')
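For reference, a sketch of the kind of gnutls-cli line the client-side regex is built to match; the exact cipher suite string below is made up:

import re

re_session = r'\((TLS[\d\.]+).*?\)-(.*)'
re_cli = re.compile(r'(?<=^-\sDescription:\s)' + re_session + '$')
m = re_cli.search('- Description: (TLS1.3)-(ECDHE-SECP256R1)-(AES-256-GCM)')
print(m.group(1))  # TLS1.3
print(m.group(2))  # (ECDHE-SECP256R1)-(AES-256-GCM)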
| 41.083333
| 79
| 0.646383
|
from mgstest import require_match, TestExpectationFailed
import re
def post_check(conn_log, response_log):
re_session = r'\((TLS[\d\.]+).*?\)-(.*)'
re_cli = re.compile(r'(?<=^-\sDescription:\s)' + re_session + '$')
re_status = re.compile(r'(?<=^Current TLS session:\s)' + re_session + '$')
cli_suite = require_match(re_cli, conn_log,
'Client cipher suite information is missing!')
status_suite = require_match(re_status, response_log,
'Server cipher suite information is missing!')
print(f'Client session info: {cli_suite.group(0)}')
print(f'Server session info: {status_suite.group(0)}')
if cli_suite.group(1) != status_suite.group(1):
raise TestExpectationFailed(
f'Client ({cli_suite.group(1)}) and server '
f'({status_suite.group(1)}) report different protocols!')
if cli_suite.group(2) != status_suite.group(2):
raise TestExpectationFailed(
f'Client ({cli_suite.group(2)}) and server '
f'({status_suite.group(2)}) report different ciphers!')
| true
| true
|
1c46a7d2b1edab1bb3265dfd5cfcfa7042f23663
| 3,389
|
py
|
Python
|
test/functional/test_framework/coverage.py
|
bitcoinexodus/bitcoinexodus-source
|
742661b3dc9abce61c05fa1561b7fd9496629866
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/coverage.py
|
bitcoinexodus/bitcoinexodus-source
|
742661b3dc9abce61c05fa1561b7fd9496629866
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/coverage.py
|
bitcoinexodus/bitcoinexodus-source
|
742661b3dc9abce61c05fa1561b7fd9496629866
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for doing coverage analysis on the RPC interface.
Provides a way to track which RPC commands are exercised during
testing.
"""
import os
REFERENCE_FILENAME = 'rpc_interface.txt'
class AuthServiceProxyWrapper():
"""
An object that wraps AuthServiceProxy to record specific RPC calls.
"""
def __init__(self, auth_service_proxy_instance, coverage_logfile=None):
"""
Kwargs:
auth_service_proxy_instance (AuthServiceProxy): the instance
being wrapped.
coverage_logfile (str): if specified, write each service_name
out to a file when called.
"""
self.auth_service_proxy_instance = auth_service_proxy_instance
self.coverage_logfile = coverage_logfile
def __getattr__(self, name):
return_val = getattr(self.auth_service_proxy_instance, name)
if not isinstance(return_val, type(self.auth_service_proxy_instance)):
# If proxy getattr returned an unwrapped value, do the same here.
return return_val
return AuthServiceProxyWrapper(return_val, self.coverage_logfile)
def __call__(self, *args, **kwargs):
"""
Delegates to AuthServiceProxy, then writes the particular RPC method
called to a file.
"""
return_val = self.auth_service_proxy_instance.__call__(*args, **kwargs)
self._log_call()
return return_val
def _log_call(self):
rpc_method = self.auth_service_proxy_instance._service_name
if self.coverage_logfile:
with open(self.coverage_logfile, 'a+', encoding='utf8') as f:
f.write("%s\n" % rpc_method)
def __truediv__(self, relative_uri):
return AuthServiceProxyWrapper(self.auth_service_proxy_instance / relative_uri,
self.coverage_logfile)
def get_request(self, *args, **kwargs):
self._log_call()
return self.auth_service_proxy_instance.get_request(*args, **kwargs)
def get_filename(dirname, n_node):
"""
Get a filename unique to the test process ID and node.
This file will contain a list of RPC commands covered.
"""
pid = str(os.getpid())
return os.path.join(
dirname, "coverage.pid%s.node%s.txt" % (pid, str(n_node)))
def write_all_rpc_commands(dirname, node):
"""
Write out a list of all RPC functions available in `bitcoinexodus-cli` for
coverage comparison. This will only happen once per coverage
directory.
Args:
dirname (str): temporary test dir
node (AuthServiceProxy): client
Returns:
bool. if the RPC interface file was written.
"""
filename = os.path.join(dirname, REFERENCE_FILENAME)
if os.path.isfile(filename):
return False
help_output = node.help().split('\n')
commands = set()
for line in help_output:
line = line.strip()
# Ignore blanks and headers
if line and not line.startswith('='):
commands.add("%s\n" % line.split()[0])
with open(filename, 'w', encoding='utf8') as f:
f.writelines(list(commands))
return True
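A self-contained sketch of the wrapper in action; FakeProxy is a made-up stand-in for a real AuthServiceProxy:

import tempfile

class FakeProxy:
    _service_name = 'getblockcount'
    def __call__(self, *args, **kwargs):
        return 0

logfile = get_filename(tempfile.mkdtemp(), 0)
wrapped = AuthServiceProxyWrapper(FakeProxy(), coverage_logfile=logfile)
wrapped()  # returns 0 and appends 'getblockcount' to the coverage log
print(open(logfile).read())  # getblockcount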
| 30.809091
| 87
| 0.661552
|
import os
REFERENCE_FILENAME = 'rpc_interface.txt'
class AuthServiceProxyWrapper():
def __init__(self, auth_service_proxy_instance, coverage_logfile=None):
self.auth_service_proxy_instance = auth_service_proxy_instance
self.coverage_logfile = coverage_logfile
def __getattr__(self, name):
return_val = getattr(self.auth_service_proxy_instance, name)
if not isinstance(return_val, type(self.auth_service_proxy_instance)):
return return_val
return AuthServiceProxyWrapper(return_val, self.coverage_logfile)
def __call__(self, *args, **kwargs):
return_val = self.auth_service_proxy_instance.__call__(*args, **kwargs)
self._log_call()
return return_val
def _log_call(self):
rpc_method = self.auth_service_proxy_instance._service_name
if self.coverage_logfile:
with open(self.coverage_logfile, 'a+', encoding='utf8') as f:
f.write("%s\n" % rpc_method)
def __truediv__(self, relative_uri):
return AuthServiceProxyWrapper(self.auth_service_proxy_instance / relative_uri,
self.coverage_logfile)
def get_request(self, *args, **kwargs):
self._log_call()
return self.auth_service_proxy_instance.get_request(*args, **kwargs)
def get_filename(dirname, n_node):
pid = str(os.getpid())
return os.path.join(
dirname, "coverage.pid%s.node%s.txt" % (pid, str(n_node)))
def write_all_rpc_commands(dirname, node):
filename = os.path.join(dirname, REFERENCE_FILENAME)
if os.path.isfile(filename):
return False
help_output = node.help().split('\n')
commands = set()
for line in help_output:
line = line.strip()
if line and not line.startswith('='):
commands.add("%s\n" % line.split()[0])
with open(filename, 'w', encoding='utf8') as f:
f.writelines(list(commands))
return True
| true
| true
|
1c46a823ab8bfa1ba9d65641cad3d178ad45586f
| 902
|
py
|
Python
|
psx/_dump_/33/_dump_ida_/overlay_3/set_funcs.py
|
maoa3/scalpel
|
2e7381b516cded28996d290438acc618d00b2aa7
|
[
"Unlicense"
] | 15
|
2018-06-28T01:11:25.000Z
|
2021-09-27T15:57:18.000Z
|
psx/_dump_/33/_dump_ida_/overlay_3/set_funcs.py
|
maoa3/scalpel
|
2e7381b516cded28996d290438acc618d00b2aa7
|
[
"Unlicense"
] | 7
|
2018-06-29T04:08:23.000Z
|
2019-10-17T13:57:22.000Z
|
psx/_dump_/33/_dump_ida_/overlay_3/set_funcs.py
|
maoa3/scalpel
|
2e7381b516cded28996d290438acc618d00b2aa7
|
[
"Unlicense"
] | 7
|
2018-06-28T01:11:34.000Z
|
2020-05-23T09:21:48.000Z
|
del_items(0x800A1608)
SetType(0x800A1608, "void VID_OpenModule__Fv()")
del_items(0x800A16C8)
SetType(0x800A16C8, "void InitScreens__Fv()")
del_items(0x800A17B8)
SetType(0x800A17B8, "void MEM_SetupMem__Fv()")
del_items(0x800A17E4)
SetType(0x800A17E4, "void SetupWorkRam__Fv()")
del_items(0x800A1874)
SetType(0x800A1874, "void SYSI_Init__Fv()")
del_items(0x800A1980)
SetType(0x800A1980, "void GM_Open__Fv()")
del_items(0x800A19A4)
SetType(0x800A19A4, "void PA_Open__Fv()")
del_items(0x800A19DC)
SetType(0x800A19DC, "void PAD_Open__Fv()")
del_items(0x800A1A20)
SetType(0x800A1A20, "void OVR_Open__Fv()")
del_items(0x800A1A40)
SetType(0x800A1A40, "void SCR_Open__Fv()")
del_items(0x800A1A70)
SetType(0x800A1A70, "void DEC_Open__Fv()")
del_items(0x800A1CE4)
SetType(0x800A1CE4, "char *GetVersionString__FPc(char *VersionString2)")
del_items(0x800A1DB8)
SetType(0x800A1DB8, "char *GetWord__FPc(char *VStr)")
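The dump above repeats one two-call pattern per address; a small helper (hypothetical, built on the same IDAPython calls) captures it:

def set_func(ea, proto):
    del_items(ea)       # undefine anything currently at the address
    SetType(ea, proto)  # apply the C prototype

set_func(0x800A1608, "void VID_OpenModule__Fv()")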
| 33.407407
| 72
| 0.805987
|
del_items(0x800A1608)
SetType(0x800A1608, "void VID_OpenModule__Fv()")
del_items(0x800A16C8)
SetType(0x800A16C8, "void InitScreens__Fv()")
del_items(0x800A17B8)
SetType(0x800A17B8, "void MEM_SetupMem__Fv()")
del_items(0x800A17E4)
SetType(0x800A17E4, "void SetupWorkRam__Fv()")
del_items(0x800A1874)
SetType(0x800A1874, "void SYSI_Init__Fv()")
del_items(0x800A1980)
SetType(0x800A1980, "void GM_Open__Fv()")
del_items(0x800A19A4)
SetType(0x800A19A4, "void PA_Open__Fv()")
del_items(0x800A19DC)
SetType(0x800A19DC, "void PAD_Open__Fv()")
del_items(0x800A1A20)
SetType(0x800A1A20, "void OVR_Open__Fv()")
del_items(0x800A1A40)
SetType(0x800A1A40, "void SCR_Open__Fv()")
del_items(0x800A1A70)
SetType(0x800A1A70, "void DEC_Open__Fv()")
del_items(0x800A1CE4)
SetType(0x800A1CE4, "char *GetVersionString__FPc(char *VersionString2)")
del_items(0x800A1DB8)
SetType(0x800A1DB8, "char *GetWord__FPc(char *VStr)")
| true
| true
|
1c46a948950c865d0aa8057ce1992e94cb642566
| 816
|
py
|
Python
|
ta/tests/__init__.py
|
levelvc/ta
|
69aa29f60c691f1628a62e480cfd6bfb3a5c1793
|
[
"MIT"
] | 2
|
2020-04-13T03:34:01.000Z
|
2020-06-01T14:41:26.000Z
|
ta/tests/__init__.py
|
Glyphack/ta
|
ff46a2cb64f7446921bb3c47c882105a16f9d4f9
|
[
"MIT"
] | null | null | null |
ta/tests/__init__.py
|
Glyphack/ta
|
ff46a2cb64f7446921bb3c47c882105a16f9d4f9
|
[
"MIT"
] | null | null | null |
from ta.tests.momentum import (TestKAMAIndicator, TestRateOfChangeIndicator,
TestRSIIndicator, TestStochasticOscillator,
TestTSIIndicator, TestUltimateOscillator,
TestWilliamsRIndicator)
from ta.tests.trend import (TestADXIndicator, TestCCIIndicator,
TestMACDIndicator, TestPSARIndicator,
TestVortexIndicator)
from ta.tests.utils import TestGeneral
from ta.tests.volatility import TestAverageTrueRange, TestBollingerBands
from ta.tests.volume import (TestAccDistIndexIndicator,
TestEaseOfMovementIndicator,
TestForceIndexIndicator, TestMFIIndicator,
TestOnBalanceVolumeIndicator)
| 58.285714
| 76
| 0.629902
|
from ta.tests.momentum import (TestKAMAIndicator, TestRateOfChangeIndicator,
TestRSIIndicator, TestStochasticOscillator,
TestTSIIndicator, TestUltimateOscillator,
TestWilliamsRIndicator)
from ta.tests.trend import (TestADXIndicator, TestCCIIndicator,
TestMACDIndicator, TestPSARIndicator,
TestVortexIndicator)
from ta.tests.utils import TestGeneral
from ta.tests.volatility import TestAverageTrueRange, TestBollingerBands
from ta.tests.volume import (TestAccDistIndexIndicator,
TestEaseOfMovementIndicator,
TestForceIndexIndicator, TestMFIIndicator,
TestOnBalanceVolumeIndicator)
| true
| true
|
1c46aa65d6d2fd5ed331e10e2a59dc999d7176c4
| 2,420
|
py
|
Python
|
experiments/actions/action_utils.py
|
Tobias-Fischer/dreyeve
|
a65342d9c503ce3ec932e2229b90aaeebfd82944
|
[
"MIT"
] | 83
|
2017-05-29T04:16:42.000Z
|
2022-03-03T08:09:22.000Z
|
experiments/actions/action_utils.py
|
ashinmarin/dreyeve
|
d73979d738e706d90a8aa9d696c6e4dcb19c1134
|
[
"MIT"
] | 26
|
2017-11-09T23:35:52.000Z
|
2022-03-11T03:22:57.000Z
|
experiments/actions/action_utils.py
|
ashinmarin/dreyeve
|
d73979d738e706d90a8aa9d696c6e4dcb19c1134
|
[
"MIT"
] | 36
|
2017-09-23T02:48:41.000Z
|
2022-03-11T01:34:23.000Z
|
"""
Utilities to improve code readability in `predict_actions_with_SVM.py`
"""
import itertools
import numpy as np
import matplotlib.pyplot as plt
from os.path import join, exists
class DreyeveRun:
"""
Single run of the DR(eye)VE dataset.
"""
def __init__(self, dataset_data_root, num_run):
self.num_run = num_run
self.file_course = join(dataset_data_root, '{:02d}'.format(self.num_run), 'speed_course_coord.txt')
self.file_steering = join(dataset_data_root, '{:02d}'.format(self.num_run), 'steering_directions.txt')
self.file_actions = join(dataset_data_root, '{:02d}'.format(self.num_run), 'actions.csv')
class DreyeveDataset:
"""
Class that models the Dreyeve dataset
"""
def __init__(self, dataset_root):
self.dataset_data_root = join(dataset_root, 'DATA')
self.dataset_pred_root = join(dataset_root, 'PREDICTIONS_2017')
self.train_runs = [DreyeveRun(self.dataset_data_root, r) for r in range(0 + 1, 38)]
self.test_runs = [DreyeveRun(self.dataset_data_root, r) for r in range(38, 74 + 1)]
self.frames_each_run = 7500
self.num_train_frames = len(self.train_runs) * self.frames_each_run
self.num_test_frames = len(self.test_runs) * self.frames_each_run
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
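A usage sketch with toy labels; the action names are made up and scikit-learn is assumed to be available for building the matrix:

import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix

labels = ['still', 'accelerating', 'braking']
y_true = ['still', 'accelerating', 'still', 'braking']
y_pred = ['still', 'still', 'still', 'braking']
cm = confusion_matrix(y_true, y_pred, labels=labels)
plot_confusion_matrix(cm, classes=labels, normalize=True)
plt.show()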
| 33.150685
| 110
| 0.645868
|
import itertools
import numpy as np
import matplotlib.pyplot as plt
from os.path import join, exists
class DreyeveRun:
def __init__(self, dataset_data_root, num_run):
self.num_run = num_run
self.file_course = join(dataset_data_root, '{:02d}'.format(self.num_run), 'speed_course_coord.txt')
self.file_steering = join(dataset_data_root, '{:02d}'.format(self.num_run), 'steering_directions.txt')
self.file_actions = join(dataset_data_root, '{:02d}'.format(self.num_run), 'actions.csv')
class DreyeveDataset:
def __init__(self, dataset_root):
self.dataset_data_root = join(dataset_root, 'DATA')
self.dataset_pred_root = join(dataset_root, 'PREDICTIONS_2017')
self.train_runs = [DreyeveRun(self.dataset_data_root, r) for r in range(0 + 1, 38)]
self.test_runs = [DreyeveRun(self.dataset_data_root, r) for r in range(38, 74 + 1)]
self.frames_each_run = 7500
self.num_train_frames = len(self.train_runs) * self.frames_each_run
self.num_test_frames = len(self.test_runs) * self.frames_each_run
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
| true
| true
|