id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
3396690 | <gh_stars>0
import string
import time
def showShip(displayName):
    """Print an ASCII-art ship, embedding displayName in the hull banner.

    The pad widths 20 and 21 keep the banner the same width as the
    empty-name banner of 41 '=' characters (one extra '=' on the right
    because integer division rounds the name's half-length down).
    """
    print(" ___")
    print(" ___....-----'---'-----....___")
    if len(displayName) > 0:
        half = len(displayName) // 2
        banner = " " + "=" * (20 - half) + displayName + "=" * (21 - half)
        print(banner)
    else:
        print(" =========================================")
    print(" ___'---..._______...---'___ ")
    print(" (___) _|_|_|_ (___) ")
    print(" \\____.-'_.---._'-.____// ")
    print(" cccc'.__'---'__.'cccc ")
    print(" ccccc")
def makeSomeScreenSpace(howmany):
    """Print blank lines to push earlier output off-screen.

    Passing 0 falls back to the historical default of 6 groups.
    Note each print("\n") emits two newlines (the literal one plus
    print's own trailing newline).
    """
    lines = howmany if howmany != 0 else 6
    for _ in range(lines):
        print("\n")
def boom():
    """Print ASCII art of an explosion (shown when the ship self-destructs)."""
    # NOTE(review): the art's leading spaces look collapsed in this copy of the
    # source; the literals are kept exactly as found.
    print(" _.-^^---....,,--")
    print(" _-- --_")
    print("< >)")
    print("| |")
    print(" \._ _./")
    print(" ```--. . , ; .--'''")
    print(" | | |")
    print(" .-=|| | |=-.")
    print(" `-=#$%&%$#=-'")
    print(" | ; :|")
    print(" _____.,-#%&$@%#&#~,._____")
def teddy():
    """Print a consolation message and ASCII art of a teddy bear."""
    print("\n")
    print("Quit your crying!! Here's a Teddy Bear!!!")
    print("\n")
    print(" ,~~.,''''`'.~~.")
    print(" : {` .- _ -. '} ;")
    print(" `: O(_)O ;'")
    print(" '; ._|_, ;`")
    print(" '`-.\_/,.'`")
def countdown(t):
    """Count down t seconds in place, then print the explosion and teddy art.

    Overwrites the same console line each second (end="\r") with an
    MM:SS display, sleeping one real second per tick.

    BUG FIX: the original computed `mins` via divmod but only formatted
    `secs`, so the minutes part of the countdown was never shown.
    """
    while t:
        mins, secs = divmod(t, 60)
        timer = '{:02d}:{:02d}'.format(mins, secs)
        print(timer, end="\r")
        time.sleep(1)
        t -= 1
    boom()
    teddy()
def main():
    """Interactive command loop: greet the player, then accept orders.

    Recognised orders (case-insensitive): rename, self destruct, help, quit.
    """
    makeSomeScreenSpace(4)
    playerName = input("I am ready to be relieved, Commander...? ")
    playerName = "Cmdr. " + playerName
    print("Welcome to the bridge, " + playerName)
    showShip("")
    print("\n")
    print("Right now I can't do much of anything, can I?")
    orders = "take command"
    # NOTE(review): `password` is assigned but never used (its literal appears
    # redacted by the dataset); presumably intended for the TODO pw request below.
    password = '<PASSWORD>'
    while orders.lower() != 'quit':
        print("\n")
        orders = input("What are your orders " + playerName + " ")
        if orders.lower() == "rename":
            nameTheShip = input("maybe you want to rename the ship, " + playerName +"?")
            # Only the first character is checked, so "y", "yes", "Yup" all match.
            if (nameTheShip.upper()[:1] == 'Y'):
                shipName = input("what should the name of the ship be? ")
                showShip(shipName)
        if orders.lower() == "self destruct":
            confirmation = input("Are you sure?")
            if confirmation.upper()[:1] == 'Y':
                # TODO add pw request
                # Print("What is the Self Destruct")
                # countdown function call
                countdown(int(5))
                orders = 'quit'
        if orders.lower() == 'help':
            print("I understand orders for Help, Rename, Self Destruct and quit ")


if __name__ == "__main__":
    main()
| StarcoderdataPython |
56023 | """handle requests relative to mu-calculus checking"""
from flask import Blueprint, Response, request
import json
from base.webserver_utils import apply_on_node
import flex
from flex.loading.schema.paths.path_item.operation.responses.single.schema\
import schema_validator
import os
import subprocess
mu_blueprint = Blueprint("mu_blueprint", __name__)
@mu_blueprint.route("/graph/check/", methods=["GET"])
@mu_blueprint.route("/graph/check/<path:path_to_graph>", methods=["GET"])
def check(path_to_graph=""):
    """Run mu-calculus checking on the graph at path_to_graph.

    Resolves the path to a node via apply_on_node and returns the check
    results as a JSON response.
    """
    def check_aux(graph_id):
        # NOTE(review): `hie` and `top` are attributes attached to the blueprint
        # elsewhere in the application — confirm they are set before requests arrive.
        rep = mu_blueprint.hie().check_all_ancestors(graph_id)
        print(rep)
        resp = Response(response=json.dumps(rep),
                        status=200,
                        mimetype="application/json")
        return resp
    return apply_on_node(mu_blueprint.hie(), mu_blueprint.top,
                         path_to_graph, check_aux)
| StarcoderdataPython |
1674441 | <reponame>linkdew/ida-labs<gh_stars>0
import matplotlib.pyplot as plt
import numpy as np
def setIntValue(value):
    """Prompt on stdin until a positive integer is entered; return it.

    `value` is used as a type sentinel: the loop runs while it is not an
    int (callers pass None). Non-numeric input triggers a re-prompt via
    the except branch; values < 1 are re-prompted in the inner loop.
    """
    while not isinstance(value, int):
        try:
            value = int(input())
            if value < 1:
                while value < 1:
                    print("Enter positive integer value: ", end=" ")
                    value = int(input())
        except ValueError:
            print("Enter positive integer value: ", end=" ")
        else:
            # Successful parse (and >= 1): stop prompting.
            break
    return value
def generateNormalizedWeightsVector(dim):
    """Return a list of `dim` non-negative random weights summing to 1.0.

    Draws each weight uniformly from the mass not yet consumed, gives the
    remainder to the last slot, then shuffles so position carries no bias.
    """
    weights = []
    consumed = 0
    for _ in range(dim - 1):
        draw = np.random.uniform(0, 1 - consumed)
        weights.append(draw)
        consumed += draw
    # The final weight takes whatever mass is left so the total is exactly 1.
    weights.append(1 - consumed)
    np.random.shuffle(weights)
    return weights
def main():
    """Plot random metric weights (bars) and per-model weights (lines).

    Asks for the number of models and metrics on stdin, generates random
    normalized weight vectors, appends an aggregated score column, and
    renders a combined bar/line chart with matplotlib.
    """
    models_count = None
    metrics_count = None
    metrics = []
    print("Enter number of alternative models (n): ", end=" ")
    models_count = setIntValue(models_count)
    print("Enter number of quality metrics (m): ", end=" ")
    metrics_count = setIntValue(metrics_count)
    for i in range(metrics_count):
        metrics.append("metric " + str(i + 1))
    # Two extra x slots: one for the aggregate column, one blank for spacing.
    metrics.append("aggr")
    metrics.append("")
    barsdata = generateNormalizedWeightsVector(metrics_count)
    x = np.arange(metrics_count + 2)
    dotsmatrix = []
    for _ in range(models_count):
        dotsmatrix.append(generateNormalizedWeightsVector(metrics_count))
    dotsmatrix = np.array(dotsmatrix)
    # Aggregate score per model = weighted sum of its metric values.
    aggrglobal = np.array(np.dot(dotsmatrix, barsdata)).reshape((models_count, 1))
    # Appended twice so the line extends flat across both trailing x slots.
    dotsmatrix = np.hstack((dotsmatrix, aggrglobal))
    dotsmatrix = np.hstack((dotsmatrix, aggrglobal))
    figure, bars = plt.subplots()
    bars.set_ylabel("metrics weights, %")
    plt.ylim(0, 1)
    plt.yticks([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1])
    plt.xlim(-0.25, len(metrics) - 0.15)
    plt.grid(True)
    plt.title("lab plot")
    barsdata.append(0)  # for aggr
    barsdata.append(0)  # for aggr
    bars.bar(metrics, barsdata, width=0.2, color="orange", edgecolor="black", alpha=0.7)
    bars.tick_params(axis='y', labelcolor="grey")
    models = bars.twinx()
    models.set_ylabel("models weights, %")
    colors = ["w", "k", "g", "m", "c", "r", "b"] * 100
    styles = ["8", "^", "v", "*", "s", "D", "p"] * 100
    for i, color, marker in zip(range(models_count), colors, styles):
        models.plot(x, dotsmatrix[i], marker=marker, color=color)
        plt.text(metrics_count + 1.1, dotsmatrix[i][metrics_count], f"model {i + 1}", fontsize=10)
    models.tick_params(axis='y', labelcolor="grey")
    figure.tight_layout()
    bars.set_facecolor("grey")
    plt.show()


main()
| StarcoderdataPython |
1640838 | # Generated by Django 3.0.7 on 2020-06-10 11:59
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: makes Hash.hash nullable (TextField).

    dependencies = [
        ('Hashcat', '0002_auto_20200610_1150'),
    ]
    operations = [
        migrations.AlterField(
            model_name='hash',
            name='hash',
            field=models.TextField(null=True),
        ),
    ]
| StarcoderdataPython |
194258 | <filename>src/modules/hunting/certificates.py
from ...core.types import Hunter, KubernetesCluster, InformationDisclosure
from ...core.events import handler
from ...core.events.types import Vulnerability, Event, Service
import ssl
import logging
import base64
import re
from socket import socket
email_pattern = re.compile(r"([a-z0-9]+@[a-z0-9]+\.[a-z0-9]+)")
class CertificateEmail(Vulnerability, Event):
    """Certificate includes an email address"""
    def __init__(self, email):
        # Classified as information disclosure against the cluster as a whole.
        Vulnerability.__init__(self, KubernetesCluster, "Certificate Includes Email Address", category=InformationDisclosure)
        self.email = email
        self.evidence = "email: {}".format(self.email)
@handler.subscribe(Service)
class CertificateDiscovery(Hunter):
    """Certificate Email Hunting
    Checks for email addresses in kubernetes ssl certificates
    """
    def __init__(self, event):
        self.event = event

    def execute(self):
        """Fetch the service's TLS certificate and report any embedded emails."""
        try:
            logging.debug("Passive hunter is attempting to get server certificate")
            addr = (str(self.event.host), self.event.port)
            cert = ssl.get_server_certificate(addr)
        except ssl.SSLError:
            # If the server doesn't offer SSL on this port we won't get a certificate
            return
        # BUG FIX: str.strip() removes *characters* from both ends, not a
        # prefix/suffix string, so strip(ssl.PEM_HEADER) could also eat leading
        # base64 characters. Remove the exact header/footer substrings instead.
        c = cert.replace(ssl.PEM_HEADER, "").replace(ssl.PEM_FOOTER, "")
        # BUG FIX: base64.decodestring was deprecated and removed in Python 3.9;
        # b64decode returns bytes, so decode leniently before regex matching.
        certdata = base64.b64decode(c).decode("utf-8", errors="ignore")
        emails = re.findall(email_pattern, certdata)
        for email in emails:
            self.publish_event(CertificateEmail(email=email))
1785267 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: <NAME> (<EMAIL>)
# @Date: 2021-12-06
# @Filename: acquire.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
from __future__ import annotations
from functools import partial
from typing import TYPE_CHECKING
import click
from cherno.actor.exposer import Exposer
from cherno.astrometry import process_and_correct
from cherno.exceptions import ExposerError
from . import cherno_parser
if TYPE_CHECKING:
from . import ChernoCommandType
__all__ = ["acquire"]
@cherno_parser.command()
@click.option(
    "-t",
    "--exposure-time",
    type=float,
    default=None,
    help="Cameras exposure time.",
)
@click.option(
    "-c",
    "--continuous",
    is_flag=True,
    help="Run acquisition in continuous mode.",
)
@click.option(
    "--apply/--no-apply",
    default=True,
    help="Whether to apply the correction.",
)
@click.option(
    "--plot",
    is_flag=True,
    help="Whether to plot results of astrometry.net.",
)
@click.option(
    "--cpulimit",
    type=float,
    default=15.0,
    help="Maximum runtime for astrometry.net.",
)
@click.option(
    "-f",
    "--full",
    is_flag=True,
    help="Applies the full correction once. Cannot be used with --continuous.",
)
async def acquire(
    command: ChernoCommandType,
    exposure_time: float | None = None,
    continuous: bool = False,
    apply: bool = True,
    plot: bool = False,
    cpulimit: float = 15.0,
    full: bool = False,
):
    """Runs the acquisition procedure.

    Exposes the guide cameras (once, or forever with --continuous) and
    pipes each exposure through process_and_correct to compute and
    optionally apply pointing corrections.
    """
    assert command.actor
    if exposure_time is not None:
        # Guard against accidental sub-second exposures.
        if exposure_time < 1.0:
            return command.fail("Exposure time not set or too small.")
        else:
            command.actor.state.exposure_time = exposure_time
    # Bind the per-exposure options into the processing callback.
    callback = partial(
        process_and_correct,
        run_options={"plot": plot, "cpulimit": cpulimit},
        apply=apply,
        full=full,
    )
    exposer = Exposer(command, callback=callback)
    try:
        # count=None means loop until cancelled (continuous mode).
        await exposer.loop(
            None,
            count=1 if continuous is False else None,
            timeout=25,
        )
    except ExposerError as err:
        return command.fail(f"Acquisition failed: {err}")
    return command.finish()
| StarcoderdataPython |
4815851 | import json
from six import text_type as str
from . import DictSource
class JSONFile(DictSource):
    """Configuration source backed by a JSON file (see ``__init__``)."""

    def __init__(self, jsonfilename=None, writable=True, **kwargs):
        """Loads and optionally saves configuration files in JSON
        format. Since JSON has some support for typed values (supports
        numbers, lists, bools, but not dates or datetimes), data from
        this source are sometimes typed, sometimes only available as
        strings.

        :param jsonfile: The name of a JSON file, whose root element
                         should be a JSON object (python dict). Nested
                         objects are turned into nested config objects.
        :type jsonfile: str
        :param writable: Whether changes to the LayeredConfig object
                         that has this JSONFile object amongst its
                         sources should be saved in the JSON file.
        :type writable: bool
        """
        super(JSONFile, self).__init__(**kwargs)
        # FIX: compare to None with `is`, not `==` (PEP 8).
        # Inherit the filename from the parent source when none was given.
        if jsonfilename is None and 'parent' in kwargs and hasattr(kwargs['parent'], 'jsonfilename'):
            jsonfilename = kwargs['parent'].jsonfilename
        if 'defaults' in kwargs:
            self.source = kwargs['defaults']
        elif kwargs.get('empty', False):
            self.source = {}
        else:
            with open(jsonfilename) as fp:
                self.source = json.load(fp)
        self.jsonfilename = jsonfilename
        self.dirty = False
        self.writable = writable

    def typed(self, key):
        # if the value is anything other than a string, we can be sure
        # that it contains useful type information. (NB: `str` here is
        # six's text_type, aliased at module level for py2/py3 compat.)
        return self.has(key) and not isinstance(self.get(key), str)

    def set(self, key, value):
        # simple stringification -- should perhaps only be done in the
        # save step through a method passed as a default parameter to
        # json dumps
        self.source[key] = str(value)

    def save(self):
        """Write the current configuration back to the JSON file, pretty-printed."""
        assert not self.parent, "save() should only be called on root objects"
        if self.jsonfilename:
            with open(self.jsonfilename, "w") as fp:
                json.dump(self.source, fp, indent=4, separators=(',', ': '), sort_keys=True)
| StarcoderdataPython |
3347164 | import unittest
from datetime import timedelta
from display_timedelta import display_timedelta
class CheckVersionTest(unittest.TestCase):
    """Unit tests for display_timedelta's human-readable formatting."""

    def testAllFour(self):
        # Days, hours, minutes and seconds all present, with pluralisation.
        self.assertEqual("4 days, 3 hours, 5 minutes, and 2 seconds",
                         display_timedelta(timedelta(days=4, hours=3, minutes=5, seconds=2)))
        self.assertEqual("1 day, 3 hours, 5 minutes, and 2 seconds",
                         display_timedelta(timedelta(days=1, hours=3, minutes=5, seconds=2)))
        self.assertEqual("1 day, 1 hour, 1 minute, and 1 second",
                         display_timedelta(timedelta(days=1, hours=1, minutes=1, seconds=1)))

    def test1Missing(self):
        # Each of the four units absent in turn is omitted from the output.
        self.assertEqual("1 hour, 1 minute, and 1 second",
                         display_timedelta(timedelta(hours=1, minutes=1, seconds=1)))
        self.assertEqual("1 day, 1 minute, and 1 second",
                         display_timedelta(timedelta(days=1, minutes=1, seconds=1)))
        self.assertEqual("1 day, 1 hour, and 1 second",
                         display_timedelta(timedelta(days=1, hours=1, seconds=1)))
        self.assertEqual("1 day, 1 hour, and 1 minute",
                         display_timedelta(timedelta(days=1, hours=1, minutes=1)))

    def test2Missing(self):
        self.assertEqual("7 hours and 1 second",
                         display_timedelta(timedelta(hours=7, seconds=1)))

    def testOnlyOne(self):
        self.assertEqual("7 hours",
                         display_timedelta(timedelta(hours=7)))

    def testRightNow(self):
        self.assertEqual("right now",
                         display_timedelta(timedelta(seconds=0)))

    def testRounding(self):
        # Fractional seconds are dropped, never rounded up.
        self.assertEqual("5 minutes",
                         display_timedelta(timedelta(minutes=5, seconds=0.2)))
        self.assertEqual("right now",
                         display_timedelta(timedelta(seconds=0.2)))
        self.assertEqual("right now",
                         display_timedelta(timedelta(seconds=0.99999)))

    def testYears(self):
        # Days are never promoted to larger units.
        self.assertEqual("2734032 days", display_timedelta(timedelta(days=2734032)))

    def testInPast(self):
        # Negative deltas are rejected.
        self.assertRaises(ValueError, lambda: display_timedelta(timedelta(seconds=-2)))
| StarcoderdataPython |
1775369 | <filename>sqlconfig/tests/tests.py
import tempfile
import unittest
from ..lib import load, dump
import os
class RoundtripTest(unittest.TestCase):
    """Round-trip test: dump/load between a SQLite DB and its file export."""

    def compare_dirs(self, expected, received):
        """Assert both directories contain the same files with identical contents."""
        expected_files = set(os.listdir(expected))
        received_files = set(os.listdir(received))
        self.assertEqual(expected_files, received_files, "Directories had different sets of files")
        for filename in expected_files:
            with open(os.path.join(expected, filename), "r") as expected_f:
                with open(os.path.join(received, filename), "r") as received_f:
                    self.assertMultiLineEqual(expected_f.read(), received_f.read(), "File " + filename + " did not match")

    def test_roundtrip(self):
        test_data = os.path.join(os.path.dirname(__file__), "testdata")
        db_truth = os.path.join(test_data, "db", "Chinook_Sqlite.sqlite")
        files_truth = os.path.join(test_data, "files")
        with tempfile.TemporaryDirectory() as tempdir:
            # Export the DB and make sure that they match the source of truth in the repo
            files_temp = os.path.join(tempdir, "files")
            dump(db_truth, files_temp)
            self.compare_dirs(files_truth, files_temp)
        with tempfile.TemporaryDirectory() as tempdir:
            # Import and re-export the db and make sure it matches the source of truth
            db_temp = os.path.join(tempdir, "db.sqlite")
            files_temp = os.path.join(tempdir, "files")
            load(db_temp, files_truth)
            dump(db_temp, files_temp)
            self.compare_dirs(files_truth, files_temp)


if __name__ == "__main__":
    unittest.main() | StarcoderdataPython |
32946 | <reponame>ActionAnalytics/mds
import base64
import requests
from werkzeug.exceptions import BadRequest, NotFound
from flask import request, current_app, Response
from flask_restplus import Resource
from app.extensions import api
from ...mine.models.mine import Mine
from ....documents.mines.models.mine_document import MineDocument
from ....documents.variances.models.variance import VarianceDocumentXref
from ....utils.access_decorators import (requires_any_of, MINE_CREATE,
MINESPACE_PROPONENT)
from ....utils.resources_mixins import UserMixin, ErrorMixin
from app.api.utils.custom_reqparser import CustomReqparser
from app.api.mines.mine_api_models import VARIANCE_MODEL
from app.api.variances.models.variance import Variance
class MineVarianceDocumentUploadResource(Resource, UserMixin, ErrorMixin):
    """REST resource for uploading and attaching documents to a mine variance."""

    @api.doc(description='Request a document_manager_guid for uploading a document')
    @requires_any_of([MINE_CREATE, MINESPACE_PROPONENT])
    def post(self, mine_guid, variance_guid):
        """Proxy a tus upload request to the document manager service.

        Reads the filename from the tus Upload-Metadata header and forwards
        the request, returning the document manager's response verbatim.
        """
        metadata = self._parse_request_metadata()
        if not metadata or not metadata.get('filename'):
            raise BadRequest('Filename not found in request metadata header')
        # Save file
        mine = Mine.find_by_mine_guid(mine_guid)
        document_name = metadata.get('filename')
        data = {
            'folder': f'mines/{mine.mine_guid}/variances',
            'pretty_folder': f'mines/{mine.mine_no}/variances',
            'filename': document_name
        }
        document_manager_URL = f'{current_app.config["DOCUMENT_MANAGER_URL"]}/document-manager'
        # Forward all inbound headers except Host so the proxy target resolves itself.
        resp = requests.post(
            url=document_manager_URL,
            headers={key: value
                     for (key, value) in request.headers if key != 'Host'},
            data=data,
            cookies=request.cookies,
        )
        # NOTE(review): str(resp.content) stringifies *bytes* (yields "b'...'" on
        # Python 3) — presumably resp.text was intended; confirm with consumers.
        response = Response(str(resp.content), resp.status_code, resp.raw.headers.items())
        return response

    @api.doc(
        description='Associate an uploaded file with a variance.',
        params={
            'mine_guid': 'guid for the mine with which the variance is associated',
            'variance_guid': 'GUID for the variance to which the document should be associated'
        })
    @api.marshal_with(VARIANCE_MODEL, code=200)
    @requires_any_of([MINE_CREATE, MINESPACE_PROPONENT])
    def put(self, mine_guid, variance_guid):
        """Register an uploaded file as a MineDocument and link it to the variance."""
        parser = CustomReqparser()
        # Arguments required by MineDocument
        parser.add_argument('document_name', type=str, required=True)
        parser.add_argument('document_manager_guid', type=str, required=True)
        parser.add_argument('variance_document_category_code', type=str, required=True)
        variance = Variance.find_by_variance_guid(variance_guid)
        if not variance:
            raise NotFound('Unable to fetch variance.')
        data = parser.parse_args()
        document_name = data.get('document_name')
        document_manager_guid = data.get('document_manager_guid')
        # Register new file upload
        mine_doc = MineDocument(
            mine_guid=mine_guid,
            document_manager_guid=document_manager_guid,
            document_name=document_name)
        if not mine_doc:
            raise BadRequest('Unable to register uploaded file as document')
        # Associate Variance & MineDocument to create Variance Document
        # Add fields specific to Variance Documents
        mine_doc.save()
        variance_doc = VarianceDocumentXref(
            mine_document_guid=mine_doc.mine_document_guid,
            variance_id=variance.variance_id,
            variance_document_category_code=data.get('variance_document_category_code'))
        variance.documents.append(variance_doc)
        variance.save()
        return variance

    def _parse_request_metadata(self):
        """Decode the tus Upload-Metadata header into a dict of UTF-8 strings.

        Header format: comma-separated "key base64value" pairs.
        """
        request_metadata = request.headers.get("Upload-Metadata")
        metadata = {}
        if not request_metadata:
            return metadata
        for key_value in request_metadata.split(","):
            (key, value) = key_value.split(" ")
            metadata[key] = base64.b64decode(value).decode("utf-8")
        return metadata
| StarcoderdataPython |
1794015 | <filename>lib/view.py
#!/usr/bin/env python3
# pylint: disable=no-member, too-many-branches, too-many-locals, too-many-statements, broad-except
"""
Generate plots using BlobToolKit Viewer.
Usage:
blobtools view [--format STRING...] [--host STRING] [--interactive] [--out PATH]
[--param STRING...] [--ports RANGE] [--prefix STRING] [--preview STRING...]
[--geckodriver-log PATH] [--remote] [--timeout INT] [--view STRING...] DATASET
Options:
--format STRING Image format (svg|png). [Default: png]
--host STRING Hostname. [Default: http://localhost]
--interactive Start interactive session (opens dataset in Firefox). [Default: False]
--out PATH Directory for outfiles. [Default: .]
--param key=value Query string parameter.
--ports RANGE Port range for viewer and API. [Default: 8000-8099]
--prefix STRING URL prefix. [Default: view]
--preview STRING Field name.
--geckodriver-log PATH Path to geckodriver logfile for debugging. [Default: /dev/null]
--remote Start viewer for remote session. [Default: False]
--timeout INT Time to wait for page load in seconds. Default (0) is no timeout. [Default: 0]
--view STRING Plot type (blob|cumulative|snail). [Default: blob]
"""
import os
import shlex
import signal
import sys
import time
from pathlib import Path
from subprocess import PIPE, Popen
from docopt import docopt
from pyvirtualdisplay import Display
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from tqdm import tqdm
from host import test_port
def file_ready(file_path):
    """Block until file_path exists; return True if it is a regular file.

    While waiting, re-chowns the parent directory to its current owner once
    per second purely to force an NFS attribute-cache flush. Raises
    ValueError if the path appears but is not a regular file.
    """
    while True:
        if os.path.exists(file_path):
            break
        parent_dir = os.path.dirname(os.path.abspath(file_path))
        os.chown(parent_dir, os.stat(parent_dir).st_uid, os.stat(parent_dir).st_gid)
        time.sleep(1)
    if not os.path.isfile(file_path):
        raise ValueError("%s isn't a file!" % file_path)
    return True
def test_loc(args):
    """See if dataset needs to be hosted and, if so, find an empty port.

    Returns (loc, process, port, api_port, level) where `process` is the
    spawned `blobtools host` subprocess (or None when pointing at an
    already-running/remote host), and `level` is "dataset" or "blobdir".
    """
    info = args["--host"].split(":")
    dataset = Path(args["DATASET"]).name
    level = "dataset"
    # Remote host with explicit scheme (e.g. https://example.com): no hosting needed.
    if len(info) >= 2 and info[1] != "//localhost":
        loc = "%s/%s/%s/dataset/%s" % (
            args["--host"],
            args["--prefix"],
            dataset,
            dataset,
        )
        return loc, None, None, None, level
    if len(info) == 1 and info[0] != "localhost":
        # need to add test for http vs https
        loc = "http://%s/%s/%s/dataset/%s" % (
            args["--host"],
            args["--prefix"],
            dataset,
            dataset,
        )
        return loc, None, None, None, level
    # localhost with explicit port: require a viewer already listening there.
    if len(info) == 3:
        port = info[2]
        available = test_port(port, "test")
        if available:
            print("ERROR: No service running on port %s" % port)
            print("       Unable to connect to %s" % args["--host"])
            sys.exit(1)
        else:
            loc = "%s/%s/%s/dataset/%s" % (
                args["--host"],
                args["--prefix"],
                dataset,
                dataset,
            )
            return loc, None, None, None, level
    if not Path(args["DATASET"]).exists():
        # BUG FIX: the original never filled the %s placeholder, printing it literally.
        print("ERROR: DATASET '%s' must be a valid path to begin hosting." % args["DATASET"])
        sys.exit(1)
    dataset = Path(args["DATASET"]).name
    # A directory containing meta.json(.gz) is a single dataset; otherwise it is
    # a BlobDir parent holding multiple datasets.
    if (
        Path("%s/meta.json" % args["DATASET"]).is_file()
        or Path("%s/meta.json.gz" % args["DATASET"]).is_file()
    ):
        parent = Path(args["DATASET"]).resolve().absolute().parent
    else:
        level = "blobdir"
        parent = Path(args["DATASET"]).resolve().absolute()
    # Claim the first two free ports in the range: API first, then viewer.
    port_range = args["--ports"].split("-")
    api_port = False
    port = False
    for i in range(int(port_range[0]), int(port_range[1])):
        if test_port(i, "test"):
            if not api_port:
                api_port = i
                continue
            if not port:
                port = i
                break
    # NOTE(review): if no free ports were found, port/api_port stay False and
    # the %d formats below render them as 0 — consider failing explicitly.
    directory = Path(__file__).resolve().parent.parent
    cmd = "%s/blobtools host --port %d --api-port %d %s" % (
        directory,
        port,
        api_port,
        parent,
    )
    process = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE, encoding="ascii")
    loc = "%s:%d/%s" % (args["--host"], port, args["--prefix"])
    if level == "dataset":
        loc += "/%s/dataset/%s" % (dataset, dataset)
    else:
        loc += "/all"
    # Give the spawned viewer up to 15 seconds to come up; bail out if it dies.
    for i in tqdm(
        range(0, 15),
        unit="s",
        ncols=75,
        desc="Initializing viewer",
        bar_format="{desc} |{bar}| {n_fmt}/{total_fmt} seconds",
    ):
        poll = process.poll()
        if poll is None:
            time.sleep(1)
        else:
            print(process.stdout.read(), file=sys.stderr)
            print(process.stderr.read(), file=sys.stderr)
            print("ERROR: Viewer quit unexpectedly", file=sys.stderr)
            print("Unable to run: %s" % cmd, file=sys.stderr)
            sys.exit(1)
    return loc, process, port, api_port, level
def firefox_driver(args):
    """Start firefox.

    Returns (driver, display): a selenium Firefox driver configured to
    auto-save downloads into args['--out'] without prompting, running
    inside a virtual X display so no visible window is required.
    """
    outdir = os.path.abspath(args["--out"])
    os.makedirs(Path(outdir), exist_ok=True)
    profile = webdriver.FirefoxProfile()
    # folderList=2 means "use the custom directory set below".
    profile.set_preference("browser.download.folderList", 2)
    profile.set_preference("browser.download.manager.showWhenStarting", False)
    profile.set_preference("browser.download.dir", outdir)
    profile.set_preference("browser.download.lastDir", args["--out"])
    # Skip the save dialog for these MIME types so exports land on disk directly.
    profile.set_preference(
        "browser.helperApps.neverAsk.saveToDisk",
        "image/png, image/svg+xml, text/csv, text/plain, application/json",
    )
    options = Options()
    # options.set_headless(headless=not args['--interactive'])
    options.headless = False
    display = Display(visible=0, size=(800, 600))
    display.start()
    driver = webdriver.Firefox(
        options=options,
        firefox_profile=profile,
        service_log_path=args["--geckodriver-log"],
    )
    return driver, display
def static_view(args, loc, viewer):
    """Generate static images.

    Drives the hosted viewer in a browser, clicking each view's export
    button and waiting for the files to appear in args['--out']. On any
    failure, tears down the browser/display/viewer and exits non-zero.
    """
    # Query-string flags force fully static rendering in the viewer.
    qstr = "staticThreshold=Infinity"
    qstr += "&nohitThreshold=Infinity"
    qstr += "&plotGraphics=svg"
    if args["--format"] == "svg":
        qstr += "&svgThreshold=Infinity"
    shape = "square"
    for param in args["--param"]:
        qstr += "&%s" % str(param)
        key, value = param.split("=")
        if key == "plotShape":
            shape = value
    timeout = int(args["--timeout"])
    outdir = os.path.abspath(args["--out"])
    driver, display = firefox_driver(args)

    def handle_error(err):
        """Release resources before quitting."""
        if viewer is not None:
            viewer.send_signal(signal.SIGINT)
        driver.quit()
        display.stop()
        print(err)
        sys.exit(1)

    try:
        view = args["--view"][0]
        if args["--preview"]:
            qstr += "#Filters"
        url = "%s/%s?%s" % (loc, view, qstr)
        print("Loading %s" % url)
        try:
            driver.get(url)
        except Exception as err:
            handle_error(err)
        for next_view in args["--view"]:
            # The first view is already loaded; only navigate on change.
            if next_view != view:
                view = next_view
                url = "%s/%s?%s" % (loc, view, qstr)
                print("Navigating to %s" % url)
                try:
                    driver.get(url)
                except Exception as err:
                    handle_error(err)
            for fmt in args["--format"]:
                file = "%s.%s" % (args["DATASET"], view)
                if view == "blob":
                    file += ".%s" % shape
                elif view == "busco":
                    # Busco exports are data-only (csv/json), never images.
                    view = "all_%s" % view
                    if fmt not in ("csv", "json"):
                        fmt = "json"
                file += ".%s" % fmt
                print("Fetching %s" % file)
                el_id = "%s_save_%s" % (view, fmt)
                print("waiting for element %s" % el_id)
                # Retry clicking the save button until the download lands or
                # the overall timeout elapses (timeout==0 disables the limit).
                unstable = True
                start_time = time.time()
                while unstable:
                    elapsed_time = time.time() - start_time
                    if timeout and elapsed_time > timeout:
                        handle_error("Timeout waiting for file")
                    try:
                        element = WebDriverWait(driver, timeout).until(
                            EC.visibility_of_element_located((By.ID, el_id))
                        )
                        element.click()
                        unstable = False
                        file_name = "%s/%s" % (outdir, file)
                        print("waiting for file '%s'" % file_name)
                        file_ready(file_name)
                    except Exception as err:
                        time.sleep(1)
        for preview in args["--preview"]:
            print("Creating %s preview" % preview)
            for fmt in args["--format"]:
                el_id = "%s_preview_save_%s" % (preview, fmt)
                file = "%s.%s.preview.%s" % (args["DATASET"], preview, fmt)
                try:
                    element = WebDriverWait(driver, timeout).until(
                        EC.visibility_of_element_located((By.ID, el_id))
                    )
                    element.click()
                    file_name = "%s/%s" % (outdir, file)
                    print("waiting for file '%s'" % file_name)
                    file_ready(file_name)
                except Exception as err:
                    handle_error(err)
        if viewer is not None:
            viewer.send_signal(signal.SIGINT)
        driver.quit()
        display.stop()
    except Exception as err:
        handle_error(err)
        # print(err)
        # if viewer is not None:
        #     viewer.send_signal(signal.SIGINT)
        # driver.quit()
        # display.stop()
    return True
def interactive_view(args, loc, viewer, level):
    """View dataset in Firefox.

    Opens the hosted viewer URL and keeps the session alive until the
    viewer subprocess exits, then shuts everything down.
    """
    driver, display = firefox_driver(args)
    qstr = ""
    for param in args["--param"]:
        qstr += "&%s" % str(param)
    try:
        view = args["--view"][0]
        if args["--preview"]:
            qstr += "#Filters"
        if level == "dataset":
            url = "%s/%s" % (loc, view)
            if qstr:
                url += "?%s" % qstr
        else:
            url = "%s/all" % loc
        print("Loading %s" % url)
        try:
            driver.get(url)
        except Exception as err:
            print(err)
        # Poll until the hosted viewer process terminates.
        poll = viewer.poll()
        while poll is None:
            time.sleep(5)
            poll = viewer.poll()
        driver.quit()
        display.stop()
        if viewer is not None:
            viewer.send_signal(signal.SIGINT)
    except Exception as err:
        print(err)
        driver.quit()
        display.stop()
        if viewer is not None:
            viewer.send_signal(signal.SIGINT)
    return True
def remote_view(args, loc, viewer, port, api_port, level):
    """View dataset remotely.

    Prints the URL and the ssh port-forwarding command, then sleeps
    forever until interrupted (the except/finally paths stop the viewer).
    """
    qstr = ""
    for param in args["--param"]:
        qstr += "&%s" % str(param)
    try:
        view = args["--view"][0]
        if args["--preview"]:
            qstr += "#Filters"
        if level == "dataset":
            url = "%s/%s" % (loc, view)
            if qstr:
                url += "?%s" % qstr
            print("View dataset at %s" % url)
        else:
            print("View datasets at %s" % loc)
        print("For remote access use:")
        print(
            " ssh -L %d:127.0.0.1:%d -L %d:127.0.0.1:%d username@remote_host"
            % (port, port, api_port, api_port)
        )
        while True:
            time.sleep(5)
        # NOTE(review): the statements below are unreachable — the infinite
        # loop only exits via an exception, handled in the except clause.
        if viewer is not None:
            viewer.send_signal(signal.SIGINT)
    except Exception as err:
        print("remote exception")
        print(err)
        if viewer is not None:
            viewer.send_signal(signal.SIGINT)
    return True
def main():
    """Entrypoint for blobtools view.

    Parses docopt arguments, hosts the dataset if needed, and dispatches
    to the interactive, remote, or static rendering mode. Always signals
    the hosted viewer subprocess to stop on exit.
    """
    args = docopt(__doc__)
    loc, viewer, port, api_port, level = test_loc(args)
    try:
        if args["--interactive"]:
            interactive_view(args, loc, viewer, level)
        elif args["--remote"]:
            remote_view(args, loc, viewer, port, api_port, level)
        else:
            static_view(args, loc, viewer)
    except KeyboardInterrupt:
        pass
    finally:
        # Brief pauses let in-flight downloads/signals settle before and
        # after stopping the viewer.
        time.sleep(1)
        if viewer is not None:
            viewer.send_signal(signal.SIGINT)
        time.sleep(1)
if __name__ == "__main__":
    # Firefox requires a home directory; create one if $HOME points at a
    # missing path (common in containers).
    # NOTE(review): raises KeyError if HOME is unset entirely — confirm intended.
    if not os.path.exists(os.environ["HOME"]):
        os.mkdir(os.environ["HOME"])
    main()
| StarcoderdataPython |
1789933 | import pytest
import os
from src.syn_reports.commands.user_project_access_report import UserProjectAccessReport
@pytest.fixture(scope='session')
def syn_user(syn_client):
    """Session-scoped Synapse user profile for SYNAPSE_USERNAME from the env."""
    return syn_client.getUserProfile(os.environ.get('SYNAPSE_USERNAME'))
def assert_user_success_from_print(capsys, *users):
    """Assert stderr is empty and each user's header line appears on stdout."""
    captured = capsys.readouterr()
    assert captured.err == ''
    expected_lines = ['Username: {0} ({1})'.format(u.userName, u.ownerId) for u in users]
    for line in expected_lines:
        assert line in captured.out
def assert_project_success_from_print(capsys, *projects):
    """Assert stderr is empty and each project's header line appears on stdout.

    BUG FIX: the original built the 'Project: ...' string as a bare
    expression without `assert`, so the project check was a no-op.
    The role was also spelled 'Adminitrator' — assumed a typo for
    'Administrator' (verify against actual report output).
    """
    captured = capsys.readouterr()
    assert captured.err == ''
    for project in projects:
        assert 'Project: {0} ({1}) [{2}]'.format(project.name, project.id, 'Administrator') in captured.out
def assert_success_from_csv(csv_full_path, user, *entities):
    """Assert the CSV exists and mentions the user's ids and every entity."""
    assert os.path.isfile(csv_full_path)
    with open(csv_full_path, mode='r') as csv_file:
        contents = csv_file.read()
    expected_values = [user.ownerId, user.userName]
    for entity in entities:
        expected_values.extend([entity.id, entity.name])
    for value in expected_values:
        assert value in contents
def test_it_reports_by_user_id(capsys, syn_user, syn_project):
    """Report looked up by numeric owner ID prints the user and project."""
    UserProjectAccessReport(syn_user.ownerId).execute()
    assert_user_success_from_print(capsys, syn_user)
    assert_project_success_from_print(capsys, syn_project)
def test_it_reports_by_username(capsys, syn_user, syn_project):
    """Report looked up by username prints the user and project."""
    UserProjectAccessReport(syn_user.userName).execute()
    assert_user_success_from_print(capsys, syn_user)
    assert_project_success_from_print(capsys, syn_project)
def test_it_does_not_blowup_if_user_not_found(capsys, syn_test_helper):
    """A missing user produces an error message on stderr, not an exception."""
    username = syn_test_helper.uniq_name(prefix='Invalid-User')
    UserProjectAccessReport(username).execute()
    captured = capsys.readouterr()
    assert 'Could not find user matching: {0}'.format(username) in captured.err
def test_it_outputs_csv_to_dir(capsys, syn_user, syn_project, mk_tempdir):
    """Given a directory out_path, the report writes its CSV inside it."""
    out_dir = mk_tempdir()
    report = UserProjectAccessReport(syn_user.userName, out_path=out_dir)
    report.execute()
    assert_user_success_from_print(capsys, syn_user)
    assert_project_success_from_print(capsys, syn_project)
    assert_success_from_csv(report._csv_full_path, syn_user, syn_project)
def test_it_outputs_csv_to_file(capsys, syn_user, syn_project, mk_tempdir):
    """Given a file out_path, the report writes the CSV to exactly that path."""
    out_file = os.path.join(mk_tempdir(), 'outfile.csv')
    report = UserProjectAccessReport(syn_user.userName, out_path=out_file)
    report.execute()
    assert report._csv_full_path == out_file
    assert_user_success_from_print(capsys, syn_user)
    assert_project_success_from_print(capsys, syn_project)
    assert_success_from_csv(report._csv_full_path, syn_user, syn_project)
| StarcoderdataPython |
3244155 | import pytest
from uuid import UUID, uuid4
from domains.user.user import User
@pytest.fixture(scope="class")
def set_fields() -> (UUID, str, str, str, str, str):
    """Initial field values for constructing a User in the tests below.

    NOTE: the email literal appears redacted by the dataset this copy
    came from.
    """
    id = uuid4()
    nickname = "nickname"
    first_name = "first_name"
    last_name = "last_name"
    email = "<EMAIL>"
    password = "password"
    return id, nickname, first_name, last_name, email, password
@pytest.fixture(scope="class")
def update_fields() -> (str, str, str, str, str):
    """Replacement field values used to exercise User.update().

    NOTE: the password literal appears redacted by the dataset this copy
    came from.
    """
    nickname = "new_nickname"
    first_name = "new_first_name"
    last_name = "new_last_name"
    email = "<EMAIL>"
    password = "<PASSWORD>"
    return nickname, first_name, last_name, email, password
class TestUser:
    """Tests for the User domain entity (test names are in Japanese).

    NOTE(review): several password assertions appear redacted to
    '<PASSWORD>' by the dataset this copy came from and are kept as found.
    """

    def test_すべての引数を与えた場合に正常にインスタンスが生成される(self, set_fields: tuple) -> None:
        # "An instance is created normally when all arguments are given."
        id, nickname, first_name, last_name, email, password = set_fields
        user = User.Builder().id(id).nickname(nickname).first_name(
            first_name).last_name(last_name).email(email).password(password).build()
        assert user.id == id
        assert user.nickname == nickname
        assert user.first_name == first_name
        assert user.last_name == last_name
        assert user.email == email
        assert user.password == password

    def test_idをNoneで与えた場合にUUIDが発行される(self, set_fields: tuple) -> None:
        # "A UUID is issued when id is given as None (omitted)."
        _, nickname, first_name, last_name, email, password = set_fields
        user = User.Builder().nickname(nickname).first_name(
            first_name).last_name(last_name).email(email).password(password).build()
        assert isinstance(user.id, UUID)

    def test_update関数を実行するとid以外のメンバ変数が更新される(self, set_fields: tuple, update_fields: tuple) -> None:
        # "Running update() updates every member variable except id."
        id, nickname, first_name, last_name, email, password = set_fields
        user = User.Builder().id(id).nickname(nickname).first_name(
            first_name).last_name(last_name).email(email).password(password).build()
        new_nickname, new_first_name, new_last_name, new_email, new_password = update_fields
        user.update(new_nickname, new_first_name,
                    new_last_name, new_email, new_password)
        assert user.id == id
        assert user.nickname == new_nickname
        assert user.first_name == new_first_name
        assert user.last_name == new_last_name
        assert user.email == new_email
        assert user.password == <PASSWORD>

    def test_update関数の引数にNoneを指定したメンバ変数は更新されない(self, set_fields: tuple, update_fields: tuple) -> None:
        # "Member variables passed as None to update() are not changed."
        id, nickname, first_name, last_name, email, password = set_fields
        user = User.Builder().id(id).nickname(nickname).first_name(
            first_name).last_name(last_name).email(email).password(password).build()
        _, new_first_name, new_last_name, new_email, new_password = update_fields
        user.update(None, new_first_name,
                    new_last_name, new_email, new_password)
        assert user.id == id
        assert user.nickname == nickname
        assert user.first_name == new_first_name
        assert user.last_name == new_last_name
        assert user.email == new_email
        assert user.password == <PASSWORD>
| StarcoderdataPython |
4274 | <gh_stars>1-10
"""
Try to load all of the MODFLOW-USG examples in ../examples/data/mfusg_test.
These are the examples that are distributed with MODFLOW-USG.
"""
import os
import flopy
# make the working directory
# (output location for models re-written by load_model below)
tpth = os.path.join("temp", "t038")
if not os.path.isdir(tpth):
    os.makedirs(tpth)
# build list of name files to try and load
# NOTE: the path is relative to the test runner's working directory.
usgpth = os.path.join("..", "examples", "data", "mfusg_test")
usg_files = []
for path, subdirs, files in os.walk(usgpth):
    for name in files:
        if name.endswith(".nam"):
            usg_files.append(os.path.join(path, name))
#
def test_load_usg():
    """Yield one (load_model, namefile, dir) case per discovered MODFLOW-USG model.

    NOTE(review): this is a generator ("yield") test, a nose-era idiom that
    pytest no longer collects -- confirm which runner this suite targets.
    """
    for fusg in usg_files:
        d, f = os.path.split(fusg)
        yield load_model, f, d
# function to load a MODFLOW-USG model and then write it back out
def load_model(namfile, model_ws):
m = flopy.modflow.Modflow.load(
namfile, model_ws=model_ws, version="mfusg", verbose=True, check=False
)
assert m, f"Could not load namefile {namfile}"
assert m.load_fail is False
m.change_model_ws(tpth)
m.write_input()
return
if __name__ == "__main__":
    # Allow running the whole suite directly as a script, outside any runner.
    for fusg in usg_files:
        d, f = os.path.split(fusg)
        load_model(f, d)
| StarcoderdataPython |
1757066 | import datetime
import decimal
import logging
import uuid
import warnings
from django.conf import settings
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.base.client import BaseDatabaseClient
from django.db.backends.base.creation import BaseDatabaseCreation
from django.db.backends.base.features import BaseDatabaseFeatures
from django.db.backends.base.introspection import (BaseDatabaseIntrospection,
TableInfo)
from django.db.backends.base.operations import BaseDatabaseOperations
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.backends.base.validation import BaseDatabaseValidation
from django.utils import six, timezone
from django.utils.encoding import smart_text
from google.appengine.api import datastore_errors
from google.appengine.api.datastore_types import Blob, Text
from djangae.db.backends.appengine import dbapi as Database
from djangae.db.backends.appengine import rpc
from djangae.db.backends.appengine.indexing import load_special_indexes
from djangae.db.utils import (decimal_to_string, get_datastore_key,
make_timezone_naive)
from .commands import (DeleteCommand, FlushCommand, InsertCommand,
SelectCommand, UpdateCommand, coerce_unicode)
logger = logging.getLogger(__name__)
class Connection(object):
    """ Dummy connection class """
    # The Datastore has no real connection object; this stand-in only exposes
    # the attributes Django's DB-API plumbing expects.
    def __init__(self, wrapper, params):
        # wrapper: the owning DatabaseWrapper; params: settings dict (unused).
        self.creation = wrapper.creation
        self.ops = wrapper.ops
        self.params = params
        self.queries = []  # InsertCommands recorded by Cursor.execute
    def rollback(self):
        # No-op: transaction handling lives in the Datastore RPC layer.
        pass
    def commit(self):
        # No-op: see rollback().
        pass
    def close(self):
        # No-op: nothing to release.
        pass
class Cursor(object):
    """ Dummy cursor class """
    # DB-API-shaped cursor that dispatches command objects (SelectCommand etc.)
    # instead of SQL strings. Python 2 code: uses `long` and iterator .next().
    def __init__(self, connection):
        self.connection = connection
        self.start_cursor = None
        self.returned_ids = []
        self.rowcount = -1
        self.last_select_command = None
        self.last_delete_command = None
    def execute(self, sql, *params):
        # `sql` is expected to be one of the command objects, not a string.
        if isinstance(sql, SelectCommand):
            # Also catches subclasses of SelectCommand (e.g Update)
            self.last_select_command = sql
            self.rowcount = self.last_select_command.execute() or -1
        elif isinstance(sql, FlushCommand):
            sql.execute()
        elif isinstance(sql, UpdateCommand):
            self.rowcount = sql.execute()
        elif isinstance(sql, DeleteCommand):
            self.rowcount = sql.execute()
        elif isinstance(sql, InsertCommand):
            self.connection.queries.append(sql)
            self.returned_ids = sql.execute()
        else:
            raise Database.CouldBeSupportedError("Can't execute traditional SQL: '%s' (although perhaps we could make GQL work)", sql)
    def next(self):
        # Python 2 iterator protocol; pairs with __iter__ below.
        row = self.fetchone()
        if row is None:
            raise StopIteration
        return row
    def fetchone(self, delete_flag=False):
        """Return the next result row as a list, or None when exhausted."""
        try:
            result = self.last_select_command.results.next()
            if isinstance(result, (int, long)):
                # e.g. COUNT-style results come back as bare numbers.
                return (result,)
            query = self.last_select_command.query
            row = []
            # Prepend extra select values to the resulting row
            for col, select in query.extra_selects:
                row.append(result.get(col))
            for col in self.last_select_command.query.init_list:
                row.append(result.get(col))
            # NOTE(review): this appends id_or_name() *values*, while lastrowid
            # below calls .id_or_name() on the stored entries -- one of the two
            # looks wrong; verify which shape returned_ids is meant to hold.
            self.returned_ids.append(result.key().id_or_name())
            return row
        except StopIteration:
            return None
    def fetchmany(self, size, delete_flag=False):
        """Return up to *size* rows (possibly fewer at the end of the results)."""
        if not self.last_select_command.results:
            return []
        result = []
        for i in range(size):
            entity = self.fetchone(delete_flag)
            if entity is None:
                break
            result.append(entity)
        return result
    @property
    def lastrowid(self):
        # Id of the most recently inserted entity (set by InsertCommand).
        return self.returned_ids[-1].id_or_name()
    def __iter__(self):
        return self
    def close(self):
        # No-op: nothing to release.
        pass
MAXINT = 9223372036854775808
class DatabaseOperations(BaseDatabaseOperations):
    """Datastore-specific implementations of Django's DB operations.

    Translates between Django field values and App Engine Datastore types
    (Text, Blob, keys, microsecond timestamps). Python 2 code: uses `long`.
    """
    compiler_module = "djangae.db.backends.appengine.compiler"
    # Datastore will store all integers as 64bit long values
    integer_field_ranges = {
        'SmallIntegerField': (-MAXINT, MAXINT-1),
        'IntegerField': (-MAXINT, MAXINT-1),
        'BigIntegerField': (-MAXINT, MAXINT-1),
        'PositiveSmallIntegerField': (0, MAXINT-1),
        'PositiveIntegerField': (0, MAXINT-1),
    }
    def bulk_batch_size(self, field, objs):
        # This value is used in cascade deletions, and also on bulk insertions
        # Bulk insertions really need to be limited to 25 elsewhere (so that they can be done)
        # transactionally, so setting to 30 doesn't matter but for cascade deletions
        # (which explode to thing_id__in=[]) we need to limit to MAX_ALLOWABLE_QUERIES
        return rpc.MAX_ALLOWABLE_QUERIES
    def quote_name(self, name):
        # Datastore kinds/properties need no quoting.
        return name
    def date_trunc_sql(self, lookup_type, field_name):
        # No SQL backend: truncation is handled elsewhere, so emit nothing.
        return ''
    def datetime_trunc_sql(self, lookup_type, field_name, tzname):
        return '', []
    def datetime_extract_sql(self, lookup_name, sql, tzname):
        return '', []
    def date_extract_sql(self, lookup_name, sql):
        return '', []
    def get_db_converters(self, expression):
        """Attach value-from-DB converters based on the field's internal/db type."""
        converters = super(DatabaseOperations, self).get_db_converters(expression)
        db_type = expression.field.db_type(self.connection)
        internal_type = expression.field.get_internal_type()
        if internal_type == 'TextField':
            converters.append(self.convert_textfield_value)
        elif internal_type == 'DateTimeField':
            converters.append(self.convert_datetime_value)
        elif internal_type == 'DateField':
            converters.append(self.convert_date_value)
        elif internal_type == 'TimeField':
            converters.append(self.convert_time_value)
        elif internal_type == 'DecimalField':
            converters.append(self.convert_decimal_value)
        elif internal_type == 'UUIDField':
            converters.append(self.convert_uuidfield_value)
        elif db_type == 'list':
            converters.append(self.convert_list_value)
        elif db_type == 'set':
            converters.append(self.convert_set_value)
        return converters
    def convert_uuidfield_value(self, value, expression, connection, context):
        if value is not None:
            value = uuid.UUID(value)
        return value
    def convert_textfield_value(self, value, expression, connection, context=None):
        # Python 2: decode byte strings coming back from the Datastore.
        if isinstance(value, str):
            value = value.decode("utf-8")
        return value
    def convert_datetime_value(self, value, expression, connection, context=None):
        return self.connection.ops.value_from_db_datetime(value)
    def convert_date_value(self, value, expression, connection, context=None):
        return self.connection.ops.value_from_db_date(value)
    def convert_time_value(self, value, expression, connection, context=None):
        return self.connection.ops.value_from_db_time(value)
    def convert_decimal_value(self, value, expression, connection, context=None):
        return self.connection.ops.value_from_db_decimal(value)
    def convert_list_value(self, value, expression, connection, context=None):
        if expression.output_field.db_type(connection) != "list":
            return value
        if not value:
            value = []
        return value
    def convert_set_value(self, value, expression, connection, context=None):
        if expression.output_field.db_type(connection) != "set":
            return value
        if not value:
            value = set()
        else:
            value = set(value)
        return value
    def sql_flush(self, style, tables, seqs, allow_cascade=False):
        # Also flush the special-index tables that shadow the listed tables.
        additional_djangaeidx_tables = [
            x for x in self.connection.introspection.table_names()
            if [y for y in tables if x.startswith("_djangae_idx_{}".format(y))]
        ]
        return [FlushCommand(table, self.connection) for table in tables + additional_djangaeidx_tables]
    def prep_lookup_key(self, model, value, field):
        """Coerce a primary-key lookup value into a Datastore Key."""
        if isinstance(value, six.string_types):
            value = value[:500]
            left = value[500:]
            if left:
                warnings.warn("Truncating primary key that is over 500 characters. "
                              "THIS IS AN ERROR IN YOUR PROGRAM.",
                              RuntimeWarning)
            # This is a bit of a hack. Basically when you query an integer PK with a
            # string containing an int. SQL seems to return the row regardless of type, and as far as
            # I can tell, Django at no point tries to cast the value to an integer. So, in the
            # case where the internal type is an AutoField, we try to cast the string value
            # I would love a more generic solution... patches welcome!
            # It would be nice to see the SQL output of the lookup_int_as_str test is on SQL, if
            # the string is converted to an int, I'd love to know where!
            if field.get_internal_type() == 'AutoField':
                try:
                    value = int(value)
                except (TypeError, ValueError):
                    pass
            value = get_datastore_key(model, value)
        else:
            value = get_datastore_key(model, value)
        return value
    def prep_lookup_date(self, model, value, field):
        if isinstance(value, datetime.datetime):
            return value
        return self.adapt_datefield_value(value)
    def prep_lookup_time(self, model, value, field):
        if isinstance(value, datetime.datetime):
            return value
        return self.adapt_timefield_value(value)
    def prep_lookup_value(self, model, value, field, column=None):
        """Convert a Python lookup value into its Datastore representation."""
        if field.primary_key and (not column or column == model._meta.pk.column):
            try:
                return self.prep_lookup_key(model, value, field)
            except datastore_errors.BadValueError:
                # A key couldn't be constructed from this value
                return None
        db_type = field.db_type(self.connection)
        if db_type == 'date':
            return self.prep_lookup_date(model, value, field)
        elif db_type == 'time':
            return self.prep_lookup_time(model, value, field)
        elif db_type in ('list', 'set'):
            if hasattr(value, "__len__") and not value:
                value = None #Convert empty lists to None
            elif hasattr(value, "__iter__"):
                # Convert sets to lists
                value = list(value)
        return value
    def value_for_db(self, value, field):
        """Convert a Python value into the type stored for the field's db_type."""
        if value is None:
            return None
        db_type = field.db_type(self.connection)
        if db_type in ('integer', 'long'):
            if isinstance(value, float):
                # round() always returns a float, which has a smaller max value than an int
                # so only round() it if it's already a float
                value = round(value)
            value = long(value)
        elif db_type == 'float':
            value = float(value)
        elif db_type == 'string' or db_type == 'text':
            value = coerce_unicode(value)
            if db_type == 'text':
                value = Text(value)
        elif db_type == 'bytes':
            # Store BlobField, DictField and EmbeddedModelField values as Blobs.
            value = Blob(value)
        elif db_type == 'decimal':
            value = self.adapt_decimalfield_value(value, field.max_digits, field.decimal_places)
        elif db_type in ('list', 'set'):
            if hasattr(value, "__len__") and not value:
                value = None #Convert empty lists to None
            elif hasattr(value, "__iter__"):
                # Convert sets to lists
                value = list(value)
        return value
    def last_insert_id(self, cursor, db_table, column):
        return cursor.lastrowid
    def last_executed_query(self, cursor, sql, params):
        """
        We shouldn't have to override this, but Django's BaseOperations.last_executed_query
        assumes does u"QUERY = %r" % (sql) which blows up if you have encode unicode characters
        in your SQL. Technically this is a bug in Django for assuming that sql is ASCII but
        it's only our backend that will ever trigger the problem
        """
        return u"QUERY = {}".format(smart_text(sql))
    def fetch_returned_insert_id(self, cursor):
        return cursor.lastrowid
    def adapt_datetimefield_value(self, value):
        value = make_timezone_naive(value)
        return value
    def value_to_db_datetime(self, value):  # Django 1.8 compatibility
        return self.adapt_datetimefield_value(value)
    def adapt_datefield_value(self, value):
        # Dates are stored as midnight datetimes.
        if value is not None:
            value = datetime.datetime.combine(value, datetime.time())
        return value
    def value_to_db_date(self, value):  # Django 1.8 compatibility
        return self.adapt_datefield_value(value)
    def adapt_timefield_value(self, value):
        # Times are stored as datetimes anchored at the epoch date.
        if value is not None:
            value = make_timezone_naive(value)
            value = datetime.datetime.combine(datetime.datetime.fromtimestamp(0), value)
        return value
    def value_to_db_time(self, value):  # Django 1.8 compatibility
        return self.adapt_timefield_value(value)
    def adapt_decimalfield_value(self, value, max_digits, decimal_places):
        if isinstance(value, decimal.Decimal):
            return decimal_to_string(value, max_digits, decimal_places)
        return value
    def value_to_db_decimal(self, value, max_digits, decimal_places):  # Django 1.8 compatibility
        return self.adapt_decimalfield_value(value, max_digits, decimal_places)
    # Unlike value_to_db, these are not overridden or standard Django, it's just nice to have symmetry
    def value_from_db_datetime(self, value):
        if isinstance(value, (int, long)):
            # App Engine Query's don't return datetime fields (unlike Get) I HAVE NO IDEA WHY
            value = datetime.datetime.fromtimestamp(float(value) / 1000000.0)
        if value is not None and settings.USE_TZ and timezone.is_naive(value):
            value = value.replace(tzinfo=timezone.utc)
        return value
    def value_from_db_date(self, value):
        if isinstance(value, (int, long)):
            # App Engine Query's don't return datetime fields (unlike Get) I HAVE NO IDEA WHY
            value = datetime.datetime.fromtimestamp(float(value) / 1000000.0)
        if value:
            value = value.date()
        return value
    def value_from_db_time(self, value):
        # NOTE(review): when value arrives as an int it is converted straight to
        # a time() here, but the trailing `value.time()` below would then raise
        # AttributeError; the date variant above converts to datetime first.
        # Confirm which code path is actually exercised before changing.
        if isinstance(value, (int, long)):
            # App Engine Query's don't return datetime fields (unlike Get) I HAVE NO IDEA WHY
            value = datetime.datetime.fromtimestamp(float(value) / 1000000.0).time()
        if value is not None and settings.USE_TZ and timezone.is_naive(value):
            value = value.replace(tzinfo=timezone.utc)
        if value:
            value = value.time()
        return value
    def value_from_db_decimal(self, value):
        if value:
            value = decimal.Decimal(value)
        return value
class DatabaseClient(BaseDatabaseClient):
    # The Datastore has no interactive shell client; the base no-op suffices.
    pass
class DatabaseCreation(BaseDatabaseCreation):
    """Schema creation stubs: the Datastore is schemaless, so the SQL-emitting
    hooks return empty results and test DB setup/teardown is a no-op."""
    # Maps Django field internal types to Datastore value kinds.
    data_types = {
        'AutoField': 'key',
        'RelatedAutoField': 'key',
        'ForeignKey': 'key',
        'OneToOneField': 'key',
        'ManyToManyField': 'key',
        'BigIntegerField': 'long',
        'BooleanField': 'bool',
        'CharField': 'string',
        'CommaSeparatedIntegerField': 'string',
        'DateField': 'date',
        'DateTimeField': 'datetime',
        'DecimalField': 'decimal',
        'DurationField': 'long',
        'EmailField': 'string',
        'FileField': 'string',
        'FilePathField': 'string',
        'FloatField': 'float',
        'ImageField': 'string',
        'IntegerField': 'integer',
        'IPAddressField': 'string',
        'NullBooleanField': 'bool',
        'PositiveIntegerField': 'integer',
        'PositiveSmallIntegerField': 'integer',
        'SlugField': 'string',
        'SmallIntegerField': 'integer',
        'TimeField': 'time',
        'URLField': 'string',
        'TextField': 'text',
        'XMLField': 'text',
        'BinaryField': 'bytes'
    }
    def __init__(self, *args, **kwargs):
        self.testbed = None
        super(DatabaseCreation, self).__init__(*args, **kwargs)
    def sql_create_model(self, model, *args, **kwargs):
        # No DDL on the Datastore.
        return [], {}
    def sql_for_pending_references(self, model, *args, **kwargs):
        return []
    def sql_indexes_for_model(self, model, *args, **kwargs):
        return []
    def _create_test_db(self, verbosity, autoclobber, *args):
        pass
    def _destroy_test_db(self, name, verbosity):
        pass
class DatabaseIntrospection(BaseDatabaseIntrospection):
    @rpc.NonTransactional
    def get_table_list(self, cursor):
        """List Datastore kinds (the analogue of tables) for the configured namespace."""
        namespace = self.connection.settings_dict.get("NAMESPACE")
        kinds = [kind.key().id_or_name() for kind in rpc.Query('__kind__', namespace=namespace).Run()]
        # "t" marks each entry as a table (vs a view) for Django.
        return [TableInfo(x, "t") for x in kinds]
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
    """No-op schema editor: the Datastore is schemaless, so migrations that
    alter structure have nothing to do."""
    def column_sql(self, model, field, include_default=False):
        return "", {}
    def create_model(self, model):
        """ Don't do anything when creating tables """
        pass
    def alter_unique_together(self, *args, **kwargs):
        pass
    def alter_field(self, from_model, from_field, to_field):
        pass
    def remove_field(self, from_model, field):
        pass
    def add_field(self, model, field):
        pass
    def alter_index_together(self, model, old_index_together, new_index_together):
        pass
    def delete_model(self, model):
        pass
class DatabaseFeatures(BaseDatabaseFeatures):
    """Capability flags advertised to Django for the Datastore backend."""
    empty_fetchmany_value = []
    supports_transactions = False  #FIXME: Make this True!
    can_return_id_from_insert = True
    supports_select_related = False  # no JOINs on the Datastore
    autocommits_when_autocommit_is_off = True
    uses_savepoints = False
    allows_auto_pk_0 = False  # Datastore keys cannot have id 0
class DatabaseWrapper(BaseDatabaseWrapper):
    """Django database wrapper for the App Engine Datastore backend.

    Wires the dummy Connection/Cursor pair and the Datastore-specific
    operations, creation, introspection and schema-editor classes into
    Django's backend machinery (supports Django 1.8 through 1.11).
    """
    data_types = DatabaseCreation.data_types # These moved in 1.8
    # Operator fragments consumed by the custom compiler module, not real SQL.
    operators = {
        'exact': '= %s',
        'iexact': 'iexact %s',
        'contains': 'contains %s',
        'icontains': 'icontains %s',
        'regex': 'regex %s',
        'iregex': 'iregex %s',
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': 'startswith %s',
        'endswith': 'endswith %s',
        'istartswith': 'istartswith %s',
        'iendswith': 'iendswith %s',
        'isnull': 'isnull %s'
    }
    Database = Database
    # These attributes are only used by Django >= 1.11
    client_class = DatabaseClient
    features_class = DatabaseFeatures
    introspection_class = DatabaseIntrospection
    # BUG FIX: `features_class = DatabaseFeatures` was assigned twice here;
    # the redundant duplicate has been removed.
    ops_class = DatabaseOperations
    creation_class = DatabaseCreation
    validation_class = BaseDatabaseValidation
    def __init__(self, *args, **kwargs):
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        if not hasattr(self, "client"):
            # Django 1.11 creates these automatically, when we call super
            # These are here for Django <= 1.10
            self.features = DatabaseFeatures(self)
            self.ops = DatabaseOperations(self)
            self.client = DatabaseClient(self)
            self.creation = DatabaseCreation(self)
            self.introspection = DatabaseIntrospection(self)
            self.validation = BaseDatabaseValidation(self)
        self.autocommit = True
    def is_usable(self):
        return True
    def get_connection_params(self):
        # The Datastore needs no connection parameters.
        return {}
    def get_new_connection(self, params):
        conn = Connection(self, params)
        load_special_indexes()  # make sure special indexes are loaded
        return conn
    def init_connection_state(self):
        pass
    def _start_transaction_under_autocommit(self):
        pass
    def _set_autocommit(self, enabled):
        self.autocommit = enabled
    def create_cursor(self, name=None):
        self.name = name  # Django >= 1.11
        if not self.connection:
            self.connection = self.get_new_connection(self.settings_dict)
        return Cursor(self.connection)
    def schema_editor(self, *args, **kwargs):
        return DatabaseSchemaEditor(self, *args, **kwargs)
    def validate_no_broken_transaction(self):
        # Override this to do nothing, because it's not relevant to the Datastore
        pass
| StarcoderdataPython |
import glob
import os

# Patch the rgz_rcnn experiment scripts named D* in the current directory,
# rewriting specific (0-based) line numbers in each file in place.
files = glob.glob(os.path.join('D*'))
files.sort()
print ('editing \n',len(files),' of files\n ',files)
for idx,fil in enumerate(files):
    print (fil[:2])  # dataset prefix, e.g. 'D1'
    with open(fil) as data:
        lines = data.readlines()
    # BUG FIX: the output file was previously opened without a context manager
    # and relied on an explicit close(); an exception mid-loop would leak the
    # handle and leave the file truncated. `with` guarantees the close.
    with open(fil, 'w') as new_file:
        for i, line in enumerate(lines):
            new_line = line
            if i == 17:
                # fil[:-3] strips the trailing '.sh'-style 3-char suffix.
                new_line = ' --imdb rgz_2017_' + fil[:-3] + ' \\\n'
            if i == 18:
                new_line = '    --iters 9220 \\\n'
            if i == 19:
                if not fil[:2] == 'D1':
                    new_line = '    --cfg $RGZ_RCNN/experiments/cfgs/faster_rcnn_end2end.yml \\\n'
            if i == 21:
                # fil[3] is the snapshot multiplier digit in the script name.
                if fil[:2] == 'D1':
                    new_line = '    --weights $RGZ_RCNN/output/faster_rcnn_end2end/rgz_2017_trainD1_264/VGGnet_fast_rcnn-'+fil[3]+'0000 \\\n'
                if fil[:2] == 'D3':
                    new_line = '    --weights $RGZ_RCNN/output/faster_rcnn_end2end/rgz_2017_trainD3_600/VGGnet_fast_rcnn-'+fil[3]+'0000 \\\n'
                if fil[:2] == 'D4':
                    new_line = '    --weights $RGZ_RCNN/output/faster_rcnn_end2end/rgz_2017_trainD4_600/VGGnet_fast_rcnn-'+fil[3]+'0000 \\\n'
            new_file.write(new_line)
| StarcoderdataPython |
3202658 | from sys import getsizeof
import os
import multiprocessing
def map(func, items, ordered=False):
    """Apply *func* to every item using a process pool sized to the usable cores.

    Args:
        func: picklable callable applied to each item.
        items: any iterable (materialised internally).
        ordered: when True, results keep input order (imap); otherwise they
            arrive in completion order (imap_unordered).
    Returns:
        A list of results. (Previously a lazy iterator was returned and the
        Pool was never closed, leaking worker processes; materialising the
        results inside the `with` block lets the pool terminate cleanly while
        remaining iterable for existing callers.)

    NOTE: intentionally shadows the builtin ``map``; kept for compatibility.
    """
    items = list(items)
    with multiprocessing.Pool(processes=_get_core_count()) as pool:
        chunk_size = _calculate_chunk_size(items)
        map_function = pool.imap if ordered else pool.imap_unordered
        return list(map_function(func, items, chunksize=chunk_size))
def filter(func, items):
    """Yield the items for which *func* is truthy, evaluating func in parallel.

    BUG FIX: the index counter was previously advanced only when the result
    was truthy (it sat inside the `if` block), desynchronising results from
    items after the first falsy result. enumerate() keeps them aligned.
    Also materialises *items* up front so generators can be filtered.

    NOTE: intentionally shadows the builtin ``filter``; kept for compatibility.
    """
    items = list(items)
    results = map(func, items, ordered=True)
    for index, keep in enumerate(results):
        if keep:
            yield items[index]
def _calculate_chunk_size(items):
    """Pick a pool chunk size targeting roughly one megabyte of items per
    chunk, clamped to at least 1 and to len(items) / core count.

    Uses the first item's shallow size as representative of them all.
    """
    if not items:
        # BUG FIX: previously raised IndexError on an empty input list.
        return 1
    first_item = items[0]
    size_of_one = getsizeof(first_item)
    size_to_use = 1024*1024  # Megabyte
    chunk_size = int(size_to_use / size_of_one)
    chunk_size = min(len(items) / _get_core_count(), chunk_size)
    return int(max(1, chunk_size))
def _get_core_count():
    """Number of CPU cores this process is actually allowed to run on."""
    usable_cpus = os.sched_getaffinity(0)
    return len(usable_cpus)
| StarcoderdataPython |
3245320 | import torch
from torchtext.vocab import build_vocab_from_iterator
from sentiment_anomaly.db import session
from sentiment_anomaly.vectors import CommentsWords
def train_torch_vocab(vectors=None, dim=50):
    """
    Build a Vocab for use with pytorch
    With vectors generated by CoreNLP GloVe as below
    dimension is 50 by default

    Args:
        vectors: optional path to a CoreNLP GloVe vectors text file; when
            given, the parsed vectors are attached to the vocab.
        dim: dimensionality of the vectors (must match the file's columns).
    """
    vocab = build_vocab_from_iterator(CommentsWords(session()))
    if vectors:
        with open(vectors, 'r') as vect_in:
            dct, vect = vects(vect_in.readlines())
        vocab.set_vectors(dct, vect, dim)
    return vocab
def vects(lines):
    """Convert CoreNLP GloVe vector text into a word->row-index dict plus a
    list of 1-D float tensors (one tensor per word, in file order)."""
    word_index = {}
    vectors = []
    for row, raw in enumerate(lines):
        fields = raw.strip().split(' ')
        # First field is the word itself; the rest are its vector components.
        word_index[fields[0]] = row
        components = [float(num) for num in fields[1:]]
        vectors.append(torch.tensor(components))
    return word_index, vectors
def glove_corenlp_text():
    """If we want to train reusable GloVe vectors with demo.sh
    https://github.com/stanfordnlp/GloVe
    then it wants a single file, space separated words
    Feed this output to demo.sh instead of the demo data,
    add the resulting vectors.txt to root of this project
    """
    # Writes 'singlefile.txt' in the current working directory.
    with open('singlefile.txt', 'w') as singlefile:
        for words in CommentsWords(session()):
            singlefile.write(' '.join(words) + ' ')
| StarcoderdataPython |
3362351 | <gh_stars>1-10
from panel.pane import Perspective
# Sample dataset keyed by column name; the integer key 0 deliberately
# exercises Perspective's handling of non-string column identifiers.
data = {
    0: ['1981 01 01 00 161 28 10173 270 21 0 0 0',
        '1981 01 01 01 111 33 10175 270 21 0 0 -9999',
        '1981 01 01 02 111 39 10183 330 26 0 0 -9999',
        '1981 01 01 03 94 39 10192 -9999 0 0 0 -9999',
        '1981 01 01 04 72 39 10196 -9999 0 0 0 -9999'],
    'year': [1981, 1981, 1981, 1981, 1981],
}
def test_perspective_int_cols():
    """Integer column identifiers must round-trip: the Bokeh model stringifies
    them, while _process_events converts incoming strings back to ints."""
    psp = Perspective(
        data, columns=[0], aggregates={0: 'mean'}, sort=[[0, 'desc']],
        row_pivots=[0], column_pivots=[0], filters=[[0, '==', 'None']]
    )
    model = psp.get_root()
    # Python -> model: the int column 0 becomes the string '0' everywhere.
    assert '0' in model.source.data
    assert model.columns == ['0']
    assert model.row_pivots == ['0']
    assert model.column_pivots == ['0']
    assert model.aggregates == {'0': 'mean'}
    assert model.filters == [['0', '==', 'None']]
    assert model.sort == [['0', 'desc']]
    psp2 = Perspective(data)
    # Model -> Python: simulated frontend events use string keys...
    psp2._process_events({
        'columns': ['0'],
        'row_pivots': ['0'],
        'column_pivots': ['0'],
        'aggregates': {'0': 'mean'},
        'filters': [['0', '==', 'None']],
        'sort': [['0', 'desc']]
    })
    # ...and must be mapped back onto the original integer column key.
    assert psp2.columns == [0]
    assert psp2.row_pivots == [0]
    assert psp2.column_pivots == [0]
    assert psp2.aggregates == {0: 'mean'}
    assert psp2.sort == [[0, 'desc']]
    assert psp2.filters == [[0, '==', 'None']]
| StarcoderdataPython |
1659067 | <reponame>kurpenok/Labs<filename>2 semester/PW/Coursera/Python/3 Week/6.py
# Deposit-with-interest: read rate p (percent), rubles x and kopecks y,
# then print the total after one year as "rubles kopecks", truncated.
p = int(input())
x = int(input())
y = int(input())
money = x * 100 + y
# BUG FIX: previously computed with floats (money * p / 100), which can
# mis-round large amounts. Integer arithmetic gives the exact truncated
# result the original floor-division output intended.
money = money * (100 + p) // 100
print(money // 100, money % 100)
| StarcoderdataPython |
3248918 | <filename>plugins/fileSize.py
# fileName : plugins/fileSize.py
# copyright ©️ 2021 nabilanavab
#--------------->
#--------> SIZE FORMATER (TO HUMAN READABLE FORM)
#------------------->
async def get_size_format(
    b, factor=2**10, suffix="B"
):
    """Scale *b* bytes to a human readable string.

    e.g. 1253656 -> '1.20MB', 1253656678 -> '1.17GB'

    BUG FIX: the unit list previously stopped at 'T', so peta/exa/zetta sized
    values fell through and were mislabelled with the 'Y' suffix. The 'Y'
    fallback now only applies beyond zetta.
    """
    for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
        if b < factor:
            return f"{b:.2f}{unit}{suffix}"
        b /= factor
    return f"{b:.2f}Y{suffix}"
"""
Scale bytes to its proper byte format
e.g:
1253656 => '1.20MB'
1253656678 => '1.17GB'
"""
# Telegram: @nabilanavab
| StarcoderdataPython |
1754663 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import warnings
from functools import reduce
import networkx as nx
from kglib.utils.grakn.object.thing import build_thing
from kglib.utils.graph.thing.concept_dict_to_graph import concept_dict_to_graph
def concept_dict_from_concept_map(concept_map, tx):
    """
    Given a concept map, build a dictionary of the variables present and the concepts they refer to, locally storing any
    information required about those concepts.
    Args:
        concept_map: A dict of Concepts provided by Grakn keyed by query variables
        tx: The Grakn transaction used to fetch concept details
    Returns:
        A dictionary of concepts keyed by query variables
    """
    concepts = {}
    for variable, grakn_concept in concept_map.map().items():
        concepts[variable] = build_thing(grakn_concept, tx)
    return concepts
def combine_2_graphs(graph1, graph2):
    """
    Combine two graphs into one. Do this by recognising common nodes between the two.
    Args:
        graph1: Graph to compare
        graph2: Graph to compare
    Returns:
        Combined graph
    Raises:
        ValueError: if a node or edge present in both graphs carries
            conflicting attribute data.
    """
    # Validate that shared nodes agree on their attributes before composing.
    for node, data in graph1.nodes(data=True):
        if graph2.has_node(node):
            data2 = graph2.nodes[node]
            if data2 != data:
                raise ValueError((f'Found non-matching node properties for node {node} '
                                  f'between graphs {graph1} and {graph2}:\n'
                                  f'In graph {graph1}: {data}\n'
                                  f'In graph {graph2}: {data2}'))
    # Same validation for shared (multi-)edges, keyed by (sender, receiver, key).
    for sender, receiver, keys, data in graph1.edges(data=True, keys=True):
        if graph2.has_edge(sender, receiver, keys):
            data2 = graph2.edges[sender, receiver, keys]
            if data2 != data:
                raise ValueError((f'Found non-matching edge properties for edge {sender, receiver, keys} '
                                  f'between graphs {graph1} and {graph2}:\n'
                                  f'In graph {graph1}: {data}\n'
                                  f'In graph {graph2}: {data2}'))
    return nx.compose(graph1, graph2)
def combine_n_graphs(graphs_list):
    """
    Combine N graphs into one. Do this by recognising common nodes between the two.
    Args:
        graphs_list: List of graphs to combine
    Returns:
        Combined graph
    """
    # Fold pairwise combination over the list (combine_2_graphs validates
    # that shared nodes/edges agree before composing).
    return reduce(combine_2_graphs, graphs_list)
def build_graph_from_queries(query_sampler_variable_graph_tuples, grakn_transaction,
                             concept_dict_converter=concept_dict_to_graph, infer=True):
    """
    Builds a graph of Things, interconnected by roles (and *has*), from a set of queries and graphs representing those
    queries (variable graphs)of those queries, over a Grakn transaction
    Args:
        infer: whether to use Grakn's inference engine
        query_sampler_variable_graph_tuples: A list of tuples, each tuple containing a query, a sampling function,
            and a variable_graph
        grakn_transaction: A Grakn transaction
        concept_dict_converter: The function to use to convert from concept_dicts to a Grakn model. This could be
            a typical model or a mathematical model
    Returns:
        A networkx graph
    Raises:
        RuntimeError: if none of the queries returned any results.
        ValueError: propagated from the converter, annotated with the query.
    """
    query_concept_graphs = []
    for query, sampler, variable_graph in query_sampler_variable_graph_tuples:
        # Sample the answers, then localise each answer's concepts.
        concept_maps = sampler(grakn_transaction.query(query, infer=infer))
        concept_dicts = [concept_dict_from_concept_map(concept_map, grakn_transaction) for concept_map in concept_maps]
        answer_concept_graphs = []
        for concept_dict in concept_dicts:
            try:
                answer_concept_graphs.append(concept_dict_converter(concept_dict, variable_graph))
            except ValueError as e:
                # Re-raise with the offending query for easier debugging.
                raise ValueError(str(e) + f'Encountered processing query:\n \"{query}\"')
        if len(answer_concept_graphs) > 1:
            query_concept_graph = combine_n_graphs(answer_concept_graphs)
            query_concept_graphs.append(query_concept_graph)
        else:
            if len(answer_concept_graphs) > 0:
                query_concept_graphs.append(answer_concept_graphs[0])
            else:
                # An empty query is tolerated; only all-empty is an error below.
                warnings.warn(f'There were no results for query: \n\"{query}\"\nand so nothing will be added to the '
                              f'graph for this query')
    if len(query_concept_graphs) == 0:
        # Raise exception when none of the queries returned any results
        raise RuntimeError(f'The graph from queries: {[query_sampler_variable_graph_tuple[0] for query_sampler_variable_graph_tuple in query_sampler_variable_graph_tuples]}\n'
                           f'could not be created, since none of these queries returned results')
    concept_graph = combine_n_graphs(query_concept_graphs)
    return concept_graph
| StarcoderdataPython |
class Solution:
    """Find the k-th largest element of a list using an in-place max-heap."""

    # BUG FIX: the parameter was annotated List[int] without importing
    # typing.List, which raises NameError when the class body executes;
    # the builtin `list` needs no import.
    def findKthLargest(self, nums: list, k: int) -> int:
        """Return the k-th largest value in *nums* (k is 1-based).

        Builds a max-heap in place, then pops the maximum k-1 times so the
        k-th largest ends up at the root. O(n + k log n); mutates *nums*.
        """
        heap_size = len(nums)
        self._build_max_heap(nums, heap_size)
        if k > len(nums):
            # Degenerate guard kept from the original behaviour: with k out
            # of range, return the overall maximum (root of the fresh heap).
            return nums[0]
        for _ in range(k - 1):
            # Move the current maximum to the end and shrink the heap.
            nums[0], nums[heap_size - 1] = nums[heap_size - 1], nums[0]
            heap_size -= 1
            self._max_heapify(nums, 0, heap_size)
        return nums[0]

    def _build_max_heap(self, a, heap_size):
        """Turn the first *heap_size* items of *a* into a max-heap in place.

        (Renamed from the original typo 'buld_max_heap'.)
        """
        for i in range(heap_size // 2, -1, -1):
            self._max_heapify(a, i, heap_size)

    def _max_heapify(self, a, i, heap_size):
        """Sift a[i] down until the subtree rooted at i is a valid max-heap."""
        left, right, largest = i * 2 + 1, i * 2 + 2, i
        if left < heap_size and a[left] > a[largest]:
            largest = left
        if right < heap_size and a[right] > a[largest]:
            largest = right
        if i != largest:
            a[i], a[largest] = a[largest], a[i]
            self._max_heapify(a, largest, heap_size)
# Ad-hoc smoke run: prints the 2nd largest of [2, 1] (expected output: 1).
# NOTE(review): consider guarding with `if __name__ == "__main__":` so the
# module can be imported without side effects.
s = Solution()
nums = [2, 1]
print(s.findKthLargest(nums, 2))
| StarcoderdataPython |
3390301 | import os
from ast import NodeVisitor, parse
from io import StringIO
class YieldSearch(NodeVisitor):
    """AST visitor that records whether a ``yield`` appears in the visited
    statements. Nested function definitions are deliberately not descended
    into, so their yields do not mark the enclosing function as a generator.
    """

    # BUG FIX: previously the attribute only came into existence once a Yield
    # node was visited, so reading `has_yield` on a yield-free function raised
    # AttributeError. A class-level default makes the flag always readable.
    has_yield = False

    def visit_Yield(self, node):
        self.has_yield = True

    def visit_FunctionDef(self, node):
        pass  # do not visit nest function definition to determine
              # if a function has a yield or not...
class Writer:
    """Accumulates generated source lines with 4-space indentation levels."""

    def __init__(self):
        self.level = 0
        self.output = StringIO()

    def push(self):
        """Increase the indentation level by one step."""
        self.level += 1

    def pull(self):
        """Decrease the indentation level by one step."""
        self.level -= 1

    def write(self, code):
        """Append *code* as one line, prefixed by the current indentation."""
        indent = ' ' * (4 * self.level)
        self.output.write('{}{}\n'.format(indent, code))

    def value(self):
        """Return everything written so far as a single string."""
        return self.output.getvalue()
def pythonium_generate_js(filepath, translator_class, requirejs=False, root_path=None, output=None, deep=None):
    """Translate the Python module at *filepath* to JavaScript.

    Args:
        filepath: path of the Python source file to translate.
        translator_class: AST-visitor class producing JS via its `.writer`.
        requirejs: wrap the output in a RequireJS `define(...)` module,
            exporting the translator's `__all__`.
        root_path: package root for resolving absolute dependencies
            (defaults to the file's own directory).
        output: writable file object receiving the generated JS.
        deep: when truthy, recursively translate the module's dependencies.
    """
    dirname = os.path.abspath(os.path.dirname(filepath))
    if not root_path:
        root_path = dirname
    basename = os.path.basename(filepath)
    output_name = os.path.join(dirname, basename + '.js')
    if not output:
        print('Generating {}'.format(output_name))
    # generate js
    with open(os.path.join(dirname, basename)) as f:
        source = f.read()
    # BUG FIX: the source was previously parsed twice (tree = parse(parse(src)));
    # ast.parse does not accept an AST, so the second call raised TypeError.
    tree = parse(source)
    translator = translator_class()
    translator.visit(tree)
    script = translator.writer.value()
    if requirejs:
        out = 'define(function(require) {\n'
        out += script
        if isinstance(translator.__all__, str):
            out += '\nreturn {};\n'.format(translator.__all__)
        elif isinstance(translator.__all__, list):
            # Build a {'name': name, ...} export object from __all__.
            exports = ["{!r}: {}".format(x, x) for x in translator.__all__]
            public = '{{{}}}'.format(', '.join(exports))
            out += '\nreturn {};\n'.format(public)
        else:
            raise Exception('__all__ is not defined!')
        out += '\n})\n'
        script = out
    if deep:
        for dependency in translator.dependencies:
            # BUG FIX: the recursive calls previously omitted translator_class,
            # shifting every subsequent argument by one position.
            if dependency.startswith('.'):
                # Relative dependency: resolve against this file's directory.
                pythonium_generate_js(os.path.join(dirname, dependency + '.py'), translator_class, requirejs, root_path, output, deep)
            else:
                pythonium_generate_js(os.path.join(root_path, dependency[1:] + '.py'), translator_class, requirejs, root_path, output, deep)
    # NOTE(review): this assumes *output* is a writable file object; the
    # `if not output:` branch above implies None is sometimes passed, which
    # would crash here -- confirm intended behaviour with callers.
    output.write(script)
| StarcoderdataPython |
3293240 | from setuptools import setup, find_packages
# PyPI long description is taken verbatim from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()

# Runtime dependencies are maintained in requirements.txt, one per line.
with open("requirements.txt", "r") as fh:
    requirements = fh.readlines()

# Package metadata for scpscraper (SCP-wiki scraping library).
setup(
    name="scpscraper",
    packages=['scpscraper'],
    version="1.0.1",
    license="MIT",
    author="JaonHax",
    author_email="<EMAIL>",
    description="A Python library designed for scraping data from the SCP wiki.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/JaonHax/scp-scraper",
    keywords=["scp", "foundation", "webscraper", "tensorflow", "dataset"],
    install_requires=requirements,
    include_package_data=True,  # ship non-.py files listed in MANIFEST.in
    classifiers=[
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Natural Language :: English",
        "Topic :: Scientific/Engineering :: Artificial Intelligence"
    ],
    python_requires='>=3.6'  # matches the 3.6/3.7/3.8 classifiers above
)
| StarcoderdataPython |
4992 | <filename>sdk/videoanalyzer/azure-mgmt-videoanalyzer/azure/mgmt/videoanalyzer/models/_models.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class Resource(msrest.serialization.Model):
    """Common fields that are returned in the response for all Azure Resource Manager resources.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
     information.
    :vartype system_data: ~video_analyzer.models.SystemData
    """

    # NOTE: AutoRest-generated model — hand edits are lost on regeneration.
    # msrest validation rules: 'readonly' fields are service-populated and
    # never serialized on requests.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
    }

    # Maps each Python attribute to its JSON wire key and msrest type string.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(Resource, self).__init__(**kwargs)
        # All fields are read-only: left as None until the service fills them.
        self.id = None
        self.name = None
        self.type = None
        self.system_data = None
class ProxyResource(Resource):
    """The resource model definition for a Azure Resource Manager proxy resource. It will not have tags and a location.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
     information.
    :vartype system_data: ~video_analyzer.models.SystemData
    """

    # Same metadata as Resource; redeclared because AutoRest emits the maps
    # per class rather than relying on inheritance.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
    }

    def __init__(
        self,
        **kwargs
    ):
        # No extra fields beyond the Resource base.
        super(ProxyResource, self).__init__(**kwargs)
class AccessPolicyEntity(ProxyResource):
    """Access policies help define the authentication rules, and control access to specific video resources.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
     information.
    :vartype system_data: ~video_analyzer.models.SystemData
    :param role: Defines the access level granted by this policy. Possible values include:
     "Reader".
    :type role: str or ~video_analyzer.models.AccessPolicyRole
    :param authentication: Authentication method to be used when validating client API access.
    :type authentication: ~video_analyzer.models.AuthenticationBase
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
    }

    # 'properties.*' keys flatten the ARM "properties" envelope into
    # top-level Python attributes.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'role': {'key': 'properties.role', 'type': 'str'},
        'authentication': {'key': 'properties.authentication', 'type': 'AuthenticationBase'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(AccessPolicyEntity, self).__init__(**kwargs)
        # Optional, client-settable properties.
        self.role = kwargs.get('role', None)
        self.authentication = kwargs.get('authentication', None)
class AccessPolicyEntityCollection(msrest.serialization.Model):
    """A collection of AccessPolicyEntity items.

    :param value: A collection of AccessPolicyEntity items.
    :type value: list[~video_analyzer.models.AccessPolicyEntity]
    :param next_link: A link to the next page of the collection (when the collection contains too
     many results to return in one response).
    :type next_link: str
    """

    # Standard ARM paged-list envelope: items plus continuation link.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[AccessPolicyEntity]'},
        'next_link': {'key': '@nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(AccessPolicyEntityCollection, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = kwargs.get('next_link', None)
class AccountEncryption(msrest.serialization.Model):
    """Defines how the Video Analyzer account is (optionally) encrypted.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The type of key used to encrypt the Account Key. Possible values
     include: "SystemKey", "CustomerKey".
    :type type: str or ~video_analyzer.models.AccountEncryptionKeyType
    :param key_vault_properties: The properties of the key used to encrypt the account.
    :type key_vault_properties: ~video_analyzer.models.KeyVaultProperties
    :param identity: The Key Vault identity.
    :type identity: ~video_analyzer.models.ResourceIdentity
    :ivar status: The current status of the Key Vault mapping.
    :vartype status: str
    """

    # 'type' must be supplied by the caller; 'status' is service-populated.
    _validation = {
        'type': {'required': True},
        'status': {'readonly': True},
    }

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'key_vault_properties': {'key': 'keyVaultProperties', 'type': 'KeyVaultProperties'},
        'identity': {'key': 'identity', 'type': 'ResourceIdentity'},
        'status': {'key': 'status', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(AccountEncryption, self).__init__(**kwargs)
        # kwargs['type'] (no default) enforces the required parameter early.
        self.type = kwargs['type']
        self.key_vault_properties = kwargs.get('key_vault_properties', None)
        self.identity = kwargs.get('identity', None)
        self.status = None
class AudioEncoderBase(msrest.serialization.Model):
    """Base type for all audio encoder presets, which define the recipe or instructions on how audio should be processed.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: AudioEncoderAac.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types.Constant filled by server.
    :type type: str
    :param bitrate_kbps: Bitrate, in kilobits per second or Kbps, at which audio should be encoded
     (2-channel stereo audio at a sampling rate of 48 kHz). Allowed values are 96, 112, 128, 160,
     192, 224, and 256. If omitted, the bitrate of the input audio is used.
    :type bitrate_kbps: str
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'},
    }

    # Polymorphic deserialization: the '@type' discriminator selects the
    # concrete subclass.
    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.AudioEncoderAac': 'AudioEncoderAac'}
    }

    def __init__(
        self,
        **kwargs
    ):
        super(AudioEncoderBase, self).__init__(**kwargs)
        # Discriminator is None on the base; subclasses set their constant.
        self.type = None  # type: Optional[str]
        self.bitrate_kbps = kwargs.get('bitrate_kbps', None)
class AudioEncoderAac(AudioEncoderBase):
    """A custom preset for encoding audio with the AAC codec.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types.Constant filled by server.
    :type type: str
    :param bitrate_kbps: Bitrate, in kilobits per second or Kbps, at which audio should be encoded
     (2-channel stereo audio at a sampling rate of 48 kHz). Allowed values are 96, 112, 128, 160,
     192, 224, and 256. If omitted, the bitrate of the input audio is used.
    :type bitrate_kbps: str
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(AudioEncoderAac, self).__init__(**kwargs)
        # Fixed discriminator value for this concrete type.
        self.type = '#Microsoft.VideoAnalyzer.AudioEncoderAac'  # type: str
class AuthenticationBase(msrest.serialization.Model):
    """Base class for access policies authentication methods.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: JwtAuthentication.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types.Constant filled by server.
    :type type: str
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
    }

    # '@type' discriminator -> concrete subclass for deserialization.
    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.JwtAuthentication': 'JwtAuthentication'}
    }

    def __init__(
        self,
        **kwargs
    ):
        super(AuthenticationBase, self).__init__(**kwargs)
        self.type = None  # type: Optional[str]
class CertificateSource(msrest.serialization.Model):
    """Base class for certificate sources.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: PemCertificateList.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types.Constant filled by server.
    :type type: str
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
    }

    # '@type' discriminator -> concrete subclass for deserialization.
    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.PemCertificateList': 'PemCertificateList'}
    }

    def __init__(
        self,
        **kwargs
    ):
        super(CertificateSource, self).__init__(**kwargs)
        self.type = None  # type: Optional[str]
class CheckNameAvailabilityRequest(msrest.serialization.Model):
    """The check availability request body.

    :param name: The name of the resource for which availability needs to be checked.
    :type name: str
    :param type: The resource type.
    :type type: str
    """

    # Plain request payload — no validation constraints.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(CheckNameAvailabilityRequest, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.type = kwargs.get('type', None)
class CheckNameAvailabilityResponse(msrest.serialization.Model):
    """The check availability result.

    :param name_available: Indicates if the resource name is available.
    :type name_available: bool
    :param reason: The reason why the given name is not available. Possible values include:
     "Invalid", "AlreadyExists".
    :type reason: str or ~video_analyzer.models.CheckNameAvailabilityReason
    :param message: Detailed reason why the given name is available.
    :type message: str
    """

    _attribute_map = {
        'name_available': {'key': 'nameAvailable', 'type': 'bool'},
        'reason': {'key': 'reason', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(CheckNameAvailabilityResponse, self).__init__(**kwargs)
        self.name_available = kwargs.get('name_available', None)
        self.reason = kwargs.get('reason', None)
        self.message = kwargs.get('message', None)
class CredentialsBase(msrest.serialization.Model):
    """Base class for credential objects.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: UsernamePasswordCredentials.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types.Constant filled by server.
    :type type: str
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
    }

    # '@type' discriminator -> concrete subclass for deserialization.
    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.UsernamePasswordCredentials': 'UsernamePasswordCredentials'}
    }

    def __init__(
        self,
        **kwargs
    ):
        super(CredentialsBase, self).__init__(**kwargs)
        self.type = None  # type: Optional[str]
class TokenKey(msrest.serialization.Model):
    """Key properties for JWT token validation.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: EccTokenKey, RsaTokenKey.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types.Constant filled by server.
    :type type: str
    :param kid: Required. JWT token key id. Validation keys are looked up based on the key id
     present on the JWT token header.
    :type kid: str
    """

    _validation = {
        'type': {'required': True},
        'kid': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'kid': {'key': 'kid', 'type': 'str'},
    }

    # '@type' discriminator -> concrete key algorithm subclass.
    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.EccTokenKey': 'EccTokenKey', '#Microsoft.VideoAnalyzer.RsaTokenKey': 'RsaTokenKey'}
    }

    def __init__(
        self,
        **kwargs
    ):
        super(TokenKey, self).__init__(**kwargs)
        self.type = None  # type: Optional[str]
        # Required: KeyError here means the caller omitted 'kid'.
        self.kid = kwargs['kid']
class EccTokenKey(TokenKey):
    """Required validation properties for tokens generated with Elliptical Curve algorithm.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types.Constant filled by server.
    :type type: str
    :param kid: Required. JWT token key id. Validation keys are looked up based on the key id
     present on the JWT token header.
    :type kid: str
    :param alg: Required. Elliptical curve algorithm to be used: ES256, ES384 or ES512. Possible
     values include: "ES256", "ES384", "ES512".
    :type alg: str or ~video_analyzer.models.AccessPolicyEccAlgo
    :param x: Required. X coordinate.
    :type x: str
    :param y: Required. Y coordinate.
    :type y: str
    """

    # Every field of an ECC public key is mandatory.
    _validation = {
        'type': {'required': True},
        'kid': {'required': True},
        'alg': {'required': True},
        'x': {'required': True},
        'y': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'kid': {'key': 'kid', 'type': 'str'},
        'alg': {'key': 'alg', 'type': 'str'},
        'x': {'key': 'x', 'type': 'str'},
        'y': {'key': 'y', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(EccTokenKey, self).__init__(**kwargs)
        # Fixed discriminator value for this concrete type.
        self.type = '#Microsoft.VideoAnalyzer.EccTokenKey'  # type: str
        self.alg = kwargs['alg']
        self.x = kwargs['x']
        self.y = kwargs['y']
class EdgeModuleEntity(ProxyResource):
    """The representation of an edge module.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
     information.
    :vartype system_data: ~video_analyzer.models.SystemData
    :ivar edge_module_id: Internal ID generated for the instance of the Video Analyzer edge module.
    :vartype edge_module_id: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
        'edge_module_id': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'edge_module_id': {'key': 'properties.edgeModuleId', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(EdgeModuleEntity, self).__init__(**kwargs)
        # Read-only; assigned by the service.
        self.edge_module_id = None
class EdgeModuleEntityCollection(msrest.serialization.Model):
    """A collection of EdgeModuleEntity items.

    :param value: A collection of EdgeModuleEntity items.
    :type value: list[~video_analyzer.models.EdgeModuleEntity]
    :param next_link: A link to the next page of the collection (when the collection contains too
     many results to return in one response).
    :type next_link: str
    """

    # Standard ARM paged-list envelope: items plus continuation link.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[EdgeModuleEntity]'},
        'next_link': {'key': '@nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(EdgeModuleEntityCollection, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = kwargs.get('next_link', None)
class EdgeModuleProvisioningToken(msrest.serialization.Model):
    """Provisioning token properties. A provisioning token allows for a single instance of Azure Video analyzer IoT edge module to be initialized and authorized to the cloud account. The provisioning token itself is short lived and it is only used for the initial handshake between IoT edge module and the cloud. After the initial handshake, the IoT edge module will agree on a set of authentication keys which will be auto-rotated as long as the module is able to periodically connect to the cloud. A new provisioning token can be generated for the same IoT edge module in case the module state lost or reset.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar expiration_date: The expiration date of the registration token. The Azure Video Analyzer
     IoT edge module must be initialized and connected to the Internet prior to the token expiration
     date.
    :vartype expiration_date: ~datetime.datetime
    :ivar token: The token blob to be provided to the Azure Video Analyzer IoT edge module through
     the Azure IoT Edge module twin properties.
    :vartype token: str
    """

    # Both fields are produced by the service; never sent by the client.
    _validation = {
        'expiration_date': {'readonly': True},
        'token': {'readonly': True},
    }

    _attribute_map = {
        # 'iso-8601' tells msrest to (de)serialize as an ISO-8601 datetime.
        'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'},
        'token': {'key': 'token', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(EdgeModuleProvisioningToken, self).__init__(**kwargs)
        self.expiration_date = None
        self.token = None
class EncoderPresetBase(msrest.serialization.Model):
    """Base type for all encoder presets, which define the recipe or instructions on how the input content should be processed.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: EncoderCustomPreset, EncoderSystemPreset.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types.Constant filled by server.
    :type type: str
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
    }

    # '@type' discriminator -> concrete preset subclass.
    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.EncoderCustomPreset': 'EncoderCustomPreset', '#Microsoft.VideoAnalyzer.EncoderSystemPreset': 'EncoderSystemPreset'}
    }

    def __init__(
        self,
        **kwargs
    ):
        super(EncoderPresetBase, self).__init__(**kwargs)
        self.type = None  # type: Optional[str]
class EncoderCustomPreset(EncoderPresetBase):
    """Describes a custom preset for encoding the input content using the encoder processor.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types.Constant filled by server.
    :type type: str
    :param audio_encoder: Describes a custom preset for encoding audio.
    :type audio_encoder: ~video_analyzer.models.AudioEncoderBase
    :param video_encoder: Describes a custom preset for encoding video.
    :type video_encoder: ~video_analyzer.models.VideoEncoderBase
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'audio_encoder': {'key': 'audioEncoder', 'type': 'AudioEncoderBase'},
        'video_encoder': {'key': 'videoEncoder', 'type': 'VideoEncoderBase'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(EncoderCustomPreset, self).__init__(**kwargs)
        # Fixed discriminator value for this concrete type.
        self.type = '#Microsoft.VideoAnalyzer.EncoderCustomPreset'  # type: str
        self.audio_encoder = kwargs.get('audio_encoder', None)
        self.video_encoder = kwargs.get('video_encoder', None)
class NodeBase(msrest.serialization.Model):
    """Base class for nodes.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: ProcessorNodeBase, SinkNodeBase, SourceNodeBase.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types.Constant filled by server.
    :type type: str
    :param name: Required. Node name. Must be unique within the topology.
    :type name: str
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    # '@type' discriminator -> node-family subclass.
    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.ProcessorNodeBase': 'ProcessorNodeBase', '#Microsoft.VideoAnalyzer.SinkNodeBase': 'SinkNodeBase', '#Microsoft.VideoAnalyzer.SourceNodeBase': 'SourceNodeBase'}
    }

    def __init__(
        self,
        **kwargs
    ):
        super(NodeBase, self).__init__(**kwargs)
        self.type = None  # type: Optional[str]
        # Required: KeyError here means the caller omitted 'name'.
        self.name = kwargs['name']
class ProcessorNodeBase(NodeBase):
    """Base class for topology processor nodes.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: EncoderProcessor.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types.Constant filled by server.
    :type type: str
    :param name: Required. Node name. Must be unique within the topology.
    :type name: str
    :param inputs: Required. An array of upstream node references within the topology to be used as
     inputs for this node.
    :type inputs: list[~video_analyzer.models.NodeInput]
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'inputs': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
    }

    # '@type' discriminator -> concrete processor subclass.
    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.EncoderProcessor': 'EncoderProcessor'}
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ProcessorNodeBase, self).__init__(**kwargs)
        # Unlike other bases, this one sets its own discriminator constant
        # (it is itself instantiable on the wire).
        self.type = '#Microsoft.VideoAnalyzer.ProcessorNodeBase'  # type: str
        self.inputs = kwargs['inputs']
class EncoderProcessor(ProcessorNodeBase):
    """Encoder processor allows for encoding of the input content. For example, it can used to change the resolution from 4K to 1280x720.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types.Constant filled by server.
    :type type: str
    :param name: Required. Node name. Must be unique within the topology.
    :type name: str
    :param inputs: Required. An array of upstream node references within the topology to be used as
     inputs for this node.
    :type inputs: list[~video_analyzer.models.NodeInput]
    :param preset: Required. The encoder preset, which defines the recipe or instructions on how
     the input content should be processed.
    :type preset: ~video_analyzer.models.EncoderPresetBase
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'inputs': {'required': True},
        'preset': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
        'preset': {'key': 'preset', 'type': 'EncoderPresetBase'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(EncoderProcessor, self).__init__(**kwargs)
        # Fixed discriminator value for this concrete type.
        self.type = '#Microsoft.VideoAnalyzer.EncoderProcessor'  # type: str
        self.preset = kwargs['preset']
class EncoderSystemPreset(EncoderPresetBase):
    """Describes a built-in preset for encoding the input content using the encoder processor.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types.Constant filled by server.
    :type type: str
    :param name: Required. Name of the built-in encoding preset. Possible values include:
     "SingleLayer_540p_H264_AAC", "SingleLayer_720p_H264_AAC", "SingleLayer_1080p_H264_AAC",
     "SingleLayer_2160p_H264_AAC".
    :type name: str or ~video_analyzer.models.EncoderSystemPresetType
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(EncoderSystemPreset, self).__init__(**kwargs)
        # Fixed discriminator value for this concrete type.
        self.type = '#Microsoft.VideoAnalyzer.EncoderSystemPreset'  # type: str
        self.name = kwargs['name']
class Endpoint(msrest.serialization.Model):
    """The endpoint details.

    All required parameters must be populated in order to send to Azure.

    :param endpoint_url: The URL of the endpoint.
    :type endpoint_url: str
    :param type: Required. The type of the endpoint. Possible values include: "ClientApi".
    :type type: str or ~video_analyzer.models.VideoAnalyzerEndpointType
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'endpoint_url': {'key': 'endpointUrl', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(Endpoint, self).__init__(**kwargs)
        self.endpoint_url = kwargs.get('endpoint_url', None)
        # Required: KeyError here means the caller omitted 'type'.
        self.type = kwargs['type']
class EndpointBase(msrest.serialization.Model):
    """Base class for endpoints.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: TlsEndpoint, UnsecuredEndpoint.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types.Constant filled by server.
    :type type: str
    :param credentials: Required. Credentials to be presented to the endpoint.
    :type credentials: ~video_analyzer.models.CredentialsBase
    :param url: Required. The endpoint URL for Video Analyzer to connect to.
    :type url: str
    :param tunnel: Describes the tunnel through which Video Analyzer can connect to the endpoint
     URL. This is an optional property, typically used when the endpoint is behind a firewall.
    :type tunnel: ~video_analyzer.models.TunnelBase
    """

    _validation = {
        'type': {'required': True},
        'credentials': {'required': True},
        'url': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'credentials': {'key': 'credentials', 'type': 'CredentialsBase'},
        'url': {'key': 'url', 'type': 'str'},
        'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'},
    }

    # '@type' discriminator -> concrete endpoint subclass.
    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.TlsEndpoint': 'TlsEndpoint', '#Microsoft.VideoAnalyzer.UnsecuredEndpoint': 'UnsecuredEndpoint'}
    }

    def __init__(
        self,
        **kwargs
    ):
        super(EndpointBase, self).__init__(**kwargs)
        self.type = None  # type: Optional[str]
        # Required fields: KeyError means the caller omitted them.
        self.credentials = kwargs['credentials']
        self.url = kwargs['url']
        self.tunnel = kwargs.get('tunnel', None)
class ErrorAdditionalInfo(msrest.serialization.Model):
    """The resource management error additional info.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar type: The additional info type.
    :vartype type: str
    :ivar info: The additional info.
    :vartype info: any
    """

    _validation = {
        'type': {'readonly': True},
        'info': {'readonly': True},
    }

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        # 'object' leaves the payload as an untyped JSON value.
        'info': {'key': 'info', 'type': 'object'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ErrorAdditionalInfo, self).__init__(**kwargs)
        self.type = None
        self.info = None
class ErrorDetail(msrest.serialization.Model):
    """The error detail.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar code: The error code.
    :vartype code: str
    :ivar message: The error message.
    :vartype message: str
    :ivar target: The error target.
    :vartype target: str
    :ivar details: The error details.
    :vartype details: list[~video_analyzer.models.ErrorDetail]
    :ivar additional_info: The error additional info.
    :vartype additional_info: list[~video_analyzer.models.ErrorAdditionalInfo]
    """

    # All fields are service-populated (standard ARM error contract).
    _validation = {
        'code': {'readonly': True},
        'message': {'readonly': True},
        'target': {'readonly': True},
        'details': {'readonly': True},
        'additional_info': {'readonly': True},
    }

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
        'target': {'key': 'target', 'type': 'str'},
        # 'details' is recursive: nested ErrorDetail items.
        'details': {'key': 'details', 'type': '[ErrorDetail]'},
        'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ErrorDetail, self).__init__(**kwargs)
        self.code = None
        self.message = None
        self.target = None
        self.details = None
        self.additional_info = None
class ErrorResponse(msrest.serialization.Model):
    """Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.).

    :param error: The error object.
    :type error: ~video_analyzer.models.ErrorDetail
    """

    # Top-level wrapper of the ARM/OData error body.
    _attribute_map = {
        'error': {'key': 'error', 'type': 'ErrorDetail'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(ErrorResponse, self).__init__(**kwargs)
        self.error = kwargs.get('error', None)
class GroupLevelAccessControl(msrest.serialization.Model):
    """Group level network access control.

    :param public_network_access: Whether or not public network access is allowed for specified
     resources under the Video Analyzer account. Possible values include: "Enabled", "Disabled".
    :type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess
    """

    _attribute_map = {
        'public_network_access': {'key': 'publicNetworkAccess', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(GroupLevelAccessControl, self).__init__(**kwargs)
        self.public_network_access = kwargs.get('public_network_access', None)
class IotHub(msrest.serialization.Model):
    """The IoT Hub details.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. The IoT Hub resource identifier.
    :type id: str
    :param identity: Required. The IoT Hub identity.
    :type identity: ~video_analyzer.models.ResourceIdentity
    :ivar status: The current status of the Iot Hub mapping.
    :vartype status: str
    """

    _validation = {
        'id': {'required': True},
        'identity': {'required': True},
        'status': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'identity': {'key': 'identity', 'type': 'ResourceIdentity'},
        'status': {'key': 'status', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Required inputs: plain indexing raises KeyError when absent.
        self.id = kwargs['id']
        self.identity = kwargs['identity']
        # Read-only; populated by the service on responses.
        self.status = None
class JwtAuthentication(AuthenticationBase):
    """Properties for access validation based on JSON Web Tokens (JWT).

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types.Constant filled by server.
    :type type: str
    :param issuers: List of expected token issuers. Token issuer is valid if
     it matches at least one of the given values.
    :type issuers: list[str]
    :param audiences: List of expected token audiences. Token audience is
     valid if it matches at least one of the given values.
    :type audiences: list[str]
    :param claims: List of additional token claims to be validated. Token must
     contains all claims and respective values for it to be valid.
    :type claims: list[~video_analyzer.models.TokenClaim]
    :param keys: List of keys which can be used to validate access tokens.
     Having multiple keys allow for seamless key rotation of the token signing
     key. Token signature must match exactly one key.
    :type keys: list[~video_analyzer.models.TokenKey]
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'issuers': {'key': 'issuers', 'type': '[str]'},
        'audiences': {'key': 'audiences', 'type': '[str]'},
        'claims': {'key': 'claims', 'type': '[TokenClaim]'},
        'keys': {'key': 'keys', 'type': '[TokenKey]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Fixed discriminator identifying this polymorphic subtype.
        self.type = '#Microsoft.VideoAnalyzer.JwtAuthentication'  # type: str
        # All validation inputs are optional and default to None.
        self.issuers = kwargs.get('issuers')
        self.audiences = kwargs.get('audiences')
        self.claims = kwargs.get('claims')
        self.keys = kwargs.get('keys')
class KeyVaultProperties(msrest.serialization.Model):
    """The details for accessing the encryption keys in Key Vault.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :param key_identifier: Required. The URL of the Key Vault key used to
     encrypt the account. The key may either be versioned (for example
     https://vault/keys/mykey/version1) or reference a key without a version
     (for example https://vault/keys/mykey).
    :type key_identifier: str
    :ivar current_key_identifier: The current key used to encrypt Video
     Analyzer account, including the key version.
    :vartype current_key_identifier: str
    """

    _validation = {
        'key_identifier': {'required': True},
        'current_key_identifier': {'readonly': True},
    }

    _attribute_map = {
        'key_identifier': {'key': 'keyIdentifier', 'type': 'str'},
        'current_key_identifier': {'key': 'currentKeyIdentifier', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Required input: raises KeyError when not supplied.
        self.key_identifier = kwargs['key_identifier']
        # Read-only; the service reports the key actually in use.
        self.current_key_identifier = None
class ListProvisioningTokenInput(msrest.serialization.Model):
    """Input parameters used to generate a registration token for the Azure Video Analyzer IoT edge module.

    All required parameters must be populated in order to send to Azure.

    :param expiration_date: Required. The desired expiration date of the
     registration token. The Azure Video Analyzer IoT edge module must be
     initialized and connected to the Internet prior to the token expiration
     date.
    :type expiration_date: ~datetime.datetime
    """

    _validation = {
        'expiration_date': {'required': True},
    }

    _attribute_map = {
        'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Required input: plain indexing raises KeyError when absent.
        self.expiration_date = kwargs['expiration_date']
class LivePipeline(ProxyResource):
    """Live pipeline: a unique instance of a live topology used for real-time ingestion, archiving and publishing of content from a single RTSP camera.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar system_data: Azure Resource Manager metadata containing createdBy
     and modifiedBy information.
    :vartype system_data: ~video_analyzer.models.SystemData
    :param topology_name: The reference to an existing pipeline topology
     defined for real-time content processing. When activated, this live
     pipeline will process content according to the pipeline topology
     definition.
    :type topology_name: str
    :param description: An optional description for the pipeline.
    :type description: str
    :param bitrate_kbps: Maximum bitrate capacity in Kbps reserved for the
     live pipeline. The allowed range is from 500 to 3000 Kbps in increments
     of 100 Kbps. If the RTSP camera exceeds this capacity, the service will
     disconnect temporarily from the camera and retry (with exponential
     backoff) until the camera bitrate is back below the reserved capacity,
     so that one 'noisy neighbor' does not affect other live pipelines in
     your account.
    :type bitrate_kbps: int
    :ivar state: Current state of the pipeline (read-only). Possible values
     include: "Inactive", "Activating", "Active", "Deactivating".
    :vartype state: str or ~video_analyzer.models.LivePipelineState
    :param parameters: List of the instance level parameter values for the
     user-defined topology parameters. A pipeline can only define or override
     parameters values for parameters which have been declared in the
     referenced topology. Topology parameters without a default value must be
     defined. Topology parameters with a default value can be optionally be
     overridden.
    :type parameters: list[~video_analyzer.models.ParameterDefinition]
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
        'state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'topology_name': {'key': 'properties.topologyName', 'type': 'str'},
        'description': {'key': 'properties.description', 'type': 'str'},
        'bitrate_kbps': {'key': 'properties.bitrateKbps', 'type': 'int'},
        'state': {'key': 'properties.state', 'type': 'str'},
        'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Caller-settable properties; unset ones default to None.
        self.topology_name = kwargs.get('topology_name')
        self.description = kwargs.get('description')
        self.bitrate_kbps = kwargs.get('bitrate_kbps')
        # Read-only; populated by the service.
        self.state = None
        self.parameters = kwargs.get('parameters')
class LivePipelineCollection(msrest.serialization.Model):
    """A collection of LivePipeline items.

    :param value: A collection of LivePipeline items.
    :type value: list[~video_analyzer.models.LivePipeline]
    :param next_link: A link to the next page of the collection (when the
     collection contains too many results to return in one response).
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[LivePipeline]'},
        'next_link': {'key': '@nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Both paging fields are optional and default to None.
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class LivePipelineOperationStatus(msrest.serialization.Model):
    """Tracks the status of an operation on the live pipeline.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar name: The name of the live pipeline operation.
    :vartype name: str
    :ivar status: The status of the live pipeline operation.
    :vartype status: str
    :ivar error: The error details for the live pipeline operation.
    :vartype error: ~video_analyzer.models.ErrorDetail
    """

    _validation = {
        'name': {'readonly': True},
        'status': {'readonly': True},
        'error': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'status': {'key': 'status', 'type': 'str'},
        'error': {'key': 'error', 'type': 'ErrorDetail'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Every field is read-only and server-populated; start unset.
        for server_field in ('name', 'status', 'error'):
            setattr(self, server_field, None)
class LivePipelineUpdate(ProxyResource):
    """Update payload for a live pipeline: a unique instance of a live topology used for real-time ingestion, archiving and publishing of content from a single RTSP camera.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar system_data: Azure Resource Manager metadata containing createdBy
     and modifiedBy information.
    :vartype system_data: ~video_analyzer.models.SystemData
    :param topology_name: The reference to an existing pipeline topology
     defined for real-time content processing. When activated, this live
     pipeline will process content according to the pipeline topology
     definition.
    :type topology_name: str
    :param description: An optional description for the pipeline.
    :type description: str
    :param bitrate_kbps: Maximum bitrate capacity in Kbps reserved for the
     live pipeline. The allowed range is from 500 to 3000 Kbps in increments
     of 100 Kbps. If the RTSP camera exceeds this capacity, the service will
     disconnect temporarily from the camera and retry (with exponential
     backoff) until the camera bitrate is back below the reserved capacity,
     so that one 'noisy neighbor' does not affect other live pipelines in
     your account.
    :type bitrate_kbps: int
    :ivar state: Current state of the pipeline (read-only). Possible values
     include: "Inactive", "Activating", "Active", "Deactivating".
    :vartype state: str or ~video_analyzer.models.LivePipelineState
    :param parameters: List of the instance level parameter values for the
     user-defined topology parameters. A pipeline can only define or override
     parameters values for parameters which have been declared in the
     referenced topology. Topology parameters without a default value must be
     defined. Topology parameters with a default value can be optionally be
     overridden.
    :type parameters: list[~video_analyzer.models.ParameterDefinition]
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
        'state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'topology_name': {'key': 'properties.topologyName', 'type': 'str'},
        'description': {'key': 'properties.description', 'type': 'str'},
        'bitrate_kbps': {'key': 'properties.bitrateKbps', 'type': 'int'},
        'state': {'key': 'properties.state', 'type': 'str'},
        'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Caller-settable properties; unset ones default to None.
        self.topology_name = kwargs.get('topology_name')
        self.description = kwargs.get('description')
        self.bitrate_kbps = kwargs.get('bitrate_kbps')
        # Read-only; populated by the service.
        self.state = None
        self.parameters = kwargs.get('parameters')
class LogSpecification(msrest.serialization.Model):
    """A diagnostic log emitted by the service.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar name: The diagnostic log category name.
    :vartype name: str
    :ivar display_name: The diagnostic log category display name.
    :vartype display_name: str
    :ivar blob_duration: The time range for requests in each blob.
    :vartype blob_duration: str
    """

    _validation = {
        'name': {'readonly': True},
        'display_name': {'readonly': True},
        'blob_duration': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'blob_duration': {'key': 'blobDuration', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Every field is read-only and server-populated; start unset.
        for server_field in ('name', 'display_name', 'blob_duration'):
            setattr(self, server_field, None)
class MetricDimension(msrest.serialization.Model):
    """A metric dimension.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar name: The metric dimension name.
    :vartype name: str
    :ivar display_name: The display name for the dimension.
    :vartype display_name: str
    :ivar to_be_exported_for_shoebox: Whether to export metric to shoebox.
    :vartype to_be_exported_for_shoebox: bool
    """

    _validation = {
        'name': {'readonly': True},
        'display_name': {'readonly': True},
        'to_be_exported_for_shoebox': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'to_be_exported_for_shoebox': {'key': 'toBeExportedForShoebox', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Every field is read-only and server-populated; start unset.
        for server_field in ('name', 'display_name', 'to_be_exported_for_shoebox'):
            setattr(self, server_field, None)
class MetricSpecification(msrest.serialization.Model):
    """A metric emitted by the service.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar name: The metric name.
    :vartype name: str
    :ivar display_name: The metric display name.
    :vartype display_name: str
    :ivar display_description: The metric display description.
    :vartype display_description: str
    :ivar unit: The metric unit. Possible values include: "Bytes", "Count",
     "Milliseconds".
    :vartype unit: str or ~video_analyzer.models.MetricUnit
    :ivar aggregation_type: The metric aggregation type. Possible values
     include: "Average", "Count", "Total".
    :vartype aggregation_type: str or ~video_analyzer.models.MetricAggregationType
    :ivar lock_aggregation_type: The metric lock aggregation type. Possible
     values include: "Average", "Count", "Total".
    :vartype lock_aggregation_type: str or ~video_analyzer.models.MetricAggregationType
    :param supported_aggregation_types: Supported aggregation types.
    :type supported_aggregation_types: list[str]
    :ivar dimensions: The metric dimensions.
    :vartype dimensions: list[~video_analyzer.models.MetricDimension]
    :ivar enable_regional_mdm_account: Indicates whether regional MDM account
     is enabled.
    :vartype enable_regional_mdm_account: bool
    :ivar source_mdm_account: The source MDM account.
    :vartype source_mdm_account: str
    :ivar source_mdm_namespace: The source MDM namespace.
    :vartype source_mdm_namespace: str
    :ivar supported_time_grain_types: The supported time grain types.
    :vartype supported_time_grain_types: list[str]
    """

    _validation = {
        'name': {'readonly': True},
        'display_name': {'readonly': True},
        'display_description': {'readonly': True},
        'unit': {'readonly': True},
        'aggregation_type': {'readonly': True},
        'lock_aggregation_type': {'readonly': True},
        'dimensions': {'readonly': True},
        'enable_regional_mdm_account': {'readonly': True},
        'source_mdm_account': {'readonly': True},
        'source_mdm_namespace': {'readonly': True},
        'supported_time_grain_types': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'display_description': {'key': 'displayDescription', 'type': 'str'},
        'unit': {'key': 'unit', 'type': 'str'},
        'aggregation_type': {'key': 'aggregationType', 'type': 'str'},
        'lock_aggregation_type': {'key': 'lockAggregationType', 'type': 'str'},
        'supported_aggregation_types': {'key': 'supportedAggregationTypes', 'type': '[str]'},
        'dimensions': {'key': 'dimensions', 'type': '[MetricDimension]'},
        'enable_regional_mdm_account': {'key': 'enableRegionalMdmAccount', 'type': 'bool'},
        'source_mdm_account': {'key': 'sourceMdmAccount', 'type': 'str'},
        'source_mdm_namespace': {'key': 'sourceMdmNamespace', 'type': 'str'},
        'supported_time_grain_types': {'key': 'supportedTimeGrainTypes', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Read-only, server-populated fields start unset.
        self.name = None
        self.display_name = None
        self.display_description = None
        self.unit = None
        self.aggregation_type = None
        self.lock_aggregation_type = None
        # The only caller-settable property on this model.
        self.supported_aggregation_types = kwargs.get('supported_aggregation_types')
        # Remaining read-only fields, also server-populated.
        for server_field in (
            'dimensions',
            'enable_regional_mdm_account',
            'source_mdm_account',
            'source_mdm_namespace',
            'supported_time_grain_types',
        ):
            setattr(self, server_field, None)
class NetworkAccessControl(msrest.serialization.Model):
    """Network access control for a Video Analyzer account.

    :param integration: Public network access for integration group.
    :type integration: ~video_analyzer.models.GroupLevelAccessControl
    :param ingestion: Public network access for ingestion group.
    :type ingestion: ~video_analyzer.models.GroupLevelAccessControl
    :param consumption: Public network access for consumption group.
    :type consumption: ~video_analyzer.models.GroupLevelAccessControl
    """

    _attribute_map = {
        'integration': {'key': 'integration', 'type': 'GroupLevelAccessControl'},
        'ingestion': {'key': 'ingestion', 'type': 'GroupLevelAccessControl'},
        'consumption': {'key': 'consumption', 'type': 'GroupLevelAccessControl'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # All three access groups are optional and default to None.
        self.integration = kwargs.get('integration')
        self.ingestion = kwargs.get('ingestion')
        self.consumption = kwargs.get('consumption')
class NodeInput(msrest.serialization.Model):
    """Describes an input signal to be used on a pipeline node.

    All required parameters must be populated in order to send to Azure.

    :param node_name: Required. The name of the upstream node in the pipeline
     which output is used as input of the current node.
    :type node_name: str
    """

    _validation = {
        'node_name': {'required': True},
    }

    _attribute_map = {
        'node_name': {'key': 'nodeName', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Required input: plain indexing raises KeyError when absent.
        self.node_name = kwargs['node_name']
class Operation(msrest.serialization.Model):
    """An operation.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The operation name.
    :type name: str
    :param display: The operation display name.
    :type display: ~video_analyzer.models.OperationDisplay
    :param origin: Origin of the operation.
    :type origin: str
    :param properties: Operation properties format.
    :type properties: ~video_analyzer.models.Properties
    :param is_data_action: Whether the operation applies to data-plane.
    :type is_data_action: bool
    :param action_type: Indicates the action type. Possible values include:
     "Internal".
    :type action_type: str or ~video_analyzer.models.ActionType
    """

    _validation = {
        'name': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'display': {'key': 'display', 'type': 'OperationDisplay'},
        'origin': {'key': 'origin', 'type': 'str'},
        'properties': {'key': 'properties', 'type': 'Properties'},
        'is_data_action': {'key': 'isDataAction', 'type': 'bool'},
        'action_type': {'key': 'actionType', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Required input: plain indexing raises KeyError when absent.
        self.name = kwargs['name']
        # Everything else is optional and defaults to None.
        self.display = kwargs.get('display')
        self.origin = kwargs.get('origin')
        self.properties = kwargs.get('properties')
        self.is_data_action = kwargs.get('is_data_action')
        self.action_type = kwargs.get('action_type')
class OperationCollection(msrest.serialization.Model):
    """A collection of Operation items.

    :param value: A collection of Operation items.
    :type value: list[~video_analyzer.models.Operation]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[Operation]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Optional; None when no operations were supplied.
        self.value = kwargs.get('value')
class OperationDisplay(msrest.serialization.Model):
    """Operation details.

    :param provider: The service provider.
    :type provider: str
    :param resource: Resource on which the operation is performed.
    :type resource: str
    :param operation: The operation type.
    :type operation: str
    :param description: The operation description.
    :type description: str
    """

    _attribute_map = {
        'provider': {'key': 'provider', 'type': 'str'},
        'resource': {'key': 'resource', 'type': 'str'},
        'operation': {'key': 'operation', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # All display fields are optional and default to None.
        self.provider = kwargs.get('provider')
        self.resource = kwargs.get('resource')
        self.operation = kwargs.get('operation')
        self.description = kwargs.get('description')
class ParameterDeclaration(msrest.serialization.Model):
    """Single topology parameter declaration.

    Declared parameters can and must be referenced throughout the topology
    and can optionally have default values to be used when they are not
    defined in the pipelines.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Name of the parameter.
    :type name: str
    :param type: Required. Type of the parameter. Possible values include:
     "String", "SecretString", "Int", "Double", "Bool".
    :type type: str or ~video_analyzer.models.ParameterType
    :param description: Description of the parameter.
    :type description: str
    :param default: The default value for the parameter to be used if the
     pipeline does not specify a value.
    :type default: str
    """

    _validation = {
        'name': {'required': True},
        'type': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'default': {'key': 'default', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Required inputs: plain indexing raises KeyError when absent.
        self.name = kwargs['name']
        self.type = kwargs['type']
        # Optional inputs default to None.
        self.description = kwargs.get('description')
        self.default = kwargs.get('default')
class ParameterDefinition(msrest.serialization.Model):
    """Defines the parameter value of a specific pipeline topology parameter.

    See pipeline topology parameters for more information.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Name of the parameter declared in the pipeline
     topology.
    :type name: str
    :param value: Parameter value to be applied on this specific pipeline.
    :type value: str
    """

    _validation = {
        'name': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Required input: plain indexing raises KeyError when absent.
        self.name = kwargs['name']
        # Optional; defaults to None.
        self.value = kwargs.get('value')
class PemCertificateList(CertificateSource):
    """A list of PEM formatted certificates.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types.Constant filled by server.
    :type type: str
    :param certificates: Required. PEM formatted public certificates. One
     certificate per entry.
    :type certificates: list[str]
    """

    _validation = {
        'type': {'required': True},
        'certificates': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'certificates': {'key': 'certificates', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Fixed discriminator identifying this polymorphic subtype.
        self.type = '#Microsoft.VideoAnalyzer.PemCertificateList'  # type: str
        # Required input: plain indexing raises KeyError when absent.
        self.certificates = kwargs['certificates']
class PipelineJob(ProxyResource):
    """Pipeline job: a unique instance of a batch topology used for offline processing of selected portions of archived content.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar system_data: Azure Resource Manager metadata containing createdBy
     and modifiedBy information.
    :vartype system_data: ~video_analyzer.models.SystemData
    :param topology_name: Reference to an existing pipeline topology. When
     activated, this pipeline job will process content according to the
     pipeline topology definition.
    :type topology_name: str
    :param description: An optional description for the pipeline.
    :type description: str
    :ivar state: Current state of the pipeline (read-only). Possible values
     include: "Processing", "Canceled", "Completed", "Failed".
    :vartype state: str or ~video_analyzer.models.PipelineJobState
    :ivar expiration: The date-time by when this pipeline job will be
     automatically deleted from your account.
    :vartype expiration: ~datetime.datetime
    :ivar error: Details about the error, in case the pipeline job fails.
    :vartype error: ~video_analyzer.models.PipelineJobError
    :param parameters: List of the instance level parameter values for the
     user-defined topology parameters. A pipeline can only define or override
     parameters values for parameters which have been declared in the
     referenced topology. Topology parameters without a default value must be
     defined. Topology parameters with a default value can be optionally be
     overridden.
    :type parameters: list[~video_analyzer.models.ParameterDefinition]
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
        'state': {'readonly': True},
        'expiration': {'readonly': True},
        'error': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'topology_name': {'key': 'properties.topologyName', 'type': 'str'},
        'description': {'key': 'properties.description', 'type': 'str'},
        'state': {'key': 'properties.state', 'type': 'str'},
        'expiration': {'key': 'properties.expiration', 'type': 'iso-8601'},
        'error': {'key': 'properties.error', 'type': 'PipelineJobError'},
        'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Caller-settable properties; unset ones default to None.
        self.topology_name = kwargs.get('topology_name')
        self.description = kwargs.get('description')
        # Read-only; populated by the service.
        for server_field in ('state', 'expiration', 'error'):
            setattr(self, server_field, None)
        self.parameters = kwargs.get('parameters')
class PipelineJobCollection(msrest.serialization.Model):
    """A collection of PipelineJob items.

    :param value: A collection of PipelineJob items.
    :type value: list[~video_analyzer.models.PipelineJob]
    :param next_link: A link to the next page of the collection (when the
     collection contains too many results to return in one response).
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[PipelineJob]'},
        'next_link': {'key': '@nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Both paging fields are optional and default to None.
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class PipelineJobError(msrest.serialization.Model):
    """Details about the error for a failed pipeline job.

    :param code: The error code.
    :type code: str
    :param message: The error message.
    :type message: str
    """

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Both fields are optional and default to None.
        self.code = kwargs.get('code')
        self.message = kwargs.get('message')
class PipelineJobOperationStatus(msrest.serialization.Model):
    """Tracks the status of an operation on the pipeline job.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar name: The name of the pipeline job operation.
    :vartype name: str
    :ivar status: The status of the pipeline job operation.
    :vartype status: str
    :ivar error: The error details for the pipeline job operation.
    :vartype error: ~video_analyzer.models.ErrorDetail
    """

    _validation = {
        'name': {'readonly': True},
        'status': {'readonly': True},
        'error': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'status': {'key': 'status', 'type': 'str'},
        'error': {'key': 'error', 'type': 'ErrorDetail'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Every field is read-only and server-populated; start unset.
        for server_field in ('name', 'status', 'error'):
            setattr(self, server_field, None)
class PipelineJobUpdate(ProxyResource):
    """Update payload for a pipeline job: a unique instance of a batch topology used for offline processing of selected portions of archived content.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar system_data: Azure Resource Manager metadata containing createdBy
     and modifiedBy information.
    :vartype system_data: ~video_analyzer.models.SystemData
    :param topology_name: Reference to an existing pipeline topology. When
     activated, this pipeline job will process content according to the
     pipeline topology definition.
    :type topology_name: str
    :param description: An optional description for the pipeline.
    :type description: str
    :ivar state: Current state of the pipeline (read-only). Possible values
     include: "Processing", "Canceled", "Completed", "Failed".
    :vartype state: str or ~video_analyzer.models.PipelineJobState
    :ivar expiration: The date-time by when this pipeline job will be
     automatically deleted from your account.
    :vartype expiration: ~datetime.datetime
    :ivar error: Details about the error, in case the pipeline job fails.
    :vartype error: ~video_analyzer.models.PipelineJobError
    :param parameters: List of the instance level parameter values for the
     user-defined topology parameters. A pipeline can only define or override
     parameters values for parameters which have been declared in the
     referenced topology. Topology parameters without a default value must be
     defined. Topology parameters with a default value can be optionally be
     overridden.
    :type parameters: list[~video_analyzer.models.ParameterDefinition]
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
        'state': {'readonly': True},
        'expiration': {'readonly': True},
        'error': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'topology_name': {'key': 'properties.topologyName', 'type': 'str'},
        'description': {'key': 'properties.description', 'type': 'str'},
        'state': {'key': 'properties.state', 'type': 'str'},
        'expiration': {'key': 'properties.expiration', 'type': 'iso-8601'},
        'error': {'key': 'properties.error', 'type': 'PipelineJobError'},
        'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Caller-settable properties; unset ones default to None.
        self.topology_name = kwargs.get('topology_name')
        self.description = kwargs.get('description')
        # Read-only; populated by the service.
        for server_field in ('state', 'expiration', 'error'):
            setattr(self, server_field, None)
        self.parameters = kwargs.get('parameters')
class PipelineTopology(ProxyResource):
    """Pipeline topology describes the processing steps to be applied when processing content for a particular outcome. The topology should be defined according to the scenario to be achieved and can be reused across many pipeline instances which share the same processing characteristics. For instance, a pipeline topology which captures content from a RTSP camera and archives the content can be reused across many different cameras, as long as the same processing is to be applied across all the cameras. Individual instance properties can be defined through the use of user-defined parameters, which allow for a topology to be parameterized. This allows individual pipelines to refer to different values, such as individual cameras' RTSP endpoints and credentials. Overall a topology is composed of the following:
    * Parameters: list of user defined parameters that can be referenced across the topology nodes.
    * Sources: list of one or more data sources nodes such as an RTSP source which allows for content to be ingested from cameras.
    * Processors: list of nodes which perform data analysis or transformations.
    * Sinks: list of one or more data sinks which allow for data to be stored or exported to other destinations.
    Variables are only populated by the server, and will be ignored when sending a request.
    All required parameters must be populated in order to send to Azure.
    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
     information.
    :vartype system_data: ~video_analyzer.models.SystemData
    :param kind: Required. Topology kind. Possible values include: "Live", "Batch".
    :type kind: str or ~video_analyzer.models.Kind
    :param sku: Required. Describes the properties of a SKU.
    :type sku: ~video_analyzer.models.Sku
    :param description: An optional description of the pipeline topology. It is recommended that
     the expected use of the topology be described here.
    :type description: str
    :param parameters: List of the topology parameter declarations. Parameters declared here can be
     referenced throughout the topology nodes through the use of "${PARAMETER_NAME}" string pattern.
     Parameters can have optional default values and can later be defined in individual instances of
     the pipeline.
    :type parameters: list[~video_analyzer.models.ParameterDeclaration]
    :param sources: List of the topology source nodes. Source nodes enable external data to be
     ingested by the pipeline.
    :type sources: list[~video_analyzer.models.SourceNodeBase]
    :param processors: List of the topology processor nodes. Processor nodes enable pipeline data
     to be analyzed, processed or transformed.
    :type processors: list[~video_analyzer.models.ProcessorNodeBase]
    :param sinks: List of the topology sink nodes. Sink nodes allow pipeline data to be stored or
     exported.
    :type sinks: list[~video_analyzer.models.SinkNodeBase]
    """
    # Constraints enforced by msrest: read-only properties are never serialized
    # on requests; required ones must be present before sending to Azure.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
        'kind': {'required': True},
        'sku': {'required': True},
    }
    # Maps Python attribute names to REST payload paths ('properties.*' nest
    # under the ARM properties envelope) and their wire types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'kind': {'key': 'kind', 'type': 'str'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'description': {'key': 'properties.description', 'type': 'str'},
        'parameters': {'key': 'properties.parameters', 'type': '[ParameterDeclaration]'},
        'sources': {'key': 'properties.sources', 'type': '[SourceNodeBase]'},
        'processors': {'key': 'properties.processors', 'type': '[ProcessorNodeBase]'},
        'sinks': {'key': 'properties.sinks', 'type': '[SinkNodeBase]'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(PipelineTopology, self).__init__(**kwargs)
        # 'kind' and 'sku' are required: a missing keyword raises KeyError here.
        self.kind = kwargs['kind']
        self.sku = kwargs['sku']
        self.description = kwargs.get('description', None)
        self.parameters = kwargs.get('parameters', None)
        self.sources = kwargs.get('sources', None)
        self.processors = kwargs.get('processors', None)
        self.sinks = kwargs.get('sinks', None)
class PipelineTopologyCollection(msrest.serialization.Model):
    """One page of PipelineTopology items, with an optional continuation link.

    :param value: A collection of PipelineTopology items.
    :type value: list[~video_analyzer.models.PipelineTopology]
    :param next_link: A link to the next page of the collection (when the collection contains too
     many results to return in one response).
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[PipelineTopology]'},
        'next_link': {'key': '@nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Both properties are optional; missing keywords default to None.
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class PipelineTopologyUpdate(ProxyResource):
    """Pipeline topology describes the processing steps to be applied when processing content for a particular outcome. The topology should be defined according to the scenario to be achieved and can be reused across many pipeline instances which share the same processing characteristics. For instance, a pipeline topology which captures content from a RTSP camera and archives the content can be reused across many different cameras, as long as the same processing is to be applied across all the cameras. Individual instance properties can be defined through the use of user-defined parameters, which allow for a topology to be parameterized. This allows individual pipelines to refer to different values, such as individual cameras' RTSP endpoints and credentials. Overall a topology is composed of the following:
    * Parameters: list of user defined parameters that can be referenced across the topology nodes.
    * Sources: list of one or more data sources nodes such as an RTSP source which allows for content to be ingested from cameras.
    * Processors: list of nodes which perform data analysis or transformations.
    * Sinks: list of one or more data sinks which allow for data to be stored or exported to other destinations.
    Variables are only populated by the server, and will be ignored when sending a request.
    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
     information.
    :vartype system_data: ~video_analyzer.models.SystemData
    :param kind: Topology kind. Possible values include: "Live", "Batch".
    :type kind: str or ~video_analyzer.models.Kind
    :param sku: Describes the properties of a SKU.
    :type sku: ~video_analyzer.models.Sku
    :param description: An optional description of the pipeline topology. It is recommended that
     the expected use of the topology be described here.
    :type description: str
    :param parameters: List of the topology parameter declarations. Parameters declared here can be
     referenced throughout the topology nodes through the use of "${PARAMETER_NAME}" string pattern.
     Parameters can have optional default values and can later be defined in individual instances of
     the pipeline.
    :type parameters: list[~video_analyzer.models.ParameterDeclaration]
    :param sources: List of the topology source nodes. Source nodes enable external data to be
     ingested by the pipeline.
    :type sources: list[~video_analyzer.models.SourceNodeBase]
    :param processors: List of the topology processor nodes. Processor nodes enable pipeline data
     to be analyzed, processed or transformed.
    :type processors: list[~video_analyzer.models.ProcessorNodeBase]
    :param sinks: List of the topology sink nodes. Sink nodes allow pipeline data to be stored or
     exported.
    :type sinks: list[~video_analyzer.models.SinkNodeBase]
    """
    # Update payload: unlike PipelineTopology, nothing here is 'required' —
    # all writable properties may be omitted for a partial (PATCH-style) update.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
    }
    # Maps Python attribute names to REST payload paths and wire types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'kind': {'key': 'kind', 'type': 'str'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'description': {'key': 'properties.description', 'type': 'str'},
        'parameters': {'key': 'properties.parameters', 'type': '[ParameterDeclaration]'},
        'sources': {'key': 'properties.sources', 'type': '[SourceNodeBase]'},
        'processors': {'key': 'properties.processors', 'type': '[ProcessorNodeBase]'},
        'sinks': {'key': 'properties.sinks', 'type': '[SinkNodeBase]'},
    }
    def __init__(
        self,
        **kwargs
    ):
        super(PipelineTopologyUpdate, self).__init__(**kwargs)
        # Every property is optional on an update; missing ones stay None.
        self.kind = kwargs.get('kind', None)
        self.sku = kwargs.get('sku', None)
        self.description = kwargs.get('description', None)
        self.parameters = kwargs.get('parameters', None)
        self.sources = kwargs.get('sources', None)
        self.processors = kwargs.get('processors', None)
        self.sinks = kwargs.get('sinks', None)
class PrivateEndpoint(msrest.serialization.Model):
    """The Private Endpoint resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: The ARM identifier for Private Endpoint.
    :vartype id: str
    """

    _validation = {
        'id': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Read-only: the service fills this in on responses.
        self.id = None
class PrivateEndpointConnection(Resource):
    """The Private Endpoint Connection resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
     information.
    :vartype system_data: ~video_analyzer.models.SystemData
    :param private_endpoint: The resource of private end point.
    :type private_endpoint: ~video_analyzer.models.PrivateEndpoint
    :param private_link_service_connection_state: A collection of information about the state of
     the connection between service consumer and provider.
    :type private_link_service_connection_state:
     ~video_analyzer.models.PrivateLinkServiceConnectionState
    :ivar provisioning_state: The provisioning state of the private endpoint connection resource.
     Possible values include: "Succeeded", "Creating", "Deleting", "Failed".
    :vartype provisioning_state: str or
     ~video_analyzer.models.PrivateEndpointConnectionProvisioningState
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'private_endpoint': {'key': 'properties.privateEndpoint', 'type': 'PrivateEndpoint'},
        'private_link_service_connection_state': {'key': 'properties.privateLinkServiceConnectionState', 'type': 'PrivateLinkServiceConnectionState'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Optional caller-supplied properties.
        self.private_endpoint = kwargs.get('private_endpoint')
        self.private_link_service_connection_state = kwargs.get('private_link_service_connection_state')
        # Read-only; populated by the service.
        self.provisioning_state = None
class PrivateEndpointConnectionListResult(msrest.serialization.Model):
    """List of private endpoint connections associated with the specified resource.

    :param value: Array of private endpoint connections.
    :type value: list[~video_analyzer.models.PrivateEndpointConnection]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[PrivateEndpointConnection]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Optional; defaults to None when no connections are supplied.
        self.value = kwargs.get('value')
class PrivateLinkResource(Resource):
    """A private link resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
     information.
    :vartype system_data: ~video_analyzer.models.SystemData
    :ivar group_id: The private link resource group id.
    :vartype group_id: str
    :ivar required_members: The private link resource required member names.
    :vartype required_members: list[str]
    :param required_zone_names: The private link resource Private link DNS zone name.
    :type required_zone_names: list[str]
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
        'group_id': {'readonly': True},
        'required_members': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'group_id': {'key': 'properties.groupId', 'type': 'str'},
        'required_members': {'key': 'properties.requiredMembers', 'type': '[str]'},
        'required_zone_names': {'key': 'properties.requiredZoneNames', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Read-only; populated by the service.
        self.group_id = None
        self.required_members = None
        # Optional caller-supplied property.
        self.required_zone_names = kwargs.get('required_zone_names')
class PrivateLinkResourceListResult(msrest.serialization.Model):
    """A list of private link resources.

    :param value: Array of private link resources.
    :type value: list[~video_analyzer.models.PrivateLinkResource]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[PrivateLinkResource]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Optional; defaults to None when no resources are supplied.
        self.value = kwargs.get('value')
class PrivateLinkServiceConnectionState(msrest.serialization.Model):
    """State of the connection between a service consumer and a service provider.

    :param status: Indicates whether the connection has been Approved/Rejected/Removed by the owner
     of the service. Possible values include: "Pending", "Approved", "Rejected".
    :type status: str or ~video_analyzer.models.PrivateEndpointServiceConnectionStatus
    :param description: The reason for approval/rejection of the connection.
    :type description: str
    :param actions_required: A message indicating if changes on the service provider require any
     updates on the consumer.
    :type actions_required: str
    """

    _attribute_map = {
        'status': {'key': 'status', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'actions_required': {'key': 'actionsRequired', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # All three properties are optional.
        self.status = kwargs.get('status')
        self.description = kwargs.get('description')
        self.actions_required = kwargs.get('actions_required')
class Properties(msrest.serialization.Model):
    """Metric properties.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar service_specification: The service specifications.
    :vartype service_specification: ~video_analyzer.models.ServiceSpecification
    """

    _validation = {
        'service_specification': {'readonly': True},
    }

    _attribute_map = {
        'service_specification': {'key': 'serviceSpecification', 'type': 'ServiceSpecification'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Read-only; populated by the service.
        self.service_specification = None
class ResourceIdentity(msrest.serialization.Model):
    """The user assigned managed identity to use when accessing a resource.

    All required parameters must be populated in order to send to Azure.

    :param user_assigned_identity: Required. The user assigned managed identity's resource
     identifier to use when accessing a resource.
    :type user_assigned_identity: str
    """

    _validation = {
        'user_assigned_identity': {'required': True},
    }

    _attribute_map = {
        'user_assigned_identity': {'key': 'userAssignedIdentity', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Required: omitting the keyword raises KeyError.
        self.user_assigned_identity = kwargs['user_assigned_identity']
class RsaTokenKey(TokenKey):
    """Required validation properties for tokens generated with RSA algorithm.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types.Constant filled by server.
    :type type: str
    :param kid: Required. JWT token key id. Validation keys are looked up based on the key id
     present on the JWT token header.
    :type kid: str
    :param alg: Required. RSA algorithm to be used: RS256, RS384 or RS512. Possible values include:
     "RS256", "RS384", "RS512".
    :type alg: str or ~video_analyzer.models.AccessPolicyRsaAlgo
    :param n: Required. RSA public key modulus.
    :type n: str
    :param e: Required. RSA public key exponent.
    :type e: str
    """

    _validation = {
        'type': {'required': True},
        'kid': {'required': True},
        'alg': {'required': True},
        'n': {'required': True},
        'e': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'kid': {'key': 'kid', 'type': 'str'},
        'alg': {'key': 'alg', 'type': 'str'},
        'n': {'key': 'n', 'type': 'str'},
        'e': {'key': 'e', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Fixed discriminator identifying this polymorphic subtype on the wire.
        self.type = '#Microsoft.VideoAnalyzer.RsaTokenKey'  # type: str
        # Required RSA parameters ('kid' is consumed by the TokenKey base).
        self.alg = kwargs['alg']
        self.n = kwargs['n']
        self.e = kwargs['e']
class SourceNodeBase(NodeBase):
    """Base class for topology source nodes.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: RtspSource, VideoSource.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types.Constant filled by server.
    :type type: str
    :param name: Required. Node name. Must be unique within the topology.
    :type name: str
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
    }

    # Tells msrest which concrete class to build for each '@type' discriminator.
    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.RtspSource': 'RtspSource', '#Microsoft.VideoAnalyzer.VideoSource': 'VideoSource'}
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.type = '#Microsoft.VideoAnalyzer.SourceNodeBase'  # type: str
class RtspSource(SourceNodeBase):
    """RTSP source allows for media from an RTSP camera or generic RTSP server to be ingested into a pipeline.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types.Constant filled by server.
    :type type: str
    :param name: Required. Node name. Must be unique within the topology.
    :type name: str
    :param transport: Network transport utilized by the RTSP and RTP exchange: TCP or HTTP. When
     using TCP, the RTP packets are interleaved on the TCP RTSP connection. When using HTTP, the
     RTSP messages are exchanged through long lived HTTP connections, and the RTP packages are
     interleaved in the HTTP connections alongside the RTSP messages. Possible values include:
     "Http", "Tcp".
    :type transport: str or ~video_analyzer.models.RtspTransport
    :param endpoint: Required. RTSP endpoint information for Video Analyzer to connect to. This
     contains the required information for Video Analyzer to connect to RTSP cameras and/or generic
     RTSP servers.
    :type endpoint: ~video_analyzer.models.EndpointBase
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'endpoint': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'transport': {'key': 'transport', 'type': 'str'},
        'endpoint': {'key': 'endpoint', 'type': 'EndpointBase'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Fixed discriminator identifying this polymorphic subtype on the wire.
        self.type = '#Microsoft.VideoAnalyzer.RtspSource'  # type: str
        self.transport = kwargs.get('transport')
        # Required: omitting 'endpoint' raises KeyError.
        self.endpoint = kwargs['endpoint']
class TunnelBase(msrest.serialization.Model):
    """Base class for tunnel objects.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: SecureIotDeviceRemoteTunnel.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types.Constant filled by server.
    :type type: str
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
    }

    # Tells msrest which concrete class to build for each '@type' discriminator.
    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel': 'SecureIotDeviceRemoteTunnel'}
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Subclasses overwrite this with their concrete discriminator value.
        self.type = None  # type: Optional[str]
class SecureIotDeviceRemoteTunnel(TunnelBase):
    """A remote tunnel securely established using IoT Hub device information.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types.Constant filled by server.
    :type type: str
    :param iot_hub_name: Required. Name of the IoT Hub.
    :type iot_hub_name: str
    :param device_id: Required. The IoT device id to use when establishing the remote tunnel. This
     string is case-sensitive.
    :type device_id: str
    """

    _validation = {
        'type': {'required': True},
        'iot_hub_name': {'required': True},
        'device_id': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'iot_hub_name': {'key': 'iotHubName', 'type': 'str'},
        'device_id': {'key': 'deviceId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Fixed discriminator identifying this polymorphic subtype on the wire.
        self.type = '#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel'  # type: str
        # Required properties: a missing keyword raises KeyError.
        self.iot_hub_name = kwargs['iot_hub_name']
        self.device_id = kwargs['device_id']
class ServiceSpecification(msrest.serialization.Model):
    """The service metric specifications.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar log_specifications: List of log specifications.
    :vartype log_specifications: list[~video_analyzer.models.LogSpecification]
    :ivar metric_specifications: List of metric specifications.
    :vartype metric_specifications: list[~video_analyzer.models.MetricSpecification]
    """

    _validation = {
        'log_specifications': {'readonly': True},
        'metric_specifications': {'readonly': True},
    }

    _attribute_map = {
        'log_specifications': {'key': 'logSpecifications', 'type': '[LogSpecification]'},
        'metric_specifications': {'key': 'metricSpecifications', 'type': '[MetricSpecification]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Both properties are read-only; populated by the service.
        self.log_specifications = None
        self.metric_specifications = None
class SinkNodeBase(NodeBase):
    """Base class for topology sink nodes.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: VideoSink.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types.Constant filled by server.
    :type type: str
    :param name: Required. Node name. Must be unique within the topology.
    :type name: str
    :param inputs: Required. An array of upstream node references within the topology to be used as
     inputs for this node.
    :type inputs: list[~video_analyzer.models.NodeInput]
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'inputs': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
    }

    # Tells msrest which concrete class to build for each '@type' discriminator.
    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.VideoSink': 'VideoSink'}
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.type = '#Microsoft.VideoAnalyzer.SinkNodeBase'  # type: str
        # Required: omitting 'inputs' raises KeyError.
        self.inputs = kwargs['inputs']
class Sku(msrest.serialization.Model):
    """The SKU details.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The SKU name. Possible values include: "Live_S1", "Batch_S1".
    :type name: str or ~video_analyzer.models.SkuName
    :ivar tier: The SKU tier. Possible values include: "Standard".
    :vartype tier: str or ~video_analyzer.models.SkuTier
    """

    _validation = {
        'name': {'required': True},
        'tier': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'tier': {'key': 'tier', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Required: omitting 'name' raises KeyError.
        self.name = kwargs['name']
        # Read-only; populated by the service.
        self.tier = None
class StorageAccount(msrest.serialization.Model):
    """The details about the associated storage account.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :param id: Required. The ID of the storage account resource. Video Analyzer relies on tables,
     queues, and blobs. The primary storage account must be a Standard Storage account (either
     Microsoft.ClassicStorage or Microsoft.Storage).
    :type id: str
    :param identity: A managed identity that Video Analyzer will use to access the storage account.
    :type identity: ~video_analyzer.models.ResourceIdentity
    :ivar status: The current status of the storage account mapping.
    :vartype status: str
    """

    _validation = {
        'id': {'required': True},
        'status': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'identity': {'key': 'identity', 'type': 'ResourceIdentity'},
        'status': {'key': 'status', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Required: omitting 'id' raises KeyError.
        self.id = kwargs['id']
        self.identity = kwargs.get('identity')
        # Read-only; populated by the service.
        self.status = None
class SystemData(msrest.serialization.Model):
    """Metadata pertaining to creation and last modification of the resource.

    :param created_by: The identity that created the resource.
    :type created_by: str
    :param created_by_type: The type of identity that created the resource. Possible values
     include: "User", "Application", "ManagedIdentity", "Key".
    :type created_by_type: str or ~video_analyzer.models.CreatedByType
    :param created_at: The timestamp of resource creation (UTC).
    :type created_at: ~datetime.datetime
    :param last_modified_by: The identity that last modified the resource.
    :type last_modified_by: str
    :param last_modified_by_type: The type of identity that last modified the resource. Possible
     values include: "User", "Application", "ManagedIdentity", "Key".
    :type last_modified_by_type: str or ~video_analyzer.models.CreatedByType
    :param last_modified_at: The timestamp of resource last modification (UTC).
    :type last_modified_at: ~datetime.datetime
    """

    _attribute_map = {
        'created_by': {'key': 'createdBy', 'type': 'str'},
        'created_by_type': {'key': 'createdByType', 'type': 'str'},
        'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
        'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},
        'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},
        'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # All six properties are optional; missing keywords default to None.
        self.created_by = kwargs.get('created_by')
        self.created_by_type = kwargs.get('created_by_type')
        self.created_at = kwargs.get('created_at')
        self.last_modified_by = kwargs.get('last_modified_by')
        self.last_modified_by_type = kwargs.get('last_modified_by_type')
        self.last_modified_at = kwargs.get('last_modified_at')
class TimeSequenceBase(msrest.serialization.Model):
    """A sequence of datetime ranges as a string.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: VideoSequenceAbsoluteTimeMarkers.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types.Constant filled by server.
    :type type: str
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
    }

    # Tells msrest which concrete class to build for each '@type' discriminator.
    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers': 'VideoSequenceAbsoluteTimeMarkers'}
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Subclasses overwrite this with their concrete discriminator value.
        self.type = None  # type: Optional[str]
class TlsEndpoint(EndpointBase):
    """TLS endpoint describes an endpoint that the pipeline can connect to over TLS transport (data is encrypted in transit).

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types.Constant filled by server.
    :type type: str
    :param credentials: Required. Credentials to be presented to the endpoint.
    :type credentials: ~video_analyzer.models.CredentialsBase
    :param url: Required. The endpoint URL for Video Analyzer to connect to.
    :type url: str
    :param tunnel: Describes the tunnel through which Video Analyzer can connect to the endpoint
     URL. This is an optional property, typically used when the endpoint is behind a firewall.
    :type tunnel: ~video_analyzer.models.TunnelBase
    :param trusted_certificates: List of trusted certificate authorities when authenticating a TLS
     connection. A null list designates that Azure Video Analyzer's list of trusted authorities
     should be used.
    :type trusted_certificates: ~video_analyzer.models.CertificateSource
    :param validation_options: Validation options to use when authenticating a TLS connection. By
     default, strict validation is used.
    :type validation_options: ~video_analyzer.models.TlsValidationOptions
    """

    _validation = {
        'type': {'required': True},
        'credentials': {'required': True},
        'url': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'credentials': {'key': 'credentials', 'type': 'CredentialsBase'},
        'url': {'key': 'url', 'type': 'str'},
        'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'},
        'trusted_certificates': {'key': 'trustedCertificates', 'type': 'CertificateSource'},
        'validation_options': {'key': 'validationOptions', 'type': 'TlsValidationOptions'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Fixed discriminator identifying this polymorphic subtype on the wire.
        # ('credentials', 'url' and 'tunnel' are consumed by the EndpointBase base.)
        self.type = '#Microsoft.VideoAnalyzer.TlsEndpoint'  # type: str
        self.trusted_certificates = kwargs.get('trusted_certificates')
        self.validation_options = kwargs.get('validation_options')
class TlsValidationOptions(msrest.serialization.Model):
    """Settings that relax the validation performed on TLS endpoints.

    :param ignore_hostname: When 'true', certificate subject name validation is skipped.
     Default is 'false'.
    :type ignore_hostname: str
    :param ignore_signature: When 'true', certificate chain trust validation is skipped.
     Default is 'false'.
    :type ignore_signature: str
    """

    _attribute_map = {
        'ignore_hostname': {'key': 'ignoreHostname', 'type': 'str'},
        'ignore_signature': {'key': 'ignoreSignature', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.ignore_hostname = kwargs.get('ignore_hostname')
        self.ignore_signature = kwargs.get('ignore_signature')
class TokenClaim(msrest.serialization.Model):
    """A claim that must be present on a token, together with its expected value.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Name of the claim that must appear on the token.
    :type name: str
    :param value: Required. Value the claim is expected to carry.
    :type value: str
    """

    _validation = {
        'name': {'required': True},
        'value': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Both fields are mandatory: a missing kwarg raises KeyError by design.
        self.name = kwargs['name']
        self.value = kwargs['value']
class TrackedResource(Resource):
    """ARM model for a tracked top-level resource, which carries 'tags' and a 'location'.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Fully qualified resource ID, e.g.
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type, e.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar system_data: ARM metadata with createdBy and modifiedBy information.
    :vartype system_data: ~video_analyzer.models.SystemData
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param location: Required. Geo-location where the resource lives.
    :type location: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
        'location': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'location': {'key': 'location', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.tags = kwargs.get('tags')
        # 'location' is mandatory, so index directly and let KeyError surface.
        self.location = kwargs['location']
class UnsecuredEndpoint(EndpointBase):
    """An endpoint reached over clear transport, i.e. without encryption in transit.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. Discriminator for derived types; constant filled by the server.
    :type type: str
    :param credentials: Required. Credentials presented to the endpoint.
    :type credentials: ~video_analyzer.models.CredentialsBase
    :param url: Required. Endpoint URL that Video Analyzer connects to.
    :type url: str
    :param tunnel: Optional tunnel through which Video Analyzer can reach the endpoint
     URL, typically used when the endpoint sits behind a firewall.
    :type tunnel: ~video_analyzer.models.TunnelBase
    """

    _validation = {
        'type': {'required': True},
        'credentials': {'required': True},
        'url': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'credentials': {'key': 'credentials', 'type': 'CredentialsBase'},
        'url': {'key': 'url', 'type': 'str'},
        'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Only the discriminator is set here; the base class handles the shared fields.
        self.type = '#Microsoft.VideoAnalyzer.UnsecuredEndpoint'  # type: str
class UserAssignedManagedIdentity(msrest.serialization.Model):
    """Details of a user-assigned managed identity used by the Video Analyzer resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar client_id: The client ID.
    :vartype client_id: str
    :ivar principal_id: The principal ID.
    :vartype principal_id: str
    """

    _validation = {
        'client_id': {'readonly': True},
        'principal_id': {'readonly': True},
    }

    _attribute_map = {
        'client_id': {'key': 'clientId', 'type': 'str'},
        'principal_id': {'key': 'principalId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Both fields are server-populated (readonly), so they start out unset.
        self.client_id = None
        self.principal_id = None
class UsernamePasswordCredentials(CredentialsBase):
    """Credentials consisting of a username and password pair.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. Discriminator for derived types; constant filled by the server.
    :type type: str
    :param username: Required. Username presented as part of the credentials.
    :type username: str
    :param password: Required. Password presented as part of the credentials. Prefer
     parameterizing this value as a secret string so it is not echoed back on API
     requests against the resource.
    :type password: str
    """

    _validation = {
        'type': {'required': True},
        'username': {'required': True},
        'password': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'username': {'key': 'username', 'type': 'str'},
        'password': {'key': 'password', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Fixed discriminator identifying this credentials flavor to the service.
        self.type = '#Microsoft.VideoAnalyzer.UsernamePasswordCredentials'  # type: str
        self.username = kwargs['username']
        self.password = kwargs['password']
class VideoAnalyzer(TrackedResource):
    """The Video Analyzer account resource.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Fully qualified resource ID, e.g.
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type, e.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar system_data: ARM metadata with createdBy and modifiedBy information.
    :vartype system_data: ~video_analyzer.models.SystemData
    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param location: Required. Geo-location where the resource lives.
    :type location: str
    :param identity: Identities associated to the Video Analyzer resource.
    :type identity: ~video_analyzer.models.VideoAnalyzerIdentity
    :param storage_accounts: Storage accounts for this resource.
    :type storage_accounts: list[~video_analyzer.models.StorageAccount]
    :ivar endpoints: Endpoints associated with this resource.
    :vartype endpoints: list[~video_analyzer.models.Endpoint]
    :param encryption: Account encryption properties.
    :type encryption: ~video_analyzer.models.AccountEncryption
    :param iot_hubs: IoT Hubs for this resource.
    :type iot_hubs: list[~video_analyzer.models.IotHub]
    :param public_network_access: Whether public network access is allowed for resources
     under the Video Analyzer account. Possible values include: "Enabled", "Disabled".
    :type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess
    :param network_access_control: Network access control for Video Analyzer.
    :type network_access_control: ~video_analyzer.models.NetworkAccessControl
    :ivar provisioning_state: Provisioning state of the account. Possible values include:
     "Failed", "InProgress", "Succeeded".
    :vartype provisioning_state: str or ~video_analyzer.models.ProvisioningState
    :ivar private_endpoint_connections: Private Endpoint Connections created under the
     Video Analyzer account.
    :vartype private_endpoint_connections: list[~video_analyzer.models.PrivateEndpointConnection]
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
        'location': {'required': True},
        'endpoints': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'private_endpoint_connections': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'location': {'key': 'location', 'type': 'str'},
        'identity': {'key': 'identity', 'type': 'VideoAnalyzerIdentity'},
        'storage_accounts': {'key': 'properties.storageAccounts', 'type': '[StorageAccount]'},
        'endpoints': {'key': 'properties.endpoints', 'type': '[Endpoint]'},
        'encryption': {'key': 'properties.encryption', 'type': 'AccountEncryption'},
        'iot_hubs': {'key': 'properties.iotHubs', 'type': '[IotHub]'},
        'public_network_access': {'key': 'properties.publicNetworkAccess', 'type': 'str'},
        'network_access_control': {'key': 'properties.networkAccessControl', 'type': 'NetworkAccessControl'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Writable properties come from kwargs; readonly ones start as None and are
        # filled in by the server on deserialization.
        self.identity = kwargs.get('identity')
        self.storage_accounts = kwargs.get('storage_accounts')
        self.endpoints = None
        self.encryption = kwargs.get('encryption')
        self.iot_hubs = kwargs.get('iot_hubs')
        self.public_network_access = kwargs.get('public_network_access')
        self.network_access_control = kwargs.get('network_access_control')
        self.provisioning_state = None
        self.private_endpoint_connections = None
class VideoAnalyzerCollection(msrest.serialization.Model):
    """A collection of VideoAnalyzer items.

    :param value: The VideoAnalyzer items in the collection.
    :type value: list[~video_analyzer.models.VideoAnalyzer]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VideoAnalyzer]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.value = kwargs.get('value')
class VideoAnalyzerIdentity(msrest.serialization.Model):
    """Managed identity configuration for the Video Analyzer resource.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The identity type.
    :type type: str
    :param user_assigned_identities: The user-assigned managed identities.
    :type user_assigned_identities: dict[str, ~video_analyzer.models.UserAssignedManagedIdentity]
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'type': {'key': 'type', 'type': 'str'},
        'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{UserAssignedManagedIdentity}'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # 'type' is mandatory, so index directly and let KeyError surface.
        self.type = kwargs['type']
        self.user_assigned_identities = kwargs.get('user_assigned_identities')
class VideoAnalyzerOperationStatus(msrest.serialization.Model):
    """Status of a Video Analyzer operation.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Operation identifier.
    :type name: str
    :param id: Operation resource ID.
    :type id: str
    :param start_time: Operation start time.
    :type start_time: str
    :param end_time: Operation end time.
    :type end_time: str
    :param status: Operation status.
    :type status: str
    :param error: The error detail.
    :type error: ~video_analyzer.models.ErrorDetail
    """

    _validation = {
        'name': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'start_time': {'key': 'startTime', 'type': 'str'},
        'end_time': {'key': 'endTime', 'type': 'str'},
        'status': {'key': 'status', 'type': 'str'},
        'error': {'key': 'error', 'type': 'ErrorDetail'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # 'name' is the only required field; everything else is optional.
        self.name = kwargs['name']
        self.id = kwargs.get('id')
        self.start_time = kwargs.get('start_time')
        self.end_time = kwargs.get('end_time')
        self.status = kwargs.get('status')
        self.error = kwargs.get('error')
class VideoAnalyzerPrivateEndpointConnectionOperationStatus(msrest.serialization.Model):
    """Status of a private endpoint connection operation.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Operation identifier.
    :type name: str
    :param id: Operation resource ID.
    :type id: str
    :param start_time: Operation start time.
    :type start_time: str
    :param end_time: Operation end time.
    :type end_time: str
    :param status: Operation status.
    :type status: str
    :param error: The error detail.
    :type error: ~video_analyzer.models.ErrorDetail
    """

    _validation = {
        'name': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'start_time': {'key': 'startTime', 'type': 'str'},
        'end_time': {'key': 'endTime', 'type': 'str'},
        'status': {'key': 'status', 'type': 'str'},
        'error': {'key': 'error', 'type': 'ErrorDetail'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # 'name' is the only required field; everything else is optional.
        self.name = kwargs['name']
        self.id = kwargs.get('id')
        self.start_time = kwargs.get('start_time')
        self.end_time = kwargs.get('end_time')
        self.status = kwargs.get('status')
        self.error = kwargs.get('error')
class VideoAnalyzerUpdate(msrest.serialization.Model):
    """Payload for the update operation on a Video Analyzer account.

    Variables are only populated by the server, and will be ignored when sending a request.

    :param tags: A set of tags. Resource tags.
    :type tags: dict[str, str]
    :param identity: Identities associated to the Video Analyzer resource.
    :type identity: ~video_analyzer.models.VideoAnalyzerIdentity
    :param storage_accounts: Storage accounts for this resource.
    :type storage_accounts: list[~video_analyzer.models.StorageAccount]
    :ivar endpoints: Endpoints associated with this resource.
    :vartype endpoints: list[~video_analyzer.models.Endpoint]
    :param encryption: Account encryption properties.
    :type encryption: ~video_analyzer.models.AccountEncryption
    :param iot_hubs: IoT Hubs for this resource.
    :type iot_hubs: list[~video_analyzer.models.IotHub]
    :param public_network_access: Whether public network access is allowed for resources
     under the Video Analyzer account. Possible values include: "Enabled", "Disabled".
    :type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess
    :param network_access_control: Network access control for Video Analyzer.
    :type network_access_control: ~video_analyzer.models.NetworkAccessControl
    :ivar provisioning_state: Provisioning state of the account. Possible values include:
     "Failed", "InProgress", "Succeeded".
    :vartype provisioning_state: str or ~video_analyzer.models.ProvisioningState
    :ivar private_endpoint_connections: Private Endpoint Connections created under the
     Video Analyzer account.
    :vartype private_endpoint_connections: list[~video_analyzer.models.PrivateEndpointConnection]
    """

    _validation = {
        'endpoints': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'private_endpoint_connections': {'readonly': True},
    }

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'identity': {'key': 'identity', 'type': 'VideoAnalyzerIdentity'},
        'storage_accounts': {'key': 'properties.storageAccounts', 'type': '[StorageAccount]'},
        'endpoints': {'key': 'properties.endpoints', 'type': '[Endpoint]'},
        'encryption': {'key': 'properties.encryption', 'type': 'AccountEncryption'},
        'iot_hubs': {'key': 'properties.iotHubs', 'type': '[IotHub]'},
        'public_network_access': {'key': 'properties.publicNetworkAccess', 'type': 'str'},
        'network_access_control': {'key': 'properties.networkAccessControl', 'type': 'NetworkAccessControl'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Writable properties come from kwargs; readonly ones start as None and are
        # filled in by the server on deserialization.
        self.tags = kwargs.get('tags')
        self.identity = kwargs.get('identity')
        self.storage_accounts = kwargs.get('storage_accounts')
        self.endpoints = None
        self.encryption = kwargs.get('encryption')
        self.iot_hubs = kwargs.get('iot_hubs')
        self.public_network_access = kwargs.get('public_network_access')
        self.network_access_control = kwargs.get('network_access_control')
        self.provisioning_state = None
        self.private_endpoint_connections = None
class VideoArchival(msrest.serialization.Model):
    """Video archival properties.

    :param retention_period: Maximum age of archived video segments kept in storage, as
     an ISO8601 duration with day granularity, up to 10 years. For example, with P30D
     (30 days) content older than 30 days is periodically deleted. The value can be
     changed at any time; the new retention period takes effect within 24 hours.
    :type retention_period: str
    """

    _attribute_map = {
        'retention_period': {'key': 'retentionPeriod', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.retention_period = kwargs.get('retention_period')
class VideoContentToken(msrest.serialization.Model):
    """"Video content token grants access to the video content URLs.".

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar expiration_date: Token expiration date in ISO8601 format (e.g.
     2021-01-01T00:00:00Z).
    :vartype expiration_date: ~datetime.datetime
    :ivar token: Token value to append to a video content URL as the "token" query
     string parameter. Each token is specific to a single video.
    :vartype token: str
    """

    _validation = {
        'expiration_date': {'readonly': True},
        'token': {'readonly': True},
    }

    _attribute_map = {
        'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'},
        'token': {'key': 'token', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Both fields are server-populated (readonly), so they start out unset.
        self.expiration_date = None
        self.token = None
class VideoContentUrls(msrest.serialization.Model):
    """The set of URLs through which video content can be consumed.

    :param download_url: Video file download URL. Combined with the video content
     authorization token it downloads the video MP4 file, playable on any standard media
     player. Available when the video type is 'file' and the file is ready for
     consumption.
    :type download_url: str
    :param archive_base_url: Video archive streaming base URL. Archived content plays
     automatically in the Azure Video Analyzer player widget, or the URL can be combined
     with the video content authorization token on any compatible DASH or HLS player by
     appending the following to the base URL:

     .. code-block::

        - HLSv4:     /manifest(format=m3u8-aapl).m3u8
        - HLS CMAF:  /manifest(format=m3u8-cmaf)
        - DASH CMAF: /manifest(format=mpd-time-cmaf)

     An ongoing recording can also be played in "live mode" with latency roughly double
     the chosen segment length. Available when the video type is 'archive' and archiving
     is enabled.
    :type archive_base_url: str
    :param rtsp_tunnel_url: Video low-latency streaming URL. Live content plays
     automatically in the Azure Video Analyzer player widget, or the URL can be combined
     with the video content authorization token to expose a WebSocket-tunneled RTSP
     stream. Available when the video type is 'archive' and a live low-latency feed
     exists at the source.
    :type rtsp_tunnel_url: str
    :param preview_image_urls: Preview image URLs. Combined with the video content
     authorization token they download the most recent still image from the archive in
     several resolutions. Available when the video type is 'archive' and preview images
     are enabled.
    :type preview_image_urls: ~video_analyzer.models.VideoPreviewImageUrls
    """

    _attribute_map = {
        'download_url': {'key': 'downloadUrl', 'type': 'str'},
        'archive_base_url': {'key': 'archiveBaseUrl', 'type': 'str'},
        'rtsp_tunnel_url': {'key': 'rtspTunnelUrl', 'type': 'str'},
        'preview_image_urls': {'key': 'previewImageUrls', 'type': 'VideoPreviewImageUrls'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.download_url = kwargs.get('download_url')
        self.archive_base_url = kwargs.get('archive_base_url')
        self.rtsp_tunnel_url = kwargs.get('rtsp_tunnel_url')
        self.preview_image_urls = kwargs.get('preview_image_urls')
class VideoCreationProperties(msrest.serialization.Model):
    """Optional properties applied when a new video resource must be created on the
    service. They have no effect if the video already exists.

    :param title: Optional user-provided title, up to 256 characters.
    :type title: str
    :param description: Optional user-provided description, up to 2048 characters.
    :type description: str
    :param segment_length: Length of the individual content files (segments) persisted
     to storage. Smaller segments lower archive playback latency but generate more
     storage transactions; larger segments do the opposite. Must be an ISO8601 duration
     (e.g. "PT30S" is 30 seconds) between 30 seconds and 5 minutes, in 30-second
     increments. Changing this after the video resource is first created can cause
     errors when uploading content to the archive. Defaults to 30 seconds. Only allowed
     for topologies whose "kind" is "live".
    :type segment_length: str
    :param retention_period: How long the video is kept in storage, as an ISO8601
     duration (e.g. "P1D" is 1 day) between 1 day and 10 years, in 1-day increments.
     When absent (null), all content is retained indefinitely. Only allowed for
     topologies whose "kind" is "live".
    :type retention_period: str
    """

    _attribute_map = {
        'title': {'key': 'title', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'segment_length': {'key': 'segmentLength', 'type': 'str'},
        'retention_period': {'key': 'retentionPeriod', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.title = kwargs.get('title')
        self.description = kwargs.get('description')
        self.segment_length = kwargs.get('segment_length')
        self.retention_period = kwargs.get('retention_period')
class VideoEncoderBase(msrest.serialization.Model):
    """Base type for all video encoding presets, which describe how the input video
    should be processed.

    You probably want to use the sub-classes and not this class directly. Known
    sub-classes are: VideoEncoderH264.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. Discriminator for derived types; constant filled by the server.
    :type type: str
    :param bitrate_kbps: Maximum encoding bitrate in kilobits per second (Kbps). When
     omitted, the encoder picks a value automatically to match the input video quality.
    :type bitrate_kbps: str
    :param frame_rate: Frame rate (frames per second) of the encoded video; must be
     greater than zero and at most 300. When omitted, the encoder uses the input video's
     average frame rate.
    :type frame_rate: str
    :param scale: Resolution of the encoded video. When omitted, the encoder uses the
     input video's resolution.
    :type scale: ~video_analyzer.models.VideoScale
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'},
        'frame_rate': {'key': 'frameRate', 'type': 'str'},
        'scale': {'key': 'scale', 'type': 'VideoScale'},
    }

    # Maps the '@type' discriminator value to the concrete model class.
    _subtype_map = {
        'type': {'#Microsoft.VideoAnalyzer.VideoEncoderH264': 'VideoEncoderH264'}
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # The discriminator is left unset on the base type; subclasses fill it in.
        self.type = None  # type: Optional[str]
        self.bitrate_kbps = kwargs.get('bitrate_kbps')
        self.frame_rate = kwargs.get('frame_rate')
        self.scale = kwargs.get('scale')
class VideoEncoderH264(VideoEncoderBase):
    """A custom preset for encoding video with the H.264 (AVC) codec.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. Discriminator for derived types; constant filled by the server.
    :type type: str
    :param bitrate_kbps: Maximum encoding bitrate in kilobits per second (Kbps). When
     omitted, the encoder picks a value automatically to match the input video quality.
    :type bitrate_kbps: str
    :param frame_rate: Frame rate (frames per second) of the encoded video; must be
     greater than zero and at most 300. When omitted, the encoder uses the input video's
     average frame rate.
    :type frame_rate: str
    :param scale: Resolution of the encoded video. When omitted, the encoder uses the
     input video's resolution.
    :type scale: ~video_analyzer.models.VideoScale
    """

    _validation = {
        'type': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'},
        'frame_rate': {'key': 'frameRate', 'type': 'str'},
        'scale': {'key': 'scale', 'type': 'VideoScale'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Fixed discriminator identifying this encoder preset to the service.
        self.type = '#Microsoft.VideoAnalyzer.VideoEncoderH264'  # type: str
class VideoEntity(ProxyResource):
    """A video resource within Azure Video Analyzer. Videos can be ingested from RTSP
    cameras through live pipelines or created by exporting sequences from captured video
    through a pipeline job. Videos ingested through live pipelines can be streamed via
    the Azure Video Analyzer Player Widget or compatible players; exported videos can be
    downloaded as MP4 files.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Fully qualified resource ID, e.g.
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type, e.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar system_data: ARM metadata with createdBy and modifiedBy information.
    :vartype system_data: ~video_analyzer.models.SystemData
    :param title: Optional user-provided video title, up to 256 characters.
    :type title: str
    :param description: Optional user-provided video description, up to 2048 characters.
    :type description: str
    :ivar type_properties_type: Video content type; different types suit different
     applications and scenarios. Possible values include: "Archive", "File".
    :vartype type_properties_type: str or ~video_analyzer.models.VideoType
    :ivar flags: Information about available video actions and dynamic properties based
     on the current video state.
    :vartype flags: ~video_analyzer.models.VideoFlags
    :ivar content_urls: Set of URLs to the video content.
    :vartype content_urls: ~video_analyzer.models.VideoContentUrls
    :param media_info: Information about the video and audio content.
    :type media_info: ~video_analyzer.models.VideoMediaInfo
    :param archival: Video archival properties.
    :type archival: ~video_analyzer.models.VideoArchival
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
        'type_properties_type': {'readonly': True},
        'flags': {'readonly': True},
        'content_urls': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'title': {'key': 'properties.title', 'type': 'str'},
        'description': {'key': 'properties.description', 'type': 'str'},
        'type_properties_type': {'key': 'properties.type', 'type': 'str'},
        'flags': {'key': 'properties.flags', 'type': 'VideoFlags'},
        'content_urls': {'key': 'properties.contentUrls', 'type': 'VideoContentUrls'},
        'media_info': {'key': 'properties.mediaInfo', 'type': 'VideoMediaInfo'},
        'archival': {'key': 'properties.archival', 'type': 'VideoArchival'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Writable properties come from kwargs; readonly ones start as None and are
        # filled in by the server on deserialization.
        self.title = kwargs.get('title')
        self.description = kwargs.get('description')
        self.type_properties_type = None
        self.flags = None
        self.content_urls = None
        self.media_info = kwargs.get('media_info')
        self.archival = kwargs.get('archival')
class VideoEntityCollection(msrest.serialization.Model):
    """A collection of VideoEntity items.

    :param value: The VideoEntity items in the collection.
    :type value: list[~video_analyzer.models.VideoEntity]
    :param next_link: Link to the next page when the collection holds too many results
     to return in one response.
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[VideoEntity]'},
        'next_link': {'key': '@nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class VideoFlags(msrest.serialization.Model):
    """Flags describing the available video actions and dynamic properties derived from
    the current video state.

    All required parameters must be populated in order to send to Azure.

    :param can_stream: Required. Whether the video can be streamed. Only "archive" type
     videos can be streamed.
    :type can_stream: bool
    :param has_data: Required. Whether data has ever been recorded or uploaded into the
     video. Newly created videos have this set to false.
    :type has_data: bool
    :param is_in_use: Required. Whether the video is currently referenced by an active
     pipeline. Being referenced does not necessarily mean data is being received — for
     example, recording may be gated on events, or the camera may be unreachable.
    :type is_in_use: bool
    """

    _validation = {
        'can_stream': {'required': True},
        'has_data': {'required': True},
        'is_in_use': {'required': True},
    }

    _attribute_map = {
        'can_stream': {'key': 'canStream', 'type': 'bool'},
        'has_data': {'key': 'hasData', 'type': 'bool'},
        'is_in_use': {'key': 'isInUse', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # All three flags are mandatory: missing kwargs raise KeyError by design.
        self.can_stream = kwargs['can_stream']
        self.has_data = kwargs['has_data']
        self.is_in_use = kwargs['is_in_use']
class VideoMediaInfo(msrest.serialization.Model):
    """Describes the video and audio content characteristics.

    :param segment_length: Length of the individual video files (segments) persisted to
     storage, as an ISO8601 duration (i.e. "PT30S" equals 30 seconds). Allowed values
     range from 30 seconds to 5 minutes in 30 second increments; smaller segments lower
     archive playback latency but generate more storage transactions.
    :type segment_length: str
    """

    _attribute_map = {
        'segment_length': {'key': 'segmentLength', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VideoMediaInfo, self).__init__(**kwargs)
        # Optional; defaults to None when not supplied.
        self.segment_length = kwargs.get('segment_length')
class VideoPreviewImageUrls(msrest.serialization.Model):
    """Preview still-image URLs for the video.

    Combine these URLs with the video content authorization token to download the most
    recent still image from the video archive at the desired resolution. They are
    available when the video type is 'archive' and preview images are enabled.

    :param small: Low resolution preview image URL.
    :type small: str
    :param medium: Medium resolution preview image URL.
    :type medium: str
    :param large: High resolution preview image URL.
    :type large: str
    """

    _attribute_map = {
        'small': {'key': 'small', 'type': 'str'},
        'medium': {'key': 'medium', 'type': 'str'},
        'large': {'key': 'large', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VideoPreviewImageUrls, self).__init__(**kwargs)
        # Every resolution is optional and defaults to None.
        for size in ('small', 'medium', 'large'):
            setattr(self, size, kwargs.get(size))
class VideoPublishingOptions(msrest.serialization.Model):
    """Optional flags that change how video is published.

    These are only allowed for topologies where "kind" is set to "live".

    :param disable_archive: When set to 'true' content is not archived or recorded (used,
     for example, when the topology serves only low latency streaming). Default is
     'false'. If set to 'true', then "disableRtspPublishing" must be 'false'.
    :type disable_archive: str
    :param disable_rtsp_publishing: When set to 'true' the RTSP playback URL is not
     published, disabling low latency streaming (used, for example, when the topology
     only archives content). Default is 'false'. If set to 'true', then "disableArchive"
     must be 'false'.
    :type disable_rtsp_publishing: str
    """

    _attribute_map = {
        'disable_archive': {'key': 'disableArchive', 'type': 'str'},
        'disable_rtsp_publishing': {'key': 'disableRtspPublishing', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VideoPublishingOptions, self).__init__(**kwargs)
        # Both options are optional strings ('true'/'false'); default None.
        self.disable_archive = kwargs.get('disable_archive')
        self.disable_rtsp_publishing = kwargs.get('disable_rtsp_publishing')
class VideoScale(msrest.serialization.Model):
    """The video scaling information.

    :param height: The desired output video height.
    :type height: str
    :param width: The desired output video width.
    :type width: str
    :param mode: The video scaling mode to apply; default is 'Pad'. 'Pad' and 'Stretch'
     require both width and height, while 'PreserveAspectRatio' needs only one of them.
     Possible values include: "Pad", "PreserveAspectRatio", "Stretch".
    :type mode: str or ~video_analyzer.models.VideoScaleMode
    """

    _attribute_map = {
        'height': {'key': 'height', 'type': 'str'},
        'width': {'key': 'width', 'type': 'str'},
        'mode': {'key': 'mode', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VideoScale, self).__init__(**kwargs)
        # All properties are optional; missing ones default to None.
        for prop in ('height', 'width', 'mode'):
            setattr(self, prop, kwargs.get(prop))
class VideoSequenceAbsoluteTimeMarkers(TimeSequenceBase):
    """A sequence of absolute datetime ranges, encoded as a string.

    The datetime values should follow ISO8601 and the sum of the ranges should add up to
    24 hours or less. Currently only one range may be specified in the sequence.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types.Constant filled by server.
    :type type: str
    :param ranges: Required. The sequence of datetime ranges. Example:
     '[["2021-10-05T03:30:00Z", "2021-10-05T03:40:00Z"]]'.
    :type ranges: str
    """

    _validation = {
        'type': {'required': True},
        'ranges': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'ranges': {'key': 'ranges', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(VideoSequenceAbsoluteTimeMarkers, self).__init__(**kwargs)
        # Discriminator value identifying this derived type on the wire.
        self.type = '#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers'  # type: str
        # Required; a missing keyword raises KeyError.
        self.ranges = kwargs['ranges']
class VideoSink(SinkNodeBase):
    """Video sink node: captures video/audio and publishes it via a video resource.

    In a live topology, video and audio may optionally be archived (producing a video of
    type 'archive'). In a batch topology, the content is stored as a file and published
    via a video resource of type 'file'.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types.Constant filled by server.
    :type type: str
    :param name: Required. Node name. Must be unique within the topology.
    :type name: str
    :param inputs: Required. Upstream node references within the topology used as inputs
     for this node.
    :type inputs: list[~video_analyzer.models.NodeInput]
    :param video_name: Required. Name of a new or existing video resource used to capture
     and publish content. Note: if downstream of an RTSP source with disableArchive set
     to true, then no content is archived.
    :type video_name: str
    :param video_creation_properties: Optional properties used in case a new video
     resource needs to be created on the service.
    :type video_creation_properties: ~video_analyzer.models.VideoCreationProperties
    :param video_publishing_options: Options that change how the video sink publishes
     content via the video resource. Only allowed for topologies where "kind" is "live".
    :type video_publishing_options: ~video_analyzer.models.VideoPublishingOptions
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'inputs': {'required': True},
        'video_name': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
        'video_name': {'key': 'videoName', 'type': 'str'},
        'video_creation_properties': {'key': 'videoCreationProperties', 'type': 'VideoCreationProperties'},
        'video_publishing_options': {'key': 'videoPublishingOptions', 'type': 'VideoPublishingOptions'},
    }

    def __init__(self, **kwargs):
        super(VideoSink, self).__init__(**kwargs)
        # Discriminator value identifying this derived type on the wire.
        self.type = '#Microsoft.VideoAnalyzer.VideoSink'  # type: str
        # 'video_name' is required (KeyError if absent); the rest are optional.
        self.video_name = kwargs['video_name']
        self.video_creation_properties = kwargs.get('video_creation_properties')
        self.video_publishing_options = kwargs.get('video_publishing_options')
class VideoSource(SourceNodeBase):
    """Source node that ingests content from a Video Analyzer video resource.

    Currently supported only with batch pipelines.

    All required parameters must be populated in order to send to Azure.

    :param type: Required. The discriminator for derived types.Constant filled by server.
    :type type: str
    :param name: Required. Node name. Must be unique within the topology.
    :type name: str
    :param video_name: Required. Name of the Video Analyzer video resource to be used as
     the source.
    :type video_name: str
    :param time_sequences: Required. A sequence of datetime ranges; the video source only
     picks up recorded media within these ranges.
    :type time_sequences: ~video_analyzer.models.TimeSequenceBase
    """

    _validation = {
        'type': {'required': True},
        'name': {'required': True},
        'video_name': {'required': True},
        'time_sequences': {'required': True},
    }

    _attribute_map = {
        'type': {'key': '@type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'video_name': {'key': 'videoName', 'type': 'str'},
        'time_sequences': {'key': 'timeSequences', 'type': 'TimeSequenceBase'},
    }

    def __init__(self, **kwargs):
        super(VideoSource, self).__init__(**kwargs)
        # Discriminator value identifying this derived type on the wire.
        self.type = '#Microsoft.VideoAnalyzer.VideoSource'  # type: str
        # Both are required; a missing keyword raises KeyError.
        self.video_name = kwargs['video_name']
        self.time_sequences = kwargs['time_sequences']
| StarcoderdataPython |
3276916 | # Generated by Django 3.0.7 on 2020-07-29 17:10
import django.contrib.sites.managers
from django.db import migrations
import django.db.models.manager
class Migration(migrations.Migration):
    # Auto-generated (Django 3.0.7). Records the managers declared on
    # PermafrostRole in the migration state: the default manager plus a
    # site-scoped 'on_site' manager (CurrentSiteManager filters querysets
    # by the current Site).
    dependencies = [
        ('permafrost', '0011_auto_20200506_2128'),
    ]
    operations = [
        migrations.AlterModelManagers(
            name='permafrostrole',
            managers=[
                ('objects', django.db.models.manager.Manager()),
                ('on_site', django.contrib.sites.managers.CurrentSiteManager()),
            ],
        ),
    ]
| StarcoderdataPython |
3351854 | <filename>candle/parsing_utils.py
from __future__ import absolute_import
import argparse
import os
import sys
import warnings
from pprint import pprint
import numpy as np
if sys.version_info >= (3, 8):
from typing import TypedDict # pylint: disable=no-name-in-module
else:
from typing_extensions import TypedDict
from typing import Any, List, Optional, Set, Type, Union
from .file_utils import directory_from_parameters
from .helper_utils import str2bool
# Seed for random generation -- default value used when 'rng_seed' is not given
DEFAULT_SEED = 7102
DEFAULT_TIMEOUT = -1  # no timeout (a negative value disables the time limit)
DEFAULT_DATATYPE = np.float32  # default floating point precision for model data
class ArgumentStruct:
    """Lightweight namespace built from a python dictionary.

    Each dictionary key becomes an attribute of the instance, which simplifies
    access to problem parameters: after instantiation both dictionary-style
    (via ``__dict__``) and attribute-style access can be used.
    """

    def __init__(self, **entries):
        for key, value in entries.items():
            setattr(self, key, value)
class ListOfListsAction(argparse.Action):
    """argparse action that decodes a string such as ``"1,2:3,4"`` (or
    ``"[1,2:3,4]"``) into a list-of-lists, e.g. ``[[1, 2], [3, 4]]``.

    A colon ':' separates the inner lists and a comma ',' separates the
    elements inside each list. Elements are cast with the ``type`` callable
    given at construction time.
    """

    def __init__(self, option_strings: str, dest, type: Any, **kwargs):
        """Initialize a ListOfListsAction object.

        Parameters
        ----------
        option_strings : string
            String to parse
        dest : object
            Object to store the output (the parsed list-of-lists).
        type : data type
            Callable used to cast the list elements. Defaults to np.int32
            when None is given.
        kwargs : object
            Other argparse.Action parameters.
        """
        super(ListOfListsAction, self).__init__(option_strings, dest, **kwargs)
        # Element type defaults to np.int32 when not supplied.
        self.dtype = np.int32 if type is None else type

    def __call__(self, parser, namespace, values, option_string=None):
        """Parse the input string into a list-of-lists and store it on the
        parsed-arguments namespace.

        Parameters
        ----------
        parser : ArgumentParser object
            Object that contains this action
        namespace : Namespace object
            Namespace object returned by parse_args().
        values : string
            The associated command-line argument (input string).
        option_string : string
            The option string used to invoke this action. (optional)
        """
        # Strip optional surrounding brackets, then split lists on ':' and
        # elements on ',', casting every element to the configured dtype.
        stripped = values.replace("[", "").replace("]", "")
        decoded_list = [
            [self.dtype(token) for token in sublist.split(",")]
            for sublist in stripped.split(":")
        ]
        setattr(namespace, self.dest, decoded_list)
class ParseDict(TypedDict):
    """Definition of the dictionary structure expected for the parsing of
    parameters."""

    # NOTE(review): total=True (the TypedDict default) makes every key below
    # formally required, although parse_from_dictlist fills several of them
    # in when absent — confirm whether total=False was intended.
    name: str  # long option name (used as '--name' and as the namespace dest)
    abv: Optional[str]  # one-letter abbreviation ('-a'), or None for no short flag
    action: Union[str, Type[ListOfListsAction]]  # argparse action, or 'list-of-lists'
    type: Optional[Any]  # callable used to convert the argument value
    default: Any  # default value (argparse.SUPPRESS omits the attribute when unset)
    help: str  # help text shown by argparse
    choices: List[str]  # allowed values, when the option is restricted
    nargs: str  # argparse nargs specification (e.g. '+')
class ConfigDict(TypedDict):
    """Definition of the dictionary structure expected for the configuration of
    parameters."""

    # --- general run configuration ---
    config_file: str
    data_type: str
    rng_seed: float
    train_bool: bool
    eval_bool: bool
    timeout: int
    gpus: Union[List[int], int]
    profiling: bool
    # --- input/output locations ---
    save_path: str
    model_name: str
    home_dir: str
    train_data: str
    val_data: str
    test_data: str
    output_dir: str
    data_url: str
    experiment_id: str
    run_id: str
    # --- logging ---
    verbose: bool
    logfile: str
    # --- data preprocessing ---
    scaling: str
    shuffle: bool
    feature_subsample: int
    # --- model architecture ---
    dense: Union[List[int], int]
    conv: Union[List[int], int]
    locally_connected: bool
    activation: str
    out_activation: str
    lstm_size: int
    recurrent_dropout: float
    dropout: float
    pool: int
    batch_normalization: bool
    loss: str
    optimizer: str
    metrics: str
    # --- training ---
    epochs: int
    batch_size: int
    learning_rate: float
    early_stop: bool
    momentum: float
    initialization: str
    val_split: float
    train_steps: int
    val_steps: int
    test_steps: int
    train_samples: int
    val_samples: int
    # --- cyclic learning rate ---
    clr_flag: bool
    clr_mode: str
    clr_base_lr: float
    clr_max_lr: float
    clr_gamma: float
    # --- checkpointing ---
    ckpt_restart_mode: str
    ckpt_checksum: bool
    ckpt_skip_epochs: int
    ckpt_directory: str
    ckpt_save_best: bool
    ckpt_save_best_metric: str
    ckpt_save_weights_only: bool
    ckpt_save_interval: int
    ckpt_keep_mode: str
    ckpt_keep_limit: int
# Core run-control options shared by every CANDLE benchmark: configuration
# file, precision, seeding, train/eval switches, timeout, GPU selection and
# profiling. Each entry is a ParseDict consumed by parse_from_dictlist.
basic_conf = [
    {
        "name": "config_file",
        "type": str,
        "default": argparse.SUPPRESS,
        "help": "specify model configuration file",
    },
    {
        "name": "data_type",
        "abv": "d",
        "type": str,
        "default": argparse.SUPPRESS,
        "choices": ["f16", "f32", "f64"],
        "help": "default floating point.",
    },
    {
        "name": "rng_seed",
        "abv": "r",
        "type": float,
        "default": argparse.SUPPRESS,
        "help": "random number generator seed.",
    },
    {"name": "train_bool", "type": str2bool, "default": True, "help": "train model."},
    {
        "name": "eval_bool",
        "type": str2bool,
        "default": argparse.SUPPRESS,
        "help": "evaluate model (use it for inference).",
    },
    {
        "name": "timeout",
        "action": "store",
        "type": int,
        "default": argparse.SUPPRESS,
        "help": "seconds allowed to train model (default: no timeout).",
    },
    {
        "name": "gpus",
        "nargs": "+",
        "type": int,
        "default": argparse.SUPPRESS,
        "help": "set IDs of GPUs to use.",
    },
    {
        "name": "profiling",
        "abv": "p",
        "type": str2bool,
        "default": False,
        "help": "Turn profiling on or off.",
    },
]
# Options describing where data comes from and where results are written
# (paths, filenames, data URL, and experiment/run identifiers).
input_output_conf = [
    {
        "name": "save_path",
        "abv": "s",
        "type": str,
        "default": argparse.SUPPRESS,
        "help": "file path to save model snapshots.",
    },
    {
        "name": "model_name",
        "type": str,
        "default": argparse.SUPPRESS,
        "help": "specify model name to use when building filenames for saving.",
    },
    {
        "name": "home_dir",
        "type": str,
        "default": argparse.SUPPRESS,
        "help": "set home directory.",
    },
    {
        "name": "train_data",
        "action": "store",
        "type": str,
        "default": argparse.SUPPRESS,
        "help": "training data filename.",
    },
    {
        "name": "val_data",
        "action": "store",
        "type": str,
        "default": argparse.SUPPRESS,
        "help": "validation data filename.",
    },
    {
        "name": "test_data",
        "type": str,
        "action": "store",
        "default": argparse.SUPPRESS,
        "help": "testing data filename.",
    },
    {
        "name": "output_dir",
        "type": str,
        "default": argparse.SUPPRESS,
        "help": "output directory.",
    },
    {
        "name": "data_url",
        "type": str,
        "default": argparse.SUPPRESS,
        "help": "set data source url.",
    },
    {
        "name": "experiment_id",
        "type": str,
        "default": "EXP000",
        "help": "set the experiment unique identifier.",
    },
    {
        "name": "run_id",
        "type": str,
        "default": "RUN000",
        "help": "set the run unique identifier.",
    },
]
# Options controlling console verbosity and the optional log file.
logging_conf = [
    {
        "name": "verbose",
        "abv": "v",
        "type": str2bool,
        "default": False,
        "help": "increase output verbosity.",
    },
    {"name": "logfile", "abv": "l", "type": str, "default": None, "help": "log file"},
]
# Options controlling feature scaling, shuffling and feature subsampling
# applied to the data set before training.
data_preprocess_conf = [
    {
        "name": "scaling",
        "type": str,
        "default": argparse.SUPPRESS,
        "choices": ["minabs", "minmax", "std", "none"],
        "help": "type of feature scaling; 'minabs': to [-1,1]; 'minmax': to [0,1], 'std': standard unit normalization; 'none': no normalization.",
    },
    {
        "name": "shuffle",
        "type": str2bool,
        "default": False,
        "help": "randomly shuffle data set (produces different training and testing partitions each run depending on the seed)",
    },
    {
        "name": "feature_subsample",
        "type": int,
        "default": argparse.SUPPRESS,
        "help": "number of features to randomly sample from each category (cellline expression, drug descriptors, etc), 0 means using all features",
    },
]
# Options describing the network architecture: layer sizes, activations,
# regularization, loss, optimizer and evaluation metrics.
model_conf = [
    {
        "name": "dense",
        "nargs": "+",
        "type": int,
        "help": "number of units in fully connected layers in an integer array.",
    },
    {
        "name": "conv",
        "nargs": "+",
        "type": int,
        "default": argparse.SUPPRESS,
        "help": "integer array describing convolution layers: conv1_filters, conv1_filter_len, conv1_stride, conv2_filters, conv2_filter_len, conv2_stride ....",
    },
    {
        "name": "locally_connected",
        "type": str2bool,
        "default": argparse.SUPPRESS,
        "help": "use locally connected layers instead of convolution layers.",
    },
    {
        "name": "activation",
        "abv": "a",
        "type": str,
        "default": argparse.SUPPRESS,
        "help": "keras activation function to use in inner layers: relu, tanh, sigmoid...",
    },
    {
        "name": "out_activation",
        "type": str,
        "default": argparse.SUPPRESS,
        "help": "keras activation function to use in out layer: softmax, linear, ...",
    },
    {
        "name": "lstm_size",
        "nargs": "+",
        "type": int,
        "default": argparse.SUPPRESS,
        "help": "integer array describing size of LSTM internal state per layer.",
    },
    {
        "name": "recurrent_dropout",
        "action": "store",
        "type": float,
        "default": argparse.SUPPRESS,
        "help": "ratio of recurrent dropout.",
    },
    {
        "name": "dropout",
        "type": float,
        "default": argparse.SUPPRESS,
        "help": "ratio of dropout used in fully connected layers.",
    },
    {
        "name": "pool",
        "type": int,
        "default": argparse.SUPPRESS,
        "help": "pooling layer length.",
    },
    {
        "name": "batch_normalization",
        "type": str2bool,
        "default": argparse.SUPPRESS,
        "help": "use batch normalization.",
    },
    {
        "name": "loss",
        "type": str,
        "default": argparse.SUPPRESS,
        "help": "keras loss function to use: mse, ...",
    },
    {
        "name": "optimizer",
        "type": str,
        "default": argparse.SUPPRESS,
        "help": "keras optimizer to use: sgd, rmsprop, ...",
    },
    {
        "name": "metrics",
        "type": str,
        "default": argparse.SUPPRESS,
        "help": "metrics to evaluate performance: accuracy, ...",
    },
]
# Options controlling the training loop: epochs, batch size, optimizer
# hyper-parameters, weight initialization and data-partition overrides.
training_conf = [
    {
        "name": "epochs",
        "type": int,
        "abv": "e",
        "default": argparse.SUPPRESS,
        "help": "number of training epochs.",
    },
    {
        "name": "batch_size",
        "type": int,
        "abv": "z",
        "default": argparse.SUPPRESS,
        "help": "batch size.",
    },
    {
        "name": "learning_rate",
        "abv": "lr",
        "type": float,
        "default": argparse.SUPPRESS,
        "help": "overrides the learning rate for training.",
    },
    {
        "name": "early_stop",
        "type": str2bool,
        "default": argparse.SUPPRESS,
        "help": "activates keras callback for early stopping of training in function of the monitored variable specified.",
    },
    {
        "name": "momentum",
        "type": float,
        "default": argparse.SUPPRESS,
        "help": "overrides the momentum to use in the SGD optimizer when training.",
    },
    {
        "name": "initialization",
        "type": str,
        "default": argparse.SUPPRESS,
        "choices": [
            "constant",
            "uniform",
            "normal",
            "glorot_uniform",
            "glorot_normal",
            "lecun_uniform",
            "he_normal",
        ],
        "help": "type of weight initialization; 'constant': to 0; 'uniform': to [-0.05,0.05], 'normal': mean 0, stddev 0.05; 'glorot_uniform': [-lim,lim] with lim = sqrt(6/(fan_in+fan_out)); 'lecun_uniform' : [-lim,lim] with lim = sqrt(3/fan_in); 'he_normal' : mean 0, stddev sqrt(2/fan_in).",
    },
    {
        "name": "val_split",
        "type": float,
        "default": argparse.SUPPRESS,
        "help": "fraction of data to use in validation.",
    },
    {
        "name": "train_steps",
        "type": int,
        "default": argparse.SUPPRESS,
        "help": "overrides the number of training batches per epoch if set to nonzero.",
    },
    {
        "name": "val_steps",
        "type": int,
        "default": argparse.SUPPRESS,
        "help": "overrides the number of validation batches per epoch if set to nonzero.",
    },
    {
        "name": "test_steps",
        "type": int,
        "default": argparse.SUPPRESS,
        "help": "overrides the number of test batches per epoch if set to nonzero.",
    },
    {
        "name": "train_samples",
        "type": int,
        "default": argparse.SUPPRESS,
        "help": "overrides the number of training samples if set to nonzero.",
    },
    {
        "name": "val_samples",
        "type": int,
        "default": argparse.SUPPRESS,
        "help": "overrides the number of validation samples if set to nonzero.",
    },
]
# Options for the cyclic learning rate (CLR) schedule.
cyclic_learning_conf = [
    {
        "name": "clr_flag",
        "type": str2bool,
        "default": argparse.SUPPRESS,
        "help": "CLR flag (boolean).",
    },
    {
        "name": "clr_mode",
        "type": str,
        "default": argparse.SUPPRESS,
        "choices": ["trng1", "trng2", "exp"],
        "help": "CLR mode (default: trng1).",
    },
    {
        "name": "clr_base_lr",
        "type": float,
        "default": argparse.SUPPRESS,
        "help": "Base lr for cycle lr.",
    },
    {
        "name": "clr_max_lr",
        "type": float,
        "default": argparse.SUPPRESS,
        "help": "Max lr for cycle lr.",
    },
    {
        "name": "clr_gamma",
        "type": float,
        "default": argparse.SUPPRESS,
        "help": "Gamma parameter for learning cycle LR.",
    },
]
# Options controlling checkpoint saving and restarting.
ckpt_conf = [
    {
        "name": "ckpt_restart_mode",
        "type": str,
        "default": "auto",
        "choices": ["off", "auto", "required"],
        "help": "Mode to restart from a saved checkpoint file, choices are 'off', 'auto', 'required'.",
    },
    {
        "name": "ckpt_checksum",
        "type": str2bool,
        "default": False,
        "help": "Checksum the restart file after read+write.",
    },
    {
        "name": "ckpt_skip_epochs",
        "type": int,
        "default": 0,
        "help": "Number of epochs to skip before saving epochs.",
    },
    {
        "name": "ckpt_directory",
        "type": str,
        "default": "./save",
        "help": "Base directory in which to save checkpoints.",
    },
    {
        "name": "ckpt_save_best",
        "type": str2bool,
        "default": True,
        "help": "Toggle saving best model.",
    },
    {
        "name": "ckpt_save_best_metric",
        "type": str,
        "default": "val_loss",
        "help": "Metric for determining when to save best model.",
    },
    {
        "name": "ckpt_save_weights_only",
        "type": str2bool,
        "default": False,
        "help": "Toggle saving only weights (not optimizer) (NYI).",
    },
    {
        "name": "ckpt_save_interval",
        "type": int,
        "default": 0,
        "help": "Interval to save checkpoints.",
    },
    {
        "name": "ckpt_keep_mode",
        "type": str,
        "default": "linear",
        "choices": ["linear", "exponential"],
        "help": "Checkpoint saving mode, choices are 'linear' or 'exponential'.",
    },
    {
        "name": "ckpt_keep_limit",
        "type": int,
        "default": 1000000,
        "help": "Limit checkpoints to keep.",
    },
]
# All option groups recognized by CANDLE; parse_common attaches every group
# to the parser and PARAMETERS_CANDLE is derived from this list.
registered_conf = [
    basic_conf,
    input_output_conf,
    logging_conf,
    data_preprocess_conf,
    model_conf,
    training_conf,
    cyclic_learning_conf,
    ckpt_conf,
]
def extract_keywords(lst_dict, kw):
    """Collect the value stored under key ``kw`` from every dictionary in
    ``lst_dict`` and return the values as a list, preserving order.

    Parameters
    ----------
    lst_dict : python list of dictionaries
        list to extract keywords from
    kw : string
        keyword to extract from dictionary
    """
    extracted = []
    for entry in lst_dict:
        extracted.append(entry[kw])
    return extracted
# Extract list of parameters in registered configuration.
# Flat list of every CANDLE-defined parameter name; used by
# check_file_parameters_exists to validate configuration-file keywords.
PARAMETERS_CANDLE = [
    item for lst in registered_conf for item in extract_keywords(lst, "name")
]
# Pairs of parameters that must not be enabled simultaneously.
CONFLICT_LIST = [["clr_flag", "warmup_lr"], ["clr_flag", "reduce_lr"]]


def check_flag_conflicts(params: "ConfigDict"):
    """
    Check that mutually exclusive parameters are not enabled together. Each
    entry of the global CONFLICT_LIST names a set of flags of which at most
    one may be True. Raises an exception when two or more flags of the same
    set are specified simultaneously.

    :param Dict params: parameter dictionary to check
    """
    for exclusive_group in CONFLICT_LIST:
        # Count flags in the group that are present AND literally True.
        active = [flag for flag in exclusive_group if params.get(flag) is True]
        if len(active) > 1:
            raise Exception(
                "ERROR ! Conflict in flag specification. These flags should not be used together: "
                + str(sorted(exclusive_group))
                + "... Exiting"
            )
def check_file_parameters_exists(
    params_parser: "ConfigDict", params_benchmark: "ConfigDict", params_file: "ConfigDict"
):
    """Verify that every keyword in the configuration file is recognizable by
    the command line parser, i.e. that no unknown keywords appear in the
    configuration file. Unknown keywords trigger a RuntimeWarning.

    Parameters
    ----------
    params_parser : python dictionary
        Includes parameters set via the command line.
    params_benchmark : python list
        Includes additional parameters defined in the benchmark.
    params_file : python dictionary
        Includes parameters read from the configuration file.

    Global:
    PARAMETERS_CANDLE : python list
        Includes all the core keywords that are specified in CANDLE.
    """
    # Keywords recognized anywhere: command line (and CANDLE supervisor),
    # benchmark definition, and the CANDLE core parameter list.
    recognized = set(vars(params_parser).keys())
    recognized.update(entry["name"] for entry in params_benchmark)
    recognized.update(PARAMETERS_CANDLE)
    # Configuration-file keywords that are not part of the CANDLE specs.
    unknown = set(params_file.keys()).difference(recognized)
    if unknown:
        message = (
            "These keywords used in the configuration file are not defined in CANDLE: "
            + str(sorted(unknown))
        )
        warnings.warn(message, RuntimeWarning)
def finalize_parameters(bmk):
    """
    Utility to parse parameters in common as well as parameters particular
    to each benchmark, merging configuration-file and command-line values.

    :param Benchmark bmk: Object that has benchmark filepaths and specifications
    :return: Dictionary with all the parameters necessary to run the benchmark.
        Command line overwrites config file specifications.
    """
    # Parse common and benchmark parameters
    bmk.parse_parameters()
    # print('Args:', args)
    # Get parameters from configuration file
    # Reads parameter subset, just checking if a config_file has been set
    # by command line (the parse_known_args() function allows a partial
    # parsing)
    aux = bmk.parser.parse_known_args()
    try: # Try to get the 'config_file' option
        conffile_txt = aux[0].config_file
    except AttributeError: # The 'config_file' option was not set by command-line
        conffile = bmk.conffile # use default file
    else: # a 'config_file' has been set --> use this file
        # Relative paths are resolved against the benchmark's own directory.
        if os.path.isabs(conffile_txt):
            conffile = conffile_txt
        else:
            conffile = os.path.join(bmk.file_path, conffile_txt)
    # print("Configuration file: ", conffile)
    fileParameters = bmk.read_config_file(
        conffile
    )  # parameters read from the resolved configuration file
    # Get command-line parameters
    args = bmk.parser.parse_args()
    # print ('Params:', fileParameters)
    # Check keywords from file against CANDLE common and module definitions
    bmk_dict = bmk.additional_definitions
    check_file_parameters_exists(args, bmk_dict, fileParameters)
    # Consolidate parameter set. Command-line parameters overwrite file configuration
    gParameters = args_overwrite_config(args, fileParameters)
    # Check that required set of parameters has been defined
    bmk.check_required_exists(gParameters)
    print("Params:")
    pprint(gParameters)
    # Check that no keywords conflict
    check_flag_conflicts(gParameters)
    return gParameters
def args_overwrite_config(args, config: "ConfigDict"):
    """Merge command-line parameters into the file configuration.

    Every attribute parsed from the command line replaces the matching entry
    in ``config`` (command line wins); afterwards a few defaults are filled
    in and the datatype keyword is normalized. The input dictionary is
    modified in place and returned.

    Parameters
    ----------
    args : ArgumentParser object
        Parameters specified via command-line
    config : python dictionary
        Parameters read from configuration file
    """
    params = config
    for key, value in vars(args).items():
        params[key] = value
    # Normalize the floating point precision keyword into a numpy dtype.
    if "data_type" not in params:
        params["data_type"] = DEFAULT_DATATYPE
    elif params["data_type"] in ("f16", "f32", "f64"):
        params["data_type"] = get_choice(params["data_type"])
    # Resolve the output directory (derived from other parameters when absent).
    if "output_dir" not in params:
        params["output_dir"] = directory_from_parameters(params)
    else:
        params["output_dir"] = directory_from_parameters(params, params["output_dir"])
    params.setdefault("rng_seed", DEFAULT_SEED)
    params.setdefault("timeout", DEFAULT_TIMEOUT)
    return params
def get_choice(name: str):
    """Translate a datatype keyword (``"f16"``, ``"f32"`` or ``"f64"``) into
    the corresponding numpy floating point type; raise on unknown keywords."""
    mapping = {
        "f16": np.float16,
        "f32": np.float32,
        "f64": np.float64,
    }
    if name not in mapping:
        raise Exception('No mapping found for "{}"'.format(name))
    return mapping[name]
def parse_from_dictlist(dictlist: "List[ParseDict]", parser):
    """
    Functionality to parse options.

    Adds one argparse option per entry of ``dictlist`` to ``parser``. Missing
    optional specification keys ('type', 'default', 'help', 'abv') are filled
    in first (note: this mutates the entries in place, matching the original
    behavior). The previous implementation repeated the identical
    ``add_argument`` call in every abbreviation/no-abbreviation branch (~100
    duplicated lines); the duplication is removed by building the flag list
    and keyword arguments once.

    :param List dictlist: Specification of parameters
    :param ArgumentParser parser: Current parser
    :return: consolidated parameters
    :rtype: ArgumentParser
    """
    for d in dictlist:
        # Fill in optional specification keys with their defaults.
        if "type" not in d:
            d["type"] = None
        if "default" not in d:
            d["default"] = argparse.SUPPRESS
        if "help" not in d:
            d["help"] = ""
        if "abv" not in d:
            d["abv"] = None
        # The long flag, preceded by the one-letter abbreviation when given.
        if d["abv"] is None:
            flags = ["--" + d["name"]]
        else:
            flags = ["-" + d["abv"], "--" + d["name"]]
        if "action" in d:  # Actions
            if d["action"] == "list-of-lists":
                # Non standard. Specific functionality has been added.
                d["action"] = ListOfListsAction
                parser.add_argument(
                    *flags,
                    dest=d["name"],
                    action=d["action"],
                    type=d["type"],
                    default=d["default"],
                    help=d["help"],
                )
            elif (d["action"] == "store_true") or (d["action"] == "store_false"):
                raise Exception(
                    "The usage of store_true or store_false cannot be undone in the command line. Use type=str2bool instead."
                )
            else:
                parser.add_argument(
                    *flags,
                    action=d["action"],
                    default=d["default"],
                    help=d["help"],
                    type=d["type"],
                )
        else:  # Non actions
            kwargs = {"default": d["default"], "help": d["help"]}
            if "nargs" in d:  # variable number of parameters
                kwargs["nargs"] = d["nargs"]
                if "choices" in d:  # choices with variable parameters
                    kwargs["choices"] = d["choices"]
                else:  # free variable parameters
                    kwargs["type"] = d["type"]
            elif "choices" in d:  # fixed number of parameters from choices
                kwargs["choices"] = d["choices"]
            else:  # one free parameter
                kwargs["type"] = d["type"]
            parser.add_argument(*flags, **kwargs)
    return parser
def parse_common(parser):
    """Attach every registered CANDLE option group to the given parser.

    Parameters
    ----------
    parser : ArgumentParser object
        Current parser
    """
    for option_group in registered_conf:
        parser = parse_from_dictlist(option_group, parser)
    return parser
| StarcoderdataPython |
4807515 | <reponame>baikov/allflags
# Generated by Django 3.1.8 on 2021-10-18 19:58
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``flags`` app.

    Adds ``created_date``/``updated_date`` timestamps and ``is_published``
    flags to several models, adds ``ordering`` fields, and normalizes the
    existing ``ordering`` fields to a default of 500.
    """
    dependencies = [
        ('flags', '0061_auto_20211018_2227'),
    ]
    operations = [
        # For each new non-nullable auto_now_add column, Django uses
        # ``default=django.utils.timezone.now`` (with preserve_default=False)
        # only as a one-off backfill value for existing rows.
        migrations.AddField(
            model_name='color',
            name='created_date',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='color',
            name='is_published',
            field=models.BooleanField(default=False, verbose_name='Published'),
        ),
        migrations.AddField(
            model_name='color',
            name='updated_date',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AddField(
            model_name='country',
            name='ordering',
            field=models.PositiveSmallIntegerField(default=500, verbose_name='Ordering'),
        ),
        migrations.AddField(
            model_name='currency',
            name='created_date',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='currency',
            name='is_published',
            field=models.BooleanField(default=False, verbose_name='Published'),
        ),
        migrations.AddField(
            model_name='currency',
            name='ordering',
            field=models.PositiveSmallIntegerField(default=500, verbose_name='Ordering'),
        ),
        migrations.AddField(
            model_name='currency',
            name='updated_date',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AddField(
            model_name='downloadablepicturefile',
            name='created_date',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='downloadablepicturefile',
            name='is_published',
            field=models.BooleanField(default=False, verbose_name='Published'),
        ),
        migrations.AddField(
            model_name='downloadablepicturefile',
            name='updated_date',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AddField(
            model_name='downloadablepicturefilepreview',
            name='created_date',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='downloadablepicturefilepreview',
            name='updated_date',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AddField(
            model_name='flagfact',
            name='created_date',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='flagfact',
            name='updated_date',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AddField(
            model_name='historicalflag',
            name='created_date',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='historicalflag',
            name='updated_date',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AddField(
            model_name='mainflag',
            name='ordering',
            field=models.PositiveSmallIntegerField(default=500, verbose_name='Ordering'),
        ),
        # The AlterField entries below only change the default of existing
        # ``ordering`` columns to 500; no data migration is involved.
        migrations.AlterField(
            model_name='color',
            name='ordering',
            field=models.PositiveSmallIntegerField(default=500, verbose_name='Ordering'),
        ),
        migrations.AlterField(
            model_name='colorgroup',
            name='ordering',
            field=models.PositiveSmallIntegerField(default=500, verbose_name='Ordering'),
        ),
        migrations.AlterField(
            model_name='flagelement',
            name='ordering',
            field=models.PositiveSmallIntegerField(default=500, verbose_name='Ordering'),
        ),
        migrations.AlterField(
            model_name='flagfact',
            name='ordering',
            field=models.PositiveSmallIntegerField(default=500, verbose_name='Ordering'),
        ),
        migrations.AlterField(
            model_name='historicalflag',
            name='ordering',
            field=models.PositiveSmallIntegerField(default=500, verbose_name='Ordering'),
        ),
        migrations.AlterField(
            model_name='region',
            name='ordering',
            field=models.PositiveSmallIntegerField(default=500, verbose_name='Ordering'),
        ),
    ]
| StarcoderdataPython |
3334606 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import filer.fields.image
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the ``ImageProperties`` and ``ObjectProperties`` models.

    NOTE(review): the ``default=b''`` bytes defaults are a Python-2-era
    artifact of the auto-generator, and the ``image_file`` ForeignKey has no
    ``on_delete`` argument, which is only valid on Django < 2.0 — confirm the
    target Django version before reusing this migration.
    """
    dependencies = [
        ('filer', '0002_auto_20150606_2003'),
    ]
    operations = [
        migrations.CreateModel(
            name='ImageProperties',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('image', filer.fields.image.FilerImageField(related_name='object_image', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='filer.Image', null=True)),
            ],
        ),
        migrations.CreateModel(
            name='ObjectProperties',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('code', models.CharField(default=b'', max_length=255)),
                ('sequence', models.IntegerField(default=0)),
                ('name', models.CharField(default=b'', max_length=255)),
                ('x', models.DecimalField(max_digits=30, decimal_places=25)),
                ('y', models.DecimalField(max_digits=30, decimal_places=25)),
                ('width', models.DecimalField(max_digits=30, decimal_places=25)),
                ('height', models.DecimalField(max_digits=30, decimal_places=25)),
                ('image_width', models.DecimalField(null=True, max_digits=30, decimal_places=25, blank=True)),
                ('image_height', models.DecimalField(null=True, max_digits=30, decimal_places=25, blank=True)),
                ('image_file', models.ForeignKey(related_name='layout_image', blank=True, to='layout.ImageProperties', null=True)),
            ],
        ),
    ]
| StarcoderdataPython |
3224062 | <gh_stars>0
import requests
class GitRepository:
    """Thin wrapper around the GitHub REST API for a single repository."""

    GITHUB_API_URL = "https://api.github.com"

    def __init__(self, repo_name, access_token):
        """
        Parameters
        ----------
        repo_name : str
            Full repository name, e.g. ``"owner/repo"``.
        access_token : str
            Personal access token used to authenticate API calls.
        """
        self.repo_name = repo_name
        self.access_token = access_token

    def retrieve_change_commits(self, since, until):
        """Return the parsed JSON list of commits between *since* and *until*.

        Both bounds are ``datetime`` objects; they are serialized to the
        ISO-8601 UTC format GitHub expects.
        """
        request_uri = f"{self.GITHUB_API_URL}/repos/{self.repo_name}/commits"
        params = {
            "since": since.strftime("%Y-%m-%dT%H:%M:%SZ"),
            "until": until.strftime("%Y-%m-%dT%H:%M:%SZ"),
        }
        # BUG FIX: GitHub deprecated and then removed authentication via the
        # ``access_token`` query parameter; the token must be sent in the
        # Authorization header instead.
        headers = {"Authorization": f"token {self.access_token}"}
        r = requests.get(request_uri, params=params, headers=headers)
        return r.json()
| StarcoderdataPython |
1600722 | from benchmarks.htap.lib.controller import HTAPController
def add_parser(subparsers):
    """Register the ``htap`` sub-command and all of its CLI options."""
    htap = subparsers.add_parser('htap')
    # -- workload sizing -------------------------------------------------
    htap.add_argument('--oltp-workers', type=int, default=32,
                      help='The number of OLTP workers executing TPC-C-like transactions (i.e. simulated clients), default: 32.')
    htap.add_argument('--olap-workers', type=int, default=1,
                      help='The number of OLAP workers (streams) running TPC-H-like queries, default: 1.')
    htap.add_argument('--target-tps', type=int, default=None,
                      help='The target TPS for the OLTP workload, default: unlimited.')
    htap.add_argument('--duration', type=int, default=60,
                      help='How many seconds the benchmark should run for, default: 60.')
    htap.add_argument('--olap-timeout', default='5min',
                      help='Timeout for OLAP queries, default: 5 minutes')
    htap.add_argument('--csv-interval', type=int, default=10,
                      help='How often to report stats to the csv files in seconds, default: 10')
    # -- behaviour switches ----------------------------------------------
    htap.add_argument('--dry-run', action='store_true',
                      help="Only generate transactions and analytical queries but don't send them to the database. Can be useful for measuring script throughput.")
    htap.add_argument('--monitoring-interval', type=float, default=1,
                      help='Number of seconds to wait between updates of the monitoring display, default: 1.0')
    htap.add_argument('--stats-dsn',
                      help='The DSN to use for collecting statistics into a database. Not defining it will disable statistics collection.')
    htap.add_argument('--explain-analyze', action='store_true', default=False,
                      help='Whether to run EXPLAIN ANALYZE. Will save plans into the "plan" directory.')
    htap.add_argument('--use-server-side-cursors', action='store_true', default=False, required=False,
                      help='Use server-side cursors for executing the queries')
    htap.add_argument('--dont-wait-until-enough-data', action='store_true', default=False, required=False,
                      help='Do NOT wait until there is enough data for OLAP queries to run with a constant dataset size')
    htap.add_argument('--olap-dsns', nargs='+', required=False,
                      help='Use separate olap servers')
    # -- output configuration --------------------------------------------
    htap.add_argument('--output', choices=['csv', 'print'], default='print', nargs='+',
                      help='How the results output should look like. Multiple options possible, separated by space')
    htap.add_argument('--csv-file', default='results.csv',
                      help='Where to save the summary csv file, if csv output is selected. The default is results.csv in the current directory.')
    htap.add_argument('--ignored-queries', nargs='+', default=[], required=False,
                      help='Optional list of ignored queries for the OLAP workload.')
def run(args):
    """Build an HTAPController from the parsed *args* and execute the benchmark."""
    HTAPController(args).run()
| StarcoderdataPython |
3233412 | <filename>client.py
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: client
Description : ^_^ !!!
Author : anglemiku
Eamil : <EMAIL>
date: 2019-09-09
-------------------------------------------------
Change Activity: 2019-09-09:
-------------------------------------------------
"""
import socket
'''
客户端
接下来我们写一个简单的客户端实例连接到以上创建的服务。端口号为 12345。
socket.connect(hosname, port ) 方法打开一个 TCP 连接到主机为 hostname 端口为 port 的服务商。
连接后我们就可以从服务端获取数据,记住,操作完成后需要关闭连接。
'''
def clintSocket():
    """Connect to the local server on port 12345, print its greeting, and close.

    BUG FIX: the original used the Python 2 ``print`` statement, which is a
    SyntaxError on Python 3.  ``recv`` returns bytes on Python 3, so the
    payload is decoded before printing.
    """
    s = socket.socket()          # create the TCP socket object
    host = socket.gethostname()  # local host name
    port = 12345                 # must match the server's port number
    s.connect((host, port))
    print(s.recv(1024).decode())  # read up to 1 KiB of the server's reply
    s.close()
if __name__ == '__main__':
    clintSocket()
    pass
1614090 | import os.path as path
from pathlib import Path
from dslib.helpers import replace_csv_header
def create_path(p, reference=__file__):
    """Resolve *p* relative to *reference*.

    If *reference* is an existing file, *p* is joined to that file's parent
    directory; otherwise *p* is joined to *reference* itself.
    """
    anchor = Path(path.abspath(reference))
    base = anchor.parent if anchor.is_file() else anchor
    return Path(base, p)
def create_data_file(data, header):
    """Create a CSV file whose header is replaced with the one from *header*.

    Both arguments are resolved relative to this module via ``create_path``.
    """
    data_path = create_path(data)
    header_path = create_path(header)
    replace_csv_header(data_path, header_path)
| StarcoderdataPython |
3320226 | <reponame>KesleyK/projeto-redes-unb2020<filename>exemplos-aulas/sockets/client/ip_resolve.py<gh_stars>1-10
import socket

ip = "192.168.3.11"
# Reverse-resolve the address; gethostbyaddr returns the tuple
# (hostname, aliaslist, ipaddrlist) — we only print the primary hostname.
hostname, _aliases, _addresses = socket.gethostbyaddr(ip)
print(hostname)
1796499 | <filename>bot.py
# Реализация бота, который проводит исследовательское тестирование приложения
# TODO:
# > реализовать класс фильтров
# > реализовать получение списка элементов через getEventListeners (ищем элементы, которые в
# теории отреагируют на наше действие)
# > реализовать получение списка элементов через свойство is_displayed
# > надёжное обращение к отдельному элементу через его локатор
# > добавить обработку исключений при "протухании" элемента
# > фильтровать однотонные элементы
# > отключать таймеры и клиентские события на странице, что бы не было искажений состояний
# > реализовать механизм самодиагностики (e.g. анализ скриншота всей страницы), что бы видеть,
# когда фильтры убирают элементы, которые не следовало убирать
# > написать юнит-тесты через тестовые страницы
# > реализовать аутентификацию через POST запросы
# > реализовать закрытие лишних окон
# > реализовать фильтр для элементов - убирать элементы, которые не видны (координаты большие)
# > реализовать проверку интерактивности элемента по наведению на него
# > реализовать вспомогательный метод для дампа текущего состояния (localstorage, скриншот, etc.)
# > реализовать фильтр недоступных методов в затемнении (e.g. z-index)
# > для ситуаций, когда что-то идёт не так (e.g. элемент не кликается) реализовать механизм сохранения состояния для
# последующего анализа ситуации
from element import Element
from terminal import *
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import time
import random, string
def randomword(length):
    """Return a random string of *length* lowercase ASCII letters."""
    return ''.join(random.choice(string.ascii_lowercase) for _ in range(length))
class Bot:
    """Exploratory-testing bot: drives a Chrome browser through a web
    application, discovering interactive elements and exercising them."""
    def __init__(self, url, auth=True):
        """Create the Chrome driver, open *url* and (optionally) log in."""
        chrome_options = Options()
        # chrome_options.add_argument("--disable-extensions")
        # chrome_options.add_argument("--disable-gpu")
        # chrome_options.add_argument("--no-sandbox) # linux only
        # chrome_options.add_argument("--headless")
        self.driver = webdriver.Chrome('chromedriver/chromedriver', options=chrome_options)
        self.driver.get(url)
        if auth:
            self.auth()
    def wait(self, n=1):
        """Wait until the post-action network activity settles and the page
        reaches a new state, then sleep *n* extra seconds."""
        # JS snippet returning the browser's performance entries (network log).
        command = """var performance = window.performance ||
            window.mozPerformance ||
            window.msPerformance ||
            window.webkitPerformance ||
            {};
            var network = performance.getEntries() ||
            {};
            return network;"""
        prev_reqs = 0
        while True:
            # TODO: for some reason chromedriver dies here when working with google.com and yandex.ru
            current_reqs = len(self.driver.execute_script(command))
            # Fewer than 5 new performance entries since the last poll
            # means the page is considered settled.
            if current_reqs - prev_reqs < 5:
                break
            else:
                prev_reqs = current_reqs
                time.sleep(2)
        time.sleep(n)
        # If an action opened a new window/tab, switch to the newest one.
        if len(self.driver.window_handles) > 1:
            self.driver.switch_to.window(self.driver.window_handles[-1])
    def auth(self):
        """Log in through the application's login form.
        NOTE(review): demo credentials are hard-coded here."""
        self.wait(1)
        self.driver.find_element_by_css_selector("[name='auth-loginForm_login'] input").send_keys("Демо_тензор")
        self.driver.find_element_by_css_selector("[name='auth-loginForm_password'] input").send_keys("Демо123")
        self.driver.find_element_by_css_selector("button.auth-Form__submit").click()
        self.wait(2)
    def get_elements(self):
        """Build the list of page elements the bot can interact with,
        narrowing the full DOM down through a chain of filters."""
        driver = self.driver
        log_start('Get elements: ')
        elements = driver.find_elements_by_xpath("//*")
        elements = [Element(driver, elm) for elm in elements]
        log_add('({0}) -> '.format(len(elements)))
        elements = [element for element in elements if not element.invalid]
        log_add('({0}) -> '.format(len(elements)))
        # self.wait()
        # Filter #1: drop tiny elements that are hard to click.
        elements = [elm for elm in elements if elm.width >= 6 and elm.height >= 6]
        # Filter #2: record which elements visually contain which others.
        for elm in elements:
            for other_elm in elements:
                if (elm.x <= other_elm.x) and \
                   (elm.y <= other_elm.y) and \
                   (elm.x + elm.width >= other_elm.x + other_elm.width) and \
                   (elm.y + elm.height >= other_elm.y + other_elm.height):
                    elm.childs.append(other_elm)
        log_add('({0}) -> '.format(len(elements)))
        # Filter #3: collapse groups of elements with identical size/position.
        # NOTE(review): this removes items from `elements` while iterating it
        # — order-dependent; confirm no element is skipped as a result.
        for elm in elements:
            if len(elm.childs) < 2:
                continue
            same = True
            for child in elm.childs:
                if child.rect != elm.rect:
                    same = False
            if same:
                for child in elm.childs:
                    if child != elm:
                        elements.remove(child)
                elm.childs = ['same']
        log_add('({0}) -> '.format(len(elements)))
        # Filter #4: drop elements that still contain other elements.
        elements = [elm for elm in elements if len(elm.childs) == 1]
        log_add('({0}) -> '.format(len(elements)))
        # Filter #5: drop invisible elements.
        elements = [elm for elm in elements if elm.is_displayed()]
        log_add('({0}) -> '.format(len(elements)))
        # Filter #6: drop elements below the bottom edge of the window.
        window_height = driver.execute_script('return document.documentElement.clientHeight')
        elements = [elm for elm in elements if elm.y < window_height]
        log_add('({0})'.format(len(elements)))
        log_end()
        return elements
    def move_to(self, url):
        """Clear localStorage, navigate to *url* and wait for it to settle."""
        self.driver.execute_script('localStorage.clear()')
        self.driver.get(url)
        self.wait()
        # If the session was terminated — log in again
        # if '/auth/' in self.driver.current_url:
        #     self.auth()
    def move_by_path(self, path):
        """Click through the given list of elements one by one, typing into
        anything that looks like an input field."""
        for elm in path:
            if 'input' in elm.xpath or 'input' in elm.get_innerHTML():
                # elm.input(f'{randomword(10)}\n')
                elm.input(f'Test!!!\n')
            else:
                elm.click()
            self.wait()
    def kill(self):
        """Close the current browser window."""
        self.driver.close()
    def current_url(self):
        """Return the URL of the page currently loaded in the driver."""
        return self.driver.current_url
    def get_screenshot(self):
        """Return a PNG screenshot of the current page as bytes."""
        return self.driver.get_screenshot_as_png()
    def close_all_windows(self):
        """Close every extra window/tab and switch back to the first one."""
        if len(self.driver.window_handles) > 1:
            for w in self.driver.window_handles[1:]:
                self.driver.switch_to.window(w)
                self.driver.close()
            self.driver.switch_to.window(self.driver.window_handles[0])
| StarcoderdataPython |
4809001 | <filename>scrapy_plus/project_dir/spiders_middlewares.py<gh_stars>1-10
class BaiduMiddleware:
    """Middleware hooks for the Baidu spider; each hook logs its invocation
    and passes the object through unchanged."""

    def process_request(self, request):
        """Process the request headers; add the default user-agent."""
        print("BaiduMiddleware: process_request")
        return request

    def process_response(self, response):
        """Process the downloaded response object."""
        print("BaiduMiddleware: process_response")
        return response
class DoubanMiddleware:
    """Middleware hooks for the Douban spider; each hook logs its invocation
    and passes the object through unchanged."""

    def process_request(self, request):
        """Process the request headers; add the default user-agent."""
        print("DoubanMiddleware: process_request")
        return request

    def process_response(self, response):
        """Process the downloaded response object."""
        print("DoubanMiddleware: process_response")
        return response
34135 | <reponame>theblueskies/prose
import os
from newspaper import Article
# Download one news article, extract its body text with newspaper's parser,
# and save the plain text under testdata/article.txt.
url = 'http://fox13now.com/2013/12/30/new-year-new-laws-obamacare-pot-guns-and-drones/'
article = Article(url)
article.download()  # fetches the HTML over the network
article.parse()     # extracts the main article text from the HTML
with open(os.path.join('testdata', 'article.txt'), 'w') as f:
    f.write(article.text)
3340836 | <gh_stars>1-10
import os
import shutil
# Script intent (per the commented-out lines): copy/move subtitle files from
# the original folder to a new one and delete any .zip archives.
caminho_original = r'C:\Users\Gustavo\Desktop\Legendas temp'
caminho_novo = r'C:\Users\Gustavo\Desktop\Legendas temp criada Python'
try:
    os.mkdir(caminho_novo)
except FileExistsError as error:
    print(f'A pasta {caminho_novo} já existe.')
# NOTE(review): this walks `caminho_novo` — the directory that was just
# created (likely empty) — presumably it should walk `caminho_original`;
# confirm before enabling the move/copy lines below.
for root, dirs, files in os.walk(caminho_novo):
    for file in files:
        old_file_path = os.path.join(root, file)
        new_file_path = os.path.join(caminho_novo, file)
        # shutil.move(old_file_path, new_file_path)
        # shutil.copy(old_file_path, new_file_path)
        # print(f'Arquivo {file} copiado com sucesso.')
        # Delete zip archives found in the destination folder.
        if '.zip' in file:
            os.remove(new_file_path)
            print(f'Arquiivo {file} removido com sucesso.')
1688788 | <filename>source/AI/KI_versions/multiAgent.py<gh_stars>0
# stable baselines
from stable_baselines3 import A2C, PPO
from stable_baselines3.common.env_util import make_vec_env
# threading
import threading
from threads.socketHandler import *
# import env
import KI_v01 # important
# other
import os
class KI:
    """Builds a vectorized ScribbleFight environment and trains an RL agent on it."""

    def __init__(self):
        # Two parallel copies of the custom env registered by the KI_v01 import.
        self.env = make_vec_env('ScribbleFight-v0', n_envs=2)

    def run(self):
        """Train a PPO agent on the environment and close it afterwards.

        Training logs go to Traning/Logs (original path kept verbatim —
        "Traning" is a pre-existing typo that existing log dirs depend on).
        """
        log_path = os.path.join('Traning', 'Logs')
        # BUG FIX: the original also constructed an A2C model on this line and
        # immediately overwrote it with the PPO model, so the A2C instance was
        # pure wasted work; only PPO was ever trained.
        model = PPO("MlpPolicy", self.env, verbose=1, tensorboard_log=log_path)
        model.learn(total_timesteps=4500000)
        self.env.close()
if __name__ == "__main__":
ki = KI()
for item in ki.env.get_attr('pygame'):
while not item.scribble_fight.readystate:
continue
ki.run()
| StarcoderdataPython |
3298054 | <gh_stars>0
"""This module implements helper methods for generating docs."""
from __future__ import annotations
import os
def building_docs() -> bool:
"""Return true if currently building documentations."""
if 'READTHEDOCS' in os.environ:
return True
if '__SPHINX_BUILD__' in os.environ:
return True
return False
| StarcoderdataPython |
1683093 | from rest_framework import viewsets, permissions
from .models import SiteDatingProfile
from .serializers import SiteDatingProfileSerializer
from .paginators import DatingProfilePagination
class SiteDatingProfileModelViewSet(viewsets.ModelViewSet):
    """Full CRUD API endpoint for SiteDatingProfile objects."""
    queryset = SiteDatingProfile.objects.all()
    serializer_class = SiteDatingProfileSerializer
    # Anonymous users get read-only access; writes require authentication.
    permission_classes = (permissions.IsAuthenticatedOrReadOnly, )
    pagination_class = DatingProfilePagination
    def perform_create(self, serializer):
        # Attach the requesting user as the profile owner on create.
        serializer.save(user=self.request.user)
3260179 | <gh_stars>0
#!/usr/bin/env python
# coding: utf-8
# Jupyter-notebook export: plant-disease classification on the PlantVillage
# dataset via transfer learning.  The `# In[..]:` markers are the original
# notebook cell boundaries.
# # Import Libraries
# In[23]:
#Importing and Installing Required Modules and Libraries
# NOTE: get_ipython() only exists when running under IPython/Jupyter.
get_ipython().system('pip install tensorflow')
get_ipython().system('pip install basic_image_eda')
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import seaborn as sns
sns.set_theme(style="whitegrid")
# NOTE(review): cv2, sys and `sub` appear unused in this part of the script.
import cv2
import sys
from re import sub
import tensorflow as tf
from tensorflow.keras import metrics
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Activation, Flatten, Dropout, Dense
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing import image
from tensorflow.keras import models
from tensorflow.keras.preprocessing import image_dataset_from_directory
# # Load Dataset
# In[24]:
# Root of the PlantVillage "color" images, one subdirectory per class.
data_dir = r"Downloads/plantvillage-dataset/color/"
# # Sample Image
# In[25]:
img = plt.imread(data_dir+"Apple___Apple_scab/00075aa8-d81a-4184-8541-b692b78d398a___FREC_Scab 3335.JPG")
plt.imshow(img)
# # Exploratory Data Analysis
# In[26]:
# Count how many images each class (sub-directory) contains.
category_count = []
for root, dirs, files in os.walk(data_dir):
    for dir_path in dirs:
        category_count.append((dir_path, len(os.listdir(root+os.sep+dir_path))))
count_df = pd.DataFrame(category_count, columns=['Category', 'Count'])
count_df.head(10)
# In[27]:
total_images = count_df['Count'].sum()
total_images
# # Data Visualization by Category
# In[28]:
count_df = count_df.sort_values(by='Count', ascending=False)
plt.figure(figsize=(20,20))
sns.barplot(x="Count", y="Category", data=count_df)
plt.plot()
# In[29]:
# Classes named "*___healthy" are the healthy-leaf categories.
healthy_images_count = count_df[count_df['Category'].str.endswith("healthy")]['Count'].sum()
healthy_images_count
# In[30]:
disease_images_count = total_images - healthy_images_count
disease_images_count
# In[31]:
temp_df = pd.DataFrame(data=[("Unhealthy", disease_images_count), ("Healthy", healthy_images_count)], columns=['Category', 'Count'])
sns.barplot(y="Count",x="Category", data=temp_df)
plt.plot()
# # Data Augmentation and Pre-processing
# In[32]:
BATCH_SIZE = 32
IMG_SIZE = (240, 240)
# In[33]:
# 80/20 train/validation split; the shared seed keeps the subsets disjoint.
train_dataset = image_dataset_from_directory(data_dir,
                                             shuffle=True,
                                             label_mode = 'categorical',
                                             validation_split = 0.2,
                                             batch_size=BATCH_SIZE,
                                             seed = 42,
                                             subset = "training",
                                             image_size=IMG_SIZE)
validation_dataset = image_dataset_from_directory(data_dir,
                                                  shuffle=True,
                                                  label_mode = 'categorical',
                                                  validation_split = 0.2,
                                                  batch_size=BATCH_SIZE,
                                                  seed = 42,
                                                  subset = "validation",
                                                  image_size=IMG_SIZE)
# In[34]:
class_names = train_dataset.class_names
num_classes = len(class_names)
for i in range(1, num_classes + 1):
    print(str(i) + ". ", class_names[i - 1])
# In[35]:
# Carve a test set out of the validation set (one fifth of its batches).
val_batches = tf.data.experimental.cardinality(validation_dataset)
test_dataset = validation_dataset.take(val_batches // 5)
validation_dataset = validation_dataset.skip(val_batches // 5)
print('Number of validation batches: %d' % tf.data.experimental.cardinality(validation_dataset))
print('Number of test batches: %d' % tf.data.experimental.cardinality(test_dataset))
# In[36]:
# Prefetch so data loading overlaps with training.
AUTOTUNE = tf.data.AUTOTUNE
train_dataset = train_dataset.prefetch(buffer_size=AUTOTUNE)
validation_dataset = validation_dataset.prefetch(buffer_size=AUTOTUNE)
test_dataset = test_dataset.prefetch(buffer_size=AUTOTUNE)
# In[37]:
# added augmentations
data_augmentation = tf.keras.Sequential([
  tf.keras.layers.experimental.preprocessing.RandomFlip('horizontal'),
  tf.keras.layers.experimental.preprocessing.RandomRotation(0.2),
])
# In[38]:
# Preview the augmentation pipeline on a single training image.
for image, _ in train_dataset.take(1):
  plt.figure(figsize=(10, 10))
  first_image = image[0]
  for i in range(9):
    ax = plt.subplot(3, 3, i + 1)
    augmented_image = data_augmentation(tf.expand_dims(first_image, 0))
    plt.imshow(augmented_image[0] / 255)
    plt.axis('off')
# ## Metrics and Plotting functions
# In[39]:
def plot_metrics(history):
    """Plot train/validation curves for loss, AUC, precision and recall
    in a 2x2 grid from a Keras ``History`` object."""
    colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'w']
    tracked = ['loss', 'auc', 'precision', 'recall']
    plt.figure(figsize=(20,10))
    for idx, key in enumerate(tracked, start=1):
        plt.subplot(2, 2, idx)
        plt.plot(history.epoch, history.history[key], color=colors[0], label='Train')
        plt.plot(history.epoch, history.history['val_' + key],
                 color=colors[0], linestyle="--", label='Val')
        plt.xlabel('Epoch')
        plt.ylabel(key.replace("_", " ").capitalize())
        if key == 'loss':
            plt.ylim([0, plt.ylim()[1]])
        elif key == 'auc':
            plt.ylim([0.8, 1])
        else:
            plt.ylim([0, 1])
        plt.legend()
# In[40]:
# Keras metrics tracked during training/evaluation.
METRICS = [
      metrics.TruePositives(name='tp'),
      metrics.FalsePositives(name='fp'),
      metrics.TrueNegatives(name='tn'),
      metrics.FalseNegatives(name='fn'),
      metrics.CategoricalAccuracy(name='accuracy'),
      metrics.Precision(name='precision'),
      metrics.Recall(name='recall'),
      metrics.AUC(name='auc')
]
# ## Load and compile model
# In[41]:
IMG_SHAPE = IMG_SIZE + (3,)
# In[42]:
preprocess_input = tf.keras.applications.inception_resnet_v2.preprocess_input
# In[43]:
# ImageNet-pretrained backbone without its classification head.
base_model = tf.keras.applications.InceptionResNetV2(
    include_top=False,
    weights="imagenet",
    input_shape=IMG_SHAPE,
)
# In[44]:
image_batch, label_batch = next(iter(train_dataset))
feature_batch = base_model(image_batch)
print(feature_batch.shape)
# In[45]:
# Freeze the backbone for the initial feature-extraction training phase.
base_model.trainable = False
# In[46]:
global_average_layer = tf.keras.layers.GlobalAveragePooling2D()
feature_batch_average = global_average_layer(feature_batch)
print(feature_batch_average.shape)
# In[47]:
prediction_layer = tf.keras.layers.Dense(num_classes, activation="softmax")
prediction_batch = prediction_layer(feature_batch_average)
print(prediction_batch.shape)
# In[48]:
# Assemble: augmentation -> preprocessing -> frozen backbone -> pooled head.
inputs = tf.keras.Input(shape=(240, 240, 3))
x = data_augmentation(inputs)
x = preprocess_input(x)
x = base_model(x, training=False)
x = global_average_layer(x)
x = tf.keras.layers.Dropout(0.2)(x)
outputs = prediction_layer(x)
model = tf.keras.Model(inputs, outputs)
# In[49]:
# NOTE(review): the head already applies softmax, yet the loss is built with
# from_logits=True — these look inconsistent; confirm which was intended.
base_learning_rate = 0.001
model.compile(optimizer=tf.keras.optimizers.Adam(lr=base_learning_rate),
              loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
              metrics=METRICS)
# In[50]:
model.summary()
# # Training, Validation and Testing
# Before tuning
# In[51]:
initial_epochs = 10
# In[52]:
history = model.fit(train_dataset,
                    epochs=initial_epochs,
                    validation_data=validation_dataset)
# After tuning
# In[53]:
# Unfreeze the backbone for the fine-tuning phase.
base_model.trainable = True
# In[54]:
# Let's take a look to see how many layers are in the base model
print("Number of layers in the base model: ", len(base_model.layers))
# Fine-tune from this layer onwards
fine_tune_at = 700
# Freeze all the layers before the `fine_tune_at` layer
for layer in base_model.layers[:fine_tune_at]:
    layer.trainable = False
# In[55]:
# Much lower learning rate for fine-tuning to protect pretrained weights.
fine_tuning_learning_rate = 1e-5
model.compile(optimizer=tf.keras.optimizers.Adam(lr=fine_tuning_learning_rate),
              loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
              metrics=METRICS)
# In[56]:
model.summary()
# In[57]:
len(model.trainable_variables)
# In[58]:
fine_tune_epochs = 15
total_epochs = initial_epochs + fine_tune_epochs
# Resume training from the epoch where the first phase stopped.
history_fine = model.fit(train_dataset,
                         epochs=total_epochs,
                         initial_epoch=history.epoch[-1],
                         validation_data=validation_dataset)
# In[62]:
# NOTE(review): only the fine-tuning history is plotted despite the
# "Start Fine Tuning" marker — the first-phase history is not concatenated.
acc = []
val_acc = []
loss = []
val_loss = []
acc += history_fine.history['accuracy']
val_acc += history_fine.history['val_accuracy']
loss += history_fine.history['loss']
val_loss += history_fine.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.ylim([0.8, 1])
plt.plot([initial_epochs-1,initial_epochs-1],
         plt.ylim(), label='Start Fine Tuning')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.ylabel('Accuracy')
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.ylim([0, 1.0])
plt.plot([initial_epochs-1,initial_epochs-1],
         plt.ylim(), label='Start Fine Tuning')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.ylabel('Cross Entropy')
plt.show()
# In[63]:
plot_metrics(history_fine)
# In[64]:
result = model.evaluate(test_dataset)
# In[65]:
# NOTE(review): this rebinds the name `metrics`, shadowing the
# `tensorflow.keras.metrics` module imported at the top of the file.
metrics = ["loss", "tp", "fp", "tn", "fn", "accuracy", "precision", "recall", "auc"]
for i in range(len(result)):
    print("{} : {}".format(metrics[i],round(result[i], 3)))
# In[66]:
#Retrieve a batch of images from the test set
image_batch, label_batch = test_dataset.as_numpy_iterator().next()
predictions = model.predict_on_batch(image_batch)
# NOTE(review): the model's output layer already applies softmax; applying
# softmax again flattens probabilities but does not change the argmax below.
predictions = tf.nn.softmax(predictions)
predictions = list(np.argmax(x) for x in predictions.numpy())
print('Predictions:\n', predictions)
print('Labels:\n', list(np.argmax(x) for x in label_batch))
plt.figure(figsize=(10, 10))
for i in range(9):
  ax = plt.subplot(3, 3, i + 1)
  plt.imshow(image_batch[i].astype("uint8"))
  plt.title(class_names[predictions[i]])
  plt.axis("off")
# # Save Model
# In[67]:
# save model in JSON format
model_json = model.to_json()
# NOTE(review): this file handle is never closed; consider a `with` block.
json_file = open("model_weights.json", "w")
json_file.write(model_json)
print("Model saved in JSON format!")
# save training weights in h5 file
model.save_weights("model_weights.h5")
print("\nModel weights saved!")
# In[68]:
model.save("inception_V3.0_fineTuning.h5")
3288637 | from glob import glob
from pippi import dsp
def fetch():
    """Load a randomly chosen WAV sample from samples/wes/ and return its raw data."""
    candidates = glob('samples/wes/*.wav')
    chosen = dsp.randchoose(candidates)
    return dsp.read(chosen).data
def make(length):
    """Return a randomly fetched sample filled out to *length*."""
    sample = fetch()
    return dsp.fill(sample, length)
| StarcoderdataPython |
3209632 | from unittest.mock import patch
from athenian.api.hacks.show_email import main
def test_show_email(capsys):
    """The CLI decodes the encrypted blob and prints the e-mail address."""
    argv = ["show_sql.py", "af253b50a4d7b2c9841f436fbe4c635f270f4388653649b0971f2751a441a556fe63a9dabfa150a444dd"]
    with patch("sys.argv", argv):
        main()
    captured = capsys.readouterr()
    assert captured.out == "<EMAIL>\n"
| StarcoderdataPython |
138077 | <reponame>UBCS-LCP/auxiliary
from numpy import zeros, array_equal
from ubcs_auxiliary import save_object, load_object
def test_save_load(tmpdir):
    """Round-trip several object types (and a compound of all of them)
    through save_object/load_object."""
    arr_in = zeros((10, 5))
    cases = {
        'arr': arr_in,
        'str': 'this is string',
        'dict': {'1': 1, '2': 2},
        'list': [1, 2, 3, 4],
    }
    compound_in = dict(cases)
    # save to files
    for name, obj in cases.items():
        save_object(tmpdir + f'{name}.extension', obj)
    save_object(tmpdir + 'compound.extension', compound_in)
    # load back
    loaded = {name: load_object(tmpdir + f'{name}.extension') for name in cases}
    compound_out = load_object(tmpdir + 'compound.extension')
    # run checks
    assert array_equal(cases['arr'], loaded['arr'])
    assert cases['str'] == loaded['str']
    assert cases['dict'] == loaded['dict']
    assert cases['list'] == loaded['list']
    assert compound_in.keys() == compound_out.keys()
| StarcoderdataPython |
109692 | import random
from discord.ext import commands
import discord
from cogs.menus import Menus
def get_role(guild):
return discord.utils.get(guild.roles, name='Giveaway')
async def get_entrants(guild, remove=True):
role = get_role(guild)
entrants = []
for m in guild.members:
if role in m.roles:
entrants.append(m)
if remove:
try:
await m.remove_roles(role)
except:
pass
return entrants
class Giveaway(Menus):
def __init__(self, bot):
self.bot = bot
self.giveaways = {} # {guild.id: {'creator': discord.Member, 'reward': str}}
self.booted = {} # {guild.id: [member.id, ...]}
    @commands.group(invoke_without_command=True)
    async def giveaway(self, ctx):
        """Parent command group; all functionality lives in the subcommands."""
        ...
@giveaway.command()
async def enter(self, ctx):
"""Enter the currently active giveaway."""
if ctx.author.id in self.booted.get(ctx.guild.id, []):
await ctx.send("You were booted from the current giveaway.")
return
elif ctx.guild.id not in self.giveaways:
await ctx.send('There is no active giveaway.')
return
await ctx.author.add_roles(get_role(ctx.guild))
await ctx.send(f'{ctx.author.display_name} has been entered into the giveaway.')
@commands.has_role('Giveaway')
@giveaway.command()
async def withdraw(self, ctx):
"""Leave the currently active giveaway."""
await ctx.author.remove_roles(get_role(ctx.guild))
await ctx.send(f'{ctx.author.display_name} has left the giveaway.')
@giveaway.command(name='list')
async def list_(self, ctx):
"""List the members that have joined the giveaway."""
entrants = await get_entrants(ctx.guild, remove=False)
mentions = [e.mention for e in entrants]
await self.embed_menu(mentions, f'{len(entrants)} Entrants', ctx.author, ctx, count=0, code=False)
@giveaway.command()
async def info(self, ctx):
"""Get info about the currently active giveaway."""
if ctx.guild.id not in self.giveaways:
await ctx.send('There is no active giveaway.')
return
giveaway = self.giveaways[ctx.guild.id]
await ctx.send(f"The currently active giveaway is for \"{giveaway['reward']}\" and is hosted by {giveaway['creator'].display_name}.")
@commands.has_permissions(administrator=True)
@giveaway.command()
async def start(self, ctx, reward):
"""Start a new giveaway."""
if ctx.guild.id in self.giveaways:
await ctx.send('A giveaway is already active here.')
return
self.giveaways[ctx.guild.id] = {'creator': ctx.author, 'reward': reward}
await ctx.send(f'The giveaway "{reward}" is now active.')
@commands.has_permissions(administrator=True)
@giveaway.command()
async def add(self, ctx, member: discord.Member):
"""Manually enter someone into the giveaway.
This will bypass the booted check."""
if ctx.guild.id not in self.giveaways:
await ctx.send('No giveaway is currently active.')
return
await member.add_roles(get_role(ctx.guild))
await ctx.send(f'{member.display_name} has been entered into the giveaway.')
@commands.has_permissions(administrator=True)
@giveaway.command(aliases=['kick'])
async def boot(self, ctx, member: discord.Member):
"""Boot someone from the giveaway."""
if ctx.guild.id not in self.giveaways:
await ctx.send('No giveaway is currently active.')
return
await member.remove_roles(get_role(ctx.guild))
if ctx.guild.id in self.booted:
self.booted[ctx.guild.id].append(member.id)
else:
self.booted[ctx.guild.id] = [member.id]
await ctx.send(f'{member.display_name} has been {ctx.invoked_with}ed from the giveaway.')
@commands.has_permissions(administrator=True)
@giveaway.command()
async def draw(self, ctx, count=1):
"""Draw the winner of the giveaway."""
if count < 1:
await ctx.send('Try the "end" command instead.')
return
elif ctx.guild.id not in self.giveaways:
await ctx.send('No giveaway is currently active.')
return
giveaway = self.giveaways[ctx.guild.id]
entrants = await get_entrants(ctx.guild, remove=False)
if ctx.guild.id not in self.booted:
self.booted[ctx.guild.id] = []
if count == 1:
winner = random.choice(entrants)
await ctx.send(f"{winner.mention} won the giveaway for \"{giveaway['reward']}\" by {giveaway['creator'].mention}.")
try:
await winner.remove_roles(get_role(ctx.guild))
except:
pass
self.booted[ctx.guild.id].append(winner.id)
else:
winners = random.sample(entrants, count)
await ctx.send(f"The winners of the giveaway for \"{giveaway['reward']}\" by {giveaway['creator'].mention} are " +
' '.join(m.mention for m in winners))
for m in winners:
self.booted[ctx.guild.id].append(m.id)
try:
await m.remove_roles(get_role(ctx.guild))
except:
pass
@commands.has_permissions(administrator=True)
@giveaway.command(aliases=['cancel'])
async def end(self, ctx):
"""End the giveaway without drawing a winner."""
if ctx.guild.id not in self.giveaways:
await ctx.send('No giveaway is currently active.')
return
giveaway = self.giveaways.pop(ctx.guild.id)
self.booted.pop(ctx.guild.id, None)
await get_entrants(ctx.guild)
await ctx.send(f"The giveaway for \"{giveaway['reward']}\" by {giveaway['creator'].mention} has been cancelled.")
def setup(bot):
    # discord.py extension entry point: register the cog when the extension loads.
    bot.add_cog(Giveaway(bot))
| StarcoderdataPython |
3370047 | <gh_stars>1-10
from django.utils.translation import gettext_lazy as _
from wagtail.contrib.modeladmin.options import ModelAdmin, modeladmin_register
from db.models import SoftSkill
from .permissions import ValidationPermissionHelper
class SoftSkillAdmin(ModelAdmin):
    """Wagtail modeladmin configuration exposing SoftSkill records."""

    model = SoftSkill
    menu_label = _('Soft Skills')
    menu_icon = 'folder-open-inverse'  # icon shown in the Wagtail admin menu
    add_to_settings_menu = False
    exclude_from_explorer = False
    # Columns shown in the listing view.
    list_display = (
        'student',
        'company',
    )
    search_fields = ('student', 'company')
    # Restricts who may view/edit entries (validation staff only).
    permission_helper_class = ValidationPermissionHelper


modeladmin_register(SoftSkillAdmin)
| StarcoderdataPython |
115144 | from django.conf import settings
from django.conf.urls import url
from django.conf.urls.static import static
from .views import (
MapSettingsView,
OverlayMetaView,
OverlayList,
)
# URL routes for the overlay/map API.
urlpatterns = [
    url(r'^$', OverlayList.as_view(), name='overlay-list'),
    url(r'^metadata/$', OverlayMetaView.as_view(), name='overlay-meta-view'),
    url(r'^settings/$', MapSettingsView.as_view(), name='map-settings-view'),
]

if settings.DEBUG:
    # Development: let the static() helper serve static/media files.
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
else:
    # Bug fix: the original referenced ``django.views.static.serve`` but the
    # top-level ``django`` module was never imported, so evaluating this
    # branch raised NameError at import time.  Import the view explicitly.
    from django.views.static import serve

    urlpatterns += [
        url(r'^static/(.*)$', serve, {'document_root': settings.STATIC_ROOT}),
        url(r'^media/(.*)$', serve, {'document_root': settings.MEDIA_ROOT}),
    ]
12190 | <filename>tests/data/program_analysis/PyAST2CAST/import/test_import_3.py
# 'from ... import ...' statement
from sys import exit
def main():
    """Entry point: terminate the interpreter with a success status."""
    # Raising SystemExit(0) is exactly what sys.exit(0) does internally.
    raise SystemExit(0)


main()
133094 | <gh_stars>1-10
"""Unit testing for the application's modules."""
| StarcoderdataPython |
1661052 | #!-*-encoding:UTF-8-*-
import socket
import msvcrt
from os import system
import ctypes

# Set the console window title (Windows-only; so is the msvcrt module above).
# NOTE: this is Python 2 code (print statements, byte strings over the socket).
ctypes.windll.kernel32.SetConsoleTitleA("LED kijelzo")

# LED display controller, presumably reachable over telnet — confirm address.
HOST = '192.168.0.1'
PORT = 23

try:
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    print "Socket successfully created"
except socket.error as err:
    print "socket creation failed with error %s" %(err)

s.connect((HOST, PORT))

# Interactive menu loop: compose a status message from up to three parts
# (importance, activity, subject) and send it to the LED display.
while True:
    print "\nRakd ossze az uzenetet:"
    important = [" ", "Ne zavarj "]
    i = 0
    for msg in important:
        print i,important[i]
        i = i + 1
    # getch() returns a single keypress; subtracting 48 (ord('0')) maps the
    # digit key to its integer value.
    # NOTE(review): a keypress outside the listed menu range raises IndexError.
    important_sel = ord(msvcrt.getch()) - 48
    print ""
    if(important_sel==0):
        action = [" ", " Bejohetsz", " Pihenek ", "<NAME>"]
    else:
        action = [" ", " ZH-zom ", " tanulok ", " videozok ", " beszelek ", " [SEM] "]
    i = 0
    for msg in action:
        print i,action[i]
        i = i + 1
    action_sel = ord(msvcrt.getch()) - 48
    print ""
    targy_sel = 0;
    targy = [" ", " SZABTECH ", " uELEKTRO ", " MERESTECH", " INFOKOMM ", " INFO 1 ", " INFO 2 ", "INNOVATIV ", " gyules "]
    # Only the "do not disturb" branch additionally asks for a subject.
    if(important_sel==1):
        i = 0
        for msg in targy:
            print i,targy[i]
            i = i + 1
        targy_sel = ord(msvcrt.getch()) - 48
        print ""
    # Send all three parts as one concatenated message.
    s.send(important[important_sel] + action[action_sel] + targy[targy_sel])
    #s.send(action[action_sel])
    #s.send(targy[targy_sel])
    system('cls')
    print ""
    print "[",important[important_sel],"]"
    print "[",action[action_sel],"]"
    print "[",targy[targy_sel],"]"
    # Selecting option 0 on both menus ends the session.
    if(important_sel==0 and action_sel==0):
        break
s.close()
1771790 | from app import create_app,db
from flask_script import Manager,Server
from app.models import User,Post,Comment
from flask_migrate import Migrate,MigrateCommand
#creating app instance
app = create_app('development')

# flask-script command-line manager with DB migration support.
manage = Manager(app)
migrate = Migrate(app,db)

manage.add_command('db',MigrateCommand)
# NOTE(review): Server is registered as the class itself rather than an
# instance (Server()) — confirm flask_script accepts this form.
manage.add_command('server',Server)


@manage.command
def test():
    '''Run the unit tests.'''
    import unittest
    tests = unittest.TestLoader().discover('tests')
    unittest.TextTestRunner(verbosity=2).run(tests)


@manage.shell
def make_shell_context():
    # Objects pre-imported into the interactive `manage shell` session.
    return dict(app = app, db = db , User = User, Post = Post, Comment = Comment)


if __name__ == '__main__':
    manage.run()
35838 | <filename>shrike/pipeline/telemetry_utils.py
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import logging
from opencensus.ext.azure.log_exporter import AzureLogHandler
log = logging.getLogger(__name__)
class TelemetryLogger:
    """Utils class for opencensus azure monitor.

    Wraps a dedicated ``telemetry_logger`` whose records are shipped to Azure
    Application Insights through an ``AzureLogHandler``.
    """

    # Logging levels this wrapper knows how to forward.
    _SUPPORTED_LEVELS = frozenset(
        (logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL)
    )

    def __init__(
        self, enable_telemetry=True, instrumentation_key=None, level=logging.INFO
    ):
        """Create the telemetry logger.

        :param enable_telemetry: when False, log_trace becomes a local no-op.
        :param instrumentation_key: Application Insights key; defaults to the
            shared tooling key below.
        :param level: minimum level forwarded by the underlying logger.
        """
        self.logger = logging.getLogger("telemetry_logger")
        self.logger.setLevel(level)
        self.enable_telemetry = enable_telemetry
        # Why is it okay to include this key directly in the source code?
        # For any client-side tool, there is a fundamental problem with protecting instrumentation
        # keys. You want the published tool to be able to collect telemetry, but the only way
        # it can do this is if it has some kind of instrumentation key.
        #
        # For an authoritative example, the dotnet CLI contains their telemetry key in a
        # public GitHub repository:
        # https://github.com/dotnet/cli/blob/master/src/dotnet/Telemetry/Telemetry.cs
        #
        # The underlying Azure resource is called `aml1p-ml-tooling`.
        self.instrumentation_key = (
            "<KEY>"
            if instrumentation_key is None
            else instrumentation_key
        )
        handler = AzureLogHandler(
            connection_string=f"InstrumentationKey={self.instrumentation_key}"
        )
        handler.add_telemetry_processor(self.scrubber_function)
        self.logger.addHandler(handler)

    def log_trace(self, message, properties=None, level=logging.INFO):
        """Emit *message* at *level*, attaching *properties* as custom fields.

        Fix: ``properties`` previously defaulted to a shared mutable ``{}``
        (classic mutable-default pitfall); it now defaults to None.
        """
        if not self.enable_telemetry:
            log.info(
                "Sending trace log messages to application insight has been disabled."
            )
            return
        extra = {} if properties is None else properties
        try:
            if level in self._SUPPORTED_LEVELS:
                # Logger.log dispatches identically to info()/warning()/error()/
                # critical(), replacing the original if/elif chain.
                self.logger.log(level, message, extra=extra)
            else:
                log.error("The logging level is not expected!")
        except Exception as ex:
            # Telemetry must never take the host application down.
            log.warning("Send telemetry exception: %s", str(ex))

    # Callback function to scrub some columns
    def scrubber_function(self, envelope):
        """Remove identifying fields (role instance, client IP) before export."""
        envelope.tags["ai.cloud.roleInstance"] = "cloud_RoleInstance_Scrubbed"
        envelope.tags["ai.location.ip"] = "IP_Scrubbed"
| StarcoderdataPython |
80042 | """Tests that basis kets are working properly"""
import Python.density_matrix as DM
import Python.measurements as mes
class TestMeasurements:
    """
    Testing that measurements produce the expected results.

    NOTE(review): these tests compare floats with exact ``==``; they are
    brittle across platforms/numpy versions — consider math.isclose.
    """

    def test_temp(self):
        """Tests the creation of a simple ket and validates its properties"""
        dm = DM.qbit(.25)
        assert mes.temp(dm) == .9102392266268373
        dm = DM.qbit(.4)
        assert mes.temp(dm) == 2.4663034623764326

    def test_temp_from_pop(self):
        # temp of a qbit built from a population should match the direct formula.
        dm = DM.qbit(.4)
        assert mes.temp(dm) == mes.temp_from_pop(.4)

    def test_pop_from_temp(self):
        # pop_from_temp is the inverse of temp.
        dm = DM.qbit(.4)
        assert mes.pop_from_temp(mes.temp(dm)) == .4

    def test_D(self):
        # Distance of a state to itself is zero; distinct states are separated.
        dm_1, dm_2 = DM.qbit(.4), DM.qbit(.1)
        assert mes.D(dm_1, dm_1) == 0
        assert mes.D(dm_1, dm_2) == 0.3112386795830575

    def test_extractable_work(self):
        # A thermal state at its own temperature yields no extractable work;
        # work increases as the bath temperature drops.
        T = .4
        dm = DM.qbit(mes.pop_from_temp(T))
        assert mes.extractable_work(T, dm) == 0
        assert mes.extractable_work(T - .1, dm) > mes.extractable_work(T, dm)
        assert mes.extractable_work(T - .3, dm) > mes.extractable_work(T - .1, dm)

    def test_change_in_extractable_work(self):
        T_1, T_2 = .4, .2
        dm_1 = DM.qbit(mes.pop_from_temp(T_1))
        dm_2 = DM.qbit(mes.pop_from_temp(T_2))
        assert mes.change_in_extractable_work(T_1, dm_1, T_2, dm_2) == 0
        assert mes.change_in_extractable_work(T_1, dm_1, T_1, dm_1) == 0
        assert mes.change_in_extractable_work(T_1, dm_1, T_2, dm_1) == 0.02349421284993547
        assert mes.change_in_extractable_work(T_2, dm_1, T_1, dm_2) != 0

    def test_entropy(self):
        dm = DM.qbit(.3)
        assert mes.entropy(dm) == .6108643020548935
| StarcoderdataPython |
3241078 | from django import template
from django.forms.fields import MultipleChoiceField
from copy import copy
from wagtail.core.models import Page
from molo.surveys.models import (
MoloSurveyPage, SurveysIndexPage, PersonalisableSurvey)
from molo.core.templatetags.core_tags import get_pages
from django.shortcuts import get_object_or_404
register = template.Library()
def get_survey_list(context,
                    only_linked_surveys=False,
                    only_direct_surveys=False,
                    only_yourwords=False,
                    personalisable_survey=False):
    """Put the site's surveys into the template context under 'surveys'.

    The boolean flags select mutually exclusive subsets; with no flags set,
    all main-language MoloSurveyPage children of the surveys index are used.
    """
    if only_linked_surveys and only_direct_surveys:
        raise ValueError('arguments "only_linked_surveys" and '
                         '"only_direct_surveys" cannot both be True')
    context = copy(context)
    locale_code = context.get('locale_code')
    main = context['request'].site.root_page
    page = SurveysIndexPage.objects.child_of(main).live().first()
    if page:
        surveys = []
        if only_linked_surveys:
            # Surveys shown as links only (not rendered inline).
            surveys = (MoloSurveyPage.objects.child_of(page)
                       .filter(language__is_main_language=True,
                               display_survey_directly=False,
                               your_words_competition=False)
                       .exact_type(MoloSurveyPage).specific())
        elif only_direct_surveys:
            # Surveys rendered directly on the page.
            surveys = (MoloSurveyPage.objects.child_of(page)
                       .filter(language__is_main_language=True,
                               display_survey_directly=True,
                               your_words_competition=False)
                       .exact_type(MoloSurveyPage).specific())
        elif only_yourwords:
            # "YourWords" competition surveys.
            surveys = (MoloSurveyPage.objects.child_of(page)
                       .filter(language__is_main_language=True,
                               your_words_competition=True)
                       .exact_type(MoloSurveyPage).specific())
        elif personalisable_survey:
            surveys = (PersonalisableSurvey.objects.child_of(page)
                       .filter(language__is_main_language=True)
                       .exact_type(PersonalisableSurvey).specific())
        else:
            surveys = (MoloSurveyPage.objects.child_of(page)
                       .filter(language__is_main_language=True)
                       .exact_type(MoloSurveyPage).specific())
    else:
        surveys = MoloSurveyPage.objects.none()
    context.update({
        'surveys': get_pages(context, surveys, locale_code)
    })
    return context
def add_form_objects_to_surveys(context):
    """Pair each survey in context['surveys'] with a bound form (or None).

    A form is attached only when the user may still submit: either multiple
    submissions are allowed or the user has not submitted yet.
    """
    surveys = []
    for survey in context['surveys']:
        form = None
        if (survey.allow_multiple_submissions_per_user or
                not survey.has_user_submitted_survey(
                    context['request'], survey.id)):
            form = survey.get_form()
        surveys.append({
            'molo_survey_page': survey,
            'form': form,
        })
    context.update({
        'surveys': surveys,
    })
    return context
@register.inclusion_tag('surveys/surveys_headline.html', takes_context=True)
def surveys_list_headline(context):
    # Headline variant: lists surveys without attaching form objects.
    return get_survey_list(context)


@register.inclusion_tag('surveys/surveys_list.html', takes_context=True)
def surveys_list(context, pk=None, only_linked_surveys=False,
                 only_direct_surveys=False, only_yourwords=False,
                 personalisable_survey=False):
    # Full listing: resolve the selected surveys, then pair each with its form.
    context = get_survey_list(context,
                              only_linked_surveys=only_linked_surveys,
                              only_direct_surveys=only_direct_surveys,
                              only_yourwords=only_yourwords,
                              personalisable_survey=personalisable_survey)
    return add_form_objects_to_surveys(context)
@register.simple_tag(takes_context=True)
def load_user_choice_poll_survey(context, survey, field, choice):
    """Return True when the current user answered *field* with *choice*
    in any of their submissions to *survey*."""
    if not survey or not field:
        return False
    request = context['request']
    # Submissions are stored against the main-language page.
    survey = survey.specific.get_main_language_page()
    SubmissionClass = survey.specific.get_submission_class()
    submissions = SubmissionClass.objects.filter(
        page=survey, user=request.user)
    if not submissions.exists():
        return False
    for submission in submissions:
        data = submission.get_data()
        if field in data and data[field] == choice:
            return True
    return False
@register.simple_tag(takes_context=True)
def submission_has_article(context, survey_id, submission_id):
    """Return True when the given survey submission has an article attached.

    Fix: ``first()`` returns None when *submission_id* does not belong to the
    survey; the original then crashed with AttributeError on
    ``submission.article_page``.
    """
    survey_page = get_object_or_404(Page, id=survey_id).specific
    SubmissionClass = survey_page.get_submission_class()
    submission = SubmissionClass.objects.filter(
        page=survey_page).filter(pk=submission_id).first()
    return submission is not None and submission.article_page is not None
@register.inclusion_tag('surveys/surveys_list.html', takes_context=True)
def surveys_list_for_pages(context, pk=None, page=None):
    """Render the surveys that are children of an arbitrary *page*."""
    context = copy(context)
    locale_code = context.get('locale_code')
    if page:
        surveys = (
            MoloSurveyPage.objects.child_of(page).filter(
                language__is_main_language=True).specific())
    else:
        surveys = MoloSurveyPage.objects.none()
    context.update({
        'surveys': get_pages(context, surveys, locale_code)
    })
    return add_form_objects_to_surveys(context)
@register.filter(name='is_multiple_choice_field')
def is_multiple_choice_field(value):
    # *value* is a bound form field; inspect the underlying field's type.
    return isinstance(value.field, MultipleChoiceField)
| StarcoderdataPython |
1786649 | # Author: <NAME>
# Collaborators: Prof. <NAME>, Prof. <NAME>, Dr. <NAME>
# Email : <EMAIL>/<EMAIL>
# Affiliation : Imperial Centre for Inference and Cosmology
# Status : Under Development
'''
Setup for the redshift distributions
'''
import numpy as np
import scipy.stats as ss
# our scripts
import settings as st
class nz_dist(object):
    """Redshift distribution n(z) models evaluated on a fixed grid.

    The grid ``nz_z`` spans [zmin, zmax] with ``nzmax`` points; ``mid_z``
    holds the bin midpoints with a leading zero so both arrays share a length.
    """

    def __init__(self, zmin: float = None, zmax: float = None, nzmax: int = None):
        """Build the redshift grid, falling back to the survey settings
        (``settings.survey_zmin`` etc.) for any argument left as None."""
        self.zmin = st.survey_zmin if zmin is None else zmin
        self.zmax = st.survey_zmax if zmax is None else zmax
        self.nzmax = st.nzmax if nzmax is None else nzmax

        # n(z) redshift grid
        self.nz_z = np.linspace(self.zmin, self.zmax, self.nzmax)

        # midpoints of consecutive grid points, prefixed with 0 to keep
        # the same length as nz_z
        midpoints = 0.5 * (self.nz_z[1:] + self.nz_z[:-1])
        self.mid_z = np.concatenate((np.zeros(1), midpoints))

    def nz_model_1(self, zm: float) -> np.ndarray:
        """Analytic model n(z) = z^2 exp(-z / z0) with z0 = zm / 3.

        The normalisation is computed on the edge grid; the returned values
        are evaluated at the (zero-prefixed) midpoints.
        """
        z0 = zm / 3.
        raw = self.nz_z ** 2 * np.exp(-self.nz_z / z0)
        norm = np.trapz(raw, self.nz_z)
        return self.mid_z ** 2 * np.exp(-self.mid_z / z0) / norm

    def nz_model_2(self, z0: float, alpha: float = 2, beta: float = 1.5):
        """Analytic model n(z) = z^alpha exp(-(z / z0)^beta).

        See https://arxiv.org/pdf/1502.05872.pdf. Normalised like nz_model_1.
        """
        raw = self.nz_z ** alpha * np.exp(-(self.nz_z / z0) ** beta)
        norm = np.trapz(raw, self.nz_z)
        return self.mid_z ** alpha * np.exp(-(self.mid_z / z0) ** beta) / norm

    def nz_gaussian(self, z0: float, sigma: float) -> np.ndarray:
        """Gaussian n(z) for a tomographic bin, evaluated on the edge grid
        (note: not normalised over the grid and not midpoint-shifted)."""
        return ss.norm(z0, sigma).pdf(self.nz_z)
| StarcoderdataPython |
# Echo each character of the entered name together with its index.
name = input("enter name:")
for index, char in enumerate(name):
    print(char, index)
| StarcoderdataPython |
4806562 | #! /usr/bin/env python3
from aksetup_helper import configure_frontend
# Interactive configure step: generates the build configuration for the package.
configure_frontend()
| StarcoderdataPython |
3235800 | <filename>lecture_13.py
# -*- coding: utf-8 -*-
__author__ = 'wnduan'
# def fib(n):
# global numCalls
# numCalls += 1
# # print 'Call fib with', n
# if n <= 1:
# return 1
# else:
# return fib(n-1) + fib(n-2)
#
# numCalls = 0
# print fib(30)
# print 'Number of calls:', numCalls
#
# def fast_fib(n,memo):
# global numCalls
# numCalls += 1
# # print 'Call fib with', n
# if n in memo:
# return memo[n]
# else:
# memo[n] = fast_fib(n-1, memo) + fast_fib(n-2, memo)
# return memo[n]
#
# def fib1(n):
# memo = {0:1, 1:1}
# return fast_fib(n, memo)
#
# numCalls = 0
# print fib1(30)
# print 'Number of calls:', numCalls
def maxVal(w,v,i,aW):
    """Naive 0/1 knapsack: best value achievable using items 0..i within
    remaining capacity aW.

    w and v are parallel lists of item weights and values.  Exponential time;
    the module-level ``numCalls`` counter instruments the recursion for the
    lecture's complexity demonstration.
    """
    global numCalls
    numCalls += 1
    if i==0:
        # Base case: a single item fits or it doesn't.
        if w[i]<= aW:
            return v[i]
        else:
            return 0
    # Branch 1: skip item i.
    without_i = maxVal(w,v,i-1,aW)
    if w[i] > aW:
        # Item i cannot fit, so skipping is the only option.
        return without_i
    else:
        # Branch 2: take item i and solve the reduced-capacity subproblem.
        with_i = v[i] + maxVal(w,v,i-1,aW-w[i])
        return max(without_i,with_i)
# weights = [1,5,3,4]
# vals = [15,10,9,5]
# numCalls = 0
# res = maxVal(weights,vals,len(weights)-1,8)
# print 'max Val =', res, 'number of calls =', numCalls
#
# weights = [1,1,5,5,3,3,4,4]
# vals = [15,15,10,10,9,9,5,5]
# numCalls = 0
# res = maxVal(weights,vals,len(weights)-1,8)
# print 'max Val =', res, 'number of calls =', numCalls
# maxWeight = 40
# w = [5, 3, 6, 3, 9, 7, 3, 5, 5, 8, 4, 2, 1, 7, 8, 8, 7, 7, 3, 7, 2, 1, 8, 6, 7, 6, 1, 3, 5, 9, 3, 2, 5, 4, 8, 8, 5, 7, 4, 8]
# v = [1, 3, 4, 5, 4, 5, 1, 9, 2, 1, 3, 5, 6, 3, 6, 8, 6, 2, 3, 8, 9, 9, 6, 3, 4, 5, 5, 7, 6, 2, 1, 1, 9, 7, 1, 4, 8, 9, 3, 6]
# numCalls = 0
# res = maxVal(w,v,len(w)-1,maxWeight)
# print 'max Val =', res, 'number of calls =', numCalls
# Pre-computed result of the (slow, commented-out) naive run above.
print 'max Val = 88 number of calls = 402140848'
def fastMaxVal(w,v,i,aW,memo):
    """Memoised 0/1 knapsack: same recurrence as maxVal, but results are
    cached in ``memo`` keyed by the (item index, available weight) pair,
    giving pseudo-polynomial time.
    """
    global numCalls
    numCalls += 1
    try:
        # EAFP cache lookup: return the memoised answer if present.
        return memo[(i,aW)]
    except KeyError:
        if i == 0:
            # Base case, memoised before returning.
            if w[i] <= aW:
                memo[(i,aW)] = v[i]
                return v[i]
            else:
                memo[(i,aW)] = 0
                return 0
        without_i = fastMaxVal(w,v,i-1,aW,memo)
        if w[i] > aW:
            memo[(i,aW)] = without_i
            return without_i
        else:
            with_i = v[i] + fastMaxVal(w,v,i-1,aW-w[i],memo)
            res = max(without_i,with_i)
            memo[(i,aW)] = res
            return res
def maxVal1(w,v,i,aW):
    """Public wrapper: solve the 0/1 knapsack with a fresh memo dictionary."""
    memo = {}
    return fastMaxVal(w,v,i,aW,memo)


# Demo run: 40 items; compare the call count against the naive version above.
maxWeight = 40
w = [5, 3, 6, 3, 9, 7, 3, 5, 5, 8, 4, 2, 1, 7, 8, 8, 7, 7, 3, 7, 2, 1, 8, 6, 7, 6, 1, 3, 5, 9, 3, 2, 5, 4, 8, 8, 5, 7, 4, 8]
v = [1, 3, 4, 5, 4, 5, 1, 9, 2, 1, 3, 5, 6, 3, 6, 8, 6, 2, 3, 8, 9, 9, 6, 3, 4, 5, 5, 7, 6, 2, 1, 1, 9, 7, 1, 4, 8, 9, 3, 6]
numCalls = 0
res = maxVal1(w,v,len(w)-1,maxWeight)
print 'max Val =', res, 'number of calls =', numCalls
174537 | <reponame>andkononykhin/plenum<gh_stars>100-1000
from common.serializers.json_serializer import JsonSerializer
def testJsonSerializer():
    """Round-trip the JsonSerializer: serialized output must match the
    expected compact JSON (keys sorted, no whitespace), in both bytes and
    str form, and deserializing must reproduce the original dicts."""
    sz = JsonSerializer()
    # Flat dict with mixed value types.
    m1 = {'integer': 36, 'name': 'Foo', 'surname': 'Bar', 'float': 14.8639,
          'index': 1, 'index_start_at': 56, 'email': '<EMAIL>',
          'fullname': 'Foo Bar', 'bool': False}
    m1s = '{"bool":false,"email":"<EMAIL>","float":14.8639,"fullname":"Foo Bar",' \
          '"index":1,"index_start_at":56,"integer":36,"name":"Foo","surname":"Bar"}'
    # Dict containing a list value.
    m2 = {'latitude': 31.351883, 'longitude': -97.466179,
          'tags': ['foo', 'bar', 'baz', 'alice', 'bob',
                   'carol', 'dave']}
    m2s = '{"latitude":31.351883,"longitude":-97.466179,' \
          '"tags":["foo","bar","baz","alice","bob","carol","dave"]}'
    # Nested list-of-dicts structure.
    m3 = {'name': '<NAME>', 'website': 'example.com', 'friends': [
        {
            'id': 0,
            'name': 'Dave'
        },
        {
            'id': 1,
            'name': 'Carol'
        },
        {
            'id': 2,
            'name': 'Dave'
        }]}
    m3s = '{"friends":[{"id":0,"name":"Dave"},{"id":1,"name":"Carol"},' \
          '{"id":2,"name":"Dave"}],' \
          '"name":"<NAME>","website":"example.com"}'
    # serialize() defaults to bytes; toBytes=False yields str.
    assert sz.serialize(m1) == m1s.encode()
    assert sz.serialize(m1, toBytes=False) == m1s
    assert sz.serialize(m2) == m2s.encode()
    assert sz.serialize(m2, toBytes=False) == m2s
    assert sz.serialize(m3) == m3s.encode()
    assert sz.serialize(m3, toBytes=False) == m3s
    # deserialize() accepts both str and bytes input.
    assert sz.deserialize(m1s) == m1
    assert sz.deserialize(m1s.encode()) == m1
    assert sz.deserialize(m2s) == m2
    assert sz.deserialize(m2s.encode()) == m2
    assert sz.deserialize(m3s) == m3
    assert sz.deserialize(m3s.encode()) == m3
| StarcoderdataPython |
80654 | <gh_stars>0
""" Welcome The User To Masonite """
from masonite.request import Request
from masonite.view import View
from events import Event
from app.League import League
from masonite import Broadcast
class WelcomeController:
    """ Controller For Welcoming The User """

    def __init__(self, view: View, request: Request):
        self.view = view
        self.request = request

    def show(self, event: Event, broadcast: Broadcast) -> View.render:
        ''' Show Welcome Template '''
        return self.view.render('index')

    def discover(self) -> View.render:
        """Shows the discover page

        With a ?search= query, filters leagues by a case-insensitive name
        match; otherwise shows the 100 most recent leagues.

        Returns:
            View.render
        """
        if self.request.input('search'):
            leagues = League.order_by('id', 'desc').get().filter(lambda league: self.request.input(
                'search').lower() in league.name.lower())
        else:
            leagues = League.order_by('id', 'desc').get().take(100)
        return self.view.render('discover', {'leagues': leagues})

    def slack(self):
        # Stubbed out: the Discord-integration flow below is kept for reference.
        return ''
        # response = IntegrationManager.driver('discord').user()
        # requests.post(
        #     'https://discordapp.com/api/webhooks/{0}/{1}'.format(
        #         response['webhook']['id'],
        #         response['webhook']['token']
        #     ),
        #     json={
        #         'content': 'Masonite was successfully integrated!',
        #         'username': 'Masonite'
        #     }
        # )
        # return response['access_token']
| StarcoderdataPython |
1729484 | <gh_stars>0
from .abstract_auto import AbstractAuto
class Nano(AbstractAuto):
    """Concrete AbstractAuto implementation that reports start/stop on stdout."""

    def start(self):
        print("Nano started")

    def stop(self):
        print("Nano stopped")
| StarcoderdataPython |
4830584 | import numpy as np
from libs.kernel import Kernel
from libs.optimizer import Optimizer
###
class SVM():
    """Support Vector Machine classifier.

    Solves the dual QP (hard-margin when C is None, soft-margin otherwise)
    through the project's Optimizer, using a linear or polynomial kernel.
    """

    def __init__(self, kernel='linear', C=None, deg=None):
        # Internal state populated by fit().
        self._kernel = None              # Gram matrix of the training data
        self._kernel_type = kernel
        self._kernel_function = None
        self._multipliers = None         # all Lagrangian multipliers
        self._lambdas = None             # multipliers of the support vectors
        self._bias = None
        self._sv = None                  # support vectors
        self._sv_Y = None                # their labels
        self._sv_idxs = None
        self.C = C                       # soft-margin penalty (None = hard margin)
        self.deg = deg if deg is not None else 3  # polynomial degree

        if self._kernel_type == 'linear':
            self._kernel_function = lambda x1, x2: Kernel.linear(x1, x2)
        elif self._kernel_type == 'poly':
            self._kernel_function = lambda x1, x2: Kernel.polynomial(x1, x2, degree=self.deg)
        else:
            raise Exception(f"SVM: invalid 'kernel={self._kernel_type}' parameter value.")

    #

    @property
    def support_vectors(self):
        return self._sv

    @property
    def support_vectors_Y(self):
        return self._sv_Y

    @property
    def bias(self):
        return self._bias

    #

    def fit(self, X, Y):
        """Train on data X with labels Y (expected in {-1, +1})."""
        ### compute the kernel
        self._kernel = SVMCore.apply_kernel(self._kernel_function, X)

        optimizer = Optimizer()
        optimizer.initialize()

        ### QP problem solution
        solution = None
        if self.C is not None:
            solution = optimizer.cvxopt_soft_margin_solve(Y, self._kernel, self.C)
        else:
            solution = optimizer.cvxopt_hard_margin_solve(Y, self._kernel)

        ### lagrangian multipliers
        self._multipliers = SVMCore.multipliers(solution)
        self._sv_idxs = SVMCore.support_vectors_indexes(self._multipliers)
        ### lambda params (filtered multipliers)
        self._lambdas = self._multipliers[self._sv_idxs]
        ### support vectors
        self._sv = X[self._sv_idxs]
        self._sv_Y = Y[self._sv_idxs]
        ### bias
        self._bias = SVMCore.bias(self._lambdas, self._kernel, self._sv_Y, self._sv_idxs)

    ### +FEATURE: hyperplane coefficients can be pre-computed (only) in the 'linear' case.
    def project(self, points):
        """Signed distance of each point from the decision hyperplane."""
        return SVMCore.hyperplane_projection(
            self._kernel_type, self._kernel_function, self._lambdas, self._sv, self._sv_Y,
            self._bias
        )(points)

    def predict(self, X_test):
        """Predicted labels (-1/+1) as the sign of the projections."""
        projections = self.project(X_test)
        return np.sign(projections)
###
class SVMCore():
    """Static numerical helpers for the SVM classifier (kernel application,
    multiplier extraction, bias computation and hyperplane projection)."""

    @staticmethod
    def apply_kernel(kernel_function, points):
        # Gram matrix: kernel evaluated between every pair of training points.
        return kernel_function(points, points.T)

    @staticmethod
    def multipliers(optimizer_solution):
        """
        The solver returns the list of optimum variables values. \\
        In our case, variables are the lagrangian multipliers.
        """
        return np.array(optimizer_solution['x']).flatten()

    @staticmethod
    def support_vectors_indexes(multipliers):
        """
        In the solution, all points `xi` having the corresponding multiplier
        `λi` strictly positive are named support vectors. All other points
        `xi` have the corresponding `λi = 0` have no effect on the classifier.
        """
        # Numerical tolerance: treat multipliers below this as zero.
        zero_threshold = 1e-5
        bool_idxs = multipliers > zero_threshold
        return np.arange(multipliers.shape[0])[bool_idxs]

    @staticmethod
    def bias(lambdas, kernel, Y, sv_idxs):
        """
        given the primal Lagrangian Formulation:
        `min Lp(w,b)` \\
        `L(w, b, λ) = (1/2 ||W||^2) - (∑ λi yi (xi • W + b)) + (∑ λi)`

        we obtain the following partial derivate of `L(W,b,λ)` (respect to `W`) \\
        `𝟃L/𝟃w = W - (∑ λi yi xi)`

        and the following partial derivate of `L(W,b,λ)` (respect to `b`): \\
        `𝟃L/𝟃b = 0` \\
        `∑ λi yi = 0`

        and then by applying the KKT (1) condition (used to have guarantees on the
        optimality of the result) from the first partial derivate `𝟃L/𝟃w` we get \\
        `𝟃L/𝟃W = 0` \\
        `W - (∑ λi yi xi) = 0` \\
        `W = λ Y X`

        now, we have that any point which:
        1. satisfies the above `∑ λi yi = 0` condition
        2. is a Support Vector `xs`
        will have the form: \\
        `ys (xs • W + b) = 1`

        also we can obtain the set `S` of Support Vectors by
        taking all the indexes `i` for which `λi > 0`.

        finally, given the set `S`, we can replace `W` with
        the above equality (where `m € S`): \\
        `ys (∑ λm ym xm • xs + b) = 1`

        using an arbitrary Support Vector `xs`, then \\
        multiplying the above equation by `ys`, using `y^2 = 1`
        and using the original problem constraint (where `m € S`): \\
        `∀i . yi (xi • W + b) -1 ≥ 0` \\
        we obtain: \\
        `ys^2 (∑ λm ym xm • xs + b) = ys` \\
        `b = ys - (∑ λm ym xm • xs)`

        instead of using an arbitrary Support Vector `xs`, it is better
        to take an average over all of the Support Vectors in `S`.
        the final formula is (where `m € S`): \\
        `b = 1/|S| (∑ ys - (∑ λm ym xm • xs))`

        NON-LINEAR CASE: \\
        hyperplane coefficients `W` formulation slightly change:
        `W - (∑ λi yi kernel(xi)) = 0` \\
        `W = (∑ λi yi kernel(xi))`
        and, as a consequence, also `b` formulation change:
        `b = 1/|S| (∑ ys - (∑ λm ym kernel(xm) • kernel(xs)))`
        """
        # Average the per-support-vector bias estimates for numerical stability.
        bias = 0
        for n in range(lambdas.shape[0]):
            bias += Y[n] - np.sum(lambdas * Y * kernel[sv_idxs[n], sv_idxs])
        bias /= lambdas.shape[0]
        return bias

    @staticmethod
    def __hyperplane_linear_coefficients(lambdas, sv, sv_Y):
        """
        LINEAR CASE (only) \\
        given the hyperplane equation \\
        `f(x) = (W • x) + b`

        and given the primal Lagrangian formulation of our problem, we
        obtain the following partial derivate of `L(W,b,λ)` (respect to `W`) \\
        `𝟃L/𝟃w = W - (∑ λi yi xi)`

        and then by applying the KKT (1) condition (used to have guarantees on
        the optimality of the result) we get \\
        `𝟃L/𝟃W = 0` \\
        `W - (∑ λi yi xi) = 0` \\
        `W = λ Y X`
        """
        X = sv
        Y = sv_Y
        coefficients_to_sum = np.array(lambdas * Y * X.T)
        return np.sum(coefficients_to_sum, axis=1)

    @staticmethod
    def hyperplane_projection(kernel_type, kernel_function, lambdas, sv, sv_Y, bias):
        """
        LINEAR CASE
        given the hyperplane coefficients `W` and a point `x'` we compute: \\
        `f(x') = W • x' + b`

        NON-LINEAR CASE \\
        (NB. hyperplane bias `b` formulation depends on hyperplane `W` formulation).
        in this case the hyperplane coefficients `W` formulation directly depend on the `kernel(x')`
        value (where `x'` are input points and `kernel` is the kernel function to apply). \\
        This because we have: \\
        `W = (∑ λi yi kernel(xi))` \\
        and for evaluating a point `x'` we need to compute: \\
        `x'_proj = W • kernel(x') + b ` \\
        which results in:
        `x'_proj = ∑ λi yi kernel(xi, x') + b `
        As a consequence, we cannot compute `W` a-priori.
        """
        def linear_projection(points):
            # W can be pre-computed once, then applied to all points at once.
            coefficients = SVMCore.__hyperplane_linear_coefficients(lambdas, sv, sv_Y)
            return np.dot(points, coefficients) + bias

        def non_linear_projection(points):
            # Kernel trick: evaluate the kernel between each point and each
            # support vector; W cannot be materialised explicitly.
            projections = np.zeros(points.shape[0])
            for (idx1, point) in enumerate(points):
                sign = 0
                for idx2 in range(sv.shape[0]):
                    sign += lambdas[idx2] * sv_Y[idx2] * kernel_function(point, sv[idx2])
                projections[idx1] = sign + bias
            return projections

        if kernel_type == 'linear':
            return lambda points: linear_projection(points)
        else:
            return lambda points: non_linear_projection(points)
| StarcoderdataPython |
4833776 | # uid.py - functions for handling Swiss business identifiers
# coding: utf-8
#
# Copyright (C) 2015 <NAME>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""UID (Unternehmens-Identifikationsnummer, Swiss business identifier).
The Swiss UID is used to uniquely identify businesses for taxation purposes.
The number consists of a fixed "CHE" prefix, followed by 9 digits that are
protected with a simple checksum.
This module only supports the "new" format that was introduced in 2011 which
completely replaced the "old" 6-digit format in 2014.
More information:
* https://www.uid.admin.ch/
* https://de.wikipedia.org/wiki/Unternehmens-Identifikationsnummer
>>> validate('CHE-100.155.212')
'CHE100155212'
>>> validate('CHE-100.155.213')
Traceback (most recent call last):
...
InvalidChecksum: ...
>>> format('CHE100155212')
'CHE-100.155.212'
"""
from stdnum.exceptions import *
from stdnum.util import clean
def compact(number):
    """Convert the number to the minimal representation. This strips
    surrounding whitespace and separators."""
    # clean() (stdnum.util) removes spaces, dashes and dots.
    return clean(number, ' -.').strip().upper()
def calc_check_digit(number):
    """Calculate the check digit for organisations. The number passed should
    not have the check digit included."""
    # Modulo-11 weighted sum over the 8 payload digits.
    weights = (5, 4, 3, 2, 7, 6, 5, 4)
    total = sum(int(digit) * weight for digit, weight in zip(number, weights))
    return str((11 - total) % 11)
def validate(number):
    """Check if the number is a valid UID. This checks the length, formatting
    and check digit."""
    number = compact(number)
    if len(number) != 12:
        raise InvalidLength()
    # Fixed country prefix for Swiss business identifiers.
    if not number.startswith('CHE'):
        raise InvalidComponent()
    if not number[3:].isdigit():
        raise InvalidFormat()
    # Last digit must match the mod-11 checksum of the 8 preceding digits.
    # (A computed value of '10' can never match a single digit, so such
    # numbers are rejected here.)
    if number[-1] != calc_check_digit(number[3:-1]):
        raise InvalidChecksum()
    return number
def is_valid(number):
    """Check if the number is a valid UID."""
    try:
        validate(number)
    except ValidationError:
        return False
    return True
def format(number):
    """Reformat the number to the standard presentation format."""
    number = compact(number)
    # CHE prefix, dash, then the nine digits in dot-separated groups of three.
    groups = [number[i:i + 3] for i in range(3, len(number), 3)]
    return number[:3] + '-' + '.'.join(groups)
| StarcoderdataPython |
1711184 | from blockchain import *
def main():
    """Build a demo blockchain, add two blocks, and print each block's fields."""
    bc = Blockchain([]).new_blockchain()
    bc.add_block("Send 1 BTC to Ivan")
    bc.add_block("Send 2 more BTC to Ivan")
    for block in bc.blocks:
        print("Prev. hash: %s\n" % (block.PrevBlockHash), type(block.PrevBlockHash))
        print("Data: %s\n" % (block.Data))
        print("Hash: %s\n" % (block.Hash))


main()
| StarcoderdataPython |
1727564 | from django.urls import path
from . import views
# Endpoint table for the data-mining API; every route maps onto a
# class-based view defined in ``views``.
urlpatterns = [
    # Run data mining
    path('get-driving-features', views.GetDrivingFeatures.as_view()),
    path('get-driving-features-epsilon-moea', views.GetDrivingFeaturesEpsilonMOEA.as_view()),
    path('get-driving-features-with-generalization', views.GetDrivingFeaturesWithGeneralization.as_view()),
    path('get-marginal-driving-features', views.GetMarginalDrivingFeatures.as_view()),
    # Generalization / simplification
    path('generalize-feature', views.GeneralizeFeature.as_view()),
    path('simplify-feature-expression', views.SimplifyFeatureExpression.as_view()),
    # Stop search
    path('stop-search', views.StopSearch.as_view()),
    # Clustering
    path('cluster-data', views.ClusterData.as_view()),
    path('get-cluster', views.GetCluster.as_view()),
    # Logical expression
    path('compute-complexity', views.ComputeComplexity.as_view()),
    path('compute-typicality', views.ComputeTypicality.as_view()),
    path('compute-complexity-of-features', views.ComputeComplexityOfFeatures.as_view()),
    path('convert-to-CNF', views.ConvertToCNF.as_view()),
    path('convert-to-DNF', views.ConvertToDNF.as_view()),
    # Get or set problem entities/parameters
    path('get-problem-parameters', views.GetProblemParameters.as_view()),
    path('set-problem-parameters', views.SetProblemParameters.as_view()),
    path('get-problem-concept-hierarchy', views.GetProblemConceptHierarchy.as_view()),
    path('set-problem-generalized-concepts', views.SetProblemGeneralizedConcepts.as_view()),
    # Import/export feature data
    path('import-feature-data', views.ImportFeatureData.as_view()),
    path('import-target-selection', views.ImportTargetSelection.as_view()),
    path('export-target-selection', views.ExportTargetSelection.as_view()),
]
| StarcoderdataPython |
41471 | import gi
import ctypes as pyc
from ctypes import pythonapi
from gi.repository import GObject as GO
# Load libgobject and declare the signature of g_type_name() so ctypes
# marshals the GType id and the returned C string correctly.
pyc.cdll.LoadLibrary('libgobject-2.0.so')
lego = pyc.CDLL('libgobject-2.0.so')
lego.g_type_name.restype = pyc.c_char_p
lego.g_type_name.argtypes = (pyc.c_ulonglong,)
# PyCapsule accessors from the CPython C API; used to unwrap the
# PyGObject API capsule below.
pythonapi.PyCapsule_GetName.restype = pyc.c_char_p
pythonapi.PyCapsule_GetName.argtypes = (pyc.py_object,)
pythonapi.PyCapsule_GetPointer.restype = pyc.c_void_p
pythonapi.PyCapsule_GetPointer.argtypes = (pyc.py_object, pyc.c_char_p)
###############################################################################
# GObject
###############################################################################
class _PyGObject_Functions(pyc.Structure):
    """ctypes mirror of the PyGObject C-API function table.

    The struct is obtained through the ``gi._gobject._PyGObject_API``
    capsule; field order must match the C definition exactly.  Only
    ``pygobject_new`` is given a real prototype because it is the only
    entry called from this module.
    """
    _fields_ = [
        ('pygobject_register_class',
         pyc.PYFUNCTYPE(pyc.c_void_p)),
        ('pygobject_register_wrapper',
         pyc.PYFUNCTYPE(pyc.c_void_p)),
        ('pygobject_lookup_class',
         pyc.PYFUNCTYPE(pyc.c_void_p)),
        ('pygobject_new',
         pyc.PYFUNCTYPE(pyc.py_object, pyc.c_void_p)),
    ]
def capsule_name(capsule):
    """Return the name stored in a PyCapsule (as bytes)."""
    name = pythonapi.PyCapsule_GetName(capsule)
    return name
def capsule_ptr(capsule):
    """Return the raw C pointer carried by a PyCapsule."""
    return pythonapi.PyCapsule_GetPointer(capsule, capsule_name(capsule))
class _PyGO_CAPI(object):
    """Static helper that wraps raw GObject pointers into Python objects.

    The PyGObject C API table is resolved lazily from the
    ``gi._gobject._PyGObject_API`` capsule on first use.
    """

    _api = None

    @classmethod
    def _set_api(cls):
        capi_addr = capsule_ptr(gi._gobject._PyGObject_API)
        cls._api = _PyGObject_Functions.from_address(capi_addr)

    @classmethod
    def to_object(cls, addr):
        if cls._api is None:
            cls._set_api()
        return cls._api.pygobject_new(addr)
###############################################################################
# GType
###############################################################################
# Conversion-category tags used in TYPES_ID below.
INT, ADDRESS, NONE, NOT_IMPLEMENTED = range(4)
# GTypes whose GValue payload is the integer value itself.
G_PY_INT = {
    (GO.TYPE_BOOLEAN, pyc.c_bool),
    (GO.TYPE_UNICHAR, pyc.c_ubyte),
    (GO.TYPE_UCHAR, pyc.c_ubyte),
    (GO.TYPE_CHAR, pyc.c_char),
    (GO.TYPE_INT, pyc.c_int),
    (GO.TYPE_UINT, pyc.c_uint),
    (GO.TYPE_FLAGS, pyc.c_uint),
}
# GTypes whose payload is reached through an address / pointer.
G_PY_ADDRESS = {
    (GO.TYPE_LONG, pyc.c_long),
    (GO.TYPE_DOUBLE, pyc.c_double),
    (GO.TYPE_ULONG, pyc.c_ulong),
    (GO.TYPE_INT64, pyc.c_longlong),
    (GO.TYPE_UINT64, pyc.c_ulonglong),
    (GO.TYPE_ENUM, pyc.c_ulonglong),
    (GO.TYPE_FLOAT, pyc.c_float),
    (GO.TYPE_STRING, pyc.c_char_p),
    (GO.TYPE_POINTER, pyc.c_void_p),
    (GO.TYPE_OBJECT, pyc.c_void_p),
    (GO.TYPE_PYOBJECT, pyc.py_object),
}
# GTypes that carry no payload at all.
G_PY_NONE = {
    (GO.TYPE_NONE, None),
    (GO.TYPE_INVALID, None),
}
# GTypes this module does not (yet) know how to convert.
G_PY_NOT_IMPLEMENTED = {
    (GO.TYPE_PARAM, None),
    (GO.TYPE_STRV, None),
    (GO.TYPE_VARIANT, None),
    (GO.TYPE_BOXED, None),
    (GO.TYPE_INTERFACE, None),
}
# Union of every known (GType, ctype) pair.
TYPES_G_PY = G_PY_INT | G_PY_ADDRESS | G_PY_NONE | G_PY_NOT_IMPLEMENTED
# hash(GType) -> (GType, ctype, conversion category) lookup table.
TYPES_ID = {hash(gt): (gt, ct, INT) for gt, ct in G_PY_INT}
_u = TYPES_ID.update
_u({hash(gt): (gt, ct, ADDRESS) for gt, ct in G_PY_ADDRESS})
_u({hash(gt): (gt, ct, NONE) for gt, ct in G_PY_NONE})
_u({hash(gt): (gt, ct, NOT_IMPLEMENTED) for gt, ct in G_PY_NOT_IMPLEMENTED})
def gtype_name_of(gtype_id=0):
    """Look up the registered name of *gtype_id* via g_type_name().

    Returns the decoded name, or the falsy raw value (None / empty
    bytes) when the id is not registered.  See
    https://github.com/GNOME/glib/blob/master/gobject/gtype.c#L3787
    """
    raw = lego.g_type_name(hash(gtype_id))
    if raw:
        return raw.decode('utf-8')
    return raw
def gtype_and_ctype_of(gtype_id=0):
    """Resolve *gtype_id* to a (GType, ctype, category) triple.

    Unknown ids are resolved by name and inherit the conversion
    category of their parent type; entirely unknown ids yield
    (None, None, NOT_IMPLEMENTED).
    """
    fallback = (None, None, NOT_IMPLEMENTED)
    triple = TYPES_ID.get(hash(gtype_id), fallback)
    if triple[0]:
        return triple
    name = gtype_name_of(gtype_id)
    if not name:
        return triple
    gtype = GO.GType.from_name(name)
    parent = TYPES_ID.get(hash(gtype.parent), fallback)
    return (gtype, pyc.c_void_p, parent[2])
def from_int(value, gtype_id):
    """Convert an integer-encoded GValue payload into a Python value.

    *value* is either the payload itself (INT category) or an address
    where the payload lives (ADDRESS category).  Returns
    (python_value, gtype, ctype, category); when the type is unknown
    the original integer is passed through unchanged.
    """
    py_value = value
    types = gtype_and_ctype_of(gtype_id)
    gtype, ctype, ctg = types
    if gtype and ctype:
        if gtype.is_a(GO.TYPE_OBJECT):
            # wrap the raw GObject pointer in a PyGObject instance
            py_value = _PyGO_CAPI.to_object(value)
        elif gtype.is_a(GO.TYPE_GTYPE):
            py_value = gtype
        elif gtype.is_a(GO.TYPE_STRING):
            py_value = ctype(value).value.decode('utf-8')
        elif ctg == INT:
            py_value = ctype(value).value
        elif ctg == ADDRESS:
            # NOTE(review): this returns the ctypes wrapper object, not
            # ``.value`` — callers appear to receive e.g. c_double
            # instances for ADDRESS types; confirm this is intended.
            py_value = ctype.from_address(value)
    return py_value, gtype, ctype, ctg
def c_to_py(value, gtype_id):
    """Shorthand for from_int() that drops the type metadata."""
    py_value, _gtype, _ctype, _ctg = from_int(value, gtype_id)
    return py_value
| StarcoderdataPython |
116056 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.exceptions import ImproperlyConfigured
try:
from edw.models.related.entity_image import EntityImageModel
EntityImageModel() # Test pass if model materialized
except (ImproperlyConfigured, ImportError):
pass
else:
from .update_images import update_images
from .update_terms import update_terms
from .update_relations import update_relations
from .update_additional_characteristics_or_marks import update_additional_characteristics_or_marks
from .update_related_data_marts import update_related_data_marts
from .update_states import update_states
from .update_active import update_active
from .force_validate import force_validate
from .make_terms_by_additional_attrs import make_terms_by_additional_attrs
from .normalize_additional_attrs import normalize_additional_attrs
from .bulk_delete import bulk_delete
| StarcoderdataPython |
3207324 | <reponame>cffbots/ewatercycle
# -*- coding: utf-8 -*-
"""Documentation about ewatercycle_parametersetdb"""
from typing import Any
from ewatercycle.parametersetdb.config import CONFIG_FORMATS, AbstractConfig
from ewatercycle.parametersetdb.datafiles import DATAFILES_FORMATS, AbstractCopier
class ParameterSet:
    def __init__(self, df: AbstractCopier, cfg: AbstractConfig):
        """Bundle the config and datafiles a model needs as input.

        Args:
            df: datafiles url container
            cfg: config container
        """
        self._cfg = cfg
        self.df = df

    def save_datafiles(self, target):
        """Copy the datafiles into the *target* directory.

        Args:
            target: Path of target directory
        """
        self.df.save(target)

    def save_config(self, target):
        """Write the configuration out under the *target* filename.

        Args:
            target: filename of config file
        """
        self._cfg.save(target)

    @property
    def config(self) -> Any:
        """The configuration as a (possibly nested) dictionary.

        Mutate keys/values here to adjust the configuration before
        calling :meth:`save_config`.
        """
        return self._cfg.config
def build_from_urls(
    config_format, config_url, datafiles_format, datafiles_url
) -> ParameterSet:
    """Assemble a ParameterSet from url descriptions.

    Args:
        config_format: Format of the file found at *config_url*
        config_url: Url of the config file
        datafiles_format: Method used to stage *datafiles_url*
        datafiles_url: Source url of the datafiles
    """
    copier = DATAFILES_FORMATS[datafiles_format](datafiles_url)
    config = CONFIG_FORMATS[config_format](config_url)
    return ParameterSet(copier, config)
| StarcoderdataPython |
3213403 | from django.conf.urls import url
from django.views.generic import TemplateView
from .views import (
sponsor_apply,
sponsor_add,
sponsor_list,
sponsor_zip_logo_files,
sponsor_detail,
sponsors_by_level_json,
sponsors_json,
individual_sponsors_json
)
# Sponsor-section routes; all views are function-based views imported above.
urlpatterns = [
    url(r"^$", sponsor_list, name="sponsor_list"),
    # JSON feeds consumed by the public site
    url(r"^sponsors.json$", sponsors_json, name="sponsors_json"),
    url(r"^sponsors-by-level.json$", sponsors_by_level_json, name="sponsors_by_level_json"),
    url(r"^individual-sponsors.json$", individual_sponsors_json, name="individual_sponsors_json"),
    # Sponsor application and management
    url(r"^apply/$", sponsor_apply, name="sponsor_apply"),
    url(r"^add/$", sponsor_add, name="sponsor_add"),
    url(r"^ziplogos/$", sponsor_zip_logo_files, name="sponsor_zip_logos"),
    url(r"^(?P<pk>\d+)/$", sponsor_detail, name="sponsor_detail"),
]
| StarcoderdataPython |
4827814 |
from app.database.base_class import Base
from app.models.product import product | StarcoderdataPython |
810 | <filename>framework/database/__init__.py<gh_stars>0
# -*- coding: utf-8 -*-
import functools
import httplib as http
import markupsafe
from django.core.paginator import Paginator
from django.db.models import Q, QuerySet
from framework.exceptions import HTTPError
def get_or_http_error(Model, pk_or_query, allow_deleted=False, display_name=None):
    """Load an instance of Model by primary key or a Q query. Raise an appropriate
    HTTPError if no record is found or if the query fails to find a unique record
    :param type Model: StoredObject subclass to query
    :param pk_or_query:
    :type pk_or_query: either
      - a <basestring> representation of the record's primary key, e.g. 'abcdef'
      - a django.db.models.Q query to uniquely select a record
    :param bool allow_deleted: allow deleleted records?
    :param basestring display_name: name used in error messages for this model
    :raises: HTTPError(404) if the record does not exist
    :raises: HTTPError(400) if no unique record is found
    :raises: HTTPError(410) if the resource is deleted and allow_deleted = False
    :raises: HTTPError(451) if the resource is both deleted and suspended
    :return: Model instance
    """
    display_name = display_name or ''
    # FIXME: Not everything that uses this decorator needs to be markupsafe, but OsfWebRenderer error.mako does...
    safe_name = markupsafe.escape(display_name)
    if isinstance(pk_or_query, Q):
        try:
            instance = Model.objects.get(pk_or_query)
        except Model.DoesNotExist:
            raise HTTPError(http.NOT_FOUND, data=dict(
                message_long='No {name} record matching that query could be found'.format(name=safe_name)
            ))
        except Model.MultipleObjectsReturned:
            raise HTTPError(http.BAD_REQUEST, data=dict(
                message_long='The query must match exactly one {name} record'.format(name=safe_name)
            ))
    else:
        # primary-key path: Model.load returns None when missing
        instance = Model.load(pk_or_query)
        if not instance:
            raise HTTPError(http.NOT_FOUND, data=dict(
                message_long='No {name} record with that primary key could be found'.format(name=safe_name)
            ))
    # A record that is both deleted AND suspended was removed for legal
    # reasons, regardless of allow_deleted.
    if getattr(instance, 'is_deleted', False) and getattr(instance, 'suspended', False):
        raise HTTPError(451, data=dict( # 451 - Unavailable For Legal Reasons
            message_short='Content removed',
            message_long='This content has been removed'
        ))
    if not allow_deleted and getattr(instance, 'is_deleted', False):
        raise HTTPError(http.GONE)
    return instance
def autoload(Model, extract_key, inject_key, func):
    """Decorator that loads a *Model* instance from a URL kwarg and
    injects it into the wrapped view function.

    The primary key is read from ``kwargs[extract_key]``; the loaded
    instance is handed to *func* as ``kwargs[inject_key]``.  Raises the
    same HTTPErrors as :func:`get_or_http_error` when loading fails.

    Typically partially applied::

        autoload_node = functools.partial(autoload, Node, 'node_id', 'node')

        @autoload_node
        def get_node(node):
            ...
    """
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        pk = kwargs.get(extract_key)
        kwargs[inject_key] = get_or_http_error(Model, pk)
        return func(*args, **kwargs)
    return wrapped
def paginated(model, query=None, increment=200, each=True, include=None):
    """Paginate a MODM query.
    :param StoredObject model: Model to query.
    :param Q query: Optional query object.
    :param int increment: Page size
    :param bool each: If True, each record is yielded. If False, pages
        are yielded.
    :param include: Optional related fields forwarded to ``.include``;
        only takes effect when *query* is also supplied.
    """
    if include and query:
        queryset = model.objects.filter(query).include(*include)
    elif query:
        queryset = model.objects.filter(query)
    else:
        # NOTE(review): *include* is silently ignored when no query is
        # given — confirm whether .all().include(*include) was intended.
        queryset = model.objects.all()
    # Pagination requires an order by clause, especially when using Postgres.
    # see: https://docs.djangoproject.com/en/1.10/topics/pagination/#required-arguments
    if isinstance(queryset, QuerySet) and not queryset.ordered:
        queryset = queryset.order_by(queryset.model._meta.pk.name)
    paginator = Paginator(queryset.all(), increment)
    for page_num in paginator.page_range:
        page = paginator.page(page_num)
        if each:
            for item in page.object_list:
                yield item
        else:
            yield page.object_list
| StarcoderdataPython |
3306808 | from SublimeLinter.lint import Linter
from SublimeLinter.lint.linter import PermanentError
from threading import Lock
import os
import sublime
import logging
import requests
import time
import subprocess
logger = logging.getLogger('SublimeLinter.plugin.checkstyle')
# Serialises jar downloads across concurrent lint runs.
lock = Lock()
# Module-wide cache of the latest released checkstyle version (polled once).
CURRENT_LATEST_CS_VERSION = None
# GitHub release asset layout for the all-in-one checkstyle jar.
DOWNLOAD_BASE_URL = 'https://github.com/checkstyle/'\
    'checkstyle/releases/download/'
GITHUB_RELEASES_API_URL = 'https://api.github.com/repos/checkstyle/'\
    'checkstyle/releases/latest'
# Sub-folder of Sublime's "Package Storage" used to cache jars.
CACHE_FOLDER_NAME = 'SublimeLinter-checkstyle'
def show_download_progress(finished, total) -> None:
    """
    Render download progress in the Sublime status bar.

    :param finished: A measure how much is finished
    :type finished: double or None
    :param total: A measure of the total amount to download
    :type total: double or None
    :returns: Nothing
    :rtype: None
    """
    # Missing measures mean the total size is unknown: show an
    # indeterminate message instead of a percentage.
    if not (finished and total):
        sublime.status_message('Downloading Checkstyle...')
        return
    percent = finished * 100 / total
    sublime.status_message('Downloading Checkstyle: {0:2.2f}%'.format(percent))
def download_file(url, file_name, show_progress) -> None:
    """
    Stream *url* into *file_name*, reporting progress.

    :param url: The url to download from
    :type url: string
    :param file_name: The path to the file to download to
    :type file_name: string
    :param show_progress: Callback invoked at most once per second with
        (bytes_done, bytes_total); called with (None, None) when the
        server reports no Content-Length.
    :returns: Nothing
    :rtype: None
    """
    response = requests.get(url, stream=True)
    total = response.headers.get('content-length')
    with open(file_name, 'wb') as target:
        if not total:
            # unknown size: download in one go
            target.write(response.content)
        else:
            total = int(total)
            done = 0
            last_tick = 0
            for chunk in response.iter_content(chunk_size=4096):
                now = int(time.time())
                if last_tick != now:
                    show_progress(done, total)
                    last_tick = now
                done += len(chunk)
                target.write(chunk)
    # Reset the status message, then report completion.
    show_progress(None, None)
    show_progress(1, 1)
def jar_filename(version) -> str:
    """File name of the all-in-one checkstyle jar for *version*."""
    template = 'checkstyle-{}-all.jar'
    return template.format(version)
def jar_path(version) -> str:
    """Absolute path of the cached jar for *version*."""
    directory = plugin_dir()
    return os.path.abspath(os.path.join(directory, jar_filename(version)))
def plugin_dir() -> str:
    """Package-storage directory used to cache downloaded jars."""
    storage = os.path.join(sublime.cache_path(), "..", "Package Storage",
                           CACHE_FOLDER_NAME)
    return os.path.abspath(storage)
def download_url(version) -> str:
    """GitHub release URL of the checkstyle jar for *version*."""
    release_path = 'checkstyle-{}/'.format(version)
    return DOWNLOAD_BASE_URL + release_path + jar_filename(version)
def fetch_latest_cs_version() -> str:
    """
    Fetches the latest Checkstyle version via the GitHub releases API.
    Stores the version string in a module-wide variable to check only once.
    :returns: The latest checkstyle version, or None when it cannot be
        determined (e.g. no network connection)
    :rtype: str
    """
    global CURRENT_LATEST_CS_VERSION
    if (CURRENT_LATEST_CS_VERSION is None):
        # NOTE(review): the two adjacent literals concatenate without a
        # space ("checkstyleversion") in the log output.
        logger.info('Polling current checkstyle'
                    'version via GitHub API')
        try:
            resp = requests.get(GITHUB_RELEASES_API_URL)
            data = resp.json()
            release_tag = data['tag_name']
            # Release tags look like "checkstyle-8.45"; keep the part
            # after the dash.
            CURRENT_LATEST_CS_VERSION = release_tag.split('-')[1]
            logger.info('Latest checkstyle version on Maven is {}'
                        .format(CURRENT_LATEST_CS_VERSION))
        except requests.ConnectionError:
            # Network failure: leave the cache empty and return None so
            # the caller can fall back to an already-downloaded jar.
            logger.warning('Latest cs version could not be fetched!')
    return CURRENT_LATEST_CS_VERSION
def cleanup(keep) -> None:
    """
    Delete every file in the plugin directory except *keep*.

    :param keep: A full path to the file to keep (pass None to wipe the
        whole cache directory)
    :type keep: str
    :returns: Nothing
    :rtype: None
    """
    base = plugin_dir()
    for name in os.listdir(base):
        candidate = os.path.abspath(os.path.join(base, name))
        if candidate == keep:
            continue
        logger.info('Removing jar: {}'.format(candidate))
        os.remove(candidate)
def delete_corrupted_jars() -> None:
    """Wipe the jar cache if any cached jar fails a smoke test.

    Each jar is executed with ``java -jar <jar> --version``; a non-zero
    exit code (e.g. a jar truncated by an interrupted download) triggers
    a full cache cleanup so the jar is re-downloaded on demand.
    """
    if not os.path.isdir(plugin_dir()):
        return
    for f in os.listdir(plugin_dir()):
        abs_path = os.path.abspath(os.path.join(plugin_dir(), f))
        # subprocess.STARTUPINFO only exists on Windows; the original
        # unconditional use raised AttributeError on Linux/macOS.
        startupinfo = None
        if hasattr(subprocess, 'STARTUPINFO'):
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        ret_code = subprocess.call(['java', '-jar', abs_path, '--version'],
                                   startupinfo=startupinfo)
        if ret_code != 0:
            logger.info('{} is corrupted'.format(abs_path))
            cleanup(None)


delete_corrupted_jars()
class Checkstyle(Linter):
    """SublimeLinter backend that shells out to the checkstyle CLI jar.

    ``cmd`` lazily resolves (and, when needed, downloads) the jar before
    building the command line for each lint run.
    """

    # Parses checkstyle's plain console output, e.g.
    #   [ERROR] /path/File.java:12:34: message
    regex = (r'^\[(?:(?P<warning>WARN)|(?P<error>ERROR))\]\s'
             r'(?P<filename>.*?):(?P<line>\d+):(?:(?P<col>\d+):)?\s'
             r'(?P<message>.*)$')
    multiline = True
    # '-' tells SublimeLinter to lint the file on disk, not a temp copy.
    tempfile_suffix = '-'
    defaults = {
        'selector': 'source.java',
        'config': 'google_checks.xml',
        'version': 'latest'
    }

    def cmd(self):
        """Return the full checkstyle invocation as a tuple.

        Falls back to any already-cached jar when the desired version
        cannot be determined or downloaded; raises PermanentError when
        no usable jar exists at all.
        """
        version = self.cs_version()
        checkstyle_jar = None
        if version is not None:
            logger.info('Using Checkstyle {}'.format(version))
            try:
                # serialise jar downloads across concurrent lint runs
                lock.acquire()
                checkstyle_jar = self.provide_jar(version)
                lock.release()
            except requests.ConnectionError:
                pass # checkstyle jar is None
        if checkstyle_jar is None or not os.path.isfile(checkstyle_jar):
            # Search existing jar if version not clear or jar not downloaded
            jars = os.listdir(plugin_dir())
            if jars:
                checkstyle_jar = os.path.join(plugin_dir(), jars[0])
                logger.warning('Checkstyle version cannot be '
                               'determined or downloaded. '
                               'Using existing jar {}'.format(checkstyle_jar))
            else:
                logger.error('Checkstyle version cannot be '
                             'determined or downloaded. '
                             'Check version setting and network connection')
                self.notify_failure()
                raise PermanentError()
        # Build command
        command = ['java', '-jar', '{}'.format(checkstyle_jar)]
        checkstyle_config = self.settings.get('config')
        logger.info('Using checkstyle config: {}'.format(checkstyle_config))
        command += ['-c', '{}'.format(checkstyle_config)]
        command += ['${file_on_disk}']
        command = tuple(command)
        return command

    def cs_version(self) -> str:
        """
        Returns the checkstyle version to use
        :returns: A string representing the checkstyle version
            or None if it could not be determined
        :rtype: str
        """
        global CURRENT_LATEST_CS_VERSION
        version = self.settings.get('version')
        if version == 'latest':
            return fetch_latest_cs_version() # Can be None
        else:
            return version

    def provide_jar(self, version) -> str:
        """
        Checks if the jar is locally available. If not initiates a download.
        :returns: the path to the jar
        :rtype: str
        """
        checkstyle_jar = jar_path(version)
        if os.path.isfile(checkstyle_jar):
            logger.info('Using existing jar: ' + checkstyle_jar)
        else:
            logger.info('{} does not exists'.format(checkstyle_jar))
            logger.info("Make sure folder exists")
            os.makedirs(plugin_dir(), exist_ok=True)
            url = download_url(version)
            logger.info("Downloading from {}".format(url))
            download_file(url, checkstyle_jar, show_download_progress)
            # drop every other cached jar so only this version remains
            cleanup(checkstyle_jar)
        return checkstyle_jar
| StarcoderdataPython |
3398596 | <filename>save4life/settings.py<gh_stars>0
from __future__ import absolute_import
"""
Django settings for save4life project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


def path(*a):
    """Join path components onto the project base directory."""
    return os.path.join(BASE_DIR, *a)


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# The original line was an unterminated string literal ('<KEY>) — a
# SyntaxError as committed; closed here.
# NOTE(review): the key is hard-coded in the repo; rotate it and load it
# from the environment before deploying.
SECRET_KEY = '<KEY>'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []
# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # third-party / project apps
    'charsleft_widget',
    'ussd',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'save4life.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'save4life.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# dj_database_url immediately overrides the default above; it reads
# DATABASE_URL from the environment and falls back to the same sqlite
# file when the variable is not set.
import dj_database_url
DATABASES['default'] = dj_database_url.config(default='sqlite:///{0}'.format(path('db.sqlite3')))
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/

LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/

STATIC_URL = '/static/'
STATIC_ROOT = path('static_serve')

####### Celery config #######
BROKER_URL = os.environ.get('BROKER_URL', 'amqp://guest:guest@localhost//')

from celery.schedules import crontab

# Periodic tasks: reminder SMSes every 10 minutes, weekly jobs on
# Monday mornings (UTC).
CELERYBEAT_SCHEDULE = {
    'send_reminders': {
        'task': 'ussd.tasks.send_messages',
        'schedule': crontab(minute='*/10'),
    },
    'weekly_rewards': {
        'task': 'ussd.tasks.calculate_weekly_streaks',
        'schedule': crontab(hour=8, minute=0, day_of_week='monday'),
    },
    'weekly_report': {
        'task': 'ussd.tasks.send_weekly_report',
        'schedule': crontab(hour=10, minute=0, day_of_week='monday'),
    },
}

# Url to Junebug channel for sending SMSes
# NOTE(review): the fallback 'http://locahost/' looks like a typo for
# 'http://localhost/'; it only applies when JUNEBUG_SMS_URL is unset.
JUNEBUG_SMS_URL = os.environ.get('JUNEBUG_SMS_URL', 'http://locahost/')

# Airtime API credentials (all supplied via environment variables)
AIRTIME_WSDL_URL = os.environ.get('AIRTIME_WSDL_URL')
AIRTIME_TERMINAL_NUMBER = os.environ.get('AIRTIME_TERMINAL_NUMBER')
AIRTIME_MSISDN = os.environ.get('AIRTIME_MSISDN')
AIRTIME_PIN = os.environ.get('AIRTIME_PIN')
3269342 | from django.test import TestCase
from django.core.urlresolvers import reverse
from utils.auth_tokens import new_auth_token
import json
# Create your tests here.
class GetPricesTestCase(TestCase):
    """Integration tests for the prices:get_prices endpoint."""

    def test_missing_exchange(self):
        """When there is no 'exchange' parameter, an error should be returned.

        NOTE(review): the expected message below matches the bad-params
        case rather than the missing-params case — confirm against the
        view's actual behavior.
        """
        response = self.client.get(
            reverse("prices:get_prices"),
            {"pair": "btc_usd"},
            HTTP_AUTHORIZATION=new_auth_token()
        )
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            json.loads(response.content)["error"],
            "Exchange/trading pair combination not found."
        )

    def test_missing_pair(self):
        """When there is no 'pair' parameter, an error should be returned.

        Renamed: this was a second ``test_missing_exchange`` definition,
        which silently shadowed the first one so that test never ran.
        """
        response = self.client.get(
            reverse("prices:get_prices"),
            {"exchange": "btc-e"},
            HTTP_AUTHORIZATION=new_auth_token()
        )
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            json.loads(response.content)["error"],
            "Must specify 'exchange' and 'pair' parameters."
        )

    def test_bad_params(self):
        """When an invalid combination of exchange / pair params is supplied,
        an error should be returned.
        """
        response = self.client.get(
            reverse("prices:get_prices"),
            {"exchange": "btc-e", "pair": "349g4ubfref"},
            HTTP_AUTHORIZATION=new_auth_token()
        )
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            json.loads(response.content)["error"],
            "Exchange/trading pair combination not found."
        )

    def test_success(self):
        """When all params are correct, a list of prices should be returned."""
        response = self.client.get(
            reverse("prices:get_prices"),
            {"exchange": "btc-e", "pair": "btc_usd"},
            HTTP_AUTHORIZATION=new_auth_token()
        )
        self.assertEqual(response.status_code, 200)
        result = json.loads(response.content)
        for price in result["result"]:
            # every price must be a plain JSON number
            self.assertIsInstance(price, (int, float))

    def test_limit_days(self):
        """When all params are correct and 'days' is specified, a limited
        number of prices should be returned.
        """
        response = self.client.get(
            reverse("prices:get_prices"),
            {"exchange": "btc-e", "pair": "btc_usd", "days": 1},
            HTTP_AUTHORIZATION=new_auth_token()
        )
        self.assertEqual(response.status_code, 200)
        result = json.loads(response.content)
        # one price per minute -> at most 1440 entries per day
        self.assertTrue(len(result["result"]) <= 1440)
3232525 | <gh_stars>0
"""Routes for logged-in application."""
from flask import Blueprint, redirect, jsonify, request, render_template
from datetime import date, timedelta, datetime
from databases.models import Users, Funnels, Sku, Parentsku, Quantities, Sales, Emails, Notifs, SuppressedEmails
import smtplib, ssl
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import requests
import dropbox
import csv
import os
import json
import pandas as pd
from decimal import Decimal
from appdb import db
# Blueprint Configuration
main_bp = Blueprint('main_bp', __name__,
template_folder='assets/templates',
static_folder='assets')
@main_bp.route('/dashboard', methods=['GET'])
def dashboard():
    """Redirect the logged-in dashboard to the site root.

    The old template-rendering implementation was unreachable dead code
    sitting after the return statement and has been removed.
    """
    return redirect("/")
class global_suppression():
    """Suppress an email address across every integrated ESP.

    Each method posts the address to one provider's suppression or
    unsubscribe endpoint and prints the raw response for operator
    visibility.  Loop variables were renamed so they no longer shadow
    the ``list`` builtin; endpoint URLs, credentials and payloads are
    unchanged.
    """

    def waypointsoftware(self, email):
        """Suppress *email* via the Waypoint Software webhook."""
        print("\n\nWaypoint Software \n")
        url = "https://jkwenterprises.waypointsoftware.io/webhooks/"
        payload = { 'email' : email, 'xauthentication' : '207b26e4120a8fff9734e18bbfd4a52d' }
        headers = {
            'Content-Type': 'application/json',
            'X-Clickfunnels-Webhook-Delivery-Id': ''
        }
        response = requests.request("POST", url, headers=headers, json=payload)
        print(response.text.encode('utf8'))

    def earnware(self, email):
        """Mark *email* as suppressed in Earnware."""
        url = "https://api.earnware.com/production/contacts"
        payload = 'userId=8fb65c4442c6098b29ed098ad137debe&sourceId=a0da1b0537b243818168a9713b6750c0&status=suppressed&email=' + email
        headers = {
            'Content-Type': 'application/x-www-form-urlencoded'
        }
        response = requests.request("POST", url, headers=headers, data=payload)
        print(response.text)

    def sendy(self, email):
        """Unsubscribe *email* from every configured Sendy installation."""
        print("\n\nSendy \n")
        # (unsubscribe url, api key, list id) per Sendy installation
        esp_data_arr = [
            ['https://patriotpoweredpromotions.com/unsubscribe', 'RmVFUJOzwW3fLMyotPVJ', 'FU9DDFshFtQDa58925zvmvcg'],
            ['https://boomerwebmail.com/unsubscribe', '8fzj8s8zG4F409ems4v5', 'cjHwS8x892gLuUuGn6uZ9pCQ'],
            ['https://patriotpoweredemail.com/unsubscribe', 'DuSfSlBJ30pPuc4ERhU0', 'LNoJG4a0ndx5tLGonFx763kw'],
            ['https://patriotpoweredoffers.com/unsubscribe', 'VOtndGvaVaR3ntrqSebL', 'xTVSUnctdBBbXuLfl19892Sw']
        ]
        for account in esp_data_arr:
            url = account[0]
            payload = 'email={0}&api_key={1}&list={2}'.format(email, account[1], account[2])
            headers = {
                'Content-Type': 'application/x-www-form-urlencoded'
            }
            response = requests.request("POST", url, headers=headers, data=payload)
            print(response.text.encode('utf8'))

    def campaigner(self, email):
        """Remove *email* from every configured Campaigner account."""
        print("\n\nCampaigner \n")
        esp_data_arr = ['ed040a03-e570-4751-b34d-c368179bb81c', 'cd58ccf8-46d7-465b-a8f2-f1cba6bf0c91', 'ef4c777b-07b1-483f-9e5d-d2ad667f345a']
        for api_key in esp_data_arr:
            url = "https://edapi.campaigner.com/v1/Subscribers/Remove"
            payload = {"EmailAddress": email}
            headers = {
                'Content-Type': 'application/json',
                'ApiKey': '{0}'.format(api_key)
            }
            response = requests.request("POST", url, headers=headers, json=payload)
            print(response.text.encode('utf8'))

    def inboxfirst(self, email):
        """Add *email* to the Inbox First suppression list(s)."""
        print("\n\nInbox First \n")
        esp_data_arr = [2711]
        for list_id in esp_data_arr:
            url = f"http://if.inboxfirst.com/ga/api/v2/suppression_lists/{list_id}/suppressed_addresses/create_multiple"
            payload = { "data": [email]}
            headers = {
                'Authorization': 'Basic MzgzOmIxZGYxZjMyYjNjOWE5MThlOTYzMmY2ZTA3YTlmZWRhZTk3OTYzZWQ='
            }
            response = requests.request("POST", url, headers=headers, json=payload)
            print(response.text.encode('utf8'))

    def sendlane(self, email):
        """Unsubscribe *email* from every configured Sendlane account."""
        print("\n\nSend Lane \n")
        # (api key, hash) per Sendlane account
        esp_list = [
            ['6cbc8b0030e8e2b', '776db60957345ec2796b2ef3ad4f522b'],
            ['7ab94fb1817d571', '730d49d15cfa45315464a60f3a874124']
        ]
        url = "https://sendlane.com/api/v1/unsubscribe"
        for credentials in esp_list:
            payload = {'api': credentials[0],
                       'hash': credentials[1],
                       'email': email,
                       'optional': '1'}
            files = [
            ]
            headers = {}
            response = requests.request("POST", url, headers=headers, data=payload, files=files)
            print(response.text.encode('utf8'))

    def inboxgenie(self, email):
        """Opt *email* out of every configured Inbox Genie property."""
        print(" \n\n Inbox Genie \n")
        # (click-domain base url, affiliate id) per property
        esp_arr = [
            ['http://click.conservativeheadlinenews.com', '88'],
            ['http://click.firearmslifenews.com', '38'],
            ['http://click2.patriotpoweredpublishing.com', '24'],
            ['http://click.patriotpoweredpublishing.com', '104'],
            ['http://click.economiccrisisreport.com', '107']
        ]
        path = '/Pages/EmailOptout.aspx?email={0}&aid={1}&AP=1'
        for site in esp_arr:
            url = site[0] + path.format(email, site[1])
            payload = { "data": [email]}
            headers = {
            }
            response = requests.request("POST", url, headers=headers, json=payload)
            print(response.text.encode('utf8'))

    def add_event(self, email):
        """Record the suppression in the internal inventory service."""
        print("\n\nLead was added into database \n")
        url = "https://inventory.jkwenterprises.com/suppression/log"
        payload = { 'email' : email }
        headers = {
            'Content-Type': 'application/json'
        }
        response = requests.request("POST", url, headers=headers, json=payload)
        print(response.text.encode('utf8'))
@main_bp.route('/suppression/log', methods=['POST'])
def lead_2db():
    """Persist a suppressed-email event posted as JSON ({"email": ...}).

    Returns the insertion timestamp on success, or the error text with
    an HTTP 400 on failure.
    """
    raw_body = request.get_data()
    data = json.loads(raw_body)
    try:
        log = SuppressedEmails(email=data['email'],
                               date_inserted=datetime.now().strftime("%m/%d/%Y, %H:%M:%S"))
        db.session.add(log)
        db.session.commit()
        return (datetime.now().strftime("%m/%d/%Y, %H:%M:%S"))
    except Exception as ex:
        # The original returned (str(ex), 'hi') — an invalid status value
        # left over from debugging; report a proper 400 instead.
        return (str(ex), 400)
@main_bp.route('/suppressv2/<email>', methods=['GET'])
def suppressv2_email(email):
    """Placeholder v2 endpoint: echoes the address without suppressing it."""
    result = 'Success -' + email
    return result
@main_bp.route('/suppress/<email>', methods=['GET'])
def suppress_email(email):
    """Suppress *email* across every integrated ESP, log it, and render
    a confirmation page.

    On any failure a generic message is returned; the original bare
    ``except:`` also swallowed SystemExit/KeyboardInterrupt and has been
    narrowed to ``except Exception`` (user-facing message unchanged).
    """
    try:
        gs = global_suppression()
        gs.earnware(email)
        gs.waypointsoftware(email)
        gs.campaigner(email)
        gs.inboxfirst(email)
        gs.inboxgenie(email)
        gs.sendlane(email)
        gs.sendy(email)
        log = SuppressedEmails(email=email,
                               date_inserted=datetime.now().strftime("%m/%d/%Y, %H:%M:%S"))
        db.session.add(log)
        db.session.commit()
        return render_template("emails.html", suppressed_email=email)
    except Exception:
        return ('Something Has Failed. Get In Touch With Connor.')
@main_bp.route('/funnel-notifs', methods=['GET'])
def funnel_alerts():
    """Render the funnel-notifications front-end page."""
    return render_template("notifications_fe.html")
@main_bp.route('/json/parentsku', methods=['GET'])
def dropdownjson():
    """Return all parent SKUs as [{'id': n, 'name': sku}, ...] JSON.

    Ids are 1-based positions in the (NULL-filtered) query result, as
    before; the ``!= None`` comparison, index loop and append loop were
    replaced with their idiomatic equivalents.
    """
    p_skus = db.session.query(Parentsku).all()
    # Skip rows whose parent_sku is NULL.
    arr_skus = [row.parent_sku for row in p_skus if row.parent_sku is not None]
    dropdown_object = [
        dropdown_items(index, name).asdict()
        for index, name in enumerate(arr_skus, start=1)
    ]
    return jsonify(dropdown_object)
class dropdown_items():
    """Lightweight id/name pair used to feed front-end dropdown widgets."""

    def __init__(self, id, name):
        self.id = id
        self.name = name

    def asdict(self):
        """Serialize to the {'id': ..., 'name': ...} shape jsonify expects."""
        payload = {'id': self.id, 'name': self.name}
        return payload
@main_bp.route('/updatedb/<days>', methods=['GET'])
def update_db(days):
    """Recompute per-parent-SKU sale quantities over a trailing window.

    `days` selects the window (1 = today only; otherwise yesterday back to
    `days` days ago) and also selects which Parentsku.day<N> column is
    persisted (only 1/3/7/14/28 map to columns). Returns
    {parent_sku: quantity} as JSON.
    """
    days = int(days)
    featured_skus = db.session.query(Parentsku).all()
    # dict.fromkeys de-duplicates while preserving first-seen order
    parent_final = list(dict.fromkeys(sku.parent_sku for sku in featured_skus))
    start_date = date.today()
    if days != 1:
        start_date = date.today() - timedelta(days=1)
        end_date = date.today() - timedelta(days=days)
        word = (db.session.query(Sku, Parentsku, Quantities)
                .join(Quantities, Quantities.child_sku == Sku.sku)
                .join(Parentsku)
                .filter(Sku.order_date <= start_date, Sku.order_date >= end_date)
                .all())
    else:
        word = (db.session.query(Sku, Parentsku, Quantities)
                .join(Quantities, Quantities.child_sku == Sku.sku)
                .join(Parentsku)
                .filter(Sku.order_date == start_date)
                .all())
    quantity = [0] * len(parent_final)
    # enumerate replaces the original repeated parent_final.index(psku)
    # calls, each of which was an O(n) scan inside the loop.
    for idx, psku in enumerate(parent_final):
        for row in word:
            if row.Parentsku.parent_sku == psku:
                quantity[idx] += row.Quantities.quantity
        new_quant = db.session.query(Parentsku).filter(Parentsku.parent_sku == psku).first()
        if days in (1, 3, 7, 14, 28):
            # setattr collapses the original five copy-pasted if-blocks
            setattr(new_quant, f'day{days}', quantity[idx])
            db.session.commit()
    return jsonify(dict(zip(parent_final, quantity)))
@main_bp.route('/emailnotifications', methods=['GET'])
def testdb():
    """Scan parent SKUs for critically low stock and email an alert.

    A weighted average of trailing sales (day3/day7/day14/day28 columns) is
    compared to on-hand stock; critical SKUs are written to a CSV snapshot,
    and SKUs that are newly critical since the previous snapshot trigger an
    email. Returns the new alerts as JSON.
    """
    sku_names = []
    sku_percent_left = []
    inbound = []
    all_skus = db.session.query(Parentsku).all()
    # previous snapshot — used below to detect which SKUs are *newly* critical
    tt = pd.read_csv('/home/jkwent/productreports/your_csv.csv', delimiter=',')
    item_arr = []
    for index, row in tt.iterrows():
        item_arr.append(row['Sku Name'])
    for sku in all_skus:
        if sku != None:
            # weighted sales estimate from the trailing windows; the exact
            # weighting rationale (4.6667, /2, /4) is undocumented — TODO confirm
            sku_avg = (sku.day3 * 4.6667 + sku.day7 * 2 + sku.day14 + sku.day28 /2) / 4
            # critical when projected sales reach >= 75% of current stock
            if sku.encorestock != None and sku.encorestock != 0 and sku_avg/sku.encorestock >= .75:
                sku_names.append(sku.parent_sku)
                sku_percent_left.append((sku_avg/sku.encorestock))
                if sku.inboundstock > 0:
                    inbound.append("Awaiting Arrival")
                else:
                    inbound.append("REORDER NOW: Only %f%s of stock left" % ((1 -(sku_avg/sku.encorestock)) * 100, "%"))
    # overwrite the snapshot with the current critical list
    df = pd.DataFrame({'Sku Name':sku_names,
                       'Stock Level Percent':sku_percent_left,
                       'Inbound':inbound,
                       'Time': datetime.now().strftime("%m/%d/%Y, %H:%M:%S")}, columns= ['Sku Name', 'Stock Level Percent', 'Inbound', 'Time'])
    df.to_csv('/home/jkwent/productreports/your_csv.csv', index=False)
    # SKUs absent from the previous snapshot are "new" alerts
    new_arr = []
    for x in range(len(sku_names)):
        if sku_names[x] not in item_arr:
            new_arr.append({'SKU': sku_names[x], 'Inbound': inbound[x] })
    if new_arr:
        # SECURITY NOTE(review): email credentials are hard-coded in source
        # (values redacted in this copy) — move them to an environment
        # variable / secret store and rotate the password.
        sender_email = "<EMAIL>"
        receiver_email = ["<EMAIL>", "<EMAIL>", "<EMAIL>", "<EMAIL>", "<EMAIL>", "<EMAIL>"]
        password = "<PASSWORD>ussucks72!"
        message = MIMEMultipart("alternative")
        message["Subject"] = "New Stock Alert"
        message["From"] = sender_email
        message["To"] = ", ".join(receiver_email)
        # Create the plain-text and HTML version of your message
        # NOTE(review): this plain-text part looks like leftover tutorial
        # boilerplate, not a stock alert — confirm it is intentional.
        text = """\
Hi,
How are you?
Real Python has many great tutorials:
www.realpython.com"""
        final_string = ""
        for item in new_arr:
            final_string += """\
<p> ***%s*** : %s </p>
""" % (item['SKU'],item['Inbound'])
        html = """\
<html>
<body>
<h4>Critical Stock Levels<h4/>
%s
<br>
<a href="https://inventory.jkwenterprises.com/">Click to go to Inventory Tracker</a>
</body>
</html>
""" % final_string
        # Turn these into plain/html MIMEText objects
        part1 = MIMEText(text, "plain")
        part2 = MIMEText(html, "html")
        # Add HTML/plain-text parts to MIMEMultipart message
        # The email client will try to render the last part first
        message.attach(part1)
        message.attach(part2)
        # Create secure connection with server and send email
        context = ssl.create_default_context()
        with smtplib.SMTP_SSL("smtp.gmail.com", context=context) as server:
            server.login(sender_email, password)
            server.sendmail(
                sender_email, receiver_email, message.as_string()
            )
    return jsonify(new_arr)
@main_bp.route('/cfwebhook', methods=['POST'])
def receive_item():
    """ClickFunnels webhook receiver.

    Purchase payloads (those carrying 'error_message') record one Sales row
    per product, merging orders placed within 5 minutes via combine_orders().
    Opt-in payloads record an Emails row built from the question/answer keys.
    """
    foo = request.get_data()
    data = json.loads(foo)
    order = data["id"]
    if 'error_message' in data:
        if data['error_message'] is None and data["products"] is not None:
            for each in data["products"]:
                order = combine_orders(data['contact']["email"], data["created_at"], order, data['funnel_id'])
                new_sku = Sales(order_id=order, email=data['contact']["email"],
                                time_created=data["created_at"], funnel_id=data['funnel_id'],
                                product_name=each['name'], price=0, product_id=each["id"])
                db.session.add(new_sku)
                db.session.commit()
        else:
            return jsonify("Card Declined"), 200
    else:
        question = None
        answer = None
        # the payload uses dynamic key names containing 'question'/'answer'
        for key in data:
            if 'question' in key:
                question = data[key]
            if 'answer' in key and data[key]:
                answer = data[key]
        if question is None:
            return jsonify("We only accept first Funnel Steps"), 500
        # Bug fix: the original used `data['event'] is 'updated'`, an identity
        # comparison against a string literal — never a reliable equality test.
        email_type = data['event'] == 'updated'
        new_sku = Emails(email=data['email'], funnel_id=data['funnel_id'],
                         time_created=data["created_at"], updated=email_type,
                         question=question, answer=answer)
        db.session.add(new_sku)
        db.session.commit()
    return jsonify(data["created_at"]), 200
def combine_orders(contact, timerange, order, funnel):
    """Merge purchases made by the same contact within a 5-minute window.

    Looks for an existing Sales row for `contact`/`funnel` created in the
    5 minutes before `timerange`; returns its order_id if found, otherwise
    the caller's `order` id unchanged.
    """
    newtimerange = datetime.strptime(timerange, "%Y-%m-%dT%H:%M:%S.000Z")
    newtimerange = newtimerange - timedelta(hours=0, minutes=5)
    orders = (db.session.query(Sales)
              .filter(Sales.time_created <= timerange,
                      Sales.time_created >= newtimerange,
                      Sales.email == contact,
                      Sales.funnel_id == str(funnel))
              .first())
    # `is None` replaces the `== None` anti-idiom
    if orders is None:
        return order
    return orders.order_id
class notif_item():
    """Payload row (via __dict__) for the funnel dropdown/config endpoints.

    Generalized, backward-compatibly: edit_funnels() calls this with four
    arguments (row, name, email, sms), but the original __init__ accepted
    only two, so that call always raised TypeError. email/sms are optional
    and only materialized when provided, so two-argument callers (and their
    __dict__-based JSON payloads) are unchanged.
    """

    def __init__(self, row, name, email=None, sms=None):
        self.id = row
        self.name = name
        # only set the optional attrs so __dict__ keeps the original shape
        # for two-argument callers
        if email is not None:
            self.email = email
        if sms is not None:
            self.sms = sms
@main_bp.route('/funnels/names', methods=['GET', 'POST'])
def send_notifs():
    """GET: list funnel names for a dropdown. POST: return one funnel's config."""
    if request.method == 'GET':
        funnels = (db.session.query(Funnels, Notifs)
                   .join(Notifs, Notifs.funnel_name == Funnels.funnel_name)
                   .all())
        # enumerate(start=1) replaces the manual num = num + 1 counter
        dropdown_object = [notif_item(num, row.Funnels.funnel_name).__dict__
                           for num, row in enumerate(funnels, start=1)]
        return jsonify(dropdown_object)
    else:
        resp = request.get_json()
        notif = Notifs.query.filter(Notifs.funnel_name == resp['funnel']).first()
        funnels = Funnels.query.filter(Funnels.funnel_name == resp['funnel']).first()
        return {'funnelid': funnels.funnel_id, 'ga-tag': funnels.view_id,
                'landingpage': funnels.optin, 'email': notif.email,
                'sms': notif.sms}, 200
@main_bp.route('/cfwebhook/funnel_webhooks/test', methods=['POST'])
def cf_webhook():
    """Acknowledge the ClickFunnels webhook verification ping."""
    return jsonify('success'), 200


@main_bp.route('/funnel_webhooks/test', methods=['POST'])
def cf_webhooks():
    """Acknowledge the alternate webhook verification path."""
    return jsonify('success'), 200
@main_bp.route('/funnel/edit/<action>', methods=['GET', 'POST'])
def edit_funnels(action):
    """GET: list funnels with their notification settings.

    POST with action == 'update': create (funnelname == 'none') or update a
    Funnels + Notifs pair; any other action deletes the pair.
    """
    if request.method == 'GET':
        funnels = (db.session.query(Funnels, Notifs)
                   .join(Notifs, Notifs.funnel_name == Funnels.funnel_name)
                   .all())
        # Bug fix: the original called notif_item(num, name, email, sms),
        # but notif_item.__init__ only accepts (row, name), so every GET
        # raised TypeError. Build the payload dicts directly instead.
        dropdown_object = [
            {'id': num, 'name': row.Funnels.funnel_name,
             'email': row.Notifs.email, 'sms': row.Notifs.sms}
            for num, row in enumerate(funnels, start=1)
        ]
        return jsonify(dropdown_object)
    else:
        funnel_object = request.get_json()
        if action == 'update':
            if funnel_object['funnelname'] == 'none':
                # 'none' sentinel means "create a brand-new funnel"
                funnel = Funnels(funnel_name=funnel_object['newfunnel'],
                                 view_id=funnel_object['gatag'],
                                 optin=funnel_object['landingpage'],
                                 funnel_id=funnel_object['funnelid'])
                db.session.add(funnel)
                notif = Notifs(funnel_name=funnel_object['newfunnel'],
                               sms=funnel_object['sms'], email=funnel_object['email'])
                db.session.add(notif)
                db.session.commit()
            else:
                funnel = Funnels.query.filter(Funnels.funnel_name == funnel_object['funnelname']).first()
                funnel.funnel_name = funnel_object['newfunnel']
                funnel.view_id = funnel_object['gatag']
                funnel.optin = funnel_object['landingpage']
                funnel.funnel_id = funnel_object['funnelid']
                db.session.commit()
                notif = Notifs.query.filter(Notifs.funnel_name == funnel_object['funnelname']).first()
                notif.funnel_name = funnel_object['newfunnel']
                notif.sms = funnel_object['sms']
                notif.email = funnel_object['email']
                db.session.commit()
        else:
            # any non-'update' action deletes the funnel and its notif row
            funnel_object = request.get_json()
            Funnels.query.filter(Funnels.funnel_name == funnel_object['funnelname']).delete()
            db.session.commit()
            Notifs.query.filter(Notifs.funnel_name == funnel_object['funnelname']).delete()
            db.session.commit()
        return jsonify("Success"), 200
@main_bp.route('/export/emails/suppressed', methods=['GET'])
def export_emails():
    """Dump the SuppressedEmails table to CSV and upload it to Dropbox.

    NOTE(review): `if (weekday)` is falsy when today.weekday() == 0, so the
    export silently skips Mondays — confirm that is intentional.
    """
    email_csv = "Unsubscribed Emails.csv"
    here = os.path.dirname(os.path.abspath(__file__))
    new_csv = os.path.join(here, email_csv)
    today = date.today()
    weekday = today.weekday()
    if (weekday):
        # SECURITY: hard-coded Dropbox access token checked into source —
        # move it to an environment variable / secret store and rotate it.
        dbx = dropbox.Dropbox('81NCrijVuKMAAAAAAAAAAdL-1URD2nif2sJUKDq06k_l4VPVUp_ETOkKAKzk7vOl')
        print(dbx.users_get_current_account())
        with open(new_csv, 'w+') as f:
            out = csv.writer(f)
            out.writerow(['email', 'date_inserted'])
            for item in db.session.query(SuppressedEmails).all():
                out.writerow([item.email, item.date_inserted])
        # (removed the redundant f.close(); the with-block already closes f)
        with open(new_csv, "rb") as f:
            # upload gives you metadata about the file
            # we want to overwite any previous version of the file
            dbx.files_upload(f.read(), f'/Suppressed Emails/{email_csv}',
                             mode=dropbox.files.WriteMode("overwrite"))
        return('Done')
    return('Done')
| StarcoderdataPython |
70877 | from rknn.api import RKNN
class RKNN_model_container():
    """Thin wrapper that loads a pre-built RKNN model and runs inference."""

    def __init__(self, model_path, target=None, device_id=None) -> None:
        rknn = RKNN()
        # Direct Load RKNN Model
        rknn.load_rknn(model_path)
        print('--> Init runtime environment')
        # `is None` replaces the `== None` anti-idiom
        if target is None:
            ret = rknn.init_runtime()
        else:
            ret = rknn.init_runtime(target=target, device_id=device_id)
        if ret != 0:
            print('Init runtime environment failed')
            exit(ret)
        print('done')
        self.rknn = rknn

    def run(self, inputs):
        """Run inference; a single array is auto-wrapped into a list."""
        # single isinstance call with a tuple replaces the or-chain
        if not isinstance(inputs, (list, tuple)):
            inputs = [inputs]
        result = self.rknn.inference(inputs=inputs)
        return result
3264763 | from django.contrib.postgres.fields import JSONField
from django.db import models
class ContentType(models.Model):
    """A content type an API accepts, identified by a slug and format string."""

    slug = models.CharField(max_length=100, unique=True)
    format = models.CharField(max_length=100, unique=True)

    def __str__(self):
        # human-readable "slug : format"
        return "{} : {}".format(self.slug, self.format)
class Method(models.Model):
    """An HTTP method name (e.g. GET, POST) an API may be called with."""

    name = models.CharField(max_length=10, unique=True)

    def __str__(self):
        return self.name
class API(models.Model):
    """An API endpoint with its allowed methods and accepted content types."""

    slug = models.CharField(max_length=100, unique=True)
    url = models.CharField(max_length=100)
    methods = models.ManyToManyField(Method)
    accepted_content_types = models.ManyToManyField(ContentType)

    def __str__(self):
        # human-readable "slug - url"
        return "{} - {}".format(self.slug, self.url)
class APICallLog(models.Model):
    """Audit record of a single call made to an API."""

    created = models.DateTimeField(auto_now_add=True)
    api = models.ForeignKey(API, on_delete=models.CASCADE)
    # request side: path, body (text and raw binary), and headers as JSON
    request_path = models.CharField(max_length=1000, blank=True)
    request_body = models.TextField(blank=True)
    request_body_binary = models.BinaryField(blank=True)
    request_headers = JSONField()
    # response side: body, numeric status, reason phrase, and content metadata
    content = models.TextField()
    status = models.IntegerField()
    reason = models.TextField()
    charset = models.CharField(blank=True, max_length=100)
    content_type = models.CharField(blank=True, max_length=100)
| StarcoderdataPython |
3337322 | <filename>src/lock_peak.py<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Program: Lock the peaks at same energy location
Version: 20210219
@author: <NAME> (GitHub: @pranabdas)
data = suv.lock_peak(data, refdata, x1='', x2='', E_col=0, I_col=9, I0_col=4)
"""
def lock_peak(data, refdata, x1='', x2='', E_col=0, I_col=9, I0_col=4):
    """Shift `data` row-wise so its normalized-intensity peak aligns with
    the peak of `refdata`, then copy refdata's energy axis onto `data`.

    The peak is searched inside the energy window [x1, x2] (defaults: the
    full refdata energy range); intensities are normalized by the I0 column
    first. Returns the aligned array, or None (after printing an error)
    when the two arrays have different shapes.
    """
    import numpy as np
    if data.shape != refdata.shape:
        print("Error! Data and reference data must have same dimensions!")
        return
    if not x1:
        x1 = refdata[0, E_col]
    if not x2:
        x2 = refdata[-1, E_col]
    # np.argmin/np.argmax replace the original where(x == min(x))[0][0]
    # dance; both select the first index at the extremum.
    x1_id = int(np.argmin(np.abs(refdata[:, E_col] - x1)))
    x2_id = int(np.argmin(np.abs(refdata[:, E_col] - x2)))
    # NOTE: the window is half-open — row x2_id itself is excluded, exactly
    # as in the original implementation.
    ref_norm = refdata[x1_id:x2_id, I_col] / refdata[x1_id:x2_id, I0_col]
    dat_norm = data[x1_id:x2_id, I_col] / data[x1_id:x2_id, I0_col]
    diff_id = int(np.argmax(dat_norm)) - int(np.argmax(ref_norm))
    if diff_id > 0:
        # peak sits too far right: drop leading rows, pad with the last row
        data = np.vstack([data[diff_id:]] + [data[-1:]] * diff_id)
    elif diff_id < 0:
        # peak sits too far left: drop trailing rows, pad with copies of row 0
        k = -diff_id
        data = np.vstack([data[:1]] * k + [data[:-k]])
    data[:, E_col] = refdata[:, E_col]
    return data
| StarcoderdataPython |
100788 | <reponame>saneletm/sample_chat_app<filename>lib/server/client.py
"""
This is a server implementation of a chat app
This is only safe to use on the same machine (as in run both the server and client on the same machine)
Further cleanup would need to be done to support real clients on different machines (as of Jan 4 2019)
"""
import select
from multiprocessing import Event
from threading import Thread
class ClientThread(Thread):
    """
    For every connection the server receive, it will create an instance of a client thread to read from
    that conn, and enqueue msg to server inbox queue if it successfully reads from the connection,
    OW, send a msg signaling failure to read, which will clean up this thread and the connection obj
    """

    def __init__(self, conn, addr, msg_queue):
        Thread.__init__(self)
        self.addr = addr            # peer address, used to tag messages
        self.conn = conn            # connected socket-like object
        self.msg_queue = msg_queue  # server inbox of (addr, message) tuples
        self.shutting_down_event = Event()

    def stop(self):
        """
        Signal for this client thead to stop
        """
        if not self.is_stopped():
            self.shutting_down_event.set()

    def is_stopped(self):
        """
        Checks if this client thread's signal to stop has been set
        """
        return self.shutting_down_event.is_set()

    def run(self):
        """
        Receive on the conn , and enqueue msg to server inbox queue
        """
        try:
            # NOTE(review): send() is given a str; a raw Python 3 socket
            # needs bytes — confirm conn encodes, or .encode() these.
            self.conn.send("Welcome to this awesome chatroom!!!")
            hi = "<" + str(self.addr) + "> " + " says hi!"
            self.msg_queue.put((self.addr, hi))
            sent_bye = False
            select_timeout = 1
            while not self.shutting_down_event.is_set():
                # try to receive from conn
                try:
                    readable, _, _ = select.select([self.conn], [], [], select_timeout)
                    if self.conn in readable:
                        message = self.conn.recv(2048)
                        # if you receive something confirmation message
                        msg_to_send = "<" + str(self.addr) + "> " + message if message else None
                        if not msg_to_send and not sent_bye:
                            # will send two msgs this time, a priliminary one to say goodby to our foos, the the empty to cleanup
                            bye = "<" + str(self.addr) + "> " + " says goodbye!"
                            self.msg_queue.put((self.addr, bye))
                            sent_bye = True  # need to have this flag since this can run for sometime before the thread stops
                        self.msg_queue.put((self.addr, msg_to_send))
                except Exception:
                    # Bug fix: this was a bare `except:`, which also caught
                    # KeyboardInterrupt here, so the outer handler below could
                    # never see it. Best-effort retry is kept for Exception only.
                    continue
        except KeyboardInterrupt:
            pass
        finally:
            # When all done, close socket and log exit
            # self.conn.close() # TODO: verify server clean closes all socket conn objects
            print("{} client thread exited ".format(self.addr))
| StarcoderdataPython |
3225325 | <reponame>anderson-attilio/runbook
######################################################################
# Cloud Routes Web Application
# -------------------------------------------------------------------
# Forms Class
######################################################################
from wtforms import Form
from wtforms import PasswordField
from wtforms.validators import Length, EqualTo
class ChangePassForm(Form):
    ''' Class that creates a Password Change Form '''

    # current password; the "<PASSWORD>" label looks like a redaction
    # artifact from the published source — TODO confirm the intended label
    old_password = PasswordField("<PASSWORD>")
    # new password: minimum 8 characters and must equal the confirm field
    password = PasswordField(
        "Password",
        validators=[
            Length(min=8, message='Password must be a minimum of 8 characters'),
            EqualTo('confirm', message="Passwords did not match")
        ]
    )
    confirm = PasswordField("Repeat Password")


# module is import-only; nothing to do when executed directly
if __name__ == '__main__':  # pragma: no cover
    pass  # pragma: no cover
| StarcoderdataPython |
3324457 | <reponame>praekeltfoundation/mc2
# models go here
| StarcoderdataPython |
3201552 | <reponame>YangXiaoo/NoteBook<gh_stars>10-100
def solver(N, K, bins):
    """XOR-propagate `bins` through a K-row staggered matrix.

    For each position i, bins[i] is XOR-ed with every value already placed
    in column i, and the result is written diagonally into rows 0..K-1 at
    columns i..i+K-1. The first N cells of row 0 are returned joined as a
    string of digits.
    """
    # -1 marks a cell that has not been written yet
    # (the original also built an unused `orig` list — removed)
    mat = [[-1 for _ in range(N + K - 1)] for _ in range(K)]
    for i in range(N):
        cur = bins[i]
        # fold in every value already present in column i
        for r in range(K):
            if mat[r][i] != -1:
                cur = cur ^ mat[r][i]
        # write the result along the diagonal starting at (0, i)
        for r in range(K):
            mat[r][i + r] = cur
    return "".join(str(x) for x in mat[0][:N])
def test():
    """Smoke-check solver() against a small hand-made input."""
    bits = [int(ch) for ch in "01"]
    result = solver(2, 6, bits)
    print(result)


if __name__ == '__main__':
    test()
3343493 | <gh_stars>10-100
"""app: handle requests."""
from typing import Dict, Tuple
import typing.io
import json
from lambda_proxy.proxy import API
app = API(name="app", debug=True)
@app.get("/", cors=True)
def main() -> Tuple[str, str, str]:
"""Return JSON Object."""
return ("OK", "text/plain", "Yo")
@app.get("/<regex([0-9]{2}-[a-zA-Z]{5}):regex1>", cors=True)
def _re_one(regex1: str) -> Tuple[str, str, str]:
"""Return JSON Object."""
return ("OK", "text/plain", regex1)
@app.get("/<regex([0-9]{1}-[a-zA-Z]{5}):regex2>", cors=True)
def _re_two(regex2: str) -> Tuple[str, str, str]:
"""Return JSON Object."""
return ("OK", "text/plain", regex2)
@app.post("/people", cors=True)
def people_post(body) -> Tuple[str, str, str]:
"""Return JSON Object."""
return ("OK", "text/plain", body)
@app.get("/people", cors=True)
def people_get() -> Tuple[str, str, str]:
"""Return JSON Object."""
return ("OK", "text/plain", "Nope")
@app.get("/<string:user>", cors=True)
@app.get("/<string:user>/<int:num>", cors=True)
def double(user: str, num: int = 0) -> Tuple[str, str, str]:
"""Return JSON Object."""
return ("OK", "text/plain", f"{user}-{num}")
@app.get("/kw/<string:user>", cors=True)
def kw_method(user: str, **kwargs: Dict) -> Tuple[str, str, str]:
"""Return JSON Object."""
return ("OK", "text/plain", f"{user}")
@app.get("/ctx/<string:user>", cors=True)
@app.pass_context
@app.pass_event
def ctx_method(evt: Dict, ctx: Dict, user: str, num: int = 0) -> Tuple[str, str, str]:
"""Return JSON Object."""
return ("OK", "text/plain", f"{user}-{num}")
@app.get("/json", cors=True)
def json_handler() -> Tuple[str, str, str]:
"""Return JSON Object."""
return ("OK", "application/json", json.dumps({"app": "it works"}))
@app.get("/binary", cors=True, payload_compression_method="gzip")
def bin() -> Tuple[str, str, typing.io.BinaryIO]:
"""Return image."""
with open("./rpix.png", "rb") as f:
return ("OK", "image/png", f.read())
@app.get(
"/b64binary", cors=True, payload_compression_method="gzip", binary_b64encode=True,
)
def b64bin() -> Tuple[str, str, typing.io.BinaryIO]:
"""Return base64 encoded image."""
with open("./rpix.png", "rb") as f:
return ("OK", "image/png", f.read())
| StarcoderdataPython |
3225422 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add self-referential expansion fields to LocationType.

    Auto-generated Django migration — edit the model and regenerate rather
    than hand-tweaking these operations.
    """

    dependencies = [
        ('locations', '0001_initial'),
    ]

    operations = [
        # underscored storage field backed by the plain 'expand_from' column
        migrations.AddField(
            model_name='locationtype',
            name='_expand_from',
            field=models.ForeignKey(related_name='+', db_column='expand_from', to='locations.LocationType', null=True, on_delete=models.CASCADE),
            preserve_default=True,
        ),
        # boolean flag, likewise stored under an un-underscored column name
        migrations.AddField(
            model_name='locationtype',
            name='_expand_from_root',
            field=models.BooleanField(default=False, db_column='expand_from_root'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='locationtype',
            name='expand_to',
            field=models.ForeignKey(related_name='+', to='locations.LocationType', null=True, on_delete=models.CASCADE),
            preserve_default=True,
        ),
    ]
| StarcoderdataPython |
1799726 | from FluGibson.nucleotide_protein_converter import NucleotideProteinConverter
from Levenshtein import distance
from Bio import SeqIO
from Bio.Alphabet import DNAAlphabet, ProteinAlphabet
import os
import pytest
package_directory = os.path.dirname(os.path.abspath(__file__))
# the tests read fixture FASTA files relative to test_data/
os.chdir(os.path.join(package_directory, 'test_data'))

# Module-level fixture shared by the tests below: run the converter once.
# NOTE(review): the name `np` shadows the conventional numpy alias.
np = NucleotideProteinConverter()
np.read_sequences('vic_np_mut2.fasta', 'vic_np_mut1_aa.fasta')
np.convert()
# print(distance(str(np.src_nt.seq), str(np.des_nt.seq)))
print(np.src_nt.seq.alphabet)
def test_convert():
    """Nucleotide edit distance must land in the observed 8..12 band."""
    # There are 4 amino-acid changes, but codons are chosen randomly, so the
    # nucleotide-level distance varies between experimentally observed bounds.
    edit_distance = distance(str(np.src_nt.seq), str(np.des_nt.seq))
    assert 8 <= edit_distance <= 12
def test_set_sequences():
    """set_sequences must reject mismatched alphabets and accept (DNA, protein)."""
    src = SeqIO.read('vic_np_mut2.fasta',
                     'fasta',
                     alphabet=DNAAlphabet())
    des = SeqIO.read('vic_np_mut1_aa.fasta',
                     'fasta',
                     alphabet=ProteinAlphabet())
    # The following should raise an error.
    # NOTE(review): all three calls sit inside one pytest.raises block, so
    # only the first ever executes — the (src, src) and (des, des) cases are
    # never actually exercised. Each belongs in its own pytest.raises.
    with pytest.raises(AssertionError):
        np.set_sequences(des, src)
        np.set_sequences(src, src)
        np.set_sequences(des, des)
    # The following should not raise an error.
    np.set_sequences(src, des)
| StarcoderdataPython |
1791632 | import claripy
from ..shellcode import Shellcode
class X86SetRegister(Shellcode):
    """Shellcode that loads a constant into an x86 register, then jumps to pc.

    raw() emits three pieces: mov <register>, value; mov <scratch>, pc;
    jmp-through-scratch — so execution resumes at `pc` with `register`
    holding `value`.
    """

    os = ["cgc", "unix"]
    arches = ["X86"]
    name = "setregister"
    # Per target register: [mov <reg>, imm32 opcode,
    #                       mov <scratch>, imm32 opcode,
    #                       jmp-through-scratch bytes].
    # Most rows use ebx as the scratch (0xbb + 0xff 0xe3); the ebx row uses
    # a different scratch so the target value is not clobbered.
    # NOTE(review): the ebx row pairs 0xb8 (mov eax) with tail 0xff 0xee —
    # verify that tail actually jumps through the register just loaded.
    codes = {
        'eax': [b"\xb8", b"\xbb", b"\xff\xe3"],
        'ebx': [b"\xbb", b"\xb8", b"\xff\xee"],
        'ecx': [b"\xb9", b"\xbb", b"\xff\xe3"],
        'edx': [b"\xba", b"\xbb", b"\xff\xe3"],
        'edi': [b"\xbf", b"\xbb", b"\xff\xe3"],
        'esi': [b"\xbe", b"\xbb", b"\xff\xe3"],
        'esp': [b"\xbc", b"\xbb", b"\xff\xe3"],
        'ebp': [b"\xbd", b"\xbb", b"\xff\xe3"],
    }

    def __init__(self, register, value, pc):
        """
        :param register: The register to set
        :param value: The value to set register to
        :param pc: Value of PC to crash at.
        """
        self.register = register
        self.value = value
        self.pc = pc

    def raw(self, arch=None):
        # `arch` is accepted for interface compatibility but unused here
        register = self.register
        value = self.value
        ip = self.pc
        # promote plain ints to 32-bit claripy bitvectors
        if isinstance(value, int):
            value = claripy.BVV(value, 32)
        if isinstance(ip, int):
            ip = claripy.BVV(ip, 32)
        try:
            code_row = [claripy.BVV(x) for x in self.codes[register]]
        except KeyError:
            raise ValueError("register '%s' does not exist" % register)
        # .reversed byte-swaps the immediates into little-endian order
        return claripy.Concat(code_row[0], value.reversed, code_row[1], ip.reversed, code_row[2])
| StarcoderdataPython |
1667682 | from functools import wraps
import tracer_deco
from app_tree import App, UnfinishedLeaf, Leaf, UNFINISHED_APP
#
# Nested Monte Carlo Search
#
# adapted from:
# <NAME>: Monte-carlo expression discovery.
# International Journal on Artificial Intelligence Tools 22.01 (2013): 1250035. APA
# NOTE(review): '<EMAIL>' below is a redaction/extraction artifact that
# replaced a decorator — almost certainly one from the imported tracer_deco
# module. As written this is not valid Python; restore the original
# decorator (same for the inner occurrence).
<EMAIL>(force_enable=True)
def nested_mc_search(root_tree, max_level, fitness, finish, is_finished, successors, advance,
                     early_end_test=lambda s: False):
    """Nested Monte-Carlo search over (partial) trees.

    Recursively explores successors: at level 0 (or on an already-finished
    tree) the tree is finished and scored; at higher levels each successor
    is searched one level down and the skeleton is advanced toward the best
    finished tree found so far.
    """
    <EMAIL>(print_from_arg=0)
    def nested_mc_search_raw(level, tree):
        # if the input tree is already finished, evaluate it
        if level == 0 or is_finished(tree):
            tree = finish(tree)
            tree.score = fitness(tree)
            return tree
        # best finished tree found
        best_finished_tree = None
        while tree is not None:
            assert tree is not None
            for tree_successor in successors(tree):
                finished_tree = nested_mc_search_raw(level - 1, tree_successor)
                if best_finished_tree is None or finished_tree.score > best_finished_tree.score:
                    best_finished_tree = finished_tree
                # optional short-circuit once a good-enough score is reached
                if early_end_test(best_finished_tree.score):
                    return best_finished_tree
            # the successors must be nonempty, so:
            assert best_finished_tree is not None
            assert is_finished(best_finished_tree)
            # move one concretization step toward the best finished tree
            tree = advance(tree, best_finished_tree)
        return best_finished_tree
    return nested_mc_search_raw(max_level, root_tree)
# advance_skeleton should return None
# when the uf_tree (old skeleton) is already finished
def check_skeleton_advancer(f):
    """Decorator validating an advance-skeleton function's contract.

    The wrapped function must keep the skeleton a skeleton of the finished
    tree and concretize exactly one symbol per call (or return None when
    the skeleton is already complete).
    """
    @wraps(f)
    def checked(old_skeleton, finished_tree):
        assert old_skeleton.is_skeleton_of(finished_tree)
        new_skeleton = f(old_skeleton, finished_tree)
        if new_skeleton is not None:
            assert new_skeleton.is_skeleton_of(finished_tree)
            # exactly one additional node must be finished by this step
            before = old_skeleton.count_finished_nodes()
            after = new_skeleton.count_finished_nodes()
            assert before + 1 == after
        return new_skeleton
    return checked
@check_skeleton_advancer
def dfs_advance_skeleton(old_skeleton, finished_tree):
    """Advance `old_skeleton` one step towards the `finished_tree`."""
    # Depth-first: try to concretize one symbol on the function side before
    # the argument side; None bubbles up when a subtree is already finished.
    if isinstance(old_skeleton, App):
        assert isinstance(finished_tree, App)
        maybe_new_fun = dfs_advance_skeleton(old_skeleton.fun, finished_tree.fun)
        if maybe_new_fun is not None:
            return App(maybe_new_fun, old_skeleton.arg)
        maybe_new_arg = dfs_advance_skeleton(old_skeleton.arg, finished_tree.arg)
        if maybe_new_arg is not None:
            return App(old_skeleton.fun, maybe_new_arg)
        return None
    if isinstance(old_skeleton, UnfinishedLeaf):
        # expand the hole to mirror the finished tree's node kind
        if isinstance(finished_tree, Leaf):
            return finished_tree
        if isinstance(finished_tree, App):
            return UNFINISHED_APP
        assert False
    if isinstance(old_skeleton, Leaf):
        # already concrete — nothing to advance here
        return None
    assert False
1601021 | '''
Pretty print: python3 -m json.tool < some.json
'''
import json
import argparse
import os
import numpy as np
import cv2
import matplotlib.pyplot as plt
def load_json(data_path, jsfile):
    """Read and parse the JSON file `jsfile` inside directory `data_path`."""
    full_path = os.path.join(data_path, jsfile)
    with open(full_path, 'r') as handle:
        return json.load(handle)
def generate_dataset(args):
    """Rasterize VIA polygon annotations into per-image segmentation maps.

    Reads a VIA (VGG Image Annotator) JSON export, fills each labelled
    polygon with a fixed per-class colour, builds a background channel, and
    saves a {filename: HxWx4 uint8 array} dict to args.save_filename.
    With --show it instead writes preview PNGs under images/.
    """
    data_dict = {}
    js = load_json(args.data_path, args.json)
    js = js["_via_img_metadata"]
    keys = js.keys()
    # one fixed colour per class; class ids are 1-based indices into this list
    rgb = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255), (0, 255, 255)]
    # NOTE(review): populated below but never used afterwards
    images_no_objs = []
    for key in keys:
        entry = js[key]
        filename = entry["filename"]
        path = os.path.join(args.data_path, filename)
        regions = entry["regions"]
        masks = []
        for region in regions:
            shape = region["shape_attributes"]
            x = shape["all_points_x"]
            y = shape["all_points_y"]
            name = region["region_attributes"]
            class_id = name["Name"]
            fmt = "%s,%s,%s,%s"
            line = fmt % (filename, x, y, class_id)
            # print(line)
            # polygon vertices reshaped into the (1, N, 2) int32 layout
            # cv2.fillPoly expects
            xy = np.array([x, y], dtype=np.int32)
            xy = np.transpose(xy)
            xy = np.reshape(xy, [1, -1, 2])
            mask = { class_id : xy }
            masks.append(mask)
        image = plt.imread(path)
        if args.show:
            # preview mode: save the raw input image
            plt.xlabel('x')
            plt.ylabel('y')
            plt.title('Input image', fontsize=14)
            fname = os.path.splitext(filename)[0]
            fname = fname + "-input.png"
            path = os.path.join("images", fname)
            plt.imshow(image)
            plt.savefig(path)
            #plt.show()
        else:
            image = np.zeros_like(image)
        # background channel: 255 everywhere, zeroed where any polygon lands
        shape = image.shape
        shape = (shape[0], shape[1])
        bg = np.ones(shape, dtype="uint8")
        bg.fill(255)
        #i = 0
        # paint each polygon with its class colour on a blank canvas
        image = np.zeros_like(image)
        #image[:] = [128, 0, 128]
        for mask in masks:
            name = list(mask)[0]
            mask = mask[name]
            cv2.fillPoly(image, mask, rgb[int(name)-1])
            # cv2.fillPoly(image, mask, rgb[i])
            #i += 1
            cv2.fillPoly(bg, mask, 0)
        if args.show:
            name = os.path.splitext(filename)[0]
            plt.xlabel('x')
            plt.ylabel('y')
            plt.title('Ground truth semantic segmentation', fontsize=14)
            fname = name + "-semantic.png"
            path = os.path.join("images", fname)
            plt.imshow(image)
            plt.savefig(path)
            #plt.show()
            #plt.xlabel('x')
            #plt.ylabel('y')
            #plt.title('Background segmentation', fontsize=14)
            #fname = name + "-bg.png"
            #path = os.path.join("images", fname)
            #plt.imshow(bg, cmap='gray', vmin=0, vmax=255)
            #plt.savefig(path)
            #plt.show()
        # stack background + colour channels: (H, W, 1+3)
        shape = (*shape, 1)
        bg = np.reshape(bg, shape)
        #print(bg.shape)
        data = np.concatenate((bg, image), axis=-1)
        # scale 255 -> 1.0 then truncate to uint8, so channels end up as
        # 0/1 class-indicator values
        data = data.astype('float32') / 255
        data = data.astype('uint8')
        data_dict[filename] = data
        print(filename, len(masks))
        if len(masks) == 0:
            images_no_objs.append(filename)
    if not args.show:
        np.save(args.save_filename, data_dict)
if __name__ == '__main__':
    # CLI: --json (annotation export), --data-path (image dir),
    # --save-filename (output .npy), --show (write preview PNGs instead)
    parser = argparse.ArgumentParser()
    parser.add_argument("-j",
                        "--json",
                        default='segmentation_train.json',
                        help='Json filename')
    parser.add_argument("-p",
                        "--data-path",
                        default='../dataset/drinks',
                        help='Path to dataset')
    parser.add_argument("--save-filename",
                        default="segmentation_train.npy",
                        help='Path to dataset')
    help_ = "Show and save images"
    parser.add_argument("--show",
                        default=False,
                        action='store_true',
                        help=help_)
    args = parser.parse_args()
    generate_dataset(args)
| StarcoderdataPython |
3250196 | #!/usr/bin/env python3
import re
def merge_intervals(intervals):
    """Merge overlapping [start, end] intervals.

    Returns the merged intervals sorted by start. As before, the input list
    is sorted in place and the returned sublists alias the input's; the one
    behavioral fix is that an empty input now returns [] instead of raising
    IndexError on intervals[0].
    """
    if not intervals:
        # bug fix: the original crashed on an empty list
        return []
    # sort by start so each interval only needs comparing with the last merged one
    intervals.sort(key=lambda interval: interval[0])
    result = [intervals[0]]
    for current in intervals:
        previous = result[-1]  # last merged interval
        # Merge when they overlap; the sort guarantees prev_0 <= curr_0:
        #   prev_0---curr_0---prev_1---curr_1
        #   prev_0---curr_0---curr_1---prev_1
        if current[0] <= previous[1]:
            previous[1] = max(previous[1], current[1])
        else:
            result.append(current)
    return result
def string_darts(target, words_lst):
    """
    Returns True if a target string can be represented with any subset of words_lst or whole words_lst
    :param target: any string, ex. "Hello there!"
    :param words_lst: list of lower cased characters or words, ex. ['a', 'bc', 'hello', '.']
    :return: True/False
    """
    # Check the easiest case if we compare 2 similar strings
    if len(words_lst) == 1 and words_lst[0] == target:
        return True
    # Remove all whitespaces, lowercase the input string
    target = target.replace(" ", "").lower()
    # Darts board: each index covered by some word flips from 0 to 1
    target_darts_board = [0] * len(target)
    for word in words_lst:
        dart = [1] * len(word)
        # Bug fix: re.escape() makes words containing regex metacharacters
        # (e.g. '.', which the docstring itself lists) match literally;
        # previously '.' matched every character.
        for m in re.finditer(re.escape(word), target):
            # Fill range in board when we found a word
            target_darts_board[m.start():m.end()] = dart
        # If all 0 are replaced with 1, then we filled the whole target!
        if sum(target_darts_board) == len(target_darts_board):
            return True
    return False
if __name__ == "__main__":
print("Running: merge_intervals")
input_intervals = [[-1, 15], [-10, 3], [13, 28], [40, 50], [50, 100]]
print(f"Input: {input_intervals}")
merged_intervals = merge_intervals(input_intervals)
print(f"Output: {merged_intervals}")
print("\n================\n")
print("Running: string_darts")
text = "I am eating a frog"
words = ["i", "eat", "ing", "am", "a", "frog"]
print(f"Test string: '{text}'")
print(f"Words array: {words}")
can_represent = string_darts(target=text, words_lst=words)
print(f"Result: {can_represent}")
| StarcoderdataPython |
4813878 | file = open("c:\myfile.txt", "r")
print(file.read()) | StarcoderdataPython |
1613446 | from typing import Type, TypeVar
from eth_typing import BLSSignature
from eth_utils import humanize_hash
from ssz.hashable_container import HashableContainer
from ssz.sedes import bytes32, bytes96, uint64
from eth2.beacon.constants import EMPTY_SIGNATURE
from eth2.beacon.typing import (
Root,
Slot,
ValidatorIndex,
default_root,
default_validator_index,
)
from .defaults import default_slot
TBeaconBlockHeader = TypeVar("TBeaconBlockHeader", bound="BeaconBlockHeader")
class BeaconBlockHeader(HashableContainer):
    """SSZ container: a beacon block with its body summarized as body_root."""

    # SSZ field order is part of the serialization format — do not reorder.
    fields = [
        ("slot", uint64),
        ("proposer_index", uint64),
        ("parent_root", bytes32),
        ("state_root", bytes32),
        ("body_root", bytes32),
    ]

    @classmethod
    def create(
        cls: Type[TBeaconBlockHeader],
        *,
        slot: Slot = default_slot,
        proposer_index: ValidatorIndex = default_validator_index,
        parent_root: Root = default_root,
        state_root: Root = default_root,
        body_root: Root = default_root,
    ) -> TBeaconBlockHeader:
        """Keyword-only constructor with zero-value defaults for every field."""
        return super().create(
            slot=slot,
            proposer_index=proposer_index,
            parent_root=parent_root,
            state_root=state_root,
            body_root=body_root,
        )

    def __str__(self) -> str:
        # humanize_hash abbreviates the 32-byte roots for readable logs
        return (
            f"[hash_tree_root]={humanize_hash(self.hash_tree_root)},"
            f" slot={self.slot},"
            f" proposer_index={self.proposer_index},"
            f" parent_root={humanize_hash(self.parent_root)},"
            f" state_root={humanize_hash(self.state_root)},"
            f" body_root={humanize_hash(self.body_root)},"
        )

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__}: {str(self)}>"
# Header with every field at its default value; used as the default
# `message` for SignedBeaconBlockHeader below.
default_beacon_block_header = BeaconBlockHeader.create()
# TypeVar so SignedBeaconBlockHeader.create() returns the calling subclass.
TSignedBeaconBlockHeader = TypeVar(
    "TSignedBeaconBlockHeader", bound="SignedBeaconBlockHeader"
)
class SignedBeaconBlockHeader(HashableContainer):
    """A beacon block header paired with its BLS signature."""

    fields = [("message", BeaconBlockHeader), ("signature", bytes96)]

    @classmethod
    def create(
        cls: Type[TSignedBeaconBlockHeader],
        *,
        message: BeaconBlockHeader = default_beacon_block_header,
        signature: BLSSignature = EMPTY_SIGNATURE,
    ) -> TSignedBeaconBlockHeader:
        # Forward both fields to the generic HashableContainer factory.
        return super().create(message=message, signature=signature)
default_signed_beacon_block_header = SignedBeaconBlockHeader.create()
| StarcoderdataPython |
4831794 | <reponame>tychoish/mongo-python-driver
# Copyright 2011-2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the pymongo common module."""
import os
import unittest
import warnings
from pymongo.connection import Connection
from pymongo.errors import ConfigurationError, OperationFailure
from test.utils import drop_collections
# Test mongod location; overridable via environment for CI.
host = os.environ.get("DB_IP", 'localhost')
port = int(os.environ.get("DB_PORT", 27017))
# "host:port" string accepted by Connection().
pair = '%s:%d' % (host, port)
class TestCommon(unittest.TestCase):
    """Verify slave_okay / safe / get_lasterror_options propagation.

    Options cascade Connection -> Database -> Collection -> Cursor, and each
    level can locally override what it inherits without affecting its parent.
    NOTE(review): requires a reachable mongod at DB_IP:DB_PORT; the final
    w=4 case additionally assumes replication is NOT configured — confirm.
    """
    def test_baseobject(self):
        # Silence deprecation noise from the legacy Connection API.
        warnings.simplefilter("ignore")
        # Defaults: everything off / empty at every level.
        c = Connection(pair)
        self.assertFalse(c.slave_okay)
        self.assertFalse(c.safe)
        self.assertEqual({}, c.get_lasterror_options())
        db = c.test
        self.assertFalse(db.slave_okay)
        self.assertFalse(db.safe)
        self.assertEqual({}, db.get_lasterror_options())
        coll = db.test
        self.assertFalse(coll.slave_okay)
        self.assertFalse(coll.safe)
        self.assertEqual({}, coll.get_lasterror_options())
        cursor = coll.find()
        self.assertFalse(cursor._Cursor__slave_okay)
        # find() can override slave_okay per-cursor.
        cursor = coll.find(slave_okay=True)
        self.assertTrue(cursor._Cursor__slave_okay)
        # Options given as Connection keyword arguments cascade down.
        c = Connection(pair, slaveok=True, w='majority',
                       wtimeout=300, fsync=True, j=True)
        self.assertTrue(c.slave_okay)
        self.assertTrue(c.safe)
        d = {'w': 'majority', 'wtimeout': 300, 'fsync': True, 'j': True}
        self.assertEqual(d, c.get_lasterror_options())
        db = c.test
        self.assertTrue(db.slave_okay)
        self.assertTrue(db.safe)
        self.assertEqual(d, db.get_lasterror_options())
        coll = db.test
        self.assertTrue(coll.slave_okay)
        self.assertTrue(coll.safe)
        self.assertEqual(d, coll.get_lasterror_options())
        cursor = coll.find()
        self.assertTrue(cursor._Cursor__slave_okay)
        cursor = coll.find(slave_okay=False)
        self.assertFalse(cursor._Cursor__slave_okay)
        # Options can also come from the MongoDB URI (';'-separated form).
        c = Connection('mongodb://%s/?'
                       'w=2;wtimeoutMS=300;fsync=true;'
                       'journal=true' % (pair,))
        self.assertTrue(c.safe)
        d = {'w': 2, 'wtimeout': 300, 'fsync': True, 'j': True}
        self.assertEqual(d, c.get_lasterror_options())
        c = Connection('mongodb://%s/?'
                       'slaveok=true;w=1;wtimeout=300;'
                       'fsync=true;j=true' % (pair,))
        self.assertTrue(c.slave_okay)
        self.assertTrue(c.safe)
        d = {'w': 1, 'wtimeout': 300, 'fsync': True, 'j': True}
        self.assertEqual(d, c.get_lasterror_options())
        db = c.test
        self.assertTrue(db.slave_okay)
        self.assertTrue(db.safe)
        self.assertEqual(d, db.get_lasterror_options())
        coll = db.test
        self.assertTrue(coll.slave_okay)
        self.assertTrue(coll.safe)
        self.assertEqual(d, coll.get_lasterror_options())
        cursor = coll.find()
        self.assertTrue(cursor._Cursor__slave_okay)
        cursor = coll.find(slave_okay=False)
        self.assertFalse(cursor._Cursor__slave_okay)
        # unset_lasterror_options() clears getlasterror options only;
        # slave_okay and safe keep their values until set explicitly.
        c.unset_lasterror_options()
        self.assertTrue(c.slave_okay)
        self.assertTrue(c.safe)
        c.safe = False
        self.assertFalse(c.safe)
        c.slave_okay = False
        self.assertFalse(c.slave_okay)
        self.assertEqual({}, c.get_lasterror_options())
        db = c.test
        self.assertFalse(db.slave_okay)
        self.assertFalse(db.safe)
        self.assertEqual({}, db.get_lasterror_options())
        coll = db.test
        self.assertFalse(coll.slave_okay)
        self.assertFalse(coll.safe)
        self.assertEqual({}, coll.get_lasterror_options())
        cursor = coll.find()
        self.assertFalse(cursor._Cursor__slave_okay)
        cursor = coll.find(slave_okay=True)
        self.assertTrue(cursor._Cursor__slave_okay)
        # Options set on a child never leak back up to its parents.
        coll.set_lasterror_options(j=True)
        self.assertEqual({'j': True}, coll.get_lasterror_options())
        self.assertEqual({}, db.get_lasterror_options())
        self.assertFalse(db.safe)
        self.assertEqual({}, c.get_lasterror_options())
        self.assertFalse(c.safe)
        db.set_lasterror_options(w='majority')
        self.assertEqual({'j': True}, coll.get_lasterror_options())
        self.assertEqual({'w': 'majority'}, db.get_lasterror_options())
        self.assertEqual({}, c.get_lasterror_options())
        self.assertFalse(c.safe)
        # slave_okay set on the db applies only to objects created AFTER
        # the change (coll above keeps its existing value).
        db.slave_okay = True
        self.assertTrue(db.slave_okay)
        self.assertFalse(c.slave_okay)
        self.assertFalse(coll.slave_okay)
        cursor = coll.find()
        self.assertFalse(cursor._Cursor__slave_okay)
        cursor = db.coll2.find()
        self.assertTrue(cursor._Cursor__slave_okay)
        cursor = db.coll2.find(slave_okay=False)
        self.assertFalse(cursor._Cursor__slave_okay)
        # Invalid option names / value types are rejected.
        self.assertRaises(ConfigurationError, coll.set_lasterror_options, foo=20)
        self.assertRaises(TypeError, coll._BaseObject__set_slave_okay, 20)
        self.assertRaises(TypeError, coll._BaseObject__set_safe, 20)
        coll.remove()
        self.assertEqual(None, coll.find_one(slave_okay=True))
        coll.unset_lasterror_options()
        coll.set_lasterror_options(w=4, wtimeout=10)
        # Fails if we don't have 4 active nodes or we don't have replication...
        self.assertRaises(OperationFailure, coll.insert, {'foo': 'bar'})
        # Succeeds since we override the lasterror settings per query.
        self.assertTrue(coll.insert({'foo': 'bar'}, fsync=True))
        drop_collections(db)
        warnings.resetwarnings()
if __name__ == "__main__":
    # Run this module's tests directly (python test_common.py).
    unittest.main()
| StarcoderdataPython |
55607 | <filename>ception/articles/urls.py<gh_stars>1-10
from django.conf.urls import patterns, url
# Route table for the articles app, using Django's legacy string-view
# `patterns()` prefix syntax: every view name below is resolved relative to
# 'ception.articles.views'.
urlpatterns = patterns('ception.articles.views',
                       url(r'^$', 'articles', name='articles'),
                       url(r'^write/$', 'write', name='write'),
                       url(r'^preview/$', 'preview', name='preview'),
                       url(r'^drafts/$', 'drafts', name='drafts'),
                       url(r'^comment/$', 'comment', name='comment'),
                       url(r'^sentence_comments/$', 'sentence_comments', name='sentence_comments'),
                       url(r'^sentence_vote/$', 'sentence_vote', name='sentence_vote'),
                       # Captured groups are passed to the views as kwargs.
                       url(r'^tag/(?P<tag_name>.+)/$', 'tag', name='tag'),
                       url(r'^edit/(?P<id>\d+)/$', 'edit', name='edit'),
                       url(r'^edit_compare/(?P<id>\d+)/$', 'edit_compare', name='edit_compare'),
                       url(r'^diff/$', 'diff_test', name='diff_test'),
                       url(r'^summary/$', 'summary_test', name='summary_test'),
                       url(r'^merge_api/$', 'merge_api', name='merge_api'),
                       url(r'^merge_second_stage/$', 'merge_second_stage', name='merge_second_stage'),
                       # url(r'^(?P<slug>[-\w]+)/$', 'article', name='article'),
                       )
| StarcoderdataPython |
3283608 | <reponame>Kedyn/SpaceInvaders
class Scene:
    """Game scene template.

    Base class for director-managed scenes; subclasses override the
    event / lifecycle hooks below as needed.  All hooks are no-ops here.
    """
    def __init__(self, director):
        # The director that owns this scene and drives its lifecycle.
        self.director = director
    def keydown(self, key):
        """Handle a key-press event."""
        pass
    def keyup(self, key):
        """Handle a key-release event."""
        pass
    def mousebuttondown(self, button, position):
        """Handle a mouse-button press at *position*."""
        pass
    def reset(self):
        """Restore the scene to its initial state."""
        pass
    def update(self):
        """Advance the scene's state by one frame."""
        pass
    def render(self):
        """Draw the scene."""
        pass
    def exit(self):
        """Clean up when the scene is left."""
        pass
| StarcoderdataPython |
3354061 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
from sklearn.metrics.cluster import (contingency_matrix,
normalized_mutual_info_score)
from sklearn.metrics import (precision_score, recall_score)
__all__ = ['pairwise', 'bcubed', 'nmi', 'precision', 'recall', 'accuracy']
def _check(gt_labels, pred_labels):
if gt_labels.ndim != 1:
raise ValueError("gt_labels must be 1D: shape is %r" %
(gt_labels.shape, ))
if pred_labels.ndim != 1:
raise ValueError("pred_labels must be 1D: shape is %r" %
(pred_labels.shape, ))
if gt_labels.shape != pred_labels.shape:
raise ValueError(
"gt_labels and pred_labels must have same size, got %d and %d" %
(gt_labels.shape[0], pred_labels.shape[0]))
return gt_labels, pred_labels
def _get_lb2idxs(labels):
lb2idxs = {}
for idx, lb in enumerate(labels):
if lb not in lb2idxs:
lb2idxs[lb] = []
lb2idxs[lb].append(idx)
return lb2idxs
def _compute_fscore(pre, rec):
return 2. * pre * rec / (pre + rec)
def fowlkes_mallows_score(gt_labels, pred_labels, sparse=True):
    """Pairwise precision, recall and F-score between two clusterings.

    Adapted from `sklearn.metrics.fowlkes_mallows_score`, but returns the
    pairwise precision/recall/F-measure instead of the geometric mean of
    precision and recall.
    """
    n_samples, = gt_labels.shape
    cont = contingency_matrix(gt_labels, pred_labels, sparse=sparse)
    # Pair counts: both clusterings agree / predicted pairs / ground-truth
    # pairs (each minus the n trivial self-pairs).
    pairs_both = np.dot(cont.data, cont.data) - n_samples
    pairs_pred = np.sum(np.asarray(cont.sum(axis=0)).ravel()**2) - n_samples
    pairs_gt = np.sum(np.asarray(cont.sum(axis=1)).ravel()**2) - n_samples
    avg_pre = pairs_both / pairs_pred
    avg_rec = pairs_both / pairs_gt
    return avg_pre, avg_rec, _compute_fscore(avg_pre, avg_rec)
def pairwise(gt_labels, pred_labels, sparse=True):
    """Validate the inputs, then compute the pairwise precision/recall/F."""
    _check(gt_labels, pred_labels)
    result = fowlkes_mallows_score(gt_labels, pred_labels, sparse)
    return result
def bcubed(gt_labels, pred_labels):
    """BCubed precision, recall and F-score of a predicted clustering."""
    _check(gt_labels, pred_labels)
    gt_clusters = _get_lb2idxs(gt_labels)
    pred_clusters = _get_lb2idxs(pred_labels)
    n_clusters = len(gt_clusters)
    pre_sum = np.zeros(n_clusters)
    rec_sum = np.zeros(n_clusters)
    cluster_sizes = np.zeros(n_clusters)
    for i, idxs in enumerate(gt_clusters.values()):
        cluster_sizes[i] = len(idxs)
        # Only predicted labels that actually appear inside this
        # ground-truth cluster can contribute.
        for lb in np.unique(pred_labels[idxs]):
            pred_idxs = pred_clusters[lb]
            shared = 1. * np.intersect1d(idxs, pred_idxs).size
            pre_sum[i] += shared**2 / len(pred_idxs)
            rec_sum[i] += shared**2 / cluster_sizes[i]
    total = cluster_sizes.sum()
    avg_pre = pre_sum.sum() / total
    avg_rec = rec_sum.sum() / total
    return avg_pre, avg_rec, _compute_fscore(avg_pre, avg_rec)
def nmi(gt_labels, pred_labels):
    """Normalized mutual information between the two labelings."""
    score = normalized_mutual_info_score(pred_labels, gt_labels)
    return score
def precision(gt_labels, pred_labels):
    """Classification precision of *pred_labels* against *gt_labels*."""
    score = precision_score(gt_labels, pred_labels)
    return score
def recall(gt_labels, pred_labels):
    """Classification recall of *pred_labels* against *gt_labels*."""
    score = recall_score(gt_labels, pred_labels)
    return score
def accuracy(gt_labels, pred_labels):
    """Fraction of positions where the two label arrays agree."""
    matches = gt_labels == pred_labels
    return np.mean(matches)
| StarcoderdataPython |
3318834 | import logging
from dataclasses import dataclass
from typing import Any, Dict, List, Union
from ideal_learning_env.defs import ILEConfigurationException, ILEException
from ideal_learning_env.numerics import MinMaxFloat
from .choosers import choose_random
from .components import ILEComponent
from .decorators import ile_config_setter
from .numerics import MinMaxInt
from .validators import ValidateNumber
logger = logging.getLogger(__name__)
@dataclass
class StepBeginEnd():
    """
    Contains a step range for a specific event.
    - `begin` (int, or list of ints, or [MinMaxInt](#MinMaxInt) dict, or list
    of MinMaxInt dicts):
    The step where the performer agent starts being frozen
    and can only use the `"Pass"` action. For example, if 1, the performer
    agent must pass immediately at the start of the scene. This is an
    inclusive limit.
    - `end` (int, or list of ints, or [MinMaxInt](#MinMaxInt) dict, or list
    of MinMaxInt dicts):
    The step where the performer agent ends being frozen and can resume
    using actions besides `"Pass"`. Therefore, this is an exclusive limit.
    """
    # Inclusive first frozen step; None is defaulted to step 1 downstream.
    begin: Union[int, MinMaxInt, List[Union[int, MinMaxInt]]] = None
    # Exclusive end step; None is defaulted to the goal's last_step + 1.
    end: Union[int, MinMaxInt, List[Union[int, MinMaxInt]]] = None
@dataclass
class TeleportConfig():
    """
    Contains data to describe when and where a teleport occurs.
    - `step` (int, or list of ints, or [MinMaxInt](#MinMaxInt) dict, or list of
    MinMaxInt dicts): The step when the performer agent is teleported.
    This field is required for teleport action restrictions.
    - `position_x` (float, or list of floats, or [MinMaxFloat](#MinMaxFloat)
    dict, or list of MinMaxFloat dicts):
    Position in X direction where the performer agent
    is teleported. This field along with `position_z` are required
    if `rotation_y` is not set.
    - `position_z` (float, or list of floats, or [MinMaxFloat](#MinMaxFloat)
    dict, or list of MinMaxFloat dicts):
    Position in Z direction where the performer agent
    is teleported. This field along with `position_x` are required
    if `rotation_y` is not set.
    - `rotation_y` (float, or list of floats, or [MinMaxFloat](#MinMaxFloat)
    dict, or list of MinMaxFloat dicts):
    Rotation in Y direction where the performer agent
    is teleported. This field is required for teleport action
    restrictions if `position_x` and `position_z` are not both set.
    """
    # 1-based step at which the EndHabituation teleport command is issued.
    step: Union[int, MinMaxInt, List[Union[int, MinMaxInt]]] = None
    # Destination X coordinate; must be paired with position_z.
    position_x: Union[float, MinMaxFloat,
                      List[Union[float, MinMaxFloat]]] = None
    # Destination Z coordinate; must be paired with position_x.
    position_z: Union[float, MinMaxFloat,
                      List[Union[float, MinMaxFloat]]] = None
    # Destination Y rotation; may be given instead of (or with) positions.
    rotation_y: Union[float, MinMaxFloat,
                      List[Union[float, MinMaxFloat]]] = None
class ActionRestrictionsComponent(ILEComponent):
    """Applies performer-agent action restrictions (passive scenes, freezes,
    and kidnap/teleport events) to an ILE scene's goal action_list."""

    passive_scene: bool = False
    """
    (bool): Determine if scene should be considered passive and the
    performer agent should be restricted to only use the `"Pass"` action.
    If true, ILE will raise an exception if last_step is not set or either
    `freezes` or `teleports` has any entries.
    """
    freezes: List[Union[StepBeginEnd, List[StepBeginEnd]]] = None
    """
    (list of [StepBeginEnd](#StepBeginEnd) dicts): When a freeze
    should occur. A freeze forces the performer agent to only `"Pass"` for a
    range of steps. User should try to avoid freeze overlaps, but if using
    ranges and choices, the ILE will retry on overlaps. This field must be
    blank or an empty array if `passive_scene` is `true`.
    """
    teleports: List[Union[TeleportConfig, List[TeleportConfig]]] = None
    """
    (list of [TeleportConfig](#TeleportConfig) dicts): When a
    kidnap/teleport will occur and where the player agent should be teleported.
    This field must contain either both position fields or the `rotation_y`
    field or an exception will be thrown. This field must be blank or an empty
    array if `passive_scene` is `true`.
    Simple Example:
    ```
    passive_scene: false
    freezes: null
    teleports: null
    ```
    Advanced Example:
    ```
    passive_scene: false
    freezes:
      -
        begin: 1
        end: 3
      -
        begin: [11, 13 ,15]
        end:
          min: 16
          max: 26
    teleports:
      -
        step: 5
        position_x: 3
        position_z: 6
        rotation_y: 45
      -
        step: [34, 36]
        position_x:
          min: -3
          max: 3
        position_z: [3, 5, 7]
      -
        step:
          min: 41
          max: 48
        rotation_y: [30, 120, 270]
    ```
    """

    @ile_config_setter()
    def set_passive_scene(self, data: Any) -> None:
        self.passive_scene = data

    def get_passive_scene(self) -> bool:
        return self.passive_scene

    @ile_config_setter(validator=ValidateNumber(
        props=['begin', 'end'], min_value=1,
        null_ok=True))
    def set_freezes(self, data: Any) -> None:
        self.freezes = data

    def get_freezes(self) -> List[StepBeginEnd]:
        """Resolve configured freeze lists/ranges into concrete values."""
        return [choose_random(f) for f in (self.freezes or [])]

    @ile_config_setter(validator=ValidateNumber(
        props=['step'], min_value=1))
    @ile_config_setter(validator=ValidateNumber(
        props=['rotation_y'], min_value=0,
        null_ok=True))
    def set_teleports(self, data: Any) -> None:
        self.teleports = data

    def get_teleports(self) -> List[TeleportConfig]:
        """Resolve configured teleport lists/ranges into concrete values."""
        return [choose_random(t) for t in (self.teleports or [])]

    # Override
    def update_ile_scene(self, scene: Dict[str, Any]) -> Dict[str, Any]:
        """Validate the restriction config and write it into the scene goal.

        Returns the (mutated) scene.  Raises ILEException or
        ILEConfigurationException on inconsistent configuration.
        """
        logger.debug('Running action restriction component...')
        scene['objects'] = scene.get('objects', [])
        goal = scene.get('goal', {})
        total_steps = goal.get('last_step')
        # Sort so overlap detection (_add_freezes) and front-to-back
        # insertion (_add_teleports) work.  A freeze with begin=None
        # defaults to step 1 later, so sort it as 1 (the original
        # comparison against None raised a TypeError).
        freezes = sorted(
            self.get_freezes(),
            key=lambda x: x.begin if x.begin is not None else 1)
        teleports = sorted(self.get_teleports(), key=lambda x: x.step)
        self._restriction_validation(freezes, teleports, total_steps)
        passive = self.get_passive_scene()
        if passive:
            goal['action_list'] = [['Pass']] * total_steps
            logger.debug('Setting whole scene as passive')
        if freezes:
            self._add_freezes(goal, freezes)
            logger.debug(f'Adding {len(freezes)} freezes to scene')
        if teleports:
            self._add_teleports(goal, teleports, passive)
            logger.debug(f'Adding {len(teleports)} teleports to scene')
        return scene

    def _add_teleports(self, goal, teleports, passive):
        """Write one EndHabituation command per teleport into action_list."""
        goal['action_list'] = goal.get('action_list', [])
        al = goal['action_list']
        for t in teleports:
            step = t.step
            cmd = "EndHabituation"
            # Compare against None rather than truthiness so legitimate
            # zero values (x=0, z=0, rotation 0) are not silently dropped.
            if t.position_x is not None:
                cmd += f",xPosition={t.position_x}"
            if t.position_z is not None:
                cmd += f",zPosition={t.position_z}"
            if t.rotation_y is not None:
                cmd += f",yRotation={t.rotation_y}"
            length = len(al)
            if step > length:
                # Pad with unrestricted steps up to the teleport step.
                al += ([[]] * (step - length))
            if al[step - 1] != [] and not passive:
                raise ILEException(
                    f"Cannot teleport during freeze at step={step - 1}")
            al[step - 1] = [cmd]

    def _add_freezes(self, goal, freezes):
        """Append forced-'Pass' ranges to the goal's action_list."""
        goal['action_list'] = goal.get('action_list', [])
        al = goal['action_list']
        limit = 1
        for f in freezes:
            f.begin = 1 if f.begin is None else f.begin
            if f.end is None:
                # Use .get() so a goal with no 'last_step' key raises the
                # configuration error below instead of a KeyError.
                if goal.get('last_step') is None:
                    raise ILEConfigurationException(
                        "Configuration error. A freeze without an 'end' "
                        "requires 'last_step' to be set.")
                else:
                    # Add one so we include the last step. End is exclusive.
                    f.end = goal['last_step'] + 1
            if (limit > f.begin):
                raise ILEException(f"Freezes overlapped at {limit}")
            if f.begin >= f.end:
                raise ILEException(
                    f"Freezes has begin >= end ({f.begin} >= {f.end})")
            num_free = f.begin - limit
            num_limited = f.end - f.begin
            al += ([[]] * (num_free))
            al += ([['Pass']] * (num_limited))
            limit = f.end

    def _restriction_validation(self, freezes, teleports, total_steps):
        """Raise ILEConfigurationException on an inconsistent config."""
        if self.get_passive_scene():
            if not total_steps or total_steps <= 0:
                raise ILEConfigurationException(
                    "Error with action restriction "
                    "configuration. When 'passive_scene'=true"
                    "total_steps must be set and greater than 0.")
            if self.freezes:
                raise ILEConfigurationException(
                    "Error with action restriction "
                    "configuration. When 'passive_scene'=true"
                    "'freezes' can not be used")
        if freezes:
            for f in freezes:
                if f.begin is None and f.end is None:
                    raise ILEConfigurationException(
                        "Error with action restriction "
                        "configuration. 'freezes' entries must have "
                        "atleast one of 'begin' or 'end' fields.")
        if teleports:
            for t in teleports:
                # Explicit None checks: a coordinate of 0 is a valid value
                # and must not be treated as "missing".
                has_x = t.position_x is not None
                has_z = t.position_z is not None
                if has_x != has_z:
                    raise ILEConfigurationException(
                        "Error with action restriction "
                        "configuration. 'teleport' entries with a "
                        "'position_x' or 'position_z' must also have the "
                        "other."
                    )
                if t.rotation_y is None and not (has_x and has_z):
                    raise ILEConfigurationException(
                        "Error with action restriction "
                        "configuration. 'teleport' entries must have either "
                        "'rotation_y' field or both 'position_x' and "
                        "'position_z' fields.")
| StarcoderdataPython |
3218650 | """Zookeeper status interface."""
import socket
import logging
import click
import kazoo
import websocket as ws_client
from treadmill import cli
from treadmill import context
from treadmill import admin
from treadmill import restclient
from treadmill import zknamespace as z
_LOGGER = logging.getLogger(__name__)
def check(func, message):
    """Run *func* and log '<message>: ok' if truthy, ': failed' otherwise."""
    result = func()
    if result:
        _LOGGER.info('%s: ok', message)
    else:
        _LOGGER.error('%s: failed', message)
def _zkadmin(hostname, port, command):
    """Send a four-letter admin *command* to Zookeeper, return the reply."""
    conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    conn.connect((hostname, port))
    conn.sendall(command)
    # Half-close the write side so the server knows the command is complete.
    conn.shutdown(socket.SHUT_WR)
    chunks = []
    while True:
        buf = conn.recv(1024)
        if not buf:
            break
        chunks.append(buf)
    conn.close()
    return ''.join(chunks)
def check_zk():
    """Ping every Zookeeper master in the cell with the 'ruok' command."""
    admin_cell = admin.Cell(context.GLOBAL.ldap.conn)
    cell = admin_cell.get(context.GLOBAL.cell)
    healthy = True
    for master in cell['masters']:
        hostname, port = master['hostname'], master['zk-client-port']
        try:
            reply = _zkadmin(hostname, port, 'ruok\n')
            _LOGGER.debug('%s:%s - %s', hostname, port, reply)
        except Exception as err:  # pylint: disable=W0703
            # Best-effort: any failure just marks the ensemble unhealthy.
            _LOGGER.error('%s:%s - %s', hostname, port, str(err))
            healthy = False
    return healthy
def _check_api(apis):
    """Probe each API endpoint; require at least two healthy instances."""
    success = len(apis) >= 2
    if not success:
        _LOGGER.error('API is under capacity: expected 2, running: %s',
                      len(apis))
    for api in apis:
        try:
            resp = restclient.get(api, '/', retries=0)
            _LOGGER.debug('%s - %r', api, resp.status_code)
        except restclient.MaxRequestRetriesError as err:
            _LOGGER.error('%s - %s', api, str(err))
            success = False
    return success
def check_cell_api():
    """Resolve and probe the cell API endpoints."""
    try:
        endpoints = context.GLOBAL.cell_api(None)
        return _check_api(endpoints)
    except context.ContextError as err:
        _LOGGER.error('Unable to resolve cell api: %r', str(err))
        return False
def check_state_api():
    """Resolve and probe the state API endpoints."""
    try:
        endpoints = context.GLOBAL.state_api(None)
        return _check_api(endpoints)
    except context.ContextError as err:
        _LOGGER.error('Unable to resolve state api: %r', str(err))
        return False
def check_admin_api():
    """Resolve and probe the admin API endpoints."""
    try:
        endpoints = context.GLOBAL.admin_api(None)
        return _check_api(endpoints)
    except context.ContextError as err:
        _LOGGER.error('Unable to resolve admin api: %r', str(err))
        return False
def check_ws_api():
    """Open a test connection to every websocket API endpoint."""
    ok = True
    try:
        for endpoint in context.GLOBAL.ws_api(None):
            try:
                ws_client.create_connection(endpoint)
                _LOGGER.debug('%s - ok.', endpoint)
            except socket.error:
                _LOGGER.error('%s - failed.', endpoint)
                ok = False
    except context.ContextError as err:
        _LOGGER.error('Unable to resolve websocket api: %r', str(err))
        ok = False
    return ok
def check_blackouts():
    """Log a warning for every server currently blacked out in Zookeeper."""
    zkclient = context.GLOBAL.zk.conn
    try:
        blacked_out_nodes = zkclient.get_children(z.BLACKEDOUT_SERVERS)
        for server in blacked_out_nodes:
            # Logger.warn() is a deprecated alias; use warning().
            _LOGGER.warning('Server blackedout: %s', server)
    except kazoo.client.NoNodeError:
        # Missing blackout node means no servers are blacked out.
        pass
def check_capacity():
    """Log cell server capacity and report any blacked-out servers."""
    zkclient = context.GLOBAL.zk.conn
    total = len(zkclient.get_children(z.SERVERS))
    blacked_out = len(zkclient.get_children(z.BLACKEDOUT_SERVERS))
    up = len(zkclient.get_children(z.SERVER_PRESENCE))
    _LOGGER.info('Server capacity - total: %s, blacked-out: %s, up: %s',
                 total, blacked_out, up)
    check_blackouts()
def init():
    """Return top level command handler.

    Builds and returns the click `ok` command that runs every status check
    in sequence, logging one ok/failed line per check.
    """
    @click.command(name='ok')
    @click.option('--cell', required=True,
                  envvar='TREADMILL_CELL',
                  callback=cli.handle_context_opt,
                  expose_value=False)
    def _ok():
        """Check status of Zookeeper ensemble."""
        # Force INFO logging so check results are visible even if the CLI
        # was started with a quieter logging configuration.
        log_level = logging.INFO
        if not logging.getLogger().isEnabledFor(log_level):
            logging.getLogger('treadmill').setLevel(log_level)
            logging.getLogger().setLevel(log_level)
        check(check_zk, 'Zookeeper ensemble')
        check_capacity()
        check(check_state_api, 'State api')
        check(check_cell_api, 'Cell api')
        check(check_admin_api, 'Admin api')
        check(check_ws_api, 'Websocket api')
    return _ok
| StarcoderdataPython |
3226328 | """
Copyright 2019 Southern California Edison
Copyright 2019 Pacific Gas and Electric Company
ALL RIGHTS RESERVED
"""
import uuid
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
import json
import requests
class GlobalSettings:
    """Central configuration constants for the packet-capture agent."""
    name = "PacketCap"
    # Network interface to sniff on.
    SNIFFER_INTERFACE = "en4"
    # Upstream API location.
    API_HOST = "192.168.81.250"
    API_PORT = "443"
    # NOTE(review): API_PORT is NOT folded into BASE_URL (netloc is the bare
    # host, so HTTPS default 443 is implied) — confirm this is intended.
    BASE_URL = urlparse.urlunparse(['https', API_HOST] + [''] * 4)
    # Wireshark/tshark protocol layers to skip when processing captures.
    EXCLUDED_PROTOCOLS = [
        "_ws.expert",
        "_ws.lua",
        "_ws.malformed",
        "_ws.number_string.decoding_error",
        "_ws.short",
        "_ws.type_length",
        "_ws.unreassembled",
        "image-gif",
        "image-jfif",
        "media",
        "png",
        "xml",
        "zip"
    ]
    # File-integrity-monitoring config: paths to watch and, optionally,
    # specific files within them.  NOTE(review): "files": "null" is a
    # string literal, presumably mirroring JSON null — confirm consumers
    # treat it as "watch the whole path".
    FIM_CONFIG = {
        "configuration": [
            {"path": "/tmp/fimd_file_testing",
             "files": ["foo.exe", "bar.exe", "bar.config", "baz.exe"]
             },
            {"path": "/tmp/fimd_path_testing",
             "files": "null"
             }
        ]
    }
    # Hard-coded device UUID; get_devuuid() only generates a fresh one
    # if this is ever None.
    _devuuid = "71b4d7d9-daf1-4ae0-9027-706c3d88d4af"
    def get_devuuid(self, online=False):
        """Return the device UUID, generating one lazily if unset.

        The `online` parameter is currently unused.  Assigning via
        `self._devuuid` creates an instance attribute when called on an
        instance (the class attribute is left unchanged).
        """
        if self._devuuid is not None:
            return self._devuuid
        else:
            self._devuuid = str(uuid.uuid4())
            return self._devuuid
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.