hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ace0f21a1d3f717c3b6375dd0198cd575b2730ed | 881 | py | Python | build.py | codl/squish | 3aaf76137951bd51c76b7c825860c4d4d9f7b974 | [
"0BSD"
] | 1 | 2018-03-04T22:44:39.000Z | 2018-03-04T22:44:39.000Z | build.py | codl/squish | 3aaf76137951bd51c76b7c825860c4d4d9f7b974 | [
"0BSD"
] | null | null | null | build.py | codl/squish | 3aaf76137951bd51c76b7c825860c4d4d9f7b974 | [
"0BSD"
] | null | null | null | #!/usr/bin/python
import os
import os.path
import glob
import hashlib
import json
# Hash every file under src/, copy it to out/, then emit the service worker
# with the asset-hash table prepended.
assetmap = {}
for found in glob.iglob('src/**/*', recursive=True):
    relpath = found[4:]  # drop the leading 'src/' prefix
    if relpath == 'serviceworker.js' or not os.path.isfile('src/' + relpath):
        continue
    with open('src/' + relpath, 'rb') as fin:
        contents = fin.read()
    # A truncated SHA-256 digest identifies this revision of the asset.
    assetmap[relpath] = hashlib.sha256(contents).hexdigest()[:16]
    os.makedirs(os.path.dirname('out/' + relpath), exist_ok=True)
    with open('out/' + relpath, 'wb') as fout:
        fout.write(contents)

# Prepend the hash map so the service worker can cache-bust per asset.
js_assetmap = 'const HASHES = {};\n'.format(json.dumps(assetmap))
with open('src/serviceworker.js', 'r') as swi, open('out/serviceworker.js', 'w') as swo:
    swo.write(js_assetmap)
    swo.write(swi.read())
print('yes')
| 28.419355 | 72 | 0.597049 |
ace0f2d3388fbb821af97c10441270a45cbd5034 | 1,374 | py | Python | Src/Tests/interop/com/dlrcomlib/pytraits/obj.py | Enerccio/ironpython26-fixed | e302db14f05396a378adb438565a829e66acbf94 | [
"MS-PL"
] | 1 | 2020-02-11T06:02:40.000Z | 2020-02-11T06:02:40.000Z | Src/Languages/IronPython/Tests/interop/com/dlrcomlib/pytraits/obj.py | rudimk/dlr-dotnet | 71d11769f99d6ff1516ddbaed091a359eb46c670 | [
"MS-PL"
] | null | null | null | Src/Languages/IronPython/Tests/interop/com/dlrcomlib/pytraits/obj.py | rudimk/dlr-dotnet | 71d11769f99d6ff1516ddbaed091a359eb46c670 | [
"MS-PL"
] | 1 | 2018-11-21T04:10:23.000Z | 2018-11-21T04:10:23.000Z | #####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Microsoft Public License. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Microsoft Public License, please send an email to
# ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Microsoft Public License.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
'''
This test module verifies that properties of COM object are identical to those
of Python object.
'''
#------------------------------------------------------------------------------
from iptest.assert_util import skiptest
# COM interop is not exercised on Silverlight; skip this whole module there.
skiptest("silverlight")
from iptest.cominterop_util import *
#------------------------------------------------------------------------------
#--GLOBALS
# RCW (runtime-callable wrapper) around the test COM object registered under
# the "DlrComLibrary.DlrUniversalObj" ProgID.
com_obj = getRCWFromProgID("DlrComLibrary.DlrUniversalObj")
def test_sanity_check():
    """The COM object's member m0 must be reported by Python's dir()."""
    Assert("m0" in dir(com_obj))
    # Assert("m0" in vars(com_obj).keys())
#------------------------------------------------------------------------------
run_com_test(__name__, __file__)
| 40.411765 | 97 | 0.532023 |
ace0f2d6aba15e13fd05bdfe27c040ddd951bb45 | 6,928 | py | Python | pyseaflux/data/aux_vars.py | sckw/pySeaFlux | ab125f72f8f0fb2d1b627b4dd229f8744e916120 | [
"MIT"
] | null | null | null | pyseaflux/data/aux_vars.py | sckw/pySeaFlux | ab125f72f8f0fb2d1b627b4dd229f8744e916120 | [
"MIT"
] | null | null | null | pyseaflux/data/aux_vars.py | sckw/pySeaFlux | ab125f72f8f0fb2d1b627b4dd229f8744e916120 | [
"MIT"
] | 1 | 2021-08-21T05:57:50.000Z | 2021-08-21T05:57:50.000Z | from pathlib import Path as path
# Absolute path of the package root, two directory levels above this module.
# ``path`` is the module-level alias for pathlib.Path imported above.
base = str(path(__file__).resolve().parent.parent)
def area(aux_catalog_fname, dest="../data/output/"):
    """Compute the area of the SeaFlux grid cells and save them to *dest*.

    Args:
        aux_catalog_fname: path of the auxiliary-data catalog file.
        dest: directory the resulting file is written to.

    Returns:
        The filename returned by ``save_seaflux``.
    """
    import xarray as xr
    from fetch_data import read_catalog

    from ..area import get_area_from_dataset
    from .utils import save_seaflux

    catalog = read_catalog(aux_catalog_fname)
    # The OISST temperature field supplies the grid the areas are derived from.
    sst = xr.open_mfdataset(download_sst_ice(catalog["oisst_v2"])).sst.rename("temp")
    return save_seaflux(get_area_from_dataset(sst), dest, "area")
def solubility(aux_catalog_fname, dest="../data/output/"):
    """Computes SeaFlux solubility from SST, Salt and Pres.

    Merges OISST sea-surface temperature (converted to Kelvin), EN4 salinity,
    and ERA5 mean sea-level pressure (converted from Pa to atm), then applies
    the Weiss (1974) solubility formulation.

    Args:
        aux_catalog_fname: path of the auxiliary-data catalog file.
        dest: directory the resulting file is written to.

    Returns:
        The filename returned by ``save_seaflux``.
    """
    import xarray as xr
    from fetch_data import read_catalog

    from ..solubility import solubility_weiss1974
    from .utils import save_seaflux

    cat = read_catalog(aux_catalog_fname)

    print("[SeaFlux] fetching SST, Salinity, and sea-level pressure")
    ds = xr.merge(
        [
            # degC -> K
            xr.open_dataset(download_sst_ice(cat["oisst_v2"])).sst.rename("temp")
            + 273.15,
            xr.open_dataset(download_salinity(cat["en4_g10"])).salinity.rename("salt"),
            # Pa -> atm (1 atm = 101325 Pa)
            xr.open_dataset(
                download_era5_slp(download_dest=cat["era5_mslp"]["dest"])
            ).sp.rename("mslp")
            / 101325,
        ]
    )

    # Drop time steps where no variable has any valid ocean data at all.
    time_mask = ds.to_array("tmp").notnull().all("tmp").any(["lat", "lon"])
    ds = ds.where(time_mask, drop=True)

    # unit analysis
    # mol / L / atm --> mol / m3 / uatm
    # mol . L-1 . atm-1 * (1e3L . m-3) * (1e-6 atm . uatm-1) = * 1e-3
    print("[SeaFlux] calculating solubility using Weiss (1974)")
    arr = solubility_weiss1974(ds.salt, ds.temp, press_atm=ds.mslp) * 1e-3

    sol = xr.DataArray(
        data=arr,
        coords=ds.temp.coords,
        dims=ds.temp.dims,
        attrs={
            "description": "CO2 solubility in seawater using the formulation of Weiss 1974",
            "units": "mol/m3/uatm",
            "long_name": "CO2 solubility in seawater",
        },
    )

    sname = save_seaflux(sol, dest, "sol")

    return sname
def sea_ice_cover(aux_catalog_fname, dest="../data/output/"):
    """Calculate the SeaFlux sea-ice cover as a fraction and save it.

    Args:
        aux_catalog_fname: path of the auxiliary-data catalog file.
        dest: directory the resulting file is written to.

    Returns:
        The filename returned by ``save_seaflux``.
    """
    import xarray as xr
    from fetch_data import read_catalog

    from .utils import save_seaflux

    catalog = read_catalog(aux_catalog_fname)
    source = download_sst_ice(catalog["oisst_v2"])

    name = "ice"
    # OISST ice concentration is in percent; convert to a 0-1 fraction and
    # keep the record from 1982 onward.
    fraction = xr.open_mfdataset(source)["icec"].rename(name) / 100
    fraction = fraction.sel(time=slice("1982", None))

    return save_seaflux(fraction, dest, name)
def download_era5_slp(
    year=None,
    download_dest="../data/raw/ERA5_mslp/",
    process_dest="../data/processed/era5_mslp_monthly.nc",
    **kwargs,
):
    """
    Shortcut for fetching ERA5 monthly surface pressure data.

    Requires the `cdsapi` client to be correctly set up (~/.cdsapirc).
    Uses data from `reanalysis-era5-single-levels-monthly-means` and fetches
    yearly files to keep individual requests to a reasonable size.

    Args:
        year: a single year or a sequence of years; defaults to 1982-2020.
        download_dest: directory for the raw yearly netCDF files.
        process_dest: path of the merged, processed netCDF file.

    Returns:
        ``process_dest`` when a sequence of years was merged (or the file
        already existed); the raw yearly file path for a single year.
    """
    # Short-circuit before the heavy imports when the processed file exists.
    if path(process_dest).is_file():
        return process_dest

    import logging
    import os
    from pathlib import Path

    import cdsapi
    import xarray as xr
    from joblib import Parallel, delayed
    from numpy import ndarray

    from .utils import preprocess

    # None sentinel instead of a mutable default argument; the historic
    # default range is preserved.
    if year is None:
        year = list(range(1982, 2021))

    if isinstance(year, (list, tuple, ndarray)):
        logging.info(f"downloading to: {download_dest}")
        inputs = [dict(year=y, download_dest=download_dest) for y in year]
        flist = Parallel(n_jobs=8)(
            delayed(download_era5_slp)(**input_dict) for input_dict in inputs
        )
        ds = xr.open_mfdataset(flist, preprocess=preprocess())
        ds.to_netcdf(
            process_dest, encoding={k: dict(zlib=True, complevel=4) for k in ds}
        )
        # Fix: return the file path, not the Dataset, so that callers such as
        # ``solubility`` can pass the result straight to ``xr.open_dataset``.
        return process_dest

    year = str(year)
    sname = os.path.join(download_dest, f"ERA5_surfpress_monthly_{year}.nc")
    if os.path.isfile(sname):
        return sname

    Path(sname).parent.mkdir(exist_ok=True, parents=True)
    cds_client = cdsapi.Client()
    cds_client.retrieve(
        "reanalysis-era5-single-levels-monthly-means",
        {
            "product_type": "monthly_averaged_reanalysis",
            "format": "netcdf",
            "variable": "surface_pressure",
            "year": year,
            "month": [
                "01",
                "02",
                "03",
                "04",
                "05",
                "06",
                "07",
                "08",
                "09",
                "10",
                "11",
                "12",
            ],
            "time": ["00:00"],
        },
        sname,
    )
    return sname
def download_salinity(
    catalog_entry,
    verbose=True,
    process_dest="../data/processed/en4_salt_temp.nc",
):
    """Download MetOffice salinity for 1982 until today and cache it.

    Args:
        catalog_entry: keyword arguments forwarded to ``fetch_data.download``.
        verbose: passed through to the downloader.
        process_dest: path of the processed output; if the file already
            exists it is returned immediately without re-downloading.

    Returns:
        The path of the processed netCDF file.
    """
    import xarray as xr
    from fetch_data import download

    from .utils import preprocess

    # Reuse the processed file when it is already on disk.
    if path(process_dest).is_file():
        return process_dest

    downloaded = download(**catalog_entry, verbose=verbose)

    # Keep only the salinity variable at the surface level.
    surface_salinity = (
        xr.open_mfdataset(paths=downloaded)[["salinity"]]
        .sel(depth=0, method="nearest")
        .drop("depth")
    )
    processed = preprocess()(surface_salinity)

    compression = {name: dict(zlib=True, complevel=4) for name in processed}
    processed.load().to_netcdf(process_dest, encoding=compression)

    return process_dest
def download_sst_ice(
    catalog_entry,
    process_dest="../data/processed/noaa_oisst_sst_icec.nc",
):
    """Download OISSTv2 data from NOAA and cache the processed result.

    Args:
        catalog_entry: keyword arguments forwarded to ``fetch_data.download``.
        process_dest: path of the processed output; if the file already
            exists it is returned immediately without re-downloading.

    Returns:
        The path of the processed netCDF file.
    """
    import xarray as xr
    from fetch_data import download

    from .utils import preprocess

    # Reuse the processed file when it is already on disk.
    if path(process_dest).is_file():
        return process_dest

    raw_files = download(**catalog_entry)

    dataset = xr.open_mfdataset(paths=raw_files, preprocess=preprocess())
    # Keep only cells that have an ice-concentration value and drop the
    # time-bounds bookkeeping variable.
    dataset = dataset.where(lambda a: a.icec.notnull()).drop("time_bnds")

    compression = {name: dict(zlib=True, complevel=4) for name in dataset}
    dataset.to_netcdf(process_dest, encoding=compression)

    return process_dest
def calc_seafrac(
    process_dest="../data/processed/etopo1_seafrac.nc",
):
    """Compute the ocean fraction of each 1x1-degree pixel from ETOPO1.

    The ETOPO1 relief grid is downloaded (if needed), cells below sea level
    are counted, and the counts are coarsened by 60x60 cells to a 1-degree
    grid where each value is the fraction of sea cells.

    Args:
        process_dest: path of the resulting netCDF file; if it already
            exists it is returned without re-downloading or recomputing.

    Returns:
        ``process_dest``.
    """
    # Consistency fix: like the other downloaders in this module, skip the
    # expensive download and computation when the output already exists.
    if path(process_dest).is_file():
        return process_dest

    from fetch_data import download
    from numpy import arange
    from xarray import open_mfdataset

    fname = download(
        url=(
            "https://www.ngdc.noaa.gov/mgg/global/relief/ETOPO1/data/"
            "ice_surface/cell_registered/netcdf/ETOPO1_Ice_c_gmt4.grd.gz"
        ),
        dest="../data/raw/",
        verbose=True,
    )

    ds = open_mfdataset(fname).rename(x="lon", y="lat", z="topography")
    sea = ds.topography < 0
    # 60x60 source cells per 1-degree pixel; dividing the count of sea cells
    # by 60**2 yields the ocean fraction.
    seafrac = sea.coarsen(lat=60, lon=60).sum().compute() / 60 ** 2
    seafrac = seafrac.assign_coords(
        lat=arange(-89.5, 90), lon=arange(-179.5, 180)
    ).rename("seafrac")
    seafrac.attrs = dict(
        description="Fraction of pixel that is covered by ocean. Calculated from ETOPO1. ",
        unit="frac",
    )
    seafrac.to_netcdf(process_dest)
    return process_dest
| 26.957198 | 92 | 0.614896 |
ace0f471f32e45d00c09279cca653582c9e773aa | 487 | py | Python | idp_data/idp_data/migrations/0001_initial.py | emre2038/idp-data | c31e9783f86a2745403011f21f5148dd98420510 | [
"MIT"
] | null | null | null | idp_data/idp_data/migrations/0001_initial.py | emre2038/idp-data | c31e9783f86a2745403011f21f5148dd98420510 | [
"MIT"
] | 6 | 2020-08-20T12:39:29.000Z | 2020-12-01T11:17:02.000Z | idp_data/idp_data/migrations/0001_initial.py | emre2038/idp-data | c31e9783f86a2745403011f21f5148dd98420510 | [
"MIT"
] | 3 | 2020-08-04T21:11:06.000Z | 2020-08-18T11:40:00.000Z | # Generated by Django 2.2.10 on 2020-08-06 23:29
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=500)),
],
),
]
| 22.136364 | 114 | 0.574949 |
ace0f4a842bd3280b9155a5436ab0b2547d8e031 | 169 | py | Python | 2 semester/PW/Coursera/Python/5 Week/12.py | kurpenok/Labs | 069c92b7964a1445d093313b38ebdc56318d2a73 | [
"MIT"
] | 1 | 2022-02-06T17:50:25.000Z | 2022-02-06T17:50:25.000Z | 2 semester/PW/Coursera/Python/5 Week/12.py | kurpenok/Labs | 069c92b7964a1445d093313b38ebdc56318d2a73 | [
"MIT"
] | null | null | null | 2 semester/PW/Coursera/Python/5 Week/12.py | kurpenok/Labs | 069c92b7964a1445d093313b38ebdc56318d2a73 | [
"MIT"
] | 1 | 2022-03-02T06:45:06.000Z | 2022-03-02T06:45:06.000Z | a = list(map(int, input().split()))
for i in range(1, len(a) - 1, 2):
print(a[i], a[i - 1], end=' ')
if len(a) % 2:
print(a[-1])
else:
print(a[-1], a[-2])
| 16.9 | 35 | 0.467456 |
ace0f5798db3754543a95808b268aa505a1f55b7 | 223 | py | Python | sdk/python/feast/utils.py | terryyylim/feast | c44df7f33a1a1316c53860260efc26255c0c0638 | [
"Apache-2.0"
] | 3 | 2021-05-07T06:03:44.000Z | 2022-02-20T10:43:59.000Z | sdk/python/feast/utils.py | terryyylim/feast | c44df7f33a1a1316c53860260efc26255c0c0638 | [
"Apache-2.0"
] | 15 | 2020-11-13T19:01:34.000Z | 2022-02-22T15:47:29.000Z | sdk/python/feast/utils.py | terryyylim/feast | c44df7f33a1a1316c53860260efc26255c0c0638 | [
"Apache-2.0"
] | 2 | 2021-05-07T06:03:40.000Z | 2021-07-17T08:32:02.000Z | from datetime import datetime
from pytz import utc
def make_tzaware(t: datetime):
""" We assume tz-naive datetimes are UTC """
if t.tzinfo is None:
return t.replace(tzinfo=utc)
else:
return t
| 18.583333 | 48 | 0.654709 |
ace0f660c2efef6dd4c8764f0ae575a858ebe0f4 | 2,293 | py | Python | leasing/viewsets/email.py | hkotkanen/mvj | a22d40869ef1b13924da428f3026d248acef81a7 | [
"MIT"
] | null | null | null | leasing/viewsets/email.py | hkotkanen/mvj | a22d40869ef1b13924da428f3026d248acef81a7 | [
"MIT"
] | null | null | null | leasing/viewsets/email.py | hkotkanen/mvj | a22d40869ef1b13924da428f3026d248acef81a7 | [
"MIT"
] | null | null | null | from django.conf import settings
from django.core.mail import send_mail
from django.utils.translation import ugettext_lazy as _
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from leasing.metadata import FieldsMetadata
from leasing.models.email import EmailLog
from leasing.permissions import PerMethodPermission
from leasing.serializers.email import SendEmailSerializer
class SendEmailView(APIView):
permission_classes = (PerMethodPermission,)
metadata_class = FieldsMetadata
perms_map = {
'POST': ['leasing.view_lease'],
}
def get_view_name(self):
return _("Send email")
def get_view_description(self, html=False):
return _("Send email")
def post(self, request, format=None):
serializer = SendEmailSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
email_log = EmailLog.objects.create(
type=serializer.validated_data['type'],
user=request.user,
text=serializer.validated_data['text'],
content_object=serializer.validated_data['lease'],
)
if request.user.email:
from_email = request.user.email
else:
from_email = settings.MVJ_EMAIL_FROM
for recipient in serializer.validated_data['recipients']:
if not recipient.email:
continue
send_mail(
_('MVJ lease {} {}').format(serializer.validated_data['lease'].identifier,
serializer.validated_data['type']),
serializer.validated_data['text'],
from_email,
[recipient.email],
fail_silently=False,
)
email_log.recipients.add(recipient)
result = {
"sent": True,
}
return Response(result, status=status.HTTP_200_OK)
def options(self, request, *args, **kwargs):
if self.metadata_class is None:
return self.http_method_not_allowed(request, *args, **kwargs)
data = self.metadata_class().determine_metadata(request, self, serializer=SendEmailSerializer())
return Response(data, status=status.HTTP_200_OK)
| 32.295775 | 104 | 0.649804 |
ace0f6a41aa697a54b7f5a19c6eac4e632205a43 | 2,379 | py | Python | pysnmp/carrier/twisted/dispatch.py | RKinsey/pysnmp | 96b5cf31e2f5d19f34d0dd1075014c488f6a5789 | [
"BSD-2-Clause"
] | 492 | 2016-03-13T11:03:13.000Z | 2022-03-21T02:52:57.000Z | pysnmp/carrier/twisted/dispatch.py | bartomo/pysnmp | becd15c79c9a6b5696928ecd50bf5cca8b1770a1 | [
"BSD-2-Clause"
] | 372 | 2016-03-29T22:42:05.000Z | 2022-03-26T10:28:25.000Z | pysnmp/carrier/twisted/dispatch.py | bartomo/pysnmp | becd15c79c9a6b5696928ecd50bf5cca8b1770a1 | [
"BSD-2-Clause"
] | 197 | 2016-03-13T11:01:54.000Z | 2022-03-07T19:52:15.000Z | #
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pysnmp/license.html
#
# Copyright (C) 2008 Truelite Srl <info@truelite.it>
# Author: Filippo Giunchedi <filippo@truelite.it>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the BSD 2-Clause License as shipped with pysnmp.
#
# Description: Transport dispatcher based on twisted.internet.reactor
#
import sys
import time
import traceback
from twisted.internet import reactor
from twisted.internet import task
from pysnmp.carrier.base import AbstractTransportDispatcher
from pysnmp.error import PySnmpError
class TwistedDispatcher(AbstractTransportDispatcher):
"""TransportDispatcher based on twisted.internet.reactor"""
def __init__(self, *args, **kwargs):
AbstractTransportDispatcher.__init__(self)
self.__transportCount = 0
if 'timeout' in kwargs:
self.setTimerResolution(kwargs['timeout'])
self.loopingcall = task.LoopingCall(
lambda self=self: self.handleTimerTick(time.time())
)
def runDispatcher(self, timeout=0.0):
if not reactor.running:
try:
reactor.run()
except KeyboardInterrupt:
raise
except Exception:
raise PySnmpError('reactor error: %s' % ';'.join(
traceback.format_exception(*sys.exc_info())))
# jobstarted/jobfinished might be okay as-is
def registerTransport(self, transportDomain, transport):
if not self.loopingcall.running and self.getTimerResolution() > 0:
self.loopingcall.start(self.getTimerResolution(), now=False)
AbstractTransportDispatcher.registerTransport(
self, transportDomain, transport
)
self.__transportCount += 1
def unregisterTransport(self, transportDomain):
transport = AbstractTransportDispatcher.getTransport(
self, transportDomain)
if transport:
AbstractTransportDispatcher.unregisterTransport(
self, transportDomain)
self.__transportCount -= 1
# The last transport has been removed, stop the timeout
if self.__transportCount == 0 and self.loopingcall.running:
self.loopingcall.stop()
| 31.302632 | 74 | 0.679697 |
ace0f6ba2c57e74682cbf89e72f1e0d68aeaef02 | 1,755 | py | Python | SpiderNode/HTMLDownloader.py | Honey634546/Distributed_Reptiles | 75434ec4a4aa289e68fca77925b8622acd789238 | [
"MIT"
] | null | null | null | SpiderNode/HTMLDownloader.py | Honey634546/Distributed_Reptiles | 75434ec4a4aa289e68fca77925b8622acd789238 | [
"MIT"
] | null | null | null | SpiderNode/HTMLDownloader.py | Honey634546/Distributed_Reptiles | 75434ec4a4aa289e68fca77925b8622acd789238 | [
"MIT"
] | null | null | null | import requests
import urllib
import codecs
import re
class HTMLDownloader(object):
def download(self, url):
"""
下载页面
:param url: 页面url
:return: 页面类容
"""
if url is None:
return None
headers = {
'user-Agent': "ee122-KeywordHunter"
}
res = requests.get(url, headers=headers)
parsed_url = urllib.parse.urlparse(url)
print("PAGE:host=%s;port=%s;respath=%s;status=%s" % (parsed_url.netloc, 80, parsed_url.path, res.status_code))
if res.status_code == 200:
res.encoding = 'utf-8'
print(url[-8::])
fout = codecs.open('%s.html' % url[-8::], 'w', encoding='utf-8')
fout.write(res.text)
fout.close()
return res.text
return None
def find_key(self, url, key):
parsed_url = urllib.parse.urlparse(url)
with open('%s.html' %url[-8::] , 'r', encoding="utf-8") as f:
num = 1
while True:
line = f.readline()
# print(line)
p = re.compile(key)
m = re.search(p, line)
if m is not None:
# print(num)
print("FOUND_KEYWORD:host=%s;port=%s;respath=%s;line=%s" % (
parsed_url.netloc, 80, parsed_url.path, num))
return True
# break
num += 1
if not line:
return False
# break
if __name__ == "__main__":
load = HTMLDownloader()
load.download("http://www.baidu.com")
load.find_key("http://www.baidu.com", "百度")
| 30.789474 | 119 | 0.467806 |
ace0f88e45ca0a7c9086b751e3826816b2e00035 | 4,160 | py | Python | sdk/python/pulumi_azure_native/scheduler/get_job_collection.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/scheduler/get_job_collection.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/scheduler/get_job_collection.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetJobCollectionResult',
'AwaitableGetJobCollectionResult',
'get_job_collection',
]
@pulumi.output_type
class GetJobCollectionResult:
def __init__(__self__, id=None, location=None, name=None, properties=None, tags=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Gets the job collection resource identifier.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Gets or sets the storage account location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Gets or sets the job collection resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.JobCollectionPropertiesResponse':
"""
Gets or sets the job collection properties.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Gets or sets the tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Gets the job collection resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetJobCollectionResult(GetJobCollectionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetJobCollectionResult(
id=self.id,
location=self.location,
name=self.name,
properties=self.properties,
tags=self.tags,
type=self.type)
def get_job_collection(job_collection_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetJobCollectionResult:
"""
API Version: 2016-03-01.
:param str job_collection_name: The job collection name.
:param str resource_group_name: The resource group name.
"""
__args__ = dict()
__args__['jobCollectionName'] = job_collection_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:scheduler:getJobCollection', __args__, opts=opts, typ=GetJobCollectionResult).value
return AwaitableGetJobCollectionResult(
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
properties=__ret__.properties,
tags=__ret__.tags,
type=__ret__.type)
| 32.248062 | 133 | 0.632692 |
ace0f8e15ab14d9b3e9c4c1a099eb5e323173165 | 923 | py | Python | setup.py | tamentis/nzbsort | 251b82dc4a544d39e228257a0e98481cc54e59a5 | [
"0BSD"
] | 1 | 2016-11-12T19:58:03.000Z | 2016-11-12T19:58:03.000Z | setup.py | tamentis/nzbsort | 251b82dc4a544d39e228257a0e98481cc54e59a5 | [
"0BSD"
] | null | null | null | setup.py | tamentis/nzbsort | 251b82dc4a544d39e228257a0e98481cc54e59a5 | [
"0BSD"
] | null | null | null | #!/usr/bin/python
from distutils.core import setup
from nzbsort import __version__
setup(
name="nzbsort",
version=__version__,
description="NZB files alpha sorter",
author="Bertrand Janin",
author_email="tamentis@neopulsar.org",
url="http://tamentis.com/projects/nzbsort/",
scripts=["nzbsort.py"],
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: ISC License (ISCL)",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Programming Language :: Python :: 2.5",
"Programming Language :: Python :: 3.0",
"Programming Language :: Python :: 3.1",
"Topic :: Communications :: Usenet News",
"Topic :: Communications :: File Sharing",
]
)
| 30.766667 | 56 | 0.609967 |
ace0f985b4e062af34c354587a28b835ed8a2683 | 6,269 | py | Python | aws_topology/tests/test_lambda.py | Mathieu-Dirkx/stackstate-agent-integrations | 76bb486f2560e1cdcd5aabb65ef8a19ff36297c1 | [
"BSD-3-Clause"
] | 2 | 2020-03-10T13:21:37.000Z | 2021-04-01T07:52:16.000Z | aws_topology/tests/test_lambda.py | Mathieu-Dirkx/stackstate-agent-integrations | 76bb486f2560e1cdcd5aabb65ef8a19ff36297c1 | [
"BSD-3-Clause"
] | 33 | 2020-02-05T16:18:32.000Z | 2022-03-21T14:08:04.000Z | aws_topology/tests/test_lambda.py | Mathieu-Dirkx/stackstate-agent-integrations | 76bb486f2560e1cdcd5aabb65ef8a19ff36297c1 | [
"BSD-3-Clause"
] | 7 | 2020-03-10T13:21:39.000Z | 2021-03-11T07:16:44.000Z | from stackstate_checks.base.stubs import topology as top
from .conftest import BaseApiTest, set_cloudtrail_event
class TestLambda(BaseApiTest):
def get_api(self):
return "lambda"
def get_account_id(self):
return "731070500579"
def get_region(self):
return "eu-west-1"
def test_process_lambda(self):
self.check.run()
topology = [top.get_snapshot(self.check.check_id)]
self.assertEqual(len(topology), 1)
self.assert_executed_ok()
components = topology[0]["components"]
relations = topology[0]["relations"]
# Function
comp = top.assert_component(
components,
"arn:aws:lambda:eu-west-1:731070500579:function:com-stackstate-prod-sam-seed-PutHello-1LUD3ESBOR6EY",
"aws.lambda.function",
checks={"FunctionName": "com-stackstate-prod-sam-seed-PutHello-1LUD3ESBOR6EY", "Tags.Group": "StackState"},
)
self.assert_location_info(comp)
# lambda sts-xray-test-01
top.assert_component(
components, "arn:aws:lambda:eu-west-1:731070500579:function:sts-xray-test-01", "aws.lambda.function"
)
# Lambda sts-xray-test-01 has an alias
top.assert_component(
components,
"arn:aws:lambda:eu-west-1:731070500579:function:sts-xray-test-01:old",
"aws.lambda.alias",
checks={"Function.FunctionName": "sts-xray-test-01", "Name": "old"},
)
# sts-xray-test-01 has vpcid
top.assert_relation(
relations, "arn:aws:lambda:eu-west-1:731070500579:function:sts-xray-test-01", "vpc-c6d073bf", "uses-service"
)
# alias also has relation with vpcid
top.assert_relation(
relations,
"arn:aws:lambda:eu-west-1:731070500579:function:sts-xray-test-01:old",
"vpc-c6d073bf",
"uses-service",
)
top.assert_component(
components, "arn:aws:lambda:eu-west-1:731070500579:function:sts-xray-test-02", "aws.lambda.function"
)
# Lambda sts-xray-test-02 has an alias
top.assert_component(
components,
"arn:aws:lambda:eu-west-1:731070500579:function:sts-xray-test-02:altnm",
"aws.lambda.alias",
checks={"Function.FunctionName": "sts-xray-test-02", "Name": "altnm"},
)
top.assert_relation(
relations,
"arn:aws:lambda:eu-west-1:731070500579:function:sts-xray-test-02",
"arn:aws:rds:eu-west-1:731070500579:db:sn1e7g5j33vyr4o",
"uses-service",
)
top.assert_relation(
relations,
"arn:aws:lambda:eu-west-1:731070500579:function:com-stackstate-prod-PersonIdDynamoDBHandler-6KMIBXKKKCEZ",
"arn:aws:dynamodb:eu-west-1:731070500579:table/table_1/stream/2018-05-17T08:09:27.110",
"uses-service",
)
top.assert_relation(
relations,
"arn:aws:lambda:eu-west-1:731070500579:function:com-stackstate-prod-PersonCreatedKinesisHand-19T8EJADX2DE",
"arn:aws:kinesis:eu-west-1:731070500579:stream/stream_1",
"uses-service",
)
top.assert_all_checked(components, relations)
@set_cloudtrail_event("create_function")
def test_process_lambda_create_function(self):
self.check.run()
topology = [top.get_snapshot(self.check.check_id)]
self.assertEqual(len(topology), 1)
self.assert_updated_ok()
self.assertEqual(len(topology[0]["components"]), 1)
self.assertEqual(
"arn:aws:lambda:eu-west-1:731070500579:function:com-stackstate-prod-sam-seed-PutHello-1LUD3ESBOR6EY",
topology[0]["components"][0]["id"],
)
@set_cloudtrail_event("delete_function")
def test_process_lambda_delete_function(self):
self.check.run()
topology = [top.get_snapshot(self.check.check_id)]
self.assertEqual(len(topology), 1)
self.assert_updated_ok()
self.assertEqual(len(topology[0]["components"]), 0)
self.assertIn("arn:aws:lambda:eu-west-1:731070500579:function:JpkTest", self.check.delete_ids)
@set_cloudtrail_event("update_function")
def test_process_lambda_update_function(self):
self.check.run()
topology = [top.get_snapshot(self.check.check_id)]
self.assertEqual(len(topology), 1)
self.assert_updated_ok()
self.assertEqual(len(topology[0]["components"]), 1)
self.assertEqual(
"arn:aws:lambda:eu-west-1:731070500579:function:com-stackstate-prod-sam-seed-PutHello-1LUD3ESBOR6EY",
topology[0]["components"][0]["id"],
)
@set_cloudtrail_event("publish_version")
def test_process_lambda_publish_version(self):
self.check.run()
topology = [top.get_snapshot(self.check.check_id)]
self.assertEqual(len(topology), 1)
self.assert_updated_ok()
self.assertEqual(len(topology[0]["components"]), 1)
self.assertEqual(
"arn:aws:lambda:eu-west-1:731070500579:function:com-stackstate-prod-sam-seed-PutHello-1LUD3ESBOR6EY",
topology[0]["components"][0]["id"],
)
@set_cloudtrail_event("add_permission")
def test_process_lambda_add_permission(self):
self.check.run()
topology = [top.get_snapshot(self.check.check_id)]
self.assertEqual(len(topology), 1)
self.assert_updated_ok()
self.assertEqual(len(topology[0]["components"]), 1)
self.assertEqual(
"arn:aws:lambda:eu-west-1:731070500579:function:com-stackstate-prod-sam-seed-PutHello-1LUD3ESBOR6EY",
topology[0]["components"][0]["id"],
)
@set_cloudtrail_event("tag_function")
def test_process_lambda_tag_function(self):
self.check.run()
topology = [top.get_snapshot(self.check.check_id)]
self.assertEqual(len(topology), 1)
self.assert_updated_ok()
self.assertEqual(len(topology[0]["components"]), 1)
self.assertEqual(
"arn:aws:lambda:eu-west-1:731070500579:function:com-stackstate-prod-sam-seed-PutHello-1LUD3ESBOR6EY",
topology[0]["components"][0]["id"],
)
| 40.707792 | 120 | 0.635987 |
ace0fac327a2cbd1ddbcdbaf885bfe1c6227a826 | 1,699 | py | Python | classrad/utils/visualization.py | piotrekwoznicki/Radiomics | 2184292b48b25588f5df28c79fa471819243d02f | [
"Apache-2.0"
] | 1 | 2021-10-16T12:43:21.000Z | 2021-10-16T12:43:21.000Z | classrad/utils/visualization.py | piotrekwoznicki/Radiomics | 2184292b48b25588f5df28c79fa471819243d02f | [
"Apache-2.0"
] | null | null | null | classrad/utils/visualization.py | piotrekwoznicki/Radiomics | 2184292b48b25588f5df28c79fa471819243d02f | [
"Apache-2.0"
def get_subplots_dimensions(n_plots):
    """Return an (nrows, ncols, figsize) layout suggestion for ``n_plots`` subplots.

    Args:
        n_plots (int): number of subplots to be included in the plot (must be >= 1).

    Returns:
        tuple: ``(nrows, ncols, figsize)`` where ``nrows``/``ncols`` are ints and
        ``figsize`` is a ``(width, height)`` tuple suitable for ``plt.subplots``.

    Raises:
        ValueError: if ``n_plots`` is smaller than 1.
    """
    if n_plots < 1:
        # Previously this fell through every branch and raised UnboundLocalError;
        # fail with a clear message instead.
        raise ValueError("n_plots must be >= 1, got {}".format(n_plots))
    if n_plots == 1:
        nrows, ncols, figsize = 1, 1, (12, 7)
    elif n_plots == 2:
        nrows, ncols, figsize = 1, 2, (13, 6)
    elif n_plots == 3:
        nrows, ncols, figsize = 1, 3, (20, 5)
    elif n_plots == 4:
        nrows, ncols, figsize = 2, 2, (14, 8)
    elif n_plots in [5, 6]:
        nrows, ncols, figsize = 2, 3, (20, 9)
    elif n_plots == 9:
        nrows, ncols, figsize = 3, 3, (18, 12)
    elif n_plots == 10:
        nrows, ncols, figsize = 2, 5, (20, 7)
    else:
        # Generic case: 4 columns and just enough rows (ceiling division).
        # BUGFIX: the old ``n_plots // 4 + 1`` produced an entirely empty row
        # whenever n_plots was a multiple of 4 (e.g. 8 -> 3 rows instead of 2).
        nrows = (n_plots + 3) // 4
        ncols = 4
        figsize = (20, 7 + 5 * nrows)
    return nrows, ncols, figsize
# Review the following functions for usefulness
# def plot_for_all(func):
# def wrapper(model_names, predictions, *args, **kwargs):
# nrows, ncols, figsize = get_subplots_dimensions(len(models))
# fig, axs = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize)
# for i, model_predictions in enumerate(predictions):
# ax = fig.axes[i]
# model_name = model_names[i]
# func(model_name, model_predictions, ax=ax)
# plt.tight_layout()
# plt.show()
# return wrapper
| 28.316667 | 104 | 0.545615 |
ace0fcdc82fb3ebe99efea4f58dfa617f89b1ce2 | 2,331 | py | Python | src/MapMyNotesApplication/MapMyNotesApplication/models/input_validator.py | Ryan-Gouldsmith/MajorProject-MapMyNotes | 2c350f68f992e454e88d3653e46e7607e224e3ae | [
"MIT"
] | null | null | null | src/MapMyNotesApplication/MapMyNotesApplication/models/input_validator.py | Ryan-Gouldsmith/MajorProject-MapMyNotes | 2c350f68f992e454e88d3653e46e7607e224e3ae | [
"MIT"
] | null | null | null | src/MapMyNotesApplication/MapMyNotesApplication/models/input_validator.py | Ryan-Gouldsmith/MajorProject-MapMyNotes | 2c350f68f992e454e88d3653e46e7607e224e3ae | [
"MIT"
class InputValidator(object):
    """Validates user-submitted note metadata before it is persisted.

    ``params`` is a dict expected to contain the keys ``module_code_data``,
    ``lecturer_name_data``, ``location_data``, ``date_data``, ``title_data``
    and ``time_data``. Validation messages accumulate in ``self.errors``.
    """

    def __init__(self, params):
        """
        Creates an input validator instance

        Parameters
        ----------
        params: The params from the user's input (a dict).
        """
        self.errors = []
        self.params = params

    def check_all_params_are_less_than_schema_length(self):
        """
        Performs a sanity check of the params' lengths against the schema limits,
        appending one message to ``self.errors`` per field that is too long.

        Returns
        -------
        True if every checked field fits the schema (no errors recorded).
        False if at least one field exceeds its maximum length.
        """
        # NOTE(review): time_data has no length check here — confirm whether the
        # schema constrains it.
        if len(self.params["module_code_data"]) > 50:
            self.errors.append("Module code length too long, max 50 characters.")
        if len(self.params['lecturer_name_data']) > 100:
            self.errors.append("Lecture name is too long, max length 100 characters")
        if len(self.params['location_data']) > 100:
            self.errors.append("Location data too long, max length 100 characters")
        if len(self.params['title_data']) > 100:
            self.errors.append("title data too long, max length 100 characters")
        return len(self.errors) == 0

    def get_errors(self):
        """
        Returns
        -------
        The list of error messages accumulated so far.
        """
        return self.errors

    def check_all_params_exist(self):
        """
        Checks that every expected key exists and that no value is only whitespace.

        Returns
        -------
        True if all keys are present and none is purely whitespace.
        False otherwise.
        """
        required = ("module_code_data", "lecturer_name_data", "location_data",
                    "date_data", "title_data", "time_data")
        if any(key not in self.params for key in required):
            return False
        """ REFERENCE isspace checks for empty spaces
        http://stackoverflow.com/questions/2405292/how-to-check-if-text-is-empty-spaces-tabs-newlines-in-python
        """
        # NOTE(review): ''.isspace() is False, so an empty string still passes
        # this check — confirm whether empty values should be rejected too.
        return not any(self.params[key].isspace() for key in required)
| 35.861538 | 124 | 0.599743 |
ace0fdc8bd22ff66db8d4d4211ce008cd49e22e7 | 6,026 | py | Python | training/src/network_base.py | neeraj0104/PoseEstimationForMobile | 8d88dbdc8e9154dd17564b097ecf6fdb05115948 | [
"Apache-2.0"
] | null | null | null | training/src/network_base.py | neeraj0104/PoseEstimationForMobile | 8d88dbdc8e9154dd17564b097ecf6fdb05115948 | [
"Apache-2.0"
] | null | null | null | training/src/network_base.py | neeraj0104/PoseEstimationForMobile | 8d88dbdc8e9154dd17564b097ecf6fdb05115948 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# @Time : 18-4-24 5:48 PM
# @Author : edvard_hua@live.com
# @FileName: network_base.py
# @Software: PyCharm
import tensorflow as tf
import tensorflow.contrib.slim as slim
# Weight/bias initializers and the L2 regularizer shared by the layer builders below.
_init_xavier = tf.contrib.layers.xavier_initializer()
_init_norm = tf.truncated_normal_initializer(stddev=0.01)  # not referenced by the functions in this file
_init_zero = slim.init_ops.zeros_initializer()
_l2_regularizer_00004 = tf.contrib.layers.l2_regularizer(0.00004)
# Module-wide flag controlling variable/batch-norm trainability; see is_trainable().
_trainable = True
def is_trainable(trainable=True):
    """Set the module-wide ``_trainable`` flag used by all layer builders in this file."""
    global _trainable
    _trainable = trainable
def max_pool(inputs, k_h, k_w, s_h, s_w, name, padding="SAME"):
    """2-D max pooling over NHWC ``inputs`` with kernel (k_h, k_w) and strides (s_h, s_w)."""
    kernel = [1, k_h, k_w, 1]
    window_strides = [1, s_h, s_w, 1]
    return tf.nn.max_pool2d(inputs, ksize=kernel, strides=window_strides,
                            padding=padding, name=name)
def upsample(inputs, factor, name):
    """Bilinearly resize ``inputs`` so both spatial dimensions grow by ``factor``."""
    target_h = int(inputs.get_shape()[1]) * factor
    target_w = int(inputs.get_shape()[2]) * factor
    return tf.compat.v1.image.resize_bilinear(inputs, [target_h, target_w], name=name)
def separable_conv(input, c_o, k_s, stride, scope):
    """Depthwise-separable conv: a (k_s x k_s) depthwise stage followed by a 1x1 pointwise stage.

    Batch norm (+ ReLU6) is applied through the slim arg_scope; trainability
    follows the module-wide ``_trainable`` flag.
    """
    with slim.arg_scope([slim.batch_norm],
                        decay=0.999,
                        fused=True,
                        is_training=_trainable,
                        activation_fn=tf.nn.relu6):
        # Depthwise stage: one filter per input channel, no biases (BN supplies the shift).
        net = slim.separable_convolution2d(input,
                                           num_outputs=None,
                                           stride=stride,
                                           trainable=_trainable,
                                           depth_multiplier=1.0,
                                           kernel_size=[k_s, k_s],
                                           weights_initializer=_init_xavier,
                                           weights_regularizer=_l2_regularizer_00004,
                                           biases_initializer=None,
                                           scope=scope + '_depthwise')
        # Pointwise stage: 1x1 conv projecting to c_o output channels.
        net = slim.convolution2d(net,
                                 c_o,
                                 stride=1,
                                 kernel_size=[1, 1],
                                 weights_initializer=_init_xavier,
                                 biases_initializer=_init_zero,
                                 normalizer_fn=slim.batch_norm,
                                 trainable=_trainable,
                                 weights_regularizer=None,
                                 scope=scope + '_pointwise')
    return net
def inverted_bottleneck(inputs, up_channel_rate, channels, subsample, k_s=3, scope=""):
    # Inverted-residual-style block: 1x1 expand -> depthwise -> 1x1 (linear) project,
    # with an identity shortcut when the channel count is unchanged.
    with tf.variable_scope("inverted_bottleneck_%s" % scope):
        with slim.arg_scope([slim.batch_norm],
                            decay=0.999,
                            fused=True,
                            is_training=_trainable,
                            activation_fn=tf.nn.relu6):
            # subsample=True halves the spatial resolution in the depthwise stage.
            stride = 2 if subsample else 1
            # Expansion: 1x1 conv widening channels by up_channel_rate.
            output = slim.convolution2d(inputs,
                                        up_channel_rate * inputs.get_shape().as_list()[-1],
                                        stride=1,
                                        kernel_size=[1, 1],
                                        weights_initializer=_init_xavier,
                                        biases_initializer=_init_zero,
                                        normalizer_fn=slim.batch_norm,
                                        weights_regularizer=None,
                                        scope=scope + '_up_pointwise',
                                        trainable=_trainable)
            # Depthwise k_s x k_s conv (num_outputs=None keeps one filter per channel).
            output = slim.separable_convolution2d(output,
                                                  num_outputs=None,
                                                  stride=stride,
                                                  depth_multiplier=1.0,
                                                  kernel_size=k_s,
                                                  weights_initializer=_init_xavier,
                                                  weights_regularizer=_l2_regularizer_00004,
                                                  biases_initializer=None,
                                                  padding="SAME",
                                                  scope=scope + '_depthwise',
                                                  trainable=_trainable)
            # Projection: 1x1 conv to `channels` with no activation (linear bottleneck).
            output = slim.convolution2d(output,
                                        channels,
                                        stride=1,
                                        kernel_size=[1, 1],
                                        activation_fn=None,
                                        weights_initializer=_init_xavier,
                                        biases_initializer=_init_zero,
                                        normalizer_fn=slim.batch_norm,
                                        weights_regularizer=None,
                                        scope=scope + '_pointwise',
                                        trainable=_trainable)
            # Residual shortcut only when input and output channel counts match.
            # NOTE(review): if subsample=True while channels match, this add would
            # fail at graph build time (spatial shape mismatch) — confirm callers
            # never use that combination.
            if inputs.get_shape().as_list()[-1] == channels:
                output = tf.add(inputs, output)
    return output
def convb(input, k_h, k_w, c_o, stride, name, relu=True):
    """Conv2d (k_h x k_w, c_o filters) with batch norm, optionally followed by ReLU."""
    with slim.arg_scope([slim.batch_norm], decay=0.999, fused=True, is_training=_trainable):
        activation = tf.nn.relu if relu else None
        net = slim.convolution2d(
            inputs=input,
            num_outputs=c_o,
            kernel_size=[k_h, k_w],
            stride=stride,
            normalizer_fn=slim.batch_norm,
            weights_regularizer=_l2_regularizer_00004,
            weights_initializer=_init_xavier,
            biases_initializer=_init_zero,
            activation_fn=activation,
            scope=name,
            trainable=_trainable)
    return net
| 46.713178 | 129 | 0.433455 |
ace0fe4eeb5f32dfab5839538461b8e529b48b3e | 3,009 | py | Python | Topic 1 - Find solution for single-variable functions/PP_Day_cung/day_cung.py | manhtuanbn12/MI3040-Numerical-Analysis | 32ae63373d258def92e3d934829e22d2ba4ea097 | [
"MIT"
] | null | null | null | Topic 1 - Find solution for single-variable functions/PP_Day_cung/day_cung.py | manhtuanbn12/MI3040-Numerical-Analysis | 32ae63373d258def92e3d934829e22d2ba4ea097 | [
"MIT"
] | null | null | null | Topic 1 - Find solution for single-variable functions/PP_Day_cung/day_cung.py | manhtuanbn12/MI3040-Numerical-Analysis | 32ae63373d258def92e3d934829e22d2ba4ea097 | [
"MIT"
] | null | null | null | #===================================================================================
#
# Code cho phương pháp Dây cung
# * Input: f(x) trong pt f(x) = 0; khoảng cách li ban đầu (a, b); sai số epsilon
# * Output: Nghiệm PT f(x) = 0;
#
#===================================================================================
from math import *
from sympy import *
import sys
#===================================================================================
# Phần thuật toán chính
class daycung_oop:
    """Chord (false-position style) method for solving f(x) = 0 on [a_0, b_0].

    The function is given as a sympy-parsable string; f, f' and f'' are
    lambdified once in the constructor.
    """

    def __init__(self, a_0, b_0, eps, expr):
        """Build the solver.

        a_0, b_0: endpoints of the initial bracketing interval.
        eps: absolute tolerance on the step size (stopping criterion).
        expr: the function f(x) as a sympy-parsable string.
        """
        x = symbols("x")
        self.func = sympify(expr)
        self.a_0 = a_0
        self.b_0 = b_0
        self.eps = eps
        f = self.func
        # Symbolic f, f', f''.
        self.sym_df = [
            f,
            diff(f, x),
            diff(f, x, 2)
        ]
        # Fast numeric versions of the above ("math" backend lambdas).
        self.df = [
            lambdify(x, self.sym_df[0], "math"),
            lambdify(x, self.sym_df[1], "math"),
            lambdify(x, self.sym_df[2], "math"),
        ]

    def __Kiem_tra_don_dieu(self):
        """Monotonicity check: 1 when f', f'' and f''' have no zero in [a, b], else 0."""
        x = symbols("x")
        a = self.a_0
        b = self.b_0
        sol_set = solveset(diff(self.func, x), x, Interval(a, b))
        sol_set = Union(sol_set, solveset(diff(self.func, x, 2), x, Interval(a, b)))
        sol_set = Union(sol_set, solveset(diff(self.func, x, 3), x, Interval(a, b)))
        if sol_set.is_empty:
            return 1
        else:
            return 0

    def __checkInputValidity(self):
        """Print a message and return 0 when [a_0, b_0] is not a valid input interval.

        Implicitly returns None when the interval is valid (Solve only tests == 0).
        """
        a = self.a_0
        b = self.b_0
        f = self.df[0]
        # A sign change over [a, b] is required: f(a) * f(b) < 0.
        if f(a) * f(b) >= 0:
            print(f(a), " ", f(b))
            print(f"Khoảng cách ly [{a}, {b}] không hợp lệ")
            return 0
        if self.__Kiem_tra_don_dieu() == 0:
            print("Khoảng đã cho không hợp lệ")
            return 0

    def __Daycung(self):
        """Run the chord iteration and return the approximate root."""
        eps = self.eps
        a = self.a_0
        b = self.b_0
        f = self.df[0]
        f1 = self.df[1]
        f2 = self.df[2]
        # NOTE(review): the fixed endpoint d is chosen from the signs of f'(a)
        # and f''(a) — verify against the usual f * f'' > 0 selection rule.
        if (f1(a) > 0 and f2(a) > 0) or (f1(a) < 0 and f2(a) < 0):
            d = b
            x = a
        else:
            d = a
            x = b
        x_pre = -1000  # sentinel guaranteeing the first iteration runs
        # BUGFIX: the stopping test must use the absolute step size. The old
        # ``x - x_pre > eps`` exited after a single iteration whenever the
        # iterates decrease (x - x_pre negative), returning a wrong answer.
        while abs(x - x_pre) > eps:
            x_pre = x
            x = x_pre - (d - x_pre) / (f(d) - f(x_pre)) * f(x_pre)
        return x

    def Solve(self):
        """Validate the interval, then return the root; exits the process on invalid input."""
        if self.__checkInputValidity() == 0:
            print("Vui lòng nhập lại dữ liệu", file=sys.stderr)
            exit(0)
        return self.__Daycung()
#===================================================================================
# Main program
# NOTE(review): f(x) = x^4 + 0.5 is strictly positive, so it has no real root and
# f(L) * f(R) >= 0 on [0.1, 1] — Solve()'s validity check will reject this input
# and exit(0). Confirm the intended demo function.
expr = "x^4+0.5"
L = 0.1
R = 1
eps = 1e-6
uu = daycung_oop(L, R, eps, expr);
print(f"Nghiệm của phương trình {expr} trên khoảng [{L}, {R}] là: {uu.Solve()}");
| 24.463415 | 86 | 0.40113 |
ace0fe8c7b8d94bf67c2a39aacba899febd2f7b5 | 339 | py | Python | cellxgene_schema_cli/cellxgene_schema/env.py | chanzuckerberg/single-cell-curation | 7ea0aae3b3d8c75d9717b34374a8d10e222d71ce | [
"MIT"
] | 8 | 2021-03-17T23:42:41.000Z | 2022-03-08T13:08:55.000Z | cellxgene_schema_cli/cellxgene_schema/env.py | chanzuckerberg/single-cell-curation | 7ea0aae3b3d8c75d9717b34374a8d10e222d71ce | [
"MIT"
] | 156 | 2021-02-23T18:17:42.000Z | 2022-03-31T20:49:46.000Z | cellxgene_schema_cli/cellxgene_schema/env.py | chanzuckerberg/single-cell-curation | 7ea0aae3b3d8c75d9717b34374a8d10e222d71ce | [
"MIT"
] | 8 | 2021-03-22T17:07:31.000Z | 2022-03-08T11:07:48.000Z | import os
PACKAGE_ROOT = os.path.dirname(os.path.realpath(__file__))  # absolute directory of this package
# Locations of the ontology data files bundled with the package.
ONTOLOGY_DIR = os.path.join(PACKAGE_ROOT, "ontology_files")
OWL_INFO_YAML = os.path.join(ONTOLOGY_DIR, "owl_info.yml")
PARSED_ONTOLOGIES_FILE = os.path.join(ONTOLOGY_DIR, "all_ontology.json.gz")
SCHEMA_DEFINITIONS_DIR = os.path.join(PACKAGE_ROOT, "schema_definitions")
| 42.375 | 75 | 0.80531 |
ace0fedbfa42fc88eb208cc3f997d3ece1a95cef | 3,494 | py | Python | egs/librispeech/asr/simple_v1/common.py | qindazhu/snowfall | 811e5333281a279e27e3008f5d025111d19cc487 | [
"Apache-2.0"
] | null | null | null | egs/librispeech/asr/simple_v1/common.py | qindazhu/snowfall | 811e5333281a279e27e3008f5d025111d19cc487 | [
"Apache-2.0"
] | null | null | null | egs/librispeech/asr/simple_v1/common.py | qindazhu/snowfall | 811e5333281a279e27e3008f5d025111d19cc487 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2019-2020 Mobvoi AI Lab, Beijing, China (author: Fangjun Kuang)
# Apache 2.0
import os
from datetime import datetime
import logging
import numpy as np
import torch
def setup_logger(log_filename, log_level='info'):
    """Configure root logging to write to ``<log_filename>-<timestamp>`` and to the console.

    Parent directories of the log file are created if needed.

    Args:
        log_filename: prefix of the log file path; a timestamp is appended.
        log_level: one of 'debug', 'info' or 'warning'.

    Raises:
        ValueError: if ``log_level`` is not one of the supported names.
    """
    levels = {
        'debug': logging.DEBUG,
        'info': logging.INFO,
        'warning': logging.WARNING,
    }
    if log_level not in levels:
        # BUGFIX: an unknown name previously fell through the if/elif chain and
        # crashed later with UnboundLocalError on ``level``; fail fast instead.
        raise ValueError('unsupported log level: {}'.format(log_level))
    level = levels[log_level]
    now = datetime.now()
    date_time = now.strftime('%Y-%m-%d-%H-%M-%S')
    log_filename = '{}-{}'.format(log_filename, date_time)
    log_dir = os.path.dirname(log_filename)
    if log_dir:
        # os.makedirs('') raises, so skip when the log file goes to the CWD.
        os.makedirs(log_dir, exist_ok=True)
    formatter = '%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s'
    logging.basicConfig(filename=log_filename,
                        format=formatter,
                        level=level,
                        filemode='w')
    # Mirror everything to the console at the same level.
    console = logging.StreamHandler()
    console.setLevel(level)
    console.setFormatter(logging.Formatter(formatter))
    logging.getLogger('').addHandler(console)
def load_checkpoint(filename, model):
    """Load a checkpoint written by save_checkpoint() into ``model``.

    Handles checkpoints saved from DDP-wrapped models (keys prefixed with
    'module.') when ``model`` itself is not wrapped.

    Returns
    -------
    (epoch, learning_rate, objf) metadata stored in the checkpoint.
    """
    logging.info('load checkpoint from {}'.format(filename))
    checkpoint = torch.load(filename, map_location='cpu')
    keys = ['state_dict', 'epoch', 'learning_rate', 'objf']
    for k in keys:
        assert k in checkpoint
    if not list(model.state_dict().keys())[0].startswith('module.') \
            and list(checkpoint['state_dict'])[0].startswith('module.'):
        # the checkpoint was saved by DDP
        logging.info('load checkpoint from DDP')
        # Strip the 'module.' prefix key-by-key; every checkpoint key must be
        # consumed, otherwise the assert below fires.
        dst_state_dict = model.state_dict()
        src_state_dict = checkpoint['state_dict']
        for key in dst_state_dict.keys():
            src_key = '{}.{}'.format('module', key)
            dst_state_dict[key] = src_state_dict.pop(src_key)
        assert len(src_state_dict) == 0
        model.load_state_dict(dst_state_dict)
    else:
        model.load_state_dict(checkpoint['state_dict'])
    epoch = checkpoint['epoch']
    learning_rate = checkpoint['learning_rate']
    objf = checkpoint['objf']
    return epoch, learning_rate, objf
def save_checkpoint(filename, model, epoch, learning_rate, objf, local_rank=0):
    """Save the model state plus training metadata to ``filename``.

    Only rank 0 (or ``local_rank is None``) writes; other DDP ranks return early.

    Args:
        filename: destination path for ``torch.save``.
        model: module whose ``state_dict()`` is stored.
        epoch, learning_rate, objf: metadata stored alongside the weights.
    """
    if local_rank is not None and local_rank != 0:
        return
    # BUGFIX: the message template never interpolated the ``filename`` argument
    # that was passed to format().
    logging.info('Save checkpoint to {filename}: epoch={epoch}, '
                 'learning_rate={learning_rate}, objf={objf}'.format(
                     filename=filename,
                     epoch=epoch,
                     learning_rate=learning_rate,
                     objf=objf))
    checkpoint = {
        'state_dict': model.state_dict(),
        'epoch': epoch,
        'learning_rate': learning_rate,
        'objf': objf
    }
    torch.save(checkpoint, filename)
def save_training_info(filename,
                       model_path,
                       current_epoch,
                       learning_rate,
                       objf,
                       best_objf,
                       best_epoch,
                       local_rank=0):
    """Write a small human-readable summary of the training state to ``filename``.

    Non-zero DDP ranks skip the write entirely.
    """
    if local_rank != None and local_rank != 0:
        return
    summary = [
        'model_path: {}\n'.format(model_path),
        'epoch: {}\n'.format(current_epoch),
        'learning rate: {}\n'.format(learning_rate),
        'objf: {}\n'.format(objf),
        'best objf: {}\n'.format(best_objf),
        'best epoch: {}\n'.format(best_epoch),
    ]
    with open(filename, 'w') as f:
        f.writelines(summary)
    logging.info('write training info to {}'.format(filename))
| 33.596154 | 81 | 0.598741 |
ace0ff98dd35f6e32780d4c08354feaca250c793 | 360 | py | Python | WebScraping-Scrapy/amazon/amazon/pipelines.py | SakshayMahna/WebScraping | 7e1b8ccf117bfde035d27a24fd61d4b5ccacbb2f | [
"MIT"
] | 18 | 2021-09-23T09:41:38.000Z | 2022-03-31T15:42:06.000Z | WebScraping-Scrapy/amazon/amazon/pipelines.py | SakshayMahna/WebScraping | 7e1b8ccf117bfde035d27a24fd61d4b5ccacbb2f | [
"MIT"
] | null | null | null | WebScraping-Scrapy/amazon/amazon/pipelines.py | SakshayMahna/WebScraping | 7e1b8ccf117bfde035d27a24fd61d4b5ccacbb2f | [
"MIT"
] | 5 | 2021-09-24T18:33:26.000Z | 2022-03-12T14:30:52.000Z | # Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
class AmazonPipeline:
    """Scrapy item pipeline stub: passes every item through unchanged."""
    def process_item(self, item, spider):
        # No transformation yet; returning the item keeps it flowing to later pipelines.
        return item
| 25.714286 | 66 | 0.766667 |
ace0ffb567480e1d73e3c76279d87b0c58cb1986 | 8,312 | py | Python | scripts/lpd.py | mikeaalv/LPD_metabolimics | 695fc5dae8aec2d76b6b25ba4bdb3b3142d54b16 | [
"MIT"
] | null | null | null | scripts/lpd.py | mikeaalv/LPD_metabolimics | 695fc5dae8aec2d76b6b25ba4bdb3b3142d54b16 | [
"MIT"
] | null | null | null | scripts/lpd.py | mikeaalv/LPD_metabolimics | 695fc5dae8aec2d76b6b25ba4bdb3b3142d54b16 | [
"MIT"
] | null | null | null | # this script implement LPD (similar to topic modeling) to uncovering latent metabolic funcitonal states
import os
import sys
from collections import OrderedDict
from copy import deepcopy
from time import time
#
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import pandas as pd
#
import theano
import theano.tensor as tt
from theano import shared
from theano.sandbox.rng_mrg import MRG_RandomStream
#
import pymc3 as pm
from pymc3 import Dirichlet
from pymc3 import math as pmmath
from sklearn.decomposition import LatentDirichletAllocation
# environmental variables
os.environ["THEANO_FLAGS"]="device=cpu,floatX=float64"
#
plt.style.use("seaborn-darkgrid")
def logp_lda_doc(beta, theta):
    """Returns the log-likelihood function for given documents.

    K : number of functional states in the model
    N : number of features
    V : number of levels (size of vocabulary)
    D : number of time points (in a mini-batch)

    Parameters
    ----------
    beta : tensor (K x N x V)
        level distributions.
    theta : tensor (D x K)
        functional states distributions for samples.
    """
    def ll_docs_f(data):
        # Indices of all non-negative entries (rows = samples, cols = features).
        # BUGFIX: ``data.nonzero(data >= 0)`` passed the mask as nonzero()'s
        # ``return_matrix`` flag, which is invalid for a symbolic tensor; take
        # the indices of the mask itself instead.
        dixs, vixs = (data >= 0).nonzero()
        val = data[dixs, vixs]
        ll_docs = (val * pmmath.logsumexp(tt.log(theta[dixs]) + tt.log(beta.T[vixs]), axis=1).ravel())
        # Per-levels log-likelihood times num of tokens in the whole dataset.
        # BUGFIX: the normalizer referenced an undefined name ``vfreqs``; it must
        # be the observed values gathered above. ``n_tokens`` is a module-level
        # constant defined by the driver script below.
        return tt.sum(ll_docs) / (tt.sum(val) + 1e-9) * n_tokens
    return ll_docs_f
class LDAEncoder:
    """Encode (term-frequency) document vectors to variational means and (log-transformed) stds."""

    def __init__(self, n_levels, n_hidden, n_topics, p_corruption=0, random_seed=1):
        """Two-layer MLP encoder: n_levels -> n_hidden -> 2 * (n_topics - 1) outputs."""
        # BUGFIX: the module imports ``MRG_RandomStream`` (without the trailing
        # 's') while this class needs theano's ``MRG_RandomStreams``; import the
        # correctly named symbol locally.
        from theano.sandbox.rng_mrg import MRG_RandomStreams
        rng = np.random.RandomState(random_seed)
        self.n_levels = n_levels
        self.n_hidden = n_hidden
        self.n_topics = n_topics
        # MLP parameters, stored flattened and reshaped on use in encode().
        self.w0 = shared(0.01 * rng.randn(n_levels, n_hidden).ravel(), name="w0")
        self.b0 = shared(0.01 * rng.randn(n_hidden), name="b0")
        self.w1 = shared(0.01 * rng.randn(n_hidden, 2 * (n_topics - 1)).ravel(), name="w1")
        self.b1 = shared(0.01 * rng.randn(2 * (n_topics - 1)), name="b1")
        self.rng = MRG_RandomStreams(seed=random_seed)
        self.p_corruption = p_corruption

    def encode(self, xs):
        """Map document vectors ``xs`` to {'mu', 'rho'} variational parameters."""
        if 0 < self.p_corruption:
            # Randomly zero out (corrupt) a fraction of the observed counts.
            dixs, vixs = xs.nonzero()
            mask = tt.set_subtensor(
                tt.zeros_like(xs)[dixs, vixs],
                self.rng.binomial(size=dixs.shape, n=1, p=1 - self.p_corruption),
            )
            xs_ = xs * mask
        else:
            xs_ = xs
        w0 = self.w0.reshape((self.n_levels, self.n_hidden))
        w1 = self.w1.reshape((self.n_hidden, 2 * (self.n_topics - 1)))
        hs = tt.tanh(xs_.dot(w0) + self.b0)
        zs = hs.dot(w1) + self.b1
        # First half of the outputs are means, second half the rho parameters.
        zs_mean = zs[:, : (self.n_topics - 1)]
        zs_rho = zs[:, (self.n_topics - 1) :]
        return {"mu": zs_mean, "rho": zs_rho}

    def get_params(self):
        """Return the trainable shared variables of the encoder MLP."""
        return [self.w0, self.b0, self.w1, self.b1]
def reduce_rate(a, h, i):
    """pymc3 fit() callback: decay the shared learning rate ``s`` over iterations.

    ``a`` and ``h`` (presumably the approximation and loss history — unused here)
    are required by the callback signature; ``i`` is the iteration counter.
    Relies on the module-level globals ``s``, ``η`` and ``minibatch_size``.
    """
    s.set_value(η / ((i / minibatch_size) + 1) ** 0.7)
def print_top_words(beta, feature_names, n_top_words=10):
    """Print, per topic/state, the ``n_top_words`` feature names with the largest weights.

    Args:
        beta: (K x V) array of per-topic word/level weights.
        feature_names: sequence mapping a column index to a printable name.
        n_top_words: number of names printed per topic, highest weight first.
    """
    # ``enumerate`` instead of range(len(...)): same order, clearer intent.
    for topic_idx, topic in enumerate(beta):
        top = [feature_names[j] for j in topic.argsort()[: -n_top_words - 1 : -1]]
        print(("Topic #%d: " % topic_idx) + " ".join(top))
def calc_pp(ws, thetas, beta, wix):
    """Log predictive probability of one held-out word.

    Parameters
    ----------
    ws: ndarray (N,)
        Number of times the held-out word appeared in N documents.
    thetas: ndarray, shape=(N, K)
        Topic distributions for N documents.
    beta: ndarray, shape=(K, V)
        Word distributions for K topics.
    wix: int
        Index of the held-out word.

    Returns
    -------
    Per-document log probability of the held-out word, weighted by its count.
    """
    word_probs = thetas.dot(beta[:, wix])  # (N,) probability of word wix per document
    return ws * np.log(word_probs)
def eval_lda(transform, beta, docs_te, wixs):
    """Evaluate LDA model by log predictive probability.

    Parameters
    ----------
    transform: Python function
        Transform document vectors to posterior mean of topic proportions.
    beta: ndarray, shape=(K, V)
        Word distributions for K topics.
    docs_te: ndarray, shape=(N, V)
        Held-out document/word count matrix.
    wixs: iterable of int
        Word indices to be held-out.

    Returns
    -------
    dict with the per-token log predictive probability ('lp'), the topic
    distributions inferred per held-out word ('thetass'), 'beta', and the
    held-out counts per word ('wss').
    """
    lpss = []
    docs_ = deepcopy(docs_te)
    thetass = []
    wss = []
    total_words = 0
    for wix in wixs:
        ws = docs_te[:, wix].ravel()
        if 0 < ws.sum():
            # Hold-out: zero this word's column, infer topics, then restore it.
            docs_[:, wix] = 0
            # Topic distributions
            thetas = transform(docs_)
            # Predictive log probability
            lpss.append(calc_pp(ws, thetas, beta, wix))
            docs_[:, wix] = ws
            thetass.append(thetas)
            wss.append(ws)
            total_words += ws.sum()
        else:
            # Word absent from the held-out set: keep placeholders so indices align with wixs.
            thetass.append(None)
            wss.append(None)
    # Log-probability per held-out token.
    # NOTE(review): if no held-out word occurs at all this divides by zero.
    lp = np.sum(np.hstack(lpss)) / total_words
    return {"lp": lp, "thetass": thetass, "beta": beta, "wss": wss}
def transform_pymc3(docs):
    """Posterior-mean topic proportions via the fitted VI sampler (module global ``sample_vi_theta``)."""
    return sample_vi_theta(docs)
def transform_sklearn(docs):
    """Topic proportions from the fitted sklearn LDA (module global ``lda``), re-normalised to sum to 1."""
    thetas = lda.transform(docs)
    return thetas / thetas.sum(axis=1)[:, np.newaxis]
# The number of words in the vocabulary
n_levels=3
#
print("Loading dataset...")
# NOTE(review): machine-specific absolute path — this script only runs on the author's machine.
whl_data=pd.read_csv('/Users/yuewu/Dropbox (Edison_Lab@UGA)/Projects/Bioinformatics_modeling/spectral.related/lpd/data/input.csv',header=None)
whl_data_np=whl_data.to_numpy()
# Rows 1..51 of the CSV (row 0 skipped — presumably a header row; confirm).
use_data=whl_data_np[1:52,]
# plt.plot(use_data[:10,:])
# plt.show()
n_samples_tr=40
n_samples_te=use_data.shape[0]-n_samples_tr
data_tr=use_data[:n_samples_tr,:]
data_te=use_data[n_samples_tr:,:]
print("Number of samples for training = {}".format(data_tr.shape[0]))
print("Number of samples for test = {}".format(data_te.shape[0]))
#
n_tokens=3
print(f"Number of tokens in training set = {n_tokens}")
#
# NOTE(review): n_states is never used below — the model code references an
# undefined name ``n_topics``; presumably these should be the same variable.
n_states=5
# NOTE(review): minibatch_size (52) is larger than n_samples_tr (40) — verify.
minibatch_size=52#training set small so should be fine to use the whole set as the batch size
# defining minibatch
# NOTE(review): ``docs_tr`` is undefined; the training array above is ``data_tr``
# (a dense ndarray with no ``.toarray()``). These two lines raise NameError as written.
doc_t_minibatch = pm.Minibatch(docs_tr.toarray(), minibatch_size)
doc_t = shared(docs_tr.toarray()[:minibatch_size])
# LDA-style generative model: per-sample state proportions (theta) and
# per-state level distributions (beta), likelihood via logp_lda_doc.
# NOTE(review): ``n_topics`` is undefined in this script — ``n_states`` (defined
# above) was probably intended.
with pm.Model() as model:
    theta = Dirichlet(
        "theta",
        a=pm.floatX((1.0 / n_topics) * np.ones((minibatch_size, n_topics))),
        shape=(minibatch_size, n_topics),
        # do not forget scaling
        total_size=n_samples_tr,
    )
    beta = Dirichlet(
        "beta",
        a=pm.floatX((1.0 / n_topics) * np.ones((n_topics, n_levels))),
        shape=(n_topics, n_levels),
    )
    # Note, that we defined likelihood with scaling, so here we need no additional `total_size` kwarg
    doc = pm.DensityDist("doc",logp_lda_doc(beta,theta),observed=doc_t)
# Amortised variational encoder mapping documents to theta's variational parameters.
encoder = LDAEncoder(n_levels=n_levels, n_hidden=100, n_topics=n_topics, p_corruption=0.0)
local_RVs = OrderedDict([(theta, encoder.encode(doc_t))])
local_RVs  # no-op outside a notebook (was an interactive display)
encoder_params = encoder.get_params()
encoder_params  # no-op outside a notebook (was an interactive display)
# Learning-rate schedule: s starts at η and is decayed by the reduce_rate callback.
η = 0.1
s = shared(η)
with model:
    approx = pm.MeanField(local_rv=local_RVs)
    approx.scale_cost_to_minibatch = False
    inference = pm.KLqp(approx)
    inference.fit(
        10000,
        callbacks=[reduce_rate],
        obj_optimizer=pm.sgd(learning_rate=s),
        more_obj_params=encoder_params,
        total_grad_norm_constraint=200,
        more_replacements={doc_t: doc_t_minibatch},
    )
plt.plot(approx.hist[10:]);  # ELBO trace (first 10 noisy iterations skipped)
# Switch the shared data to the full training set before sampling.
# NOTE(review): ``docs_tr`` is undefined here as well (see the data loading above).
doc_t.set_value(docs_tr.toarray())
samples = pm.sample_approx(approx, draws=100)
beta_pymc3 = samples["beta"].mean(axis=0)
# NOTE(review): ``feature_names`` is never defined in this script.
print_top_words(beta_pymc3, feature_names)
# Baseline: sklearn's online LDA with the same number of components.
lda = LatentDirichletAllocation(
    n_components=n_topics,
    max_iter=5,
    learning_method="online",
    learning_offset=50.0,
    random_state=0,
)
# NOTE(review): ``%time`` is IPython magic — this file cannot be imported or run
# as a plain Python script while this line is present.
%time lda.fit(docs_tr)
beta_sklearn = lda.components_ / lda.components_.sum(axis=1)[:, np.newaxis]
print_top_words(beta_sklearn, feature_names)
# Compile a function returning the posterior-mean theta for arbitrary documents.
inp = tt.matrix(dtype="int64")
sample_vi_theta = theano.function(
    [inp], approx.sample_node(approx.model.theta, 100, more_replacements={doc_t: inp}).mean(0)
)
# Held-out evaluation over the first 100 word indices for both models.
# NOTE(review): ``docs_te`` is undefined; the test array above is ``data_te``.
result_pymc3 = eval_lda(\
    transform_pymc3, beta_pymc3, docs_te.toarray(), np.arange(100)\
)
print("Predictive log prob (pm3) = {}".format(result_pymc3["lp"]))
result_sklearn = eval_lda(\
    transform_sklearn, beta_sklearn, docs_te.toarray(), np.arange(100)\
)
print("Predictive log prob (sklearn) = {}".format(result_sklearn["lp"]))
| 30.899628 | 142 | 0.65616 |
ace10156cc5068be896cd48074826fc58d12957f | 5,332 | py | Python | tests/parser/types/numbers/test_constants.py | ActorForth/vyper | 82b9dbb9f26fce2e60496df9f39c2818844b4276 | [
"Apache-2.0"
] | null | null | null | tests/parser/types/numbers/test_constants.py | ActorForth/vyper | 82b9dbb9f26fce2e60496df9f39c2818844b4276 | [
"Apache-2.0"
] | null | null | null | tests/parser/types/numbers/test_constants.py | ActorForth/vyper | 82b9dbb9f26fce2e60496df9f39c2818844b4276 | [
"Apache-2.0"
] | null | null | null | import itertools
from decimal import Decimal
import pytest
from vyper.compiler import compile_code
from vyper.exceptions import InvalidType
from vyper.utils import MemoryPositions
def test_builtin_constants(get_contract_with_gas_estimation):
    """Built-in constants (ZERO_ADDRESS, EMPTY_BYTES32, MIN_/MAX_INT128,
    MIN_/MAX_DECIMAL, MAX_UINT256) compare correctly against runtime values
    and participate in arithmetic."""
    code = """
@external
def test_zaddress(a: address) -> bool:
    return a == ZERO_ADDRESS
@external
def test_empty_bytes32(a: bytes32) -> bool:
    return a == EMPTY_BYTES32
@external
def test_int128(a: int128) -> (bool, bool):
    return a == MAX_INT128, a == MIN_INT128
@external
def test_decimal(a: decimal) -> (bool, bool):
    return a == MAX_DECIMAL, a == MIN_DECIMAL
@external
def test_uint256(a: uint256) -> bool:
    return a == MAX_UINT256
@external
def test_arithmetic(a: int128) -> int128:
    return MAX_INT128 - a
"""
    c = get_contract_with_gas_estimation(code)
    assert c.test_empty_bytes32(b"\x00" * 32) is True
    assert c.test_empty_bytes32(b"\x0F" * 32) is False
    assert c.test_zaddress("0x0000000000000000000000000000000000000000") is True
    assert c.test_zaddress("0x0000000000000000000000000000000000000012") is False
    assert c.test_int128(2 ** 127 - 1) == [True, False]
    assert c.test_int128(-(2 ** 127)) == [False, True]
    assert c.test_int128(0) == [False, False]
    assert c.test_decimal(Decimal(2 ** 127 - 1)) == [True, False]
    assert c.test_decimal(Decimal("-170141183460469231731687303715884105728")) == [False, True]
    assert c.test_decimal(Decimal("0.1")) == [False, False]
    assert c.test_uint256(2 ** 256 - 1) is True
    assert c.test_arithmetic(5000) == 2 ** 127 - 1 - 5000
def test_builtin_constants_assignment(get_contract_with_gas_estimation):
    """Built-in constants can be assigned to local variables and returned unchanged."""
    code = """
@external
def foo() -> int128:
    bar: int128 = MAX_INT128
    return bar
@external
def goo() -> int128:
    bar: int128 = MIN_INT128
    return bar
@external
def hoo() -> bytes32:
    bar: bytes32 = EMPTY_BYTES32
    return bar
@external
def joo() -> address:
    bar: address = ZERO_ADDRESS
    return bar
@external
def koo() -> decimal:
    bar: decimal = MAX_DECIMAL
    return bar
@external
def loo() -> decimal:
    bar: decimal = MIN_DECIMAL
    return bar
@external
def zoo() -> uint256:
    bar: uint256 = MAX_UINT256
    return bar
"""
    c = get_contract_with_gas_estimation(code)
    assert c.foo() == 2 ** 127 - 1
    assert c.goo() == -(2 ** 127)
    assert c.hoo() == b"\x00" * 32
    assert c.joo() is None
    assert c.koo() == Decimal(2 ** 127 - 1)
    assert c.loo() == Decimal(-(2 ** 127))
    assert c.zoo() == 2 ** 256 - 1
def test_custom_constants(get_contract):
    """User-declared constants can be returned directly and used in arithmetic."""
    code = """
X_VALUE: constant(uint256) = 33
@external
def test() -> uint256:
    return X_VALUE
@external
def test_add(a: uint256) -> uint256:
    return X_VALUE + a
"""
    c = get_contract(code)
    assert c.test() == 33
    assert c.test_add(7) == 40
# Would be nice to put this somewhere accessible, like in vyper.types or something
integer_types = ["uint8", "int128", "int256", "uint256"]
@pytest.mark.parametrize("storage_type,return_type", itertools.permutations(integer_types, 2))
def test_custom_constants_fail(get_contract, assert_compile_failed, storage_type, return_type):
    """Returning a constant of one integer type as a different integer type must raise InvalidType."""
    code = f"""
MY_CONSTANT: constant({storage_type}) = 1
@external
def foo() -> {return_type}:
    return MY_CONSTANT
"""
    assert_compile_failed(lambda: get_contract(code), InvalidType)
def test_constant_address(get_contract):
    """Address-typed constants work for both returning and msg.sender comparison."""
    code = """
OWNER: constant(address) = 0x0000000000000000000000000000000000000012
@external
def get_owner() -> address:
    return OWNER
@external
def is_owner() -> bool:
    if msg.sender == OWNER:
        return True
    else:
        return False
"""
    c = get_contract(code)
    assert c.get_owner() == "0x0000000000000000000000000000000000000012"
    assert c.is_owner() is False
def test_constant_bytes(get_contract):
    """A Bytes[N] constant round-trips through assignment and return unchanged."""
    test_str = b"Alabama, Arkansas. I do love my ma and pa"
    code = f"""
X: constant(Bytes[100]) = b"{test_str.decode()}"
@external
def test() -> Bytes[100]:
    y: Bytes[100] = X
    return y
"""
    c = get_contract(code)
    assert c.test() == test_str
def test_constant_folds(search_for_sublist):
    """Constant expressions are folded at compile time: the IR must contain the
    precomputed value 2**12 * SOME_PRIME rather than runtime arithmetic."""
    some_prime = 10013677
    code = f"""
SOME_CONSTANT: constant(uint256) = 11 + 1
SOME_PRIME: constant(uint256) = {some_prime}
@external
def test() -> uint256:
    # calculate some constant which is really unlikely to be randomly
    # in bytecode
    ret: uint256 = 2**SOME_CONSTANT * SOME_PRIME
    return ret
"""
    lll = compile_code(code, ["ir"])["ir"]
    assert search_for_sublist(
        lll, ["mstore", [MemoryPositions.RESERVED_MEMORY], [2 ** 12 * some_prime]]
    )
def test_constant_lists(get_contract):
    """Constant fixed-size lists support indexing (bytes32[2]) and membership (`in` on int128[3])."""
    code = """
BYTE32_LIST: constant(bytes32[2]) = [
    0x0000000000000000000000000000000000000000000000000000000000001321,
    0x0000000000000000000000000000000000000000000000000000000000001123
]
SPECIAL: constant(int128[3]) = [33, 44, 55]
@external
def test() -> bytes32:
    a: bytes32[2] = BYTE32_LIST
    return a[1]
@view
@external
def contains(a: int128) -> bool:
    return a in SPECIAL
"""
    c = get_contract(code)
    assert c.test()[-2:] == b"\x11\x23"
    assert c.contains(55) is True
    assert c.contains(44) is True
    assert c.contains(33) is True
    assert c.contains(3) is False
ace101f1e5f731693c811a8429a37dbeffe3da62 | 534 | py | Python | parser/parser.py | r-ndy/python_challenge | 9005b9090e3e2b4e8466557a527cb751671c4711 | [
"MIT"
] | null | null | null | parser/parser.py | r-ndy/python_challenge | 9005b9090e3e2b4e8466557a527cb751671c4711 | [
"MIT"
] | null | null | null | parser/parser.py | r-ndy/python_challenge | 9005b9090e3e2b4e8466557a527cb751671c4711 | [
"MIT"
] | null | null | null | import re
def get_text_from_file(file_name: str) -> str:
    """Return the text content of ``file_name``.

    On any failure the error is printed and ``None`` is returned (the
    historical contract of this helper, despite the ``str`` annotation).
    """
    try:
        # BUGFIX: ``with`` guarantees the handle is closed; the original
        # opened the file without ever closing it.
        with open(file_name, 'r') as the_file:
            return the_file.read()
    except Exception as ex:
        # Broad catch kept for backward compatibility: callers expect a printed
        # message and an implicit ``None`` return on any failure.
        print('Problem opening the file', file_name)
        print(ex)
def parse_ips(raw_text: str) -> list:
    """Return every dotted-quad substring found in ``raw_text``, in order.

    The pattern is deliberately permissive — it accepts any four dot-separated
    digit runs (e.g. ``999.999.999.999``), matching the original behaviour.
    """
    # Raw string for the regex: avoids relying on Python passing the
    # unknown ``\.`` escape through unchanged.
    return re.findall(r'[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+', raw_text)
| 24.272727 | 75 | 0.599251 |
ace102279a8c99a0816bd5c3484aae72ec2c24ea | 3,152 | py | Python | tests/testsite/testsite/settings.py | mariushelf/sa2django | 936b0a70b0ccc8faf3ca26ff241b0b6dac13f204 | [
"MIT"
] | null | null | null | tests/testsite/testsite/settings.py | mariushelf/sa2django | 936b0a70b0ccc8faf3ca26ff241b0b6dac13f204 | [
"MIT"
] | null | null | null | tests/testsite/testsite/settings.py | mariushelf/sa2django | 936b0a70b0ccc8faf3ca26ff241b0b6dac13f204 | [
"MIT"
] | null | null | null | """
Django settings for testsite project.

Generated by 'django-admin startproject' using Django 3.1.1.

For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "b!!x6)4-u-1(+g39xl1!+(b%$qwnx3j=(^2lfnlqtf%nh^gnw0"

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = [
    # Project-local test application; the rest are stock Django apps.
    "tests.testsite.testapp",
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
]

MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]

ROOT_URLCONF = "testsite.urls"

TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]

WSGI_APPLICATION = "testsite.wsgi.application"

# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases

DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.sqlite3",
        # Shared-cache in-memory SQLite URI: every connection opened with this
        # NAME sees the same database for the lifetime of the process.
        "NAME": "file:memorydb?mode=memory&cache=shared"
        # 'NAME': '/home/xmhelf/Documents/amsterdam/sa2django/tests/testdb.sqlite'
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
    },
    {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
    {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
    {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]

# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/

LANGUAGE_CODE = "en-us"

TIME_ZONE = "UTC"

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/

STATIC_URL = "/static/"
| 26.940171 | 91 | 0.706536 |
ace10363f5bcc170e89ef0c84abd1b978990fee5 | 754 | py | Python | Alg_2.py | makkksimka/Zadachi_EGE | 8b85508a76acf2c910b98139fc92ad290cdb0419 | [
"MIT"
] | null | null | null | Alg_2.py | makkksimka/Zadachi_EGE | 8b85508a76acf2c910b98139fc92ad290cdb0419 | [
"MIT"
] | null | null | null | Alg_2.py | makkksimka/Zadachi_EGE | 8b85508a76acf2c910b98139fc92ad290cdb0419 | [
"MIT"
import itertools


def calculator(program, value):
    """Run *program* on *value*.

    Each command character acts on the accumulator: '1' squares it,
    '2' decrements it, and any other character (the '*' padding slot)
    is a no-op.
    """
    acc = value
    for command in program:
        if command == "1":
            acc *= acc
        elif command == "2":
            acc -= 1
    return acc


def build_program(start, end):
    """Return the first 5-slot program over {'*', '1', '2'} that turns
    *start* into *end*, with the '*' padding removed; None if none exists.

    itertools.product enumerates candidates in the same lexicographic
    order as the original five nested loops, so the returned program is
    identical; because '*' sorts first, programs with fewer real commands
    are tried earlier.
    """
    for slots in itertools.product("*12", repeat=5):
        program = "".join(slots)
        if calculator(program, start) == end:
            return program.replace("*", "")
    return None
if __name__ == "__main__":
    # Search for a five-command program turning 5 into 8 and report it.
    solution = build_program(5, 8)
    if solution is None:
        print("Решений нет")
    else:
        print(solution)
| 25.133333 | 59 | 0.438992 |
ace104316c9f91987db264ba6168c9fb6c5e4515 | 491 | py | Python | tests/references/signals.linear_sweep_time.py | pyfar-seminar/pyfar | 5477f594c539bc74e3022a4594402cdeacd0af54 | [
"MIT"
] | 1 | 2020-11-19T08:50:30.000Z | 2020-11-19T08:50:30.000Z | tests/references/signals.linear_sweep_time.py | pyfar-seminar/pyfar | 5477f594c539bc74e3022a4594402cdeacd0af54 | [
"MIT"
] | 2 | 2021-01-15T10:00:37.000Z | 2022-01-31T16:35:23.000Z | tests/references/signals.linear_sweep_time.py | pyfar-seminar/pyfar | 5477f594c539bc74e3022a4594402cdeacd0af54 | [
"MIT"
] | 2 | 2020-12-07T22:39:49.000Z | 2021-03-29T08:43:05.000Z | # Write linear sweep to csv for testing.
# The sweep was manually inspected.
# The time signal was inspected for smoothness and maximum amplitudes of +/-1.
# The spectrum was inspected for the ripple at the edges of the frequency range
# (typical for time domain sweep generation) and constant amplitude across
# frequency.
import numpy as np
from pyfar.signals import linear_sweep_time

# 2**10 samples sweeping from 1 kHz to 20 kHz; ".time" holds the raw samples.
sweep = linear_sweep_time(2**10, [1e3, 20e3]).time
np.savetxt("signals.linear_sweep_time.csv", sweep)
| 40.916667 | 79 | 0.786151 |
ace104c75a8250d9a9f02b926266aba1afe77d9a | 4,329 | bzl | Python | cc/defs.bzl | meteorcloudy/rules_cc | d545fa4f798f2a0b82f556b8b0ec59a93c100df7 | [
"Apache-2.0"
] | 3 | 2020-11-30T15:35:37.000Z | 2022-01-06T14:17:18.000Z | cc/defs.bzl | meteorcloudy/rules_cc | d545fa4f798f2a0b82f556b8b0ec59a93c100df7 | [
"Apache-2.0"
] | 54 | 2020-06-23T17:34:04.000Z | 2022-03-31T02:04:06.000Z | cc/defs.bzl | meteorcloudy/rules_cc | d545fa4f798f2a0b82f556b8b0ec59a93c100df7 | [
"Apache-2.0"
] | 12 | 2020-07-14T23:59:57.000Z | 2022-03-22T09:59:18.000Z | # Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starlark rules for building C++ projects."""
load("//cc/private/rules_impl:cc_flags_supplier.bzl", _cc_flags_supplier = "cc_flags_supplier")
load("//cc/private/rules_impl:compiler_flag.bzl", _compiler_flag = "compiler_flag")
_MIGRATION_TAG = "__CC_RULES_MIGRATION_DO_NOT_USE_WILL_BREAK__"

def _add_tags(attrs):
    """Append the migration tag to the rule's tags, creating the list if absent.

    Args:
      attrs: Rule attributes; mutated in place.

    Returns:
      The same attrs dict, with the migration tag appended to "tags".
    """
    existing_tags = attrs.get("tags")
    if existing_tags == None:
        attrs["tags"] = [_MIGRATION_TAG]
    else:
        attrs["tags"] = existing_tags + [_MIGRATION_TAG]
    return attrs
def cc_binary(**attrs):
    """Bazel cc_binary rule.

    https://docs.bazel.build/versions/master/be/c-cpp.html#cc_binary

    Wraps the native rule and adds the rules_cc migration tag to the target.

    Args:
      **attrs: Rule attributes
    """

    # buildifier: disable=native-cc
    native.cc_binary(**_add_tags(attrs))

def cc_test(**attrs):
    """Bazel cc_test rule.

    https://docs.bazel.build/versions/master/be/c-cpp.html#cc_test

    Wraps the native rule and adds the rules_cc migration tag to the target.

    Args:
      **attrs: Rule attributes
    """

    # buildifier: disable=native-cc
    native.cc_test(**_add_tags(attrs))

def cc_library(**attrs):
    """Bazel cc_library rule.

    https://docs.bazel.build/versions/master/be/c-cpp.html#cc_library

    Wraps the native rule and adds the rules_cc migration tag to the target.

    Args:
      **attrs: Rule attributes
    """

    # buildifier: disable=native-cc
    native.cc_library(**_add_tags(attrs))

def cc_import(**attrs):
    """Bazel cc_import rule.

    https://docs.bazel.build/versions/master/be/c-cpp.html#cc_import

    Wraps the native rule and adds the rules_cc migration tag to the target.

    Args:
      **attrs: Rule attributes
    """

    # buildifier: disable=native-cc
    native.cc_import(**_add_tags(attrs))

def cc_proto_library(**attrs):
    """Bazel cc_proto_library rule.

    https://docs.bazel.build/versions/master/be/c-cpp.html#cc_proto_library

    Wraps the native rule and adds the rules_cc migration tag to the target.

    Args:
      **attrs: Rule attributes
    """

    # buildifier: disable=native-cc
    native.cc_proto_library(**_add_tags(attrs))

def fdo_prefetch_hints(**attrs):
    """Bazel fdo_prefetch_hints rule.

    https://docs.bazel.build/versions/master/be/c-cpp.html#fdo_prefetch_hints

    Wraps the native rule and adds the rules_cc migration tag to the target.

    Args:
      **attrs: Rule attributes
    """

    # buildifier: disable=native-cc
    native.fdo_prefetch_hints(**_add_tags(attrs))

def fdo_profile(**attrs):
    """Bazel fdo_profile rule.

    https://docs.bazel.build/versions/master/be/c-cpp.html#fdo_profile

    Wraps the native rule and adds the rules_cc migration tag to the target.

    Args:
      **attrs: Rule attributes
    """

    # buildifier: disable=native-cc
    native.fdo_profile(**_add_tags(attrs))

def cc_toolchain(**attrs):
    """Bazel cc_toolchain rule.

    https://docs.bazel.build/versions/master/be/c-cpp.html#cc_toolchain

    Wraps the native rule and adds the rules_cc migration tag to the target.

    Args:
      **attrs: Rule attributes
    """

    # buildifier: disable=native-cc
    native.cc_toolchain(**_add_tags(attrs))

def cc_toolchain_suite(**attrs):
    """Bazel cc_toolchain_suite rule.

    https://docs.bazel.build/versions/master/be/c-cpp.html#cc_toolchain_suite

    Wraps the native rule and adds the rules_cc migration tag to the target.

    Args:
      **attrs: Rule attributes
    """

    # buildifier: disable=native-cc
    native.cc_toolchain_suite(**_add_tags(attrs))

def objc_library(**attrs):
    """Bazel objc_library rule.

    https://docs.bazel.build/versions/master/be/objective-c.html#objc_library

    Wraps the native rule and adds the rules_cc migration tag to the target.

    Args:
      **attrs: Rule attributes
    """

    # buildifier: disable=native-cc
    native.objc_library(**_add_tags(attrs))

def objc_import(**attrs):
    """Bazel objc_import rule.

    https://docs.bazel.build/versions/master/be/objective-c.html#objc_import

    Wraps the native rule and adds the rules_cc migration tag to the target.

    Args:
      **attrs: Rule attributes
    """

    # buildifier: disable=native-cc
    native.objc_import(**_add_tags(attrs))

def cc_flags_supplier(**attrs):
    """Bazel cc_flags_supplier rule.

    Forwards to the Starlark implementation loaded above and adds the
    rules_cc migration tag to the target.

    Args:
      **attrs: Rule attributes
    """
    _cc_flags_supplier(**_add_tags(attrs))

def compiler_flag(**attrs):
    """Bazel compiler_flag rule.

    Forwards to the Starlark implementation loaded above and adds the
    rules_cc migration tag to the target.

    Args:
      **attrs: Rule attributes
    """
    _compiler_flag(**_add_tags(attrs))
| 24.596591 | 95 | 0.684685 |
ace104e3161fb40cb38c78f97c4710c0510ac3e9 | 1,373 | py | Python | playground/accounts/serializers.py | kkomarocker/playground | 1cf0fb438975589c9d4dcab3796a2f6afbb44bf3 | [
"MIT"
] | null | null | null | playground/accounts/serializers.py | kkomarocker/playground | 1cf0fb438975589c9d4dcab3796a2f6afbb44bf3 | [
"MIT"
] | 8 | 2020-02-28T01:19:14.000Z | 2021-06-10T19:20:41.000Z | playground/accounts/serializers.py | kkomarocker/playground | 1cf0fb438975589c9d4dcab3796a2f6afbb44bf3 | [
"MIT"
] | null | null | null | import os
from rest_framework import serializers
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
class UserSerializer(serializers.ModelSerializer):
    """Serializes a User's id, username and email."""
    class Meta:
        model = User
        fields = ('id', 'username', 'email')
class RegisterSerializer(serializers.ModelSerializer):
    """Registers a new user account; the password field is write-only."""

    class Meta:
        model = User
        fields = ('id', 'username', 'email', 'password',)
        extra_kwargs = {'password': {'write_only': True}}

    def create(self, validated_data):
        """Create a regular user, or a superuser for the configured admin emails."""
        username = validated_data['username']
        email = validated_data['email']
        password = validated_data['password']
        # Emails listed in the HAEMAIL/HJEMAIL environment variables get
        # superuser accounts; everyone else gets a regular account.
        admin_emails = (os.environ.get('HAEMAIL'), os.environ.get('HJEMAIL'))
        if email in admin_emails:
            return User.objects.create_superuser(username, email, password)
        return User.objects.create_user(username, email, password)
class LoginSerializer(serializers.Serializer):
    """Validates username/password credentials via Django authentication."""
    username = serializers.CharField()
    password = serializers.CharField()

    def validate(self, data):
        """Return the authenticated, active user or raise a validation error."""
        user = authenticate(**data)
        if not user or not user.is_active:
            raise serializers.ValidationError("Incorrect Credentials")
        return user
| 29.212766 | 72 | 0.623452 |
ace105270b52b2a6b700091310969fb6377aec35 | 978 | py | Python | tools/projects/http-parser-example-1.py | jmikedupont2/pythoscope | 58a1149f204897e8f789d93ee7e49b6db0bd346f | [
"MIT"
] | 2 | 2020-04-06T11:02:46.000Z | 2020-05-14T18:37:04.000Z | tools/projects/http-parser-example-1.py | jmikedupont2/pythoscope | 58a1149f204897e8f789d93ee7e49b6db0bd346f | [
"MIT"
] | null | null | null | tools/projects/http-parser-example-1.py | jmikedupont2/pythoscope | 58a1149f204897e8f789d93ee7e49b6db0bd346f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import pythoscope
pythoscope.start()
import socket
from http_parser.parser import HttpParser
def main():
    """Fetch http://gunicorn.org/ and stream the response through HttpParser,
    printing the headers once complete and then the accumulated body."""
    p = HttpParser()
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    body = []
    header_done = False
    try:
        s.connect(('gunicorn.org', 80))
        # Bug fix: socket.send() requires bytes on Python 3 (a str raised
        # TypeError) and may transmit only part of the buffer; sendall()
        # guarantees the whole request goes out.
        s.sendall(b"GET / HTTP/1.1\r\nHost: gunicorn.org\r\n\r\n")
        while True:
            data = s.recv(1024)
            if not data:
                break
            recved = len(data)
            nparsed = p.execute(data, recved)
            assert nparsed == recved
            if p.is_headers_complete() and not header_done:
                print(p.get_headers())
                header_done = True
            if p.is_partial_body():
                # NOTE(review): recv_body() chunks are assumed str-compatible
                # for the join below — confirm against the http_parser version.
                body.append(p.recv_body())
            if p.is_message_complete():
                break
        print("".join(body))
    finally:
        # Always release the socket, even if parsing fails mid-stream.
        s.close()
if __name__ == "__main__":
    main()
    # End the pythoscope capture session started at import time.
    pythoscope.stop()
| 20.375 | 62 | 0.537832 |
ace105c710631498559c2abe3241218107006758 | 11,570 | py | Python | gooddata-metadata-client/gooddata_metadata_client/model/json_api_analytical_dashboard_out_relationships_datasets.py | gooddata/gooddata-python-sdk | df4d4a4d730ab376960ae2ed01e7d86498e85c6a | [
"MIT"
] | 7 | 2022-01-24T16:27:06.000Z | 2022-02-25T10:18:49.000Z | gooddata-metadata-client/gooddata_metadata_client/model/json_api_analytical_dashboard_out_relationships_datasets.py | gooddata/gooddata-python-sdk | df4d4a4d730ab376960ae2ed01e7d86498e85c6a | [
"MIT"
] | 29 | 2022-01-20T15:45:38.000Z | 2022-03-31T09:39:25.000Z | gooddata-metadata-client/gooddata_metadata_client/model/json_api_analytical_dashboard_out_relationships_datasets.py | gooddata/gooddata-python-sdk | df4d4a4d730ab376960ae2ed01e7d86498e85c6a | [
"MIT"
] | 7 | 2022-01-20T07:11:15.000Z | 2022-03-09T14:50:17.000Z | """
OpenAPI definition
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v0
Contact: support@gooddata.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from gooddata_metadata_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from gooddata_metadata_client.exceptions import ApiAttributeError
def lazy_import():
    # Deferred import breaks circular dependencies between generated models;
    # the name is published via globals() so module-level code can use it.
    from gooddata_metadata_client.model.json_api_dataset_to_many_linkage import JsonApiDatasetToManyLinkage
    globals()['JsonApiDatasetToManyLinkage'] = JsonApiDatasetToManyLinkage
class JsonApiAnalyticalDashboardOutRelationshipsDatasets(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum-restricted attributes on this model.
    allowed_values = {
    }

    # No length/range/regex validations on this model.
    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'data': (JsonApiDatasetToManyLinkage,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # This model is not polymorphic, so there is no discriminator.
        return None

    attribute_map = {
        'data': 'data',  # noqa: E501
    }

    read_only_vars = {
    }

    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, data, *args, **kwargs):  # noqa: E501
        """JsonApiAnalyticalDashboardOutRelationshipsDatasets - a model defined in OpenAPI

        Args:
            data (JsonApiDatasetToManyLinkage):

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.data = data
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    # Internal bookkeeping attributes that must never be treated as
    # model properties by the ModelNormal machinery.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, data, *args, **kwargs):  # noqa: E501
        """JsonApiAnalyticalDashboardOutRelationshipsDatasets - a model defined in OpenAPI

        Args:
            data (JsonApiDatasetToManyLinkage):

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.data = data
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                        f"class with read only attributes.")
| 43.011152 | 124 | 0.580985 |
ace1063d8a139be06b43a83e7bf87cec1fa2d561 | 10,729 | py | Python | downloader/deb_downloader.py | Ascend/ascend-deployer | dd4ed8a012fd12225796be89e798cfec10cc340d | [
"Apache-2.0"
] | 1 | 2021-10-11T07:43:35.000Z | 2021-10-11T07:43:35.000Z | downloader/deb_downloader.py | Ascend/ascend-deployer | dd4ed8a012fd12225796be89e798cfec10cc340d | [
"Apache-2.0"
] | null | null | null | downloader/deb_downloader.py | Ascend/ascend-deployer | dd4ed8a012fd12225796be89e798cfec10cc340d | [
"Apache-2.0"
] | 1 | 2021-11-08T05:29:22.000Z | 2021-11-08T05:29:22.000Z | #!/usr/bin/env python3
# coding: utf-8
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===========================================================================
import os
import sys
import gzip
import re
import sqlite3 as sqlite
import urllib.request
import configparser
import logger_config
from urllib.error import HTTPError
from downloader import get_download_path
from download_util import DOWNLOAD_INST
from download_util import calc_sha256
DOC = r"""
ubuntu的子仓
main:完全的自由软件。
restricted:不完全的自由软件。
universe:ubuntu官方不提供支持与补丁,全靠社区支持。
muitiverse:非自由软件,完全不提供支持和补丁。
"""
LOG = logger_config.LOG
class DebianSource(object):
    """One 'deb' entry from an APT sources.list file."""

    def __init__(self, line):
        """Parse a source line of the form: "deb <url> <distro> <repo> [...]".

        str.split() without an argument tolerates repeated whitespace and
        the trailing newline (the original split(' ') produced empty fields
        on double spaces), and already strips each token.
        """
        parts = line.split()
        self.url = parts[1]
        self.distro = parts[2]
        self.repoList = parts[3:]

    def get_url(self):
        """Return the base URL of the source."""
        return self.url

    def repos(self):
        """Yield (repo_name, repo_url) for each configured component.

        The URL follows the Debian repository layout: <url>dists/<distro>/<repo>.
        """
        for repo in self.repoList:
            repo_url = "{0}dists/{1}/{2}".format(self.url, self.distro, repo)
            yield repo, repo_url
class Package(object):
    """Immutable-style record describing one downloadable package entry."""

    def __init__(self, package, filename, sha256=None):
        # sha256 is optional: an index entry may omit the digest.
        self.package = package
        self.filename = filename
        self.sha256 = sha256

    def get_packagename(self):
        """Return the package name."""
        return self.package

    def get_filename(self):
        """Return the repository-relative file path of the package."""
        return self.filename

    def get_sha256(self):
        """Return the expected SHA-256 digest, or None if unknown."""
        return self.sha256
class Apt(object):
    """
    Downloader for APT (Debian/Ubuntu) package repositories.

    Builds an in-memory SQLite index of the configured sources and downloads
    packages by direct URL or by package name.
    """
    def __init__(self, source_file, arch):
        # arch selects which binary index to read (x86 -> amd64, else arm64).
        self.arch = arch
        self.binary_path = 'binary-amd64' \
            if 'x86' in self.arch else 'binary-arm64'
        # read the source (repository) configuration
        self.source_list = []
        self.base_dir = get_download_path()
        self.repo_file = os.path.join(self.base_dir, source_file)
        self.resources_dir = os.path.join(self.base_dir, 'resources')
        with open(self.repo_file) as file:
            for line in file.readlines():
                source = DebianSource(line)
                self.source_list.append(source)

    def make_cache(self):
        """
        Build an in-memory SQLite table indexing every package advertised
        by the configured sources (name, version, source, repo, url, sha256).
        """
        self.primary_connection = sqlite.Connection(':memory:')
        self.primary_cur = self.primary_connection.cursor()
        try:
            self.primary_cur.executescript("CREATE TABLE packages \
                (name TEXT, version TEXT, source TEXT, repo TEXT, \
                url TEXT, sha256 TEXT);")
        except sqlite.OperationalError as e:
            # NOTE(review): silently ignores CREATE TABLE failures (e.g. the
            # table already existing); any other OperationalError is hidden too.
            pass
        finally:
            pass
        for source in self.source_list:
            for repo, url in source.repos():
                # Packages.gz is the compressed package index of one component.
                index_url = '{0}/{1}/Packages.gz'.format(url, self.binary_path)
                LOG.info('packages_url=[%s]', index_url)
                packages = self.fetch_package_index(index_url)
                self.make_cache_from_packages(source.get_url(), repo, packages)
        self.primary_connection.commit()
        self.primary_cur.close()

    def clean_cache(self):
        """
        Close the in-memory SQLite connection created by make_cache().
        """
        self.primary_connection.close()

    @staticmethod
    def fetch_package_index(packages_url):
        """
        Download a Packages.gz index to a temporary file, gunzip it and
        return its contents decoded as UTF-8 text.

        :param packages_url: full URL of the Packages.gz file
        :return: decoded index text
        """
        tmp_file = DOWNLOAD_INST.download_to_tmp(packages_url)
        with gzip.open(tmp_file) as resp:
            html = resp.read()
        os.unlink(tmp_file)
        return html.decode('utf-8')

    @staticmethod
    def version_compare(ver_a, ver_b):
        """
        Return True if ver_a is considered newer than ver_b.

        Components are compared numerically when possible, dot by dot, over
        the shorter of the two versions.

        :param ver_a: version string (e.g. "1.2.3")
        :param ver_b: version string
        :return: bool
        """
        ver_a_list = str(ver_a).split(".")
        ver_b_list = str(ver_b).split(".")
        for i in range(len(ver_a_list)) if len(ver_a_list) < len(ver_b_list) else range(len(ver_b_list)):
            # Strip non-digits so e.g. "3ubuntu1" compares on "31".
            ver_a_list[i] = re.sub(r'\D', '', ver_a_list[i])
            ver_b_list[i] = re.sub(r'\D', '', ver_b_list[i])
            try:
                ver_a_list[i] = int(ver_a_list[i])
                ver_b_list[i] = int(ver_b_list[i])
            except ValueError:
                # Fall back to string comparison for non-numeric components.
                ver_a_list[i] = str(ver_a_list[i])
                ver_b_list[i] = str(ver_b_list[i])
            finally:
                pass
            if ver_a_list[i] == ver_b_list[i]:
                continue
            else:
                return ver_a_list[i] > ver_b_list[i]
        # NOTE(review): this compares the lengths of the raw version STRINGS,
        # not the number of components; looks like len(ver_a_list) >
        # len(ver_b_list) was intended — confirm before changing.
        return len(ver_a) > len(ver_b)

    def make_cache_from_packages(self, source_url, repo, packages_content):
        """
        Parse a Packages index (RFC822-style stanzas separated by blank
        lines) and insert one row per package into the cache table.

        :param source_url: base URL of the source the index came from
        :param repo: component name (e.g. "main")
        :param packages_content: decoded Packages index text
        :return: None
        """
        lines = packages_content.split('\n')
        package = ''
        version = ''
        filename = ''
        sha256 = None
        for line in lines:
            if line.startswith("Package:"):
                package = line.split(': ')[1]
            if line.startswith("Version:"):
                version = line.split(': ')[1]
            if line.startswith("SHA256:"):
                sha256 = line.split(': ')[1]
            if line.startswith("Filename:"):
                filename = line.split(': ')[1]
            if len(line.strip()) == 0:
                # Blank line ends a stanza: flush the accumulated fields.
                params = {'name': package,
                          'version': version,
                          'source': source_url,
                          'repo': repo,
                          'url': filename,
                          'sha256': sha256}
                self.primary_cur.execute("INSERT INTO \
                    PACKAGES (name, version, source, repo, url, sha256) \
                    VALUES (:name, :version, :source, :repo, :url, \
                    :sha256);", params)

    def download_by_url(self, pkg, dst_dir):
        """
        Download a package described by a direct URL entry.

        :param pkg: package information; must contain 'url', may contain
                    'sha256' and 'dst_dir'
        :param dst_dir: default destination directory
        :return: True on success (or already present), False otherwise
        """
        download_dir = dst_dir
        if 'dst_dir' in pkg:
            download_dir = os.path.join(os.path.dirname(dst_dir), pkg['dst_dir'])
        if not os.path.exists(download_dir):
            os.makedirs(download_dir, mode=0o750, exist_ok=True)
        url = pkg['url']
        file_name = os.path.basename(url)
        dst_file = os.path.join(download_dir, file_name)
        checksum = pkg['sha256'] if 'sha256' in pkg else None
        if checksum and not self.need_download_again(checksum, dst_file):
            print(file_name.ljust(60), 'exists')
            return True
        try:
            LOG.info('download from [%s]', url)
            return DOWNLOAD_INST.download(url, dst_file)
        except HTTPError as http_error:
            print('[{0}]->{1}'.format(url, http_error))
            LOG.error('[%s]->[%s]', url, http_error)
            return False
        finally:
            pass

    def download_by_name(self, pkg, dst_dir):
        """
        Download a package by name, picking the newest cached version
        (or a specific one when pkg contains 'version').

        :param pkg: package information; must contain 'name', may contain
                    'version' and 'dst_dir'
        :param dst_dir: destination directory
        :return: True on success (or already present), False otherwise
        """
        if 'name' not in pkg:
            return False
        if 'dst_dir' in pkg:
            dst_dir = os.path.join(dst_dir, pkg['dst_dir'])
        url = None
        name = pkg['name']
        cur = self.primary_connection.cursor()
        sql = 'SELECT packages.version, packages.url, packages.sha256, \
                packages.source, packages.repo \
                FROM packages \
                WHERE name=:name ORDER by packages.version;'
        param = {'name': name}
        cur.execute(sql, param)
        results = cur.fetchall()
        cur.close()
        if len(results) == 0:
            print("can't find package {0}".format(name))
            LOG.error("can't find package %s", name)
            return False
        pkg_sha256 = ''
        version = results[0][0]
        url = results[0][3] + results[0][1]
        pkg_sha256 = results[0][2]
        for item in results:
            [cur_ver, cur_url, cur_sha256, cur_source, cur_repo] = item
            # Keep the newest version seen so far (SQL ORDER BY sorts
            # lexicographically, so the real comparison is done here).
            if not self.version_compare(version, cur_ver):
                version = cur_ver
                url = cur_source + cur_url
                pkg_sha256 = cur_sha256
            # An explicitly requested version wins outright.
            if 'version' in pkg and pkg['version'] in cur_ver:
                url = cur_source + cur_url
                pkg_sha256 = cur_sha256
                break
        try:
            LOG.info('[%s] download from [%s]', name, url)
            file_name = os.path.basename(url)
            dst_file = os.path.join(dst_dir, file_name)
            if not self.need_download_again(pkg_sha256, dst_file):
                LOG.info("%s no need download again", name)
                print(name.ljust(60), 'exists')
                return True
            if DOWNLOAD_INST.download(url, dst_file):
                print(name.ljust(60), 'download success')
                return True
            print(name.ljust(60), 'download failed')
            return False
        except HTTPError as http_error:
            print('[{0}]->{1}'.format(url, http_error))
            LOG.error('[%s]->[%s]', url, http_error)
            return False
        finally:
            pass

    def download(self, pkg, dst_dir):
        """
        Download a package, dispatching on whether the entry carries a
        direct URL or only a package name.
        """
        if 'url' in pkg:
            return self.download_by_url(pkg, dst_dir)
        else:
            return self.download_by_name(pkg, dst_dir)

    @staticmethod
    def need_download_again(target_sha256, dst_file):
        """
        Return True when the local file is missing or its SHA-256 digest
        does not match the expected one; False when it can be reused.

        :param target_sha256: expected digest, or None to force a download
        :param dst_file: local file path
        :return: bool
        """
        if target_sha256 is None:
            return True
        if not os.path.exists(dst_file):
            return True
        file_sha256 = calc_sha256(dst_file)
        if target_sha256 != file_sha256:
            LOG.info('target sha256 : %s, existed file sha256 : %s',
                     target_sha256, file_sha256)
            print('target sha256 : {}, existed file sha256 : {}'.format(
                target_sha256, file_sha256))
            return True
        else:
            return False
| 30.480114 | 105 | 0.544412 |
ace106462b13bfa3a5544b3ff87bf4f5aaafa03f | 4,464 | py | Python | openrec/legacy/modules/interactions/pairwise_eu_dist.py | amirbiran/openrec | 69a1c57a7a1eec49720b776279b9120b80630ba2 | [
"Apache-2.0"
] | 1 | 2018-01-12T03:46:34.000Z | 2018-01-12T03:46:34.000Z | openrec/legacy/modules/interactions/pairwise_eu_dist.py | amirbiran/openrec | 69a1c57a7a1eec49720b776279b9120b80630ba2 | [
"Apache-2.0"
] | null | null | null | openrec/legacy/modules/interactions/pairwise_eu_dist.py | amirbiran/openrec | 69a1c57a7a1eec49720b776279b9120b80630ba2 | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
from openrec.legacy.modules.interactions import PairwiseLog
class PairwiseEuDist(PairwiseLog):
"""
The PairwiseEuDist module minimizes the weighted pairwise euclidean distance-based hinge loss [cml]_ as follows (regularization and bias terms \
are not included):
.. math::
\min \sum_{(i, p, n)} w_{ip} [m + \lVert c(u_i)-c(v_p) \lVert^2 - \lVert c(u_i)-c(v_n) \lVert^2]_+
where :math:`c(x) = \\frac{x}{\max(\lVert x \lVert, 1.0)}`; :math:`u_i` denotes the representation for user :math:`i`; :math:`v_p` and :math:`v_n` denote representations for \
*positive item* :math:`p` and *negative item* :math:`n`, respectively.
Parameters
----------
user: Tensorflow tensor
Representations for users involved in the interactions. Shape: **[number of interactions, dimensionality of \
user representations]**.
item: Tensorflow tensor, required for testing
Representations for items involved in the interactions. Shape: **[number of interactions, dimensionality of \
item representations]**.
item_bias: Tensorflow tensor, required for testing
Biases for items involved in the interactions. Shape: **[number of interactions, 1]**.
p_item: Tensorflow tensor, required for training
Representations for positive items involved in the interactions. Shape: **[number of interactions, dimensionality of \
item representations]**.
p_item_bias: Tensorflow tensor, required for training
Biases for positive items involved in the interactions. Shape: **[number of interactions, 1]**.
n_item: Tensorflow tensor, required for training
Representations for negative items involved in the interactions. Shape: **[number of interactions, dimensionality of \
item representations]**.
n_item_bias: Tensorflow tensor, required for training
Biases for negative items involved in the interactions. Shape: **[number of interactions, 1]**.
weights: Tensorflow tensor, optional
Weights :math:`w`. Shape: **[number of interactions, 1]**.
margin: float, optional
Margin :math:`m`. Default to 1.0.
train: bool, optionl
An indicator for training or serving phase.
scope: str, optional
Scope for module variables.
reuse: bool, optional
Whether or not to reuse module variables.
References
----------
.. [cml] Hsieh, C.K., Yang, L., Cui, Y., Lin, T.Y., Belongie, S. and Estrin, D., 2017, April. Collaborative metric learning. \
In Proceedings of the 26th International Conference on World Wide Web (pp. 193-201). International World Wide Web Conferences \
Steering Committee.
"""
    def __init__(self, user, item=None, item_bias=None, p_item=None,
                p_item_bias=None, n_item=None, n_item_bias=None, weights=1.0, margin=1.0, train=None,
                scope=None, reuse=False):
        # Store the loss-specific hyperparameters; everything else
        # (representations, biases, scope handling) is managed by the
        # pairwise base class.
        self._weights = weights   # per-interaction weights w (scalar or tensor)
        self._margin = margin     # hinge margin m (default 1.0)
        super(PairwiseEuDist, self).__init__(user=user, item=item, item_bias=item_bias, p_item=p_item,
                        n_item=n_item, p_item_bias=p_item_bias, n_item_bias=n_item_bias, train=train, scope=scope, reuse=reuse)
    def _build_training_graph(self):
        """Build the weighted hinge loss over (user, positive, negative) triples.

        Loss = sum_i w_i * max(m - [s(u_i, p_i) - s(u_i, n_i)], 0) where the
        score s(u, v) is the negated squared Euclidean distance plus the item
        bias. NOTE: `reduction_indices`/`keep_dims` are legacy TF-1 argument
        names -- presumably this project pins an old TensorFlow version, so
        they are left as-is.
        """
        with tf.variable_scope(self._scope, reuse=self._reuse):
            # Squared L2 distance between each user and its positive item,
            # reduced over the embedding dimension, kept as a column vector.
            l2_user_pos = tf.reduce_sum(tf.square(tf.subtract(self._user, self._p_item)),
                                reduction_indices=1,
                                keep_dims=True, name="l2_user_pos")
            # Same for the sampled negative item.
            l2_user_neg = tf.reduce_sum(tf.square(tf.subtract(self._user, self._n_item)),
                                reduction_indices=1,
                                keep_dims=True, name="l2_user_neg")
            # Score = -distance + item bias (closer and more popular is better).
            pos_score = (-l2_user_pos) + self._p_item_bias
            neg_score = (-l2_user_neg) + self._n_item_bias
            diff = pos_score - neg_score
            # Weighted hinge: only triples violating the margin contribute.
            self._loss = tf.reduce_sum(self._weights * tf.maximum(self._margin - diff, 0))
    def _build_serving_graph(self):
        """Build the serving scores for all users against all items.

        Emits 2*u.v + b_v - ||v||^2, which differs from the training score
        -(||u - v||^2) + b_v only by ||u||^2, a constant per user -- so the
        item ranking per user is identical while avoiding the full pairwise
        distance computation.
        """
        with tf.variable_scope(self._scope, reuse=self._reuse):
            # Squared norms of all item vectors, one per item.
            item_norms = tf.reduce_sum(tf.square(self._item), axis=1)
            self._outputs.append(2 * tf.matmul(self._user, self._item, transpose_b=True) + \
                tf.reshape(self._item_bias, [-1]) - tf.reshape(item_norms, [-1]))
| 51.906977 | 180 | 0.640457 |
ace10a01e4654a6cefff70fb8159e0b47dec72eb | 11,968 | py | Python | scripts/gui.py | UoA-sjer215/2021-python-41 | b27094f5915395636fa85ad6a4a9751c7224c345 | [
"Apache-2.0"
] | null | null | null | scripts/gui.py | UoA-sjer215/2021-python-41 | b27094f5915395636fa85ad6a4a9751c7224c345 | [
"Apache-2.0"
] | null | null | null | scripts/gui.py | UoA-sjer215/2021-python-41 | b27094f5915395636fa85ad6a4a9751c7224c345 | [
"Apache-2.0"
] | null | null | null | import sys
import Network
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PIL import Image
import numpy as np
from torchvision.utils import save_image
# Maximum number of dataset examples -- currently unused in this module
# (TODO confirm and remove if truly dead).
Maxvalue = 60000
# Intended to hold the training-data path chosen in the file dialog.
# NOTE(review): App.main(num=3) assigns a *local* variable named filePlace
# instead of this module global, so this value is never actually updated --
# confirm intent and add a `global filePlace` (or store on the App instance).
filePlace = ''
def QPixmapToArray(pixmap):
    """Convert a QPixmap into a flat byte array for the classifier.

    The pixmap is scaled to a height of 28 px and the backing QImage
    buffer is exposed as a 784-byte array. This assumes the scaled image
    is 28x28 at one byte per pixel -- TODO confirm the image format is
    actually 8-bit; other formats would need width*height*bytes_per_pixel.
    """
    scaled = pixmap.scaledToHeight(28)
    ## Get the QImage item and expose its raw bytes
    qimg = scaled.toImage()
    # NOTE(review): the original also queried the size into swapped
    # variables (h = width, w = height) but never used them; dropped here.
    return qimg.bits().asarray(784)
#Main function that creates the window
class App (QWidget):
    """Main application window of the digit-recognition GUI.

    Laid out as four quadrants: dataset import/browsing, training
    controls, a drawing launcher, and per-digit prediction bars.
    """
    # class-level defaults, overwritten per instance as the GUI is used
    epoch = 0             # number of training epochs selected in the spin box
    train_loader = None   # torch DataLoader for training data (set by Import)
    test_loader = None    # torch DataLoader for test data (set by Import)
    picplace = 0          # NOTE(review): appears unused -- confirm/remove
    def __init__ (self,num):
        """Create the window; `num` selects which window layout to build."""
        super().__init__()
        self.main(num)
    def main (self,num):
        """Build one of the available windows.

        num == 1: the main four-quadrant window;
        num == 2: a legacy drawing window (no longer used);
        num == 3: a file dialog for picking training data.
        """
        if num == 1:
            # main start window
            self.setWindowTitle("main")
            self.move(500,200)
            self.setWindowIcon(QIcon('cat.jpg'))
            self.resize(900,600)
            # place the four quadrants of the main window on a 2x2 grid
            grid = QGridLayout()
            grid.addWidget(self.preTraining(), 0, 0)
            grid.addWidget(self.training(), 0, 1)
            grid.addWidget(self.digitInsert(), 1, 1)
            grid.addWidget(self.guess(), 1, 0)
            self.setLayout(grid)
            self.show()
        elif num == 2 :
            # the old drawing window (no longer used)
            self.setWindowTitle("drawing")
            self.drawing = False
            self.lastPoint = QPoint()
            self.image = QPixmap("cat.jpg")
            self.move(500,200)
            self.resize(400,400)
            self.show()
        elif num == 3 :
            # file dialog window (retrieves a file path)
            self.setWindowTitle('find training data')
            self.setGeometry(500,200,900,600)
            options = QFileDialog.Options()
            options |= QFileDialog.DontUseNativeDialog
            fileName, _ = QFileDialog.getOpenFileName(self,"QFileDialog.getOpenFileName()", "","All Files (*);;Python Files (*.py)", options=options)
            if fileName:
                # NOTE(review): this assigns a *local* filePlace, not the
                # module-level global -- the chosen path is discarded. Bug?
                filePlace = fileName
    def import_clicked(self):
        """Load the train/test DataLoaders via the Network module."""
        self.test_loader = Network.get_test_set()
        self.train_loader = Network.get_train_set()
        print(self.train_loader)
        print(self.test_loader)
        #self.AllDataImages = iter(self.test_loader)
    def go_next(self):
        """Show the next batch image of the currently viewed dataset.

        Re-enumerates the loader from the start each call and skips
        batches up to dataset_index -- O(n) per click, but simple.
        """
        print('next')
        if(self.viewing_dataset == 0):
            for batch_idx, (data, target) in enumerate(self.train_loader):
                if(batch_idx > self.dataset_index):
                    self.dataset_index += 1
                    save_image(data, 'dataset_img.png')
                    data_pixmap = QPixmap('dataset_img.png')
                    data_pixmap = data_pixmap.scaledToHeight(280)
                    self.datasetImage.setPixmap(data_pixmap)
                    break
        else:
            for batch_idx, (data, target) in enumerate(self.test_loader):
                if(batch_idx > self.dataset_index):
                    self.dataset_index += 1
                    save_image(data, 'dataset_img.png')
                    data_pixmap = QPixmap('dataset_img.png')
                    data_pixmap = data_pixmap.scaledToHeight(280)
                    self.datasetImage.setPixmap(data_pixmap)
                    break
    def go_previous(self):
        """Show the previous batch image of the currently viewed dataset."""
        print('back')
        if(self.viewing_dataset == 0):
            for batch_idx, (data, target) in enumerate(self.train_loader):
                if(batch_idx > self.dataset_index-2):
                    self.dataset_index += -1
                    save_image(data, 'dataset_img.png')
                    data_pixmap = QPixmap('dataset_img.png')
                    data_pixmap = data_pixmap.scaledToHeight(280)
                    self.datasetImage.setPixmap(data_pixmap)
                    break
        else:
            for batch_idx, (data, target) in enumerate(self.test_loader):
                if(batch_idx > self.dataset_index-2):
                    self.dataset_index += -1
                    save_image(data, 'dataset_img.png')
                    data_pixmap = QPixmap('dataset_img.png')
                    data_pixmap = data_pixmap.scaledToHeight(280)
                    self.datasetImage.setPixmap(data_pixmap)
                    break
    def switch_set(self):
        """Toggle between browsing the training (0) and test (1) dataset."""
        print('switch')
        if(self.viewing_dataset == 0):
            self.viewing_dataset = 1
        else:
            self.viewing_dataset = 0
    def preTraining(self):
        """Build the pre-training quadrant: import button plus a dataset
        browser with Next/Previous/Switch controls."""
        groupbox = QGroupBox('Pre-Training Settings')
        importTraining = QPushButton('Import')
        importTraining.clicked.connect(self.import_clicked)
        # placeholder image until a dataset batch is displayed
        self.dataImage = QPixmap('cat.jpg')
        self.datasetImage = QLabel()
        self.datasetImage.setPixmap(self.dataImage)
        self.dataset_index = -1      # index of the currently shown batch
        self.viewing_dataset = 0     # 0 = training set, 1 = test set
        # Next and Previous buttons
        Next = QPushButton('Next')
        Next.clicked.connect(self.go_next)
        Previous = QPushButton('Previous')
        Previous.clicked.connect(self.go_previous)
        # switch which dataset is being browsed
        Change_set = QPushButton('Switch Dataset')
        Change_set.clicked.connect(self.switch_set)
        vbox = QVBoxLayout()
        vbox.addWidget(importTraining)
        vbox.addWidget(self.datasetImage)
        vbox.addWidget(Next)
        vbox.addWidget(Previous)
        vbox.addWidget(Change_set)
        vbox.addStretch(1)
        groupbox.setLayout(vbox)
        return groupbox
    def value_changed(self):
        """Sync the selected epoch count from the spin box."""
        self.epoch = self.epoch_value.value()
    def train_clicked(self):
        """Train the model for the selected number of epochs, then save it.

        NOTE(review): crashes if Import was not clicked first
        (train_loader/test_loader are still None) -- confirm whether a
        guard is wanted here.
        """
        for epoch in range(1, self.epoch+1):
            progress = Network.train(epoch, self.train_loader)
            Network.test(self.test_loader)
            # scale per-epoch progress into an overall percentage
            self.timerEvent(100/(self.epoch) * progress)
        # Saving the model so it can be used again without retraining
        # (unsure if this is the right place for this)
        print("********************Model Saved***********************")
        Network.save(Network.model, 'model.pth')
    def timerEvent(self,percentage):
        """Update the training progress bar to `percentage`."""
        # if percentage >= 100:
        #     return
        self.pbar.setValue(percentage) # update the progress bar
    def test_drawing_clicked(self):
        """Feed the saved drawing to the network and display the guesses."""
        # self.image = QPixmap('new_digit')
        # img = QPixmapToArray(self.image)
        img = Image.open('new_digit.png')
        img = img.resize((28, 28))   # network expects 28x28 input
        prediction = Network.netEval(img)
        self.upgrade_guess(prediction)
    def training(self):
        """Build the training quadrant: epoch selector, train button and
        progress bar."""
        groupbox = QGroupBox('Training Settings')
        self.epoch_value = QSpinBox()
        self.epoch_value.setRange(0, 15)
        self.epoch_value.setSingleStep(2)
        self.epoch_value.valueChanged.connect(self.value_changed)
        lbl1 = QLabel('Epoch Amount (the more you have, the more accurate the model is)')
        train = QPushButton('Train Model')
        train.clicked.connect(self.train_clicked)
        self.pbar = QProgressBar(self)
        self.pbar.setGeometry(30, 40, 200, 25)
        # timer/step appear unused beyond initialisation -- TODO confirm
        self.timer = QBasicTimer()
        self.step = 0
        vbox = QVBoxLayout()
        vbox.addWidget(lbl1)
        vbox.addWidget(self.epoch_value)
        vbox.addWidget(train)
        vbox.addWidget(self.pbar)
        groupbox.setLayout(vbox)
        return groupbox
    def open(self):
        """Open a new free-hand drawing window."""
        self.drawW = Drawer()
        self.drawW.show()
    def digitInsert(self):
        """Build the drawing quadrant: open a canvas and submit a drawing."""
        groupbox = QGroupBox('Drawing')
        open_drawing = QPushButton('new draw')
        open_drawing.clicked.connect(self.open)
        test_drawing = QPushButton('Test Drawing')
        test_drawing.clicked.connect(self.test_drawing_clicked)
        vbox = QVBoxLayout()
        vbox.addWidget(open_drawing)
        vbox.addWidget(test_drawing)
        groupbox.setLayout(vbox)
        return groupbox
    def guess(self):
        """Build the guess quadrant: one progress bar per digit 0-9."""
        groupbox = QGroupBox('Number Guesses')
        self.numGuess =QLabel("No guess made")
        self.N0 = QProgressBar(self)
        self.N1 = QProgressBar(self)
        self.N2 = QProgressBar(self)
        self.N3 = QProgressBar(self)
        self.N4 = QProgressBar(self)
        self.N5 = QProgressBar(self)
        self.N6 = QProgressBar(self)
        self.N7 = QProgressBar(self)
        self.N8 = QProgressBar(self)
        self.N9 = QProgressBar(self)
        vbox = QVBoxLayout()
        vbox.addWidget(self.numGuess)
        vbox.addWidget(self.N0)
        vbox.addWidget(self.N1)
        vbox.addWidget(self.N2)
        vbox.addWidget(self.N3)
        vbox.addWidget(self.N4)
        vbox.addWidget(self.N5)
        vbox.addWidget(self.N6)
        vbox.addWidget(self.N7)
        vbox.addWidget(self.N8)
        vbox.addWidget(self.N9)
        groupbox.setLayout(vbox)
        return groupbox
    def upgrade_guess(self,prediction):
        """Display the network's output.

        Assumes `prediction` has 11 entries: per-digit scores in [0..9]
        in roughly [-10, 10] (rescaled to a 0-100 bar) and the predicted
        digit at index 10 -- TODO confirm against Network.netEval.
        """
        self.numGuess.setText("Prediction is that it's number" + str(prediction[10]))
        self.N0.setValue((prediction[0] + 10)*5)
        self.N1.setValue((prediction[1] + 10)*5)
        self.N2.setValue((prediction[2] + 10)*5)
        self.N3.setValue((prediction[3] + 10)*5)
        self.N4.setValue((prediction[4] + 10)*5)
        self.N5.setValue((prediction[5] + 10)*5)
        self.N6.setValue((prediction[6] + 10)*5)
        self.N7.setValue((prediction[7] + 10)*5)
        self.N8.setValue((prediction[8] + 10)*5)
        self.N9.setValue((prediction[9] + 10)*5)
#this entire code comes from this post from stack overflow
# https://stackoverflow.com/questions/51475306/drawing-on-top-of-image-in-pyqt5-tracing-the-mouse/51475353
class Drawer(QWidget):
    """Free-hand drawing window.

    Paints white strokes onto a 280x280 off-screen pixmap and saves the
    canvas to ``new_digit.png`` after every stroke so the main window can
    submit it to the classifier.
    """
    def __init__(self):
        super().__init__()
        self.setWindowTitle("drawing")
        self.drawing = False        # True while the left button is held down
        self.lastPoint = QPoint()   # previous mouse position of the stroke
        # start from the blank template, normalised to 280x280
        self.image = QPixmap("blank.png")
        self.image = self.image.scaled(280, 280)
        self.image.save('blank.png', 'PNG')
        self.resize(self.image.width(), self.image.height())
        self.move(500,200)
        self.show()
    def paintEvent(self, event):
        """Blit the off-screen pixmap onto the widget."""
        painter = QPainter(self)
        painter.drawPixmap(self.rect(), self.image)
    def mousePressEvent(self, event):
        """Start a stroke on left-button press."""
        if event.button() == Qt.LeftButton:
            self.drawing = True
            self.lastPoint = event.pos()
    def mouseMoveEvent(self, event):
        """Extend the current stroke while the left button is held.

        BUGFIX: the original tested ``event.buttons() and Qt.LeftButton``,
        a truthiness chain that is True whenever *any* button is pressed;
        the button mask must be tested bitwise.
        """
        if (event.buttons() & Qt.LeftButton) and self.drawing :
            painter = QPainter(self.image)
            painter.setPen(QPen(Qt.white, 15, Qt.SolidLine))
            painter.drawLine(self.lastPoint, event.pos())
            self.lastPoint = event.pos()
            self.update()
            # persist after every stroke so 'Test Drawing' always sees it
            self.image.save("new_digit.png", "PNG")
    def mouseReleaseEvent(self, event):
        """End the stroke on left-button release.

        BUGFIX: the original compared the *bound method* ``event.button``
        (not its return value) to Qt.LeftButton, which is always False,
        so ``drawing`` was never reset.
        """
        if event.button() == Qt.LeftButton:
            self.drawing = False
# Create the Qt application and the main window, then enter the event loop.
# NOTE(review): this runs on *import* as well as when executed as a script;
# consider an `if __name__ == '__main__':` guard.
parent = QApplication(sys.argv)
mainW = App(1)
# drawW = Drawer()
# run the event loop and propagate its exit code to the shell
sys.exit(parent.exec())
| 32.172043 | 149 | 0.59517 |
ace10ad2a7abe788019da6309690766668052ca8 | 953 | py | Python | result/migrations/mig/0020_auto_20190625_0912.py | 0Jihad/uqhs | 16e16742022142d47d0a423aa27ca50fe706a06b | [
"MIT"
] | null | null | null | result/migrations/mig/0020_auto_20190625_0912.py | 0Jihad/uqhs | 16e16742022142d47d0a423aa27ca50fe706a06b | [
"MIT"
] | 11 | 2019-10-13T11:05:26.000Z | 2022-03-11T23:48:57.000Z | result/migrations/mig/0020_auto_20190625_0912.py | 0Jihad/uqhs | 16e16742022142d47d0a423aa27ca50fe706a06b | [
"MIT"
] | null | null | null | # Generated by Django 2.1.3 on 2019-06-24 20:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds aggregate columns (avr, grade, posi)
    to all_subject and makes agr a nullable integer. Do not hand-edit the
    operations; generate a follow-up migration instead."""
    dependencies = [
        ('result', '0019_auto_20190625_0842'),
    ]
    operations = [
        migrations.AddField(
            model_name='all_subject',
            name='avr',
            # NOTE(review): max_length has no effect on FloatField -- confirm/remove
            field=models.FloatField(blank=True, max_length=8, null=True),
        ),
        migrations.AddField(
            model_name='all_subject',
            name='grade',
            field=models.CharField(blank=True, default='0', max_length=8, null=True),
        ),
        migrations.AddField(
            model_name='all_subject',
            name='posi',
            field=models.CharField(blank=True, default='0', max_length=8, null=True),
        ),
        migrations.AlterField(
            model_name='all_subject',
            name='agr',
            field=models.IntegerField(blank=True, null=True),
        ),
    ]
| 28.029412 | 85 | 0.56873 |
ace10bac24844a0f37a7aabcc3f37b0c8e4f0e3c | 12,681 | py | Python | python3/pracmln/utils/eval.py | seba90/pracmln | 2af9e11d72f077834cf130343a2506344480fb07 | [
"BSD-2-Clause"
] | 123 | 2016-02-13T08:49:46.000Z | 2022-03-15T10:23:55.000Z | python3/pracmln/utils/eval.py | seba90/pracmln | 2af9e11d72f077834cf130343a2506344480fb07 | [
"BSD-2-Clause"
] | 29 | 2016-06-13T16:06:50.000Z | 2022-01-07T23:31:22.000Z | python3/pracmln/utils/eval.py | seba90/pracmln | 2af9e11d72f077834cf130343a2506344480fb07 | [
"BSD-2-Clause"
] | 51 | 2016-03-22T05:42:45.000Z | 2021-11-06T17:36:01.000Z | # Classifier Evaluation
#
# (C) 2013 by Daniel Nyga (nyga@cs.uni-bremen.de)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import pickle
from subprocess import Popen, PIPE
from ..mln.util import logx
def KLDivergence(p, q):
    '''
    Computes the Kullback-Leibler divergence KL(p || q) of two distributions
    p and q. Either argument may also be a path to a pickled distribution,
    in which case it is loaded from disk first.
    '''
    # BUGFIX: pickled files are now opened in binary mode (required by
    # pickle on Python 3) and closed deterministically via `with`.
    if type(p) is str:
        with open(p, 'rb') as f:
            p = pickle.load(f)
    if type(q) is str:
        with open(q, 'rb') as f:
            q = pickle.load(f)
    kl_div = 0
    for p_, q_ in zip(p, q):
        # clamp to a small epsilon so empty bins cannot produce log(0)
        # or a division by zero
        p_ = max(1E-10, p_)
        q_ = max(1E-10, q_)
        kl_div += p_ * logx(float(p_) / q_)
    return kl_div
class ConfusionMatrix(object):
    '''
    Represents a confusion matrix and provides some convenience methods
    for computing statistics like precision, recall, F1 score or methods
    for creating LaTex output.
    '''
    def __init__(self):
        self.matrix = {}        # maps a predicted label to {ground truth label: count}
        self.instanceCount = 0  # total number of examples added so far
        self.labels = []        # every label seen, as prediction or ground truth
    def addClassificationResult(self, prediction, groundTruth, inc=1):
        '''
        Add a new classification result to the confusion matrix.
        - prediction: the predicted class label of an example
        - groundTruth: the correct label of an example
        - inc: the increment (default: 1)
        '''
        if not prediction in self.labels:
            self.labels.append(prediction)
        if not groundTruth in self.labels:
            self.labels.append(groundTruth)
        gndTruths = self.matrix.get(prediction, None)
        if gndTruths is None:
            gndTruths = {}
            self.matrix[prediction] = gndTruths
        # make sure the ground-truth label also owns a (possibly empty) row
        if self.matrix.get(groundTruth, None) is None:
            self.matrix[groundTruth] = {groundTruth: 0}
        gndTruths[groundTruth] = gndTruths.get(groundTruth, 0) + inc
        self.instanceCount += inc
    def getMatrixEntry(self, pred, clazz):
        '''
        Returns the matrix entry for the prediction pred and ground truth clazz.
        '''
        row = self.matrix.get(pred, None)
        if row is None:
            return 0
        return row.get(clazz, 0)
    def countClassifications(self, classname):
        '''
        Returns the true positive, true negative, false positive, false negative
        classification counts (in this order).
        '''
        tp = self.matrix.get(classname, {}).get(classname, 0)
        classes = list(self.matrix.keys())
        # false positives: classname predicted, but the truth was another class
        fp = 0
        for c in classes:
            if c != classname:
                fp += self.getMatrixEntry(classname, c)
        # false negatives: classname was the truth, but another class predicted
        fn = 0
        for c in classes:
            if c != classname:
                fn += self.getMatrixEntry(c, classname)
        # true negatives: every cell not involving classname at all
        tn = 0
        for c in classes:
            if c != classname:
                for c2 in classes:
                    if c2 != classname:
                        tn += self.getMatrixEntry(c, c2)
        assert sum([tp, tn, fp, fn]) == self.instanceCount
        return tp, tn, fp, fn
    def _groundTruthLabels(self):
        '''
        Returns the sorted list of all ground truth labels occurring in the
        matrix (shared helper for the print methods).
        '''
        classes = set()
        for gndTruths in self.matrix.values():
            classes.update(gndTruths.keys())
        return sorted(classes)
    def getMetrics(self, classname):
        '''
        Returns the classifier evaluation metrics in the following order:
        Accuracy, Precision, Recall, F1-Score.
        '''
        # (removed: the original built a sorted class list here and never used it)
        tp, tn, fp, fn = self.countClassifications(classname)
        acc = None  # None (not 0.0) when the matrix is empty
        if tp + tn + fp + fn > 0:
            acc = (tp + tn) / float(tp + tn + fp + fn)
        pre = 0.0
        if tp + fp > 0:
            pre = tp / float(tp + fp)
        rec = 0.0
        if tp + fn > 0:
            rec = tp / float(tp + fn)
        f1 = 0.0
        if pre + rec > 0:
            f1 = (2.0 * pre * rec) / (pre + rec)
        return acc, pre, rec, f1
    def getTotalAccuracy(self):
        '''
        Returns the fraction of correct predictions over all predictions made.
        '''
        true = 0
        for label in self.labels:
            tp, _, _, _ = self.countClassifications(label)
            true += tp
        return true / float(self.instanceCount)
    def getLatexTable(self):
        '''
        Returns LaTex code for the confusion matrix.
        '''
        grid = "|l|"
        for cl in sorted(self.labels):
            grid += "l|"
        endl = '\n'
        result = ''
        result += r'\footnotesize' + endl
        result += r'\begin{tabular}{' + grid + '}' + endl
        headerRow = r"Prediction/Ground Truth"
        for cl in sorted(self.labels):
            headerRow += r" & \begin{turn}{90}" + cl.replace('_', r'\_') + r'\end{turn}'
        # count the number of actual instances per class label (tp + fp + fn)
        examplesPerClass = {}
        for label in self.labels:
            tp, tn, fp, fn = self.countClassifications(label)
            examplesPerClass[label] = sum([tp, fp, fn])
        result += r'\hline' + endl
        result += headerRow + r'\\ \hline' + endl
        # one row per class; cells shaded by their share of the row's examples
        for clazz in sorted(self.labels):
            values = []
            for cl2 in sorted(self.labels):
                counts = self.getMatrixEntry(clazz, cl2)
                values.append(r'\cellcolor{cfmcolor!%d}%s' % (int(round(float(counts) / examplesPerClass[clazz] * 100)),
                                                              (r'\textbf{%d}' if clazz == cl2 else '%d') % counts))
            result += clazz.replace('_', r'\_') + ' & ' + ' & '.join(values) + r'\\ \hline' + endl
        result += r"\end{tabular}" + endl
        return result
    def printPrecisions(self):
        '''
        Prints to the standard out a table of the class-specific error measures
        accuracy, precision, recall and F1 score.
        '''
        for cf in self._groundTruthLabels():
            acc, pre, rec, f1 = self.getMetrics(cf)
            print('%s: - Acc=%.2f, Pre=%.2f, Rec=%.2f F1=%.2f' % (cf, acc, pre, rec, f1))
        print("")
    def printAveragePrecision(self):
        '''
        Prints the metrics macro-averaged over all ground truth classes.
        '''
        classes = self._groundTruthLabels()
        aAcc = 0.0
        aPre = 0.0
        aRec = 0.0
        aF1 = 0.0
        for cf in classes:
            acc, pre, rec, f1 = self.getMetrics(cf)
            aAcc += acc
            aPre += pre
            aRec += rec
            aF1 += f1
        print('%s: - Acc=%.2f, Pre=%.2f, Rec=%.2f F1=%.2f' % (
            'Average: ', aAcc / len(classes), aPre / len(classes), aRec / len(classes), aF1 / len(classes)))
        print("")
    @staticmethod
    def compareConfusionMatrices(*matricesPath):
        '''
        Loads the pickled matrices at the given paths and prints their
        macro-averaged metrics for side-by-side comparison.
        '''
        for path in matricesPath:
            cm = ConfusionMatrix.load(path)
            print(path)
            cm.printAveragePrecision()
    def iteritems(self):
        '''
        Iterates over triples of the form (prediction, class, count) of this confusion matrix.
        '''
        for prediction in self.labels:
            for clazz in self.labels:
                yield (prediction, clazz, self.getMatrixEntry(prediction, clazz))
    def combine(self, matrix):
        '''
        Combines another confusion matrix with this one.
        '''
        # BUGFIX: the original called matrix.items(), which does not exist on
        # ConfusionMatrix (AttributeError); the triple iterator is iteritems().
        for (pred, clazz, count) in matrix.iteritems():
            self.addClassificationResult(pred, clazz, inc=count)
    def __str__(self):
        # width of the largest count determines the minimum cell width
        maxNumDigits = len(str(max(max(row.values()) for row in self.matrix.values())))
        maxClassLabelLength = max(map(len, self.matrix.keys()))
        padding = 1
        numLabels = len(self.matrix)
        cellwidth = max(maxClassLabelLength, maxNumDigits, 3) + 2 * padding
        # horizontal separator between table rows
        # (BUGFIX: a stray debug print of maxNumDigits was removed here)
        hline = '|' + '-' * (cellwidth) + '+'
        hline += '+'.join(['-' * (cellwidth)] * numLabels) + '|'
        sep = '|'
        outerHLine = '-' * len(hline)
        def createTableRow(args):
            # right-justify every cell to the common cell width
            return sep + sep.join([str(a).rjust(cellwidth - padding) + ' ' * padding for a in args]) + sep
        endl = '\n'
        # draw the table: header row, then one row per predicted label
        table = outerHLine + endl
        table += createTableRow(['P\\C'] + sorted(self.matrix.keys())) + endl
        table += hline + endl
        for i, clazz in enumerate(sorted(self.labels)):
            table += createTableRow([clazz] + [self.getMatrixEntry(clazz, x) for x in sorted(self.labels)]) + endl
            if i < numLabels - 1:
                table += hline + endl
        table += outerHLine
        return table
    def printTable(self):
        '''
        Prints the confusion matrix nicely formatted onto the standard out.
        '''
        print(self)
    def toFile(self, filename):
        '''
        Pickles the confusion matrix to a file with the given name.
        '''
        # BUGFIX: pickle requires a binary file handle on Python 3
        with open(filename, 'wb') as f:
            pickle.dump(self, f)
    def writeLatexFile(self, filename):
        '''
        Writes a standalone LaTex document containing this matrix to <filename>.tex.
        '''
        texFileName = filename + '.tex'
        with open(texFileName, 'w+') as texFile:
            texFile.write(r'''
        \documentclass[10pt]{article}
        \usepackage{color}
        \usepackage{rotating}
        \usepackage[table]{xcolor}
        \definecolor{cfmcolor}{rgb}{0.2,0.4,0.6}
        \begin{document}
        \pagenumbering{gobble}
        \resizebox{\columnwidth}{!}{
        %s}
        \end{document}
        ''' % self.getLatexTable())
    @staticmethod
    def load(filename):
        '''
        Loads a pickled confusion matrix from the given file.
        '''
        # BUGFIX: pickle requires a binary file handle on Python 3
        with open(filename, 'rb') as f:
            return pickle.load(f)
    def toPDF(self, filename):
        '''
        Creates a PDF file of this matrix. Requires 'pdflatex' and 'pdfcrop' installed.
        '''
        # the intermediate .tex document is exactly writeLatexFile's output
        self.writeLatexFile(filename)
        texFileName = filename + '.tex'
        cmd = 'pdflatex -halt-on-error %s' % texFileName
        p = Popen(cmd, shell=True)
        if p.wait() != 0:
            raise Exception('Couldn\'t compile LaTex.')
        else:
            cmd = 'pdfcrop %s.pdf %s.pdf' % (filename, filename)
            p = Popen(cmd, shell=True)
            if p.wait() != 0:
                raise Exception('Couldn\'t crop pdf')
if __name__ == '__main__':
    # Smoke test: build a small 3-class matrix by hand and exercise the
    # printing, LaTex and pickling helpers.
    cm = ConfusionMatrix()
    for _ in range(10):
        cm.addClassificationResult("AAA", "A")
    cm.addClassificationResult("AAA", "AAA")
    cm.addClassificationResult("AAA", "AAA")
    cm.addClassificationResult("AAA", "AAA")
    cm.addClassificationResult("AAA", "AAA")
    cm.addClassificationResult("AAA", "B")
    cm.addClassificationResult("AAA", "B")
    cm.addClassificationResult("AAA", "C")
    cm.addClassificationResult("B", "AAA")
    cm.addClassificationResult("B", "AAA")
    cm.addClassificationResult("B", "C")
    cm.addClassificationResult("B", "B")
    # cm.addClassificationResult("C","A")
    # cm.addClassificationResult("C","B")
    # cm.addClassificationResult("C","C")
    cm.printTable()
    cm.printPrecisions()
    print(cm.getLatexTable())
    # requires pdflatex and pdfcrop on the PATH
    cm.toPDF('tmp')
    # round-trip through pickle to verify the matrix is serialisable
    print(pickle.loads(pickle.dumps(cm)))
| 33.196335 | 120 | 0.581973 |
ace10cbbdc4595da7a08a194a86622245125c7e9 | 850 | py | Python | screenpy/resolutions/is_close_to.py | ScreenPyHQ/screenpy | e5eb9873f0eb8987a88c7b51a273c55925801738 | [
"MIT"
] | 8 | 2022-02-23T18:40:13.000Z | 2022-03-20T06:27:30.000Z | screenpy/resolutions/is_close_to.py | ScreenPyHQ/screenpy | e5eb9873f0eb8987a88c7b51a273c55925801738 | [
"MIT"
] | 7 | 2022-01-13T07:01:40.000Z | 2022-03-31T15:45:13.000Z | screenpy/resolutions/is_close_to.py | ScreenPyHQ/screenpy | e5eb9873f0eb8987a88c7b51a273c55925801738 | [
"MIT"
] | 2 | 2022-01-16T09:03:19.000Z | 2022-01-16T09:06:10.000Z | """
Matches a value that falls within the range specified by the given delta.
"""
from hamcrest import close_to
from hamcrest.library.number.iscloseto import IsCloseTo as _IsCloseTo
from .base_resolution import BaseResolution
class IsCloseTo(BaseResolution):
    """Matches a value that falls within the range specified by the given delta.

    Examples::

        the_actor.should(
            See.the(Number.of(BALLOONS), IsCloseTo(FILLED_BALLOONS_COUNT, delta=25))
        )
    """

    matcher: _IsCloseTo
    matcher_function = close_to

    def get_line(self) -> str:
        """Get the line that describes this Resolution."""
        expected_args, expected_kwargs = self.expected
        num = expected_args[0]
        delta = expected_kwargs["delta"]
        return f"a value at most {delta} away from {num}."

    def __init__(self, num: int, delta: int = 1) -> None:
        super().__init__(num, delta=delta)
| 27.419355 | 84 | 0.678824 |
ace10f6fd2bb035283822810f563b4e21d14b6de | 553 | py | Python | divergency.py | LabUfjf/DiscretizationProcess3 | 34a835cf3c47e34efc0db4124d9de386c9d0874f | [
"MIT"
] | null | null | null | divergency.py | LabUfjf/DiscretizationProcess3 | 34a835cf3c47e34efc0db4124d9de386c9d0874f | [
"MIT"
] | null | null | null | divergency.py | LabUfjf/DiscretizationProcess3 | 34a835cf3c47e34efc0db4124d9de386c9d0874f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 14 12:37:41 2018
@author: Igor
"""
def L2(P,Q):
    """Euclidean (L2) distance between two distributions P and Q.

    Inputs are passed through np.asarray, so plain Python sequences are
    accepted in addition to numpy arrays (the original required arrays).
    """
    import numpy as np
    P, Q = np.asarray(P), np.asarray(Q)
    simi = (np.sqrt(np.sum((P-Q)**2)))#/np.size(P)
    return simi
def L1(P,Q):
    """Manhattan (L1) distance between two distributions P and Q.

    Inputs are passed through np.asarray, so plain Python sequences are
    accepted in addition to numpy arrays (the original required arrays).
    """
    import numpy as np
    P, Q = np.asarray(P), np.asarray(Q)
    simi = (np.sum(np.abs(P-Q)))#/np.size(P)
    return simi
def KL(P,Q):
    """Absolute Kullback-Leibler-style divergence between P and Q (1-D).

    Terms that evaluate to NaN or +/-inf (caused by zero bins in P or Q)
    are dropped before summing, mirroring the original index-intersection
    logic. Inputs are coerced to float arrays, so plain sequences work;
    the divide-by-zero warnings the original emitted are suppressed.
    """
    import numpy as np
    P = np.asarray(P, dtype=float)
    Q = np.asarray(Q, dtype=float)
    with np.errstate(divide='ignore', invalid='ignore'):
        simi = P * np.log10(P / Q)
    # keep only finite terms: isfinite rejects both NaN and +/-inf,
    # equivalent to the original intersect1d of not-nan and not-inf indices
    finite = np.isfinite(simi)
    simi = np.abs(np.sum(simi[finite]))#/np.size(P))
    return simi
ace1106f1caa5c3af25f45798b890af3de83d36a | 3,989 | py | Python | tests/templates/test_layer.py | DanielPolatajko/pennylane | d603e810a4d34d727a436d852c540fdc0fe21a85 | [
"Apache-2.0"
] | 1 | 2021-02-18T02:14:27.000Z | 2021-02-18T02:14:27.000Z | tests/templates/test_layer.py | markhop20/pennylane | 8792f0f88178f70a04d6f7afbbb9dd90d2e758b3 | [
"Apache-2.0"
] | null | null | null | tests/templates/test_layer.py | markhop20/pennylane | 8792f0f88178f70a04d6f7afbbb9dd90d2e758b3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the :func:`pennylane.template.layer` function.
Integration tests should be placed into ``test_templates.py``.
"""
# pylint: disable=protected-access,cell-var-from-loop
import pytest
import pennylane as qml
from pennylane import layer, template
@template
def ConstantCircuit():
    """Template with no parameters: applies a fixed gate sequence to wires 0 and 1."""
    qml.PauliX(wires=[0])
    qml.Hadamard(wires=[0])
    qml.PauliY(wires=[1])
@template
def StaticCircuit(wires, var):
    """Template with a wires argument and a boolean flag, but no trainable parameters."""
    qml.CNOT(wires=[wires[3], wires[1]])
    qml.Hadamard(wires=wires[1])
    qml.PauliY(wires=wires[2])
    if var == True:
        qml.Hadamard(wires=wires[0])
@template
def KwargCircuit(wires, **kwargs):
    """Same circuit as StaticCircuit, but the flag arrives via **kwargs['var']."""
    qml.CNOT(wires=[wires[3], wires[1]])
    qml.Hadamard(wires=wires[1])
    qml.PauliY(wires=wires[2])
    if kwargs['var'] == True:
        qml.Hadamard(wires=wires[0])
@template
def DynamicCircuit(parameters):
    """Template whose gates are driven by a nested parameter structure:
    parameters[0] holds two RX angles, parameters[1] one MultiRZ angle."""
    for i in range(2):
        qml.RX(parameters[0][i], wires=i)
    qml.MultiRZ(parameters[1], wires=[0, 1])
@template
def MultiCircuit(parameters1, parameters2, var1, wires, var2):
    """Template mixing two positional parameter sets and two boolean flags,
    exercising layer()'s handling of multiple per-repetition arguments."""
    if var2 == True:
        for i, w in enumerate(wires):
            qml.RY(parameters1[i], wires=w)
    if var1 == True:
        qml.templates.BasicEntanglerLayers([parameters2], wires=wires)
# Templates under test; the lists below are parallel (one entry per template)
# and are zipped into REPEAT for pytest parametrization.
UNITARIES = [
    ConstantCircuit,
    StaticCircuit,
    KwargCircuit,
    DynamicCircuit,
    MultiCircuit
]
# number of repetitions requested from qml.layer for each template
DEPTH = [2, 1, 2, 1, 2]
# the exact operation sequence qml.layer is expected to queue per template
GATES = [
    [qml.PauliX(wires=0), qml.Hadamard(wires=0), qml.PauliY(wires=1), qml.PauliX(wires=0), qml.Hadamard(wires=0), qml.PauliY(wires=1)],
    [qml.CNOT(wires=[3, 1]), qml.Hadamard(wires=1), qml.PauliY(wires=2), qml.Hadamard(wires=0)],
    [qml.CNOT(wires=[3, 1]), qml.Hadamard(wires=1), qml.PauliY(wires=2), qml.Hadamard(wires=0), qml.CNOT(wires=[3, 1]), qml.Hadamard(wires=1), qml.PauliY(wires=2), qml.Hadamard(wires=[0])],
    [qml.RX(0.5, wires=0), qml.RX(0.5, wires=1), qml.MultiRZ(0.3, wires=[0, 1])],
    [qml.RY(0.5, wires=0), qml.RY(0.4, wires=1), qml.RX(0.4, wires=0), qml.RX(0.4, wires=1), qml.CNOT(wires=[0, 1]), qml.RY(0.5, wires=0), qml.RY(0.4, wires=1)]
]
# positional arguments per template: one inner entry per repetition
ARGS = [ [], [], [], [ [ [[0.5, 0.5], 0.3] ] ], [ [[0.5, 0.4], [0.5, 0.4]], [[0.4, 0.4], []], [True, False] ] ]
# keyword arguments forwarded unchanged to every repetition
KWARGS = [{}, {'wires':range(4), 'var':True}, {'wires':range(4), 'var':True}, {}, {'wires':range(2), 'var2':True}]
# NOTE(review): zip() is a one-shot iterator in Python 3 -- fine for a single
# parametrize pass, but a list would be safer if REPEAT were ever reused.
REPEAT = zip(UNITARIES, DEPTH, ARGS, KWARGS, GATES)
########################
class TestLayer:
    """Tests the layering function"""
    def test_args_length(self):
        """Tests that the correct error is thrown when the length of an argument is incorrect"""
        params = [1, 1]  # only 2 entries, but 3 repetitions are requested below
        def unitary(param, wire):
            qml.RX(param, wires=wire)
        with pytest.raises(ValueError, match=r"Each positional argument must have length matching 'depth'; expected 3"):
            layer(unitary, 3, params, wires=[0])
    @pytest.mark.parametrize(("unitary", "depth", "arguments", "keywords", "gates"), REPEAT)
    def test_layer(self, unitary, depth, arguments, keywords, gates):
        """Tests that the layering function is yielding the correct sequence of gates"""
        # record the operations layer() queues without executing on a device
        with qml._queuing.OperationRecorder() as rec:
            layer(unitary, depth, *arguments, **keywords)
        for i, gate in enumerate(rec.operations):
            # compare each queued gate to the expected one by name,
            # parameters and wires
            prep = [gate.name, gate.parameters, gate.wires]
            target = [gates[i].name, gates[i].parameters, gates[i].wires]
            assert prep == target
| 33.241667 | 189 | 0.647029 |
ace110b458c1bc46e2097c2c0b77f2e2f720260f | 11,347 | py | Python | packages/python/plotly/plotly/validators/bar/marker/_colorbar.py | adehad/plotly.py | bca292530c400c61e8b7f8a6571262a9dde43ee3 | [
"MIT"
] | 7 | 2021-09-29T09:46:36.000Z | 2022-03-24T08:30:41.000Z | packages/python/plotly/plotly/validators/bar/marker/_colorbar.py | adehad/plotly.py | bca292530c400c61e8b7f8a6571262a9dde43ee3 | [
"MIT"
] | 1 | 2021-09-30T16:56:21.000Z | 2021-10-15T09:14:12.000Z | packages/python/plotly/plotly/validators/bar/marker/_colorbar.py | adehad/plotly.py | bca292530c400c61e8b7f8a6571262a9dde43ee3 | [
"MIT"
] | 1 | 2021-09-29T22:34:05.000Z | 2021-09-29T22:34:05.000Z | import _plotly_utils.basevalidators
class ColorbarValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the ``bar.marker.colorbar`` property.

    NOTE(review): this file looks code-generated (plotly validator
    boilerplate); presumably the generator, not this file, should be
    edited for lasting changes — confirm before hand-editing.
    """
    def __init__(self, plotly_name="colorbar", parent_name="bar.marker", **kwargs):
        # The ``data_docs`` string below is user-facing help text for every
        # ColorBar sub-property.  It is runtime data, not a docstring, and
        # is passed through to the base CompoundValidator unchanged.
        super(ColorbarValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "ColorBar"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            bgcolor
                Sets the color of padded area.
            bordercolor
                Sets the axis line color.
            borderwidth
                Sets the width (in px) or the border enclosing
                this color bar.
            dtick
                Sets the step in-between ticks on this axis.
                Use with `tick0`. Must be a positive number, or
                special strings available to "log" and "date"
                axes. If the axis `type` is "log", then ticks
                are set every 10^(n*dtick) where n is the tick
                number. For example, to set a tick mark at 1,
                10, 100, 1000, ... set dtick to 1. To set tick
                marks at 1, 100, 10000, ... set dtick to 2. To
                set tick marks at 1, 5, 25, 125, 625, 3125, ...
                set dtick to log_10(5), or 0.69897000433. "log"
                has several special values; "L<f>", where `f`
                is a positive number, gives ticks linearly
                spaced in value (but not position). For example
                `tick0` = 0.1, `dtick` = "L0.5" will put ticks
                at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
                plus small digits between, use "D1" (all
                digits) or "D2" (only 2 and 5). `tick0` is
                ignored for "D1" and "D2". If the axis `type`
                is "date", then you must convert the time to
                milliseconds. For example, to set the interval
                between ticks to one day, set `dtick` to
                86400000.0. "date" also has special values
                "M<n>" gives ticks spaced by a number of
                months. `n` must be a positive integer. To set
                ticks on the 15th of every third month, set
                `tick0` to "2000-01-15" and `dtick` to "M3". To
                set ticks every 4 years, set `dtick` to "M48"
            exponentformat
                Determines a formatting rule for the tick
                exponents. For example, consider the number
                1,000,000,000. If "none", it appears as
                1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
                "power", 1x10^9 (with 9 in a super script). If
                "SI", 1G. If "B", 1B.
            len
                Sets the length of the color bar This measure
                excludes the padding of both ends. That is, the
                color bar length is this length minus the
                padding on both ends.
            lenmode
                Determines whether this color bar's length
                (i.e. the measure in the color variation
                direction) is set in units of plot "fraction"
                or in *pixels. Use `len` to set the value.
            minexponent
                Hide SI prefix for 10^n if |n| is below this
                number. This only has an effect when
                `tickformat` is "SI" or "B".
            nticks
                Specifies the maximum number of ticks for the
                particular axis. The actual number of ticks
                will be chosen automatically to be less than or
                equal to `nticks`. Has an effect only if
                `tickmode` is set to "auto".
            outlinecolor
                Sets the axis line color.
            outlinewidth
                Sets the width (in px) of the axis line.
            separatethousands
                If "true", even 4-digit integers are separated
            showexponent
                If "all", all exponents are shown besides their
                significands. If "first", only the exponent of
                the first tick is shown. If "last", only the
                exponent of the last tick is shown. If "none",
                no exponents appear.
            showticklabels
                Determines whether or not the tick labels are
                drawn.
            showtickprefix
                If "all", all tick labels are displayed with a
                prefix. If "first", only the first tick is
                displayed with a prefix. If "last", only the
                last tick is displayed with a suffix. If
                "none", tick prefixes are hidden.
            showticksuffix
                Same as `showtickprefix` but for tick suffixes.
            thickness
                Sets the thickness of the color bar This
                measure excludes the size of the padding, ticks
                and labels.
            thicknessmode
                Determines whether this color bar's thickness
                (i.e. the measure in the constant color
                direction) is set in units of plot "fraction"
                or in "pixels". Use `thickness` to set the
                value.
            tick0
                Sets the placement of the first tick on this
                axis. Use with `dtick`. If the axis `type` is
                "log", then you must take the log of your
                starting tick (e.g. to set the starting tick to
                100, set the `tick0` to 2) except when
                `dtick`=*L<f>* (see `dtick` for more info). If
                the axis `type` is "date", it should be a date
                string, like date data. If the axis `type` is
                "category", it should be a number, using the
                scale where each category is assigned a serial
                number from zero in the order it appears.
            tickangle
                Sets the angle of the tick labels with respect
                to the horizontal. For example, a `tickangle`
                of -90 draws the tick labels vertically.
            tickcolor
                Sets the tick color.
            tickfont
                Sets the color bar's tick label font
            tickformat
                Sets the tick label formatting rule using d3
                formatting mini-languages which are very
                similar to those in Python. For numbers, see:
                https://github.com/d3/d3-3.x-api-
                reference/blob/master/Formatting.md#d3_format
                And for dates see:
                https://github.com/d3/d3-time-
                format#locale_format We add one item to d3's
                date formatter: "%{n}f" for fractional seconds
                with n digits. For example, *2016-10-13
                09:15:23.456* with tickformat "%H~%M~%S.%2f"
                would display "09~15~23.46"
            tickformatstops
                A tuple of :class:`plotly.graph_objects.bar.mar
                ker.colorbar.Tickformatstop` instances or dicts
                with compatible properties
            tickformatstopdefaults
                When used in a template (as layout.template.dat
                a.bar.marker.colorbar.tickformatstopdefaults),
                sets the default property values to use for
                elements of bar.marker.colorbar.tickformatstops
            ticklabeloverflow
                Determines how we handle tick labels that would
                overflow either the graph div or the domain of
                the axis. The default value for inside tick
                labels is *hide past domain*. In other cases
                the default is *hide past div*.
            ticklabelposition
                Determines where tick labels are drawn.
            ticklen
                Sets the tick length (in px).
            tickmode
                Sets the tick mode for this axis. If "auto",
                the number of ticks is set via `nticks`. If
                "linear", the placement of the ticks is
                determined by a starting position `tick0` and a
                tick step `dtick` ("linear" is the default
                value if `tick0` and `dtick` are provided). If
                "array", the placement of the ticks is set via
                `tickvals` and the tick text is `ticktext`.
                ("array" is the default value if `tickvals` is
                provided).
            tickprefix
                Sets a tick label prefix.
            ticks
                Determines whether ticks are drawn or not. If
                "", this axis' ticks are not drawn. If
                "outside" ("inside"), this axis' are drawn
                outside (inside) the axis lines.
            ticksuffix
                Sets a tick label suffix.
            ticktext
                Sets the text displayed at the ticks position
                via `tickvals`. Only has an effect if
                `tickmode` is set to "array". Used with
                `tickvals`.
            ticktextsrc
                Sets the source reference on Chart Studio Cloud
                for ticktext .
            tickvals
                Sets the values at which ticks on this axis
                appear. Only has an effect if `tickmode` is set
                to "array". Used with `ticktext`.
            tickvalssrc
                Sets the source reference on Chart Studio Cloud
                for tickvals .
            tickwidth
                Sets the tick width (in px).
            title
                :class:`plotly.graph_objects.bar.marker.colorba
                r.Title` instance or dict with compatible
                properties
            titlefont
                Deprecated: Please use
                bar.marker.colorbar.title.font instead. Sets
                this color bar's title font. Note that the
                title's font used to be set by the now
                deprecated `titlefont` attribute.
            titleside
                Deprecated: Please use
                bar.marker.colorbar.title.side instead.
                Determines the location of color bar's title
                with respect to the color bar. Note that the
                title's location used to be set by the now
                deprecated `titleside` attribute.
            x
                Sets the x position of the color bar (in plot
                fraction).
            xanchor
                Sets this color bar's horizontal position
                anchor. This anchor binds the `x` position to
                the "left", "center" or "right" of the color
                bar.
            xpad
                Sets the amount of padding (in px) along the x
                direction.
            y
                Sets the y position of the color bar (in plot
                fraction).
            yanchor
                Sets this color bar's vertical position anchor
                This anchor binds the `y` position to the
                "top", "middle" or "bottom" of the color bar.
            ypad
                Sets the amount of padding (in px) along the y
                direction.
            """,
            ),
            **kwargs
        )
| 47.476987 | 83 | 0.526923 |
ace11197e37902a805db4c3be0abeb53d44a7786 | 1,017 | py | Python | flowpatrol/settings/production.py | chrxr/flowpatrol_old | f763c235c080bf3c76da84720a3691592141c206 | [
"BSD-3-Clause"
] | null | null | null | flowpatrol/settings/production.py | chrxr/flowpatrol_old | f763c235c080bf3c76da84720a3691592141c206 | [
"BSD-3-Clause"
] | 1 | 2019-06-09T14:17:48.000Z | 2019-06-18T20:14:42.000Z | flowpatrol/settings/production.py | chrxr/flowpatrol_old | f763c235c080bf3c76da84720a3691592141c206 | [
"BSD-3-Clause"
] | null | null | null | from .base import *
# NOTE(review): DEBUG = True in a *production* settings module exposes
# stack traces and settings values to end users — confirm this is
# intentional before deploying.
DEBUG = True
# Wagtail full-text search backed by a local Elasticsearch index.
WAGTAILSEARCH_BACKENDS = {
    'default': {
        'BACKEND': 'wagtail.wagtailsearch.backends.elasticsearch.ElasticSearch',
        'INDEX': 'flowpatrol'
    }
}
# Production-only apps added on top of the base settings.
INSTALLED_APPS+= (
    'djcelery',
    'kombu.transport.django',
    'gunicorn',
)
# Cache in a local Redis instance, namespaced with the site prefix.
CACHES = {
    'default': {
        'BACKEND': 'redis_cache.cache.RedisCache',
        'LOCATION': '127.0.0.1:6379',
        'KEY_PREFIX': 'flowpatrol',
        'OPTIONS': {
            'CLIENT_CLASS': 'redis_cache.client.DefaultClient',
        }
    }
}
# Use the cached template loader
TEMPLATE_LOADERS = (
    ('django.template.loaders.cached.Loader', (
        'django.template.loaders.filesystem.Loader',
        'django.template.loaders.app_directories.Loader',
    )),
)
# CELERY SETTINGS
import djcelery
djcelery.setup_loader()
BROKER_URL = 'redis://'
CELERY_SEND_TASK_ERROR_EMAILS = True
CELERYD_LOG_COLOR = False
# Optional machine-local overrides; absence is not an error.
try:
    from .local import *
except ImportError:
    pass
# NOTE(review): hard-coded secret committed to the repository — move this
# into the environment (or .local settings) and rotate the exposed key.
MANDRILL_API_KEY = 'Z7vKU6m7pINJuxiUMco9Yw'
| 18.490909 | 80 | 0.648968 |
ace1119af085a6ebaf23e8b2974f76f85e810796 | 515 | py | Python | photo/migrations/0005_auto_20210528_1214.py | prajalpatidar06/Hazel | 76ce084ab81962d579f1e14e3258f332b3bb4965 | [
"Apache-2.0"
] | null | null | null | photo/migrations/0005_auto_20210528_1214.py | prajalpatidar06/Hazel | 76ce084ab81962d579f1e14e3258f332b3bb4965 | [
"Apache-2.0"
] | null | null | null | photo/migrations/0005_auto_20210528_1214.py | prajalpatidar06/Hazel | 76ce084ab81962d579f1e14e3258f332b3bb4965 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.2.1 on 2021-05-28 06:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the photo app.

    Replaces the Photo.time field with a new Photo.topic CharField.
    Generated by Django; avoid hand-editing applied migrations.
    """
    # Must run after migration 0004 of the photo app.
    dependencies = [
        ('photo', '0004_auto_20210528_1135'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='photo',
            name='time',
        ),
        migrations.AddField(
            model_name='photo',
            name='topic',
            # Empty-string default so existing rows get a value.
            field=models.CharField(default='', max_length=200),
        ),
    ]
| 22.391304 | 64 | 0.537864 |
ace112b1f6805f98382136161ef8418643bc8a5b | 53 | py | Python | trainer/__init__.py | SURFZJY/Real-time-Text-Detection | b76ee8d840b1fcebf7b9545402907416c7daf24e | [
"Apache-2.0"
] | 65 | 2019-11-29T07:48:38.000Z | 2021-02-05T15:15:28.000Z | trainer/__init__.py | 2017TJM/Real-time-Text-Detection | ac0adb060ed15ed96531e4a545f007990732ddd3 | [
"Apache-2.0"
] | 7 | 2019-12-09T02:11:39.000Z | 2020-10-12T13:27:54.000Z | trainer/__init__.py | 2017TJM/Real-time-Text-Detection | ac0adb060ed15ed96531e4a545f007990732ddd3 | [
"Apache-2.0"
] | 14 | 2019-12-12T06:12:18.000Z | 2020-06-08T03:51:39.000Z | # -*- coding: utf-8 -*-
from .trainer import Trainer | 17.666667 | 28 | 0.641509 |
ace112c8fa1bbd1e95262146712ce527884cf15a | 894 | py | Python | hard-gists/4efe2e28ffcba38aa242/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 21 | 2019-07-08T08:26:45.000Z | 2022-01-24T23:53:25.000Z | hard-gists/4efe2e28ffcba38aa242/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 5 | 2019-06-15T14:47:47.000Z | 2022-02-26T05:02:56.000Z | hard-gists/4efe2e28ffcba38aa242/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 17 | 2019-05-16T03:50:34.000Z | 2021-01-14T14:35:12.000Z | #!/usr/bin/env python
"""
HACK to support the Django + nose without django-nose.
Built based on documentation from:
* https://docs.djangoproject.com/en/1.8/topics/testing/advanced/#using-the-django-test-runner-to-test-reusable-applications
* http://nose.readthedocs.org/en/latest/usage.html#basic-usage
"""
import sys
import django
import nose
from django.test.utils import setup_test_environment, teardown_test_environment
from django.db import connection
if __name__ == '__main__':
    # Populate the app registry before touching any test machinery.
    django.setup()

    # Honour Django's --keepdb flag even though nose does not know about
    # it: pull it out of argv so nose's option parsing never sees it.
    keepdb = '--keepdb' in sys.argv
    if keepdb:
        sys.argv.remove('--keepdb')

    # Mirror what Django's test runner does around a nose run: set up the
    # test environment and a (possibly kept) test database, run nose,
    # then tear everything back down.
    setup_test_environment()
    test_db_name = connection.creation.create_test_db(keepdb=keepdb)
    result = nose.run()
    connection.creation.destroy_test_db(test_db_name, keepdb=keepdb)
    teardown_test_environment()

    # nose.run() returns True on success; reflect failure in the exit code.
    if not result:
        sys.exit(1)
| 26.294118 | 123 | 0.728188 |
ace112db22ccac329d133e955851781b0e1ae0ac | 2,484 | py | Python | example/a549/test-tgfb-data.py | johnbachman/OmicsIntegrator | 0d76f73f6bf4b827074f5651034fd95e7518a642 | [
"BSD-2-Clause",
"MIT"
] | null | null | null | example/a549/test-tgfb-data.py | johnbachman/OmicsIntegrator | 0d76f73f6bf4b827074f5651034fd95e7518a642 | [
"BSD-2-Clause",
"MIT"
] | null | null | null | example/a549/test-tgfb-data.py | johnbachman/OmicsIntegrator | 0d76f73f6bf4b827074f5651034fd95e7518a642 | [
"BSD-2-Clause",
"MIT"
] | 1 | 2020-12-12T22:00:56.000Z | 2020-12-12T22:00:56.000Z | #!/usr/local/bin/python
'''
A test script to run through the sample dataset provided with garnet-forest
'''
__author__='Sara JG Gosline'
__email__='sgosline@mit.edu'
import os, sys
from optparse import OptionParser
if __name__=='__main__':
    # Command-line options: which pipeline stages to run, where the
    # msgsteiner binary lives, and whether to add noisy-edge permutations.
    parser=OptionParser()
    parser.add_option('--forest-only',dest='forest_only',action='store_true',default=False,help='Set this flag to run forest on phospho-proteomic data only. DEFAULT:%default')
    parser.add_option('--msgpath',dest='msgsteiner',type='string',help='Path to msgsteiner code, be sure to include!')
    parser.add_option('--doRandom',dest='rand',action='store_true',help='THIS WILL TAKE A LONG TIME: set this flag to do 50 permutations of forest using the --noisyEdges flag',default=False)
    opts,args=parser.parse_args()
    # Prize (weight) file for the phospho-proteomic data fed to forest.
    phos_weights='Tgfb_phos.txt'
    #garnet requires a configuration file that has all the data
    forest_out='tgfb_garnet_forest_output'
    garnet_conf='tgfb_garnet.cfg' #provided config file
    gcmd='python ../../scripts/garnet.py --outdir=%s %s'%(forest_out,garnet_conf) #command
    #forest requires more inputs
    forest_conf='tgfb_forest.cfg' #provided config file should include garnetBeta parameter
    edge_file='../../data/iref_mitab_miscore_2013_08_12_interactome.txt' #interactome
    msgsteinerpath=opts.msgsteiner ##WE NEED MSGSTEINER9 INSTALLED!!!
    ##now ready to run commands
    # NOTE(review): commands are built by string interpolation and run via
    # os.system — acceptable for a local example script, but unsafe for
    # untrusted paths (no quoting/escaping).
    if not opts.forest_only:
        # Default path: run garnet first, then feed its output into forest.
        print(gcmd)
        res=os.system(gcmd)
        if res!=0:
            sys.exit('Error executing garnet, will not execute forest')
        garnet_output=forest_out+'/events_to_genes_with_motifsregression_results_FOREST_INPUT.tsv'
        #garnet_beta='0.1'
        fcmd='python ../../scripts/forest.py --prize=%s --edge=%s --conf=%s --garnet=%s --outpath=%s --msgpath=%s'%(phos_weights,edge_file,forest_conf,garnet_output,forest_out,msgsteinerpath)
        if opts.rand:
            fcmd=fcmd+' --noisyEdges=20'
        print('\n'+fcmd)
        os.system(fcmd)
    else:
        # --forest-only: skip garnet and run forest on the prizes alone.
        forest_out='tgfb_forest_output'
        if not os.path.exists(forest_out): ##FOREST WILL NOT CREATE DIRECTORY FOR YOU, GARNET WILL
            os.makedirs(forest_out)
        fcmd='python ../../scripts/forest.py --prize=%s --edge=%s --conf=%s --outpath=%s --msgpath=%s'%(phos_weights,edge_file,forest_conf,forest_out,msgsteinerpath)
        if opts.rand:
            fcmd=fcmd+' --noisyEdges=50'
        print('\n'+fcmd)
        os.system(fcmd)
| 40.721311 | 191 | 0.689614 |
ace1137f10b735ee28270f1244d272e8726f6b27 | 5,266 | py | Python | src/lib/telegram/ext/inlinequeryhandler.py | thonkify/thonkify | 2cb4493d796746cb46c8519a100ef3ef128a761a | [
"MIT"
] | 17 | 2017-08-04T15:41:05.000Z | 2020-10-16T18:02:41.000Z | src/lib/telegram/ext/inlinequeryhandler.py | thonkify/thonkify | 2cb4493d796746cb46c8519a100ef3ef128a761a | [
"MIT"
] | 3 | 2017-08-04T23:37:37.000Z | 2017-08-04T23:38:34.000Z | src/lib/telegram/ext/inlinequeryhandler.py | thonkify/thonkify | 2cb4493d796746cb46c8519a100ef3ef128a761a | [
"MIT"
] | 3 | 2017-12-07T16:30:59.000Z | 2019-06-16T02:48:28.000Z | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2017
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
""" This module contains the InlineQueryHandler class """
import re
from future.utils import string_types
from telegram import Update
from telegram.utils.deprecate import deprecate
from .handler import Handler
class InlineQueryHandler(Handler):
    """
    Handler class to handle Telegram inline queries. Optionally based on a regex. Read the
    documentation of the ``re`` module for more information.
    Args:
        callback (function): A function that takes ``bot, update`` as
            positional arguments. It will be called when the ``check_update``
            has determined that an update should be processed by this handler.
        pass_update_queue (optional[bool]): If set to ``True``, a keyword argument called
            ``update_queue`` will be passed to the callback function. It will be the ``Queue``
            instance used by the ``Updater`` and ``Dispatcher`` that contains new updates which can
            be used to insert updates. Default is ``False``.
        pass_job_queue (optional[bool]): If set to ``True``, a keyword argument called
            ``job_queue`` will be passed to the callback function. It will be a ``JobQueue``
            instance created by the ``Updater`` which can be used to schedule new jobs.
            Default is ``False``.
        pattern (optional[str or Pattern]): Optional regex pattern. If not ``None`` ``re.match``
            is used to determine if an update should be handled by this handler.
        pass_groups (optional[bool]): If the callback should be passed the
            result of ``re.match(pattern, query).groups()`` as a keyword
            argument called ``groups``. Default is ``False``
        pass_groupdict (optional[bool]): If the callback should be passed the
            result of ``re.match(pattern, query).groupdict()`` as a keyword
            argument called ``groupdict``. Default is ``False``
        pass_user_data (optional[bool]): If set to ``True``, a keyword argument called
            ``user_data`` will be passed to the callback function. It will be a ``dict`` you
            can use to keep any data related to the user that sent the update. For each update of
            the same user, it will be the same ``dict``. Default is ``False``.
        pass_chat_data (optional[bool]): If set to ``True``, a keyword argument called
            ``chat_data`` will be passed to the callback function. It will be a ``dict`` you
            can use to keep any data related to the chat that the update was sent in.
            For each update in the same chat, it will be the same ``dict``. Default is ``False``.
    """
    def __init__(self,
                 callback,
                 pass_update_queue=False,
                 pass_job_queue=False,
                 pattern=None,
                 pass_groups=False,
                 pass_groupdict=False,
                 pass_user_data=False,
                 pass_chat_data=False):
        super(InlineQueryHandler, self).__init__(
            callback,
            pass_update_queue=pass_update_queue,
            pass_job_queue=pass_job_queue,
            pass_user_data=pass_user_data,
            pass_chat_data=pass_chat_data)
        # Compile string patterns once here so matching is cheap later.
        if isinstance(pattern, string_types):
            pattern = re.compile(pattern)
        self.pattern = pattern
        self.pass_groups = pass_groups
        self.pass_groupdict = pass_groupdict
    def check_update(self, update):
        """Return whether this handler should process ``update``."""
        # Only inline-query updates are of interest.
        if isinstance(update, Update) and update.inline_query:
            if self.pattern:
                if update.inline_query.query:
                    match = re.match(self.pattern, update.inline_query.query)
                    return bool(match)
                # NOTE(review): when a pattern is set but the query text is
                # empty this falls through and returns None (falsy).
            else:
                return True
    def handle_update(self, update, dispatcher):
        """Invoke the callback, adding any requested optional arguments."""
        optional_args = self.collect_optional_args(dispatcher, update)
        if self.pattern:
            # Re-match to expose groups/groupdict; presumably check_update()
            # already accepted this query — confirm the dispatcher
            # guarantees that ordering.
            match = re.match(self.pattern, update.inline_query.query)
            if self.pass_groups:
                optional_args['groups'] = match.groups()
            if self.pass_groupdict:
                optional_args['groupdict'] = match.groupdict()
        return self.callback(dispatcher.bot, update, **optional_args)
    # old non-PEP8 Handler methods
    m = "telegram.InlineQueryHandler."
    checkUpdate = deprecate(check_update, m + "checkUpdate", m + "check_update")
    handleUpdate = deprecate(handle_update, m + "handleUpdate", m + "handle_update")
| 47.017857 | 99 | 0.654387 |
ace113d66dc6db8a6dedb185567aceb3ea5c4841 | 1,389 | py | Python | password.py | bellahOchola/password-locker | 58ba1977ea0131a4c1e393487f8ce4874d53d4e7 | [
"MIT"
] | null | null | null | password.py | bellahOchola/password-locker | 58ba1977ea0131a4c1e393487f8ce4874d53d4e7 | [
"MIT"
] | null | null | null | password.py | bellahOchola/password-locker | 58ba1977ea0131a4c1e393487f8ce4874d53d4e7 | [
"MIT"
class User:
    """A registered user of the password locker.

    Every account created via ``create_account`` is kept in the shared
    class-level ``users_list`` registry.
    """

    # Shared registry of all created accounts (class-level on purpose).
    users_list = []

    def __init__(self, user_name, password):
        """Remember the login name and (plaintext) password."""
        self.user_name = user_name
        self.password = password

    def create_account(self):
        """Register this user in the shared ``users_list``."""
        User.users_list.append(self)

    @classmethod
    def login(cls, user_name, password):
        """Return True when some registered user matches both credentials."""
        return any(
            user.user_name == user_name and user.password == password
            for user in cls.users_list
        )
class Credentials:
    """A stored credential: an account plus its login name and password.

    Saved credentials live in the shared class-level ``credential_list``.
    Passwords are kept in plaintext, matching the rest of this module.
    """

    # Shared registry of every saved credential.
    credential_list = []

    def __init__(self, account_name, user_name, password):
        self.account_name = account_name
        self.user_name = user_name
        self.password = password

    def save_credential(self):
        """Add this credential to the shared registry."""
        Credentials.credential_list.append(self)

    def delete_credential(self):
        """Remove this credential from the registry.

        Raises ValueError (from list.remove) if it was never saved.
        """
        Credentials.credential_list.remove(self)

    @classmethod
    def display_credentials(cls):
        """Return the list of all saved credentials."""
        return cls.credential_list

    @classmethod
    def find_by_accountname(cls, account_name):
        """Return the Credentials object saved under *account_name*.

        Returns None when no saved credential matches.  (The previous
        docstring claimed this returned the password; it has always
        returned the whole Credentials object.)
        """
        for credentials in cls.credential_list:
            if credentials.account_name == account_name:
                return credentials
        return None
| 26.711538 | 88 | 0.62635 |
ace11477717b3fabd7687a42617acd0426d12f18 | 1,048 | py | Python | loans/admin.py | lubegamark/senkumba | d7caf577ae7d1a44e9faa8a219847d7e5c531777 | [
"MIT"
] | 1 | 2018-02-05T10:28:48.000Z | 2018-02-05T10:28:48.000Z | loans/admin.py | lubegamark/senkumba | d7caf577ae7d1a44e9faa8a219847d7e5c531777 | [
"MIT"
] | null | null | null | loans/admin.py | lubegamark/senkumba | d7caf577ae7d1a44e9faa8a219847d7e5c531777 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.contrib.admin import ModelAdmin
from loans.models import LoanApplication, Loan, LoanCategory, InterestType, LoanApplicationStatus, \
LoanStatus, LoanPayment, Period
class LoansAdmin(ModelAdmin):
    """Admin changelist configuration for Loan records."""
    # Columns shown in the admin changelist.
    list_display = ('id', 'user', 'start', 'expected_end', 'type', 'amount', 'status', 'approval_date')
    # Newest approvals first; id as a stable final tie-breaker.
    ordering = ('-approval_date', '-created', 'id')
    # NOTE(review): search_fields entries must be valid field lookups on
    # the Loan model (related fields need "__" paths) — confirm 'date',
    # 'user' and 'type' resolve correctly.
    search_fields = ('date', 'user', 'type')
    # Computed/derived field shown read-only on the detail page.
    readonly_fields = ('summary',)
class LoanApplicationsAdmin(ModelAdmin):
    """Admin changelist configuration for LoanApplication records."""
    # Columns shown in the admin changelist.
    list_display = ('id', 'user', 'proposed_start', 'proposed_end', 'type', 'proposed_amount', 'status', 'approved')
    # Newest applications first; id as a stable final tie-breaker.
    ordering = ('-application_date', '-created', 'id')
    # NOTE(review): as with LoansAdmin, confirm these lookups resolve on
    # the LoanApplication model.
    search_fields = ('date', 'user', 'type')
# Expose the loan models in the Django admin.  Loan and LoanApplication
# use the customised admin classes defined above; the remaining models
# are registered with the default admin.
admin.site.register(Loan, LoansAdmin)
admin.site.register(LoanApplication, LoanApplicationsAdmin)
admin.site.register(LoanCategory)
admin.site.register(LoanStatus)
admin.site.register(LoanApplicationStatus)
admin.site.register(LoanPayment)
admin.site.register(Period)
admin.site.register(InterestType)
| 36.137931 | 116 | 0.737595 |
ace115e1f40a0afc53492d26496ad0be99f442fb | 4,582 | py | Python | experiments/buffers/sim.py | ccanel/etalon | 653a9c881dde4f544908dd4e6e95214e6ec63693 | [
"MIT"
] | 2 | 2019-08-19T13:42:46.000Z | 2020-12-04T19:19:28.000Z | experiments/buffers/sim.py | ccanel/etalon | 653a9c881dde4f544908dd4e6e95214e6ec63693 | [
"MIT"
] | 1 | 2022-02-07T03:49:33.000Z | 2022-02-07T03:49:33.000Z | experiments/buffers/sim.py | ccanel/etalon | 653a9c881dde4f544908dd4e6e95214e6ec63693 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from collections import defaultdict
import os
from os import path
import sys
# Directory containing this program.
PROGDIR = path.dirname(path.realpath(__file__))
from dotmap import DotMap
import simpleplotlib
simpleplotlib.default_options.rcParams['font.family'] = "Tahoma"
from simpleplotlib import plot
# MSS (bytes) the experiment data was recorded with, and the MSS the
# plots should be expressed in; main() rescales queue lengths between
# the two.
MSS_B_EXP = 1500
MSS_B_TARGET = 9000
# Queue capacities (in packets, after rescaling) to include in the plot.
# CHOSEN_QUEUE_CAPS = [8, 16, 32, 64, 128]
CHOSEN_QUEUE_CAPS = [16]
def main():
    """Parse one experiment data file and plot FCT vs. circuit uptime.

    Expects ``sys.argv[1]`` to name a whitespace-separated results file;
    keeps only the smallest flow-completion time per (flows, queue
    length, switch time) and writes one PDF per flow count under
    ``graphs/nsdi2020``.
    """
    assert len(sys.argv) == 2, "Expected one argument: experiment data file"
    edf = sys.argv[1]
    if not path.isfile(edf):
        print("The first argument must be a file, but is: {}".format(edf))
        sys.exit(-1)
    # Specify and create the output directory.
    odr = path.join(PROGDIR, "graphs", "nsdi2020")
    if path.exists(odr):
        if not path.isdir(odr):
            print("Output directory exists and is a file: {}".format(odr))
            sys.exit(-1)
    else:
        os.makedirs(odr)
    # { num flows : { q len : list of results } }
    data = defaultdict(lambda: defaultdict(dict))
    with open(edf) as f:
        for line in f:
            # date, swt_us, num_flows, q_len_p, delay_2way_ns, fct_us, num_rtx, \
            # num_rto, num_syns, avt_rtt_us = line.strip().split()
            date, num_flows, swt_us, q_len_p, delay_2way_ns, fct_us, num_rtx, \
                num_rto, num_syns, avt_rtt_us = line.strip().split()
            # Convert the queue length from being in terms of 1500-byte packets
            # to being in terms of 9000-bytes packets.
            q_len_p = float(q_len_p) * (float(MSS_B_EXP) / float(MSS_B_TARGET))
            swt_us = float(swt_us)
            fct_s = float(fct_us) / 1e6
            record = (date, float(delay_2way_ns), fct_s, float(num_rtx),
                      float(num_rto), float(num_syns), float(avt_rtt_us))
            # Store this record if it is the smallest FCT for this q len.
            if ((swt_us in data[num_flows][q_len_p] and
                 fct_s < data[num_flows][q_len_p][swt_us][2]) or
                    swt_us not in data[num_flows][q_len_p]):
                data[num_flows][q_len_p][swt_us] = record
    # One figure per flow count.
    for num_flows, q_len_results in data.items():
        # for q_len_p, swt_us_results in q_len_results.items():
        #     for swt_us, val in swt_us_results.items():
        #         print val
        # assert False
        # { q len : list of pairs (switch time, FCT) }.items()
        lines = {q_len_p : [
            (swt_us, fct_s)
            for swt_us, (_, _, fct_s, _, _, _, _) in swt_us_results.items()]
                 for q_len_p, swt_us_results in q_len_results.items()}.items()
        # Pick only the lines we want.
        # NOTE(review): q_len_p was rescaled above — confirm that
        # CHOSEN_QUEUE_CAPS is expressed in the rescaled units.
        lines = [(q_len_p, res) for q_len_p, res in lines
                 if q_len_p in CHOSEN_QUEUE_CAPS]
        # Sort the datapoints based on their x-valies.
        lines = sorted(lines, key=lambda a: a[0])
        # lbls: list of q lens
        # lines: list of lists of pairs of (switch time, FCT)
        lbls, lines = zip(*lines)
        # xs: list of lists of switch times
        # ys: list of lists of FCTs
        xs = [[p[0] for p in sorted(val, key=lambda a: a[0])] for val in lines]
        ys = [[p[1] for p in sorted(val, key=lambda a: a[0])] for val in lines]
        options = DotMap()
        options.plot_type = "LINE"
        if len(CHOSEN_QUEUE_CAPS) > 1:
            options.legend.options.labels = [
                "{} packets".format(int(round(lbl))) for lbl in lbls]
            options.legend.options.fontsize = 18
            options.legend.options.ncol = 1
        options.series_options = [DotMap(linewidth=2, marker="o")
                                  for _ in range(len(xs))]
        options.output_fn = path.join(odr, "sim-{}-flows.pdf".format(num_flows))
        options.x.label.xlabel = "Circuit uptime ($\mu$s)"
        options.y.label.ylabel = "Flow completion time (s)"
        options.x.label.fontsize = options.y.label.fontsize = 18
        options.x.ticks.major.options.labelsize = \
            options.y.ticks.major.options.labelsize = 18
        options.x.log = True
        options.x.axis.show = options.y.axis.show = True
        options.x.axis.color = options.y.axis.color = "black"
        options.x.axis.stretch = 1.35
        # Flip the x-axis.
        options.x.limits = (max([max(vals) for vals in xs]) * 1.5,
                            min([min(vals) for vals in xs]) * 0.5)
        options.y.limits = (0, max([max(vals) for vals in ys]) + 4)
        plot(xs, ys, options)
# Script entry point.
if __name__ == "__main__":
    main()
| 40.910714 | 81 | 0.592754 |
ace118487d7662ca3a665bc7af394926369d797d | 161 | py | Python | example/urlmap.py | ngalkov/Vial | 514fd8ad878ae64ed34d5967ab47a08bca584752 | [
"MIT"
] | null | null | null | example/urlmap.py | ngalkov/Vial | 514fd8ad878ae64ed34d5967ab47a08bca584752 | [
"MIT"
] | null | null | null | example/urlmap.py | ngalkov/Vial | 514fd8ad878ae64ed34d5967ab47a08bca584752 | [
"MIT"
] | 1 | 2022-02-09T18:54:14.000Z | 2022-02-09T18:54:14.000Z | """URL to view mapping"""
urlmap = [
    # (URL regex, view name) pairs; presumably matched in order by the
    # framework's router — verify against the dispatcher.
    (r"^/$", "index"),
    (r"^/hello$", "hello"),
    # Named group "item_id" captures the numeric item id.
    (r"^/item/(?P<item_id>\d+)$", "item"),
    (r"^/logo$", "logo"),
]
| 16.1 | 42 | 0.409938 |
ace118dae9b2c2fb1b7881b5f0a4b087dba2724d | 2,592 | py | Python | networks/Lawhern1to30Hz_2sec/model_summary.py | msseibel/Biomag21 | 1b47cc3cb7b8bb3ff5adff5564947c187d658a22 | [
"MIT"
] | null | null | null | networks/Lawhern1to30Hz_2sec/model_summary.py | msseibel/Biomag21 | 1b47cc3cb7b8bb3ff5adff5564947c187d658a22 | [
"MIT"
] | null | null | null | networks/Lawhern1to30Hz_2sec/model_summary.py | msseibel/Biomag21 | 1b47cc3cb7b8bb3ff5adff5564947c187d658a22 | [
"MIT"
] | null | null | null | Model: "model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) [(None, 512, 160, 1)] 0
_________________________________________________________________
permute (Permute) (None, 160, 512, 1) 0
_________________________________________________________________
conv2d (Conv2D) (None, 160, 512, 8) 512
_________________________________________________________________
batch_normalization (BatchNo (None, 160, 512, 8) 32
_________________________________________________________________
depthwise_conv2d (DepthwiseC (None, 1, 512, 16) 2560
_________________________________________________________________
batch_normalization_1 (Batch (None, 1, 512, 16) 64
_________________________________________________________________
activation (Activation) (None, 1, 512, 16) 0
_________________________________________________________________
average_pooling2d (AveragePo (None, 1, 128, 16) 0
_________________________________________________________________
dropout (Dropout) (None, 1, 128, 16) 0
_________________________________________________________________
separable_conv2d (SeparableC (None, 1, 128, 16) 512
_________________________________________________________________
batch_normalization_2 (Batch (None, 1, 128, 16) 64
_________________________________________________________________
activation_1 (Activation) (None, 1, 128, 16) 0
_________________________________________________________________
average_pooling2d_1 (Average (None, 1, 16, 16) 0
_________________________________________________________________
dropout_1 (Dropout) (None, 1, 16, 16) 0
_________________________________________________________________
flatten (Flatten) (None, 256) 0
_________________________________________________________________
dense (Dense) (None, 3) 771
_________________________________________________________________
softmax (Activation) (None, 3) 0
=================================================================
Total params: 4,515
Trainable params: 4,435
Non-trainable params: 80
_________________________________________________________________
| 60.27907 | 65 | 0.688657 |
ace119115481aaa6d781f0e943273c9babd69e9d | 117 | py | Python | markovorm/setup.py | thatosmk/markov-orm | c04420fd7bdd646ac840c5bfd62be3dfd637fd07 | [
"MIT"
] | null | null | null | markovorm/setup.py | thatosmk/markov-orm | c04420fd7bdd646ac840c5bfd62be3dfd637fd07 | [
"MIT"
] | null | null | null | markovorm/setup.py | thatosmk/markov-orm | c04420fd7bdd646ac840c5bfd62be3dfd637fd07 | [
"MIT"
] | null | null | null | user="Ryan"
password="Ryan12345"
server="analytics.technocore.co.za:3306"
database="analytics_ryan"
driver="mysqldb"
| 19.5 | 40 | 0.794872 |
ace11ab07c557bbffea8f73ba73a1de9d54a38ea | 9,432 | py | Python | nova/tests/api/openstack/compute/plugins/v3/test_migrate_server.py | vasart/nova | bca5004d367e0418e35f8a72fe0f2e106e977ab0 | [
"Apache-2.0"
] | 1 | 2021-09-10T15:29:02.000Z | 2021-09-10T15:29:02.000Z | nova/tests/api/openstack/compute/plugins/v3/test_migrate_server.py | PFZheng/nova | 84be8abbccb5ddc2d7c5a7db59019ed1edb19e7f | [
"Apache-2.0"
] | null | null | null | nova/tests/api/openstack/compute/plugins/v3/test_migrate_server.py | PFZheng/nova | 84be8abbccb5ddc2d7c5a7db59019ed1edb19e7f | [
"Apache-2.0"
] | null | null | null | # Copyright 2011 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack.compute.plugins.v3 import migrate_server
from nova import exception
from nova.openstack.common import uuidutils
from nova.tests.api.openstack.compute.plugins.v3 import \
admin_only_action_common
from nova.tests.api.openstack import fakes
class MigrateServerTests(admin_only_action_common.CommonTests):
    """Tests for the v3 'os-migrate-server' server actions.

    Covers the cold-migration ('migrate') and live-migration
    ('migrate_live') actions: the success paths, request validation,
    and the translation of compute-layer exceptions into HTTP errors.
    """

    def setUp(self):
        super(MigrateServerTests, self).setUp()
        self.controller = migrate_server.MigrateServerController()
        self.compute_api = self.controller.compute_api

        def _fake_controller(*args, **kwargs):
            return self.controller

        self.stubs.Set(migrate_server, 'MigrateServerController',
                       _fake_controller)
        self.app = fakes.wsgi_app_v3(init_only=('servers',
                                                'os-migrate-server'),
                                     fake_auth_context=self.context)
        self.mox.StubOutWithMock(self.compute_api, 'get')

    def _migrate_fixtures(self):
        """Return fresh (method_translations, body_map, args_map) fixtures.

        Several tests need the same three mappings; building them here
        keeps the tests in sync and avoids sharing mutable dicts between
        test cases.
        """
        method_translations = {'migrate': 'resize',
                               'migrate_live': 'live_migrate'}
        body_map = {'migrate_live': {'host': 'hostname',
                                     'block_migration': False,
                                     'disk_over_commit': False}}
        args_map = {'migrate_live': ((False, False, 'hostname'), {})}
        return method_translations, body_map, args_map

    def test_migrate(self):
        method_translations, body_map, args_map = self._migrate_fixtures()
        self._test_actions(['migrate', 'migrate_live'], body_map=body_map,
                           method_translations=method_translations,
                           args_map=args_map)

    def test_migrate_with_non_existed_instance(self):
        _, body_map, _ = self._migrate_fixtures()
        self._test_actions_with_non_existed_instance(
            ['migrate', 'migrate_live'], body_map=body_map)

    def test_migrate_raise_conflict_on_invalid_state(self):
        method_translations, body_map, args_map = self._migrate_fixtures()
        self._test_actions_raise_conflict_on_invalid_state(
            ['migrate', 'migrate_live'], body_map=body_map, args_map=args_map,
            method_translations=method_translations)

    def test_actions_with_locked_instance(self):
        method_translations, body_map, args_map = self._migrate_fixtures()
        self._test_actions_with_locked_instance(
            ['migrate', 'migrate_live'], body_map=body_map, args_map=args_map,
            method_translations=method_translations)

    def _test_migrate_exception(self, exc_info, expected_result):
        """Assert a failing cold-migrate maps exc_info to the HTTP code."""
        self.mox.StubOutWithMock(self.compute_api, 'resize')
        instance = self._stub_instance_get()
        self.compute_api.resize(self.context, instance).AndRaise(exc_info)
        self.mox.ReplayAll()
        res = self._make_request('/servers/%s/action' % instance['uuid'],
                                 {'migrate': None})
        self.assertEqual(expected_result, res.status_int)

    def test_migrate_too_many_instances(self):
        # Quota exhaustion must surface as 413 Request Entity Too Large.
        exc_info = exception.TooManyInstances(overs='', req='', used=0,
                                              allowed=0, resource='')
        self._test_migrate_exception(exc_info, 413)

    def _test_migrate_live_succeeded(self, param):
        """Assert a live-migrate request with the given body returns 202."""
        self.mox.StubOutWithMock(self.compute_api, 'live_migrate')
        instance = self._stub_instance_get()
        self.compute_api.live_migrate(self.context, instance, False,
                                      False, 'hostname')
        self.mox.ReplayAll()
        res = self._make_request('/servers/%s/action' % instance.uuid,
                                 {'migrate_live': param})
        self.assertEqual(202, res.status_int)

    def test_migrate_live_enabled(self):
        param = {'host': 'hostname',
                 'block_migration': False,
                 'disk_over_commit': False}
        self._test_migrate_live_succeeded(param)

    def test_migrate_live_enabled_with_string_param(self):
        # Boolean params may arrive as strings; the API must coerce them.
        param = {'host': 'hostname',
                 'block_migration': "False",
                 'disk_over_commit': "False"}
        self._test_migrate_live_succeeded(param)

    def test_migrate_live_missing_dict_param(self):
        # 'host' key replaced by 'dummy' -> schema validation failure (400).
        res = self._make_request('/servers/FAKE/action',
                                 {'migrate_live': {'dummy': 'hostname',
                                                   'block_migration': False,
                                                   'disk_over_commit': False}})
        self.assertEqual(400, res.status_int)

    def test_migrate_live_with_invalid_block_migration(self):
        res = self._make_request('/servers/FAKE/action',
                                 {'migrate_live': {'host': 'hostname',
                                                   'block_migration': "foo",
                                                   'disk_over_commit': False}})
        self.assertEqual(400, res.status_int)

    def test_migrate_live_with_invalid_disk_over_commit(self):
        res = self._make_request('/servers/FAKE/action',
                                 {'migrate_live': {'host': 'hostname',
                                                   'block_migration': False,
                                                   'disk_over_commit': "foo"}})
        self.assertEqual(400, res.status_int)

    def _test_migrate_live_failed_with_exception(self, fake_exc,
                                                 uuid=None):
        """Assert a live-migrate failing with fake_exc returns 400.

        The exception text must be echoed in the response body.
        """
        self.mox.StubOutWithMock(self.compute_api, 'live_migrate')
        instance = self._stub_instance_get(uuid=uuid)
        self.compute_api.live_migrate(self.context, instance, False,
                                      False, 'hostname').AndRaise(fake_exc)
        self.mox.ReplayAll()
        res = self._make_request('/servers/%s/action' % instance.uuid,
                                 {'migrate_live':
                                  {'host': 'hostname',
                                   'block_migration': False,
                                   'disk_over_commit': False}})
        self.assertEqual(400, res.status_int)
        # Python 2 codebase: unicode() mirrors the webob body encoding.
        self.assertIn(unicode(fake_exc), res.body)

    def test_migrate_live_compute_service_unavailable(self):
        self._test_migrate_live_failed_with_exception(
            exception.ComputeServiceUnavailable(host='host'))

    def test_migrate_live_invalid_hypervisor_type(self):
        self._test_migrate_live_failed_with_exception(
            exception.InvalidHypervisorType())

    def test_migrate_live_invalid_cpu_info(self):
        self._test_migrate_live_failed_with_exception(
            exception.InvalidCPUInfo(reason=""))

    def test_migrate_live_unable_to_migrate_to_self(self):
        uuid = uuidutils.generate_uuid()
        self._test_migrate_live_failed_with_exception(
            exception.UnableToMigrateToSelf(instance_id=uuid,
                                            host='host'),
            uuid=uuid)

    def test_migrate_live_destination_hypervisor_too_old(self):
        self._test_migrate_live_failed_with_exception(
            exception.DestinationHypervisorTooOld())

    def test_migrate_live_no_valid_host(self):
        self._test_migrate_live_failed_with_exception(
            exception.NoValidHost(reason=''))

    def test_migrate_live_invalid_local_storage(self):
        self._test_migrate_live_failed_with_exception(
            exception.InvalidLocalStorage(path='', reason=''))

    def test_migrate_live_invalid_shared_storage(self):
        self._test_migrate_live_failed_with_exception(
            exception.InvalidSharedStorage(path='', reason=''))

    def test_migrate_live_hypervisor_unavailable(self):
        self._test_migrate_live_failed_with_exception(
            exception.HypervisorUnavailable(host=""))

    def test_migrate_live_instance_not_running(self):
        self._test_migrate_live_failed_with_exception(
            exception.InstanceNotRunning(instance_id=""))

    def test_migrate_live_pre_check_error(self):
        self._test_migrate_live_failed_with_exception(
            exception.MigrationPreCheckError(reason=''))
| 46.009756 | 79 | 0.604008 |
ace11b1c5c3c071880bc9ee04385ff3da628bed1 | 5,755 | py | Python | tests/test_models/test_review.py | CelestineAkpanoko/AirBnB_clone | 5f2828f67601641a688768cd72f55937bc92f9fb | [
"MIT"
] | null | null | null | tests/test_models/test_review.py | CelestineAkpanoko/AirBnB_clone | 5f2828f67601641a688768cd72f55937bc92f9fb | [
"MIT"
] | null | null | null | tests/test_models/test_review.py | CelestineAkpanoko/AirBnB_clone | 5f2828f67601641a688768cd72f55937bc92f9fb | [
"MIT"
] | 1 | 2021-11-17T21:56:48.000Z | 2021-11-17T21:56:48.000Z | #!/usr/bin/python3
"""A unit test module for the review model.
"""
import os
import unittest
from datetime import datetime
from models.base_model import BaseModel
from models.review import Review
class TestReview(unittest.TestCase):
    """Represents the test class for the Review class.
    """
    def test_init(self):
        """Tests the initialization of the Review class.
        """
        # Review inherits from BaseModel and declares three class-level
        # string attributes that default to the empty string.
        self.assertIsInstance(Review(), BaseModel)
        self.assertTrue(hasattr(Review, 'place_id'))
        self.assertTrue(hasattr(Review, 'user_id'))
        self.assertTrue(hasattr(Review, 'text'))
        self.assertIsInstance(Review.place_id, str)
        self.assertIsInstance(Review.user_id, str)
        self.assertIsInstance(Review.text, str)
        self.assertEqual(Review().place_id, '')
        self.assertEqual(Review().user_id, '')
        self.assertEqual(Review().text, '')
        # Positional arguments are ignored by the constructor...
        self.assertEqual(Review('p-e3').place_id, '')
        self.assertEqual(Review('u-a5').user_id, '')
        self.assertEqual(Review('T\'was fun').text, '')
        # ...while keyword arguments set the matching attributes,
        self.assertEqual(Review(place_id='p-e3').place_id, 'p-e3')
        self.assertEqual(Review(user_id='u-a5').user_id, 'u-a5')
        self.assertEqual(Review(text='T\'was fun').text, 'T\'was fun')
        # even when a positional argument is also supplied.
        self.assertEqual(Review('p-e8', place_id='p-e9').place_id, 'p-e9')
        self.assertEqual(Review('u-a3', user_id='u-a2').user_id, 'u-a2')
        self.assertEqual(Review('Loved it', text='GOOD').text, 'GOOD')
    def test_str(self):
        """Tests the __str__ function of the Review class.
        """
        datetime_now = datetime.today()
        datetime_now_repr = repr(datetime_now)
        # Expected format: "[Review] (<id>) {<__dict__>}"
        mdl = Review()
        mdl.id = '012345'
        mdl.created_at = mdl.updated_at = datetime_now
        mdl_str = str(mdl)
        self.assertIn("[Review] (012345)", mdl_str)
        self.assertIn("'id': '012345'", mdl_str)
        self.assertIn("'created_at': " + datetime_now_repr, mdl_str)
        self.assertIn("'updated_at': " + datetime_now_repr, mdl_str)
        self.assertIn("'id': ", str(Review()))
        self.assertIn("'created_at': ", str(Review()))
        self.assertIn("'updated_at': ", str(Review()))
        # When kwargs are given, only those kwargs appear in __dict__
        # (no auto-generated created_at/updated_at).
        self.assertIn(
            "'gender': 'female'",
            str(Review(gender='female', id='m-77'))
        )
        self.assertNotIn(
            "'created_at': ",
            str(Review(gender='female', id='u-88'))
        )
        self.assertNotIn(
            "'updated_at': ",
            str(Review(gender='female', id='u-55'))
        )
        self.assertRegex(
            str(Review()),
            r'\[Review\] \([0-9a-zA-Z]+(?:-[0-9a-zA-Z]+)*\) \{.+\}'
        )
        self.assertEqual(
            str(Review(id='m-345')),
            "[Review] (m-345) {'id': 'm-345'}"
        )
        self.assertEqual(
            str(Review(id=45)),
            "[Review] (45) {'id': 45}"
        )
        self.assertEqual(
            str(Review(id=None)),
            "[Review] (None) {'id': None}"
        )
        # Missing 'id' makes __str__ raise AttributeError.
        with self.assertRaises(AttributeError):
            str(Review(gender='female'))
    def test_to_dict(self):
        """Tests the to_dict function of the Review class.
        """
        # Tests if it's a dictionary
        self.assertIsInstance(Review().to_dict(), dict)
        # Tests if to_dict contains accurate keys
        self.assertIn('id', Review().to_dict())
        self.assertIn('created_at', Review().to_dict())
        self.assertIn('updated_at', Review().to_dict())
        # Tests if to_dict contains added attributes
        mdl = Review()
        mdl.firstname = 'Celestine'
        mdl.lastname = 'Akpanoko'
        self.assertIn('firstname', mdl.to_dict())
        self.assertIn('lastname', mdl.to_dict())
        self.assertIn('firstname', Review(firstname='Celestine').to_dict())
        self.assertIn('lastname', Review(lastname='Akpanoko').to_dict())
        # Tests to_dict datetime attributes if they are strings
        self.assertIsInstance(Review().to_dict()['created_at'], str)
        self.assertIsInstance(Review().to_dict()['updated_at'], str)
        # Tests to_dict output: datetimes are serialized with isoformat()
        # and the class name is injected under '__class__'.
        datetime_now = datetime.today()
        mdl = Review()
        mdl.id = '012345'
        mdl.created_at = mdl.updated_at = datetime_now
        to_dict = {
            'id': '012345',
            '__class__': 'Review',
            'created_at': datetime_now.isoformat(),
            'updated_at': datetime_now.isoformat()
        }
        self.assertDictEqual(mdl.to_dict(), to_dict)
        self.assertDictEqual(
            Review(id='u-b34', age=13).to_dict(),
            {
                '__class__': 'Review',
                'id': 'u-b34',
                'age': 13
            }
        )
        self.assertDictEqual(
            Review(id='u-b34', age=None).to_dict(),
            {
                '__class__': 'Review',
                'id': 'u-b34',
                'age': None
            }
        )
        # Tests to_dict output contradiction: '__class__' is added to the
        # serialized dict only, never stored on the instance itself.
        mdl_d = Review()
        self.assertIn('__class__', Review().to_dict())
        self.assertNotIn('__class__', Review().__dict__)
        self.assertNotEqual(mdl_d.to_dict(), mdl_d.__dict__)
        self.assertNotEqual(
            mdl_d.to_dict()['__class__'],
            mdl_d.__class__
        )
        # Tests to_dict with arg: to_dict() accepts no positional arguments.
        with self.assertRaises(TypeError):
            Review().to_dict(None)
        with self.assertRaises(TypeError):
            Review().to_dict(Review())
        with self.assertRaises(TypeError):
            Review().to_dict(45)
    def tearDown(self):
        """Deconstructs this test class.
        """
        super().tearDown()
        # Remove the FileStorage artifact so tests stay independent.
        if os.path.isfile('file.json'):
            os.unlink('file.json')
| 36.656051 | 75 | 0.559687 |
ace11c4bf840ec67456fe5347d3bd604acd14ba4 | 759 | py | Python | research/sample/django-example/papersite/urls.py | lowmess/whatiscode | 4c3277f6ce10c5604ab166ddbc7cf71afdfffbeb | [
"Apache-2.0"
] | 3,815 | 2015-06-11T11:25:34.000Z | 2022-03-20T12:54:49.000Z | research/sample/django-example/papersite/urls.py | lowmess/whatiscode | 4c3277f6ce10c5604ab166ddbc7cf71afdfffbeb | [
"Apache-2.0"
] | 129 | 2015-06-11T13:42:28.000Z | 2019-03-22T17:21:40.000Z | research/sample/django-example/papersite/urls.py | lowmess/whatiscode | 4c3277f6ce10c5604ab166ddbc7cf71afdfffbeb | [
"Apache-2.0"
] | 361 | 2015-06-11T12:41:16.000Z | 2021-12-13T04:06:58.000Z | """papersite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
# URL route table: only the Django admin site is wired up for this project.
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
]
| 34.5 | 77 | 0.706192 |
ace11c59f326cf36011d2ee6eab8ccf107471f83 | 1,429 | py | Python | test/test_compatibilities_class_active_active_item.py | Atomicology/isilon_sdk_python | 91039da803ae37ed4abf8d2a3f59c333f3ef1866 | [
"MIT"
] | null | null | null | test/test_compatibilities_class_active_active_item.py | Atomicology/isilon_sdk_python | 91039da803ae37ed4abf8d2a3f59c333f3ef1866 | [
"MIT"
] | null | null | null | test/test_compatibilities_class_active_active_item.py | Atomicology/isilon_sdk_python | 91039da803ae37ed4abf8d2a3f59c333f3ef1866 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
ref: https://github.com/swagger-api/swagger-codegen
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.compatibilities_class_active_active_item import CompatibilitiesClassActiveActiveItem
class TestCompatibilitiesClassActiveActiveItem(unittest.TestCase):
    """Unit test stubs for the CompatibilitiesClassActiveActiveItem model."""

    def setUp(self):
        # No fixtures are required for these generated stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testCompatibilitiesClassActiveActiveItem(self):
        """Smoke-test construction of CompatibilitiesClassActiveActiveItem."""
        item_module = swagger_client.models.compatibilities_class_active_active_item
        model = item_module.CompatibilitiesClassActiveActiveItem()
# Allow running this stub module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
ace11c89bfa82cf53cd6af4d42b445bb8d3a2f6e | 979 | py | Python | test.py | CMU-IDS-2020/a3-data-diggers | 470b5c5ea2db44ba5ab7f189340f16ac829d1188 | [
"BSD-3-Clause"
] | null | null | null | test.py | CMU-IDS-2020/a3-data-diggers | 470b5c5ea2db44ba5ab7f189340f16ac829d1188 | [
"BSD-3-Clause"
] | null | null | null | test.py | CMU-IDS-2020/a3-data-diggers | 470b5c5ea2db44ba5ab7f189340f16ac829d1188 | [
"BSD-3-Clause"
] | null | null | null | import streamlit as st
import pandas as pd
import math
import altair as alt
import numpy as np
import pydeck as pdk
# 1000 random points, normally distributed around San Francisco
# (lat 37.76, lon -122.4); dividing by [50, 50] shrinks the spread to
# roughly city scale.
df = pd.DataFrame(
    np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4],
    columns=['lat', 'lon'])
# Render the points on a pydeck map inside the Streamlit app.
st.pydeck_chart(pdk.Deck(
    map_style='mapbox://styles/mapbox/light-v9',
    initial_view_state=pdk.ViewState(
        latitude=37.76,
        longitude=-122.4,
        bearing = 0,
        zoom=11,
        pitch=0,
    ),
    layers=[
        # pdk.Layer(
        #     'HexagonLayer',
        #     data=df,
        #     get_position='[lon, lat]',
        #     radius=200,
        #     elevation_scale=4,
        #     elevation_range=[0, 1000],
        #     pickable=True,
        #     extruded=True,
        # ),
        # Clickable red scatter points; position/color are pydeck
        # accessor expressions evaluated per data row.
        pdk.Layer(
            'ScatterplotLayer',
            data=df,
            pickable = True,
            get_position='[lon, lat]',
            get_color='[200, 30, 0, 160]',
            get_radius=200,
        ),
    ],
))
ace11c99822f741623023dc2f8beb1b528f7d498 | 2,996 | py | Python | gpvdm_gui/gui/fit_vars.py | roderickmackenzie/gpvdm | 914fd2ee93e7202339853acaec1d61d59b789987 | [
"BSD-3-Clause"
] | 12 | 2016-09-13T08:58:13.000Z | 2022-01-17T07:04:52.000Z | gpvdm_gui/gui/fit_vars.py | roderickmackenzie/gpvdm | 914fd2ee93e7202339853acaec1d61d59b789987 | [
"BSD-3-Clause"
] | 3 | 2017-11-11T12:33:02.000Z | 2019-03-08T00:48:08.000Z | gpvdm_gui/gui/fit_vars.py | roderickmackenzie/gpvdm | 914fd2ee93e7202339853acaec1d61d59b789987 | [
"BSD-3-Clause"
] | 6 | 2019-01-03T06:17:12.000Z | 2022-01-01T15:59:00.000Z | #
# General-purpose Photovoltaic Device Model - a drift diffusion base/Shockley-Read-Hall
# model for 1st, 2nd and 3rd generation solar cells.
# Copyright (C) 2008-2022 Roderick C. I. MacKenzie r.c.i.mackenzie at googlemail.com
#
# https://www.gpvdm.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2.0, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
## @package fit_vars
# A window to define the fit variables.
#
import os
from token_lib import tokens
from str2bool import str2bool
import i18n
_ = i18n.language.gettext
#qt
from PyQt5.QtCore import QSize, Qt
from PyQt5.QtWidgets import QWidget,QVBoxLayout,QToolBar,QSizePolicy,QAction,QTabWidget,QAbstractItemView, QMenuBar, QTableWidgetItem
from PyQt5.QtGui import QPainter,QIcon
from gpvdm_tab2 import gpvdm_tab2
from scan_select import select_param
from gpvdm_json import gpvdm_data
from json_fit import json_fit_vars_line
class fit_vars(QWidget):
	"""Editor window listing the variables the fitter is allowed to adjust.

	Presents a toolbar plus a gpvdm_tab2 table bound to the fits.vars
	section of the simulation JSON; edits are saved back immediately.
	"""
	def callback_show_list(self):
		# Open the parameter picker so the user can choose a JSON variable.
		self.select_param_window.show()
	def __init__(self):
		QWidget.__init__(self)
		data=gpvdm_data()
		# Rows edited here live in the fits.vars section of the simulation data.
		self.data=data.fits.vars
		self.vbox=QVBoxLayout()
		toolbar=QToolBar()
		toolbar.setIconSize(QSize(32, 32))
		self.vbox.addWidget(toolbar)
		#tab2
		self.tab2 = gpvdm_tab2(toolbar=toolbar)
		self.tab2.set_tokens(["fit_var_enabled","human_var","min","max","error","log_fit","json_var"])
		self.tab2.set_labels([_("Enabled"),_("Variable"), _("Min"), _("Max"), _("Error function"), _("Log scale"),_("JSON Variable")])
		self.tab2.json_search_path="gpvdm_data().fits.vars.segments"
		self.tab2.fixup_new_row=self.fixup_new_row
		self.tab2.setColumnWidth(1, 400)
		self.tab2.setColumnWidth(2, 100)
		self.tab2.setColumnWidth(3, 100)
		self.tab2.setColumnWidth(4, 100)
		self.tab2.setColumnWidth(5, 100)
		self.tab2.setColumnWidth(6, 20)
		self.tab2.base_obj=json_fit_vars_line()
		self.tab2.populate()
		self.tab2.changed.connect(self.callback_save)
		self.tab2.callback_a=self.callback_show_list
		self.vbox.addWidget(self.tab2)
		# Column 1 holds the human-readable path, column 6 the raw JSON path.
		self.select_param_window=select_param(self.tab2)
		self.select_param_window.human_path_col=1
		self.select_param_window.json_path_col=6
		self.select_param_window.update()
		self.select_param_window.set_save_function(self.callback_save)
		self.setLayout(self.vbox)
	def fixup_new_row(self,row):
		# Newly inserted rows need their picker button wired up manually.
		self.tab2.cellWidget(row, 1).button.clicked.connect(self.callback_show_list)
	def callback_save(self):
		# Persist the whole simulation JSON whenever the table changes.
		gpvdm_data().save()
| 31.208333 | 133 | 0.760681 |
ace11cbe9534f7cb6d6dd59c5e8803f51f1abcfe | 12,348 | py | Python | ModularEngine/samples/MultiMain/MuddyChildrenProblem.py | amelentev/DKAL | 0170ecdc3c5d88cc2d3817a27d99e635de42859a | [
"Apache-2.0"
] | null | null | null | ModularEngine/samples/MultiMain/MuddyChildrenProblem.py | amelentev/DKAL | 0170ecdc3c5d88cc2d3817a27d99e635de42859a | [
"Apache-2.0"
] | null | null | null | ModularEngine/samples/MultiMain/MuddyChildrenProblem.py | amelentev/DKAL | 0170ecdc3c5d88cc2d3817a27d99e635de42859a | [
"Apache-2.0"
] | null | null | null | import sys
import getopt
# Raw mDKAL text emitted verbatim at the top of the generated policy file:
# type aliases plus every relation used by mom and the children.
HEADER_DEFINITIONS= (""
"/// Logic: UFOL\n"
"/// This example models the muddy children problem\n"
"///\n"
"type String = System.String\n"
"type Int = System.Int32\n"
"type Principal = Dkal.Principal\n"
"\n"
"relation dontKnow(P: Principal)\n"
"relation isMuddy(P: Principal)\n"
"relation isClean(P: Principal)\n"
"relation log(S: String)\n"
"relation question(N: Int)\n"
"relation hasConclusion()\n"
"relation startQuestionRound()\n"
"relation answered(P: Principal)\n"
"relation start()\n"
"relation end()\n"
"relation partialanswer()\n"
"\n"
"/// In the beginning, each child knows:\n"
"/// - every other child status\n"
"/// - that is ignorant of its own status\n"
"/// - that no round has elapsed\n"
"\n")
# Mom's policy skeleton.  The <...> placeholders are filled in by
# createMomPolicy() with per-child 'forget'/question/answer sections.
MOM_POLICY_TEMPLATE = ("---mom-----------------------------\n"
"knows start()\n"
"knows question(0)\n"
"\n"
"with X:Int\n"
"\tif start() && question(X) do\n"
"<FORGET_CHILDREN_STATUS_TEMPLATE>"
"<SEND_QUESTIONX_TEMPLATE>"
"\t\tforget start()\n"
"\n"
"with P: Principal\n"
"\tupon P said isMuddy(P) do\n"
"\t\tlearn isMuddy(P) learn answered(P)\n"
"\tupon P said isClean(P) do\n"
"\t\tlearn isClean(P) learn answered(P)\n"
"\tupon P said dontKnow(P) do\n"
"\t\tlearn dontKnow(P)\n"
"\t\tlearn answered(P)\n"
"\t\tlearn partialanswer()\n"
"\n"
"with X:Int, Y:Int\n"
"if\n"
"<CHILDREN_ANSWERED_TEMPLATE>"
"\tpartialanswer() &&\n"
"\tquestion(X) && asInfon({|\"basic\"|Y:=X+1|})\n"
"do\n"
"\tlearn start()\n"
"<FORGET_ANSWERS_TEMPLATE>"
"\tforget partialanswer()\n"
"\tforget question(X)\n"
"\tlearn question(Y)\n" )
# Per-child policy skeleton; placeholders are filled by createChildPolicy()
# (sibling status, listening rules, broadcast sections and deduction steps).
CHILD_POLICY_TEMPLATE= ("---<CHILD_NAME>-----------------------------\n"
"knows dontKnow(me)\n"
"knows not end()\n"
"<NOONE_BUT_ME_SAID_ANYTHING_YET>\n"
"<EVERY_OTHER_CHILD_STATUS>\n"
"<LEARN_WHAT_PEOPLE_SAY>\n"
"with X:Int\n"
"\tupon mom said question(X) do\n"
"\t\tlearn question(X)\n"
"\t\tlearn startQuestionRound()\n"
"\n"
"<CHILD_DEDUCTION>\n"
"with X:Int\n"
"\tif question(X) do forget question(X)\n"
"if hasConclusion() do\n"
"\tforget hasConclusion()\n"
"\n"
"if hasConclusion() && isMuddy(me) && not end()\n"
"do\n"
"<SEND_MUDDY_TO_EVERYONE>"
"\trelearn end()\n"
"\n"
"if hasConclusion() && isClean(me) && not end()\n"
"do\n"
"<SEND_CLEAN_TO_EVERYONE>"
"\trelearn end()\n"
"\n"
"if hasConclusion() && dontKnow(me) && not end()\n"
"do\n"
"<SEND_DONT_KNOW_TO_EVERYONE>\n")
# One distinct name per possible child (a-z), which caps the problem at 26.
childrenNames= ["alice", "bob", "charlie", "dick", "emily", "frank", "gertie", "homer", "ione", "jack", "kevin", "leslie", "mary", "nathan", "ozzie",
                "peter", "quisani", "rosie", "steve", "trixie", "ulysses", "valerie", "walter", "xavier", "yuri", "zack"]
def createDeductionSteps(childIndex, children, muddy):
    """Build the mDKAL deduction rules for child number ``childIndex``.

    Emits rules for the relevant situations: a sibling already announced
    it is clean; the child sees no muddy face at round 0; the question
    count is still below the number of muddy faces it sees; and the
    question count equals that number, with or without a prior muddy
    announcement.  ``children`` is accepted for call-site symmetry with
    the other builders but is not needed here.
    """
    parts = []
    # If someone announced being clean, any child still unsure is muddy.
    parts.append("with P: Dkal.Principal\n")
    parts.append("\tif P said isClean(P) && not end() && startQuestionRound() do\n")
    parts.append("\t\tlearn hasConclusion()\n")
    parts.append("\t\tlearn isMuddy(me)\n")
    parts.append("\t\tforget dontKnow(me)\n")
    parts.append("\t\tforget startQuestionRound()\n\n")
    # How many muddy faces this child can see (it cannot see its own):
    # indices below `muddy` are the muddy children.
    if childIndex < muddy:
        visible = muddy - 1
    else:
        visible = muddy
    sees_none = "true" if visible == 0 else "false"
    parts.append("// I check whether I see no muddy faces (if I see no muddy faces, I must be muddy myself).\n")
    parts.append("if question(0) && asInfon({|\"basic\"|" + sees_none + "|}) && not end() do\n")
    parts.append("\tlearn hasConclusion()\n")
    parts.append("\tlearn isMuddy(me)\n")
    parts.append("\tforget dontKnow(me)\n")
    parts.append("\tforget startQuestionRound()\n\n")
    parts.append("// If I see a certain number of muddy faces and mom has asked less times than that, then I cannot be sure if I am muddy or not.\n")
    if visible != 0:
        parts.append("with X: Int\n")
        parts.append("\tif question(X) && asInfon({|\"basic\"|X < " + str(muddy) + "|}) && not end() && startQuestionRound() do\n")
        parts.append("\t\tlearn hasConclusion()\n")
        parts.append("\t\tforget startQuestionRound()\n\n")
    # Quantify one principal variable per visible muddy face, and require
    # all of them to be pairwise distinct.
    quantifier = "with X:Int"
    distinct = "asInfon({|\"basic\"|true|})"
    for i in range(visible):
        quantifier += ", P" + str(i) + ": Principal"
        for j in range(i):
            distinct += " && asInfon({|\"basic\"|P" + str(i) + "!=P" + str(j) + "|})"
    if visible <= 1:
        # Fewer than two principals need no distinctness constraints.
        distinct = "asInfon({|\"basic\"|true|})"
    stayed_silent = "".join(
        "&& isMuddy(P{0}) && not P{0} said isMuddy(P{0}) ".format(i)
        for i in range(visible))
    parts.append("// If mom has asked as many times as muddy faces I see, I need to pay attention to what the muddy children do.\n")
    parts.append("// If they don't say they are muddy, it is because they are not sure. If they are not sure it must be because they see another muddy face -- mine.\n")
    if visible != 0:
        parts.append(quantifier + "\n")
        parts.append("\tif question(X) && asInfon({|\"basic\"|X := " + str(visible) + "|}) && not end() && startQuestionRound() && " + distinct + " " + stayed_silent + " do\n")
        parts.append("\t\tlearn hasConclusion()\n")
        parts.append("\t\tlearn isMuddy(me)\n")
        # NOTE: the stray leading spaces below reproduce the original
        # generator's output byte-for-byte.
        parts.append(" \t\tforget dontKnow(me)\n")
        parts.append(" \t\tforget startQuestionRound()\n\n")
    announced = "(asInfon({|\"basic\"|false|})" + "".join(
        "|| P{0} said isMuddy(P{0})".format(i) for i in range(visible)) + ")"
    parts.append("// If however they do say they are muddy, they must be sure of that. Therefore they don't see another muddy face. I must be clean.\n")
    if visible != 0:
        parts.append(quantifier + "\n")
        parts.append("\tif question(X) && asInfon({|\"basic\"|X := " + str(visible) + "|}) && not end() && startQuestionRound() && " + distinct + " && " + announced + " do\n")
        parts.append("\t\tlearn hasConclusion()\n")
        parts.append("\t\tlearn isClean(me)\n")
        parts.append("\t\tforget dontKnow(me)\n")
        parts.append("\t\tforget startQuestionRound()\n\n")
    return "".join(parts)
def createChildPolicy(childIndex, children, muddy):
    """Build the complete mDKAL policy text for one child.

    The child named ``childrenNames[childIndex]`` knows every sibling's
    mud status, listens to mom's questions and the other children's
    announcements, broadcasts its own conclusions, and runs the
    deduction rules produced by createDeductionSteps().
    """
    me = childrenNames[childIndex]
    siblings = [(i, childrenNames[i]) for i in range(children) if i != childIndex]
    policy = CHILD_POLICY_TEMPLATE.replace("<CHILD_NAME>", me)
    # Initially no sibling has announced anything.
    silence = "".join(
        "knows not {0} said isMuddy({0})\n"
        "knows not {0} said isClean({0})\n".format(name)
        for _, name in siblings)
    policy = policy.replace("<NOONE_BUT_ME_SAID_ANYTHING_YET>", silence)
    # Each child can see whether every *other* child is muddy or clean;
    # by convention the first `muddy` indices are the muddy children.
    status = "".join(
        "knows {0}({1})\n".format("isMuddy" if i < muddy else "isClean", name)
        for i, name in siblings)
    policy = policy.replace("<EVERY_OTHER_CHILD_STATUS>", status)
    # Record every announcement made by a sibling.
    listen = "".join(
        "upon {0} said isMuddy({0}) do\n"
        "\trelearn {0} said isMuddy({0})\n"
        "upon {0} said isClean({0}) do\n"
        "\trelearn {0} said isClean({0})\n".format(name)
        for _, name in siblings)
    policy = policy.replace("<LEARN_WHAT_PEOPLE_SAY>", listen)

    def broadcast(fact):
        # Announcements go to mom first, then to every sibling in order.
        lines = ["\tsend to mom: " + fact + "(me)\n"]
        lines.extend("\tsend to " + name + ": " + fact + "(me)\n"
                     for _, name in siblings)
        return "".join(lines)

    policy = policy.replace("<SEND_MUDDY_TO_EVERYONE>", broadcast("isMuddy"))
    policy = policy.replace("<SEND_CLEAN_TO_EVERYONE>", broadcast("isClean"))
    policy = policy.replace("<SEND_DONT_KNOW_TO_EVERYONE>", broadcast("dontKnow"))
    policy = policy.replace("<CHILD_DEDUCTION>",
                            createDeductionSteps(childIndex, children, muddy))
    return policy
def createMomPolicy(children, muddy):
    """Instantiate MOM_POLICY_TEMPLATE for the given number of children.

    Fills in the per-child status-reset, question-broadcast,
    all-answered check and answer-reset sections.  ``muddy`` is accepted
    for call-site symmetry with createChildPolicy() but is unused.
    """
    names = childrenNames[:children]
    forget_status = "".join(
        "\t\tforget isMuddy({0})\n"
        "\t\tforget isClean({0})\n"
        "\t\tforget dontKnow({0})\n".format(name)
        for name in names)
    questions = "".join("\t\tsend to {0}: question(X)\n".format(name)
                        for name in names)
    answers = "".join("\tanswered({0}) && \n".format(name) for name in names)
    forget_answers = "".join("\tforget answered({0})\n".format(name)
                             for name in names)
    policy = MOM_POLICY_TEMPLATE.replace("<FORGET_CHILDREN_STATUS_TEMPLATE>",
                                         forget_status)
    policy = policy.replace("<SEND_QUESTIONX_TEMPLATE>", questions)
    policy = policy.replace("<CHILDREN_ANSWERED_TEMPLATE>", answers)
    policy = policy.replace("<FORGET_ANSWERS_TEMPLATE>", forget_answers)
    return policy
def showUsage():
    """Print command-line usage information to stdout."""
    usage_lines = (
        "MuddyChildrenProblem -- builds an instance of the muddy children problem for DKAL to solve. Children are named by different initials, so no more than 26 are allowed.",
        "Usage:",
        "\tMuddyChildrenProblem --children <number_of_children> --muddy <number_of_muddy_children>",
        "Options:",
        "\t--children <number_of_children>: total children in the group, at least 2 and at most 26. Short form: -c. Required.",
        "\t--muddy <number_of_muddy_children>: Number of muddy children, between 1 and number_of_children. Short form: -m. Required.",
    )
    print("\n".join(usage_lines))
def main():
    """Parse CLI options and write the generated .mdkal policy file.

    Exits with status 2 on any usage error, 0 on success.
    """
    try:
        opts, args= getopt.getopt(sys.argv[1:], "c:m:", ["children=","muddy="])
    except getopt.error as msg:
        print(msg)
        showUsage()
        sys.exit(2)
    # Both --children and --muddy are required.
    if len(opts) < 2:
        print("Missing options")
        showUsage()
        sys.exit(2)
    children = muddy = 0
    for o, a in opts:
        if o in ["-c","--children"]:
            children= int(a)
        if o in ["-m", "--muddy"]:
            muddy= int(a)
    # Children are named by distinct initials, so 26 is the hard maximum.
    if children < 2 or children > 26:
        print("At least 2 children are necessary, and no more than 26")
        showUsage()
        sys.exit(2)
    if muddy < 1:
        print("Muddy children must be between 1 and the total children")
        showUsage()
        sys.exit(2)
    if children < muddy:
        print("Muddy children must be between 1 and the total children")
        showUsage()
        sys.exit(2)
    # Emit header, mom's policy, then one policy section per child.
    dkalPolicyFile= open("muddychildren_" + str(children) + "_" + str(muddy) + ".mdkal", "w")
    dkalPolicyFile.write(HEADER_DEFINITIONS)
    dkalPolicyFile.write(createMomPolicy(children, muddy) + "\n")
    for i in range(0,children):
        dkalPolicyFile.write(createChildPolicy(i, children, muddy) + "\n")
    dkalPolicyFile.close()
    sys.exit(0)
# Script entry point: parse CLI options and emit the .mdkal policy file.
if __name__ == "__main__":
    main()
| 40.221498 | 198 | 0.656544 |
ace11cf1f83869f54fb543b09f98a0c4a0e510cc | 1,106 | py | Python | huaban/views.py | zhangjingqiang/souhuaban | 978b7f96222f916c827d844dd48f565f7156e714 | [
"MIT"
] | null | null | null | huaban/views.py | zhangjingqiang/souhuaban | 978b7f96222f916c827d844dd48f565f7156e714 | [
"MIT"
] | null | null | null | huaban/views.py | zhangjingqiang/souhuaban | 978b7f96222f916c827d844dd48f565f7156e714 | [
"MIT"
] | null | null | null | # coding: utf-8
from django.shortcuts import render_to_response
import requests
from bs4 import BeautifulSoup
def index(request):
if request.GET.get('q'):
q = request.GET.get('q')
else:
q = ""
google_search = 'https://www.google.com/search?hl=zh-CN&q=site:huaban.com+' + q
# dict
result = {}
# requests
r = requests.get(google_search)
# beautifulsoup
html_doc = r.text
soup = BeautifulSoup(html_doc)
h3 = soup.find_all("h3")
for a in h3:
result[a.find('a').get('href')] = a.find('a').get_text()
# template
return render_to_response('huaban/index.html',
{
'result':result,
'google_search':google_search,
'q':q,
'active':'index',
})
def about(request):
return render_to_response('huaban/about.html',
{
'active':'about'
}) | 28.358974 | 83 | 0.465642 |
ace11d4f79222b34ddfa5a50ef38591d35a20f82 | 4,118 | py | Python | cogs/getCalendar.py | MarkFrankle/Discord-Calendar-Bot | 264da72e0d4362307ad6e805ad82420ecb1c08da | [
"MIT"
] | 2 | 2018-06-27T03:44:14.000Z | 2021-07-05T06:45:44.000Z | cogs/getCalendar.py | MarkFrankle/Discord-Calendar-Bot | 264da72e0d4362307ad6e805ad82420ecb1c08da | [
"MIT"
] | null | null | null | cogs/getCalendar.py | MarkFrankle/Discord-Calendar-Bot | 264da72e0d4362307ad6e805ad82420ecb1c08da | [
"MIT"
] | null | null | null | from __future__ import print_function
from apiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
import calendar
import datetime
import discord
from discord.ext import commands
import math
from dateutil.relativedelta import *
from dateutil.easter import *
from dateutil.rrule import *
from dateutil.parser import *
# discord.py calls groups of commands cogs
# cogs can also be handlers for different types of events
# and respond to changes in data as they happen
# setup
class CalendarCog:
def __init__(self, bot):
self.bot = bot
# get the calendar
@commands.command()
async def getCalendar(self, ctx):
# Call the Calendar API
now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time
await ctx.send('Getting the upcoming 10 events')
store = file.Storage('credentials.json')
creds = store.get()
service = build('calendar', 'v3', http=creds.authorize(Http()))
events_result = service.events().list(calendarId='uw.edu_1g1n97mk4kumu34fooleqqkbn8@group.calendar.google.com', timeMin=now,
# events_result = service.events().list(calendarId=*, timeMin=now,
maxResults=10, singleEvents=True,
orderBy='startTime').execute()
events = events_result.get('items', [])
if not events:
await ctx.send('No upcoming events found.')
for event in events:
start = event['start'].get('dateTime', event['start'].get('date'))
# await ctx.send(start, event['summary'])
print(event['summary'])
await ctx.send(start)
@commands.command()
async def getAllCalendars(self, ctx):
await ctx.send('Available Calendars:')
store = file.Storage('credentials.json')
creds = store.get()
service = build('calendar', 'v3', http=creds.authorize(Http()))
calendar_list = service.calendarList().list(pageToken=None).execute()
for calendar_list_entry in calendar_list['items']:
await ctx.send(calendar_list_entry['summary'])
@commands.command()
async def getCalendarByName(self, ctx, *, calName):
now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time
store = file.Storage('credentials.json')
creds = store.get()
service = build('calendar', 'v3', http=creds.authorize(Http()))
calendar_list = service.calendarList().list(pageToken=None).execute()
for calendar_list_entry in calendar_list['items']:
# print(calendar_list_entry['summary'], '==', calName,': ', str(calendar_list_entry['summary'] == calName))
if calendar_list_entry['summary'] == calName:
events_result = service.events().list(calendarId=calendar_list_entry['id'],
timeMin=now, maxResults=3, singleEvents=True, orderBy='startTime').execute()
events = events_result.get('items', [])
# print(len(events))
if not events:
await ctx.send('No upcoming events found in ' + calName)
else:
await ctx.send('Upcoming events in ' + calName + ':')
for event in events:
start = event['start'].get('dateTime', event['start'].get('date'))
# print('start.get(date): ',event['start'].get('dateTime'), ' Type: ', type(event['start']))
# print(event['start'].get('dateTime'))
eventTime = parse(event['start'].get('dateTime'))
monthStr = eventTime.strftime("%B")
printedTime = monthStr + ' ' + str(eventTime.day) + ', ' + str(eventTime.year)
# await ctx.send(start, event['summary'])
# print(event['summary'])
await ctx.send(printedTime)
await ctx.send(event['summary'])
# add this cog to the bot
def setup(bot):
bot.add_cog(CalendarCog(bot))
| 45.755556 | 132 | 0.600777 |
ace11d7d3c01d5f175a5677ed06610ed67714214 | 805 | py | Python | dJango/project0/urls.py | GursimranSinghKahlon/EdTech | 5944019ccb3d054dbb1f8eb8a81363cb7bae9de6 | [
"MIT"
] | 1 | 2019-05-25T04:22:00.000Z | 2019-05-25T04:22:00.000Z | dJango/project0/urls.py | GursimranSinghKahlon/EdTech | 5944019ccb3d054dbb1f8eb8a81363cb7bae9de6 | [
"MIT"
] | null | null | null | dJango/project0/urls.py | GursimranSinghKahlon/EdTech | 5944019ccb3d054dbb1f8eb8a81363cb7bae9de6 | [
"MIT"
] | null | null | null | """project0 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('detect0/', include('detect0.urls'))
]
| 35 | 77 | 0.70559 |
ace11f86a20cabe6ea8daf85b0c328c4f7a9d964 | 4,886 | py | Python | examples/inverse/mne_cov_power.py | guiomar/mne-python | 2d19800a07904cfe69c1ba290c3eaf712625c6ab | [
"BSD-3-Clause"
] | 2 | 2020-05-11T13:34:36.000Z | 2020-05-28T19:43:21.000Z | examples/inverse/mne_cov_power.py | guiomar/mne-python | 2d19800a07904cfe69c1ba290c3eaf712625c6ab | [
"BSD-3-Clause"
] | 4 | 2015-04-27T09:55:46.000Z | 2018-10-01T10:03:47.000Z | examples/inverse/mne_cov_power.py | guiomar/mne-python | 2d19800a07904cfe69c1ba290c3eaf712625c6ab | [
"BSD-3-Clause"
] | null | null | null | """
===================================================================
Compute source power estimate by projecting the covariance with MNE
===================================================================
We can apply the MNE inverse operator to a covariance matrix to obtain
an estimate of source power. This is computationally more efficient than first
estimating the source timecourses and then computing their power. This
code is based on the code from :footcite:`Sabbagh2020` and has been useful to
correct for individual field spread using source localization in the context of
predictive modeling.
References
----------
.. footbibliography::
"""
# Author: Denis A. Engemann <denis-alexander.engemann@inria.fr>
# Luke Bloy <luke.bloy@gmail.com>
#
# License: BSD-3-Clause
# %%
import os.path as op
import numpy as np
import mne
from mne.datasets import sample
from mne.minimum_norm import make_inverse_operator, apply_inverse_cov
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname)
# %%
# Compute empty-room covariance
# -----------------------------
# First we compute an empty-room covariance, which captures noise from the
# sensors and environment.
raw_empty_room_fname = op.join(
data_path, 'MEG', 'sample', 'ernoise_raw.fif')
raw_empty_room = mne.io.read_raw_fif(raw_empty_room_fname)
raw_empty_room.crop(0, 60)
raw_empty_room.info['bads'] = ['MEG 2443']
raw_empty_room.add_proj(raw.info['projs'])
noise_cov = mne.compute_raw_covariance(
raw_empty_room, method=['empirical', 'shrunk'])
del raw_empty_room
# %%
# Epoch the data
# --------------
raw.info['bads'] = ['MEG 2443', 'EEG 053']
raw.load_data().filter(4, 12)
events = mne.find_events(raw, stim_channel='STI 014')
event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4)
tmin, tmax = -0.2, 0.5
baseline = (None, 0) # means from the first instant to t = 0
reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)
epochs = mne.Epochs(raw.copy().filter(4, 12), events, event_id, tmin, tmax,
proj=True, picks=('meg', 'eog'), baseline=None,
reject=reject, preload=True)
del raw
# %%
# Compute and plot covariances
# ----------------------------
# In addition to the empty-room covariance above, we compute two additional
# covariances:
#
# 1. Baseline covariance, which captures signals not of interest in our
# analysis (e.g., sensor noise, environmental noise, physiological
# artifacts, and also resting-state-like brain activity / "noise").
# 2. Data covariance, which captures our activation of interest (in addition
# to noise sources).
base_cov = mne.compute_covariance(
epochs, tmin=-0.2, tmax=0, method=['shrunk', 'empirical'], rank=None,
verbose=True)
data_cov = mne.compute_covariance(
epochs, tmin=0., tmax=0.2, method=['shrunk', 'empirical'], rank=None,
verbose=True)
fig_noise_cov = mne.viz.plot_cov(noise_cov, epochs.info, show_svd=False)
fig_base_cov = mne.viz.plot_cov(base_cov, epochs.info, show_svd=False)
fig_data_cov = mne.viz.plot_cov(data_cov, epochs.info, show_svd=False)
# %%
# We can also look at the covariances using topomaps, here we just show the
# baseline and data covariances, followed by the data covariance whitened
# by the baseline covariance:
evoked = epochs.average().pick('meg')
evoked.drop_channels(evoked.info['bads'])
evoked.plot(time_unit='s')
evoked.plot_topomap(times=np.linspace(0.05, 0.15, 5), ch_type='mag')
noise_cov.plot_topomap(evoked.info, 'grad', title='Noise')
data_cov.plot_topomap(evoked.info, 'grad', title='Data')
data_cov.plot_topomap(evoked.info, 'grad', noise_cov=noise_cov,
title='Whitened data')
# %%
# Apply inverse operator to covariance
# ------------------------------------
# Finally, we can construct an inverse using the empty-room noise covariance:
# Read the forward solution and compute the inverse operator
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-oct-6-fwd.fif'
fwd = mne.read_forward_solution(fname_fwd)
# make an MEG inverse operator
info = evoked.info
inverse_operator = make_inverse_operator(info, fwd, noise_cov,
loose=0.2, depth=0.8)
# %%
# Project our data and baseline covariance to source space:
stc_data = apply_inverse_cov(data_cov, evoked.info, inverse_operator,
nave=len(epochs), method='dSPM', verbose=True)
stc_base = apply_inverse_cov(base_cov, evoked.info, inverse_operator,
nave=len(epochs), method='dSPM', verbose=True)
# %%
# And visualize power is relative to the baseline:
# sphinx_gallery_thumbnail_number = 9
stc_data /= stc_base
brain = stc_data.plot(subject='sample', subjects_dir=subjects_dir,
clim=dict(kind='percent', lims=(50, 90, 98)))
| 36.192593 | 79 | 0.683995 |
ace11fcc8c4b725ba107391d9fbce38a01cc2563 | 2,563 | py | Python | tests/test_scale_intensity_range_percentiles.py | Irme/MONAI | dc4bf661831b14f4231cb325cc1b15d38c1e406c | [
"Apache-2.0"
] | 3 | 2021-11-23T08:03:02.000Z | 2022-03-18T09:56:01.000Z | tests/test_scale_intensity_range_percentiles.py | Scitator/MONAI | a42b563acf0c7504cee18ee84c8af2eff6e948a7 | [
"Apache-2.0"
] | 1 | 2020-09-17T12:41:51.000Z | 2020-09-29T15:20:37.000Z | tests/test_scale_intensity_range_percentiles.py | Scitator/MONAI | a42b563acf0c7504cee18ee84c8af2eff6e948a7 | [
"Apache-2.0"
] | 1 | 2020-06-11T13:03:02.000Z | 2020-06-11T13:03:02.000Z | # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from monai.transforms.intensity.array import ScaleIntensityRangePercentiles
from tests.utils import NumpyImageTestCase2D
class TestScaleIntensityRangePercentiles(NumpyImageTestCase2D):
def test_scaling(self):
img = self.imt
lower = 10
upper = 99
b_min = 0
b_max = 255
a_min = np.percentile(img, lower)
a_max = np.percentile(img, upper)
expected = (img - a_min) / (a_max - a_min)
expected = (expected * (b_max - b_min)) + b_min
scaler = ScaleIntensityRangePercentiles(lower=lower, upper=upper, b_min=b_min, b_max=b_max)
self.assertTrue(np.allclose(expected, scaler(img)))
def test_relative_scaling(self):
img = self.imt
lower = 10
upper = 99
b_min = 100
b_max = 300
scaler = ScaleIntensityRangePercentiles(lower=lower, upper=upper, b_min=b_min, b_max=b_max, relative=True)
expected_a_min = np.percentile(img, lower)
expected_a_max = np.percentile(img, upper)
expected_b_min = ((b_max - b_min) * (lower / 100.0)) + b_min
expected_b_max = ((b_max - b_min) * (upper / 100.0)) + b_min
expected_img = (img - expected_a_min) / (expected_a_max - expected_a_min)
expected_img = (expected_img * (expected_b_max - expected_b_min)) + expected_b_min
self.assertTrue(np.allclose(expected_img, scaler(img)))
def test_invalid_instantiation(self):
self.assertRaises(AssertionError, ScaleIntensityRangePercentiles, lower=-10, upper=99, b_min=0, b_max=255)
self.assertRaises(AssertionError, ScaleIntensityRangePercentiles, lower=101, upper=99, b_min=0, b_max=255)
self.assertRaises(AssertionError, ScaleIntensityRangePercentiles, lower=30, upper=-20, b_min=0, b_max=255)
self.assertRaises(AssertionError, ScaleIntensityRangePercentiles, lower=30, upper=900, b_min=0, b_max=255)
if __name__ == "__main__":
unittest.main()
| 42.016393 | 114 | 0.705033 |
ace120d76cf2d532a23ab984d1508d2837485f8f | 8,950 | py | Python | autoencoder/gnina_autoencoder.py | OliverT1/gnina_tensorflow | 339310c643a85e6df1248d03dbbe4ae78cf59f19 | [
"MIT"
] | null | null | null | autoencoder/gnina_autoencoder.py | OliverT1/gnina_tensorflow | 339310c643a85e6df1248d03dbbe4ae78cf59f19 | [
"MIT"
] | null | null | null | autoencoder/gnina_autoencoder.py | OliverT1/gnina_tensorflow | 339310c643a85e6df1248d03dbbe4ae78cf59f19 | [
"MIT"
] | 1 | 2020-11-30T12:08:20.000Z | 2020-11-30T12:08:20.000Z | """
Created on Tue Jun 23 14:45:32 2020
@author: scantleb
@brief: Main script for using an autoencoder to reduce the dimensionality of
gnina inputs.
"""
import os
from pathlib import Path
import molgrid
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa
from matplotlib import pyplot as plt
from tensorflow.python.util import deprecation
from autoencoder import autoencoder_definitions, parse_command_line_args, \
schedules
from autoencoder.calculate_encodings import calculate_encodings
from autoencoder.train import train
from utilities.gnina_functions import Timer, get_dims, write_process_info
def main():
# Parse and sanitise command line args
ae, args = parse_command_line_args.parse_command_line_args('train')
# There really are a lot of these and they are not useful to scientists
# using this software. Only log errors (unless verbose)
if not args.verbose:
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
deprecation._PRINT_DEPRECATION_WARNINGS = False
# For use later when defining model
architectures = {'single': autoencoder_definitions.SingleLayerAutoEncoder,
'dense': autoencoder_definitions.DenseAutoEncoder,
'multi': autoencoder_definitions.MultiLayerAutoEncoder,
'res': autoencoder_definitions.ResidualAutoEncoder}
molgrid.set_gpu_enabled(1 - args.use_cpu)
# Use learning rate schedule or single learning rate
if args.max_lr > 0 and args.min_lr > 0:
if args.optimiser not in ['sgdw', 'adamw']:
raise RuntimeError(
'Learning rate scheduling only compatible with AdamW and SGDW '
'optimisers.'
)
if args.learning_rate_schedule is None:
raise RuntimeError(
'Max and min learning rates must be used in conjunction with '
'a learning rate schedule.'
)
lrs_args = [args.min_lr, args.max_lr]
lrs_kwargs = {}
if args.learning_rate_schedule == '1cycle':
scheduler = schedules.OneCycle
lrs_kwargs.update({'iterations': args.iterations})
elif args.learning_rate_schedule == 'warm_restarts':
scheduler = schedules.WarmRestartCosine
lrs_kwargs.update(
{'beta': args.lrs_beta, 'period': args.lrs_period})
elif args.learning_rate_schedule == 'stepwise':
scheduler = schedules.StepWiseDecay
lrs_kwargs.update(
{'t': args.lrs_period, 'beta': args.lrs_beta})
else:
raise RuntimeError(
'learning_rate_schedule must be one of "1cycle", '
'"warm_restarts" or "stepwise".')
lrs = scheduler(*lrs_args, **lrs_kwargs)
opt_args = {'weight_decay': 1e-4}
else:
opt_args = {'lr': args.learning_rate}
lrs = schedules.ConstantLearningRateSchedule(args.learning_rate)
if args.momentum > 0:
opt_args['momentum'] = args.momentum
if args.optimiser.startswith('sgd'):
opt_args['nesterov'] = args.nesterov
barred_args = ['resume']
loss_log = None
starting_iter = 0
if args.resume:
if not args.load_model:
raise RuntimeError(
'--resume must be used in conjunction with load_model')
if args.optimiser == 'adamw':
optimiser = tfa.optimizers.AdamW
elif args.optimiser == 'sgdw':
optimiser = tfa.optimizers.SGDW
else:
optimiser = tf.keras.optimizers.get(args.optimiser).__class__
ae.optimizer = optimiser(
**opt_args
)
log_fname = Path(
args.load_model).expanduser().parents[1] / 'loss_log.txt'
starting_iter = int(str(Path(args.load_model).name).split('_')[-1])
with open(log_fname, 'r') as f:
loss_log = '\n'.join(
f.read().split('\n')[:starting_iter + 1]) + '\n'
barred_args.append('load_model')
arg_str = '\n'.join(
[
'{0} {1}'.format(param, argument)
for param, argument
in vars(args).items()
if param not in barred_args
]
)
save_path = Path(args.save_path, args.name).expanduser().resolve()
if args.momentum > 0 and args.optimiser.lower() not in ['sgd', 'rmsprop',
'sgdw']:
raise RuntimeError(
'Momentum only used for RMSProp and SGD optimisers.')
if not Path(args.train).exists():
raise RuntimeError('{} does not exist.'.format(args.train))
Path(save_path, 'checkpoints').mkdir(parents=True, exist_ok=True)
arg_str += '\nabsolute_save_path {}\n'.format(save_path)
print(arg_str)
if not args.resume:
with open(save_path / 'config', 'w') as f:
f.write(arg_str)
tf.keras.backend.clear_session()
if ae is None: # No loaded model
ae = architectures[args.model](
get_dims(args.dimension, args.resolution, args.ligmap, args.recmap),
encoding_size=args.encoding_size,
optimiser=args.optimiser,
loss=args.loss,
batch_size=args.batch_size,
hidden_activation=args.hidden_activation,
final_activation=args.final_activation,
encoding_activation=args.encoding_activation,
conv_filters=args.conv_filters,
metric_distance_threshold=args.metric_distance_threshold,
learning_rate_schedule=lrs,
adversarial=args.adversarial,
adversarial_variance=args.adversarial_variance,
**opt_args)
else:
ae.learning_rate_schedule = lrs
with open(save_path / 'model.summary', 'w') as f:
ae.summary(line_length=80, print_fn=lambda x: f.write(x + '\n'))
ae.summary()
if args.loss not in ['composite_mse', 'distance_mse']:
tf.keras.utils.plot_model(
ae, save_path / 'model.png', show_shapes=True)
# Logging process ID is useful for memory profiling (see utilities)
write_process_info(__file__, save_path)
losses, nonzero_losses, zero_losses = train(
ae,
data_root=args.data_root,
train_types=args.train,
iterations=args.iterations,
batch_size=args.batch_size,
save_path=save_path,
dimension=args.dimension,
resolution=args.resolution,
loss_fn=args.loss,
ligmap=args.ligmap,
recmap=args.recmap,
save_interval=args.save_interval,
metric_distance_threshold=args.metric_distance_threshold,
overwrite_checkpoints=args.overwrite_checkpoints,
binary_mask=args.binary_mask,
denoising=args.denoising,
loss_log=loss_log,
starting_iter=starting_iter
)
print('\nFinished training.')
# Plot zero, nonzero mse
fig, ax1 = plt.subplots()
ax1.set_xlabel('Batches')
ax2 = ax1.twinx()
axes = [ax1, ax2]
cols = ['r-', 'b-']
labels = ['Zero_MAE', 'Nonzero_MAE']
lines = []
for idx, losses in enumerate([zero_losses, nonzero_losses]):
gap = 100
losses = [np.mean(losses[n:n + gap]) for n in
range(0, len(losses), gap)]
line, = axes[idx].plot(
np.arange(len(losses)) * gap, losses, cols[idx], label=labels[idx])
axes[idx].set_ylabel('Loss')
lines.append(line)
ax1.legend(lines, [line.get_label() for line in lines])
fig.savefig(save_path / 'zero_nonzero_losses.png')
# Plot composite mse
fig, ax1 = plt.subplots()
ax1.set_xlabel('Batches')
axes = [ax1]
for idx, losses in enumerate([losses]):
gap = 100
losses = [np.mean(losses[n:n + gap]) for n in
range(0, len(losses), gap)]
axes[idx].plot(np.arange(len(losses)) * gap, losses)
axes[idx].set_ylabel('Loss')
ax1.legend([args.loss])
fig.savefig(save_path / 'composite_loss.png')
if args.save_encodings: # Save encodings in serialised format
print('Saving encodings...')
with Timer() as t:
calculate_encodings(encoder=ae,
data_root=args.data_root,
batch_size=args.batch_size,
types_file=args.train,
save_path=save_path,
dimension=args.dimension,
resolution=args.resolution,
ligmap=args.ligmap,
recmap=args.recmap,
rotate=False,
binary_mask=args.binary_mask
)
print('Encodings calculated and saved to {0} in {1} s'.format(
save_path / 'encodings', t.interval))
if __name__ == '__main__':
main()
| 36.831276 | 80 | 0.604469 |
ace121a2e36941a622b101d25c4647a1aae60609 | 530 | py | Python | rookie/mysite/migrations/0031_auto_20210615_1725.py | chen1932390299/drf-backend-platform | 80c2fb56c5c3c1456196d2415e3173600d190457 | [
"Apache-2.0"
] | 1 | 2021-07-02T09:05:51.000Z | 2021-07-02T09:05:51.000Z | rookie/mysite/migrations/0031_auto_20210615_1725.py | chen1932390299/drf-backend-platform | 80c2fb56c5c3c1456196d2415e3173600d190457 | [
"Apache-2.0"
] | null | null | null | rookie/mysite/migrations/0031_auto_20210615_1725.py | chen1932390299/drf-backend-platform | 80c2fb56c5c3c1456196d2415e3173600d190457 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.2.2 on 2021-06-15 17:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mysite', '0030_projectconfig'),
]
operations = [
migrations.RemoveField(
model_name='testcase',
name='project_name',
),
migrations.AddField(
model_name='testcase',
name='project_id',
field=models.IntegerField(default=1),
preserve_default=False,
),
]
| 22.083333 | 49 | 0.573585 |
ace121c3058a8a5d36d894d84f14c867c3e8f99d | 2,317 | py | Python | src/dataloader/ValidationDataset.py | aidotse/Team-Yenomze | f91655bafd41132d76ee68f366780593cf91c8bb | [
"MIT"
] | 1 | 2021-01-15T16:31:37.000Z | 2021-01-15T16:31:37.000Z | src/dataloader/ValidationDataset.py | aidotse/Team-Yenomze | f91655bafd41132d76ee68f366780593cf91c8bb | [
"MIT"
] | null | null | null | src/dataloader/ValidationDataset.py | aidotse/Team-Yenomze | f91655bafd41132d76ee68f366780593cf91c8bb | [
"MIT"
] | null | null | null | import torch
import numpy as np
from torch.utils.data import IterableDataset
from monai.utils import NumpyPadMode
from monai.transforms import LoadImage
from monai.data.utils import iter_patch
from typing import Any, Callable, Hashable, Optional, Sequence, Tuple, Union
class OurGridyDataset(IterableDataset):
def __init__(self,
data: Sequence,
patch_size: int,
data_reader: Callable):
"""
Args:
data: input data to load and transform to generate dataset for model.
transform: a callable data transform on input data.
"""
self.patch_size = (None,) + (10, patch_size, patch_size)
self.start_pos = ()
self.mode = NumpyPadMode.WRAP
self.data = data
self.image_reader = LoadImage(data_reader, image_only=True)
def __len__(self) -> int:
return len(self.data)
def __iter__(self):
worker_info = torch.utils.data.get_worker_info()
iter_start = 0
iter_end = len(self.data)
if worker_info is not None:
# split workload
per_worker = int(math.ceil((iter_end - iter_start) / float(worker_info.num_workers)))
worker_id = worker_info.id
iter_start = iter_start + worker_id * per_worker
iter_end = min(iter_start + per_worker, iter_end)
for index in range(iter_start, iter_end):
img_paths = self.data[index]
arrays = np.expand_dims(np.stack([self.image_reader(x) for x in img_paths]), axis=(0,1))
#arrays = arrays / 30000.0
#arrays = (np.log(1 + arrays) - 5.5)/5.5
# Get mag level of file
mag_level = get_mag_level(img_paths[0])
# Preprocessing - 1,1,10,256,256
arrays[0,0,7,:,:] = preprocess(arrays[0,0,7,:,:], mag_level, "C01")
arrays[0,0,8,:,:] = preprocess(arrays[0,0,8,:,:], mag_level, "C02")
arrays[0,0,9,:,:] = preprocess(arrays[0,0,9,:,:], mag_level, "C03")
arrays[0,0,:7,:,:] = preprocess(arrays[0,0,:7,:,:], mag_level, "C04")
iters = [iter_patch(a, self.patch_size, self.start_pos, False, self.mode) for a in arrays]
yield from zip(*iters) | 36.777778 | 102 | 0.585671 |
ace123c809d7f9be1c46391eb77c9e2a41a133dd | 1,635 | py | Python | hummingbot/strategy/dev_2_perform_trade/start.py | BGTCapital/hummingbot | 2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242 | [
"Apache-2.0"
] | 3,027 | 2019-04-04T18:52:17.000Z | 2022-03-30T09:38:34.000Z | hummingbot/strategy/dev_2_perform_trade/start.py | BGTCapital/hummingbot | 2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242 | [
"Apache-2.0"
] | 4,080 | 2019-04-04T19:51:11.000Z | 2022-03-31T23:45:21.000Z | hummingbot/strategy/dev_2_perform_trade/start.py | BGTCapital/hummingbot | 2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242 | [
"Apache-2.0"
] | 1,342 | 2019-04-04T20:50:53.000Z | 2022-03-31T15:22:36.000Z | #!/usr/bin/env python
from hummingbot.strategy.dev_2_perform_trade import PerformTradeStrategy
from hummingbot.strategy.dev_2_perform_trade.dev_2_perform_trade_config_map import dev_2_perform_trade_config_map
from hummingbot.core.event.events import PriceType
def start(self):
try:
exchange = dev_2_perform_trade_config_map.get("exchange").value.lower()
trading_pair = dev_2_perform_trade_config_map.get("trading_pair").value
is_buy = dev_2_perform_trade_config_map.get("is_buy").value
spread = dev_2_perform_trade_config_map.get("spread").value
order_amount = dev_2_perform_trade_config_map.get("order_amount").value
price_type = dev_2_perform_trade_config_map.get("price_type").value.lower()
if price_type == "mid_price":
price_type = PriceType.MidPrice
elif price_type == "last_price":
price_type = PriceType.LastTrade
elif price_type == "last_own_trade_price":
price_type = PriceType.LastOwnTrade
else:
raise ValueError(f"Invalid Price Type: {price_type}")
self._initialize_markets([(exchange, [trading_pair])])
exchange = self.markets[exchange]
self.strategy = PerformTradeStrategy(
exchange=exchange,
trading_pair=trading_pair,
is_buy=is_buy,
spread=spread,
order_amount=order_amount,
price_type=price_type,
hb_app_notification=True,
)
except Exception as e:
self._notify(str(e))
self.logger().error("Unknown error during initialization.", exc_info=True)
| 39.878049 | 113 | 0.691131 |
ace124197720801e0e3876e14924a3e54f3c4479 | 1,515 | py | Python | password_generator/app.py | rhoenkelevra/python_simple_applications | 28ceb5f9fe7ecf11d606d49463385e92927e8f98 | [
"MIT"
] | null | null | null | password_generator/app.py | rhoenkelevra/python_simple_applications | 28ceb5f9fe7ecf11d606d49463385e92927e8f98 | [
"MIT"
] | null | null | null | password_generator/app.py | rhoenkelevra/python_simple_applications | 28ceb5f9fe7ecf11d606d49463385e92927e8f98 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 6 12:33:43 2021
@author: user15
"""
import string
from random import choice
pass_size = int(input("暗証番号の長さを記入して下さい \n -->"))
# ask user if wants uppercase letters and symbols
use_symbols = input("暗証番号には記号も使いますか (Y か N): \n -->")
def containsLetterAndNumber(input):
return any(x.isalpha() for x in input) and any(x.isnumeric() for x in input)
def write_to_file(password, question):
if question.upper() == "Y":
pass_name = input("この暗証番号はどこで使いますか。: \n -->")
# if file is not on same dir as .py, put the
file1 = open(r"pass.txt", 'a')
file1.write(f"{pass_name} <===> {password} \n")
file1.close()
print("暗証番号はpass.textに保存済み")
else:
print(f"暗証番号は{password}、忘れないで下さい。")
def generate_pass(size=pass_size, symbols=use_symbols):
if symbols.upper() == 'N' :
chars = string.ascii_letters + string.digits
elif symbols.upper() == 'Y':
chars = string.ascii_letters + string.digits + string.punctuation
temp_chars = "".join(choice(chars) for _ in range(size))
if containsLetterAndNumber(temp_chars):
print("おすすめの暗証番号は:", temp_chars)
question_save = input("pass.txtファイルに保存しますか (Y か N): \n -->")
write_to_file(temp_chars, question_save)
else:
print('not good pass, generate again!')
generate_pass()
generate_pass()
| 28.055556 | 81 | 0.592079 |
ace1261b7eedf20c070fc126ade7c0103f46a54d | 18,085 | py | Python | marge/job.py | chmielowiec/marge-bot | bda7eb1b562fac26ac9fd030a188a7d69c88d2f2 | [
"BSD-3-Clause"
] | null | null | null | marge/job.py | chmielowiec/marge-bot | bda7eb1b562fac26ac9fd030a188a7d69c88d2f2 | [
"BSD-3-Clause"
] | null | null | null | marge/job.py | chmielowiec/marge-bot | bda7eb1b562fac26ac9fd030a188a7d69c88d2f2 | [
"BSD-3-Clause"
] | null | null | null | # pylint: disable=too-many-locals,too-many-branches,too-many-statements
import enum
import logging as log
import time
from collections import namedtuple
from datetime import datetime, timedelta
import requests
from . import git, gitlab
from .branch import Branch
from .interval import IntervalUnion
from .merge_request import MergeRequestRebaseFailed
from .project import Project
from .user import User
from .pipeline import Pipeline
class MergeJob:
def __init__(self, *, api, user, project, repo, options):
self._api = api
self._user = user
self._project = project
self._repo = repo
self._options = options
self._merge_timeout = timedelta(minutes=5)
@property
def repo(self):
return self._repo
@property
def opts(self):
return self._options
def execute(self):
raise NotImplementedError
def ensure_mergeable_mr(self, merge_request):
merge_request.refetch_info()
log.info('Ensuring MR !%s is mergeable', merge_request.iid)
log.debug('Ensuring MR %r is mergeable', merge_request)
if merge_request.work_in_progress:
raise CannotMerge("Sorry, I can't merge requests marked as Work-In-Progress!")
if merge_request.squash and self._options.requests_commit_tagging:
raise CannotMerge(
"Sorry, merging requests marked as auto-squash would ruin my commit tagging!"
)
approvals = merge_request.fetch_approvals()
if not approvals.sufficient:
raise CannotMerge(
'Insufficient approvals '
'(have: {0.approver_usernames} missing: {0.approvals_left})'.format(approvals)
)
state = merge_request.state
if state not in ('opened', 'reopened', 'locked'):
if state in ('merged', 'closed'):
raise SkipMerge('The merge request is already {}!'.format(state))
raise CannotMerge('The merge request is in an unknown state: {}'.format(state))
if self.during_merge_embargo():
raise SkipMerge('Merge embargo!')
if self._user.id not in merge_request.assignee_ids:
raise SkipMerge('It is not assigned to me anymore!')
def add_trailers(self, merge_request):
log.info('Adding trailers for MR !%s', merge_request.iid)
# add Reviewed-by
should_add_reviewers = (
self._options.add_reviewers and
self._options.fusion is not Fusion.gitlab_rebase
)
reviewers = (
_get_reviewer_names_and_emails(
merge_request.fetch_commits(),
merge_request.fetch_approvals(),
self._api,
) if should_add_reviewers
else None
)
sha = None
if reviewers is not None:
sha = self._repo.tag_with_trailer(
trailer_name='Reviewed-by',
trailer_values=reviewers,
branch=merge_request.source_branch,
start_commit='origin/' + merge_request.target_branch,
)
# add Tested-by
should_add_tested = (
self._options.add_tested and
self._project.only_allow_merge_if_pipeline_succeeds and
self._options.fusion is Fusion.rebase
)
tested_by = (
['{0._user.name} <{1.web_url}>'.format(self, merge_request)]
if should_add_tested
else None
)
if tested_by is not None:
sha = self._repo.tag_with_trailer(
trailer_name='Tested-by',
trailer_values=tested_by,
branch=merge_request.source_branch,
start_commit=merge_request.source_branch + '^'
)
# add Part-of
should_add_parts_of = (
self._options.add_part_of and
self._options.fusion is not Fusion.gitlab_rebase
)
part_of = (
'<{0.web_url}>'.format(merge_request)
if should_add_parts_of
else None
)
if part_of is not None:
sha = self._repo.tag_with_trailer(
trailer_name='Part-of',
trailer_values=[part_of],
branch=merge_request.source_branch,
start_commit='origin/' + merge_request.target_branch,
)
return sha
def get_mr_ci_status(self, merge_request, commit_sha=None):
if commit_sha is None:
commit_sha = merge_request.sha
if self._api.version().release >= (10, 5, 0):
pipelines = Pipeline.pipelines_by_merge_request(
merge_request.target_project_id,
merge_request.iid,
self._api,
)
else:
pipelines = Pipeline.pipelines_by_branch(
merge_request.source_project_id,
merge_request.source_branch,
self._api,
)
current_pipeline = next(iter(pipeline for pipeline in pipelines if pipeline.sha == commit_sha), None)
if current_pipeline:
ci_status = current_pipeline.status
else:
log.warning('No pipeline listed for %s on branch %s', commit_sha, merge_request.source_branch)
ci_status = None
return ci_status
    def wait_for_ci_to_pass(self, merge_request, commit_sha=None):
        """Poll CI status until it passes, is skipped, or the ci_timeout elapses.

        Raises CannotMerge on failed/canceled CI or on timeout. Tolerates up to
        5 consecutive transient API errors before giving up.
        """
        time_0 = datetime.utcnow()
        waiting_time_in_secs = 10
        if commit_sha is None:
            commit_sha = merge_request.sha
        log.info('Waiting for CI to pass for MR !%s', merge_request.iid)
        consecutive_errors = 0
        while datetime.utcnow() - time_0 < self._options.ci_timeout:
            try:
                ci_status = self.get_mr_ci_status(merge_request, commit_sha=commit_sha)
                consecutive_errors = 0
            except (gitlab.InternalServerError, requests.exceptions.Timeout):
                # transient server hiccups are retried a few times before re-raising
                consecutive_errors += 1
                if consecutive_errors > 5:
                    raise
                time.sleep(waiting_time_in_secs)
                continue
            if ci_status == 'success':
                log.info('CI for MR !%s passed', merge_request.iid)
                return
            if ci_status == 'skipped':
                log.info('CI for MR !%s skipped', merge_request.iid)
                return
            if ci_status == 'failed':
                raise CannotMerge('CI failed!')
            if ci_status == 'canceled':
                raise CannotMerge('Someone canceled the CI.')
            if ci_status not in ('pending', 'running'):
                # unknown status: log it but keep polling until timeout
                log.warning('Suspicious CI status: %r', ci_status)
            log.debug('Waiting for %s secs before polling CI status again', waiting_time_in_secs)
            time.sleep(waiting_time_in_secs)
        raise CannotMerge('CI is taking too long.')
def wait_for_merge_status_to_resolve(self, merge_request):
attempts = 3
waiting_time_in_secs = 5
log.info('Waiting for MR !%s to have merge_status can_be_merged', merge_request.iid)
for attempt in range(attempts):
merge_request.refetch_info()
merge_status = merge_request.merge_status
if merge_status == 'can_be_merged':
log.info('MR !%s can be merged on attempt %d', merge_request.iid, attempt)
return
if merge_status == 'cannot_be_merged':
log.info('MR !%s cannot be merged on attempt %d', merge_request.iid, attempt)
raise CannotMerge('GitLab believes this MR cannot be merged.')
if merge_status == 'unchecked':
log.info('MR !%s merge status currently unchecked on attempt %d.', merge_request.iid, attempt)
time.sleep(waiting_time_in_secs)
def unassign_from_mr(self, merge_request):
log.info('Unassigning from MR !%s', merge_request.iid)
author_id = merge_request.author_id
if author_id != self._user.id:
merge_request.assign_to(author_id)
else:
merge_request.unassign()
def during_merge_embargo(self):
now = datetime.utcnow()
return self.opts.embargo.covers(now)
    def maybe_reapprove(self, merge_request, approvals):
        """Re-instate `approvals` on the MR if our push reset them (opt-in via reapprove).

        Waits up to approval_timeout for GitLab to actually drop the approvals
        first, since approving is not idempotent.
        """
        # Re-approve the merge request, in case us pushing it has removed approvals.
        if self.opts.reapprove:
            # approving is not idempotent, so we need to check first that there are no approvals,
            # otherwise we'll get a failure on trying to re-instate the previous approvals
            def sufficient_approvals():
                return merge_request.fetch_approvals().sufficient
            # Make sure we don't race by ensuring approvals have reset since the push
            waiting_time_in_secs = 5
            approval_timeout_in_secs = self._options.approval_timeout.total_seconds()
            iterations = round(approval_timeout_in_secs / waiting_time_in_secs)
            log.info('Checking if approvals have reset')
            while sufficient_approvals() and iterations:
                log.debug('Approvals haven\'t reset yet, sleeping for %s secs', waiting_time_in_secs)
                time.sleep(waiting_time_in_secs)
                iterations -= 1
            # only re-approve once the approvals have really been dropped
            if not sufficient_approvals():
                approvals.reapprove()
def fetch_source_project(self, merge_request):
remote = 'origin'
remote_url = None
source_project = self.get_source_project(merge_request)
if source_project is not self._project:
remote = 'source'
remote_url = source_project.ssh_url_to_repo
self._repo.fetch(
remote_name=remote,
remote_url=remote_url,
)
return source_project, remote_url, remote
def get_source_project(self, merge_request):
source_project = self._project
if merge_request.source_project_id != self._project.id:
source_project = Project.fetch_by_id(
merge_request.source_project_id,
api=self._api,
)
return source_project
def get_target_project(self, merge_request):
return Project.fetch_by_id(merge_request.target_project_id, api=self._api)
def fuse(self, source, target, source_repo_url=None, local=False):
# NOTE: this leaves git switched to branch_a
strategies = {
Fusion.rebase: self._repo.rebase,
Fusion.merge: self._repo.merge,
Fusion.gitlab_rebase: self._repo.rebase, # we rebase locally to know sha
}
strategy = strategies[self._options.fusion]
return strategy(
source,
target,
source_repo_url=source_repo_url,
local=local,
)
    def update_from_target_branch_and_push(
        self,
        merge_request,
        *,
        source_repo_url=None,
    ):
        """Updates `target_branch` with commits from `source_branch`, optionally add trailers and push.

        The update strategy can either be rebase or merge. The default is rebase.

        Raises CannotMerge on rebase conflicts, failed trailer rewriting, or when
        the MR's changes already exist on the target branch.

        Returns
        -------
        (sha_of_target_branch, sha_after_update, sha_after_rewrite)
        """
        repo = self._repo
        source_branch = merge_request.source_branch
        target_branch = merge_request.target_branch
        assert source_repo_url != repo.remote_url
        if source_repo_url is None and source_branch == target_branch:
            raise CannotMerge('source and target branch seem to coincide!')
        branch_update_done = commits_rewrite_done = False
        try:
            initial_mr_sha = merge_request.sha
            updated_sha = self.fuse(
                source_branch,
                target_branch,
                source_repo_url=source_repo_url,
            )
            branch_update_done = True
            # The fuse above fetches origin again, so we are now safe to fetch
            # the sha from the remote target branch.
            target_sha = repo.get_commit_hash('origin/' + target_branch)
            if updated_sha == target_sha:
                raise CannotMerge('these changes already exist in branch `{}`'.format(target_branch))
            final_sha = self.add_trailers(merge_request) or updated_sha
            commits_rewrite_done = True
            branch_was_modified = final_sha != initial_mr_sha
            self.synchronize_mr_with_local_changes(merge_request, branch_was_modified, source_repo_url)
        except git.GitError:
            # the two flags tell us which stage blew up, so we can report precisely
            if not branch_update_done:
                raise CannotMerge('got conflicts while rebasing, your problem now...')
            if not commits_rewrite_done:
                raise CannotMerge('failed on filter-branch; check my logs!')
            raise
        else:
            return target_sha, updated_sha, final_sha
        finally:
            # A failure to clean up probably means something is fucked with the git repo
            # and likely explains any previous failure, so it will better to just
            # raise a GitError
            if source_branch != 'master':
                repo.checkout_branch('master')
                repo.remove_branch(source_branch)
def synchronize_mr_with_local_changes(
self,
merge_request,
branch_was_modified,
source_repo_url=None,
):
if self._options.fusion is Fusion.gitlab_rebase:
self.synchronize_using_gitlab_rebase(merge_request)
else:
self.push_force_to_mr(
merge_request,
branch_was_modified,
source_repo_url=source_repo_url,
)
def push_force_to_mr(
self,
merge_request,
branch_was_modified,
source_repo_url=None,
):
try:
self._repo.push(
merge_request.source_branch,
source_repo_url=source_repo_url,
force=True,
)
except git.GitError:
def fetch_remote_branch():
return Branch.fetch_by_name(
merge_request.source_project_id,
merge_request.source_branch,
self._api,
)
if branch_was_modified and fetch_remote_branch().protected:
raise CannotMerge("Sorry, I can't modify protected branches!")
change_type = "merged" if self.opts.fusion == Fusion.merge else "rebased"
raise CannotMerge('Failed to push %s changes, check my logs!' % change_type)
def synchronize_using_gitlab_rebase(self, merge_request, expected_sha=None):
expected_sha = expected_sha or self._repo.get_commit_hash()
try:
merge_request.rebase()
except MergeRequestRebaseFailed as err:
raise CannotMerge("GitLab failed to rebase the branch saying: {0[0]}".format(err.args))
except TimeoutError:
raise CannotMerge("GitLab was taking too long to rebase the branch...")
except gitlab.ApiError:
branch = Branch.fetch_by_name(
merge_request.source_project_id,
merge_request.source_branch,
self._api,
)
if branch.protected:
raise CannotMerge("Sorry, I can't modify protected branches!")
raise
else:
if merge_request.sha != expected_sha:
raise GitLabRebaseResultMismatch(
gitlab_sha=merge_request.sha,
expected_sha=expected_sha,
)
def _get_reviewer_names_and_emails(commits, approvals, api):
    """Return a list `['A. Prover <a.prover@example.com>', ...]` of approvers.

    Raises CannotMerge when the only approver also authored one of the commits
    (no independent review).
    """
    uids = approvals.approver_ids
    users = [User.fetch_by_id(uid, api) for uid in uids]
    # approvers whose email matches a commit author reviewed their own work
    self_reviewed = {commit['author_email'] for commit in commits} & {user.email for user in users}
    if self_reviewed and len(users) <= 1:
        raise CannotMerge('Commits require at least one independent reviewer.')
    return ['{0.name} <{0.email}>'.format(user) for user in users]
@enum.unique
class Fusion(enum.Enum):
    """How a merge request's commits get combined into the target branch."""
    merge = 0          # create a merge commit locally
    rebase = 1         # rebase locally and force-push
    gitlab_rebase = 2  # rebase locally for the sha, then let GitLab rebase
# Names of the per-job configuration options; these are also the field names
# of the MergeJobOptions namedtuple defined below.
JOB_OPTIONS = [
    'add_tested',
    'add_part_of',
    'add_reviewers',
    'reapprove',
    'approval_timeout',
    'embargo',
    'ci_timeout',
    'fusion',
    'use_no_ff_batches',
]
class MergeJobOptions(namedtuple('MergeJobOptions', JOB_OPTIONS)):
    """Immutable bag of per-job settings (fields listed in JOB_OPTIONS)."""
    __slots__ = ()

    @property
    def requests_commit_tagging(self):
        """Whether any option requiring commit rewriting is enabled."""
        return self.add_tested or self.add_part_of or self.add_reviewers

    @classmethod
    def default(
        cls, *,
        add_tested=False, add_part_of=False, add_reviewers=False, reapprove=False,
        approval_timeout=None, embargo=None, ci_timeout=None, fusion=Fusion.rebase,
        use_no_ff_batches=False,
    ):
        """Build options, substituting sensible fallbacks for unset values."""
        return cls(
            add_tested=add_tested,
            add_part_of=add_part_of,
            add_reviewers=add_reviewers,
            reapprove=reapprove,
            approval_timeout=approval_timeout or timedelta(seconds=0),
            embargo=embargo or IntervalUnion.empty(),
            ci_timeout=ci_timeout or timedelta(minutes=15),
            fusion=fusion,
            use_no_ff_batches=use_no_ff_batches,
        )
class CannotMerge(Exception):
    """Raised when a merge request cannot be merged; see `reason` for why."""

    @property
    def reason(self):
        """Human-readable explanation (the first constructor argument)."""
        if self.args:
            return self.args[0]
        return 'Unknown reason!'
class SkipMerge(CannotMerge):
    """Raised to skip the current MR (e.g. embargo, already merged, unassigned)."""
    pass
class GitLabRebaseResultMismatch(CannotMerge):
    """Raised when GitLab's server-side rebase yields a different sha than our local one."""

    def __init__(self, gitlab_sha, expected_sha):
        # NOTE: the two adjacent literals used to concatenate as "commit:I expected";
        # a separating space was missing in the user-facing message.
        super().__init__(
            "GitLab rebase ended up with a different commit: "
            "I expected %s but they got %s" % (expected_sha, gitlab_sha)
        )
| 36.242485 | 110 | 0.610285 |
ace126583118273342aa6d149e8b0b4193602498 | 34,956 | py | Python | shap/explainers/_deep/deep_tf.py | SleepyPepperHead/shap | 80ef18660060cf958db1c129b270536219a0cf50 | [
"MIT"
] | null | null | null | shap/explainers/_deep/deep_tf.py | SleepyPepperHead/shap | 80ef18660060cf958db1c129b270536219a0cf50 | [
"MIT"
] | null | null | null | shap/explainers/_deep/deep_tf.py | SleepyPepperHead/shap | 80ef18660060cf958db1c129b270536219a0cf50 | [
"MIT"
] | null | null | null | import numpy as np
import warnings
from .._explainer import Explainer
from packaging import version
from ..tf_utils import _get_session, _get_graph, _get_model_inputs, _get_model_output
keras = None
tf = None
tf_ops = None
tf_backprop = None
tf_execute = None
tf_gradients_impl = None
def custom_record_gradient(op_name, inputs, attrs, results):
    """ This overrides tensorflow.python.eager.backprop._record_gradient.

    We need to override _record_gradient in order to get gradient backprop to
    get called for ResourceGather operations. In order to make this work we
    temporarily "lie" about the input type to prevent the node from getting
    pruned from the gradient backprop process. We then reset the type directly
    afterwards back to what it was (an integer type).
    """
    reset_input = False
    if op_name == "ResourceGather" and inputs[1].dtype == tf.int32:
        # pretend the integer index input is a float so it is kept in the backprop graph
        inputs[1].__dict__["_dtype"] = tf.float32
        reset_input = True
    try:
        out = tf_backprop._record_gradient("shap_"+op_name, inputs, attrs, results)
    except AttributeError:
        # newer TF builds expose record_gradient without the underscore prefix
        out = tf_backprop.record_gradient("shap_"+op_name, inputs, attrs, results)

    if reset_input:
        # restore the real integer dtype
        inputs[1].__dict__["_dtype"] = tf.int32

    return out
class TFDeep(Explainer):
    """
    Using tf.gradients to implement the backpropagation was
    inspired by the gradient based implementation approach proposed by Ancona et al, ICLR 2018. Note
    that this package does not currently use the reveal-cancel rule for ReLu units proposed in DeepLIFT.
    """

    def __init__(self, model, data, session=None, learning_phase_flags=None):
        """ An explainer object for a deep model using a given background dataset.

        Note that the complexity of the method scales linearly with the number of background data
        samples. Passing the entire training dataset as `data` will give very accurate expected
        values, but be unreasonably expensive. The variance of the expectation estimates scale by
        roughly 1/sqrt(N) for N background data samples. So 100 samples will give a good estimate,
        and 1000 samples a very good estimate of the expected values.

        Parameters
        ----------
        model : tf.keras.Model or (input : [tf.Operation], output : tf.Operation)
            A keras model object or a pair of TensorFlow operations (or a list and an op) that
            specifies the input and output of the model to be explained. Note that SHAP values
            are specific to a single output value, so you get an explanation for each element of
            the output tensor (which must be a flat rank one vector).

        data : [numpy.array] or [pandas.DataFrame] or function
            The background dataset to use for integrating out features. DeepExplainer integrates
            over all these samples for each explanation. The data passed here must match the input
            operations given to the model. If a function is supplied, it must be a function that
            takes a particular input example and generates the background dataset for that example
        session : None or tensorflow.Session
            The TensorFlow session that has the model we are explaining. If None is passed then
            we do our best to find the right session, first looking for a keras session, then
            falling back to the default TensorFlow session.

        learning_phase_flags : None or list of tensors
            If you have your own custom learning phase flags pass them here. When explaining a prediction
            we need to ensure we are not in training mode, since this changes the behavior of ops like
            batch norm or dropout. If None is passed then we look for tensors in the graph that look like
            learning phase flags (this works for Keras models). Note that we assume all the flags should
            have a value of False during predictions (and hence explanations).
        """
        # try and import keras and tensorflow
        global tf, tf_ops, tf_backprop, tf_execute, tf_gradients_impl
        if tf is None:
            from tensorflow.python.framework import ops as tf_ops # pylint: disable=E0611
            from tensorflow.python.ops import gradients_impl as tf_gradients_impl # pylint: disable=E0611
            from tensorflow.python.eager import backprop as tf_backprop
            from tensorflow.python.eager import execute as tf_execute
            if not hasattr(tf_gradients_impl, "_IsBackpropagatable"):
                from tensorflow.python.ops import gradients_util as tf_gradients_impl
            import tensorflow as tf
            if version.parse(tf.__version__) < version.parse("1.4.0"):
                warnings.warn("Your TensorFlow version is older than 1.4.0 and not supported.")
        global keras
        if keras is None:
            try:
                import keras
                warnings.warn("keras is no longer supported, please use tf.keras instead.")
            except ImportError:
                # standalone keras is optional; only catch the missing-module case
                # (a bare `except:` here used to swallow even KeyboardInterrupt)
                pass

        if version.parse(tf.__version__) >= version.parse("2.4.0"):
            warnings.warn("Your TensorFlow version is newer than 2.4.0 and so graph support has been removed in eager mode and some static graphs may not be supported. See PR #1483 for discussion.")

        # determine the model inputs and outputs
        self.model_inputs = _get_model_inputs(model)
        self.model_output = _get_model_output(model)
        assert type(self.model_output) != list, "The model output to be explained must be a single tensor!"
        assert len(self.model_output.shape) < 3, "The model output must be a vector or a single value!"
        self.multi_output = True
        if len(self.model_output.shape) == 1:
            self.multi_output = False

        if tf.executing_eagerly():
            if type(model) is tuple or type(model) is list:
                assert len(model) == 2, "When a tuple is passed it must be of the form (inputs, outputs)"
                from tensorflow.keras import Model
                self.model = Model(model[0], model[1])
            else:
                self.model = model

        # check if we have multiple inputs
        self.multi_input = True
        if type(self.model_inputs) != list or len(self.model_inputs) == 1:
            self.multi_input = False
            if type(self.model_inputs) != list:
                self.model_inputs = [self.model_inputs]
        if type(data) != list and (hasattr(data, '__call__') == False):
            data = [data]
        self.data = data

        self._vinputs = {}  # used to track what op inputs depends on the model inputs
        self.orig_grads = {}

        if not tf.executing_eagerly():
            self.session = _get_session(session)
            self.graph = _get_graph(self)
            # if no learning phase flags were given we go looking for them
            # ...this will catch the one that keras uses
            # we need to find them since we want to make sure learning phase flags are set to False
            if learning_phase_flags is None:
                self.learning_phase_ops = []
                for op in self.graph.get_operations():
                    if 'learning_phase' in op.name and op.type == "Const" and len(op.outputs[0].shape) == 0:
                        if op.outputs[0].dtype == tf.bool:
                            self.learning_phase_ops.append(op)
                self.learning_phase_flags = [op.outputs[0] for op in self.learning_phase_ops]
            else:
                self.learning_phase_ops = [t.op for t in learning_phase_flags]

        # save the expected output of the model
        # if self.data is a function, set self.expected_value to None
        if (hasattr(self.data, '__call__')):
            self.expected_value = None
        else:
            if self.data[0].shape[0] > 5000:
                warnings.warn("You have provided over 5k background samples! For better performance consider using smaller random sample.")
            if not tf.executing_eagerly():
                self.expected_value = self.run(self.model_output, self.model_inputs, self.data).mean(0)
            else:
                #if type(self.model)is tuple:
                #    self.fModel(cnn.inputs, cnn.get_layer(theNameYouWant).outputs)
                self.expected_value = tf.reduce_mean(self.model(self.data), 0)

        if not tf.executing_eagerly():
            self._init_between_tensors(self.model_output.op, self.model_inputs)

        # make a blank array that will get lazily filled in with the SHAP value computation
        # graphs for each output. Lazy is important since if there are 1000 outputs and we
        # only explain the top 5 it would be a waste to build graphs for the other 995
        if not self.multi_output:
            self.phi_symbolics = [None]
        else:
            noutputs = self.model_output.shape.as_list()[1]
            if noutputs is not None:
                self.phi_symbolics = [None for i in range(noutputs)]
            else:
                raise Exception("The model output tensor to be explained cannot have a static shape in dim 1 of None!")
def _get_model_output(self, model):
if len(model.layers[-1]._inbound_nodes) == 0:
if len(model.outputs) > 1:
warnings.warn("Only one model output supported.")
return model.outputs[0]
else:
return model.layers[-1].output
    def _init_between_tensors(self, out_op, model_inputs):
        """ Record which ops/tensors lie on a path from `model_inputs` to `out_op`.

        Populates self.between_ops, self.between_tensors and self.used_types.
        """
        # find all the operations in the graph between our inputs and outputs
        tensor_blacklist = tensors_blocked_by_false(self.learning_phase_ops) # don't follow learning phase branches
        dependence_breakers = [k for k in op_handlers if op_handlers[k] == break_dependence]
        back_ops = backward_walk_ops(
            [out_op], tensor_blacklist,
            dependence_breakers
        )
        start_ops = []
        for minput in model_inputs:
            for op in minput.consumers():
                start_ops.append(op)
        # forward walk restricted to ops also reachable backwards from the output
        self.between_ops = forward_walk_ops(
            start_ops,
            tensor_blacklist, dependence_breakers,
            within_ops=back_ops
        )

        # note all the tensors that are on the path between the inputs and the output
        self.between_tensors = {}
        for op in self.between_ops:
            for t in op.outputs:
                self.between_tensors[t.name] = True
        for t in model_inputs:
            self.between_tensors[t.name] = True

        # save what types are being used
        self.used_types = {}
        for op in self.between_ops:
            self.used_types[op.type] = True
def _variable_inputs(self, op):
""" Return which inputs of this operation are variable (i.e. depend on the model inputs).
"""
if op not in self._vinputs:
out = np.zeros(len(op.inputs), dtype=np.bool)
for i,t in enumerate(op.inputs):
out[i] = t.name in self.between_tensors
self._vinputs[op] = out
return self._vinputs[op]
    def phi_symbolic(self, i):
        """ Get the SHAP value computation graph for a given model output.

        Graphs are built lazily and cached in self.phi_symbolics[i].
        """
        if self.phi_symbolics[i] is None:
            if not tf.executing_eagerly():
                # graph mode: build a tf.gradients graph under our overridden gradients
                def anon():
                    out = self.model_output[:,i] if self.multi_output else self.model_output
                    return tf.gradients(out, self.model_inputs)

                self.phi_symbolics[i] = self.execute_with_overridden_gradients(anon)
            else:
                # eager mode: trace a tf.function that records gradients with a tape
                @tf.function
                def grad_graph(shap_rAnD):
                    phase = tf.keras.backend.learning_phase()
                    # force inference behavior (dropout/batch-norm) while tracing
                    tf.keras.backend.set_learning_phase(0)

                    with tf.GradientTape(watch_accessed_variables=False) as tape:
                        tape.watch(shap_rAnD)
                        out = self.model(shap_rAnD)
                        if self.multi_output:
                            out = out[:,i]

                    self._init_between_tensors(out.op, shap_rAnD)
                    x_grad = tape.gradient(out, shap_rAnD)
                    # restore whatever learning phase was active before
                    tf.keras.backend.set_learning_phase(phase)
                    return x_grad

                self.phi_symbolics[i] = grad_graph
        return self.phi_symbolics[i]
    def shap_values(self, X, ranked_outputs=None, output_rank_order="max", check_additivity=True):
        """ Estimate SHAP values for the samples in X against the background data.

        Returns one attribution array per explained output (or a single array
        for single-output/single-input models); with ranked_outputs also
        returns the chosen output ranks.
        """
        # check if we have multiple inputs
        if not self.multi_input:
            if type(X) == list and len(X) != 1:
                assert False, "Expected a single tensor as model input!"
            elif type(X) != list:
                X = [X]
        else:
            assert type(X) == list, "Expected a list of model inputs!"
        assert len(self.model_inputs) == len(X), "Number of model inputs (%d) does not match the number given (%d)!" % (len(self.model_inputs), len(X))

        # rank and determine the model outputs that we will explain
        if ranked_outputs is not None and self.multi_output:
            if not tf.executing_eagerly():
                model_output_values = self.run(self.model_output, self.model_inputs, X)
            else:
                model_output_values = self.model(X)

            if output_rank_order == "max":
                model_output_ranks = np.argsort(-model_output_values)
            elif output_rank_order == "min":
                model_output_ranks = np.argsort(model_output_values)
            elif output_rank_order == "max_abs":
                model_output_ranks = np.argsort(np.abs(model_output_values))
            else:
                assert False, "output_rank_order must be max, min, or max_abs!"
            model_output_ranks = model_output_ranks[:,:ranked_outputs]
        else:
            # no ranking requested: explain every output for every sample
            model_output_ranks = np.tile(np.arange(len(self.phi_symbolics)), (X[0].shape[0], 1))

        # compute the attributions
        output_phis = []
        for i in range(model_output_ranks.shape[1]):
            phis = []
            for k in range(len(X)):
                phis.append(np.zeros(X[k].shape))
            for j in range(X[0].shape[0]):
                # a callable `data` generates a per-sample background set
                if (hasattr(self.data, '__call__')):
                    bg_data = self.data([X[l][j] for l in range(len(X))])
                    if type(bg_data) != list:
                        bg_data = [bg_data]
                else:
                    bg_data = self.data

                # tile the inputs to line up with the background data samples
                tiled_X = [np.tile(X[l][j:j+1], (bg_data[l].shape[0],) + tuple([1 for k in range(len(X[l].shape)-1)])) for l in range(len(X))]

                # we use the first sample for the current sample and the rest for the references
                joint_input = [np.concatenate([tiled_X[l], bg_data[l]], 0) for l in range(len(X))]

                # run attribution computation graph
                feature_ind = model_output_ranks[j,i]
                sample_phis = self.run(self.phi_symbolic(feature_ind), self.model_inputs, joint_input)

                # assign the attributions to the right part of the output arrays
                for l in range(len(X)):
                    phis[l][j] = (sample_phis[l][bg_data[l].shape[0]:] * (X[l][j] - bg_data[l])).mean(0)

            output_phis.append(phis[0] if not self.multi_input else phis)

        # check that the SHAP values sum up to the model output
        if check_additivity:
            if not tf.executing_eagerly():
                model_output = self.run(self.model_output, self.model_inputs, X)
            else:
                model_output = self.model(X)

            for l in range(len(self.expected_value)):
                if not self.multi_input:
                    diffs = model_output[:, l] - self.expected_value[l] - output_phis[l].sum(axis=tuple(range(1, output_phis[l].ndim)))
                else:
                    diffs = model_output[:, l] - self.expected_value[l]
                    for i in range(len(output_phis[l])):
                        diffs -= output_phis[l][i].sum(axis=tuple(range(1, output_phis[l][i].ndim)))
                assert np.abs(diffs).max() < 1e-2, "The SHAP explanations do not sum up to the model's output! This is either because of a " \
                                                   "rounding error or because an operator in your computation graph was not fully supported. If " \
                                                   "the sum difference of %f is significant compared the scale of your model outputs please post " \
                                                   "as a github issue, with a reproducable example if possible so we can debug it." % np.abs(diffs).max()

        if not self.multi_output:
            return output_phis[0]
        elif ranked_outputs is not None:
            return output_phis, model_output_ranks
        else:
            return output_phis
    def run(self, out, model_inputs, X):
        """ Runs the model while also setting the learning phase flags to False.
        """
        if not tf.executing_eagerly():
            # graph mode: ordinary session.run with learning phase forced off
            feed_dict = dict(zip(model_inputs, X))
            for t in self.learning_phase_flags:
                feed_dict[t] = False
            return self.session.run(out, feed_dict)
        else:
            def anon():
                # swap in our gradient recorder for the duration of the call
                tf_execute.record_gradient = custom_record_gradient

                # build inputs that are correctly shaped, typed, and tf-wrapped
                inputs = []
                for i in range(len(X)):
                    shape = list(self.model_inputs[i].shape)
                    shape[0] = -1
                    data = X[i].reshape(shape)
                    v = tf.constant(data, dtype=self.model_inputs[i].dtype)
                    inputs.append(v)
                final_out = out(inputs)
                try:
                    # restore the stock recorder (name differs across TF versions)
                    tf_execute.record_gradient = tf_backprop._record_gradient
                except AttributeError:
                    tf_execute.record_gradient = tf_backprop.record_gradient

                return final_out
            return self.execute_with_overridden_gradients(anon)
def custom_grad(self, op, *grads):
""" Passes a gradient op creation request to the correct handler.
"""
type_name = op.type[5:] if op.type.startswith("shap_") else op.type
out = op_handlers[type_name](self, op, *grads) # we cut off the shap_ prefex before the lookup
return out
    def execute_with_overridden_gradients(self, f):
        """ Run `f` with SHAP gradient handlers temporarily installed in TF's registry.

        The registry is always restored in the finally block, even if `f` raises.
        """
        # replace the gradients for all the non-linear activations
        # we do this by hacking our way into the registry (TODO: find a public API for this if it exists)
        reg = tf_ops._gradient_registry._registry
        ops_not_in_registry = ['TensorListReserve']
        # NOTE: location_tag taken from tensorflow source for None type ops
        location_tag = ("UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN")
        # TODO: unclear why some ops are not in the registry with TF 2.0 like TensorListReserve
        for non_reg_ops in ops_not_in_registry:
            reg[non_reg_ops] = {'type': None, 'location': location_tag}
        for n in op_handlers:
            if n in reg:
                self.orig_grads[n] = reg[n]["type"]
                reg["shap_"+n] = {
                    "type": self.custom_grad,
                    "location": reg[n]["location"]
                }
                reg[n]["type"] = self.custom_grad

        # In TensorFlow 1.10 they started pruning out nodes that they think can't be backpropped
        # unfortunately that includes the index of embedding layers so we disable that check here
        if hasattr(tf_gradients_impl, "_IsBackpropagatable"):
            orig_IsBackpropagatable = tf_gradients_impl._IsBackpropagatable
            tf_gradients_impl._IsBackpropagatable = lambda tensor: True

        # define the computation graph for the attribution values using a custom gradient-like computation
        try:
            out = f()
        finally:
            # reinstate the backpropagatable check
            if hasattr(tf_gradients_impl, "_IsBackpropagatable"):
                tf_gradients_impl._IsBackpropagatable = orig_IsBackpropagatable

            # restore the original gradient definitions
            for n in op_handlers:
                if n in reg:
                    del reg["shap_"+n]
                    reg[n]["type"] = self.orig_grads[n]
            for non_reg_ops in ops_not_in_registry:
                del reg[non_reg_ops]
        if not tf.executing_eagerly():
            return out
        else:
            return [v.numpy() for v in out]
def tensors_blocked_by_false(ops):
    """ Follows a set of ops assuming their value is False and find blocked Switch paths.

    This is used to prune away parts of the model graph that are only used during the training
    phase (like dropout, batch norm, etc.).
    """
    blocked_tensors = []

    def walk(current_op):
        if current_op.type == "Switch":
            # output 1 is the "true" branch; unreachable when the flag is False
            blocked_tensors.append(current_op.outputs[1])
        else:
            for tensor in current_op.outputs:
                for consumer in tensor.consumers():
                    walk(consumer)

    for root in ops:
        walk(root)
    return blocked_tensors
def backward_walk_ops(start_ops, tensor_blacklist, op_type_blacklist):
    """ Walk from `start_ops` toward the graph inputs, skipping blacklisted tensors/op types. """
    visited = []
    pending = [op for op in start_ops]
    while pending:
        current = pending.pop()
        if current.type in op_type_blacklist or current in visited:
            continue
        visited.append(current)
        # queue the producers of every non-blacklisted input tensor
        pending.extend(tensor.op for tensor in current.inputs if tensor not in tensor_blacklist)
    return visited
def forward_walk_ops(start_ops, tensor_blacklist, op_type_blacklist, within_ops):
    """ Walk downstream from `start_ops`, restricted to ops in `within_ops`. """
    visited = []
    pending = [op for op in start_ops]
    while pending:
        current = pending.pop()
        eligible = (
            current.type not in op_type_blacklist
            and current in within_ops
            and current not in visited
        )
        if not eligible:
            continue
        visited.append(current)
        for tensor in current.outputs:
            if tensor in tensor_blacklist:
                continue
            pending.extend(tensor.consumers())
    return visited
def softmax(explainer, op, *grads):
    """ Just decompose softmax into its components and recurse, we can handle all of them :)

    We assume the 'axis' is the last dimension because the TF codebase swaps the 'axis' to
    the last dimension before the softmax op if 'axis' is not already the last dimension.
    We also don't subtract the max before tf.exp for numerical stability since that might
    mess up the attributions and it seems like TensorFlow doesn't define softmax that way
    (according to the docs)
    """
    in0 = op.inputs[0]
    in0_max = tf.reduce_max(in0, axis=-1, keepdims=True, name="in0_max")
    in0_centered = in0 - in0_max
    evals = tf.exp(in0_centered, name="custom_exp")
    rsum = tf.reduce_sum(evals, axis=-1, keepdims=True)
    div = evals / rsum

    # mark these as in-between the inputs and outputs
    # NOTE(review): the loop variable `op` shadows the function parameter here
    for op in [evals.op, rsum.op, div.op, in0_centered.op]:
        for t in op.outputs:
            if t.name not in explainer.between_tensors:
                explainer.between_tensors[t.name] = False

    out = tf.gradients(div, in0_centered, grad_ys=grads[0])[0]

    # remove the names we just added
    for op in [evals.op, rsum.op, div.op, in0_centered.op]:
        for t in op.outputs:
            if explainer.between_tensors[t.name] is False:
                del explainer.between_tensors[t.name]

    # rescale to account for our shift by in0_max (which we did for numerical stability)
    xin0, rin0 = tf.split(in0, 2)
    xin0_centered, rin0_centered = tf.split(in0_centered, 2)
    delta_in0 = xin0 - rin0
    dup0 = [2] + [1 for i in delta_in0.shape[1:]]
    return tf.where(
        tf.tile(tf.abs(delta_in0), dup0) < 1e-6,
        out,
        out * tf.tile((xin0_centered - rin0_centered) / delta_in0, dup0)
    )
def maxpool(explainer, op, *grads):
    """ Attribution handler for max-pool style ops.

    The first half of the batch holds the explained samples (x), the second
    half the reference samples (r); attributions are the rescaled difference.
    """
    xin0, rin0 = tf.split(op.inputs[0], 2)
    xout, rout = tf.split(op.outputs[0], 2)
    delta_in0 = xin0 - rin0
    dup0 = [2] + [1 for i in delta_in0.shape[1:]]
    cross_max = tf.maximum(xout, rout)
    diffs = tf.concat([cross_max - rout, xout - cross_max], 0)
    # strip our registration prefix so the stock gradient can be looked up
    if op.type.startswith("shap_"):
        op.type = op.type[5:]
    xmax_pos, rmax_pos = tf.split(explainer.orig_grads[op.type](op, grads[0] * diffs), 2)
    # guard against division by ~0 where x and r coincide
    return tf.tile(tf.where(
        tf.abs(delta_in0) < 1e-7,
        tf.zeros_like(delta_in0),
        (xmax_pos + rmax_pos) / delta_in0
    ), dup0)
def gather(explainer, op, *grads):
    """ Attribution handler for Gather / GatherV2 / ResourceGather ops.

    Supports either varying indices (input 1) or varying params (input 0),
    but not both; a varying axis is unsupported.
    """
    #params = op.inputs[0]
    indices = op.inputs[1]
    #axis = op.inputs[2]
    var = explainer._variable_inputs(op)
    if var[1] and not var[0]:
        assert len(indices.shape) == 2, "Only scalar indices supported right now in GatherV2!"

        xin1, rin1 = tf.split(tf.cast(op.inputs[1], tf.float32), 2)
        xout, rout = tf.split(op.outputs[0], 2)
        dup_in1 = [2] + [1 for i in xin1.shape[1:]]
        dup_out = [2] + [1 for i in xout.shape[1:]]
        delta_in1_t = tf.tile(xin1 - rin1, dup_in1)
        out_sum = tf.reduce_sum(grads[0] * tf.tile(xout - rout, dup_out), list(range(len(indices.shape), len(grads[0].shape))))
        # ResourceGather has 2 inputs, GatherV2 has 3 (the extra None is for axis)
        if op.type == "ResourceGather":
            return [None, tf.where(
                tf.abs(delta_in1_t) < 1e-6,
                tf.zeros_like(delta_in1_t),
                out_sum / delta_in1_t
            )]
        return [None, tf.where(
            tf.abs(delta_in1_t) < 1e-6,
            tf.zeros_like(delta_in1_t),
            out_sum / delta_in1_t
        ), None]
    elif var[0] and not var[1]:
        # strip our registration prefix so the stock gradient can be looked up
        if op.type.startswith("shap_"):
            op.type = op.type[5:]
        return [explainer.orig_grads[op.type](op, grads[0]), None]  # linear in this case
    else:
        assert False, "Axis not yet supported to be varying for gather op!"
def linearity_1d_nonlinearity_2d(input_ind0, input_ind1, op_func):
    """ Build a handler for an op linear in each input alone but nonlinear when both vary. """
    def handler(explainer, op, *grads):
        varying = explainer._variable_inputs(op)
        first, second = varying[input_ind0], varying[input_ind1]
        if first and second:
            return nonlinearity_2d_handler(input_ind0, input_ind1, op_func, explainer, op, *grads)
        if first:
            return linearity_1d_handler(input_ind0, explainer, op, *grads)
        if second:
            return linearity_1d_handler(input_ind1, explainer, op, *grads)
        # no inputs vary, we must be hidden by a switch function
        return [None for _ in op.inputs]
    return handler
def nonlinearity_1d_nonlinearity_2d(input_ind0, input_ind1, op_func):
    """Build a handler for ops that are nonlinear in either input alone and
    also nonlinear when both inputs vary (e.g. Minimum, Maximum)."""
    def handler(explainer, op, *grads):
        varies = explainer._variable_inputs(op)
        first, second = varies[input_ind0], varies[input_ind1]
        if first and second:
            return nonlinearity_2d_handler(input_ind0, input_ind1, op_func, explainer, op, *grads)
        if first:
            return nonlinearity_1d_handler(input_ind0, explainer, op, *grads)
        if second:
            return nonlinearity_1d_handler(input_ind1, explainer, op, *grads)
        # Nothing varies: attribution is blocked upstream (e.g. by a Switch).
        return [None] * len(op.inputs)
    return handler
def nonlinearity_1d(input_ind):
    """Factory: handler for ops whose single varying input enters nonlinearly."""
    def dispatch(explainer, op, *grads):
        # Defer to the shared 1d nonlinearity attribution rule.
        return nonlinearity_1d_handler(input_ind, explainer, op, *grads)
    return dispatch
def nonlinearity_1d_handler(input_ind, explainer, op, *grads):
    """Rescale-rule attribution for an op that is nonlinear in one input:
    gradient is replaced by (xout - rout) / (xin - rin), falling back to the
    original gradient where the input delta is ~0.
    """
    # make sure only the given input varies
    op_inputs = op.inputs
    if op_inputs is None:
        op_inputs = op.outputs[0].op.inputs
    for i in range(len(op_inputs)):
        if i != input_ind:
            assert not explainer._variable_inputs(op)[i], str(i) + "th input to " + op.name + " cannot vary!"
    xin0, rin0 = tf.split(op_inputs[input_ind], 2)
    # NOTE(review): indexing op.outputs with input_ind looks like it was meant
    # to be op.outputs[0]; it only works because every registration of this
    # handler uses input_ind == 0 — confirm before reusing with other indices.
    xout, rout = tf.split(op.outputs[input_ind], 2)
    delta_in0 = xin0 - rin0
    if delta_in0.shape is None:
        # Unknown static shape: assume a single feature axis for tiling.
        dup0 = [2, 1]
    else:
        dup0 = [2] + [1 for i in delta_in0.shape[1:]]
    out = [None for _ in op_inputs]
    # Strip the "shap_" prefix so the originally registered gradient is found.
    if op.type.startswith("shap_"):
        op.type = op.type[5:]
    orig_grad = explainer.orig_grads[op.type](op, grads[0])
    out[input_ind] = tf.where(
        tf.tile(tf.abs(delta_in0), dup0) < 1e-6,
        orig_grad[input_ind] if len(op_inputs) > 1 else orig_grad,
        grads[0] * tf.tile((xout - rout) / delta_in0, dup0)
    )
    return out
def nonlinearity_2d_handler(input_ind0, input_ind1, op_func, explainer, op, *grads):
    """Two-input Shapley attribution: averages each input's marginal
    contribution over both orderings (exact Shapley values for two players),
    then rescales by the corresponding input delta.
    """
    assert input_ind0 == 0 and input_ind1 == 1, "TODO: Can't yet handle double inputs that are not first!"
    xout,rout = tf.split(op.outputs[0], 2)
    in0 = op.inputs[input_ind0]
    in1 = op.inputs[input_ind1]
    xin0,rin0 = tf.split(in0, 2)
    xin1,rin1 = tf.split(in1, 2)
    delta_in0 = xin0 - rin0
    delta_in1 = xin1 - rin1
    dup0 = [2] + [1 for i in delta_in0.shape[1:]]
    # out_ab = op applied with input0 from a-side and input1 from b-side
    # (1 = explained sample, 0 = reference).
    out10 = op_func(xin0, rin1)
    out01 = op_func(rin0, xin1)
    out11,out00 = xout,rout
    # Average marginal contribution of input0 over both player orderings.
    out0 = 0.5 * (out11 - out01 + out10 - out00)
    out0 = grads[0] * tf.tile(out0 / delta_in0, dup0)
    # Same for input1.
    out1 = 0.5 * (out11 - out10 + out01 - out00)
    out1 = grads[0] * tf.tile(out1 / delta_in1, dup0)
    # Avoid divide by zero nans
    out0 = tf.where(tf.abs(tf.tile(delta_in0, dup0)) < 1e-7, tf.zeros_like(out0), out0)
    out1 = tf.where(tf.abs(tf.tile(delta_in1, dup0)) < 1e-7, tf.zeros_like(out1), out1)
    # see if due to broadcasting our gradient shapes don't match our input shapes
    if (np.any(np.array(out1.shape) != np.array(in1.shape))):
        broadcast_index = np.where(np.array(out1.shape) != np.array(in1.shape))[0][0]
        out1 = tf.reduce_sum(out1, axis=broadcast_index, keepdims=True)
    elif (np.any(np.array(out0.shape) != np.array(in0.shape))):
        broadcast_index = np.where(np.array(out0.shape) != np.array(in0.shape))[0][0]
        out0 = tf.reduce_sum(out0, axis=broadcast_index, keepdims=True)
    return [out0, out1]
def linearity_1d(input_ind):
    """Factory: handler for ops that are linear in their single varying input."""
    def dispatch(explainer, op, *grads):
        return linearity_1d_handler(input_ind, explainer, op, *grads)
    return dispatch
def linearity_1d_handler(input_ind, explainer, op, *grads):
    """Attribute through an op that is linear in its single varying input by
    delegating to the originally registered TF gradient.

    Asserts that every other input is constant across the batch.
    """
    for idx, _ in enumerate(op.inputs):
        if idx == input_ind:
            continue
        assert not explainer._variable_inputs(op)[idx], str(idx) + "th input to " + op.name + " cannot vary!"
    prefix = "shap_"
    if op.type.startswith(prefix):
        op.type = op.type[len(prefix):]
    return explainer.orig_grads[op.type](op, *grads)
def linearity_with_excluded(input_inds):
    """Factory: linear pass-through provided none of `input_inds` vary
    (indices may be negative, counted from the end of the input list)."""
    def dispatch(explainer, op, *grads):
        return linearity_with_excluded_handler(input_inds, explainer, op, *grads)
    return dispatch
def linearity_with_excluded_handler(input_inds, explainer, op, *grads):
    """Delegate to the op's original gradient after asserting that none of the
    excluded inputs vary; `input_inds` may contain negative indices measured
    from the end of the input list."""
    n_inputs = len(op.inputs)
    for idx in range(n_inputs):
        excluded = idx in input_inds or idx - n_inputs in input_inds
        if excluded:
            assert not explainer._variable_inputs(op)[idx], str(idx) + "th input to " + op.name + " cannot vary!"
    prefix = "shap_"
    if op.type.startswith(prefix):
        op.type = op.type[len(prefix):]
    return explainer.orig_grads[op.type](op, *grads)
def passthrough(explainer, op, *grads):
    """Forward attribution straight through by delegating to the op's
    originally registered TF gradient (used for always-linear ops)."""
    prefix = "shap_"
    if op.type.startswith(prefix):
        op.type = op.type[len(prefix):]
    return explainer.orig_grads[op.type](op, *grads)
def break_dependence(explainer, op, *grads):
    """Stop attribution flow at this op.

    Registered for op types that sit above the input data in the graph but
    whose outputs don't depend on the input *values* (only on e.g. shapes),
    so no credit should propagate through them.
    """
    return [None] * len(op.inputs)
op_handlers = {}

# ops that are always linear: attribution passes straight through
for _name in ("Identity", "StridedSlice", "Squeeze", "ExpandDims", "Pack", "BiasAdd",
              "Unpack", "Add", "Sub", "Merge", "Sum", "Mean", "Cast", "Transpose",
              "Enter", "Exit", "NextIteration", "Tile", "TensorArrayScatterV3",
              "TensorArrayReadV3", "TensorArrayWriteV3"):
    op_handlers[_name] = passthrough

# ops that don't pass any attributions to their inputs
for _name in ("Shape", "RandomUniform", "ZerosLike"):
    op_handlers[_name] = break_dependence
#op_handlers["StopGradient"] = break_dependence # this allows us to stop attributions when we want to (like softmax re-centering)

# ops that are linear and only allow a single input to vary
for _name in ("Reshape", "Pad", "ReverseV2", "Conv2D", "Switch", "AvgPool", "FusedBatchNorm"):
    op_handlers[_name] = linearity_1d(0)
op_handlers["ConcatV2"] = linearity_with_excluded([-1])

# ops that are nonlinear and only allow a single input to vary
for _name in ("Relu", "Elu", "Sigmoid", "Tanh", "Softplus", "Exp", "ClipByValue",
              "Rsqrt", "Square", "Max"):
    op_handlers[_name] = nonlinearity_1d(0)

# ops that are nonlinear and allow two inputs to vary
op_handlers["SquaredDifference"] = nonlinearity_1d_nonlinearity_2d(0, 1, lambda x, y: (x - y) * (x - y))
op_handlers["Minimum"] = nonlinearity_1d_nonlinearity_2d(0, 1, lambda x, y: tf.minimum(x, y))
op_handlers["Maximum"] = nonlinearity_1d_nonlinearity_2d(0, 1, lambda x, y: tf.maximum(x, y))

# ops that allow up to two inputs to vary and are linear when only one input varies
op_handlers["Mul"] = linearity_1d_nonlinearity_2d(0, 1, lambda x, y: x * y)
op_handlers["RealDiv"] = linearity_1d_nonlinearity_2d(0, 1, lambda x, y: x / y)
op_handlers["MatMul"] = linearity_1d_nonlinearity_2d(0, 1, lambda x, y: tf.matmul(x, y))

# ops that need their own custom attribution functions
op_handlers["GatherV2"] = gather
op_handlers["ResourceGather"] = gather
op_handlers["MaxPool"] = maxpool
op_handlers["Softmax"] = softmax

# TODO items
# TensorArrayGatherV3
# Max
# TensorArraySizeV3
# Range
| 45.515625 | 198 | 0.635256 |
ace126f4966db5e09fa01b9d4c7ca3e9f8f02ff1 | 9,307 | py | Python | readthedocs/projects/migrations/0008_whitelist_users.py | ardalis/readthedocs.org | 1c417d866f014e01d3842022facf7fed4c09921a | [
"MIT"
] | 2 | 2015-08-08T11:32:34.000Z | 2017-11-12T18:17:05.000Z | readthedocs/projects/migrations/0008_whitelist_users.py | ardalis/readthedocs.org | 1c417d866f014e01d3842022facf7fed4c09921a | [
"MIT"
] | 5 | 2021-02-08T20:51:19.000Z | 2021-12-13T20:45:26.000Z | readthedocs/projects/migrations/0008_whitelist_users.py | ardalis/readthedocs.org | 1c417d866f014e01d3842022facf7fed4c09921a | [
"MIT"
] | 14 | 2017-01-12T11:17:42.000Z | 2019-04-19T10:09:15.000Z | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from projects.models import Project
from core.models import UserProfile
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
class Migration(DataMigration):
def forwards(self, orm):
"Whitelist any user who has whitelisted projects."
pass
"""
for project in Project.objects.all():
if project.whitelisted:
# If user doesn't have a profile, create one
try:
profile = project.user.get_profile()
except ObjectDoesNotExist:
profile = UserProfile(user=project.user)
# Whitelist the user's profile
profile.whitelisted = True
profile.save()
"""
def backwards(self, orm):
"Whitelist all projects owned by whitelisted users."
pass
"""
for project in Project.objects.all():
# If user doesn't have a profile, skip
# (assume they are not whitelisted)
try:
profile = project.user.get_profile()
except ObjectDoesNotExist:
pass
else:
# Whitelist the user's projects
if profile.whitelisted:
project.whitelisted = True
project.save()
"""
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'projects.file': {
'Meta': {'ordering': "('denormalized_path',)", 'object_name': 'File'},
'content': ('django.db.models.fields.TextField', [], {}),
'denormalized_path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'heading': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ordering': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['projects.File']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'files'", 'to': "orm['projects.Project']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'})
},
'projects.filerevision': {
'Meta': {'ordering': "('-revision_number',)", 'object_name': 'FileRevision'},
'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'diff': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'file': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['projects.File']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_reverted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'revision_number': ('django.db.models.fields.IntegerField', [], {})
},
'projects.importedfile': {
'Meta': {'object_name': 'ImportedFile'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'md5': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'imported_files'", 'to': "orm['projects.Project']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
},
'projects.project': {
'Meta': {'ordering': "('slug',)", 'object_name': 'Project'},
'copyright': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'docs_directory': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'extensions': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'project_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'repo': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'repo_type': ('django.db.models.fields.CharField', [], {'default': "'git'", 'max_length': '10'}),
'skip': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'suffix': ('django.db.models.fields.CharField', [], {'default': "'.rst'", 'max_length': '10'}),
'theme': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '20'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'projects'", 'to': "orm['auth.User']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
}
}
complete_apps = ['projects']
| 65.542254 | 182 | 0.557215 |
ace127359edadaf598a9b33af57d8fd03947386f | 45 | py | Python | run.py | LiuYuann/chatrobot | 2895db6965669e4658adb90262580bc398a2a36f | [
"MIT"
] | null | null | null | run.py | LiuYuann/chatrobot | 2895db6965669e4658adb90262580bc398a2a36f | [
"MIT"
] | null | null | null | run.py | LiuYuann/chatrobot | 2895db6965669e4658adb90262580bc398a2a36f | [
"MIT"
] | null | null | null | from app import App
app = App()
app.start()
| 9 | 19 | 0.666667 |
ace128859c7fbaf90b0fe5fd783305ed4a535c0b | 722 | py | Python | thupoll/blueprints/telegram/mount.py | octomen/thupoll | 1114d8e9802a97c1fd9d3850c887df94e7fa609e | [
"MIT"
] | 2 | 2019-04-11T20:02:45.000Z | 2019-04-15T01:43:09.000Z | thupoll/blueprints/telegram/mount.py | octomen/thupoll | 1114d8e9802a97c1fd9d3850c887df94e7fa609e | [
"MIT"
] | 37 | 2019-03-17T14:45:38.000Z | 2019-06-07T03:19:22.000Z | thupoll/blueprints/telegram/mount.py | octomen/thupoll | 1114d8e9802a97c1fd9d3850c887df94e7fa609e | [
"MIT"
] | null | null | null | from thupoll.blueprints.telegram.hook import TelegramHook
from thupoll.blueprints.telegram.handler import (
InviteHandler, ChatMembersHandler)
from thupoll.blueprints.telegram.filters import (
MemberJoinFilter, MemberLeftFilter)
from thupoll.settings import env
def mount(hook: TelegramHook):
"""Mount handlers to hook"""
invite_handler = InviteHandler(env.thupoll_url, env("TOKEN_TTL_DAYS", 10))
hook.mount_command("invite", invite_handler.invite)
hook.mount_command("start", invite_handler.invite)
members_handler = ChatMembersHandler()
hook.mount_message_handler(MemberJoinFilter(), members_handler.on_join)
hook.mount_message_handler(MemberLeftFilter(), members_handler.on_left)
| 40.111111 | 78 | 0.793629 |
ace12961d102bab8e42be3b89e6fa0909d8ee89e | 519 | py | Python | print_bot_id.py | bruferrari/slack-bot | 9acad87e274d8ffc11e049b6c1bb8258f9a8bc61 | [
"Apache-2.0"
] | null | null | null | print_bot_id.py | bruferrari/slack-bot | 9acad87e274d8ffc11e049b6c1bb8258f9a8bc61 | [
"Apache-2.0"
] | null | null | null | print_bot_id.py | bruferrari/slack-bot | 9acad87e274d8ffc11e049b6c1bb8258f9a8bc61 | [
"Apache-2.0"
] | null | null | null | import os
from slackclient import SlackClient
BOT_NAME = 'nxer'
slack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))
if __name__ == "__main__":
api_call = slack_client.api_call("users.list")
if api_call.get('ok'):
users = api_call.get('members')
for user in users:
if 'name' in user and user.get('name') == BOT_NAME:
print("Bot ID for '" + user['name'] + "' is '" + user.get('id'))
else:
print("could not find bot user with name " + BOT_NAME)
| 30.529412 | 80 | 0.614644 |
ace129c1ff0475a97013ec58d66ba64ec37cf500 | 13,782 | py | Python | Agent/Agent.py | LuEE-C/Generative_NLP_RL_GAN | 03b43100e75ea69f750c9ab22daf4f0694f0b47d | [
"MIT"
] | 11 | 2019-10-25T12:36:11.000Z | 2022-02-12T15:30:58.000Z | Agent/Agent.py | LuEE-C/Generative_NLP_RL_GAN | 03b43100e75ea69f750c9ab22daf4f0694f0b47d | [
"MIT"
] | null | null | null | Agent/Agent.py | LuEE-C/Generative_NLP_RL_GAN | 03b43100e75ea69f750c9ab22daf4f0694f0b47d | [
"MIT"
] | 1 | 2019-03-07T07:47:26.000Z | 2019-03-07T07:47:26.000Z | import os
import numba as nb
import numpy as np
import math
from random import random, randint
from keras.optimizers import Adam
from keras.layers import Input, Dense, Embedding, PReLU, BatchNormalization, Conv1D
from keras.models import Model
from Environnement.Environnement import Environnement
from LSTM_Model import LSTM_Model
from NoisyDense import NoisyDense
from PriorityExperienceReplay.PriorityExperienceReplay import Experience
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
class Agent:
def __init__(self, cutoff=8, from_save=False, gamma=.9, batch_size=32, min_history=64000, lr=0.0000625,
sigma_init=0.5, target_network_period=32000, adam_e=1.5*10e-4, atoms=51,
discriminator_loss_limits=0.1, n_steps=3):
self.cutoff = cutoff
self.environnement = Environnement(cutoff=cutoff, min_frequency_words=300000)
self.vocab = self.environnement.different_words
self.batch_size = batch_size
self.n_steps = n_steps
self.labels = np.array([1] * self.batch_size + [0] * self.batch_size)
self.gammas = np.array([gamma ** (i + 1) for i in range(self.n_steps + 1)]).astype(np.float32)
self.atoms = atoms
self.v_max = np.sum([0.5 * gam for gam in self.gammas])
self.v_min = - self.v_max
self.delta_z = (self.v_max - self.v_min) / float(self.atoms - 1)
self.z_steps = np.array([self.v_min + i * self.delta_z for i in range(self.atoms)]).astype(np.float32)
self.epsilon_greedy_max = 0.8
self.sigma_init = sigma_init
self.min_history = min_history
self.lr = lr
self.target_network_period = target_network_period
self.adam_e = adam_e
self.discriminator_loss_limit = discriminator_loss_limits
self.model, self.target_model = self._build_model(), self._build_model()
self.discriminator = self._build_discriminator()
self.dataset_epoch = 0
if from_save is True:
self.model.load_weights('model')
self.target_model.load_weights('model')
self.discriminator.load_weights('discriminator')
def update_target_model(self):
self.target_model.set_weights(self.model.get_weights())
def get_average_noisy_weight(self):
average = []
for i in range(self.vocab):
average.append(np.mean(self.model.get_layer('Word_'+str(i)).get_weights()[1]))
return np.mean(average), np.std(average)
def _build_model(self):
state_input = Input(shape=(self.cutoff,))
embedding = Embedding(self.vocab + 1, 50, input_length=self.cutoff)(state_input)
main_network = Conv1D(256, 3, padding='same')(embedding)
main_network = PReLU()(main_network)
main_network = LSTM_Model(main_network, 100, batch_norm=False)
main_network = Dense(256)(main_network)
main_network = PReLU()(main_network)
main_network = Dense(512)(main_network)
main_network = PReLU()(main_network)
dist_list = []
for i in range(self.vocab):
dist_list.append(NoisyDense(self.atoms, activation='softmax', sigma_init=self.sigma_init, name='Word_' + str(i))(main_network))
actor = Model(inputs=[state_input], outputs=dist_list)
actor.compile(optimizer=Adam(lr=self.lr, epsilon=self.adam_e),
loss='categorical_crossentropy')
return actor
def _build_discriminator(self):
state_input = Input(shape=(self.cutoff,))
embedding = Embedding(self.vocab + 1, 50, input_length=self.cutoff)(state_input)
main_network = Conv1D(256, 3, padding='same')(embedding)
main_network = PReLU()(main_network)
main_network = BatchNormalization()(main_network)
main_network = LSTM_Model(main_network, 100)
main_network = Dense(256)(main_network)
main_network = PReLU()(main_network)
main_network = BatchNormalization()(main_network)
main_network = Dense(512)(main_network)
main_network = PReLU()(main_network)
main_network = BatchNormalization()(main_network)
discriminator_output = Dense(1, activation='sigmoid')(main_network)
discriminator = Model(inputs=[state_input], outputs=discriminator_output)
discriminator.compile(optimizer=Adam(),
loss='binary_crossentropy')
discriminator.summary()
return discriminator
def train(self, epoch):
e, total_frames = 0, 0
while e <= epoch:
print('Epoch :', e)
discrim_loss, model_loss_array, memory = [1], [], Experience(memory_size=1000000, batch_size=self.batch_size, alpha=0.5)
while np.mean(discrim_loss[-20:]) >= self.discriminator_loss_limit:
discrim_loss.append(self.train_discriminator())
for i in range(self.min_history//200):
states, rewards, actions, states_prime = self.get_training_batch(200, self.get_epsilon(np.mean(discrim_loss[-20:])))
for j in range(200):
memory.add((states[j], rewards[j], actions[j], states_prime[j]), 5)
trained_frames = 1
while np.mean(discrim_loss[-20:]) < 0.5 + 0.5 * 500000/(trained_frames * 10 * 4 * self.batch_size):
if trained_frames % (self.target_network_period//(10 * 4 * self.batch_size)) == 0:
self.update_target_model()
states, rewards, actions, states_prime = self.get_training_batch(10 * self.batch_size, self.get_epsilon(np.mean(discrim_loss[-20:])))
for j in range(10 * self.batch_size):
memory.add((states[j], rewards[j], actions[j], states_prime[j]), 5)
for j in range(10 * 4):
out, weights, indices = memory.select(min(1, 0.4 + 1.2 * np.mean(discrim_loss[-20:]))) # Scales b value
model_loss_array.append(self.train_on_replay(out, self.batch_size)[0])
memory.priority_update(indices, [model_loss_array[-1] for _ in range(self.batch_size)])
trained_frames += 1
total_frames += 1
discrim_loss.append(self.train_discriminator(evaluate=True))
if trained_frames % 100 == 0:
print()
mean, std = self.get_average_noisy_weight()
print('Average loss of model :', np.mean(model_loss_array[-10 * 4 * 20:]),
'\tAverage discriminator loss :', np.mean(discrim_loss[-20:]),
'\tFrames passed :', trained_frames * 10 * 4 * self.batch_size,
'\tTotal frames passed :', total_frames * 10 * 4 * self.batch_size,
'\tAverage Noisy Weights :', mean,
'\tSTD Noisy Weights :', std,
'\tEpoch :', e,
'\tDataset Epoch :', self.dataset_epoch
)
self.print_pred()
self.print_pred()
self.update_target_model()
e += 1
def get_epsilon(self, discrim_loss):
epsilon = min(1.0, (0.1 / discrim_loss)) * self.epsilon_greedy_max
return epsilon
@nb.jit
def train_discriminator(self, evaluate=False):
fake_batch = self.get_fake_batch()
real_batch, done = self.environnement.query_state(self.batch_size)
if done is True:
self.dataset_epoch += 1
print('Current Dataset Epoch :', self.dataset_epoch)
batch = np.vstack((real_batch, fake_batch))
if evaluate is True:
return self.discriminator.evaluate([batch], [self.labels], verbose=0)
return self.discriminator.train_on_batch([batch], [self.labels])
@nb.jit
def make_seed(self, seed=None):
if seed is None:
# This is the kinda Z vector
seed = np.random.random_integers(low=0, high=self.vocab - 1, size=(1, self.cutoff))
predictions = self.target_model.predict(seed)
for _ in range(self.cutoff - 1):
numba_optimised_seed_switch(predictions, seed, self.z_steps)
predictions = self.target_model.predict(seed)
numba_optimised_seed_switch(predictions, seed, self.z_steps)
return seed
@nb.jit
def get_fake_batch(self):
seed = self.make_seed()
fake_batch = np.zeros((self.batch_size, self.cutoff))
for i in range(self.batch_size):
predictions = self.target_model.predict([seed])
numba_optimised_pred_rollover(predictions, i, seed, fake_batch, self.z_steps)
return fake_batch
@nb.jit
def get_training_batch(self, batch_size, epsilon):
seed = self.make_seed()
states = np.zeros((batch_size + self.n_steps, self.cutoff))
actions = np.zeros((batch_size + self.n_steps, 1))
for i in range(batch_size + self.n_steps):
action = -1
predictions = self.target_model.predict(seed)
if random() < epsilon:
action = randint(0, self.vocab - 1)
numba_optimised_pred_rollover_with_actions(predictions, i, seed, states, self.z_steps, actions, action)
rewards = self.get_values(states)
states_prime = states[self.n_steps:]
return states[:-self.n_steps], rewards, actions, states_prime
@nb.jit
def get_values(self, fake_batch):
values = self.discriminator.predict(fake_batch)
return numba_optimised_nstep_value_function(values, values.shape[0], self.n_steps, self.gammas)
@nb.jit
def print_pred(self):
fake_state = self.make_seed()
pred = ""
for _ in range(4):
for j in range(self.cutoff):
pred += self.environnement.ind_to_word[fake_state[0][j]]
pred += " "
fake_state = self.make_seed(fake_state)
for j in range(self.cutoff):
pred += self.environnement.ind_to_word[fake_state[0][j]]
pred += " "
print(pred)
# @nb.jit
def train_on_replay(self, data, batch_size):
states, reward, actions, state_prime = make_dataset(data=data, batch_size=batch_size)
m_prob = np.zeros((batch_size, self.vocab, self.atoms))
z = self.target_model.predict(state_prime)
z = np.array(z)
z = np.swapaxes(z, 0, 1)
q = np.sum(np.multiply(z, self.z_steps), axis=-1)
optimal_action_idxs = np.argmax(q, axis=-1)
update_m_prob(self.batch_size, self.atoms, self.v_max, self.v_min, reward, self.gammas[-1],
self.z_steps, self.delta_z, m_prob, actions, z, optimal_action_idxs)
return self.model.train_on_batch(states, [m_prob[:,i,:] for i in range(self.vocab)])
@nb.jit(nb.void(nb.int64,nb.int64,nb.float32,nb.float32, nb.float32[:],nb.float32,
                nb.float32[:],nb.float32,nb.float32[:,:,:],nb.float32[:,:], nb.float32[:,:,:], nb.float32[:]))
def update_m_prob(batch_size, atoms, v_max, v_min, reward, gamma, z_steps, delta_z, m_prob, actions, z, optimal_action_idxs):
    """C51 categorical projection: distribute each Bellman-shifted atom's
    probability mass onto its two neighbouring support atoms, accumulating
    into m_prob (modified in place) for the taken action.
    """
    for i in range(batch_size):
        for j in range(atoms):
            # Clip the shifted atom onto [v_min, v_max], then locate it on the support.
            Tz = min(v_max, max(v_min, reward[i] + gamma * z_steps[j]))
            bj = (Tz - v_min) / delta_z
            m_l, m_u = math.floor(bj), math.ceil(bj)
            if m_l == m_u:
                # bj sits exactly on an atom: assign the full mass (the split
                # below would multiply it by zero on both sides and lose it).
                m_prob[i, actions[i, 0], int(bj)] += z[i, optimal_action_idxs[i], j]
            else:
                m_prob[i, actions[i, 0], int(m_l)] += z[i, optimal_action_idxs[i], j] * (m_u - bj)
                # BUG FIX: the upper share must go to the upper atom m_u; the
                # original added both shares to m_l, collapsing the projection.
                m_prob[i, actions[i, 0], int(m_u)] += z[i, optimal_action_idxs[i], j] * (bj - m_l)
# @nb.jit
def make_dataset(data, batch_size):
    """Unpack the first `batch_size` replay tuples into batched numpy arrays.

    Each element of `data` is a (state, reward, action, state_prime) tuple as
    stored in the replay memory.

    Returns:
        (states, reward, actions, state_prime) arrays; actions is int64.
    """
    columns = list(zip(*data[:batch_size]))
    states = np.array(columns[0])
    reward = np.array(columns[1])
    # BUG FIX: np.int was deprecated and removed (NumPy 1.24+) — it raised
    # AttributeError on modern NumPy. Use the explicit int64 dtype instead.
    actions = np.array(columns[2]).astype(np.int64)
    state_prime = np.array(columns[3])
    return states, reward, actions, state_prime
@nb.jit(nb.int64(nb.float32[:,:], nb.float32[:]))
def get_optimal_action(z, z_distrib):
    """Greedy action from per-action atom distributions: expected value per
    action is sum_j p(action, atom_j) * z_j; return the argmax index."""
    z_concat = np.vstack(z)
    q = np.sum(np.multiply(z_concat, z_distrib), axis=1)
    action_idx = np.argmax(q)
    return action_idx
# Some strong numba optimisation in bottlenecks
# N_Step reward function
@nb.jit(nb.float32[:,:](nb.float32[:,:], nb.int64, nb.int64, nb.float32[:]))
def numba_optimised_nstep_value_function(values, batch_size, n_step, gammas):
    """Accumulate n-step discounted returns in place:
    values[i] += sum_{j<n_step} gammas[j] * values[i + j + 1].

    BUG FIX: the original iterated range(batch_size), reading values[i + j + 1]
    past the end of the array for the last n_step rows — silent only because
    numba skips bounds checking. Those trailing rows are never consumed by
    callers, so the loop is restricted to rows with a full lookahead window.
    """
    for i in range(batch_size - n_step):
        for j in range(n_step):
            values[i] += values[i + j + 1] * gammas[j]
    return values[:batch_size]
@nb.jit(nb.void(nb.float32[:,:], nb.int64, nb.float32[:,:], nb.float32[:,:], nb.float32[:]))
def numba_optimised_pred_rollover(predictions, index, seed, fake_batch, z_distrib):
    """Shift the seed window left by one token, append the greedy token, and
    record the resulting window as row `index` of fake_batch (all in place)."""
    seed[:, :-1] = seed[:, 1:]
    seed[:, -1] = get_optimal_action(predictions, z_distrib)
    fake_batch[index] = seed
@nb.jit(nb.void(nb.float32[:,:], nb.int64, nb.float32[:,:], nb.float32[:,:], nb.float32[:], nb.float32[:,:], nb.int64))
def numba_optimised_pred_rollover_with_actions(predictions, index, seed, states, z_distrib, actions, action):
    """Like numba_optimised_pred_rollover, but records the chosen token in
    `actions` as well; `action == -1` is the sentinel meaning "act greedily",
    any other value forces that (exploratory) token."""
    if action != -1:
        choice = action
    else:
        choice = get_optimal_action(predictions, z_distrib)
    seed[:, :-1] = seed[:, 1:]
    seed[:, -1] = choice
    actions[index] = choice
    states[index] = seed
@nb.jit(nb.void(nb.float32[:,:], nb.int64, nb.float32[:,:]))
def numba_optimised_seed_switch(predictions, seed, z_distrib):
    """Advance the seed window in place: drop the oldest token and append the
    greedy token implied by `predictions`."""
    seed[:, :-1] = seed[:, 1:]
    seed[:, -1] = get_optimal_action(predictions, z_distrib)
if __name__ == '__main__':
    # Entry point: train a fresh agent (no checkpoint restore).  The cutoff
    # and batch size are presumably environment-specific -- see the Agent
    # class defined elsewhere in this file.
    agent = Agent(cutoff=5, from_save=False, batch_size=32)
    agent.train(epoch=5000)
| 38.713483 | 149 | 0.625381 |
ace12a11d06d7bf3bc83bbc8c5d297a16e6b53aa | 193 | py | Python | example_native/vistanlp_core/setup.py | berquist/namespace_package_testing | a570bc94f3bc6b773e06afc18d9e19917b978b95 | [
"MIT"
] | null | null | null | example_native/vistanlp_core/setup.py | berquist/namespace_package_testing | a570bc94f3bc6b773e06afc18d9e19917b978b95 | [
"MIT"
] | null | null | null | example_native/vistanlp_core/setup.py | berquist/namespace_package_testing | a570bc94f3bc6b773e06afc18d9e19917b978b95 | [
"MIT"
] | null | null | null | from setuptools import setup, find_namespace_packages
# Packaging for the `vistanlp-core` namespace-package distribution.
# find_namespace_packages() discovers PEP 420 implicit namespace packages
# automatically, so no explicit package list is required.
setup(
    name="vistanlp-core",
    # Not necessary anymore?
    # packages=["vistanlp.core"],
    packages=find_namespace_packages(),
)
| 21.444444 | 53 | 0.720207 |
ace12b908d523d0922d071fa7a67314f17622d26 | 1,949 | py | Python | Python-code-snippets-201-300/269-US-States.py | abartoha/python-snippets-ref | 04e4feada96077f0e849b277204c012194e8fbcd | [
"Unlicense"
] | null | null | null | Python-code-snippets-201-300/269-US-States.py | abartoha/python-snippets-ref | 04e4feada96077f0e849b277204c012194e8fbcd | [
"Unlicense"
] | null | null | null | Python-code-snippets-201-300/269-US-States.py | abartoha/python-snippets-ref | 04e4feada96077f0e849b277204c012194e8fbcd | [
"Unlicense"
] | null | null | null | """Code snippets vol-54
269-Translate US States.
Download all snippets so far:
https://wp.me/Pa5TU8-1yg
Blog: stevepython.wordpress.com
requirements:
None
Origin:
https://gist.github.com/rogerallen/1583593
"""
# Full name -> USPS two-letter code for the 50 states, DC and 5 territories
# (56 entries total; see the self-test at the bottom of the file).
us_state_abbrev = {
    'Alabama': 'AL',
    'Alaska': 'AK',
    'American Samoa': 'AS',
    'Arizona': 'AZ',
    'Arkansas': 'AR',
    'California': 'CA',
    'Colorado': 'CO',
    'Connecticut': 'CT',
    'Delaware': 'DE',
    'District of Columbia': 'DC',
    'Florida': 'FL',
    'Georgia': 'GA',
    'Guam': 'GU',
    'Hawaii': 'HI',
    'Idaho': 'ID',
    'Illinois': 'IL',
    'Indiana': 'IN',
    'Iowa': 'IA',
    'Kansas': 'KS',
    'Kentucky': 'KY',
    'Louisiana': 'LA',
    'Maine': 'ME',
    'Maryland': 'MD',
    'Massachusetts': 'MA',
    'Michigan': 'MI',
    'Minnesota': 'MN',
    'Mississippi': 'MS',
    'Missouri': 'MO',
    'Montana': 'MT',
    'Nebraska': 'NE',
    'Nevada': 'NV',
    'New Hampshire': 'NH',
    'New Jersey': 'NJ',
    'New Mexico': 'NM',
    'New York': 'NY',
    'North Carolina': 'NC',
    'North Dakota': 'ND',
    'Northern Mariana Islands':'MP',
    'Ohio': 'OH',
    'Oklahoma': 'OK',
    'Oregon': 'OR',
    'Pennsylvania': 'PA',
    'Puerto Rico': 'PR',
    'Rhode Island': 'RI',
    'South Carolina': 'SC',
    'South Dakota': 'SD',
    'Tennessee': 'TN',
    'Texas': 'TX',
    'Utah': 'UT',
    'Vermont': 'VT',
    'Virgin Islands': 'VI',
    'Virginia': 'VA',
    'Washington': 'WA',
    'West Virginia': 'WV',
    'Wisconsin': 'WI',
    'Wyoming': 'WY'
}
# Reverse lookup table: USPS abbreviation -> full name.
# (Idea credit: @kinghelix and @trevormarburger.)
abbrev_us_state = {abbrev: name for name, abbrev in us_state_abbrev.items()}
# Simple smoke tests, run only when executed as a script.
if __name__ == '__main__':
    # Fixed "Wisconin" -> "Wisconsin" typo in the printed labels.
    print("Wisconsin --> WI?", us_state_abbrev['Wisconsin'] == 'WI')
    print("WI --> Wisconsin?", abbrev_us_state['WI'] == 'Wisconsin')
    print("Number of entries (50 states, DC, 5 Territories) == 56? ", 56 == len(us_state_abbrev))
| 23.768293 | 97 | 0.544895 |
ace12cd096f3ed958812c96215024871a81907a3 | 1,994 | py | Python | event_adder-1.0.0.py | Yuanqi-Hong/cw_calendar_event_adder | 2d4a58f50fe0d76173f8048391b881b033861ac3 | [
"MIT"
] | null | null | null | event_adder-1.0.0.py | Yuanqi-Hong/cw_calendar_event_adder | 2d4a58f50fe0d76173f8048391b881b033861ac3 | [
"MIT"
] | null | null | null | event_adder-1.0.0.py | Yuanqi-Hong/cw_calendar_event_adder | 2d4a58f50fe0d76173f8048391b881b033861ac3 | [
"MIT"
] | null | null | null | # This Python script automatically adds an event
# to Columbia University Canvas Calendar
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from datetime import datetime
# --- Log in to CourseWorks (Canvas) via Selenium ---------------------------
# NOTE(review): credentials are hard-coded placeholders; replace before use.
driver = webdriver.Chrome()
driver.get('https://courseworks2.columbia.edu')
uni = driver.find_element_by_xpath('//*[@id="username"]')
uni.send_keys('yourUNI')
password = driver.find_element_by_xpath('//*[@id="password"]')
password.send_keys('yourPassword')
login = driver.find_element_by_xpath('//*[@id="fm1"]/div[3]/input[4]')
login.click()
# Navigate to the calendar page of the logged-in session.
calendar = driver.find_element_by_xpath('//*[@id="global_nav_calendar_link"]')
calendar.click()
# --- Scrape the event page in a second browser window ----------------------
driver1 = webdriver.Chrome()
event_url = input('Paste event URL here: ')
driver1.get(event_url)
title = driver1.find_element_by_class_name('eventTitle').text
title
# The bare expressions above/below ("title", "when_str", "where") are no-op
# REPL leftovers; they have no effect when run as a script.
when = driver1.find_element_by_class_name('eventWhen')
when_str = when.text
when_str
where = driver1.find_element_by_class_name('eventWhere').text
where
# Parse "Weekday, Month D, YYYY H:MM AM - H:MM PM" into start/end times.
when1 = datetime.strptime(when_str.split(' - ')[0], '%A, %B %d, %Y %I:%M %p')
when2 = datetime.strptime(when_str.split(' - ')[1], '%I:%M %p')
date = when1.strftime('%Y-%m-%d')
from_ = when1.strftime('%H:%M')
_to = when2.strftime('%H:%M')
# --- Fill the "create event" form in the calendar window -------------------
create_btn = driver.find_element_by_xpath('//*[@id="create_new_event_link"]')
create_btn.click()
Title = driver.find_element_by_xpath('//*[@id="calendar_event_title"]')
Title.send_keys(title)
Date = driver.find_element_by_xpath('//*[@id="calendar_event_date"]')
Date.clear()
Date.send_keys(date)
From = driver.find_element_by_xpath('//*[@id="calendar_event_start_time"]')
From.clear()
From.send_keys(from_)
To = driver.find_element_by_xpath('//*[@id="calendar_event_end_time"]')
To.clear()
To.send_keys(_to)
Location = driver.find_element_by_xpath('//*[@id="calendar_event_location_name"]')
# Location text is two lines (building + room, presumably); join with a space.
Location.send_keys(where.split('\n')[0] + ' ' + where.split('\n')[1])
# Pressing Enter submits the form.
Location.send_keys('\n')
driver.quit()
driver1.quit()
raise SystemExit()
ace12d3838cadac7c20081708772e97c5bb6c753 | 2,316 | py | Python | homeassistant/components/netatmo/__init__.py | billyburly/home-assistant | 9795449d22783e77a0ca7b745f15c89a830c5cc6 | [
"Apache-2.0"
] | 5 | 2020-09-17T10:48:51.000Z | 2021-11-22T00:08:17.000Z | homeassistant/components/netatmo/__init__.py | billyburly/home-assistant | 9795449d22783e77a0ca7b745f15c89a830c5cc6 | [
"Apache-2.0"
] | 7 | 2016-04-09T20:56:30.000Z | 2016-04-19T21:28:46.000Z | homeassistant/components/netatmo/__init__.py | billyburly/home-assistant | 9795449d22783e77a0ca7b745f15c89a830c5cc6 | [
"Apache-2.0"
] | 6 | 2019-12-01T19:06:52.000Z | 2020-09-17T00:57:06.000Z | """The Netatmo integration."""
import asyncio
import logging
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET
from homeassistant.core import HomeAssistant
from homeassistant.helpers import config_entry_oauth2_flow, config_validation as cv
from . import api, config_flow
from .const import AUTH, DATA_PERSONS, DOMAIN, OAUTH2_AUTHORIZE, OAUTH2_TOKEN
_LOGGER = logging.getLogger(__name__)
# YAML configuration schema: the netatmo domain requires an OAuth client id
# and secret; unknown top-level keys are tolerated (ALLOW_EXTRA).
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Required(CONF_CLIENT_ID): cv.string,
                vol.Required(CONF_CLIENT_SECRET): cv.string,
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)

# Entity platforms this integration forwards config entries to.
PLATFORMS = ["binary_sensor", "camera", "climate", "sensor"]
async def async_setup(hass: HomeAssistant, config: dict):
    """Set up the Netatmo component from YAML configuration."""
    # Initialise domain storage up front; DATA_PERSONS is populated later.
    hass.data[DOMAIN] = {DATA_PERSONS: {}}

    if DOMAIN not in config:
        # Nothing configured via YAML; config entries may still set things up.
        return True

    domain_config = config[DOMAIN]
    oauth_impl = config_entry_oauth2_flow.LocalOAuth2Implementation(
        hass,
        DOMAIN,
        domain_config[CONF_CLIENT_ID],
        domain_config[CONF_CLIENT_SECRET],
        OAUTH2_AUTHORIZE,
        OAUTH2_TOKEN,
    )
    config_flow.NetatmoFlowHandler.async_register_implementation(hass, oauth_impl)

    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Set up Netatmo from a config entry."""
    implementation = await config_entry_oauth2_flow.async_get_config_entry_implementation(
        hass, entry
    )

    # Store the authenticated API client keyed by this entry's id.
    auth = api.ConfigEntryNetatmoAuth(hass, entry, implementation)
    hass.data[DOMAIN][entry.entry_id] = {AUTH: auth}

    # Fan out platform setups as background tasks (not awaited here).
    for platform in PLATFORMS:
        forward = hass.config_entries.async_forward_entry_setup(entry, platform)
        hass.async_create_task(forward)

    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Unload a config entry."""
    # Unload every platform concurrently; the entry counts as unloaded only
    # when all platforms report success.
    unload_tasks = [
        hass.config_entries.async_forward_entry_unload(entry, platform)
        for platform in PLATFORMS
    ]
    results = await asyncio.gather(*unload_tasks)
    unload_ok = all(results)

    if unload_ok:
        hass.data[DOMAIN].pop(entry.entry_id)

    return unload_ok
| 26.62069 | 90 | 0.665371 |
ace12dcfb37042c3848586e229dcbf67f24d840c | 1,225 | py | Python | src/environ/__init__.py | hynek/environ-conf | 74023280867fb24a02630624e339d32d24abf5f3 | [
"Apache-2.0"
] | 222 | 2019-06-03T12:43:50.000Z | 2022-03-22T07:47:59.000Z | src/environ/__init__.py | hynek/environ-conf | 74023280867fb24a02630624e339d32d24abf5f3 | [
"Apache-2.0"
] | 22 | 2019-06-07T15:09:17.000Z | 2022-03-30T14:14:11.000Z | src/environ/__init__.py | hynek/environ-conf | 74023280867fb24a02630624e339d32d24abf5f3 | [
"Apache-2.0"
] | 16 | 2019-06-03T02:44:51.000Z | 2022-02-16T15:00:14.000Z | # Copyright 2017 Hynek Schlawack
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import secrets
from ._environ_config import (
bool_var,
config,
generate_help,
group,
to_config,
var,
)
from .exceptions import MissingEnvValueError
# Package metadata (also consumed by the build tooling / docs).
__version__ = "21.3.0.dev0"

__title__ = "environ_config"
__description__ = "Boilerplate-free configuration with env variables."
__uri__ = "https://github.com/hynek/environ_config"

__author__ = "Hynek Schlawack"
__email__ = "hs@ox.cx"

__license__ = "Apache-2.0"
__copyright__ = "Copyright (c) 2017 " + __author__

# Public API of the package (what `from environ import *` exposes).
__all__ = [
    "MissingEnvValueError",
    "bool_var",
    "config",
    "generate_help",
    "group",
    "secrets",
    "to_config",
    "var",
]
| 24.5 | 74 | 0.720816 |
ace12ecd08ce6a132f6d40f66cb6adefb08a7b7d | 743 | py | Python | consumers/python/faker_consumer.py | bietdoikiem/pipeline-asm-1 | e9b505d80c6efe6c0343e0a0f32d54f8fdb214a6 | [
"MIT"
] | null | null | null | consumers/python/faker_consumer.py | bietdoikiem/pipeline-asm-1 | e9b505d80c6efe6c0343e0a0f32d54f8fdb214a6 | [
"MIT"
] | null | null | null | consumers/python/faker_consumer.py | bietdoikiem/pipeline-asm-1 | e9b505d80c6efe6c0343e0a0f32d54f8fdb214a6 | [
"MIT"
] | 1 | 2021-09-25T09:50:48.000Z | 2021-09-25T09:50:48.000Z | from kafka import KafkaConsumer
# import pandas as pd
import os, json
import ast
if __name__ == "__main__":
    print("Starting Faker Consumer")
    # Connection settings come from the environment (docker-compose style).
    TOPIC_NAME = os.environ.get("TOPIC_NAME")
    KAFKA_BROKER_URL = os.environ.get("KAFKA_BROKER_URL", "localhost:9092")
    # NOTE(review): the two CASSANDRA_* settings are read but never used in
    # this script -- presumably intended for a later sink stage.
    CASSANDRA_HOST = os.environ.get("CASSANDRA_HOST", "localhost")
    CASSANDRA_KEYSPACE = os.environ.get("CASSANDRA_KEYSPACE", "kafkapipeline")

    print("Setting up Kafka consumer at {}".format(KAFKA_BROKER_URL))
    consumer = KafkaConsumer(TOPIC_NAME, bootstrap_servers=[KAFKA_BROKER_URL])
    print("Waiting for faker msg...")

    # Blocks forever, decoding each message payload as UTF-8 JSON.
    for msg in consumer:
        msg = msg.value.decode('utf-8')
        jsonData=json.loads(msg)
        # add print for checking
        print(jsonData)
ace12ecf9a46f923da4bff1dad42750d353757b3 | 517 | py | Python | remove_duplicate_inplace.py | VivekWesley/DS_and_Algo_Python | 991a3f738e34c00c0480e0780989d46038fba47c | [
"MIT"
] | null | null | null | remove_duplicate_inplace.py | VivekWesley/DS_and_Algo_Python | 991a3f738e34c00c0480e0780989d46038fba47c | [
"MIT"
] | null | null | null | remove_duplicate_inplace.py | VivekWesley/DS_and_Algo_Python | 991a3f738e34c00c0480e0780989d46038fba47c | [
"MIT"
] | null | null | null | def remove_duplicate_inplace(str):
my_set = set([])
read_stream = 0
write_stream = 0
while(read_stream < len(str)):
if(str[read_stream] not in my_set):
my_set.add(str[read_stream])
str[write_stream] = str[read_stream]
write_stream += 1
read_stream += 1
str[write_stream] = '\0'
return str
# Demo: note the shadowing of builtin `str`, and that the "original array"
# print happens before the in-place mutation below.
str = [ 'D', 'A', 'D', 'B', 'A', 'C', 'D', 'A' ]
print ("original array: ", str)
print ("distinct characters: ", remove_duplicate_inplace(str))
ace12f5062589616edc8a6cef2c061ae31c6abe5 | 2,468 | py | Python | setup.py | AishwaryaKalloli/koalas | 8d35a74508c1319996c8c27e2a5e24af52b9ee31 | [
"Apache-2.0"
] | null | null | null | setup.py | AishwaryaKalloli/koalas | 8d35a74508c1319996c8c27e2a5e24af52b9ee31 | [
"Apache-2.0"
] | null | null | null | setup.py | AishwaryaKalloli/koalas | 8d35a74508c1319996c8c27e2a5e24af52b9ee31 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
from io import open
import sys
from setuptools import setup
from os import path
DESCRIPTION = "Koalas: pandas API on Apache Spark"

# Long description for PyPI is the repo README, read relative to this file.
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    LONG_DESCRIPTION = f.read()

# Load __version__ by exec'ing the version module; requires running from the
# repository root (relative path), hence the explicit error message below.
try:
    exec(open('databricks/koalas/version.py').read())
except IOError:
    print("Failed to load Koalas version file for packaging. You must be in Koalas root dir.",
          file=sys.stderr)
    sys.exit(-1)

VERSION = __version__  # noqa

setup(
    name='koalas',
    version=VERSION,
    packages=['databricks', 'databricks.koalas', 'databricks.koalas.missing',
              'databricks.koalas.spark', 'databricks.koalas.typedef',
              'databricks.koalas.usage_logging'],
    # Optional features: Spark itself, MLflow integration, Plotly plotting.
    extras_require={
        'spark': ['pyspark>=2.4.0'],
        'mlflow': ['mlflow>=1.0'],
        'plotly': ['plotly>=4.8'],
    },
    python_requires='>=3.5,<3.9',
    install_requires=[
        'pandas>=0.23.2',
        'pyarrow>=0.10',
        'numpy>=1.14',
        'matplotlib>=3.0.0',
    ],
    author="Databricks",
    author_email="koalas@databricks.com",
    license='http://www.apache.org/licenses/LICENSE-2.0',
    url="https://github.com/databricks/koalas",
    project_urls={
        'Bug Tracker': 'https://github.com/databricks/koalas/issues',
        'Documentation': 'https://koalas.readthedocs.io/',
        'Source Code': 'https://github.com/databricks/koalas'
    },
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    long_description_content_type='text/markdown',
    classifiers=[
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
)
| 32.473684 | 94 | 0.665316 |
ace130510805368dc470bef4b076b35d85cca903 | 2,398 | py | Python | var/spack/repos/builtin/packages/qucs/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11 | 2015-10-04T02:17:46.000Z | 2018-02-07T18:23:00.000Z | var/spack/repos/builtin/packages/qucs/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22 | 2017-08-01T22:45:10.000Z | 2022-03-10T07:46:31.000Z | var/spack/repos/builtin/packages/qucs/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4 | 2016-06-10T17:57:39.000Z | 2018-09-11T04:59:38.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class Qucs(AutotoolsPackage):
    """QUCS - Quite Universal Circuit Simulator

    Qucs is an integrated circuit simulator which means you are able to
    setup a circuit with a graphical user interface (GUI) and simulate
    the large-signal, small-signal and noise behaviour of the circuit.
    After that simulation has finished you can view the simulation results
    on a presentation page or window.
    """

    homepage = "http://qucs.sourceforge.net/"
    url = "https://sourceforge.net/projects/qucs/files/qucs/0.0.19/qucs-0.0.19.tar.gz"
    git = "https://git.code.sf.net/p/qucs/git"

    version('master', branch='master')
    version('0.0.19', sha256='45c6434fde24c533e63550675ac21cdbd3cc6cbba29b82a1dc3f36e7dd4b3b3e')
    version('0.0.18', sha256='3609a18b57485dc9f19886ac6694667f3251702175bd1cbbbea37981b2c482a7')

    # Can use external simulators:
    variant(
        'simulators', default='qucs', multi=True,
        values=('qucs', 'ngspice', 'xyce'),
        description='Circuits simulators (builtin qucsator and external ngspice, xyce)'
    )

    # Build-time toolchain dependencies (autotools bootstrap + parser tools).
    depends_on('flex@2.5.9:', type='build')
    depends_on('bison@2.5:', type='build')
    depends_on('pkgconfig', type='build')
    depends_on("autoconf@2.64:", type='build')
    depends_on("automake@1.7.0:", type='build')
    depends_on("libtool", type='build')
    depends_on("m4", type='build')

    depends_on('adms', when='@0.0.19:')
    depends_on('qt@4.8.5:4.8.7')
    depends_on('gperf@3.0.1:')

    # Simulators can be qucsator, the Circuit simulator of the Qucs project
    # from https://github.com/Qucs/qucsator, or they can also be provided by
    # ngspice and xyce.
    # See https://qucs-help.readthedocs.io/en/spice4qucs/BasSim.html
    depends_on('ngspice build=bin', type='run', when='simulators=ngspice')
    depends_on('xyce', type='run', when='simulators=xyce')

    def autoreconf(self, spec, prefix):
        # Regenerate the autotools build system; the bootstrap script name
        # differs between releases, so probe for it.
        sh = which('sh')
        if os.path.exists('bootstrap'):
            sh('./bootstrap')
        else:
            sh('./autogen.sh')

    def configure_args(self):
        # Documentation build is disabled to keep the dependency set small.
        args = ['--disable-doc']
        return args
| 36.892308 | 96 | 0.660967 |
ace13067cc615ced5595d7800c0a618a946d810a | 302 | py | Python | ___Python/Torsten/Python-Kurs/p06_persistence/m01_pickle.py | uvenil/PythonKurs201806 | 85afa9c9515f5dd8bec0c546f077d8cc39568fe8 | [
"Apache-2.0"
] | null | null | null | ___Python/Torsten/Python-Kurs/p06_persistence/m01_pickle.py | uvenil/PythonKurs201806 | 85afa9c9515f5dd8bec0c546f077d8cc39568fe8 | [
"Apache-2.0"
] | null | null | null | ___Python/Torsten/Python-Kurs/p06_persistence/m01_pickle.py | uvenil/PythonKurs201806 | 85afa9c9515f5dd8bec0c546f077d8cc39568fe8 | [
"Apache-2.0"
] | null | null | null | import pickle
from p01_kennenlernen.m01_kennenlernen import teilnehmerliste
# Pickling
# Serialise the participant list to a binary file on disk.
with open("teilnehmer.pickle", "wb") as datei:
    pickle.dump(teilnehmerliste, datei)

# Un-Pickling
# Read it back to demonstrate the round trip.
with open("teilnehmer.pickle", "rb") as datei:
    liste = pickle.load(datei)

print(liste)
| 17.764706 | 62 | 0.705298 |
ace13099561e8437ceaf171e151caa9c61d56ed5 | 156 | py | Python | calc_plus/other.py | keegang6705/calc_plus | b11339a2d119185989d6e140a982970f5499d7d6 | [
"MIT"
] | null | null | null | calc_plus/other.py | keegang6705/calc_plus | b11339a2d119185989d6e140a982970f5499d7d6 | [
"MIT"
] | null | null | null | calc_plus/other.py | keegang6705/calc_plus | b11339a2d119185989d6e140a982970f5499d7d6 | [
"MIT"
] | null | null | null | import random
def middle(min: int,max: int):
    """Return the midpoint (arithmetic mean) of *min* and *max*.

    Note: parameter names shadow the builtins; kept for API compatibility.
    """
    endpoint_sum = min + max
    return endpoint_sum / 2
def chance(number: int,change: int):
    """Return *number* perturbed by a uniform random offset in [-change, change]."""
    offset = random.randint(-change, change)
    return number + offset
| 26 | 49 | 0.705128 |
ace130b89dfcfd2906523c0b84ad798baac53716 | 10,071 | py | Python | Collections-a-installer/community-general-2.4.0/plugins/modules/database/vertica/vertica_info.py | d-amien-b/simple-getwordpress | da90d515a0aa837b633d50db4d91d22b031c04a2 | [
"MIT"
] | null | null | null | Collections-a-installer/community-general-2.4.0/plugins/modules/database/vertica/vertica_info.py | d-amien-b/simple-getwordpress | da90d515a0aa837b633d50db4d91d22b031c04a2 | [
"MIT"
] | null | null | null | Collections-a-installer/community-general-2.4.0/plugins/modules/database/vertica/vertica_info.py | d-amien-b/simple-getwordpress | da90d515a0aa837b633d50db4d91d22b031c04a2 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: vertica_info
short_description: Gathers Vertica database facts.
description:
- Gathers Vertica database information.
- This module was called C(vertica_facts) before Ansible 2.9, returning C(ansible_facts).
Note that the M(community.general.vertica_info) module no longer returns C(ansible_facts)!
options:
cluster:
description:
- Name of the cluster running the schema.
default: localhost
type: str
port:
description:
Database port to connect to.
default: 5433
type: str
db:
description:
- Name of the database running the schema.
type: str
login_user:
description:
- The username used to authenticate with.
default: dbadmin
type: str
login_password:
description:
- The password used to authenticate with.
type: str
notes:
- The default authentication assumes that you are either logging in as or sudo'ing
to the C(dbadmin) account on the host.
- This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
that C(unixODBC) and C(pyodbc) are installed on the host and properly configured.
- Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
requirements: [ 'unixODBC', 'pyodbc' ]
author: "Dariusz Owczarek (@dareko)"
'''
EXAMPLES = """
- name: Gathering vertica facts
community.general.vertica_info: db=db_name
register: result
- name: Print schemas
ansible.builtin.debug:
msg: "{{ result.vertica_schemas }}"
"""
import traceback
PYODBC_IMP_ERR = None
try:
import pyodbc
except ImportError:
PYODBC_IMP_ERR = traceback.format_exc()
pyodbc_found = False
else:
pyodbc_found = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
class NotSupportedError(Exception):
    """Module-specific error type; caught separately in main() to report
    unsupported operations without a generic traceback message."""
    pass
# module specific functions
def get_schema_facts(cursor, schema=''):
    """Collect per-schema facts, keyed by lowercased schema name.

    Runs two queries: one for schema metadata, one for role grants, merging
    USAGE/CREATE grantees into the corresponding schema entry.  An empty
    *schema* matches all non-system schemas (via the ``? = ''`` SQL trick).
    """
    facts = {}
    cursor.execute("""
        select schema_name, schema_owner, create_time
        from schemata
        where not is_system_schema and schema_name not in ('public')
        and (? = '' or schema_name ilike ?)
    """, schema, schema)
    # Drain the result set in chunks of 100 rows.
    while True:
        rows = cursor.fetchmany(100)
        if not rows:
            break
        for row in rows:
            facts[row.schema_name.lower()] = {
                'name': row.schema_name,
                'owner': row.schema_owner,
                'create_time': str(row.create_time),
                'usage_roles': [],
                'create_roles': []}
    cursor.execute("""
        select g.object_name as schema_name, r.name as role_name,
        lower(g.privileges_description) privileges_description
        from roles r join grants g
        on g.grantee = r.name and g.object_type='SCHEMA'
        and g.privileges_description like '%USAGE%'
        and g.grantee not in ('public', 'dbadmin')
        and (? = '' or g.object_name ilike ?)
    """, schema, schema)
    while True:
        rows = cursor.fetchmany(100)
        if not rows:
            break
        for row in rows:
            schema_key = row.schema_name.lower()
            # Grants mentioning "create" are tracked separately from
            # plain usage grants.
            if 'create' in row.privileges_description:
                facts[schema_key]['create_roles'].append(row.role_name)
            else:
                facts[schema_key]['usage_roles'].append(row.role_name)
    return facts
def get_user_facts(cursor, user=''):
    """Collect per-user facts (non-superusers only), keyed by lowercased name.

    An empty *user* matches all users.  Role lists come back from Vertica as
    comma-separated strings and are split into Python lists.
    """
    facts = {}
    cursor.execute("""
        select u.user_name, u.is_locked, u.lock_time,
        p.password, p.acctexpired as is_expired,
        u.profile_name, u.resource_pool,
        u.all_roles, u.default_roles
        from users u join password_auditor p on p.user_id = u.user_id
        where not u.is_super_user
        and (? = '' or u.user_name ilike ?)
    """, user, user)
    while True:
        rows = cursor.fetchmany(100)
        if not rows:
            break
        for row in rows:
            user_key = row.user_name.lower()
            facts[user_key] = {
                'name': row.user_name,
                'locked': str(row.is_locked),
                'password': row.password,
                'expired': str(row.is_expired),
                'profile': row.profile_name,
                'resource_pool': row.resource_pool,
                'roles': [],
                'default_roles': []}
            # locked_time only exists for locked accounts.
            if row.is_locked:
                facts[user_key]['locked_time'] = str(row.lock_time)
            if row.all_roles:
                facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',')
            if row.default_roles:
                facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',')
    return facts
def get_role_facts(cursor, role=''):
    """Collect per-role facts (name and assigned roles), keyed by lowercased name.

    An empty *role* matches all roles; assigned roles arrive as a
    comma-separated string and are split into a list.
    """
    facts = {}
    cursor.execute("""
        select r.name, r.assigned_roles
        from roles r
        where (? = '' or r.name ilike ?)
    """, role, role)
    # Drain the result set in 100-row batches.
    batch = cursor.fetchmany(100)
    while batch:
        for record in batch:
            assigned = []
            if record.assigned_roles:
                assigned = record.assigned_roles.replace(' ', '').split(',')
            facts[record.name.lower()] = {
                'name': record.name,
                'assigned_roles': assigned}
        batch = cursor.fetchmany(100)
    return facts
def get_configuration_facts(cursor, parameter=''):
    """Collect cluster-wide configuration parameters, keyed by lowercased name.

    An empty *parameter* matches all parameters; only 'ALL'-node (cluster
    level) values are reported.
    """
    cursor.execute("""
        select c.parameter_name, c.current_value, c.default_value
        from configuration_parameters c
        where c.node_name = 'ALL'
        and (? = '' or c.parameter_name ilike ?)
    """, parameter, parameter)
    facts = {}
    # Drain the result set in 100-row batches.
    batch = cursor.fetchmany(100)
    while batch:
        for record in batch:
            facts[record.parameter_name.lower()] = {
                'parameter_name': record.parameter_name,
                'current_value': record.current_value,
                'default_value': record.default_value}
        batch = cursor.fetchmany(100)
    return facts
def get_node_facts(cursor, schema=''):
    """Collect per-node facts, keyed by node address.

    The *schema* parameter is unused but kept for signature compatibility
    with the sibling ``get_*_facts`` helpers.
    """
    cursor.execute("""
        select node_name, node_address, export_address, node_state, node_type,
        catalog_path
        from nodes
    """)
    facts = {}
    # Drain the result set in 100-row batches.
    batch = cursor.fetchmany(100)
    while batch:
        for record in batch:
            facts[record.node_address] = {
                'node_name': record.node_name,
                'export_address': record.export_address,
                'node_state': record.node_state,
                'node_type': record.node_type,
                'catalog_path': record.catalog_path}
        batch = cursor.fetchmany(100)
    return facts
# module logic
def main():
    """Ansible entry point: connect to Vertica via ODBC and emit facts."""
    module = AnsibleModule(
        argument_spec=dict(
            cluster=dict(default='localhost'),
            port=dict(default='5433'),
            db=dict(default=None),
            login_user=dict(default='dbadmin'),
            login_password=dict(default=None, no_log=True),
        ), supports_check_mode=True)
    # Invoked under the legacy alias? Then keep returning ansible_facts.
    is_old_facts = module._name in ('vertica_facts', 'community.general.vertica_facts')
    if is_old_facts:
        module.deprecate("The 'vertica_facts' module has been renamed to 'vertica_info', "
                         "and the renamed one no longer returns ansible_facts",
                         version='3.0.0', collection_name='community.general')  # was Ansible 2.13

    if not pyodbc_found:
        module.fail_json(msg=missing_required_lib('pyodbc'), exception=PYODBC_IMP_ERR)

    db = ''
    if module.params['db']:
        db = module.params['db']

    try:
        # Build the ODBC DSN from module parameters; load balancing is
        # always enabled.
        dsn = (
            "Driver=Vertica;"
            "Server=%s;"
            "Port=%s;"
            "Database=%s;"
            "User=%s;"
            "Password=%s;"
            "ConnectionLoadBalance=%s"
        ) % (module.params['cluster'], module.params['port'], db,
             module.params['login_user'], module.params['login_password'], 'true')
        db_conn = pyodbc.connect(dsn, autocommit=True)
        cursor = db_conn.cursor()
    except Exception as e:
        module.fail_json(msg="Unable to connect to database: %s." % to_native(e), exception=traceback.format_exc())

    try:
        # Gather all fact groups; each helper drains its own queries.
        schema_facts = get_schema_facts(cursor)
        user_facts = get_user_facts(cursor)
        role_facts = get_role_facts(cursor)
        configuration_facts = get_configuration_facts(cursor)
        node_facts = get_node_facts(cursor)

        if is_old_facts:
            module.exit_json(changed=False,
                             ansible_facts={'vertica_schemas': schema_facts,
                                            'vertica_users': user_facts,
                                            'vertica_roles': role_facts,
                                            'vertica_configuration': configuration_facts,
                                            'vertica_nodes': node_facts})
        else:
            module.exit_json(changed=False,
                             vertica_schemas=schema_facts,
                             vertica_users=user_facts,
                             vertica_roles=role_facts,
                             vertica_configuration=configuration_facts,
                             vertica_nodes=node_facts)
    except NotSupportedError as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
    except SystemExit:
        # avoid catching this on python 2.4
        raise
    except Exception as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())


if __name__ == '__main__':
    main()
| 33.909091 | 115 | 0.596465 |
ace130c6131d4060ee960685cf63927262f653d0 | 27,544 | py | Python | myems-api/reports/equipmentoutput.py | hyh123a/myems | 669ab8554995a622da595384698d670f9cee61f8 | [
"MIT"
] | 1 | 2021-08-04T13:41:45.000Z | 2021-08-04T13:41:45.000Z | myems-api/reports/equipmentoutput.py | hyh123a/myems | 669ab8554995a622da595384698d670f9cee61f8 | [
"MIT"
] | null | null | null | myems-api/reports/equipmentoutput.py | hyh123a/myems | 669ab8554995a622da595384698d670f9cee61f8 | [
"MIT"
] | null | null | null | import falcon
import simplejson as json
import mysql.connector
import config
from datetime import datetime, timedelta, timezone
from core import utilities
from decimal import Decimal
class Reporting:
    @staticmethod
    def __init__():
        # NOTE(review): a zero-argument @staticmethod ``__init__`` makes
        # ``Reporting()`` raise TypeError, because Python still passes the
        # new instance as the first positional argument.  Confirm how this
        # falcon resource is registered before relying on instantiation.
        pass
@staticmethod
def on_options(req, resp):
resp.status = falcon.HTTP_200
####################################################################################################################
# PROCEDURES
# Step 1: valid parameters
# Step 2: query the equipment
# Step 3: query energy categories
# Step 4: query associated points
# Step 5: query base period energy output
# Step 6: query reporting period energy output
# Step 7: query tariff data
# Step 8: query associated points data
# Step 9: construct the report
####################################################################################################################
@staticmethod
def on_get(req, resp):
print(req.params)
equipment_id = req.params.get('equipmentid')
period_type = req.params.get('periodtype')
base_start_datetime_local = req.params.get('baseperiodstartdatetime')
base_end_datetime_local = req.params.get('baseperiodenddatetime')
reporting_start_datetime_local = req.params.get('reportingperiodstartdatetime')
reporting_end_datetime_local = req.params.get('reportingperiodenddatetime')
################################################################################################################
# Step 1: valid parameters
################################################################################################################
if equipment_id is None:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST', description='API.INVALID_EQUIPMENT_ID')
else:
equipment_id = str.strip(equipment_id)
if not equipment_id.isdigit() or int(equipment_id) <= 0:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST', description='API.INVALID_EQUIPMENT_ID')
if period_type is None:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST', description='API.INVALID_PERIOD_TYPE')
else:
period_type = str.strip(period_type)
if period_type not in ['hourly', 'daily', 'monthly', 'yearly']:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST', description='API.INVALID_PERIOD_TYPE')
timezone_offset = int(config.utc_offset[1:3]) * 60 + int(config.utc_offset[4:6])
if config.utc_offset[0] == '-':
timezone_offset = -timezone_offset
base_start_datetime_utc = None
if base_start_datetime_local is not None and len(str.strip(base_start_datetime_local)) > 0:
base_start_datetime_local = str.strip(base_start_datetime_local)
try:
base_start_datetime_utc = datetime.strptime(base_start_datetime_local,
'%Y-%m-%dT%H:%M:%S').replace(tzinfo=timezone.utc) - \
timedelta(minutes=timezone_offset)
except ValueError:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_BASE_PERIOD_START_DATETIME")
base_end_datetime_utc = None
if base_end_datetime_local is not None and len(str.strip(base_end_datetime_local)) > 0:
base_end_datetime_local = str.strip(base_end_datetime_local)
try:
base_end_datetime_utc = datetime.strptime(base_end_datetime_local,
'%Y-%m-%dT%H:%M:%S').replace(tzinfo=timezone.utc) - \
timedelta(minutes=timezone_offset)
except ValueError:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_BASE_PERIOD_END_DATETIME")
if base_start_datetime_utc is not None and base_end_datetime_utc is not None and \
base_start_datetime_utc >= base_end_datetime_utc:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_BASE_PERIOD_END_DATETIME')
if reporting_start_datetime_local is None:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_REPORTING_PERIOD_START_DATETIME")
else:
reporting_start_datetime_local = str.strip(reporting_start_datetime_local)
try:
reporting_start_datetime_utc = datetime.strptime(reporting_start_datetime_local,
'%Y-%m-%dT%H:%M:%S').replace(tzinfo=timezone.utc) - \
timedelta(minutes=timezone_offset)
except ValueError:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_REPORTING_PERIOD_START_DATETIME")
if reporting_end_datetime_local is None:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_REPORTING_PERIOD_END_DATETIME")
else:
reporting_end_datetime_local = str.strip(reporting_end_datetime_local)
try:
reporting_end_datetime_utc = datetime.strptime(reporting_end_datetime_local,
'%Y-%m-%dT%H:%M:%S').replace(tzinfo=timezone.utc) - \
timedelta(minutes=timezone_offset)
except ValueError:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_REPORTING_PERIOD_END_DATETIME")
if reporting_start_datetime_utc >= reporting_end_datetime_utc:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_REPORTING_PERIOD_END_DATETIME')
################################################################################################################
# Step 2: query the equipment
################################################################################################################
cnx_system = mysql.connector.connect(**config.myems_system_db)
cursor_system = cnx_system.cursor()
cnx_energy = mysql.connector.connect(**config.myems_energy_db)
cursor_energy = cnx_energy.cursor()
cnx_historical = mysql.connector.connect(**config.myems_historical_db)
cursor_historical = cnx_historical.cursor()
cursor_system.execute(" SELECT id, name, cost_center_id "
" FROM tbl_equipments "
" WHERE id = %s ", (equipment_id,))
row_equipment = cursor_system.fetchone()
if row_equipment is None:
if cursor_system:
cursor_system.close()
if cnx_system:
cnx_system.disconnect()
if cursor_energy:
cursor_energy.close()
if cnx_energy:
cnx_energy.disconnect()
if cnx_historical:
cnx_historical.close()
if cursor_historical:
cursor_historical.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND', description='API.EQUIPMENT_NOT_FOUND')
equipment = dict()
equipment['id'] = row_equipment[0]
equipment['name'] = row_equipment[1]
equipment['cost_center_id'] = row_equipment[2]
################################################################################################################
# Step 3: query energy categories
################################################################################################################
energy_category_set = set()
# query energy categories in base period
cursor_energy.execute(" SELECT DISTINCT(energy_category_id) "
" FROM tbl_equipment_output_category_hourly "
" WHERE equipment_id = %s "
" AND start_datetime_utc >= %s "
" AND start_datetime_utc < %s ",
(equipment['id'], base_start_datetime_utc, base_end_datetime_utc))
rows_energy_categories = cursor_energy.fetchall()
if rows_energy_categories is not None or len(rows_energy_categories) > 0:
for row_energy_category in rows_energy_categories:
energy_category_set.add(row_energy_category[0])
# query energy categories in reporting period
cursor_energy.execute(" SELECT DISTINCT(energy_category_id) "
" FROM tbl_equipment_output_category_hourly "
" WHERE equipment_id = %s "
" AND start_datetime_utc >= %s "
" AND start_datetime_utc < %s ",
(equipment['id'], reporting_start_datetime_utc, reporting_end_datetime_utc))
rows_energy_categories = cursor_energy.fetchall()
if rows_energy_categories is not None or len(rows_energy_categories) > 0:
for row_energy_category in rows_energy_categories:
energy_category_set.add(row_energy_category[0])
# query all energy categories in base period and reporting period
cursor_system.execute(" SELECT id, name, unit_of_measure, kgce, kgco2e "
" FROM tbl_energy_categories "
" ORDER BY id ", )
rows_energy_categories = cursor_system.fetchall()
if rows_energy_categories is None or len(rows_energy_categories) == 0:
if cursor_system:
cursor_system.close()
if cnx_system:
cnx_system.disconnect()
if cursor_energy:
cursor_energy.close()
if cnx_energy:
cnx_energy.disconnect()
if cnx_historical:
cnx_historical.close()
if cursor_historical:
cursor_historical.disconnect()
raise falcon.HTTPError(falcon.HTTP_404,
title='API.NOT_FOUND',
description='API.ENERGY_CATEGORY_NOT_FOUND')
energy_category_dict = dict()
for row_energy_category in rows_energy_categories:
if row_energy_category[0] in energy_category_set:
energy_category_dict[row_energy_category[0]] = {"name": row_energy_category[1],
"unit_of_measure": row_energy_category[2],
"kgce": row_energy_category[3],
"kgco2e": row_energy_category[4]}
################################################################################################################
# Step 4: query associated points
################################################################################################################
point_list = list()
cursor_system.execute(" SELECT p.id, p.name, p.units, p.object_type "
" FROM tbl_equipments e, tbl_equipments_parameters ep, tbl_points p "
" WHERE e.id = %s AND e.id = ep.equipment_id AND ep.parameter_type = 'point' "
" AND ep.point_id = p.id "
" ORDER BY p.id ", (equipment['id'],))
rows_points = cursor_system.fetchall()
if rows_points is not None and len(rows_points) > 0:
for row in rows_points:
point_list.append({"id": row[0], "name": row[1], "units": row[2], "object_type": row[3]})
################################################################################################################
# Step 5: query base period energy output
################################################################################################################
base = dict()
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
base[energy_category_id] = dict()
base[energy_category_id]['timestamps'] = list()
base[energy_category_id]['values'] = list()
base[energy_category_id]['subtotal'] = Decimal(0.0)
cursor_energy.execute(" SELECT start_datetime_utc, actual_value "
" FROM tbl_equipment_output_category_hourly "
" WHERE equipment_id = %s "
" AND energy_category_id = %s "
" AND start_datetime_utc >= %s "
" AND start_datetime_utc < %s "
" ORDER BY start_datetime_utc ",
(equipment['id'],
energy_category_id,
base_start_datetime_utc,
base_end_datetime_utc))
rows_equipment_hourly = cursor_energy.fetchall()
rows_equipment_periodically = utilities.aggregate_hourly_data_by_period(rows_equipment_hourly,
base_start_datetime_utc,
base_end_datetime_utc,
period_type)
for row_equipment_periodically in rows_equipment_periodically:
current_datetime_local = row_equipment_periodically[0].replace(tzinfo=timezone.utc) + \
timedelta(minutes=timezone_offset)
if period_type == 'hourly':
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S')
elif period_type == 'daily':
current_datetime = current_datetime_local.strftime('%Y-%m-%d')
elif period_type == 'monthly':
current_datetime = current_datetime_local.strftime('%Y-%m')
elif period_type == 'yearly':
current_datetime = current_datetime_local.strftime('%Y')
actual_value = Decimal(0.0) if row_equipment_periodically[1] is None \
else row_equipment_periodically[1]
base[energy_category_id]['timestamps'].append(current_datetime)
base[energy_category_id]['values'].append(actual_value)
base[energy_category_id]['subtotal'] += actual_value
################################################################################################################
# Step 8: query reporting period energy output
################################################################################################################
reporting = dict()
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
reporting[energy_category_id] = dict()
reporting[energy_category_id]['timestamps'] = list()
reporting[energy_category_id]['values'] = list()
reporting[energy_category_id]['subtotal'] = Decimal(0.0)
cursor_energy.execute(" SELECT start_datetime_utc, actual_value "
" FROM tbl_equipment_output_category_hourly "
" WHERE equipment_id = %s "
" AND energy_category_id = %s "
" AND start_datetime_utc >= %s "
" AND start_datetime_utc < %s "
" ORDER BY start_datetime_utc ",
(equipment['id'],
energy_category_id,
reporting_start_datetime_utc,
reporting_end_datetime_utc))
rows_equipment_hourly = cursor_energy.fetchall()
rows_equipment_periodically = utilities.aggregate_hourly_data_by_period(rows_equipment_hourly,
reporting_start_datetime_utc,
reporting_end_datetime_utc,
period_type)
for row_equipment_periodically in rows_equipment_periodically:
current_datetime_local = row_equipment_periodically[0].replace(tzinfo=timezone.utc) + \
timedelta(minutes=timezone_offset)
if period_type == 'hourly':
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S')
elif period_type == 'daily':
current_datetime = current_datetime_local.strftime('%Y-%m-%d')
elif period_type == 'monthly':
current_datetime = current_datetime_local.strftime('%Y-%m')
elif period_type == 'yearly':
current_datetime = current_datetime_local.strftime('%Y')
actual_value = Decimal(0.0) if row_equipment_periodically[1] is None \
else row_equipment_periodically[1]
reporting[energy_category_id]['timestamps'].append(current_datetime)
reporting[energy_category_id]['values'].append(actual_value)
reporting[energy_category_id]['subtotal'] += actual_value
################################################################################################################
# Step 9: query tariff data
################################################################################################################
parameters_data = dict()
parameters_data['names'] = list()
parameters_data['timestamps'] = list()
parameters_data['values'] = list()
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
energy_category_tariff_dict = utilities.get_energy_category_tariffs(equipment['cost_center_id'],
energy_category_id,
reporting_start_datetime_utc,
reporting_end_datetime_utc)
tariff_timestamp_list = list()
tariff_value_list = list()
for k, v in energy_category_tariff_dict.items():
# convert k from utc to local
k = k + timedelta(minutes=timezone_offset)
tariff_timestamp_list.append(k.isoformat()[0:19][0:19])
tariff_value_list.append(v)
parameters_data['names'].append('TARIFF-' + energy_category_dict[energy_category_id]['name'])
parameters_data['timestamps'].append(tariff_timestamp_list)
parameters_data['values'].append(tariff_value_list)
################################################################################################################
# Step 10: query associated points data
################################################################################################################
for point in point_list:
point_values = []
point_timestamps = []
if point['object_type'] == 'ANALOG_VALUE':
query = (" SELECT utc_date_time, actual_value "
" FROM tbl_analog_value "
" WHERE point_id = %s "
" AND utc_date_time BETWEEN %s AND %s "
" ORDER BY utc_date_time ")
cursor_historical.execute(query, (point['id'],
reporting_start_datetime_utc,
reporting_end_datetime_utc))
rows = cursor_historical.fetchall()
if rows is not None and len(rows) > 0:
for row in rows:
current_datetime_local = row[0].replace(tzinfo=timezone.utc) + \
timedelta(minutes=timezone_offset)
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S')
point_timestamps.append(current_datetime)
point_values.append(row[1])
elif point['object_type'] == 'ENERGY_VALUE':
query = (" SELECT utc_date_time, actual_value "
" FROM tbl_energy_value "
" WHERE point_id = %s "
" AND utc_date_time BETWEEN %s AND %s "
" ORDER BY utc_date_time ")
cursor_historical.execute(query, (point['id'],
reporting_start_datetime_utc,
reporting_end_datetime_utc))
rows = cursor_historical.fetchall()
if rows is not None and len(rows) > 0:
for row in rows:
current_datetime_local = row[0].replace(tzinfo=timezone.utc) + \
timedelta(minutes=timezone_offset)
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S')
point_timestamps.append(current_datetime)
point_values.append(row[1])
elif point['object_type'] == 'DIGITAL_VALUE':
query = (" SELECT utc_date_time, actual_value "
" FROM tbl_digital_value "
" WHERE point_id = %s "
" AND utc_date_time BETWEEN %s AND %s ")
cursor_historical.execute(query, (point['id'],
reporting_start_datetime_utc,
reporting_end_datetime_utc))
rows = cursor_historical.fetchall()
if rows is not None and len(rows) > 0:
for row in rows:
current_datetime_local = row[0].replace(tzinfo=timezone.utc) + \
timedelta(minutes=timezone_offset)
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S')
point_timestamps.append(current_datetime)
point_values.append(row[1])
parameters_data['names'].append(point['name'] + ' (' + point['units'] + ')')
parameters_data['timestamps'].append(point_timestamps)
parameters_data['values'].append(point_values)
################################################################################################################
# Step 12: construct the report
################################################################################################################
if cursor_system:
cursor_system.close()
if cnx_system:
cnx_system.disconnect()
if cursor_energy:
cursor_energy.close()
if cnx_energy:
cnx_energy.disconnect()
result = dict()
result['equipment'] = dict()
result['equipment']['name'] = equipment['name']
result['base_period'] = dict()
result['base_period']['names'] = list()
result['base_period']['units'] = list()
result['base_period']['timestamps'] = list()
result['base_period']['values'] = list()
result['base_period']['subtotals'] = list()
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
result['base_period']['names'].append(energy_category_dict[energy_category_id]['name'])
result['base_period']['units'].append(energy_category_dict[energy_category_id]['unit_of_measure'])
result['base_period']['timestamps'].append(base[energy_category_id]['timestamps'])
result['base_period']['values'].append(base[energy_category_id]['values'])
result['base_period']['subtotals'].append(base[energy_category_id]['subtotal'])
result['reporting_period'] = dict()
result['reporting_period']['names'] = list()
result['reporting_period']['energy_category_ids'] = list()
result['reporting_period']['units'] = list()
result['reporting_period']['timestamps'] = list()
result['reporting_period']['values'] = list()
result['reporting_period']['subtotals'] = list()
result['reporting_period']['increment_rates'] = list()
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
result['reporting_period']['names'].append(energy_category_dict[energy_category_id]['name'])
result['reporting_period']['energy_category_ids'].append(energy_category_id)
result['reporting_period']['units'].append(energy_category_dict[energy_category_id]['unit_of_measure'])
result['reporting_period']['timestamps'].append(reporting[energy_category_id]['timestamps'])
result['reporting_period']['values'].append(reporting[energy_category_id]['values'])
result['reporting_period']['subtotals'].append(reporting[energy_category_id]['subtotal'])
result['reporting_period']['increment_rates'].append(
(reporting[energy_category_id]['subtotal'] - base[energy_category_id]['subtotal']) /
base[energy_category_id]['subtotal']
if base[energy_category_id]['subtotal'] > 0.0 else None)
result['parameters'] = {
"names": parameters_data['names'],
"timestamps": parameters_data['timestamps'],
"values": parameters_data['values']
}
resp.body = json.dumps(result)
| 58.232558 | 120 | 0.50196 |
ace132bffa992e4436d7919b8f2e108e35de70f2 | 13,396 | py | Python | airbnb/api_v2.py | JSenart/airbnb-python | facaf635c8410838e4d7587fe2331961dbb324ae | [
"WTFPL"
] | null | null | null | airbnb/api_v2.py | JSenart/airbnb-python | facaf635c8410838e4d7587fe2331961dbb324ae | [
"WTFPL"
] | null | null | null | airbnb/api_v2.py | JSenart/airbnb-python | facaf635c8410838e4d7587fe2331961dbb324ae | [
"WTFPL"
] | null | null | null | import requests
import json
import datetime
from dateutil.tz import tzlocal
from airbnb.random_request import RandomRequest
import os
import functools
API_URL = "https://www.airbnb.com/api/v2"
API_KEY = "d306zoyjsyarp7ifhu67rjxn52tv0t20"
class AuthError(Exception):
"""
Authentication error
"""
pass
class VerificationError(AuthError):
"""
Authentication error
"""
pass
class MissingParameterError(Exception):
"""
Missing parameter error
"""
pass
class MissingAccessTokenError(MissingParameterError):
"""
Missing access token error
"""
pass
def require_auth(function):
"""
A decorator that wraps the passed in function and raises exception
if access token is missing
"""
@functools.wraps(function)
def wrapper(self, *args, **kwargs):
if not self.access_token():
raise MissingAccessTokenError
return function(self, *args, **kwargs)
return wrapper
def randomizable(function):
"""
A decorator which randomizes requests if needed
"""
@functools.wraps(function)
def wrapper(self, *args, **kwargs):
if self.randomize:
self.randomize_headers()
return function(self, *args, **kwargs)
return wrapper
class ApiV2(object):
"""Base API class
>>> api = ApiV2(access_token=os.environ.get("AIRBNB_ACCESS_TOKEN"))
>>> api.get_profile() # doctest: +ELLIPSIS
{...}
>>> api = ApiV2()
>>> api.get_homes("Lisbon, Portugal") # doctest: +ELLIPSIS
{...}
>>> api.get_homes(gps_lat=55.6123352, gps_lng=37.7117917) # doctest: +ELLIPSIS
{...}
>>> api.get_homes("Lisbon, Portugal", checkin=datetime.datetime.now().strftime("%Y-%m-%d"), checkout=(datetime.datetime.now() + datetime.timedelta(days=30)).strftime("%Y-%m-%d")) # doctest: +ELLIPSIS
{...}
>>> api.get_calendar(975964) # doctest: +ELLIPSIS
{...}
>>> api.get_reviews(975964) # doctest: +ELLIPSIS
{...}
>>> api = ApiV2(randomize=True)
>>> api.get_listing_details(975964) # doctest: +ELLIPSIS
{...}
"""
def __init__(
self,
username=None,
password=None,
access_token=None,
api_key=API_KEY,
session_cookie=None,
proxy=None,
randomize=None,
):
self._session = requests.Session()
self.b = None
self.user_agent = "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.55 Mobile Safari/537.36"
self.udid = "9120210f8fb1ae837affff54a0a2f64da821d227"
self.uuid = "C326397B-3A38-474B-973B-F022E6E4E6CC"
self.randomize = randomize
self._session.headers = {
'Accept': '*/*',
'Connection': 'keep-alive' ,
'user-agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.55 Mobile Safari/537.36',
'cookie': f'_aat={access_token};'
}
if proxy:
self._session.proxies = {"http": proxy, "https": proxy}
if access_token:
self._access_token = access_token
if session_cookie and "_airbed_session_id=" in session_cookie:
self._session.headers.update({"Cookie": session_cookie})
self._session.headers.update({"x-airbnb-oauth-token": self._access_token})
elif username and password:
login_payload = {"email": username, "password": password, "type": "email"}
r = self._session.post(API_URL + "/logins", data=json.dumps(login_payload))
if r.status_code == 420:
raise VerificationError
elif r.status_code == 403:
raise AuthError
self._access_token = r.json()["login"]["id"]
print("Your access token: {}".format(self._access_token))
self._session.headers.update({"x-airbnb-oauth-token": self._access_token})
else:
# no auth
pass
def access_token(self):
return self._access_token
def set_user_agent(self, user_agent):
self.user_agent = user_agent
self._session.headers["user-agent"] = user_agent
def set_udid(self, udid):
self.udid = udid
self._session.headers["airbnb-device-id"] = udid
def set_uuid(self, uuid):
self.uuid = uuid
self._session.headers["x-airbnb-advertising-id"] = uuid
def randomize_headers(self):
self.set_user_agent(RandomRequest.get_random_user_agent())
self.set_udid(RandomRequest.get_random_udid())
self.set_uuid(RandomRequest.get_random_uuid())
@require_auth
def get_profile(self):
"""
Get my own profile
"""
r = self._session.get(API_URL + "/logins/me")
r.raise_for_status()
return r.json()
@randomizable
def get_calendar(
self,
listing_id,
starting_month=datetime.datetime.now().month,
starting_year=datetime.datetime.now().year,
calendar_months=12,
):
"""
Get availability calendar for a given listing
"""
params = {
"year": str(starting_year),
"listing_id": str(listing_id),
"_format": "with_conditions",
"count": str(calendar_months),
"month": str(starting_month),
}
r = self._session.get(API_URL + "/calendar_months", params=params)
r.raise_for_status()
return r.json()
# Host APIs
@require_auth
def get_user(self, user_id):
params = {"_format": "with_content_framework_articles"}
r = self._session.get(API_URL + "/users/" + str(user_id), params=params)
r.raise_for_status()
return r.json()
@require_auth
def get_price_breakdown(self, reservation_code):
params = {"_format": "for_remy", "currency": "EUR", "locale": "pt-PT"}
r = self._session.get(
API_URL + "/homes_host_booking_pricing_quotes/" + reservation_code,
params=params,
)
r.raise_for_status()
return r.json()
@require_auth
def get_reservations(
self,
offset=0,
limit=40,
start_date=datetime.datetime.now().strftime("%Y-%m-%d"),
):
params = {
"_format": "for_remy",
"_offset": str(offset),
"_limit": str(limit),
"collection_strategy": "for_reservations_list",
"currency": "EUR",
"date_max": "",
"date_min": start_date,
"key": API_KEY,
"locale": "en",
"sort_field": "start_date",
"sort_order": "desc",
"status": "accepted,request,canceled",
}
r = self._session.get(API_URL + "/reservations", params=params)
return r.json()
@require_auth
def get_booking_details(self, code):
params = {"_format": "for_remy", "currency": "EUR", "locale": "en"}
r = self._session.get(API_URL + "/homes_booking_details/" + code, params=params)
r.raise_for_status()
return r.json()
@require_auth
def get_threads_full(self, offset=0, limit=18):
"""
Gets messaging threads.
"""
params = {
"_format": "for_messaging_sync_with_posts",
"_limit": str(limit),
"_offset": str(offset),
}
r = self._session.get(API_URL + "/threads", params=params)
r.raise_for_status()
return r.json()
@require_auth
def get_message_thread(self, thread_id, limit=50, offset=0):
"""
Gets one thread of messages.
"""
params = {
"_limit": str(limit),
"_ofset": str(offset),
"selected_inbox_type": "host",
"_format": "for_messaging_sync_with_posts",
}
r = self._session.get(API_URL + "/threads", params=params)
r.raise_for_status()
return r.json()
@require_auth
def send_message(self, thread_id, message):
"""
Sends a message in a thread.
"""
body = {
"message": message.strip(),
"thread_id": thread_id,
}
# r = self._session.post("https://www.airbnb.com/messaging/qt_reply_v2/" + str(thread_id), data=json.dumps(body))
r = self._session.post(API_URL + "/messages", data=json.dumps(body))
r.raise_for_status()
return r.json()
# User past trips and stats
@require_auth
def get_trip_schedules(self):
params = {
"_format": "for_unbundled_itinerary",
"_limit": "10",
"_offset": "0",
"client_version": "3",
"exclude_free_time": "false",
}
r = self._session.get(API_URL + "/trip_schedules", params=params)
r.raise_for_status()
return r.json()["trip_schedules"]
@require_auth
def get_travel_plans(
self, upcoming_scheduled_plans_limit=20, past_scheduled_plans_limit=8
):
now = datetime.datetime.now(tzlocal())
strftime_date = now.strftime("%Y-%m-%dT%H:%M:%S%z")
params = {
"now": "{}:{}".format(strftime_date[:-2], strftime_date[-2:]),
"upcoming_scheduled_plans_limit": upcoming_scheduled_plans_limit,
"past_scheduled_plans_limit": past_scheduled_plans_limit,
}
r = self._session.get(API_URL + "/plans", params=params)
r.raise_for_status()
return r.json()["plans"][0]
@require_auth
def get_scheduled_plan(self, identifier):
assert self._access_token
params = {"_format": "for_trip_day_view"}
r = self._session.get(
API_URL + "/scheduled_plans/{}".format(identifier), params=params
)
r.raise_for_status()
return r.json()["scheduled_plan"]
@require_auth
def get_reservation(self, reservation_id):
assert self._access_token
params = {"_format": "for_trip_planner"}
r = self._session.get(
API_URL + "/reservations/{}".format(reservation_id), params=params
)
r.raise_for_status()
return r.json()["reservation"]
@require_auth
def get_all_past_reservations(self):
past_scheduled_plan_ids = self.get_travel_plans()["past_scheduled_plans"][
"metadata"
]["cache"]["identifiers"]
past_reservations = []
for plan_id in past_scheduled_plan_ids:
scheduled_plan = self.get_scheduled_plan(plan_id)
reservation_id = scheduled_plan["events"][0]["destination"][
"reservation_key"
]
past_reservations.append(self.get_reservation(reservation_id))
return past_reservations
@require_auth
def get_total_money_spent_in_usd(self):
reservations = self.get_all_past_reservations()
total_spent = 0.0
for reservation in reservations:
if reservation["total_price_formatted"].startswith("$"):
dollars_spent = reservation["total_price_formatted"]
total_spent += float(dollars_spent[1:])
return total_spent
# Listing search
@randomizable
def get_homes(
self,
query=None,
gps_lat=None,
gps_lng=None,
checkin=None,
checkout=None,
offset=0,
items_per_grid=8,
):
"""
Search listings with
* Query (e.g. query="Lisbon, Portugal") or
* Location (e.g. gps_lat=55.6123352&gps_lng=37.7117917)
* Check in/check out filters (e.g. checkin=2019-05-15&checkout=2019-05-20)
"""
params = {
"toddlers": "0",
"adults": "0",
"infants": "0",
"is_guided_search": "true",
"version": "1.4.8",
"section_offset": "0",
"items_offset": str(offset),
"screen_size": "small",
"source": "explore_tabs",
"items_per_grid": str(items_per_grid),
"_format": "for_explore_search_native",
"metadata_only": "false",
"refinement_paths[]": "/homes",
"timezone": "Europe/Lisbon",
"satori_version": "1.1.0",
}
if not query and not (gps_lat and gps_lng):
raise MissingParameterError("Missing query or gps coordinates")
if query:
params["query"] = query
if gps_lat and gps_lng:
params["lat"] = gps_lat
params["lng"] = gps_lng
if checkin and checkout:
params["checkin"] = checkin
params["checkout"] = checkout
r = self._session.get(API_URL + "/explore_tabs", params=params)
r.raise_for_status()
return r.json()
@randomizable
def get_listing_details(self, listing_id):
params = {
"adults": "0",
"_format": "for_native",
"infants": "0",
"children": "0",
}
r = self._session.get(
API_URL + "/pdp_listing_details/" + str(listing_id), params=params
)
r.raise_for_status()
return r.json()
if __name__ == "__main__":
    # Run the doctests embedded in this module (note: they hit the live API).
    import doctest

    doctest.testmod()
| 27.850312 | 203 | 0.578307 |
ace135176d8116a9b79c18cad80580e2ab8bc6f1 | 81 | py | Python | 002.py | rafaelgervickas/Exercicios | 36397a9d78135f5eec29e8df95bfdb9de4ffa932 | [
"Apache-2.0"
] | null | null | null | 002.py | rafaelgervickas/Exercicios | 36397a9d78135f5eec29e8df95bfdb9de4ffa932 | [
"Apache-2.0"
] | null | null | null | 002.py | rafaelgervickas/Exercicios | 36397a9d78135f5eec29e8df95bfdb9de4ffa932 | [
"Apache-2.0"
] | null | null | null | nome = input('Qual é o seu nome?')
print('É um prazer te conhecer', nome, '!')
| 27 | 44 | 0.617284 |
ace1352252f422b0dde65debb2cdcd9e0285f6e2 | 383 | py | Python | slib/json.py | lukeexer/py_server_starter | 5f86c8bc210bddf59d82b8c8911b0a08c4ae1638 | [
"MIT"
] | null | null | null | slib/json.py | lukeexer/py_server_starter | 5f86c8bc210bddf59d82b8c8911b0a08c4ae1638 | [
"MIT"
] | null | null | null | slib/json.py | lukeexer/py_server_starter | 5f86c8bc210bddf59d82b8c8911b0a08c4ae1638 | [
"MIT"
] | null | null | null | # pylint: disable=R0903
'''SServer JSON Library.'''
class SJson():
    '''SServer JSON utility functions.'''

    @staticmethod
    def make_error_json(error_code, error_msg):
        '''Build an API error payload from enum-like code/message objects.

        Both arguments must expose a ``.value`` attribute; the result is a
        dict of the form ``{'code': ..., 'msg': ...}``.
        '''
        return {
            'code': error_code.value,
            'msg': error_msg.value,
        }
| 23.9375 | 57 | 0.603133 |
ace1352b9dd2bf46f52bf96c904e3bb542ac0a2a | 22,729 | py | Python | frame_2D_alg/comp_slice_.py | KangDo96/CogAlg | 22708050270ae98d6df8333d85b554597e14d65e | [
"MIT"
] | 7 | 2021-05-08T18:25:42.000Z | 2021-09-30T13:41:18.000Z | frame_2D_alg/comp_slice_.py | KangDo96/CogAlg | 22708050270ae98d6df8333d85b554597e14d65e | [
"MIT"
] | null | null | null | frame_2D_alg/comp_slice_.py | KangDo96/CogAlg | 22708050270ae98d6df8333d85b554597e14d65e | [
"MIT"
] | null | null | null | '''
Comp_slice is a terminal fork of intra_blob.
-
It traces blob axis by cross-comparing vertically adjacent Ps: horizontal slices across an edge blob.
These low-M high-Ma blobs are vectorized into outlines of adjacent flat or high-M blobs.
(high match: M / Ma, roughly corresponds to low gradient: G / Ga)
-
Vectorization is clustering of Ps + their derivatives (derPs) into PPs: patterns of Ps that describe an edge.
This process is a reduced-dimensionality (2D->1D) version of cross-comp and clustering cycle, common across this project.
As we add higher dimensions (2D alg, 3D alg), this dimensionality reduction is done in salient high-aspect blobs
(likely edges / contours in 2D or surfaces in 3D) to form more compressed skeletal representations of full-D patterns.
-
Please see diagram:
https://github.com/boris-kz/CogAlg/blob/master/frame_2D_alg/Illustrations/comp_slice_flip.drawio
'''
from collections import deque
import sys
import numpy as np
from class_cluster import ClusterStructure, NoneType
from frame_blobs import CDert
#from slice_utils import draw_PP_
import warnings # to detect overflow issue, in case of infinity loop
warnings.filterwarnings('error')
ave = 30 # filter or hyper-parameter, set as a guess, latter adjusted by feedback, not needed here
aveG = 50 # filter for comp_g, assumed constant direction
flip_ave = .1
flip_ave_FPP = 0 # flip large FPPs only (change to 0 for debug purpose)
div_ave = 200
ave_dX = 10 # difference between median x coords of consecutive Ps
ave_Dx = 10
ave_mP = 8 # just a random number right now.
ave_rmP = .7 # the rate of mP decay per relative dX (x shift) = 1: initial form of distance
ave_ortho = 20
class CP(ClusterStructure):
    # One horizontal slice (1D pattern) across a blob row.
    Dert = object  # summed kernel parameters
    L = int  # slice length in derts
    x0 = int  # x coordinate of the first dert in the slice
    dX = int  # shift of average x between P and _P, if any
    y = int  # for visualization only
    sign = NoneType  # sign of gradient deviation
    dert_ = list  # array of pixel-level derts: (p, dy, dx, g, m), extended in intra_blob
    upconnect_ = list  # derPs linking this P to x-overlapping Ps in the row above
    downconnect_cnt = int  # count of x-overlapping Ps in the row below
    derP = object  # derP object reference
    # only in Pd:
    Pm = object  # reference to root P
    dxdert_ = list  # (ddx, mdx) pairs produced by comp_dx
    # only in Pm:
    Pd_ = list  # sub-Ps segmented by dx sign (form_Pd_)
Pd_ = list
class CderDert(ClusterStructure):
    # Derivatives from comparing two P.Derts: m* = match, d* = difference per param.
    mP = int  # summed match between Ps
    dP = int  # summed difference between Ps
    mx = int
    dx = int
    mL = int
    dL = int
    mDx = int
    dDx = int
    mDy = int
    dDy = int
    # dDdx,mDdx,dMdx,mMdx is used by comp_dx
    mDyy = int
    mDyx = int
    mDxy = int
    mDxx = int
    mGa = int
    mMa = int
    mMdx = int
    mDdx = int
    dDyy = int
    dDyx = int
    dDxy = int
    dDxx = int
    dGa = int
    dMa = int
    dMdx = int
    dDdx = int
class CderP(ClusterStructure):
    # Vertical derivative unit: the result of comparing a lower P to an upper _P.
    derDert = object  # accumulated comparison derivatives (CderDert)
    P = object  # lower comparand
    _P = object  # higher comparand
    PP = object  # FPP if flip_val, contains this derP
    # from comp_dx
    fdx = NoneType  # flag: dx-level derivatives were compared and stored
class CPP(ClusterStructure):
    # Pattern of patterns: cluster of vertically connected same-sign derPs.
    Dert = object  # set of P params accumulated in PP
    derDert = object  # set of derP params accumulated in PP
    # between PPs:
    upconnect_ = list
    downconnect_cnt = int
    fPPm = NoneType  # PPm if 1, else PPd; not needed if packed in PP_?
    fdiv = NoneType
    box = list  # for visualization only, original box before flipping
    dert__ = list
    mask__ = bool
    # PP params
    derP__ = list
    P__ = list
    PPmm_ = list
    PPdm_ = list
    # PPd params
    derPd__ = list
    Pd__ = list
    PPmd_ = list
    PPdd_ = list  # comp_dx params
# Functions:
'''
leading '_' denotes higher-line variable or structure, vs. same-type lower-line variable or structure
trailing '_' denotes array name, vs. same-name elements of that array. '__' is a 2D array
leading 'f' denotes flag
-
rough workflow:
-
intra_blob -> slice_blob(blob) -> derP_ -> PP,
if flip_val(PP is FPP): pack FPP in blob.PP_ -> flip FPP.dert__ -> slice_blob(FPP) -> pack PP in FPP.PP_
else (PP is PP): pack PP in blob.PP_
'''
def slice_blob(blob, verbose=False):
    '''
    Slice_blob converts selected smooth-edge blobs (high G, low Ga or low M, high Ma) into sliced blobs,
    adding horizontal blob slices: Ps or 1D patterns.
    Runs the full row pipeline twice: pass 0 clusters by mP sign, pass 1 by dP sign.
    '''
    dert__ = blob.dert__
    mask__ = blob.mask__
    height, width = dert__[0].shape  # NOTE(review): width is unused here
    if verbose: print("Converting to image...")

    for fPPd in range(2):  # run twice, 1st loop fPPd=0: form PPs, 2nd loop fPPd=1: form PPds
        P__ , derP__, Pd__, derPd__ = [], [], [], []
        zip_dert__ = zip(*dert__)  # per-row iterator over param planes
        _P_ = form_P_(list(zip(*next(zip_dert__))), mask__[0], 0)  # 1st upper row
        P__ += _P_  # frame of Ps

        for y, dert_ in enumerate(zip_dert__, start=1):  # scan top down
            if verbose: print(f"\rProcessing line {y + 1}/{height}, ", end=""); sys.stdout.flush()
            P_ = form_P_(list(zip(*dert_)), mask__[y], y)  # horizontal clustering - lower row
            derP_ = scan_P_(P_, _P_)  # tests for x overlap between Ps, calls comp_slice
            Pd_ = form_Pd_(P_)  # form Pds within Ps
            derPd_ = scan_Pd_(P_, _P_)  # adds upconnect_ in Pds and calls derPd_2_PP_derPd_, same as derP_2_PP_

            derP__ += derP_; derPd__ += derPd_  # frame of derPs
            P__ += P_; Pd__ += Pd_
            _P_ = P_  # set current lower row P_ as next upper row _P_

        form_PP_root(blob, derP__, P__, derPd__, Pd__, fPPd)  # form PPs in blob or in FPP
# yet to be updated
# draw PPs
# if not isinstance(blob, CPP):
# draw_PP_(blob)
def form_P_(idert_, mask_, y):  # segment dert__ into P__ in horizontal ) vertical order, sum dert params into P params
    """Segment one row of derts into Ps: maximal runs of unmasked derts.

    idert_: sequence of 11-tuples (p, dy, dx, g, m, dyy, dyx, dxy, dxx, ga, ma);
    mask_: per-dert mask bits for this row (assumes numpy booleans — `~` relies
    on numpy semantics, a plain Python bool would make `~_mask` always truthy);
    y: row index, stored on each P for visualization.
    """
    P_ = []  # rows of derPs
    dert_ = [list(idert_[0])]  # get first dert from idert_ (generator/iterator)
    _mask = mask_[0]  # mask bit per dert
    if ~_mask:
        I, Dy, Dx, G, M, Dyy, Dyx, Dxy, Dxx, Ga, Ma = dert_[0]; L = 1; x0 = 0  # initialize P params with first dert

    for x, dert in enumerate(idert_[1:], start=1):  # left to right in each row of derts
        mask = mask_[x]  # pixel mask
        if mask:  # masks: if 1,_0: P termination, if 0,_1: P initialization, if 0,_0: P accumulation:
            if ~_mask:  # _dert is not masked, dert is masked, terminate P:
                P = CP(Dert=CDert(I=I, Dy=Dy, Dx=Dx, G=G, M=M, Dyy=Dyy, Dyx=Dyx, Dxy=Dxy, Dxx=Dxx, Ga=Ga, Ma=Ma), L=L, x0=x0, dert_=dert_, y=y)
                P_.append(P)
        else:  # dert is not masked
            if _mask:  # _dert is masked, initialize P params:
                I, Dy, Dx, G, M, Dyy, Dyx, Dxy, Dxx, Ga, Ma = dert; L = 1; x0 = x; dert_ = [dert]
            else:
                I += dert[0]  # _dert is not masked, accumulate P params with (p, dy, dx, g, m, dyy, dyx, dxy, dxx, ga, ma) = dert
                Dy += dert[1]
                Dx += dert[2]
                G += dert[3]
                M += dert[4]
                Dyy += dert[5]
                Dyx += dert[6]
                Dxy += dert[7]
                Dxx += dert[8]
                Ga += dert[9]
                Ma += dert[10]
                L += 1
                dert_.append(dert)
        _mask = mask

    if ~_mask:  # terminate last P in a row
        P = CP(Dert=CDert(I=I, Dy=Dy, Dx=Dx, G=G, M=M, Dyy=Dyy, Dyx=Dyx, Dxy=Dxy, Dxx=Dxx, Ga=Ga, Ma=Ma), L=L, x0=x0, dert_=dert_, y=y)
        P_.append(P)
    return P_
def form_Pd_(P_):  # form Pds from Pm derts by dx sign, otherwise same as form_P
    """Segment each connected P into Pds: maximal runs of same-sign dx derts.

    Pds inherit a back-reference to their root P (Pm).  Pds with Dx above
    ave_Dx get dx cross-comparison via comp_dx; their Ddx/Mdx sums are rolled
    up onto the root P.  Returns the flat list of all Pds formed.
    """
    Pd__ = []
    for iP in P_:
        if (iP.downconnect_cnt>0) or (iP.upconnect_):  # form Pd s if at least one connect in P, else they won't be compared
            P_Ddx = 0  # sum of Ddx across Pd s
            P_Mdx = 0  # sum of Mdx across Pd s
            Pd_ = []  # Pds in P
            _dert = iP.dert_[0]  # 1st dert
            dert_ = [_dert]
            I, Dy, Dx, G, M, Dyy, Dyx, Dxy, Dxx, Ga, Ma = _dert; L = 1; x0 = iP.x0  # initialize P params with first dert
            _sign = _dert[2] > 0  # dx sign of the first dert
            x = 1  # relative x within P

            for dert in iP.dert_[1:]:
                sign = dert[2] > 0
                if sign == _sign:  # same Dx sign
                    I += dert[0]  # accumulate P params with (p, dy, dx, g, m, dyy, dyx, dxy, dxx, ga, ma) = dert
                    Dy += dert[1]
                    Dx += dert[2]
                    G += dert[3]
                    M += dert[4]
                    Dyy += dert[5]
                    Dyx += dert[6]
                    Dxy += dert[7]
                    Dxx += dert[8]
                    Ga += dert[9]
                    Ma += dert[10]
                    L += 1
                    dert_.append(dert)
                else:  # sign change, terminate P
                    P = CP(Dert=CDert(I=I, Dy=Dy, Dx=Dx, G=G, M=M, Dyy=Dyy, Dyx=Dyx, Dxy=Dxy, Dxx=Dxx, Ga=Ga, Ma=Ma),
                           L=L, x0=x0, dert_=dert_, y=iP.y, sign=_sign, Pm=iP)
                    if Dx > ave_Dx:
                        # cross-comp of dx in P.dert_
                        comp_dx(P); P_Ddx += P.Dert.Ddx; P_Mdx += P.Dert.Mdx
                    Pd_.append(P)
                    # reinitialize params
                    I, Dy, Dx, G, M, Dyy, Dyx, Dxy, Dxx, Ga, Ma = dert; x0 = iP.x0+x; L = 1; dert_ = [dert]
                    _sign = sign
                x += 1

            # terminate last P
            P = CP(Dert=CDert(I=I, Dy=Dy, Dx=Dx, G=G, M=M, Dyy=Dyy, Dyx=Dyx, Dxy=Dxy, Dxx=Dxx, Ga=Ga, Ma=Ma),
                   L=L, x0=x0, dert_=dert_, y=iP.y, sign=_sign, Pm=iP)
            if Dx > ave_Dx:
                comp_dx(P); P_Ddx += P.Dert.Ddx; P_Mdx += P.Dert.Mdx
            Pd_.append(P)
            # update Pd params in P
            iP.Pd_ = Pd_; iP.Dert.Ddx = P_Ddx; iP.Dert.Mdx = P_Mdx
            Pd__ += Pd_

    return Pd__
def scan_P_(P_, _P_):  # test for x overlap between Ps, call comp_slice
    """Pair each lower-row P with every x-overlapping upper-row _P.

    Each not-yet-compared overlapping pair is compared with comp_slice_full;
    the resulting derP is recorded on both Ps and collected for the caller.
    """
    derP_ = []
    for P in P_:  # lower row
        for _P in _P_:  # upper row
            # 8-directional overlap: spans touch when extended by one pixel
            overlaps = (P.x0 - 1) < (_P.x0 + _P.L) and (P.x0 + P.L) + 1 > _P.x0
            if overlaps:
                already_compared = any(P is derP.P for derP in P.upconnect_)  # upconnect could be derP or dirP
                if not already_compared:
                    derP = comp_slice_full(_P, P)  # form vertical and directional derivatives
                    derP_.append(derP)
                    P.upconnect_.append(derP)
                    _P.downconnect_cnt += 1
            elif (P.x0 + P.L) < _P.x0:
                break  # upper Ps are ordered by x: no later _P can overlap
    return derP_
def scan_Pd_(P_, _P_):  # test for x overlap between Pds
    """Same as scan_P_ but at Pd granularity: pairs sub-Ps of overlapping rows.

    Only Pds with matching dx sign are compared; results are collected as derPds.
    """
    derPd_ = []
    for P in P_:  # lower row
        for _P in _P_:  # upper row
            for Pd in P.Pd_:  # lower row Pds
                for _Pd in _P.Pd_:  # upper row Pds
                    # test for same sign & x overlap between Pd and _Pd in 8 directions
                    if (Pd.x0 - 1 < (_Pd.x0 + _Pd.L) and (Pd.x0 + Pd.L) + 1 > _Pd.x0) and (Pd.sign == _Pd.sign):
                        fcomp = [1 for derPd in Pd.upconnect_ if Pd is derPd.P]  # upconnect could be derP or dirP
                        if not fcomp:
                            derPd = comp_slice_full(_Pd, Pd)
                            derPd_.append(derPd)
                            Pd.upconnect_.append(derPd)
                            _Pd.downconnect_cnt += 1
                    elif (Pd.x0 + Pd.L) < _Pd.x0:  # stop scanning the rest of lower P_ if there is no overlap
                        break
    return derPd_
def form_PP_root(blob, derP__, P__, derPd__, Pd__, fPPd):
    '''
    Attach the per-frame P/derP arrays to blob, then cluster them into PPs.
    fPPd selects the clustering criterion: dP sign (1) or mP sign (0).
    '''
    blob.derP__ = derP__
    blob.P__ = P__
    blob.derPd__ = derPd__
    blob.Pd__ = Pd__

    if fPPd:
        # cluster by dP sign
        derP_2_PP_(blob.derP__, blob.PPdm_, 0, 1)
        derP_2_PP_(blob.derPd__, blob.PPdd_, 1, 1)  # from derPds, not used
    else:
        # cluster by mP sign
        derP_2_PP_(blob.derP__, blob.PPmm_, 0, 0)
        derP_2_PP_(blob.derPd__, blob.PPmd_, 1, 0)  # from derPds, not used
def derP_2_PP_(derP_, PP_, fderPd, fPPd):
    '''
    Seed PPs at root derPs (downconnect_cnt == 0) and grow them upward.
    derP_ is stored top-down, so iterate bottom-up to follow upconnects.
    '''
    for derP in reversed(derP_):
        is_unclaimed_root = (not derP.P.downconnect_cnt
                             and not isinstance(derP.PP, CPP))  # not terminated in a prior call
        if is_unclaimed_root:
            new_PP = CPP(Dert=CDert(), derDert=CderDert())  # init
            accum_PP(new_PP, derP)
            if derP._P.upconnect_:
                # extend the PP across _P's upconnects
                upconnect_2_PP_(derP, PP_, fderPd, fPPd)
            else:
                PP_.append(derP.PP)  # no upconnects: PP terminates here
def upconnect_2_PP_(iderP, PP_, fderPd, fPPd):
    '''
    compare sign of lower-layer iderP to the sign of its upconnects to form contiguous same-sign PPs.
    Same-sign upconnects are absorbed into (or merged with) iderP.PP; a sign
    change seeds a new root PP.  Recurses upward through each upconnect chain.
    '''
    confirmed_upconnect_ = []  # upconnects that stay attached after sign check

    for derP in iderP._P.upconnect_:  # potential upconnects from previous call
        if derP not in iderP.PP.derP__:  # derP should not in current iPP derP_ list, but this may occur after the PP merging

            if fPPd: same_sign = (iderP.derDert.dP > 0) == (derP.derDert.dP > 0)  # comp dP sign
            else: same_sign = (iderP.derDert.mP > 0) == (derP.derDert.mP > 0)  # comp mP sign

            if same_sign:  # upconnect derP has different PP, merge them
                if isinstance(derP.PP, CPP) and (derP.PP is not iderP.PP):
                    merge_PP(iderP.PP, derP.PP, PP_)
                else:  # accumulate derP in current PP
                    accum_PP(iderP.PP, derP)
                    confirmed_upconnect_.append(derP)
            elif not isinstance(derP.PP, CPP):  # sign changed, derP is root derP unless it already has FPP/PP
                PP = CPP(Dert=CDert(), derDert=CderDert())
                accum_PP(PP, derP)
                derP.P.downconnect_cnt = 0  # reset downconnect count for root derP

            if derP._P.upconnect_:
                upconnect_2_PP_(derP, PP_, fderPd, fPPd)  # recursive compare sign of next-layer upconnects
            elif derP.PP is not iderP.PP and derP.P.downconnect_cnt == 0:
                PP_.append(derP.PP)  # terminate PP (not iPP) at the sign change

    iderP._P.upconnect_ = confirmed_upconnect_
    if not iderP.P.downconnect_cnt:
        PP_.append(iderP.PP)  # iPP is terminated after all upconnects are checked
def merge_PP(_PP, PP, PP_):  # merge PP into _PP
    """Absorb every derP of PP into _PP, re-pointing references, then drop PP."""
    for derP in PP.derP__:
        if derP in _PP.derP__:
            continue  # already absorbed
        _PP.derP__.append(derP)
        derP.PP = _PP  # re-point to the surviving PP
        # fold the P-level params into the survivor
        _PP.Dert.accumulate(**{name: getattr(derP.P.Dert, name) for name in _PP.Dert.numeric_params})
        # fold the derP-level params into the survivor
        _PP.derDert.accumulate(**{name: getattr(derP.derDert, name) for name in _PP.derDert.numeric_params})

    if PP in PP_:
        PP_.remove(PP)  # merged PP no longer stands on its own
def accum_Dert(Dert: dict, **params) -> None:
    """Add each keyword value onto the matching key already present in *Dert*."""
    increments = {key: Dert[key] + value for key, value in params.items()}
    Dert.update(increments)
def accum_PP(PP, derP):  # accumulate params in PP
    """Fold one derP (and its P's params) into PP and register membership both ways."""
    # P-level params
    PP.Dert.accumulate(**{name: getattr(derP.P.Dert, name) for name in PP.Dert.numeric_params})
    # derP-level params
    PP.derDert.accumulate(**{name: getattr(derP.derDert, name) for name in PP.derDert.numeric_params})

    PP.derP__.append(derP)
    derP.PP = PP  # back-reference to the containing PP
def comp_dx(P):  # cross-comp of dx s in P.dert_
    """Cross-compare consecutive dx values in P.dert_.

    Stores per-pair (ddx, mdx) tuples on P.dxdert_ and the P-wide sums on
    P.Dert.Ddx / P.Dert.Mdx.  Match (mdx) is the smaller dx for same-sign
    pairs, and the negated smaller magnitude for opposite-sign pairs.
    """
    Ddx = 0
    Mdx = 0
    dxdert_ = []
    _dx = P.dert_[0][2]  # first dx
    for dert in P.dert_[1:]:
        dx = dert[2]
        ddx = dx - _dx
        # Bug fix: parentheses are required.  The previous `dx > 0 == _dx > 0`
        # chained into (dx > 0) and (0 == _dx) and (_dx > 0), which is never
        # true, so the same-sign branch was unreachable.
        if (dx > 0) == (_dx > 0):
            mdx = min(dx, _dx)
        else:
            mdx = -min(abs(dx), abs(_dx))
        dxdert_.append((ddx, mdx))  # no dx: already in dert_
        Ddx += ddx  # P-wide cross-sign, P.L is too short to form sub_Ps
        Mdx += mdx
        _dx = dx
    P.dxdert_ = dxdert_
    P.Dert.Ddx = Ddx
    P.Dert.Mdx = Mdx
def comp_slice(_P, P, _derP_):  # forms vertical derivatives of derP params, and conditional ders from norm and DIV comp
    """Reduced comparison of vertically adjacent slices _P (upper) and P (lower).

    Simplified variant of comp_slice_full: no orthogonal re-projection, no
    Dx/Dy or dx-level comparison.  Sets P.Dert.flip_val and attaches the
    resulting CderP to P.
    NOTE(review): `_derP_`, `ddX` and `mdX` are accepted/computed but unused
    here — presumably kept for symmetry with comp_slice_full; confirm.
    """
    s, x0, Dx, Dy, G, M, L, Ddx, Mdx = P.sign, P.x0, P.Dert.Dx, P.Dert.Dy, P.Dert.G, P.Dert.M, P.L, P.Dert.Ddx, P.Dert.Mdx  # params per comp branch
    _s, _x0, _Dx, _Dy, _G, _M, _dX, _L, _Ddx, _Mdx = _P.sign, _P.x0, _P.Dert.Dx, _P.Dert.Dy, _P.Dert.G, _P.Dert.M, _P.dX, _P.L, _P.Dert.Ddx, _P.Dert.Mdx

    dX = (x0 + (L-1) / 2) - (_x0 + (_L-1) / 2)  # x shift: d_ave_x, or from offsets: abs(x0 - _x0) + abs(xn - _xn)?
    ddX = dX - _dX  # long axis curvature, if > ave: ortho eval per P, else per PP_dX?
    mdX = min(dX, _dX)  # dX is inversely predictive of mP?
    hyp = np.hypot(dX, 1)  # ratio of local segment of long (vertical) axis to dY = 1

    L /= hyp  # orthogonal L is reduced by hyp
    dL = L - _L; mL = min(L, _L)  # L: positions / sign, dderived: magnitude-proportional value
    M /= hyp  # orthogonal M is reduced by hyp
    dM = M - _M; mM = min(M, _M)  # use abs M? no Mx, My: non-core, lesser and redundant bias?

    dP = dL + dM  # -> directional PPd, equal-weight params, no rdn?
    mP = mL + mM  # -> complementary PPm, rdn *= Pd | Pm rolp?
    mP -= ave_mP * ave_rmP ** (dX / L)  # dX / L is relative x-distance between P and _P,

    P.Dert.flip_val = (dX * (P.Dert.Dy / (P.Dert.Dx+.001)) - flip_ave)  # +.001 to avoid division by zero

    derP = CderP(derDert=CderDert(mP=mP, dP=dP, dX=dX, mL=mL, dL=dL), P=P, _P=_P)
    P.derP = derP
    return derP
def comp_slice_full(_P, P):  # forms vertical derivatives of derP params, and conditional ders from norm and DIV comp
    """Compare vertically adjacent slices _P (upper) and P (lower) -> CderP.

    Forms match (m*) and difference (d*) derivatives per P param.  If the x
    shift times P's gradient exceeds ave_ortho, P's L/Dy/Dx are first
    re-projected onto the axis orthogonal to the long-axis connection.
    dx-level derivatives are compared too when both Ps carry comp_dx results.
    """
    s, x0, Dx, Dy, G, M, L, Ddx, Mdx = P.sign, P.x0, P.Dert.Dx, P.Dert.Dy, P.Dert.G, P.Dert.M, P.L, P.Dert.Ddx, P.Dert.Mdx
    # params per comp branch, add angle params
    _s, _x0, _Dx, _Dy, _G, _M, _dX, _L, _Ddx, _Mdx = _P.sign, _P.x0, _P.Dert.Dx, _P.Dert.Dy, _P.Dert.G, _P.Dert.M, _P.dX, _P.L, _P.Dert.Ddx, _P.Dert.Mdx

    dX = (x0 + (L-1) / 2) - (_x0 + (_L-1) / 2)  # x shift: d_ave_x, or from offsets: abs(x0 - _x0) + abs(xn - _xn)?

    if dX > ave_dX:  # internal comp is higher-power, else two-input comp not compressive?
        xn = x0 + L - 1
        _xn = _x0 + _L - 1
        mX = min(xn, _xn) - max(x0, _x0)  # overlap = abs proximity: summed binary x match
        rX = dX / mX if mX else dX*2  # average dist / prox, | prox / dist, | mX / max_L?
        # NOTE(review): mX / rX are not used further in this function — confirm intent.

    ddX = dX - _dX  # long axis curvature, if > ave: ortho eval per P, else per PP_dX?
    mdX = min(dX, _dX)  # dX is inversely predictive of mP?

    if dX * P.Dert.G > ave_ortho:  # estimate params of P locally orthogonal to long axis, maximizing lateral diff and vertical match
        # diagram: https://github.com/boris-kz/CogAlg/blob/master/frame_2D_alg/Illustrations/orthogonalization.png
        # Long axis is a curve of connections between ave_xs: mid-points of consecutive Ps.
        # Ortho virtually rotates P to connection-orthogonal direction:
        hyp = np.hypot(dX, 1)  # ratio of local segment of long (vertical) axis to dY = 1
        L = L / hyp  # orthogonal L
        # combine derivatives in proportion to the contribution of their axes to orthogonal axes:
        # contribution of Dx should increase with hyp(dX,dY=1), this is original direction of Dx.
        # Bug fix: both projections must read the pre-rotation Dy and Dx; the
        # previous sequential assignment fed the already-updated Dy into Dx.
        Dy, Dx = (Dy / hyp + Dx * hyp) / 2, (Dy * hyp + Dx / hyp) / 2  # estimated along-axis D, cross-axis D
        '''
        alternatives:
        oDy = (Dy * hyp - Dx / hyp) / 2;    oDx = (Dx / hyp + Dy * hyp) / 2;  or:
        oDy = hypot( Dy / hyp, Dx * hyp);   oDx = hypot( Dy * hyp, Dx / hyp)
        '''
    dL = L - _L; mL = min(L, _L)  # L: positions / sign, dderived: magnitude-proportional value
    dM = M - _M; mM = min(M, _M)  # use abs M? no Mx, My: non-core, lesser and redundant bias?
    # no comp G: Dy, Dx are more specific:
    dDx = Dx - _Dx  # same-sign Dx if Pd
    mDx = min(abs(Dx), abs(_Dx))
    # Bug fix: parentheses are required, matching the mDy test below; the
    # previous `Dx > 0 != _Dx > 0` chained into (Dx > 0) and (0 != _Dx) and
    # (_Dx > 0), which negated the match for same-sign positive Dx instead.
    if (Dx > 0) != (_Dx > 0): mDx = -mDx
    # min is value distance for opposite-sign comparands, vs. value overlap for same-sign comparands
    dDy = Dy - _Dy  # Dy per sub_P by intra_comp(dx), vs. less vertically specific dI
    mDy = min(abs(Dy), abs(_Dy))
    if (Dy > 0) != (_Dy > 0): mDy = -mDy

    dDdx, dMdx, mDdx, mMdx = 0, 0, 0, 0
    if P.dxdert_ and _P.dxdert_:  # from comp_dx
        fdx = 1
        dDdx = Ddx - _Ddx
        mDdx = min( abs(Ddx), abs(_Ddx))
        if (Ddx > 0) != (_Ddx > 0): mDdx = -mDdx
        # Mdx is signed:
        # NOTE(review): dMdx uses min and mMdx is pre-negated — looks inverted
        # relative to the other d/m pairs; preserved as-is, confirm intent.
        dMdx = min( Mdx, _Mdx)
        mMdx = -min( abs(Mdx), abs(_Mdx))
        if (Mdx > 0) != (_Mdx > 0): mMdx = -mMdx
    else:
        fdx = 0
    # coeff = 0.7 for semi redundant parameters, 0.5 for fully redundant parameters:
    dP = ddX + dL + 0.7*(dM + dDx + dDy)  # -> directional PPd, equal-weight params, no rdn?
    # correlation: dX -> L, oDy, !oDx, ddX -> dL, odDy ! odDx? dL -> dDx, dDy?
    if fdx: dP += 0.7*(dDdx + dMdx)

    mP = mdX + mL + 0.7*(mM + mDx + mDy)  # -> complementary PPm, rdn *= Pd | Pm rolp?
    if fdx: mP += 0.7*(mDdx + mMdx)
    mP -= ave_mP * ave_rmP ** (dX / L)  # dX / L is relative x-distance between P and _P,

    derP = CderP(P=P, _P=_P, derDert=CderDert(mP=mP, dP=dP, dX=dX, mL=mL, dL=dL, mDx=mDx, dDx=dDx, mDy=mDy, dDy=dDy))
    P.derP = derP

    if fdx:
        derP.fdx=1; derP.derDert.dDdx=dDdx; derP.derDert.mDdx=mDdx; derP.derDert.dMdx=dMdx; derP.derDert.mMdx=mMdx
    '''
    min comp for rotation: L, Dy, Dx, no redundancy?
    mParam weighting by relative contribution to mP, /= redundancy?
    div_f, nvars: if abs dP per PPd, primary comp L, the rest is normalized?
    '''
    return derP
''' radial comp extension for co-internal blobs:
!= sign comp x sum( adj_blob_) -> intra_comp value, isolation value, cross-sign merge if weak, else:
== sign comp x ind( adj_adj_blob_) -> same-sign merge | composition:
borrow = adj_G * rA: default sum div_comp S -> relative area and distance to adjj_blob_
internal sum comp if mA: in thin lines only? comp_norm_G or div_comp_G -> rG?
isolation = decay + contrast:
G - G * (rA * ave_rG: decay) - (rA * adj_G: contrast, = lend | borrow, no need to compare vG?)
if isolation: cross adjj_blob composition eval,
else: cross adjj_blob merge eval:
blob merger if internal match (~raG) - isolation, rdn external match:
blob compos if external match (~rA?) + isolation,
Also eval comp_slice over fork_?
rng+ should preserve resolution: rng+_dert_ is dert layers,
rng_sum-> rng+, der+: whole rng, rng_incr-> angle / past vs next g,
rdn Rng | rng_ eval at rng term, Rng -= lost coord bits mag, always > discr?
Add comp_PP_recursive
''' | 41.704587 | 152 | 0.587047 |
ace136549336010f4c42d18b0b164cf6c66ff605 | 9,584 | py | Python | flask_mongoengine/wtf/orm.py | corydolphin/flask-mongoengine | 689b10e6f17e8db4ec15fc87ed03e504bca757a2 | [
"BSD-3-Clause"
] | null | null | null | flask_mongoengine/wtf/orm.py | corydolphin/flask-mongoengine | 689b10e6f17e8db4ec15fc87ed03e504bca757a2 | [
"BSD-3-Clause"
] | null | null | null | flask_mongoengine/wtf/orm.py | corydolphin/flask-mongoengine | 689b10e6f17e8db4ec15fc87ed03e504bca757a2 | [
"BSD-3-Clause"
] | null | null | null | """
Tools for generating forms based on mongoengine Document schemas.
"""
import sys
import decimal
from bson import ObjectId
from operator import itemgetter
try:
from collections import OrderedDict
except ImportError:
# Use bson's SON implementation instead
from bson import SON as OrderedDict
from wtforms import fields as f, validators
from mongoengine import ReferenceField
from flask.ext.mongoengine.wtf.fields import ModelSelectField, ModelSelectMultipleField, DictField, NoneStringField, BinaryField
from flask.ext.mongoengine.wtf.models import ModelForm
__all__ = (
'model_fields', 'model_form',
)
def converts(*args):
    """Decorator factory: tag a converter method with the mongoengine
    field-class names it handles (collected by ModelConverter.__init__)."""
    def decorator(func):
        func._converter_for = frozenset(args)
        return func
    return decorator
class ModelConverter(object):
    """Translates mongoengine document fields into wtforms form fields.

    Converter methods are registered with the @converts(...) decorator;
    __init__ scans the class for tagged methods and builds a mapping of
    mongoengine field-class name -> converter callable.
    """

    def __init__(self, converters=None):
        if not converters:
            converters = {}

        # collect methods tagged by @converts with the field names they handle
        for name in dir(self):
            obj = getattr(self, name)
            if hasattr(obj, '_converter_for'):
                for classname in obj._converter_for:
                    converters[classname] = obj

        self.converters = converters

    def convert(self, model, field, field_args):
        """Convert one mongoengine *field* into a wtforms field instance.

        field_args (optional dict) overrides the generated keyword arguments.
        Returns None for field types that have no usable converter.
        """
        kwargs = {
            'label': getattr(field, 'verbose_name', field.name),
            'description': field.help_text or '',
            'validators': [],
            'filters': [],
            'default': field.default,
        }
        if field_args:
            kwargs.update(field_args)

        if field.required:
            kwargs['validators'].append(validators.Required())
        else:
            kwargs['validators'].append(validators.Optional())

        ftype = type(field).__name__

        # fields with explicit choices become select fields
        if field.choices:
            kwargs['choices'] = field.choices

            if ftype in self.converters:
                kwargs["coerce"] = self.coerce(ftype)
            if kwargs.pop('multiple', False):
                return f.SelectMultipleField(**kwargs)
            return f.SelectField(**kwargs)

        ftype = type(field).__name__  # NOTE(review): redundant re-assignment, kept as-is

        # a field may provide its own conversion hook
        if hasattr(field, 'to_form_field'):
            return field.to_form_field(model, kwargs)

        if ftype in self.converters:
            return self.converters[ftype](model, field, kwargs)

    @classmethod
    def _string_common(cls, model, field, kwargs):
        # shared min/max length validation for string-like fields
        if field.max_length or field.min_length:
            kwargs['validators'].append(
                validators.Length(max=field.max_length or - 1,
                                  min=field.min_length or - 1))

    @classmethod
    def _number_common(cls, model, field, kwargs):
        # shared range validation for numeric fields
        if field.max_value or field.min_value:
            kwargs['validators'].append(
                validators.NumberRange(max=field.max_value,
                                       min=field.min_value))

    @converts('StringField')
    def conv_String(self, model, field, kwargs):
        if field.regex:
            kwargs['validators'].append(validators.Regexp(regex=field.regex))
        self._string_common(model, field, kwargs)
        # 'password' in field_args requests a PasswordField instead
        if 'password' in kwargs:
            if kwargs.pop('password'):
                return f.PasswordField(**kwargs)
        if field.max_length:
            return f.StringField(**kwargs)
        return f.TextAreaField(**kwargs)

    @converts('URLField')
    def conv_URL(self, model, field, kwargs):
        kwargs['validators'].append(validators.URL())
        self._string_common(model, field, kwargs)
        return NoneStringField(**kwargs)

    @converts('EmailField')
    def conv_Email(self, model, field, kwargs):
        kwargs['validators'].append(validators.Email())
        self._string_common(model, field, kwargs)
        return NoneStringField(**kwargs)

    @converts('IntField')
    def conv_Int(self, model, field, kwargs):
        self._number_common(model, field, kwargs)
        return f.IntegerField(**kwargs)

    @converts('FloatField')
    def conv_Float(self, model, field, kwargs):
        self._number_common(model, field, kwargs)
        return f.FloatField(**kwargs)

    @converts('DecimalField')
    def conv_Decimal(self, model, field, kwargs):
        self._number_common(model, field, kwargs)
        return f.DecimalField(**kwargs)

    @converts('BooleanField')
    def conv_Boolean(self, model, field, kwargs):
        return f.BooleanField(**kwargs)

    @converts('DateTimeField')
    def conv_DateTime(self, model, field, kwargs):
        return f.DateTimeField(**kwargs)

    @converts('BinaryField')
    def conv_Binary(self, model, field, kwargs):
        #TODO: may be set file field that will save file`s data to MongoDB
        if field.max_bytes:
            kwargs['validators'].append(validators.Length(max=field.max_bytes))
        return BinaryField(**kwargs)

    @converts('DictField')
    def conv_Dict(self, model, field, kwargs):
        return DictField(**kwargs)

    @converts('ListField')
    def conv_List(self, model, field, kwargs):
        # list of references -> multi-select over the referenced documents
        if isinstance(field.field, ReferenceField):
            return ModelSelectMultipleField(model=field.field.document_type, **kwargs)
        if field.field.choices:
            kwargs['multiple'] = True
            return self.convert(model, field.field, kwargs)
        field_args = kwargs.pop("field_args", {})
        unbound_field = self.convert(model, field.field, field_args)
        # FieldList does not accept per-entry validators/filters; reset them
        unacceptable = {
            'validators': [],
            'filters': [],
            'min_entries': kwargs.get('min_entries', 0)
        }
        kwargs.update(unacceptable)
        return f.FieldList(unbound_field, **kwargs)

    @converts('SortedListField')
    def conv_SortedList(self, model, field, kwargs):
        #TODO: sort functionality, may be need sortable widget
        return self.conv_List(model, field, kwargs)

    @converts('GeoLocationField')
    def conv_GeoLocation(self, model, field, kwargs):
        #TODO: create geo field and widget (also GoogleMaps)
        return

    @converts('ObjectIdField')
    def conv_ObjectId(self, model, field, kwargs):
        return

    @converts('EmbeddedDocumentField')
    def conv_EmbeddedDocument(self, model, field, kwargs):
        # embedded documents render as a nested sub-form
        kwargs = {
            'validators': [],
            'filters': [],
            'default': field.default or field.document_type_obj,
        }
        form_class = model_form(field.document_type_obj, field_args={})
        return f.FormField(form_class, **kwargs)

    @converts('ReferenceField')
    def conv_Reference(self, model, field, kwargs):
        return ModelSelectField(model=field.document_type, **kwargs)

    @converts('GenericReferenceField')
    def conv_GenericReference(self, model, field, kwargs):
        return

    def coerce(self, field_type):
        """Return a callable coercing form values back to the field's Python type."""
        coercions = {
            "IntField": int,
            "BooleanField": bool,
            "FloatField": float,
            "DecimalField": decimal.Decimal,
            "ObjectIdField": ObjectId
        }
        if sys.version_info >= (3, 0):
            return coercions.get(field_type, str)
        else:
            return coercions.get(field_type, unicode)  # noqa: F821 - Python 2 only
def model_fields(model, only=None, exclude=None, field_args=None, converter=None):
    """
    Generate an ordered dictionary of wtforms fields for a given database model.

    See `model_form` docstring for description of parameters.

    :raises TypeError: if *model* is not a mongoengine Document schema.
    """
    from mongoengine.base import BaseDocument, DocumentMetaclass
    if not isinstance(model, (BaseDocument, DocumentMetaclass)):
        raise TypeError('model must be a mongoengine Document schema')

    converter = converter or ModelConverter()
    field_args = field_args or {}

    # order field names by declaration order (mongoengine's creation_counter)
    if sys.version_info >= (3, 0):
        names = ((k, v.creation_counter) for k, v in model._fields.items())
    else:
        names = ((k, v.creation_counter) for k, v in model._fields.iteritems())
    # Materialize the names: on Python 3 `map` is a one-shot iterator, and the
    # previous code rebuilt `set(field_names)` inside the comprehension
    # condition below, which exhausted the iterator after the first `only`
    # element and silently dropped the rest.
    field_names = list(map(itemgetter(0), sorted(names, key=itemgetter(1))))

    if only:
        known = set(field_names)
        field_names = [x for x in only if x in known]  # preserve `only` ordering
    elif exclude:
        excluded = set(exclude)
        field_names = [x for x in field_names if x not in excluded]

    field_dict = OrderedDict()
    for name in field_names:
        model_field = model._fields[name]
        field = converter.convert(model, model_field, field_args.get(name))
        if field is not None:
            field_dict[name] = field
    return field_dict
def model_form(model, base_class=ModelForm, only=None, exclude=None, field_args=None, converter=None):
    """
    Create a wtforms Form class for a given mongoengine Document schema::

        from flask.ext.mongoengine.wtf import model_form
        from myproject.myapp.schemas import Article
        ArticleForm = model_form(Article)

    :param model:
        A mongoengine Document schema class
    :param base_class:
        Base form class to extend from. Must be a ``wtforms.Form`` subclass.
    :param only:
        An optional iterable with the property names that should be included in
        the form. Only these properties will have fields.
    :param exclude:
        An optional iterable with the property names that should be excluded
        from the form. All other properties will have fields.
    :param field_args:
        An optional dictionary of field names mapping to keyword arguments used
        to construct each field object.
    :param converter:
        A converter to generate the fields based on the model properties. If
        not set, ``ModelConverter`` is used.
    """
    form_attrs = model_fields(model, only, exclude, field_args, converter)
    form_attrs['model_class'] = model  # let the form know which schema it edits
    form_name = model.__name__ + 'Form'
    return type(form_name, (base_class,), form_attrs)
| 34.47482 | 128 | 0.640755 |
ace1365df1b135cbecb6dfb7c388496eb1b277bd | 16,455 | py | Python | homeassistant/helpers/config_entry_oauth2_flow.py | erogleva/core | 994ae09f69afe772150a698953c0d7386a745de2 | [
"Apache-2.0"
] | 1 | 2021-11-13T23:06:27.000Z | 2021-11-13T23:06:27.000Z | homeassistant/helpers/config_entry_oauth2_flow.py | erogleva/core | 994ae09f69afe772150a698953c0d7386a745de2 | [
"Apache-2.0"
] | 52 | 2020-07-14T14:12:26.000Z | 2022-03-31T06:24:02.000Z | homeassistant/helpers/config_entry_oauth2_flow.py | erogleva/core | 994ae09f69afe772150a698953c0d7386a745de2 | [
"Apache-2.0"
] | 1 | 2018-08-03T20:06:38.000Z | 2018-08-03T20:06:38.000Z | """Config Flow using OAuth2.
This module exists of the following parts:
- OAuth2 config flow which supports multiple OAuth2 implementations
- OAuth2 implementation that works with local provided client ID/secret
"""
from abc import ABC, ABCMeta, abstractmethod
import asyncio
import logging
import secrets
import time
from typing import Any, Awaitable, Callable, Dict, Optional, cast
from aiohttp import client, web
import async_timeout
import jwt
import voluptuous as vol
from yarl import URL
from homeassistant import config_entries
from homeassistant.components.http import HomeAssistantView
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.network import NoURLAvailableError, get_url
from .aiohttp_client import async_get_clientsession
_LOGGER = logging.getLogger(__name__)
DATA_JWT_SECRET = "oauth2_jwt_secret"
DATA_VIEW_REGISTERED = "oauth2_view_reg"
DATA_IMPLEMENTATIONS = "oauth2_impl"
DATA_PROVIDERS = "oauth2_providers"
AUTH_CALLBACK_PATH = "/auth/external/callback"
class AbstractOAuth2Implementation(ABC):
    """Base class to abstract OAuth2 authentication."""

    @property
    @abstractmethod
    def name(self) -> str:
        """Name of the implementation."""

    @property
    @abstractmethod
    def domain(self) -> str:
        """Domain that is providing the implementation."""

    @abstractmethod
    async def async_generate_authorize_url(self, flow_id: str) -> str:
        """Generate a url for the user to authorize.

        This step is called when a config flow is initialized. It should redirect the
        user to the vendor website where they can authorize Home Assistant.

        The implementation is responsible to get notified when the user is authorized
        and pass this to the specified config flow. Do as little work as possible once
        notified. You can do the work inside async_resolve_external_data. This will
        give the best UX.

        Pass external data in with:

        await hass.config_entries.flow.async_configure(
            flow_id=flow_id, user_input=external_data
        )
        """

    @abstractmethod
    async def async_resolve_external_data(self, external_data: Any) -> dict:
        """Resolve external data to tokens.

        Turn the data that the implementation passed to the config flow as external
        step data into tokens. These tokens will be stored as 'token' in the
        config entry data.
        """

    async def async_refresh_token(self, token: dict) -> dict:
        """Refresh a token and update expires info."""
        new_token = await self._async_refresh_token(token)
        # Force int for non-compliant oauth2 providers
        new_token["expires_in"] = int(new_token["expires_in"])
        # Absolute expiry timestamp, for comparison against time.time()
        new_token["expires_at"] = time.time() + new_token["expires_in"]
        return new_token

    @abstractmethod
    async def _async_refresh_token(self, token: dict) -> dict:
        """Refresh a token."""
class LocalOAuth2Implementation(AbstractOAuth2Implementation):
    """Local OAuth2 implementation backed by user-provided client credentials."""

    def __init__(
        self,
        hass: HomeAssistant,
        domain: str,
        client_id: str,
        client_secret: str,
        authorize_url: str,
        token_url: str,
    ):
        """Initialize local auth implementation.

        domain is the integration providing this implementation;
        authorize_url/token_url are the provider's OAuth2 endpoints.
        """
        self.hass = hass
        self._domain = domain
        self.client_id = client_id
        self.client_secret = client_secret
        self.authorize_url = authorize_url
        self.token_url = token_url

    @property
    def name(self) -> str:
        """Name of the implementation."""
        return "Configuration.yaml"

    @property
    def domain(self) -> str:
        """Domain providing the implementation."""
        return self._domain

    @property
    def redirect_uri(self) -> str:
        """Return the redirect uri."""
        # Built from the URL of the request currently being served, so it
        # matches whatever base URL the user reached the frontend on.
        return f"{get_url(self.hass, require_current_request=True)}{AUTH_CALLBACK_PATH}"

    @property
    def extra_authorize_data(self) -> dict:
        """Extra data that needs to be appended to the authorize url."""
        return {}

    async def async_generate_authorize_url(self, flow_id: str) -> str:
        """Generate a url for the user to authorize."""
        # The flow id is round-tripped through the provider inside a signed
        # JWT `state` parameter, so the callback can resume the right flow.
        return str(
            URL(self.authorize_url)
            .with_query(
                {
                    "response_type": "code",
                    "client_id": self.client_id,
                    "redirect_uri": self.redirect_uri,
                    "state": _encode_jwt(self.hass, {"flow_id": flow_id}),
                }
            )
            .update_query(self.extra_authorize_data)
        )

    async def async_resolve_external_data(self, external_data: Any) -> dict:
        """Resolve the authorization code to tokens."""
        return await self._token_request(
            {
                "grant_type": "authorization_code",
                "code": external_data,
                "redirect_uri": self.redirect_uri,
            }
        )

    async def _async_refresh_token(self, token: dict) -> dict:
        """Refresh tokens."""
        new_token = await self._token_request(
            {
                "grant_type": "refresh_token",
                "client_id": self.client_id,
                "refresh_token": token["refresh_token"],
            }
        )
        # Merge so fields the refresh response omits (e.g. refresh_token) survive
        return {**token, **new_token}

    async def _token_request(self, data: dict) -> dict:
        """Make a token request."""
        session = async_get_clientsession(self.hass)

        data["client_id"] = self.client_id

        # Some providers issue public clients without a secret
        if self.client_secret is not None:
            data["client_secret"] = self.client_secret

        resp = await session.post(self.token_url, data=data)
        resp.raise_for_status()  # raises aiohttp.ClientResponseError on 4xx/5xx
        return cast(dict, await resp.json())
class AbstractOAuth2FlowHandler(config_entries.ConfigFlow, metaclass=ABCMeta):
    """Handle a config flow.

    Base class for OAuth2-based config flows. Subclasses must set DOMAIN
    and provide ``logger``. Flow order: pick_implementation -> auth
    (external step: user visits provider) -> creation (exchange code for
    tokens and create the entry).
    """
    DOMAIN = ""
    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_UNKNOWN
    def __init__(self) -> None:
        """Instantiate config flow.

        Raises TypeError when a subclass forgot to set DOMAIN.
        """
        if self.DOMAIN == "":
            raise TypeError(
                f"Can't instantiate class {self.__class__.__name__} without DOMAIN being set"
            )
        # Authorization code delivered by the callback view.
        self.external_data: Any = None
        # Chosen implementation; assigned in async_step_pick_implementation.
        self.flow_impl: AbstractOAuth2Implementation = None  # type: ignore
    @property
    @abstractmethod
    def logger(self) -> logging.Logger:
        """Return logger."""
    @property
    def extra_authorize_data(self) -> dict:
        """Extra data that needs to be appended to the authorize url."""
        return {}
    async def async_step_pick_implementation(
        self, user_input: Optional[dict] = None
    ) -> dict:
        """Handle a flow start.

        Aborts when no implementation is registered; skips the form when
        there is exactly one implementation.
        """
        assert self.hass
        implementations = await async_get_implementations(self.hass, self.DOMAIN)
        if user_input is not None:
            self.flow_impl = implementations[user_input["implementation"]]
            return await self.async_step_auth()
        if not implementations:
            return self.async_abort(reason="missing_configuration")
        if len(implementations) == 1:
            # Pick first implementation as we have only one.
            self.flow_impl = list(implementations.values())[0]
            return await self.async_step_auth()
        return self.async_show_form(
            step_id="pick_implementation",
            data_schema=vol.Schema(
                {
                    vol.Required(
                        "implementation", default=list(implementations.keys())[0]
                    ): vol.In({key: impl.name for key, impl in implementations.items()})
                }
            ),
        )
    async def async_step_auth(
        self, user_input: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """Create an entry for auth.

        First call: generate the provider authorize URL and hand control
        to the user (external step). Second call (via the callback view
        resuming the flow with the auth code): stash it and move on.
        """
        # Flow has been triggered by external data
        if user_input:
            self.external_data = user_input
            return self.async_external_step_done(next_step_id="creation")
        try:
            with async_timeout.timeout(10):
                url = await self.flow_impl.async_generate_authorize_url(self.flow_id)
        except asyncio.TimeoutError:
            return self.async_abort(reason="authorize_url_timeout")
        except NoURLAvailableError:
            return self.async_abort(
                reason="no_url_available",
                description_placeholders={
                    "docs_url": "https://www.home-assistant.io/more-info/no-url-available"
                },
            )
        url = str(URL(url).update_query(self.extra_authorize_data))
        return self.async_external_step(step_id="auth", url=url)
    async def async_step_creation(
        self, user_input: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """Create config entry from external data.

        Exchanges the stored authorization code for tokens and normalizes
        ``expires_in``/``expires_at`` before creating the entry.
        """
        token = await self.flow_impl.async_resolve_external_data(self.external_data)
        # Force int for non-compliant oauth2 providers
        try:
            token["expires_in"] = int(token["expires_in"])
        except ValueError as err:
            _LOGGER.warning("Error converting expires_in to int: %s", err)
            return self.async_abort(reason="oauth_error")
        token["expires_at"] = time.time() + token["expires_in"]
        self.logger.info("Successfully authenticated")
        return await self.async_oauth_create_entry(
            {"auth_implementation": self.flow_impl.domain, "token": token}
        )
    async def async_oauth_create_entry(self, data: dict) -> dict:
        """Create an entry for the flow.
        Ok to override if you want to fetch extra info or even add another step.
        """
        return self.async_create_entry(title=self.flow_impl.name, data=data)
    async def async_step_discovery(
        self, discovery_info: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Handle a flow initialized by discovery.

        Unique id is the domain itself, so only one entry per integration
        can be configured; aborts if one already exists.
        """
        await self.async_set_unique_id(self.DOMAIN)
        assert self.hass is not None
        if self.hass.config_entries.async_entries(self.DOMAIN):
            return self.async_abort(reason="already_configured")
        return await self.async_step_pick_implementation()
    # All entry points funnel into the same pick/discovery steps.
    async_step_user = async_step_pick_implementation
    async_step_mqtt = async_step_discovery
    async_step_ssdp = async_step_discovery
    async_step_zeroconf = async_step_discovery
    async_step_homekit = async_step_discovery
    @classmethod
    def async_register_implementation(
        cls, hass: HomeAssistant, local_impl: LocalOAuth2Implementation
    ) -> None:
        """Register a local implementation.

        Convenience wrapper around the module-level function, bound to
        this flow's DOMAIN.
        """
        async_register_implementation(hass, cls.DOMAIN, local_impl)
@callback
def async_register_implementation(
    hass: HomeAssistant, domain: str, implementation: AbstractOAuth2Implementation
) -> None:
    """Register an OAuth2 flow implementation for an integration.

    Local implementations require the HTTP authorize-callback view, which
    is registered lazily and exactly once per Home Assistant instance.
    """
    needs_view = isinstance(implementation, LocalOAuth2Implementation)
    view_registered = hass.data.get(DATA_VIEW_REGISTERED, False)
    if needs_view and not view_registered:
        hass.http.register_view(OAuth2AuthorizeCallbackView())  # type: ignore
        hass.data[DATA_VIEW_REGISTERED] = True
    domain_impls = hass.data.setdefault(DATA_IMPLEMENTATIONS, {}).setdefault(domain, {})
    domain_impls[implementation.domain] = implementation
async def async_get_implementations(
    hass: HomeAssistant, domain: str
) -> Dict[str, AbstractOAuth2Implementation]:
    """Return OAuth2 implementations for specified domain.

    Merges statically registered implementations with any produced by
    registered implementation providers (providers may return None).
    """
    registered = cast(
        Dict[str, AbstractOAuth2Implementation],
        hass.data.setdefault(DATA_IMPLEMENTATIONS, {}).get(domain, {}),
    )
    if DATA_PROVIDERS not in hass.data:
        return registered
    # Copy before augmenting so the registry itself is never mutated.
    combined = dict(registered)
    for provider_domain, get_impl in hass.data[DATA_PROVIDERS].items():
        provided = await get_impl(hass, domain)
        if provided is not None:
            combined[provider_domain] = provided
    return combined
async def async_get_config_entry_implementation(
    hass: HomeAssistant, config_entry: config_entries.ConfigEntry
) -> AbstractOAuth2Implementation:
    """Return the implementation for this config entry.

    Raises ValueError when the implementation the entry was created with
    is no longer registered.
    """
    available = await async_get_implementations(hass, config_entry.domain)
    impl_key = config_entry.data["auth_implementation"]
    implementation = available.get(impl_key)
    if implementation is None:
        raise ValueError("Implementation not available")
    return implementation
@callback
def async_add_implementation_provider(
    hass: HomeAssistant,
    provider_domain: str,
    async_provide_implementation: Callable[
        [HomeAssistant, str], Awaitable[Optional[AbstractOAuth2Implementation]]
    ],
) -> None:
    """Add an implementation provider.

    The provider is awaited per integration domain by
    async_get_implementations and may return None when it has nothing
    to offer for that domain.
    """
    providers = hass.data.setdefault(DATA_PROVIDERS, {})
    providers[provider_domain] = async_provide_implementation
class OAuth2AuthorizeCallbackView(HomeAssistantView):
    """OAuth2 Authorization Callback View.

    Receives the provider redirect carrying ``code`` and ``state``,
    validates the JWT state, and resumes the matching config flow with
    the authorization code as user input.
    """
    # The provider redirects an unauthenticated browser here.
    requires_auth = False
    url = AUTH_CALLBACK_PATH
    name = "auth:external:callback"
    async def get(self, request: web.Request) -> web.Response:
        """Receive authorization code."""
        if "code" not in request.query or "state" not in request.query:
            return web.Response(
                text=f"Missing code or state parameter in {request.url}"
            )
        hass = request.app["hass"]
        # state is the JWT minted in async_generate_authorize_url.
        state = _decode_jwt(hass, request.query["state"])
        if state is None:
            return web.Response(text="Invalid state")
        # Feed the code back into the waiting flow (async_step_auth).
        await hass.config_entries.flow.async_configure(
            flow_id=state["flow_id"], user_input=request.query["code"]
        )
        # Close the popup the frontend opened for the external step.
        return web.Response(
            headers={"content-type": "text/html"},
            text="<script>window.close()</script>",
        )
class OAuth2Session:
    """Session to make requests authenticated with OAuth2.

    Wraps a config entry's stored token and its implementation so that
    requests transparently refresh expired tokens first.
    """
    def __init__(
        self,
        hass: HomeAssistant,
        config_entry: config_entries.ConfigEntry,
        implementation: AbstractOAuth2Implementation,
    ):
        """Initialize an OAuth2 session."""
        self.hass = hass
        self.config_entry = config_entry
        self.implementation = implementation
    @property
    def token(self) -> dict:
        """Return the token."""
        return cast(dict, self.config_entry.data["token"])
    @property
    def valid_token(self) -> bool:
        """Return if token is still valid."""
        return cast(float, self.token["expires_at"]) > time.time()
    async def async_ensure_token_valid(self) -> None:
        """Ensure that the current token is valid.

        Refreshes via the implementation and persists the new token back
        into the config entry when expired.
        """
        if self.valid_token:
            return
        new_token = await self.implementation.async_refresh_token(self.token)
        self.hass.config_entries.async_update_entry(
            self.config_entry, data={**self.config_entry.data, "token": new_token}
        )
    async def async_request(
        self, method: str, url: str, **kwargs: Any
    ) -> client.ClientResponse:
        """Make a request.

        Re-reads the token from the entry after the validity check so a
        just-refreshed token is used.
        """
        await self.async_ensure_token_valid()
        return await async_oauth2_request(
            self.hass, self.config_entry.data["token"], method, url, **kwargs
        )
async def async_oauth2_request(
    hass: HomeAssistant, token: dict, method: str, url: str, **kwargs: Any
) -> client.ClientResponse:
    """Make an OAuth2 authenticated request.

    This method will not refresh tokens. Use OAuth2 session for that.

    Caller-supplied ``headers`` are merged with the Authorization header
    (the bearer token wins on a key conflict).
    """
    session = async_get_clientsession(hass)
    # Bug fix: pop headers out of kwargs before the call. Previously the
    # code passed both **kwargs (possibly containing "headers") AND an
    # explicit headers= argument, which raises
    # "TypeError: request() got multiple values for keyword argument 'headers'"
    # for any caller that actually supplies headers.
    caller_headers = kwargs.pop("headers", None) or {}
    return await session.request(
        method,
        url,
        **kwargs,
        headers={
            **caller_headers,
            "authorization": f"Bearer {token['access_token']}",
        },
    )
@callback
def _encode_jwt(hass: HomeAssistant, data: dict) -> str:
    """Sign *data* as an HS256 JWT with a per-instance secret.

    The secret is generated lazily on first use and cached in hass.data
    so _decode_jwt verifies with the same key.
    """
    secret = hass.data.get(DATA_JWT_SECRET)
    if secret is None:
        secret = secrets.token_hex()
        hass.data[DATA_JWT_SECRET] = secret
    return jwt.encode(data, secret, algorithm="HS256").decode()
@callback
def _decode_jwt(hass: HomeAssistant, encoded: str) -> Optional[dict]:
    """Decode a JWT minted by _encode_jwt; return None when invalid."""
    secret = cast(str, hass.data.get(DATA_JWT_SECRET))
    try:
        decoded = jwt.decode(encoded, secret, algorithms=["HS256"])
    except jwt.InvalidTokenError:
        return None
    return decoded
| 32.844311 | 93 | 0.651231 |
ace1366daab9e63f24ed775480be2cbf31f0e9e2 | 1,694 | py | Python | emulator/utils/gp.py | Harry45/emuPK | c5cd8a4ab7ef593b196ee58d9df5d826d444a2b9 | [
"MIT"
] | 2 | 2021-05-10T16:59:34.000Z | 2021-05-19T16:10:24.000Z | emulator/utils/gp.py | Harry45/emuPK | c5cd8a4ab7ef593b196ee58d9df5d826d444a2b9 | [
"MIT"
] | null | null | null | emulator/utils/gp.py | Harry45/emuPK | c5cd8a4ab7ef593b196ee58d9df5d826d444a2b9 | [
"MIT"
] | 2 | 2021-04-16T23:55:16.000Z | 2021-09-09T12:48:41.000Z | # Author: Arrykrishna Mootoovaloo
# Collaborators: Alan Heavens, Andrew Jaffe, Florent Leclercq
# Email : a.mootoovaloo17@imperial.ac.uk
# Affiliation : Imperial Centre for Inference and Cosmology
# Status : Under Development
'''
Perform all additional operations such as prediction, interpolation, gradient calculation for GPs
'''
def pred_normal(input_pred: list) -> float:
    '''
    Return the GP mean prediction (transformed function) at one test point.

    :param: input_pred (list or tuple): (testpoint, gp) where gp exposes
        a ``prediction`` method returning an array
    :return: (float) scalar mean prediction
    '''
    testpoint = input_pred[0]
    gp = input_pred[1]
    flat = gp.prediction(testpoint).reshape(1,)
    return flat[0]
def prediction(input_pred: list) -> float:
    '''
    Return the GP mean prediction on the original function scale.

    :param: input_pred (list or tuple): (testpoint, gp) where gp exposes
        a ``pred_original_function`` method returning an array
    :return: (float) scalar mean prediction
    '''
    testpoint = input_pred[0]
    gp = input_pred[1]
    flat = gp.pred_original_function(testpoint).reshape(1,)
    return flat[0]
def gradient(input_pred: list) -> float:
    '''
    Return GP derivative(s) at a test point.

    :param: input_pred (list or tuple): (testpoint, gp, order); order 1
        requests first derivatives only, otherwise both first and second
    :return: first derivative, or a (first, second) pair when order != 1
    '''
    testpoint, gp, order = input_pred[0], input_pred[1], input_pred[2]
    if order == 1:
        return gp.derivatives(testpoint, order)
    first_der, second_der = gp.derivatives(testpoint, order)
    return first_der, second_der
| 26.46875 | 97 | 0.695396 |
ace1369bfbb28b925a581d7f1a4378e6138044b4 | 4,369 | py | Python | ckanext/ckanext-apicatalog_routes/ckanext/apicatalog_routes/auth.py | vrk-kpa/api-catalog | 1150e0f288a96c27657a0c4a02dac96ce1134894 | [
"MIT"
] | 17 | 2015-10-28T11:39:30.000Z | 2022-01-25T22:25:37.000Z | ckanext/ckanext-apicatalog_routes/ckanext/apicatalog_routes/auth.py | vrk-kpa/api-catalog | 1150e0f288a96c27657a0c4a02dac96ce1134894 | [
"MIT"
] | 19 | 2016-05-22T06:09:54.000Z | 2022-03-18T09:58:28.000Z | ckanext/ckanext-apicatalog_routes/ckanext/apicatalog_routes/auth.py | vrk-kpa/api-catalog | 1150e0f288a96c27657a0c4a02dac96ce1134894 | [
"MIT"
] | 9 | 2015-12-24T04:43:46.000Z | 2020-04-10T06:00:00.000Z | import logging
from ckan import authz
from ckan.plugins.toolkit import aslist
from ckan.logic.auth import get, update
from ckan.plugins.toolkit import auth_allow_anonymous_access, _, chained_auth_function
from ckan.lib.base import config
from ckan.common import c
log = logging.getLogger(__name__)
@auth_allow_anonymous_access
def package_show(context, data_dict):
    """Allow configured read-only users to view any package; defer to CKAN otherwise."""
    readonly = aslist(config.get('ckanext.apicatalog_routes.readonly_users', []))
    user = context.get('user')
    if user and user in readonly:
        return {'success': True}
    return get.package_show(context, data_dict)
def read_members(context, data_dict):
    """Grant member listing to read-only users and member editors; defer otherwise."""
    # Fall back to the group id from the template context when missing.
    if 'id' not in data_dict and 'group' not in context:
        data_dict['id'] = c.group_dict['id']
    readonly = aslist(config.get('ckanext.apicatalog_routes.readonly_users', []))
    user = context.get('user')
    if (user and user in readonly) or _is_member_editor(context):
        return {'success': True}
    return update.group_edit_permissions(context, data_dict)
def create_user_to_organization(context, data_dict=None):
    """Allow only the configured user-creator accounts to create users via the API."""
    creators = aslist(config.get('ckanext.apicatalog_routes.allowed_user_creators', []))
    user = context.get('user')
    if user and user in creators:
        return {"success": True}
    return {
        "success": False,
        "msg": _("User {user} not authorized to create users via the API").format(user=user)
    }
@chained_auth_function
def user_create(next_auth, context, data_dict=None):
    """Allow configured user editors to create users; otherwise chain to the next auth."""
    editors = aslist(config.get('ckanext.apicatalog_routes.allowed_user_editors', []))
    user = context.get('user')
    if user and user in editors:
        return {"success": True}
    return next_auth(context, data_dict)
@chained_auth_function
@auth_allow_anonymous_access
def user_update(next_auth, context, data_dict=None):
    # Configured user editors may update any NON-sysadmin account; all
    # other cases fall through to the default CKAN auth chain.
    users_allowed_to_create_users = aslist(config.get('ckanext.apicatalog_routes.allowed_user_editors', []))
    # Determine whether the target user is a sysadmin: prefer the loaded
    # user object, otherwise fall back to the submitted data dict.
    if context.get('user_obj'):
        sysadmin_field = context.get('user_obj').sysadmin
    else:
        sysadmin_field = data_dict.get('sysadmin')
    # In edit form, only user id is supplied
    if not sysadmin_field:
        sysadmin_field = authz.is_sysadmin(data_dict['id'])
    # NOTE(review): the `is False` comparison means a truthy or None
    # sysadmin flag always defers to next_auth — presumably intentional
    # so editors can never touch sysadmins; confirm.
    if context.get('user') and context.get('user') in users_allowed_to_create_users \
            and sysadmin_field is False:
        return {"success": True}
    return next_auth(context, data_dict)
@chained_auth_function
@auth_allow_anonymous_access
def user_show(next_auth, context, data_dict=None):
    """Let configured user editors view non-sysadmin users (email kept visible)."""
    editors = aslist(config.get('ckanext.apicatalog_routes.allowed_user_editors', []))
    user = context.get('user')
    if user and user in editors and context['user_obj'].sysadmin is False:
        context['keep_email'] = True
        return {"success": True}
    return next_auth(context, data_dict)
def _is_member_editor(context):
    """Return truthy when the requesting user is a configured member editor."""
    editors = aslist(config.get('ckanext.apicatalog_routes.allowed_member_editors', []))
    user = context.get('user')
    return user and user in editors
@chained_auth_function
@auth_allow_anonymous_access
def group_show(next_auth, context, data_dict=None):
    """Member editors may view any group; everyone else goes through the chain."""
    if _is_member_editor(context):
        return {"success": True}
    return next_auth(context, data_dict)
@chained_auth_function
def member_create(next_auth, context, data_dict=None):
    """Member editors may add group members; otherwise defer to the chain."""
    if _is_member_editor(context):
        return {"success": True}
    return next_auth(context, data_dict)
@chained_auth_function
def organization_member_create(next_auth, context, data_dict=None):
    """Member editors may add organization members; otherwise defer to the chain."""
    if _is_member_editor(context):
        return {"success": True}
    return next_auth(context, data_dict)
@chained_auth_function
def member_delete(next_auth, context, data_dict=None):
    """Member editors may remove group members; otherwise defer to the chain."""
    if _is_member_editor(context):
        return {"success": True}
    return next_auth(context, data_dict)
@chained_auth_function
def organization_member_delete(next_auth, context, data_dict=None):
    """Member editors may remove organization members; otherwise defer to the chain."""
    if _is_member_editor(context):
        return {"success": True}
    return next_auth(context, data_dict)
@chained_auth_function
def user_invite(next_auth, context, data_dict=None):
    """Member editors may invite users; otherwise defer to the chain."""
    if _is_member_editor(context):
        return {"success": True}
    return next_auth(context, data_dict)
| 34.401575 | 110 | 0.750973 |
ace138c3e90c51851998b33d688533ca6b133610 | 847 | py | Python | pavan_2.0.py | pkongaleti/python | fa36c058cc2b19e5c320206143fb0d26ff1523ce | [
"MIT"
] | null | null | null | pavan_2.0.py | pkongaleti/python | fa36c058cc2b19e5c320206143fb0d26ff1523ce | [
"MIT"
] | null | null | null | pavan_2.0.py | pkongaleti/python | fa36c058cc2b19e5c320206143fb0d26ff1523ce | [
"MIT"
] | null | null | null | import pyttsx3 # pip install pyttsx3
import datetime
engine = pyttsx3.init()
def speak(audio):
    # Queue the text on the module-level TTS engine and block until spoken.
    engine.say(audio)
    engine.runAndWait()
def time_():
    """Speak the current wall-clock time as HH:MM:SS."""
    current = datetime.datetime.now().strftime("%H:%M:%S")
    speak("The current time is :")
    speak(current)
def date_():
    """Speak today's date, then a time-of-day greeting.

    The greeting lives here (not in welcome_) to preserve the original
    call order: welcome -> time -> date -> greeting.
    """
    # Take a single timestamp so year/month/day/hour cannot disagree
    # (the original called datetime.now() separately for each field,
    # which could straddle midnight).
    now = datetime.datetime.now()
    speak("The current date is")
    speak(str(now.year))
    speak(str(now.month))
    speak(str(now.day))
    hour = now.hour
    if hour >= 6 and hour < 12:
        speak("Good Morning Bubbu")
    elif hour >= 12 and hour <= 18:
        # Bug fix: this mid-day branch previously said "Good Evening",
        # duplicating the 18-24h branch below.
        speak("Good Afternoon Bubbu")
    elif hour > 18 and hour <= 24:
        speak("Good evening Bubbu")
    else:
        speak("Good Night")
def welcome_():
    # Entry point: greeting, then the current time and date announcements.
    speak("Welcome, Bubbu")
    time_()
    date_()
welcome_()  # runs immediately on import/execution
ace1394566d4e04eb2a778fcfdd9b85b32b9ecf6 | 3,058 | py | Python | tests/test_util.py | ccolas/sentence-transformers | d7235076a663114c5267b093d5c28e1fc0272f76 | [
"Apache-2.0"
] | 7,566 | 2019-07-25T07:45:17.000Z | 2022-03-31T22:15:35.000Z | tests/test_util.py | ccolas/sentence-transformers | d7235076a663114c5267b093d5c28e1fc0272f76 | [
"Apache-2.0"
] | 1,444 | 2019-07-25T11:53:48.000Z | 2022-03-31T15:13:32.000Z | tests/test_util.py | ccolas/sentence-transformers | d7235076a663114c5267b093d5c28e1fc0272f76 | [
"Apache-2.0"
] | 1,567 | 2019-07-26T15:19:28.000Z | 2022-03-31T19:57:35.000Z | from sentence_transformers import util, SentenceTransformer
import unittest
import numpy as np
import sklearn
import torch
class UtilTest(unittest.TestCase):
    """Unit tests for sentence_transformers.util helpers."""
    def test_normalize_embeddings(self):
        """Tests the correct computation of util.normalize_embeddings"""
        embedding_size = 100
        a = torch.tensor(np.random.randn(50, embedding_size))
        a_norm = util.normalize_embeddings(a)
        # Every normalized row must keep its dimensionality and have unit L2 norm.
        for embedding in a_norm:
            assert len(embedding) == embedding_size
            emb_norm = torch.norm(embedding)
            assert abs(emb_norm.item() - 1) < 0.0001
    def test_pytorch_cos_sim(self):
        """Tests the correct computation of util.pytorch_cos_scores"""
        a = np.random.randn(50, 100)
        b = np.random.randn(50, 100)
        # sklearn's pairwise cosine similarity is the reference implementation.
        sklearn_pairwise = sklearn.metrics.pairwise.cosine_similarity(a, b)
        pytorch_cos_scores = util.pytorch_cos_sim(a, b).numpy()
        for i in range(len(sklearn_pairwise)):
            for j in range(len(sklearn_pairwise[i])):
                assert abs(sklearn_pairwise[i][j] - pytorch_cos_scores[i][j]) < 0.001
    def test_semantic_search(self):
        """Tests util.semantic_search function"""
        num_queries = 20
        num_k = 10
        doc_emb = torch.tensor(np.random.randn(1000, 100))
        q_emb = torch.tensor(np.random.randn(num_queries, 100))
        # Odd chunk sizes deliberately exercise the chunked search path.
        hits = util.semantic_search(q_emb, doc_emb, top_k=num_k, query_chunk_size=5, corpus_chunk_size=17)
        assert len(hits) == num_queries
        assert len(hits[0]) == num_k
        #Sanity Check of the results
        cos_scores = util.pytorch_cos_sim(q_emb, doc_emb)
        cos_scores_values, cos_scores_idx = cos_scores.topk(num_k)
        cos_scores_values = cos_scores_values.cpu().tolist()
        cos_scores_idx = cos_scores_idx.cpu().tolist()
        # Results must match an exact brute-force top-k over the full matrix.
        for qid in range(num_queries):
            for hit_num in range(num_k):
                assert hits[qid][hit_num]['corpus_id'] == cos_scores_idx[qid][hit_num]
                assert np.abs(hits[qid][hit_num]['score'] - cos_scores_values[qid][hit_num]) < 0.001
    def test_paraphrase_mining(self):
        # NOTE(review): downloads a pretrained model — this test needs
        # network access and is much slower than the others.
        model = SentenceTransformer('paraphrase-distilroberta-base-v1')
        sentences = [
            "This is a test", "This is a test!",
            "The cat sits on mat", "The cat sits on the mat", "On the mat a cat sits",
            "A man eats pasta", "A woman eats pasta", "A man eats spaghetti"
        ]
        duplicates = util.paraphrase_mining(model, sentences)
        # Any confidently-scored pair must belong to a known paraphrase group.
        for score, a, b in duplicates:
            if score > 0.5:
                assert (a,b) in [(0,1), (2,3), (2,4), (3,4), (5,6), (5,7), (6,7)]
    def test_pairwise_scores(self):
        """Tests util.pairwise_cos_sim against sklearn's paired distances."""
        a = np.random.randn(50, 100)
        b = np.random.randn(50, 100)
        #Pairwise cos
        sklearn_pairwise = 1-sklearn.metrics.pairwise.paired_cosine_distances(a, b)
        pytorch_cos_scores = util.pairwise_cos_sim(a, b).numpy()
        assert np.allclose(sklearn_pairwise, pytorch_cos_scores)
# Allow running this test module directly with `python test_util.py`.
if __name__ == "__main__":
    unittest.main()
ace1399b00a0323bf5897f4e03e27b078b7668bc | 419 | py | Python | src/stackoverflow/66852436/test_my_numbers.py | mrdulin/python-codelab | 3d960a14a96b3a673b7dc2277d202069b1f8e778 | [
"MIT"
] | null | null | null | src/stackoverflow/66852436/test_my_numbers.py | mrdulin/python-codelab | 3d960a14a96b3a673b7dc2277d202069b1f8e778 | [
"MIT"
] | null | null | null | src/stackoverflow/66852436/test_my_numbers.py | mrdulin/python-codelab | 3d960a14a96b3a673b7dc2277d202069b1f8e778 | [
"MIT"
] | 3 | 2020-02-19T08:02:04.000Z | 2021-06-08T13:27:51.000Z | import unittest
from unittest.mock import patch
import my_numbers
class TestNumbers(unittest.TestCase):
    """Tests for my_numbers.get_count with MongoClient patched out."""
    @patch('my_numbers.MongoClient')
    def test_get_count(self, mocked_object):
        """get_count returns the client data and connects with the expected host/port."""
        mocked_object.return_value = [{'1': 'data'}]
        assert my_numbers.get_count() == [{'1': 'data'}]
        # Bug fix: the original called `mocked_object.called_once_with_value(...)`,
        # which is not a Mock assertion method — attribute access on a Mock just
        # creates a child mock, so the check silently passed no matter how
        # MongoClient was called. Use the real assertion helper.
        mocked_object.assert_called_once_with('abc.xyz.com', port=27010)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 26.1875 | 71 | 0.696897 |
ace139c4240c48160bd5555ba69bce7f0c3b1fdd | 2,377 | py | Python | tests/inferences/test_hmc.py | xiangze/edward | 6419751d1d849c84c502e5ff3f7249b9bbc7b3aa | [
"Apache-2.0"
] | 1 | 2021-04-06T15:08:53.000Z | 2021-04-06T15:08:53.000Z | tests/inferences/test_hmc.py | xiangze/edward | 6419751d1d849c84c502e5ff3f7249b9bbc7b3aa | [
"Apache-2.0"
] | null | null | null | tests/inferences/test_hmc.py | xiangze/edward | 6419751d1d849c84c502e5ff3f7249b9bbc7b3aa | [
"Apache-2.0"
] | 3 | 2017-12-22T08:21:41.000Z | 2020-11-16T02:45:04.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import edward as ed
import numpy as np
import tensorflow as tf
from edward.models import Categorical, Empirical, Normal
class test_hmc_class(tf.test.TestCase):
    """HMC inference tests on conjugate Normal-Normal models (Edward / TF1)."""
    def test_normalnormal_float32(self):
        """HMC posterior matches the analytic Normal-Normal solution (float32)."""
        with self.test_session() as sess:
            x_data = np.array([0.0] * 50, dtype=np.float32)
            mu = Normal(loc=0.0, scale=1.0)
            x = Normal(loc=mu, scale=1.0, sample_shape=50)
            # 2000 HMC samples stored in an Empirical approximation.
            qmu = Empirical(params=tf.Variable(tf.ones(2000)))
            # analytic solution: N(loc=0.0, scale=\sqrt{1/51}=0.140)
            inference = ed.HMC({mu: qmu}, data={x: x_data})
            inference.run()
            self.assertAllClose(qmu.mean().eval(), 0, rtol=1e-2, atol=1e-2)
            self.assertAllClose(qmu.stddev().eval(), np.sqrt(1 / 51),
                                rtol=1e-2, atol=1e-2)
    def test_normalnormal_float64(self):
        """Same model as above but exercising the float64 code path."""
        with self.test_session() as sess:
            x_data = np.array([0.0] * 50, dtype=np.float64)
            mu = Normal(loc=tf.constant(0.0, dtype=tf.float64),
                        scale=tf.constant(1.0, dtype=tf.float64))
            x = Normal(loc=mu,
                       scale=tf.constant(1.0, dtype=tf.float64),
                       sample_shape=50)
            qmu = Empirical(params=tf.Variable(tf.ones(2000, dtype=tf.float64)))
            # analytic solution: N(loc=0.0, scale=\sqrt{1/51}=0.140)
            inference = ed.HMC({mu: qmu}, data={x: x_data})
            inference.run()
            self.assertAllClose(qmu.mean().eval(), 0, rtol=1e-2, atol=1e-2)
            self.assertAllClose(qmu.stddev().eval(), np.sqrt(1 / 51),
                                rtol=1e-2, atol=1e-2)
    def test_indexedslices(self):
        """Test that gradients accumulate when tf.gradients doesn't return
        tf.Tensor (IndexedSlices)."""
        with self.test_session() as sess:
            N = 10  # number of data points
            K = 2  # number of clusters
            T = 1  # number of MCMC samples
            x_data = np.zeros(N, dtype=np.float32)
            mu = Normal(0.0, 1.0, sample_shape=K)
            c = Categorical(logits=tf.zeros(N))
            # tf.gather produces IndexedSlices gradients w.r.t. mu.
            x = Normal(tf.gather(mu, c), tf.ones(N))
            qmu = Empirical(params=tf.Variable(tf.ones([T, K])))
            qc = Empirical(params=tf.Variable(tf.ones([T, N])))
            inference = ed.HMC({mu: qmu}, data={x: x_data})
            # Only initialize: the test is that graph construction succeeds.
            inference.initialize()
if __name__ == '__main__':
    # Fixed seed keeps the stochastic HMC tests reproducible.
    ed.set_seed(42)
    tf.test.main()
| 32.121622 | 74 | 0.623054 |
ace13a0d938f12ceb3a2226da174081f05537058 | 1,628 | py | Python | literature/migrations/0003_author_book_m2m.py | MarkusH/talk-orm | 4eaa0ece9eac676836b2576102b303961f81fb96 | [
"BSD-3-Clause"
] | 2 | 2019-03-16T13:59:33.000Z | 2019-03-23T11:33:41.000Z | literature/migrations/0003_author_book_m2m.py | MarkusH/talk-orm | 4eaa0ece9eac676836b2576102b303961f81fb96 | [
"BSD-3-Clause"
] | 6 | 2019-03-19T10:36:39.000Z | 2022-02-10T11:13:22.000Z | literature/migrations/0003_author_book_m2m.py | MarkusH/talk-orm | 4eaa0ece9eac676836b2576102b303961f81fb96 | [
"BSD-3-Clause"
] | 1 | 2019-03-15T23:03:20.000Z | 2019-03-15T23:03:20.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-31 12:29
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds an explicit Author<->Book M2M with a custom through model,
    # plus a uniqueness constraint on (author, book).
    dependencies = [("literature", "0002_book")]
    operations = [
        # Explicit join table so extra metadata can be attached later.
        migrations.CreateModel(
            name="AuthorBookThrough",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "author",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="literature.Author",
                    ),
                ),
                (
                    "book",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="literature.Book",
                    ),
                ),
            ],
        ),
        # M2M routed through the model above; reverse accessor "books_m2m".
        migrations.AddField(
            model_name="book",
            name="authors",
            field=models.ManyToManyField(
                related_name="books_m2m",
                through="literature.AuthorBookThrough",
                to="literature.Author",
            ),
        ),
        # Each (author, book) pair may appear at most once.
        migrations.AlterUniqueTogether(
            name="authorbookthrough", unique_together=set([("author", "book")])
        ),
    ]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.