| content (stringlengths 0 to 1.05M) | origin (stringclasses, 2 values) | type (stringclasses, 2 values) |
|---|---|---|
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
# pylint: disable=invalid-name
import os
import environ
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
REPO_DIR = os.path.dirname(BASE_DIR)
# Load environment variables from .env
env = environ.Env()
env_file = os.path.join(REPO_DIR, ".env")
if os.path.exists(env_file):
environ.Env.read_env(str(env_file))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env("DJANGO_SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DJANGO_DEBUG", False)
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=[])
STELLAR_ACCOUNT_ADDRESS = env("STELLAR_ACCOUNT_ADDRESS")
STELLAR_ACCOUNT_SEED = env("STELLAR_ACCOUNT_SEED")
STELLAR_NETWORK = env("STELLAR_NETWORK", default="TESTNET")
HORIZON_URI = env("HORIZON_URI", default="https://horizon-testnet.stellar.org/")
REDIS_URL = env("REDIS_URL", default=None)
SERVER_JWT_KEY = env("SERVER_JWT_KEY")
OPERATION_DEPOSIT = "deposit"
OPERATION_WITHDRAWAL = "withdraw"
ACCOUNT_STARTING_BALANCE = str(2.01)
WITHDRAW_AUTH_REQUIRED = env.bool("WITHDRAW_AUTH_REQUIRED", default=False)
DEPOSIT_AUTH_REQUIRED = env.bool("DEPOSIT_AUTH_REQUIRED", default=False)
FEE_AUTH_REQUIRED = env.bool("FEE_AUTH_REQUIRED", default=False)
TRANSACTIONS_AUTH_REQUIRED = env.bool("TRANSACTIONS_AUTH_REQUIRED", default=False)
TRANSACTION_AUTH_REQUIRED = env.bool("TRANSACTION_AUTH_REQUIRED", default=False)
# Application definition
DJANGO_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
]
THIRD_PARTY_APPS = ["rest_framework", "corsheaders"]
CUSTOM_APPS = ["info", "transaction"]
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + CUSTOM_APPS
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"corsheaders.middleware.CorsMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
APPEND_SLASH = False
ROOT_URLCONF = "app.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(BASE_DIR, "templates")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
WSGI_APPLICATION = "app.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": env.db(
"DATABASE_URL", default="sqlite:///" + os.path.join(REPO_DIR, "db.sqlite3")
)
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATIC_URL = "/static/"
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# Django Rest Framework Settings:
REST_FRAMEWORK = {
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.PageNumberPagination",
"PAGE_SIZE": 10,
}
# API Config
DEFAULT_PAGE_SIZE = 10
# Logging config
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"verbose": {
"format": (
"%(asctime)s [%(process)d] [%(levelname)s] "
+ "pathname=%(pathname)s lineno=%(lineno)s "
+ "funcname=%(funcName)s %(message)s"
),
"datefmt": "%Y-%m-%d %H:%M:%S",
},
"simple": {"format": "%(levelname)s %(message)s"},
},
"handlers": {
"null": {"level": "DEBUG", "class": "logging.NullHandler"},
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
},
},
"loggers": {"testlogger": {"handlers": ["console"], "level": "INFO"}},
}
# CORS configuration
CORS_ORIGIN_ALLOW_ALL = True
# Celery config
CELERY_BROKER_URL = REDIS_URL
CELERY_RESULT_BACKEND = REDIS_URL
CELERY_ACCEPT_CONTENT = ["application/json"]
CELERY_TASK_SERIALIZER = "json"
CELERY_RESULT_SERIALIZER = "json"
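# --- Hedged example only (not part of the generated settings): a minimal .env
# for local development. Every value below is a placeholder chosen for
# illustration; the variable names are the ones read via env() above.
#
#   DJANGO_SECRET_KEY=replace-me
#   DJANGO_DEBUG=True
#   DJANGO_ALLOWED_HOSTS=localhost,127.0.0.1
#   STELLAR_ACCOUNT_ADDRESS=<your Stellar public key>
#   STELLAR_ACCOUNT_SEED=<your Stellar secret seed>
#   SERVER_JWT_KEY=replace-me-too
#   REDIS_URL=redis://localhost:6379/0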
|
nilq/baby-python
|
python
|
"""
Tyk API Management.
"""
from diagrams import Node
class _Tyk(Node):
_provider = "tyk"
_icon_dir = "resources/tyk"
fontcolor = "#2d3436"
|
nilq/baby-python
|
python
|
# -*- encoding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import OrderedDict
from h2o.utils.compatibility import * # NOQA
from .model_base import ModelBase
from .metrics_base import * # NOQA
import h2o
from h2o.expr import ExprNode
class H2OWordEmbeddingModel(ModelBase):
"""
Word embedding model.
"""
def find_synonyms(self, word, count=20):
"""
Find synonyms using a word2vec model.
:param str word: A single word to find synonyms for.
:param int count: The first "count" synonyms will be returned.
        :returns: an OrderedDict mapping the "count" closest synonyms to their similarity scores, highest score first.
"""
j = h2o.api("GET /3/Word2VecSynonyms", data={'model': self.model_id, 'word': word, 'count': count})
return OrderedDict(sorted(zip(j['synonyms'], j['scores']), key=lambda t: t[1], reverse=True))
def transform(self, words, aggregate_method):
"""
Transform words (or sequences of words) to vectors using a word2vec model.
        :param words: An H2OFrame made of a single column containing source words.
        :param str aggregate_method: Specifies how to aggregate sequences of words. If the method is `NONE`,
            no aggregation is performed and each input word is mapped to a single word-vector.
            If the method is 'AVERAGE', the input is treated as sequences of words delimited by NA.
            Each word of a sequence is internally mapped to a vector, and the vectors belonging to
            the same sentence are averaged and returned in the result.
        :returns: an H2OFrame of the resulting word vectors (one vector per word, or per aggregated sequence).
"""
j = h2o.api("GET /3/Word2VecTransform", data={'model': self.model_id, 'words_frame': words.frame_id, 'aggregate_method': aggregate_method})
return h2o.get_frame(j["vectors_frame"]["name"])
def to_frame(self):
"""
Converts a given word2vec model into H2OFrame.
:returns: a frame representing learned word embeddings.
"""
return h2o.H2OFrame._expr(expr=ExprNode("word2vec.to.frame", self))
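# --- Hedged usage sketch (not part of the original module): assumes a local H2O
# cluster can be started with h2o.init() and that H2OWord2vecEstimator is
# available; the tiny token frame below is made-up illustration data.
if __name__ == "__main__":
    from h2o.estimators.word2vec import H2OWord2vecEstimator
    h2o.init()
    sentences = h2o.H2OFrame([["hello world"], ["hello there world"]],
                             column_names=["text"], column_types=["string"])
    words = sentences.tokenize(" ")  # one token per row, sentences separated by NA
    w2v = H2OWord2vecEstimator(epochs=1, min_word_freq=1)
    w2v.train(training_frame=words)
    print(w2v.find_synonyms("hello", count=2))  # OrderedDict of synonym -> score
    print(w2v.to_frame().head())                # learned embeddings as an H2OFrame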
|
nilq/baby-python
|
python
|
from pymongo import MongoClient
def mongo_client():
return MongoClient("database", 27017)
|
nilq/baby-python
|
python
|
from msgpack import Packer
COMMAND_SET_VERSION = 3
class CommandType:
JumpToMain = 1
CRCRegion = 2
Erase = 3
Write = 4
Ping = 5
Read = 6
UpdateConfig = 7
SaveConfig = 8
ReadConfig = 9
GetStatus = 10
def encode_command(command_code, *arguments):
"""
Encodes a command of the given type with given arguments.
"""
p = Packer(use_bin_type=True)
obj = list(arguments)
return p.pack(COMMAND_SET_VERSION) + p.pack(command_code) + p.pack(obj)
def encode_crc_region(address, length):
"""
Encodes the command to request the CRC of a region in flash.
"""
return encode_command(CommandType.CRCRegion, address, length)
def encode_erase_flash_page(address, device_class):
"""
Encodes the command to erase the flash page at given address.
"""
return encode_command(CommandType.Erase, address, device_class)
def encode_write_flash(data, address, device_class):
"""
Encodes the command to write the given data at the given address in a
messagepack byte object.
"""
return encode_command(CommandType.Write, address, device_class, data)
def encode_read_flash(address, length):
"""
Encodes the command to read the flash at given address.
"""
return encode_command(CommandType.Read, address, length)
def encode_update_config(data):
"""
Encodes the command to update the config from given MessagePack data.
"""
return encode_command(CommandType.UpdateConfig, data)
def encode_save_config():
"""
Encodes the command to save the config to flash.
"""
return encode_command(CommandType.SaveConfig)
def encode_jump_to_main():
"""
Encodes the command to jump to application using MessagePack.
"""
return encode_command(CommandType.JumpToMain)
def encode_read_config():
"""
Encodes the read config command.
"""
return encode_command(CommandType.ReadConfig)
def encode_ping():
"""
Encodes a ping command.
"""
return encode_command(CommandType.Ping)
def encode_get_status():
"""
Encodes a get status command.
"""
return encode_command(CommandType.GetStatus)
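# --- Hedged round-trip sketch (not part of the original module): shows the wire
# framing produced by encode_command (protocol version, command code, argument
# list) decoded back with msgpack's streaming Unpacker. The address and device
# class below are made-up example values.
if __name__ == "__main__":
    from msgpack import Unpacker
    frame = encode_write_flash(b"\xde\xad\xbe\xef", 0x08004000, "example-board")
    unpacker = Unpacker(raw=False)
    unpacker.feed(frame)
    version, code, args = list(unpacker)
    print(version, code, args)  # -> 3 4 [134234112, 'example-board', b'\xde\xad\xbe\xef']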
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
from utility.webdl import WebDLUtility
from service.webdl import WebDLService
from postprocessing.tools import plot_utility_model, utility_grid, save_model
from postprocessing.cfg import *
#%%
df = pd.read_csv("service/data/webdl.csv")
X = df[['cfg_client_maxrate_kbit', 'cfg_delay']]
y = df.webdlc_median
service_model = WebDLService(X, y)
# Unscaled
utility_model = WebDLUtility()
tp,delay,utility = utility_grid(service_model, utility_model,
discrete_delay_cnt=glb_discrete_delay_cnt,
discrete_tp_cnt=glb_discrete_tp_cnt,
max_delay=service_model.max_delay,
min_delay=service_model.min_delay,
min_tp=glb_min_tp,
max_tp=service_model.max_tp)
plot_utility_model(tp, delay, utility)
save_model("postprocessing/models_unscaled/", "webdl", tp, delay, utility)
# Scaled
utility_model = WebDLUtility(scaled=True)
tp,delay,utility = utility_grid(service_model, utility_model,
discrete_delay_cnt=glb_discrete_delay_cnt,
discrete_tp_cnt=glb_discrete_tp_cnt,
max_delay=service_model.max_delay,
min_delay=service_model.min_delay,
min_tp=glb_min_tp,
max_tp=service_model.max_tp)
plot_utility_model(tp, delay, utility)
save_model("postprocessing/models/", "webdl", tp, delay, utility)
|
nilq/baby-python
|
python
|
XSym
0033
19e4fe6b5fba275cfa63817605c40e9f
/anaconda2/lib/python2.7/types.py
|
nilq/baby-python
|
python
|
import unittest
import shutil
import SimpleITK as sitk
import numpy as np
from typing import Union, Sequence
from mnts.scripts.dicom2nii import *
from mnts.scripts.normalization import *
from mnts.filters import MNTSFilterGraph
from mnts.filters.intensity import *
from mnts.filters.geom import *
from pathlib import Path
def create_graph() -> MNTSFilterGraph:
r"""Create the normalization graph"""
G = MNTSFilterGraph()
# Add filter nodes to the graph.
G.add_node(SpatialNorm(out_spacing=[0.4492, 0.4492, 0]))
G.add_node(HuangThresholding(closing_kernel_size=10), 0, is_exit=True) # Use mask to better match the histograms
G.add_node(N4ITKBiasFieldCorrection(), [0, 1])
G.add_node(NyulNormalizer(), [2, 1], is_exit=True)
return G
def create_random_boxes(size: Sequence[int], box_size: Sequence[int], intensity: int):
r"""Create an sitk image of size with a random box placed within the image"""
x = np.zeros(size)
corner = [np.random.randint(0, size[i] - box_size[i]) for i in range(len(size))]
s = tuple([slice(corner[i], corner[i] + box_size[i], 1) for i in range(len(size))])
x[s] = intensity
return sitk.GetImageFromArray(x)
"""
Test settings
"""
N = 3 # create 3 images
out_path = Path('./temp_output')
fnames = [f"_temp{i}.nii.gz" for i in range(N)]
test_yaml =\
r"""
SpatialNorm:
out_spacing: [0.5, 0.5, 0]
HuangThresholding:
closing_kernel_size: 10
_ext:
upstream: 0
is_exit: True
N4ITKBiasFieldCorrection:
_ext:
upstream: [0, 1]
NyulNormalizer:
_ext:
upstream: [2, 1]
is_exit: True
"""
class TestScript(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestScript, self).__init__(*args, **kwargs)
TestScript.create_samples()
def test_norm_train(self):
# Create graph
G = create_graph()
G._logger.set_verbose(1)
_train_normalization(G, '.', str(out_path), 0)
def test_norm_train_mpi(self):
# Create graph
G = create_graph()
G._logger.set_verbose(1)
_train_normalization(G, '.', str(out_path), 16)
def test_norm_inference(self):
G = create_graph()
G._logger.set_verbose(1)
_inference_normalization(G, str(out_path.joinpath("Trained_states")), ".", str(out_path), 0)
def test_norm_inference_mpi(self):
G = create_graph()
G._logger.set_verbose(1)
_inference_normalization(G, str(out_path.joinpath("Trained_states")), ".", str(out_path), 16)
def test_console_entry_train(self):
r"""Run this after """
with open('_temp.yaml', 'w') as f:
f.write(test_yaml)
run_graph_train(f"-i . -f ./_temp.yaml -n 16 -v -o {str(out_path)}".split())
Path('_temp.yaml').unlink()
@staticmethod
def create_samples():
x = [create_random_boxes([128, 128, 30], [64, 64, 20], 255) for i in range(N)]
[sitk.WriteImage(sitk.Cast(xx, sitk.sitkInt16), fnames[i]) for i, xx in enumerate(x)]
@staticmethod
def clean_dir():
# Delete temp images and generated files
[Path(f).unlink() for f in fnames]
Path("./default.log").unlink()
shutil.rmtree(str(out_path))
def __del__(self):
TestScript.clean_dir()
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from click.testing import CliRunner
from simmate.command_line.workflows import workflows
def test_database():
# make the dummy terminal
runner = CliRunner()
# list the workflows
result = runner.invoke(workflows, ["list-all"])
assert result.exit_code == 0
# list the config for one workflow
result = runner.invoke(workflows, ["show-config", "energy_mit"])
assert result.exit_code == 0
# How will I mock the testing of VASP? It will require the database to be configured.
# Also I need Structure fixtures.
# TODO: test setup_only, run, run_cloud
|
nilq/baby-python
|
python
|
# Copyright 2017 The BerryDB Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
BASE_FLAGS = [
'-Wall',
'-Wextra',
'-Werror',
'-DUSE_CLANG_COMPLETER', # YCM needs this.
'-xc++', # YCM needs this to avoid compiling headers as C code.
]
SOURCE_EXTENSIONS = [ '.cc' ]
HEADER_EXTENSIONS = [ '.h' ]
SOURCE_DIRECTORIES = [ 'src' ]
HEADER_DIRECTORIES = [ 'include', 'platform' ]
def DirectoryOfThisScript():
return os.path.dirname(os.path.abspath(__file__))
def IsHeaderFile( filename ):
extension = os.path.splitext(filename)[1]
return extension in HEADER_EXTENSIONS
def MakeRelativePathsInFlagsAbsolute(flags, build_root):
new_flags = []
make_next_absolute = False
path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
for flag in flags:
new_flag = flag
if make_next_absolute:
make_next_absolute = False
if not flag.startswith('/'):
new_flag = os.path.join(build_root, flag)
for path_flag in path_flags:
if flag == path_flag:
make_next_absolute = True
break
if flag.startswith(path_flag):
path = flag[ len(path_flag): ]
new_flag = path_flag + os.path.join(build_root, path)
break
if new_flag:
new_flags.append(new_flag)
return new_flags
def FindNearest(path, target, build_root):
candidate = os.path.join(path, target)
if os.path.isfile(candidate):
return candidate
if path == build_root:
return None
parent = os.path.dirname(path)
if parent == path:
return None
return FindNearest(parent, target, build_root)
def FlagsForClangComplete(file_path, build_root):
    clang_complete_path = FindNearest(file_path, '.clang_complete', build_root)
    if clang_complete_path is None:
        return []
    with open(clang_complete_path, 'r') as f:
        return f.read().splitlines()
def FlagsForFile(filename, **kwargs):
build_root = DirectoryOfThisScript()
file_path = os.path.realpath(filename)
flags = BASE_FLAGS
clang_flags = FlagsForClangComplete(file_path, build_root)
if clang_flags:
flags = flags + clang_flags
final_flags = MakeRelativePathsInFlagsAbsolute(flags, build_root)
return { 'flags': final_flags }
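# --- Hedged usage sketch (not part of the original file): the source path below
# is made up. With no .clang_complete on disk, only BASE_FLAGS are returned,
# already run through MakeRelativePathsInFlagsAbsolute.
if __name__ == "__main__":
    import pprint
    pprint.pprint(FlagsForFile(os.path.join(DirectoryOfThisScript(), 'src', 'db.cc')))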
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from twisted.internet.defer import Deferred
from twisted.internet.protocol import ClientFactory, ServerFactory
from twisted.internet import reactor
from twisted.protocols.basic import LineReceiver
from Screens.MessageBox import MessageBox
from Tools import Notifications
from GrowleeConnection import emergencyDisable
from . import NOTIFICATIONID
SNP_TCP_PORT = 9887
class SnarlNetworkProtocol(LineReceiver):
def __init__(self, client=False):
self.client = client
def connectionMade(self):
self.factory.addClient(self)
if self.client:
payload = "type=SNP#?version=1.0#?action=register#?app=growlee"
self.sendLine(payload)
payload = "type=SNP#?version=1.0#?action=add_class#?app=growlee#?class=growleeClass#?title=Notifications from your Dreambox"
self.sendLine(payload)
def connectionLost(self, reason):
self.factory.removeClient(self)
def stop(self):
if self.client:
payload = "type=SNP#?version=1.0#?action=unregister#?app=growlee"
self.sendLine(payload)
self.transport.loseConnection()
def sendNotification(self, title='No title.', description='No message.', timeout=1):
if not self.client or not self.transport:
return
payload = "type=SNP#?version=1.0#?action=notification#?app=growlee#?class=growleeClass#?title=%s#?text=%s#?timeout=%d" % (title, description, timeout)
self.sendLine(payload)
def lineReceived(self, data):
if self.client or not self.transport:
return
Len = len(data)
if Len < 23 or not data[:23] == "type=SNP#?version=1.0#?":
return
items = data[23:].split('#?')
title = ''
description = ''
timeout = 5
for item in items:
            key, value = item.split('=', 1)
if key == "action":
if value == "unregister":
payload = "SNP/1.0/0/OK"
self.sendLine(payload)
self.transport.loseConnection()
return
elif value != "notification":
# NOTE: we pretend to handle&accept pretty much everything one throws at us
payload = "SNP/1.0/0/OK"
self.sendLine(payload)
return
elif key == "title":
title = value
elif key == "text":
description = value
elif key == "timeout":
timeout = int(value)
Notifications.AddNotificationWithID(
NOTIFICATIONID,
MessageBox,
text=title + '\n' + description,
type=MessageBox.TYPE_INFO,
timeout=timeout,
close_on_any_key=True,
)
# return ok
payload = "SNP/1.0/0/OK"
self.sendLine(payload)
class SnarlNetworkProtocolClientFactory(ClientFactory):
client = None
def buildProtocol(self, addr):
p = SnarlNetworkProtocol(client=True)
p.factory = self
return p
def sendNotification(self, title='No title.', description='No message.', priority=0, timeout=-1):
if self.client:
title = title.decode('utf-8', 'ignore').encode('iso8859-15', 'ignore')
description = description.decode('utf-8', 'ignore').encode('iso8859-15', 'ignore')
# NOTE: timeout = 0 means sticky, so add one second to map -1 to 0 and make 0 non-sticky
if timeout < 1:
timeout += 1
self.client.sendNotification(title=title, description=description, timeout=timeout)
def addClient(self, client):
self.client = client
def removeClient(self, client):
self.client = None
class SnarlNetworkProtocolServerFactory(ServerFactory):
protocol = SnarlNetworkProtocol
def __init__(self):
self.clients = []
def addClient(self, client):
self.clients.append(client)
def removeClient(self, client):
self.clients.remove(client)
def sendNotification(self, *args, **kwargs):
pass
def stopFactory(self):
for client in self.clients:
client.stop()
class SnarlNetworkProtocolAbstraction:
clientPort = None
serverPort = None
pending = 0
def __init__(self, host):
self.clientFactory = SnarlNetworkProtocolClientFactory()
self.serverFactory = SnarlNetworkProtocolServerFactory()
if host.enable_outgoing.value:
reactor.resolve(host.address.value).addCallback(self.gotIP).addErrback(self.noIP)
if host.enable_incoming.value:
self.serverPort = reactor.listenTCP(SNP_TCP_PORT, self.serverFactory)
self.pending += 1
def gotIP(self, ip):
self.clientPort = reactor.connectTCP(ip, SNP_TCP_PORT, self.clientFactory)
self.pending += 1
def noIP(self, error):
emergencyDisable()
def sendNotification(self, *args, **kwargs):
self.clientFactory.sendNotification(*args, **kwargs)
def maybeClose(self, resOrFail, defer=None):
self.pending -= 1
if self.pending == 0:
if defer:
defer.callback(True)
def stop(self):
defer = Deferred()
if self.clientPort:
d = self.clientPort.disconnect()
if d:
d.addBoth(self.maybeClose, defer=defer)
else:
self.pending -= 1
if self.serverPort:
d = self.serverPort.stopListening()
if d:
d.addBoth(self.maybeClose, defer=defer)
else:
self.pending -= 1
if self.pending == 0:
reactor.callLater(1, defer.callback, True)
return defer
|
nilq/baby-python
|
python
|
import unicodedata
from django import forms
from django.contrib.auth import authenticate, get_user_model, password_validation
from django.contrib.auth.hashers import UNUSABLE_PASSWORD_PREFIX, identify_hasher
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import EmailMultiAlternatives
from django.template import loader
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from django.utils.text import capfirst
from django.utils.translation import gettext
from django.utils.translation import gettext_lazy as _
from .models import User, Profile
UserModel = get_user_model()
class UsernameField(forms.CharField):
def to_python(self, value):
return unicodedata.normalize("NFKC", super().to_python(value))
class UserCreationForm(forms.ModelForm):
"""
A form that creates a user, with no privileges, from the given username and
password.
"""
error_messages = {"password_mismatch": _("The two password fields didn't match.")}
password1 = forms.CharField(
label=_("Password"),
strip=False,
widget=forms.PasswordInput,
help_text=password_validation.password_validators_help_text_html(),
)
password2 = forms.CharField(
label=_("Password confirmation"),
widget=forms.PasswordInput,
strip=False,
help_text=_("Enter the same password as before, for verification."),
)
class Meta:
model = UserModel
fields = ("username", "email")
field_classes = {"username": UsernameField}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self._meta.model.USERNAME_FIELD in self.fields:
self.fields[self._meta.model.USERNAME_FIELD].widget.attrs.update(
{"autofocus": True}
)
if self._meta.model.USERNAME_FIELD != "email":
self.fields["email"] = forms.EmailField(label=_("E-mail"), max_length=75)
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError(
self.error_messages["password_mismatch"], code="password_mismatch"
)
return password2
def _post_clean(self):
super()._post_clean()
# Validate the password after self.instance is updated with form data
# by super().
password = self.cleaned_data.get("password2")
if password:
try:
password_validation.validate_password(password, self.instance)
except forms.ValidationError as error:
self.add_error("password2", error)
def save(self, commit=True):
user = super().save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class AdminProfileCreationForm(forms.ModelForm):
email = forms.EmailField()
username = forms.CharField(max_length=150)
parent = forms.CharField(max_length=20, label=_("Upline ID"))
class Meta:
model = Profile
exclude = ("user",)
|
nilq/baby-python
|
python
|
from typing import Union
from requests import Session
import json
import os
from models import ChallengeResult, ChallengeError
BASE_URL = os.getenv('API_URL', 'https://soallpeach-api-soroosh.fandogh.cloud')
session = Session()
def get_session() -> Session:
session.headers.update({
'Authorization': 'TOKEN ' + os.getenv('API_SECRET_KEY', 'STRONG_TOKEN'),
'Content-Type': 'application/json'
})
return session
class ReportRequest(object):
nickname: str
challenge_name: str
run_id: str
result: Union[ChallengeResult, ChallengeError]
state: str
def __init__(self, nickname: str, challenge_name: str, run_id: str, result: Union[ChallengeResult, ChallengeError]):
self.nickname = nickname
self.challenge_name = challenge_name
self.run_id = run_id
self.result = result
self.state = 'PASSED' if isinstance(result, ChallengeResult) else 'FAILED'
def report(nickname: str, challenge_name: str, run_id: str, result: Union[ChallengeResult, ChallengeError]):
request = ReportRequest(nickname, challenge_name, run_id, result)
request_json = json.dumps(request.__dict__, default=lambda o: o.__dict__, indent=4)
response = get_session().post(f'{BASE_URL}/scores', data=request_json)
print(response)
print(response.text)
|
nilq/baby-python
|
python
|
#!/data2/zhangshuai/anaconda3/bin
# -*- coding: utf-8 -*-
import os
import wave
from pydub import AudioSegment
import json
wav_path = "/home/zhangshuai/kaldi-master/egs/biendata/Magicdata/audio/test"
trans_path = "/home/zhangshuai/kaldi-master/egs/biendata/Magicdata/transcription/test_no_ref_noise"
wav_segments_path = "/home/zhangshuai/kaldi-master/egs/biendata/Magicdata_seg/audio/test"
trans_segments_path = "/home/zhangshuai/kaldi-master/egs/biendata/Magicdata_seg/transcription/test"
wav_files = os.listdir(wav_path)
# wav_files = [wav_path + "/" + f for f in wav_files if f.endswith('.wav')]
trans_files = os.listdir(trans_path)
trans_files = [trans_path + "/" + f for f in trans_files if f.endswith('.json')]
for file in wav_files:
    if file[0] != '.':
# with wave.open(file, "rb") as wav_f:
# print(f.getparams())
wav_parts_paths = wav_segments_path + '/' + file.split('.', 1)[0]
if not os.path.exists(wav_parts_paths):
os.makedirs(wav_parts_paths)
trans_parts_path = trans_segments_path + '/' + file.split('.', 1)[0]
if not os.path.exists(trans_parts_path):
os.makedirs(trans_parts_path)
# print(wav_parts_paths)
# print(file)
print(trans_path + "/" + file.rsplit('_', 1)[0] + '.json')
with open(trans_path + "/" + file.rsplit('_', 1)[0] + '.json', 'r') as trans_f:
trans = json.load(trans_f)
# print(len(trans))
for i in range(len(trans)-1):
# print(i)
# sub_trans = trans[i]
# if not lines:
# break
# trans_info = lines.split('\t', 4)
start_time = trans[i]['start_time']
print(start_time)
start_time = (int(start_time.split(':')[0])*3600 + int(start_time.split(':')[1])*60 + float(start_time.split(':')[2]))*1000
end_time = trans[i]['end_time']
print(end_time)
# with open(trans_parts_path + '/' + file.split('.', 1)[0] + '_' + str(i) + '.txt', 'w') as w:
# w.write(file.split('.', 1)[0] + '_' + str(i) + '.wav' + ' ' + trans[i]['words'])
end_time = (int(end_time.split(':')[0])*3600 + int(end_time.split(':')[1])*60 + float(end_time.split(':')[2]))*1000
# print(trans_info[0])
# print(start_time,end_time)
wav = AudioSegment.from_mp3(wav_path + '/' + file)
wav_parts = wav[int(start_time) : int(end_time)]
# wav_parts.export(wav_parts_paths + '/' + file.split('.', 1)[0] + '_' + str(i) + '.wav', format="wav")
wav_parts.export(wav_parts_paths + '/' + trans[i]['uttid'] + '.wav', format="wav")
#if __name__ == '__main__':
|
nilq/baby-python
|
python
|
# encoding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from networkapi.test.utils import log
from networkapi.test.utils import xml2dict
def assert_response_error(response, codigo, descricao=None):
""" Verifica se a resposta da networkapi foi como esperada. Quando for passado uma lista de códigos
possiveis, a descrição não poderá ser passada
"""
# trata o argumento código, quando somente 1 elemento for passado
codigos = codigo if type(codigo) == list else [codigo]
try:
networkapi_response = xml2dict(response.content)
codigo_resposta = int(networkapi_response['erro']['codigo'])
descricao_resposta = networkapi_response['erro']['descricao']
        assert codigo_resposta in codigos, u"Invalid response code: %d (description: %s). Expected: %s" % (
            codigo_resposta, descricao_resposta, repr(codigos))
assert descricao_resposta is not None
assert len(descricao_resposta) > 0
if descricao:
assert descricao_resposta == descricao
except:
        # If the response cannot be parsed, log it to make the
        # investigation easier
        log.error("Error parsing the response:\n%s\n",
                  (response or response.content))
raise
def assert_response_success(response, status_code=200, codigo=0, stdout=None, stderr=None):
""" Verifica se a resposta da networkapi foi sucesso e com os valores informados """
try:
assert response.status_code == status_code
networkapi_response = xml2dict(response.content)
codigo_resposta = int(networkapi_response['sucesso']['codigo'])
        assert codigo_resposta == codigo, u"Invalid response code: %d. Expected: %d" % (
            codigo_resposta, codigo)
if stdout:
assert networkapi_response['sucesso'][
'descricao']['stdout'] == stdout
if stderr:
assert networkapi_response['sucesso'][
'descricao']['stderr'] == stderr
except:
        # If the response cannot be parsed, log it to make the
        # investigation easier
        log.error("Error parsing the response:\n%s\n",
                  (response or response.content))
raise
|
nilq/baby-python
|
python
|
import os
## FIXME most of the path variables should come from env vars
PWD = os.getcwd()
OUTPUT_PATH= os.path.join(PWD, "CodeComb_Outputs")
#FORMATS = ['.cpp']
DATA_PATH = os.path.join(PWD, "CodeComb_Data")
DF_FILE = "df_corpus"
DOC_EMB_PATH = os.path.join(OUTPUT_PATH, "doc_emb_" + DF_FILE)
ANN_INDEX_PATH = os.path.join(OUTPUT_PATH, "annoy_" + DF_FILE)
W2V_MODEL_PATH = os.path.join(OUTPUT_PATH, "w2v_model_" + DF_FILE)
DF_PATH = os.path.join(DATA_PATH, DF_FILE)
def ensure_dir(file_path):
print ("Checking path = {}".format(file_path))
if not os.path.exists(file_path):
os.makedirs(file_path)
def init_path():
ensure_dir(OUTPUT_PATH)
ensure_dir(DATA_PATH)
|
nilq/baby-python
|
python
|
import scrapy
import re
from .mirkcolorselector import sampler_function
class DecjubaSpider(scrapy.Spider):
name = "decjuba_products"
start_urls = [
'https://www.decjuba.com.au/collections/women/dresses',
'https://www.decjuba.com.au/collections/women/jackets',
'https://www.decjuba.com.au/collections/women/cardigans',
'https://www.decjuba.com.au/collections/women/pants',
'https://www.decjuba.com.au/collections/women/shorts',
'https://www.decjuba.com.au/collections/women/skirts',
'https://www.decjuba.com.au/collections/women/tees',
'https://www.decjuba.com.au/collections/women/tops',
'https://www.decjuba.com.au/collections/d-luxe/pants',
'https://www.decjuba.com.au/collections/d-luxe/dl-dresses',
'https://www.decjuba.com.au/collections/d-luxe/dl-tops'
]
def parse(self, response):
for product in response.xpath('//p[@class="h6"]'):
url = "https://www.decjuba.com.au" + product.css('a::attr(href)').extract_first()
next_page = response.xpath('//span[@class="next"]/a/@href').extract_first()
if next_page is not None:
yield response.follow(next_page, self.parse)
yield scrapy.Request(url, callback=self.parse_product, meta={'start_url':response.request.url})
def parse_product(self, response):
class Item(scrapy.Item):
name = scrapy.Field()
price = scrapy.Field()
link = scrapy.Field()
images = scrapy.Field()
sizes = scrapy.Field()
style = scrapy.Field()
stock = scrapy.Field()
gender = scrapy.Field()
colour = scrapy.Field()
address = scrapy.Field()
location = scrapy.Field()
item_type = scrapy.Field()
vendor_name = scrapy.Field()
def women_size_converter(size):
return {
'XXS': 6,
'XXS/XS': 8,
'XS': 8,
'XS/S': 10,
'S': 10,
'S/M': 12,
'M': 12,
'M/L': 14,
'L': 14,
'L/XL': 16,
'XL': 16,
'XL/XXL': 18,
'XXL': 18,
'onesize': None,
'6': 6,
'8': 8,
'10': 10,
'12': 12,
'14': 14,
'16': 16,
'36': 5,
'37': 6,
'38': 7,
'39': 8,
'40': 9,
'41': 10,
}.get(size, size)
for info in response.xpath('//div[contains(@class, "product-single") and contains(@class, "grid")]'):
item = Item()
item['name'] = info.xpath('//div[@itemprop="name"]/text()').extract_first()
price = re.findall(r'(\d[^\s\\]+)', str(info.xpath('//div[@itemprop="price"]/text()').extract()))
item['price'] = float(price[0])
item['link'] = response.url
sizes = info.xpath('//ul[@class="size-container"]/li/input/@value').extract()
item['sizes'] = [women_size_converter(i) for i in sizes]
item['style'] = info.xpath('//div[@id="product-description"]/p/text()').extract()
item['images'] = ['https:' + i for i in info.xpath('//div[@class="swiper-wrapper"]/div/img').xpath('@src').extract()]
colour = info.xpath('//span[@class="colour-option"]/img').xpath('@src').extract()
item['colour'] = [sampler_function(i, 0.3) for i in colour][0]
item['gender'] = 'Women'
item['address'] = "Shop 310, Broadway Shopping Centre 1 Bay Street, Broadway, New South Wales 2007, Australia"
item['location'] = "-33.883835, 151.194704"
item['stock'] = True
item['item_type'] = re.findall(r'.+(\/.+)$', response.meta['start_url'])
item['vendor_name'] = 'Decjuba'
yield item
|
nilq/baby-python
|
python
|
from cloudshell.devices.runners.autoload_runner import AutoloadRunner
from vyos.flows.autoload import VyOSAutoloadFlow
class VyOSAutoloadRunner(AutoloadRunner):
def __init__(self, resource_config, cli_handler, logger):
"""
:param resource_config:
:param cli_handler:
:param logger:
"""
super(VyOSAutoloadRunner, self).__init__(resource_config)
self._cli_handler = cli_handler
self._logger = logger
@property
def autoload_flow(self):
return VyOSAutoloadFlow(cli_handler=self._cli_handler,
resource_config=self.resource_config,
logger=self._logger)
def discover(self):
"""
:return: AutoLoadDetails object
"""
return self.autoload_flow.execute_flow()
|
nilq/baby-python
|
python
|
"""
Dada uma String "str", retorne true se nela possuir o mesmo número de ocorrências
das strings "cat" e "dog".
Ex.:(('catdog') → True; ('1cat1cadodog') → True; ('catcat') → False).
"""
def cat_dog(str):
return str.count("cat") == str.count("dog")
print(cat_dog("catdog"))
|
nilq/baby-python
|
python
|
"""
This file implements a general purpose best-first planner.
--------------HOW TO INITIALIZE IT -------------
An instance of the planner is created using
planner = Planner(s)
where s is the initial state in the planning process.
The planner needs five functions/methods to work properly.
These functions can either be implemented as methods of the state 's',
or provided as optional arguments to the constructor. They are:
get_children : state -> iterable collection of states.
This function takes a state and returns all its neighbours in the state space
is_goal_state : state -> bool
This function returns true if the provided state is a goal state
extract_plan : state -> whatever result you want
This function generates some sort of plan from the goal state.
This is what the planner returns, and the planner itself doesn't care
about how the extracted plan looks
heuristic : state -> int
This function produces a heuristic value for the provided state.
Technically, it could map each state to anything that is comparable,
but integers are fast, so lets stick to that.
get_g_value : state -> int
        This function gives the distance from the initial state of the search to this state
The functions provided as arguments take precedence over method implementations in 's'.
This means that it is possible to implement all methods in 's',
but still provide a custom heuristic function to the planner in the following way:
planner = Planner(s, heuristic=custom_heuristic_function)
---------------- HOW TO USE IT ----------------------------
The planner supports three different ways of searching the state space:
expand_one_state - picks one state from the frontier and expands it. It is more or less useless, and used only as a sub procedure
expand_n_states - this method takes an integer 'n' as argument and repeats expand_one_state 'n' times or until a successful plan is found.
This is useful if you want to search for a while, but stop if the search takes too long
If a plan is found during the execution of any of the above procedues, it is stored in the attribute 'plan' of the planner.
I.e., get it using (some_planner.plan). The attribute 'plan' is None if no plan is found
The last way of finding a plan is:
make_plan - This method starts searching and stops when a plan is found or the state space is exhausted.
It returns None if it couldn't find a plan, or the plan itself otherwise
"""
import heapq
import sys
from time import perf_counter
def default_heuristic(state):
return state.heuristic()
def default_get_g_value(state):
return state.get_g_value()
def default_is_goal_state(state):
return state.is_goal_state()
def default_extract_plan(state):
return state.extract_plan()
def default_get_children(state):
return state.get_children()
class Planner(object):
def __init__(self,initial_state,get_children = None,is_goal_state = None,extract_plan = None, heuristic = None, g_value = None, cutoff_solution_length = None, print_status = True):
#Setting the functions used to explore the state space
        #Use implementations in state unless new functions are provided
self.get_children = get_children if get_children else default_get_children
self.is_goal_state = is_goal_state if is_goal_state else default_is_goal_state
self.extract_plan = extract_plan if extract_plan else default_extract_plan
self.heuristic = heuristic if heuristic else default_heuristic
self.g_value = g_value if g_value else default_get_g_value
#Adding the initial state to the frontier
self.frontier = []
heapq.heapify(self.frontier)
firstEntry = (self.heuristic(initial_state), initial_state)
heapq.heappush(self.frontier, firstEntry)
#Initialize remaining variables
self.expanded_set = set()
self.plan = None
#Replace the cutoff with a suitably large number if there is no cutoff
self.cutoff_solution_length = cutoff_solution_length if cutoff_solution_length else 2000000000
#output planner status
self.print_status = print_status
self.start_time = perf_counter()
self.times_printed = []
def expand_one_state(self):
#TODO: Fix this: it's not very good. What if there is no solution and the state space is exhausted?
assert len(self.frontier) > 0, "state space exhausted in planner"
#Extract the state with minimum heuristic value from the frontier
result = heapq.heappop(self.frontier)
state = result[1]
#Find the plan if state is goal
if self.is_goal_state(state):
#if hasattr(state, 'game_state'):
# print("--------------------------FOUND THE GOAL-----------------",file=sys.stderr)
# print(state.game_state,file=sys.stderr)
self.plan = self.extract_plan(state)
return
#Add the state to the expanded set
self.expanded_set.add(state)
#Get the unexpanded neighbours of the state
children = self.get_children(state)
#Filter out expanded states and states that are past the cutoff for solution length
children = [s for s in children if s not in self.expanded_set and self.g_value(s) < self.cutoff_solution_length]
#Calculate their heuristic value
children = [(self.heuristic(s),s) for s in children]
#Add them to the frontier
for entry in children:
heapq.heappush(self.frontier, entry)
# will print the current search status roughly every 10 seconds
if self.print_status:
if not int('%.f' % self.used_time()) % 10 and int('%.f' % self.used_time()) not in self.times_printed:
self.times_printed.append(int('%.f' % self.used_time()))
print(self.search_status(), file=sys.stderr, flush=True)
#Expands at most n more states from the frontier.
#Returns the plan if it is found, otherwise returns None
def expand_n_states(self, n):
for i in range(n):
if self.plan:
return self.plan
self.expand_one_state()
return self.plan
#finds a plan to the problem. If there is no goal state, returns None
def make_plan(self):
while(len(self.frontier) > 0 and not self.plan):
self.expand_one_state()
return self.plan
def used_time(self):
return perf_counter()-self.start_time
def search_status(self):
return "expanded: {}, frontier: {}, generated: {}, time: {:3.2f} ".format(len(self.expanded_set), len(self.frontier), len(self.expanded_set)+len(self.frontier), self.used_time())
|
nilq/baby-python
|
python
|
import cv2
import apriltag
# Helper function to display an image on screen
def viewImage(image, window_name='window name'):
cv2.imshow(window_name, image)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Read the image and convert it to grayscale
tag = cv2.imread('/home/administrator/PycharmProjects/trial/signs/t9.png')
gray_tag = cv2.cvtColor(tag, cv2.COLOR_BGR2GRAY)
viewImage(gray_tag)
# Define the AprilTag family, then configure the detector and detect AprilTags in the image
options = apriltag.DetectorOptions(families='tag36h11')
detector = apriltag.Detector(options)
results = detector.detect(gray_tag)
print("[INFO] {} total AprilTags detected".format(len(results)))
print(results)
for r in results:
# extract the bounding box (x, y)-coordinates for the AprilTag
# and convert each of the (x, y)-coordinate pairs to integers
(ptA, ptB, ptC, ptD) = r.corners
ptB = (int(ptB[0]), int(ptB[1]))
ptC = (int(ptC[0]), int(ptC[1]))
ptD = (int(ptD[0]), int(ptD[1]))
ptA = (int(ptA[0]), int(ptA[1]))
# draw the bounding box of the AprilTag detection
cv2.line(tag, ptA, ptB, (0, 255, 0), 2)
cv2.line(tag, ptB, ptC, (0, 255, 0), 2)
cv2.line(tag, ptC, ptD, (0, 255, 0), 2)
cv2.line(tag, ptD, ptA, (0, 255, 0), 2)
# draw the center (x, y)-coordinates of the AprilTag
(cX, cY) = (int(r.center[0]), int(r.center[1]))
cv2.circle(tag, (cX, cY), 5, (0, 0, 255), -1)
# draw the tag family on the image
tagID = str(r.tag_id)
cv2.putText(tag, tagID, (ptA[0], ptA[1] - 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
print("[INFO] tag ID: {}".format(tagID))
viewImage(tag)
|
nilq/baby-python
|
python
|
# Copyright (c) 2020, Manfred Moitzi
# License: MIT License
import pytest
from ezdxf.math import BSpline, global_bspline_interpolation, rational_bspline_from_arc, Vec3
def test_from_nurbs_python_curve_to_ezdxf_bspline():
from geomdl.fitting import interpolate_curve
curve = interpolate_curve([(0, 0), (0, 10), (10, 10), (10, 0)], degree=3)
bspline = BSpline.from_nurbs_python_curve(curve)
assert bspline.degree == 3
assert len(bspline.control_points) == 4
assert len(bspline.knots()) == 8 # count + order
def test_from_ezdxf_bspline_to_nurbs_python_curve_non_rational():
bspline = global_bspline_interpolation([(0, 0), (0, 10), (10, 10), (10, 0)], degree=3)
# to NURBS-Python
curve = bspline.to_nurbs_python_curve()
assert curve.degree == 3
assert len(curve.ctrlpts) == 4
assert len(curve.knotvector) == 8 # count + order
assert curve.rational is False
# and back to ezdxf
spline = BSpline.from_nurbs_python_curve(curve)
assert spline.degree == 3
assert len(spline.control_points) == 4
assert len(spline.knots()) == 8 # count + order
def test_from_ezdxf_bspline_to_nurbs_python_curve_rational():
bspline = rational_bspline_from_arc(center=Vec3(0, 0), radius=2, start_angle=0, end_angle=90)
# to NURBS-Python
curve = bspline.to_nurbs_python_curve()
assert curve.degree == 2
assert len(curve.ctrlpts) == 3
assert len(curve.knotvector) == 6 # count + order
assert curve.rational is True
assert curve.weights == [1.0, 0.7071067811865476, 1.0]
# and back to ezdxf
spline = BSpline.from_nurbs_python_curve(curve)
assert spline.degree == 2
assert len(spline.control_points) == 3
assert len(spline.knots()) == 6 # count + order
assert spline.weights() == (1.0, 0.7071067811865476, 1.0)
if __name__ == '__main__':
pytest.main([__file__])
|
nilq/baby-python
|
python
|
import itertools
import numpy
from matplotlib import pyplot
from typing import Dict, Sequence, Tuple
from warg import Number
__all__ = [
"plot_errors",
"masks_to_color_img",
"plot_prediction",
"bounding_box_from_mask",
]
def plot_errors(results_dict: Dict, title: str) -> None:
"""
Args:
results_dict:
title:
"""
markers = itertools.cycle(("+", "x", "o"))
pyplot.title(f"{title}")
for label, result in sorted(results_dict.items()):
pyplot.plot(result, marker=next(markers), label=label)
pyplot.ylabel("dice_coef")
pyplot.xlabel("epoch")
pyplot.legend(loc=3, bbox_to_anchor=(1, 0))
pyplot.show()
def masks_to_color_img(masks: numpy.ndarray) -> numpy.ndarray:
"""
Args:
masks:
Returns:
"""
height, width, mask_channels = masks.shape
color_channels = 3
color_image = numpy.zeros((height, width, color_channels), dtype=numpy.uint8) * 255
for y in range(height):
for x in range(width):
for mc in range(mask_channels):
color_image[y, x, mc % color_channels] = masks[y, x, mc]
return color_image.astype(numpy.uint8)
def plot_prediction(
img_array: numpy.ndarray,
labels: Sequence,
max_pred: Sequence,
pred: Sequence,
n_col: int = 3,
) -> None:
"""
Args:
img_array:
labels:
max_pred:
pred:
n_col:
"""
n_row = len(img_array) // n_col
f, plots = pyplot.subplots(
n_row, n_col, sharex="all", sharey="all", figsize=(n_col * 4, n_row * 4)
)
for i in range(len(img_array)):
plots[i // n_col, i % n_col].imshow(img_array[i])
plots[i // n_col, i % n_col].set_title(
f"truth:{labels[i]},\n max_pred:{max_pred[i]},\n pred:{pred[i]}", fontsize=8
)
def bounding_box_from_mask(
hard_mask: numpy.ndarray,
) -> Tuple[Number, Number, Number, Number]:
"""
Args:
hard_mask:
Returns:
"""
nz = numpy.nonzero(hard_mask)
return numpy.min(nz[0]), numpy.min(nz[1]), numpy.max(nz[0]), numpy.max(nz[1])
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import numpy
import os
from os.path import join
import shutil
import time
import sys
import math
import json
import utilities
import matplotlib.pyplot as plt
def get_num_antennas(ms):
"""."""
tb.open(ms + '/ANTENNA', nomodify=True)
num_stations = tb.nrows()
tb.close()
return num_stations
def average_ms(ms_ref, ms_in, ms_out, overwrite=True):
if not overwrite and os.path.isdir(ms_out):
return
# Create output MS by making a copy of the reference MS.
if os.path.exists(ms_out):
shutil.rmtree(ms_out)
print 'Averaging MS:', ms_in
shutil.copytree(ms_ref, ms_out)
tb.open(ms_in, nomodify=True)
num_rows = tb.nrows()
num_times = num_rows / num_baselines
col_data = tb.getcol('DATA')
col_uvw = tb.getcol('UVW')
col_ant1 = tb.getcol('ANTENNA1')
col_ant2 = tb.getcol('ANTENNA2')
col_time = tb.getcol('TIME')
uu = col_uvw[0, :]
uu = uu.reshape(num_times, num_baselines)
ave_uu = numpy.mean(uu, axis=0)
vv = col_uvw[1, :]
vv = vv.reshape(num_times, num_baselines)
ave_vv = numpy.mean(vv, axis=0)
ww = col_uvw[2, :]
ww = ww.reshape(num_times, num_baselines)
ave_ww = numpy.mean(ww, axis=0)
t = col_time
t = t.reshape(num_times, num_baselines)
ave_t = numpy.mean(t, axis=0)
# Assert that the MS has 1 channel and is stokes-I only.
assert col_data.shape[0] == 1
assert col_data.shape[1] == 1
assert col_data.shape[2] == num_rows
data = numpy.squeeze(col_data)
data = data.reshape(num_times, num_baselines)
ave_data = numpy.mean(data, axis=0)
tb.close()
tb.open(ms_out, nomodify=False)
col_data = tb.getcol('DATA')
tb.putcol('DATA', numpy.reshape(ave_data, col_data.shape))
col_data = tb.getcol('DATA')
tb.close()
if __name__ == "__main__":
"""Copy the ref ms and populate it with averaged input ms."""
settings = utilities.byteify(json.load(open(config_file)))
sim_dir = settings['path']
ms_ref = join(sim_dir, 'n0001.ms')
num_antennas = get_num_antennas(ms_ref)
num_baselines = num_antennas * (num_antennas - 1) / 2
for n in settings['sim']['observation']['num_times']:
if n == 1:
continue
# === No smearing ===
ms_in = join(sim_dir, 'n%04i.ms' % n)
ms_out = join(sim_dir, 'ave_n%04i.ms' % n)
average_ms(ms_ref, ms_in, ms_out, overwrite=False)
# === With analytical smearing ===
ms_in = join(sim_dir, 'n%04i_smearing.ms' % n)
ms_out = join(sim_dir, 'ave_n%04i_smearing.ms' % n)
average_ms(ms_ref, ms_in, ms_out, overwrite=False)
|
nilq/baby-python
|
python
|
# coding=utf-8
import datetime
import json
import time
import redis
import scrapy
from pymongo import MongoClient
from scrapy.http import Request
from scrapy_redis.spiders import RedisSpider
from biliob_spider.items import TagListItem
from biliob_tracer.task import SpiderTask
from db import db
class TagAdderSpider(RedisSpider):
name = "tagAdder"
allowed_domains = ["bilibili.com"]
start_urls = []
custom_settings = {
'ITEM_PIPELINES': {
'biliob_spider.pipelines.TagAdderPipeline': 300
},
}
    def __init__(self, *args, **kwargs):
        super(TagAdderSpider, self).__init__(*args, **kwargs)
        self.db = db
def start_requests(self):
for i in self.start_urls:
yield Request(i, meta={
'dont_redirect': True,
'handle_httpstatus_list': [302]
}, callback=self.parse)
def parse(self, response):
try:
aid = str(
response.url.lstrip(
'https://www.bilibili.com/video/av').rstrip('/'))
tagName = response.xpath("//li[@class='tag']/a/text()").extract()
item = TagListItem()
item['aid'] = int(aid)
item['tag_list'] = []
if tagName != []:
ITEM_NUMBER = len(tagName)
for i in range(0, ITEM_NUMBER):
item['tag_list'].append(tagName[i])
yield item
except Exception as error:
            # Print the error log when an error occurs
print(error)
item = TagListItem()
item['aid'] = int(aid)
item['tag_list'] = []
yield item
|
nilq/baby-python
|
python
|
"""
Bot's behaviour
"""
INTENTS = [
{
'name': 'Date',
'tokens': ('when', 'time', 'date', 'at', '1'), # You can add any key words in the list
'scenario': None,
        'answer': 'The conference is being held on May 10; registration will start at 11 am.'
},
{
'name': 'Place',
'tokens': ('where', 'place', 'location', 'address', 'station', '2', ), # You can add any key words in the list
'scenario': None,
'answer': 'The conference will be held at the Centre of the City.'
},
{
'name': 'Registration',
'tokens': ('reg', 'add', '3', ), # You can add any key words in the list
'scenario': 'registration',
'answer': None
},
{
'name': 'Greetings',
'tokens': ('thx', 'thank', '4', ), # You can add any key words in the list
'scenario': None,
'answer': 'You are welcome!'
},
]
SCENARIOS = {
'registration': {
'first_step': 'step1',
'steps': {
'step1': {
'text': 'Write your name to register. It will be shown on your badge',
'failure_text': 'Name must contain at least 2 symbols. Try one more time',
'handler': 'handle_name',
'next_step': 'step2',
},
'step2': {
                'text': 'Send your e-mail; we will send all the required information to this address.',
                'failure_text': 'There is a typo in your email. Try one more time',
'handler': 'handle_email',
'next_step': 'step3',
},
'step3': {
                'text': 'Thanks for your time, {name}! Your ticket is below; we have also sent it to {email}. Please print it.',
'image': 'generate_ticket_handler',
'failure_text': None,
'handler': None,
'next_step': None,
},
}
}
}
DEFAULT_ANSWER = 'IDK how to answer. ' \
                 'But I know where the conference is held and I can send you all the information I know, just ask me.'
DB_CONFIG = dict(
provider='postgres',
user='postgres',
password='',
host='localhost',
database='chatbot'
)
|
nilq/baby-python
|
python
|
from rest_framework.exceptions import APIException
class CFSSLError(APIException):
status_code = 503
default_detail = 'Could not create Docker certificate.'
default_code = 'docker_certificate_service_unavailable'
|
nilq/baby-python
|
python
|
"""Source code for categorical dqn brain class.
Author: Yoshinari Motokawa <yoshinari.moto@fuji.waseda.jp>
"""
from typing import List
import torch
from omegaconf import DictConfig
from torch import nn
from .abstract_brain import AbstractBrain
from core.agents.models.customs.categorical_dqn import ApplySoftmax
class CategoricalDQNBrain(AbstractBrain):
def __init__(self, config: DictConfig, obs_shape: List[int], act_size: int):
super().__init__(config=config, obs_shape=obs_shape, act_size=act_size)
self.gamma = config.gamma
self.num_atoms = config.model.num_atoms
self.V_min = config.model.V_min
self.V_max = config.model.V_max
self.support = torch.linspace(self.V_min, self.V_max, self.num_atoms).to(
device=self.device
) # Support (range) of z
self.delta_z = (self.V_max - self.V_min) / (self.num_atoms - 1)
@torch.no_grad()
def get_action(self, state):
for state_key, state_value in state.items():
state[state_key] = state_value.unsqueeze(0).float().to(self.device)
model_output = self.network(state, ApplySoftmax.NORMAL)
model_output = torch.sum(model_output * self.support, dim=2)
_, action = torch.max(model_output, dim=1)
action = int(action.item())
return action
def learn(self, states_ind, actions_ind, rewards_ind, dones_ind, next_states_ind):
for states_key, states_value in states_ind.items():
states_ind[states_key] = states_value.float().to(self.device)
actions_ind = actions_ind.to(self.device)
rewards_ind = rewards_ind.float().to(self.device)
dones_ind = dones_ind.to(self.device)
for next_states_key, next_states_value in next_states_ind.items():
next_states_ind[next_states_key] = next_states_value.float().to(self.device)
batch_size = dones_ind.shape[0]
log_p = self.network(states_ind, ApplySoftmax.LOG)
log_p_a = log_p[range(batch_size), actions_ind.squeeze()]
with torch.no_grad():
            # Pick the action with the highest expected value
model_output = self.network(next_states_ind, ApplySoftmax.NORMAL)
best_actions = torch.sum(model_output * self.support, dim=2).argmax(dim=1)
p_next = self.target_network(next_states_ind, ApplySoftmax.NORMAL)
            # (1) For terminal states, pre-fill a uniform probability distribution over all atoms
p_next_best = torch.zeros(0).to(self.device, dtype=torch.float32).new_full((batch_size, self.num_atoms), 1.0 / self.num_atoms)
            # Non-terminal states are then overwritten with the distributions selected via DDQN
p_next_best = p_next[range(batch_size), best_actions]
            # Turn the rewards into a distribution over the support
Tz = (rewards_ind.unsqueeze(1) + self.gamma * self.support.unsqueeze(0)).clamp(self.V_min, self.V_max)
b = (Tz - self.V_min) / self.delta_z
lower = b.floor().long()
upper = b.ceil().long()
            # (3) Avoid m getting zero mass when b happens to be exactly an integer
lower[(lower == upper) * (0 < lower)] -= 1
            # Because lower has already been adjusted above, the adjustment below can never fire for the same element
upper[(lower == upper) * (upper < self.num_atoms - 1)] += 1
m = torch.zeros(batch_size, self.num_atoms).to(self.device, dtype=torch.float32)
            # (4) To accumulate sums per minibatch element, compute an offset and treat m as a flat 1-D list
offset = torch.linspace(0, ((batch_size - 1) * self.num_atoms), batch_size).unsqueeze(1).expand(batch_size, self.num_atoms).to(lower)
m.view(-1).index_add_(0, (lower + offset).view(-1), (p_next_best * (upper.float() - b)).view(-1))
m.view(-1).index_add_(0, (upper + offset).view(-1), (p_next_best * (b - lower.float())).view(-1))
self.optimizer.zero_grad()
loss = -torch.sum(m * log_p_a, dim=1).mean()
loss.backward()
nn.utils.clip_grad_norm_(self.network.parameters(), 10)
self.optimizer.step()
return loss.detach()
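# --- Hedged sketch (not part of the original module): the categorical (C51)
# projection used in learn() above, isolated on dummy tensors so the index_add_
# bookkeeping can be sanity-checked without a network or config. All shapes and
# values below are made up.
if __name__ == "__main__":
    batch_size, num_atoms, V_min, V_max, gamma = 2, 5, 0.0, 4.0, 0.99
    support = torch.linspace(V_min, V_max, num_atoms)
    delta_z = (V_max - V_min) / (num_atoms - 1)
    rewards = torch.tensor([1.0, 0.5])
    p_next_best = torch.full((batch_size, num_atoms), 1.0 / num_atoms)
    Tz = (rewards.unsqueeze(1) + gamma * support.unsqueeze(0)).clamp(V_min, V_max)
    b = (Tz - V_min) / delta_z
    lower, upper = b.floor().long(), b.ceil().long()
    lower[(lower == upper) * (0 < lower)] -= 1
    upper[(lower == upper) * (upper < num_atoms - 1)] += 1
    m = torch.zeros(batch_size, num_atoms)
    offset = torch.linspace(0, (batch_size - 1) * num_atoms, batch_size).unsqueeze(1).expand(batch_size, num_atoms).to(lower)
    m.view(-1).index_add_(0, (lower + offset).view(-1), (p_next_best * (upper.float() - b)).view(-1))
    m.view(-1).index_add_(0, (upper + offset).view(-1), (p_next_best * (b - lower.float())).view(-1))
    print(m, m.sum(dim=1))  # each projected distribution still sums to 1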
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""Model definitions."""
from django.db import models
from picklefield.fields import PickledObjectField
class Model(models.Model):
"""GLM model."""
blob = PickledObjectField()
def __str__(self):
return f'Hello, I am the GLM model #{self.id}'
|
nilq/baby-python
|
python
|
import anki_vector
from anki_vector.util import Pose, degrees
def main():
args = anki_vector.util.parse_command_args()
with anki_vector.Robot(args.serial, show_3d_viewer=True, enable_nav_map_feed=True) as robot:
robot.behavior.drive_off_charger()
fixed_object = robot.world.create_custom_fixed_object(Pose(200, -50, 0, angle_z=degrees(90)), 100, 50, 100, relative_to_robot=True)
fixed_object = robot.world.create_custom_fixed_object(Pose(100, 50, 0, angle_z=degrees(90)), 100, 50, 100, relative_to_robot=True)
if fixed_object:
print("fixed custom objects created successfully")
robot.behavior.go_to_pose(Pose(300, 0, 0, angle_z=degrees(0)), relative_to_robot=True)
robot.world.delete_custom_objects()
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
# proxy module
from __future__ import absolute_import
from mayavi.action.filters import *
|
nilq/baby-python
|
python
|
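# --- Hedged prerequisite (not part of the original snippet): the usual singly
# linked list node assumed by the LeetCode-style signature below.
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next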
class Solution:
    def XXX(self, head: ListNode, n: int) -> ListNode:
        # Two-pointer approach: advance `fast` n nodes ahead, then move both
        # pointers together until `fast` falls off the end; `slow` then sits
        # just before the node to remove.
        slow = head
        fast = head
        for i in range(n):
            fast = fast.next
        if fast is None:
            # The node to remove is the head itself.
            return slow.next
        else:
            # Advance one extra step so `slow` stops one node before the target.
            fast = fast.next
        while fast:
            fast = fast.next
            slow = slow.next
        slow.next = slow.next.next
        return head
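# --- Hedged usage sketch: remove the 2nd node from the end of 1->2->3->4->5.
if __name__ == "__main__":
    head = ListNode(1, ListNode(2, ListNode(3, ListNode(4, ListNode(5)))))
    node = Solution().XXX(head, 2)
    out = []
    while node:
        out.append(node.val)
        node = node.next
    print(out)  # [1, 2, 3, 5]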
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import click
import pytest
from click.testing import CliRunner
from gitlabctl.cli import project_get_env
from gitlabctl.cli import run_pipeline
__author__ = "Thomas Bianchi"
__copyright__ = "Thomas Bianchi"
__license__ = "mit"
def main_get_env(func_name, id):
return [id]
def main_run_pipeline(func_name, d):
click.echo(d)
get_env_by_id_expectations = [
pytest.param(['--by-id', '1123'], '1123\n', id="full"),
pytest.param(None, '\n', id="no-id"),
]
run_pipeline_expectations = [
pytest.param(['NOPROD=1'], "[{'key': 'NOPROD', 'value': '1'}]\n",
id="spaced single param"),
pytest.param(['NOPROD=1', 'PROVA=2'],
"[{'key': 'NOPROD', 'value': '1'}, {'key': 'PROVA', 'value': '2'}]\n",
id="spaced multiple params"),
# pytest.param(['NOPROD=1,PROVA=2'], pytest.raises(click.BadArgumentUsage),
# id="non spaced"),
]
@pytest.mark.parametrize("a,expected", get_env_by_id_expectations)
def test_project_get_env(mocker, a, expected):
mocker.patch(
'gitlabctl.project.main',
main_get_env)
runner = CliRunner()
result = runner.invoke(project_get_env, a)
assert expected == result.output
@pytest.mark.parametrize("a,expected", run_pipeline_expectations)
def test_run_pipeline(mocker, a, expected):
mocker.patch(
'gitlabctl.project.main',
main_run_pipeline)
runner = CliRunner()
result = runner.invoke(run_pipeline, a)
assert expected == result.output
|
nilq/baby-python
|
python
|
from enum import Enum
class Status(Enum):
    Dziecko = 1      # child
    Nastolatek = 2   # teenager
    Dorosly = 3      # adult
def printFileName():
print("Status")
|
nilq/baby-python
|
python
|
import logging
from typing import Optional, Sequence
from hybrid.sites import SiteInfo
import PySAM.Singleowner as Singleowner
from hybrid.log import hybrid_logger as logger
class PowerSource:
def __init__(self, name, site: SiteInfo, system_model, financial_model):
"""
        Abstract base class for a renewable-energy power plant simulation.
        Concrete plants must implement the ``system_capacity_kw`` property.
"""
self.name = name
self.site = site
self.system_model = system_model
self.financial_model = financial_model
self.set_construction_financing_cost_per_kw(financial_model.FinancialParameters.construction_financing_cost \
/ financial_model.FinancialParameters.system_capacity)
@property
def system_capacity_kw(self) -> float:
raise NotImplementedError
def get_total_installed_cost_dollars(self) -> float:
return self.financial_model.SystemCosts.total_installed_cost
def set_total_installed_cost_dollars(self, total_installed_cost_dollars: float):
self.financial_model.SystemCosts.total_installed_cost = total_installed_cost_dollars
logger.info("{} set total_installed_cost to ${}".format(self.name, total_installed_cost_dollars))
def set_construction_financing_cost_per_kw(self, construction_financing_cost_per_kw):
self._construction_financing_cost_per_kw = construction_financing_cost_per_kw
def get_construction_financing_cost(self) -> float:
return self._construction_financing_cost_per_kw * self.system_capacity_kw
def simulate(self, project_life: int = 25):
"""
Run the system and financial model
"""
if not self.system_model:
return
self.system_model.execute(0)
if not self.financial_model:
return
self.financial_model.value("construction_financing_cost", self.get_construction_financing_cost())
self.financial_model.Revenue.ppa_soln_mode = 1
self.financial_model.Lifetime.system_use_lifetime_output = 1
self.financial_model.FinancialParameters.analysis_period = project_life
single_year_gen = self.financial_model.SystemOutput.gen
self.financial_model.SystemOutput.gen = list(single_year_gen) * project_life
if self.name != "Grid":
self.financial_model.SystemOutput.system_pre_curtailment_kwac = self.system_model.Outputs.gen * project_life
self.financial_model.SystemOutput.annual_energy_pre_curtailment_ac = self.system_model.Outputs.annual_energy
self.financial_model.execute(0)
logger.info("{} simulation executed".format(self.name))
def generation_profile(self) -> Sequence:
if self.system_capacity_kw:
return self.system_model.Outputs.gen
else:
return [0] * self.site.n_timesteps
def copy(self):
"""
:return: new instance
"""
raise NotImplementedError
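# --- Illustrative sketch (hypothetical subclass, not part of the HOPP code) ----
# PowerSource is abstract: a concrete plant mainly has to provide the
# ``system_capacity_kw`` property.  The tiny financial-model stand-in below is
# invented solely so the sketch is self-contained; real code passes PySAM models.
class _DemoFinancialParameters:
    construction_financing_cost = 1_000_000.0  # $
    system_capacity = 50_000.0                 # kW

class _DemoFinancialModel:
    FinancialParameters = _DemoFinancialParameters()

class FixedCapacityPlant(PowerSource):
    """A plant with a constant nameplate capacity, used only for illustration."""
    def __init__(self, site, capacity_kw: float):
        super().__init__("FixedCapacityPlant", site, system_model=None,
                         financial_model=_DemoFinancialModel())
        self._capacity_kw = capacity_kw

    @property
    def system_capacity_kw(self) -> float:
        return self._capacity_kw

# plant = FixedCapacityPlant(site=None, capacity_kw=50_000.0)
# plant.get_construction_financing_cost()  # -> 20 $/kW * 50,000 kW = 1,000,000 $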
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 24 13:41:37 2018
@author: craggles
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
import scipy.special
import sklearn
from matplotlib import cm
from matplotlib import rc
import matplotlib
matplotlib.rc('pdf', fonttype=42)
start = -10
end = 10
resolution = 512
x=y= np.linspace(start,end,num=resolution)
xx,yy = np.meshgrid(x,y)
rr = np.sqrt(xx**2+yy**2)
tt = np.arctan2(yy, xx)  # polar angle on the grid (currently unused)
airy = 2*np.divide(scipy.special.jve(1,rr),rr)
#norm_airy = sklearn.preprocessing.normalize(airy)
#plt.imshow(airy)
colors = cm.viridis(airy)
fig = plt.figure()
#plt.rc('text', usetex=True)
#plt.rc('font', family='serif')
#pgf_with_rc_fonts = {"pgf.texsystem": "pdflatex"}
#matplotlib.rcParams.update(pgf_with_rc_fonts)
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim3d([start, end])
ax.set_ylim3d([start, end])
ax.set_zlim3d([-0.08, 1])
# Plot a basic wireframe.
#ax.plot_wireframe(xx, yy, airy, rstride=10, cstride=10)
#surf = ax.plot_surface(xx, yy, airy, rcount=50, ccount=50,
# facecolors=colors, shade=False)
surf = ax.plot_surface(xx, yy, airy, cmap=cm.viridis)
#surf.set_facecolor((0,0,0,0))
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('Electric field')
plt.tight_layout()
plt.savefig("airy_E_fill.pdf")
surf.remove()
#plt.show()
#%%
#plt.cla()
surf = ax.plot_surface(xx, yy, airy**2, cmap=cm.viridis)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('Intensity')
plt.savefig("airy_I_fill.pdf")
plt.show()
|
nilq/baby-python
|
python
|
import os
from getpass import getpass
from netmiko import ConnectHandler
password = os.getenv("PYNET_PASSWORD") or getpass()
net_connect = ConnectHandler(
host="cisco3.lasthop.io",
username="pyclass",
password=password,
device_type="cisco_ios",
session_log="my_session.txt",
)
print(net_connect.find_prompt())
net_connect.disconnect()
|
nilq/baby-python
|
python
|
from pathlib import Path
from yacs.config import CfgNode as CN
import os
import time
import logging
import torch.distributed as dist
_C = CN()
_C.dataset = 'imagenet'
_C.data_dir = './data_list/'
_C.check_path = './checkpoint'
_C.arch = 'resnet50'
_C.workers = 32
_C.epochs = 400
_C.defer_epoch = 0
_C.start_epoch = 1
_C.batch_size = 256
_C.lr = 0.02
_C.momentum = 0.9
_C.weight_decay = 5e-4
_C.print_freq = 100
_C.resume = ''
_C.resume2 = ''
_C.world_size = 1
_C.rank = 0
_C.dist_url = 'tcp://localhost:10000'
_C.dist_backend = 'nccl'
_C.seed = None
_C.gpu = None
_C.evaluate = False
_C.multiprocessing_distributed = True
# options for moco v2
_C.moco_dim = 128
_C.moco_k = 8192
_C.moco_m = 0.999
_C.grad = False
_C.mlp = True
_C.aug_plus = False
_C.normalize = False
_C.queue_size_per_cls = 4
_C.smooth = 0.1
_C.ldam_m = 0.1
# options for SupCon
_C.con_type = 'SupConLoss'
_C.gamma = 128
_C.margin = 0.25
_C.con_weight = 1.0
_C.balsfx_n = 0.0
_C.effective_num_beta = 0.99
_C.temperature = 0.1
_C.log_weight = 7.0
# options for others
_C.mark = ''
_C.debug = False
_C.aug = 'randcls_sim'
_C.log_dir = 'logs'
_C.model_dir = 'ckps'
_C.warm_epochs = 10
_C.randaug_m = 10
_C.randaug_n = 2
_C.color_p = 1.0
_C.color_h = 0.0
_C.branch_type = 'balance'
_C.alpha = 0.2
_C.path = 'same'
_C.pos_size_per_cls = 8
_C.neg_size_per_cls = 4
def update_config(cfg, args):
cfg.defrost()
cfg.merge_from_file(args.cfg)
cfg.merge_from_list(args.opts)
log_dir = Path("saved") / (cfg.mark) / Path(cfg.log_dir)
print('=> creating {}'.format(log_dir))
log_dir.mkdir(parents=True, exist_ok=True)
log_file = '{}.txt'.format(cfg.mark)
# final_log_file = log_dir / log_file
model_dir = Path("saved") / (cfg.mark) / Path(cfg.model_dir)
print('=> creating {}'.format(model_dir))
model_dir.mkdir(parents=True, exist_ok=True)
cfg.model_dir = str(model_dir)
# cfg.freeze()
class NoOp:
def __getattr__(self, *args):
def no_op(*args, **kwargs):
"""Accept every signature by doing non-operation."""
pass
return no_op
def get_logger(config, resume=False, is_rank0=True):
"""Get the program logger.
    Args:
        config: Config node providing ``mark`` (the run name) and ``log_dir`` (the log
            sub-directory); the log file is written to ``saved/<mark>/<log_dir>/<mark>.log``.
        resume (bool): If False, open the log file in write mode ("w+"); otherwise open
            it in append mode ("a+"). Default is False.
        is_rank0 (bool): If True, create the normal logger; if False, create the null
            logger, which is useful in DDP training. Default is True.
"""
if is_rank0:
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
console = logging.StreamHandler()
logging.getLogger('').addHandler(console)
# # StreamHandler
# stream_handler = logging.StreamHandler(sys.stdout)
# stream_handler.setLevel(level=logging.INFO)
# logger.addHandler(stream_handler)
# FileHandler
        if not resume:
mode = "w+"
else:
mode = "a+"
log_dir = Path("saved") / (config.mark) / Path(config.log_dir)
log_name = config.mark + ".log"
file_handler = logging.FileHandler(os.path.join(log_dir, log_name), mode=mode)
file_handler.setLevel(level=logging.INFO)
logger.addHandler(file_handler)
else:
logger = NoOp()
return logger
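# --- Illustrative usage sketch (hypothetical run name) --------------------------
# Only rank 0 should write the log file in DDP training; every other rank gets the
# NoOp logger, so calls such as logger.info(...) silently do nothing.
if __name__ == "__main__":
    demo_cfg = _C.clone()
    demo_cfg.mark = "demo"  # hypothetical run name; files go under ./saved/demo/
    (Path("saved") / demo_cfg.mark / Path(demo_cfg.log_dir)).mkdir(parents=True, exist_ok=True)
    rank0_logger = get_logger(demo_cfg, resume=False, is_rank0=True)
    rank0_logger.info("written to saved/%s/%s/%s.log", demo_cfg.mark, demo_cfg.log_dir, demo_cfg.mark)
    get_logger(demo_cfg, is_rank0=False).info("this call is a silent no-op on non-zero ranks")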
|
nilq/baby-python
|
python
|
#!/usr/local/sbin/charm-env python3
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from charms.reactive import (
Endpoint,
toggle_flag,
)
from charmhelpers.core.hookenv import log
class KubeMastersPeer(Endpoint):
"""
Implements peering for kubernetes-master units.
"""
def manage_flags(self):
"""
Set states corresponding to the data we have.
"""
toggle_flag(
self.expand_name('{endpoint_name}.connected'),
self.is_joined)
toggle_flag(
self.expand_name('{endpoint_name}.cohorts.ready'),
self.is_joined and self._peers_have_cohorts())
def _peers_have_cohorts(self):
"""
Return True if all peers have cohort keys.
"""
for unit in self.all_joined_units:
if not unit.received.get('cohort-keys'):
log('Unit {} does not yet have cohort-keys'.format(unit))
return False
log('All units have cohort-keys')
return True
def set_cohort_keys(self, cohort_keys):
"""
Send the cohort snapshot keys.
"""
for relation in self.relations:
relation.to_publish['cohort-keys'] = cohort_keys
|
nilq/baby-python
|
python
|
import sys
import re as _re
from fclpy.lisptype import LispSymbol
class LispStream():
def __init__(self, fh):
self.fh = fh
self.tokens = []
self.buff = []
def unread_char(self, y):
self.buff.append(y)
def push_token(self, token):
self.tokens.append(token)
def has_token(self,token):
return token in self.tokens
def pop_token(self):
return self.tokens.pop()
def read_char(self):
if len(self.buff) > 0:
return self.buff.pop()
return self.fh.read(1)
def eof(self):
return False
STDIN = LispStream(sys.stdin)
class LispReader():
def __init__(self, get_macro_character, stream = STDIN):
self.stream = stream
self.get_macro_character = get_macro_character
def read_1(self):
toss = True
while(toss):
toss = False
x = self.stream.read_char()
if self.stream.eof():
return None
elif (not self.valid_char(x)):
raise Exception("reader-error")
elif self.whitespace_char(x):
toss = True
elif self.macro_character(x):
return self.get_macro_character(x)(x,self.stream)
elif self.single_escape_character(x):
y = self.stream.read_char()
if self.stream.eof():
raise Exception("reader-error")
return self.read_8(y.upper())
elif self.multiple_escape_character(x):
return self.read_9("")
else:
return self.read_8(x.upper())
def read_8(self, token):
more = True
while(more):
y = self.stream.read_char()
if self.terminating_macro_character(y):
self.stream.unread_char(y)
more = False
elif self.whitespace_char(y):
more = False
else:
token = token + y.upper()
return self.read_10(token)
def read_10(self, token):
if _re.match("[0-9].*",token):
return token
return LispSymbol(token)
    def valid_char(self, c):
        # Placeholder: every character is accepted (always True).
        return c == c
    def whitespace_char(self, c):
        return c in [" ", "\t", "\n", "\r"]
    def eof(self, c):
        # Placeholder: never signals end-of-file (always False).
        return c != c
    def macro_character(self, c):
        return c in ["(", ")", "'", ";"]
    def terminating_macro_character(self, c):
        return c in [")"]
    def non_terminating_macro_character(self, c):
        # Placeholder: no non-terminating macro characters are defined yet (always False).
        return c != c
def single_escape_character(self,c):
return c == "\\"
def multiple_escape_character(self,c):
return c == "\""
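# --- Illustrative usage sketch (hypothetical macro-character table) --------------
# Reads a single token from an in-memory stream instead of STDIN.  The lambda
# passed as get_macro_character is a stand-in that just echoes the macro character;
# it is never invoked for the plain symbol read below.
if __name__ == "__main__":
    import io
    demo_stream = LispStream(io.StringIO("foo bar"))
    demo_reader = LispReader(lambda ch: (lambda c, s: c), stream=demo_stream)
    print(demo_reader.read_1())  # a LispSymbol built from the upcased token "FOO"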
|
nilq/baby-python
|
python
|
"""
We have discussed Knight’s tour and Rat in a Maze problems in Set 1 and Set 2 respectively. Let us discuss N Queen as another example problem that can be solved using Backtracking.
The N Queen problem asks for placements of N chess queens on an N×N chessboard so
that no two queens attack each other. For example, the following is a solution to the
4-Queens problem.
"""
def get_n_queens(n_queens):
    """Solve N-Queens by backtracking; each solution is an n x n board of 0/1 values."""
    solutions, cols = [], []  # cols[r] = column of the queen placed in row r

    def place(row):
        if row == n_queens:
            solutions.append([[1 if cols[r] == c else 0 for c in range(n_queens)]
                              for r in range(n_queens)])
            return
        for col in range(n_queens):
            # Safe if no previously placed queen shares the column or a diagonal.
            if all(c != col and abs(c - col) != row - r for r, c in enumerate(cols)):
                cols.append(col)
                place(row + 1)
                cols.pop()

    place(0)
    return solutions
n_queens = 4
expected = [
[
[0, 1, 0, 0],
[0, 0, 0, 1],
[1, 0, 0, 0],
[0, 0, 1, 0],
],
[
[0, 0, 1, 0],
[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
],
]
result = get_n_queens(n_queens)
result1 = result == expected
result2 = result[0] == expected[1] and result[1] == expected[0]
assert result1 or result2
print("OK")
|
nilq/baby-python
|
python
|
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--text", required=True, action="store",
                    help="text for google_speech to say")
args = parser.parse_args()
from google_speech import Speech
# speak the text passed on the command line
text = args.text
lang = "en"
speech = Speech(text, lang)
#speech.play()
# you can also apply audio effects (using SoX)
# see http://sox.sourceforge.net/sox.html#EFFECTS for full effect documentation
sox_effects = ("speed", "1")
speech.play(sox_effects)
|
nilq/baby-python
|
python
|
import os
import webapp2
import jinja2
import json
import cgi
import re
import hmac
import hashlib
import random
from string import letters
from google.appengine.ext import db
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir),
autoescape = True)
USER_RE = re.compile(r'^[a-zA-Z0-9_-]{3,20}$')
PASS_RE = re.compile(r'^.{3,20}$')
EMAIL_RE = re.compile(r'^[\S]+@[\S]+\.[\S]+$')
signupForm="""
<!DOCTYPE html>
<html>
<head>
<title>Sign Up</title>
<style type="text/css">
.label {text-align: right}
.error {color: red}
</style>
</head>
<body>
<h2>Signup</h2>
<form method="post">
<table>
<tr>
<td class="label">
Username
</td>
<td>
<input type="text" name="username" value="%(username)s">
</td>
<td class="error">
%(user_error)s
</td>
</tr>
<tr>
<td class="label">
Password
</td>
<td>
<input type="password" name="password" value="%(password)s">
</td>
<td class="error">
%(pass_error)s
</td>
</tr>
<tr>
<td class="label">
Verify Password
</td>
<td>
<input type="password" name="verify" value="%(verify)s">
</td>
<td class="error">
%(verify_error)s
</td>
</tr>
<tr>
<td class="label">
Email (optional)
</td>
<td>
<input type="text" name="email" value="%(email)s">
</td>
<td class="error">
%(email_error)s
</td>
</tr>
</table>
<input type="submit">
</form>
</body>
</html>
"""
loginForm="""
<!DOCTYPE html>
<html>
<head>
<title>Login</title>
<style type="text/css">
.label {text-align: right}
.error {color: red}
</style>
</head>
<body>
<h2>Login</h2>
<form method="post">
<table>
<tr>
<td class="label">
Username
</td>
<td>
<input type="text" name="username" value="%(username)s">
</td>
<td class="error">
%(user_error)s
</td>
</tr>
<tr>
<td class="label">
Password
</td>
<td>
<input type="password" name="password" value="%(password)s">
</td>
<td class="error">
%(pass_error)s
</td>
</tr>
</table>
<input type="submit">
</form>
</body>
</html>
"""
def valid_username(username):
return USER_RE.match(username)
def valid_password(password):
return PASS_RE.match(password)
def valid_email(email):
return EMAIL_RE.match(email)
secret='iloveyou'
def hash_cookie(cookie):
return '%s|%s' %(cookie,hmac.new(secret,cookie).hexdigest())
def valid_cookie(hashcookie):
cookie = hashcookie.split('|')[0]
if hashcookie == hash_cookie(cookie):
return cookie
def make_salt():
salt_list = [ random.choice(letters) for x in xrange(5) ]
return ''.join(salt_list)
def hash_password(password,salt=None):
if not salt:
salt=make_salt()
h = hashlib.sha256(password+salt).hexdigest()
return '%s|%s' %(h,salt)
def valid_hashpassword(hashpass,password):
salt=hashpass.split('|')[1]
if hash_password(password,salt=salt)==hashpass:
return True
class User(db.Model):
name = db.StringProperty(required=True)
pw_hash = db.StringProperty(required=True)
email = db.StringProperty()
class Signup(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/html'
self.response.write(writeForm(signupForm))
def post(self):
username=self.request.get("username")
password=self.request.get("password")
verify=self.request.get("verify")
email=self.request.get("email")
if not valid_username(username):
user_error="It's not a valid username!"
else:
user_error=''
if not valid_password(password):
pass_error="It's not a valid password!"
else:
pass_error=''
if (password != verify):
verify_error="Password didn't match!"
else:
verify_error=''
if (email):
if not valid_email(email):
email_error="It's not a valid email!"
else:
email_error=''
else:
email_error=''
if (user_error or pass_error or verify_error or email_error):
self.response.headers['Content-Type'] = 'text/html'
self.response.write(writeForm(signupForm,username,password,verify,email,user_error,pass_error,verify_error,\
email_error))
elif User.all().filter('name =',username).get():
user_error = 'This user already exists!'
self.response.write(writeForm(signupForm,username,password,verify,email,user_error,pass_error,verify_error,\
email_error))
else:
h_username=hash_cookie(username)
pw_hash = hash_password(password)
u = User(name=username,pw_hash=pw_hash,email=email)
u.put()
self.response.headers.add_header('Set-Cookie','username=%s;Path=/' %str(h_username))
self.redirect("/thanks") # redirect
class Login(webapp2.RequestHandler):
def get(self):
self.response.write(writeForm(loginForm))
def post(self):
username=self.request.get('username')
password=self.request.get('password')
u=User.all().filter('name =',username).get()
if u and valid_hashpassword(u.pw_hash,password):
h_username=hash_cookie(username)
self.response.headers.add_header('Set-Cookie','username=%s;Path=/' %str(h_username))
self.redirect("/thanks") # redirect
else:
self.response.write(writeForm(loginForm))
class Logout(webapp2.RequestHandler):
def get(self):
self.response.headers.add_header('Set-Cookie','username=;Path=/')
self.redirect("/signup")
class ThanksHandler(webapp2.RequestHandler):
def get(self):
username=self.request.cookies.get('username')
username=valid_cookie(username)
if not username:
self.redirect('/login')
else:
self.response.headers['Content-Type'] = 'text/plain'
self.response.write('Welcome, %s' %username)
def writeForm(form,username='',password='',verify='',email='',\
user_error='',pass_error='',verify_error='',email_error=''):
return form %{'username':username,'password':password,\
'verify':verify,'email':email,'user_error':user_error,\
'verify_error':verify_error,'email_error':email_error,'pass_error':pass_error}
def render_str(template, **params):
t = jinja_env.get_template(template)
return t.render(params)
class BaseHandler(webapp2.RequestHandler):
def render(self, template, **kw):
self.response.out.write(render_str(template, **kw))
def write(self, *a, **kw):
self.response.out.write(*a, **kw)
class Blogdb(db.Model): # database
subject=db.StringProperty(required=True)
content=db.TextProperty(required=True)
created=db.DateTimeProperty(auto_now_add=True)
class MainPage(BaseHandler):
def get(self):
posts=db.GqlQuery("select * from Blogdb order by created desc")
self.render('mainpage.html',posts=posts)
class MainPageJson(BaseHandler):
    def get(self):
        main_list = []
        posts=db.GqlQuery("select * from Blogdb order by created desc")
        for post in posts:
            # build a fresh dict for each post; reusing a single dict would make every entry identical
            main_dict = {}
            main_dict['subject'] = post.subject
            main_dict['content'] = post.content
            main_dict['created'] = post.created.strftime('%c')
            main_list.append(main_dict)
        mainJson = json.dumps(main_list)
self.response.headers['Content-type']='application/json; charset=UTF-8'
self.write(mainJson)
class PostPage(BaseHandler):
def get(self,post_id):
# key = db.Key.from_path('Blogdb',int(post_id))
# post = db.get(key)
post = Blogdb.get_by_id(int(post_id))
if not post:
self.error(404)
else:
self.render('permalink.html',post = post)
class postJson(BaseHandler):
def get(self,post_id):
post_dict = {}
post_id = post_id.split('.')[0]
post = Blogdb.get_by_id(int(post_id))
if not post:
self.error(404)
else:
post_dict['subject'] = post.subject
post_dict['content'] = post.content
post_dict['created'] = post.created.strftime('%c')
postjson= json.dumps(post_dict)
self.response.headers['Content-type']='application/json; charset=UTF-8'
self.write(postjson)
class NewpostHandler(BaseHandler):
def get(self):
# self.render('newentry-form.html',subject='',error_subject='',\
# content='',error_content='')
self.render("newentry-form.html")
def post(self):
have_error= False
subject=self.request.get('subject')
content=self.request.get('content').replace('\n','<br>') # in order for the content to be
params=dict(subject=subject,content=content) # displayed properly in the browser
if not subject:
have_error=True
params['error_subject']='You must have a subject!'
if not content:
have_error=True
params['error_content']='You need enter the content!'
if have_error:
self.render('newentry-form.html',**params)
else:
blog=Blogdb(subject=subject,content=content)
blog.put()
# print blog.key().id()
self.redirect('/%s' % str(blog.key().id()))
app = webapp2.WSGIApplication([('/newpost', NewpostHandler),\
('/([0-9]+)',PostPage),
                               ('/.json',MainPageJson),
('/([0-9]+\.json)',postJson),
('/signup', Signup),
('/thanks',ThanksHandler),
('/login',Login),
('/logout',Logout),
('/',MainPage),],debug=True)
|
nilq/baby-python
|
python
|
import logging
import os
import shutil
import sys
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class AverageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.avg = 0
self.sum = 0
self.cnt = 0
def update(self, val, n=1):
self.cur = val
self.sum += val * n
self.cnt += n
self.avg = self.sum / self.cnt
def accuracy(output, target, topk=(1, 5)):
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0/batch_size))
return res
def count_parameters_in_MB(model):
    return sum(np.prod(v.size()) for name, v in model.named_parameters() if "aux" not in name) / 1e6
def save_checkpoint(state, is_best, save):
filename = os.path.join(save, 'checkpoint.pth.tar')
torch.save(state, filename)
if is_best:
best_filename = os.path.join(save, 'model_best.pth.tar')
shutil.copyfile(filename, best_filename)
def save(model, model_path):
torch.save(model.state_dict(), model_path)
def load_net_config(path):
with open(path, 'r') as f:
net_config = ''
while True:
line = f.readline().strip()
if 'net_type' in line:
net_type = line.split(': ')[-1]
break
else:
net_config += line
return net_config, net_type
def load_model(model, model_path):
logging.info('Start loading the model from ' + model_path)
if 'http' in model_path:
model_addr = model_path
model_path = model_path.split('/')[-1]
if os.path.isfile(model_path):
os.system('rm ' + model_path)
os.system('wget -q ' + model_addr)
model.load_state_dict(torch.load(model_path), strict=False)
logging.info('Loading the model finished!')
def create_exp_dir(path):
if not os.path.exists(path):
os.mkdir(path)
print('Experiment dir : {}'.format(path))
def cross_entropy_with_label_smoothing(pred, target, label_smoothing=0.):
"""
Label smoothing implementation.
This function is taken from https://github.com/MIT-HAN-LAB/ProxylessNAS/blob/master/proxyless_nas/utils.py
"""
    logsoftmax = nn.LogSoftmax(dim=1).cuda()
n_classes = pred.size(1)
# convert to one-hot
target = torch.unsqueeze(target, 1)
soft_target = torch.zeros_like(pred)
soft_target.scatter_(1, target, 1)
# label smoothing
soft_target = soft_target * (1 - label_smoothing) + label_smoothing / n_classes
return torch.mean(torch.sum(- soft_target * logsoftmax(pred), 1))
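# --- Illustrative usage sketch (hypothetical toy tensors) -----------------------
# The helper above moves its LogSoftmax onto CUDA, so this tiny check only runs
# when a GPU is available.
if __name__ == "__main__" and torch.cuda.is_available():
    demo_pred = torch.randn(4, 10).cuda()              # (batch, num_classes) logits
    demo_target = torch.randint(0, 10, (4,)).cuda()    # integer class labels
    print(cross_entropy_with_label_smoothing(demo_pred, demo_target, label_smoothing=0.1))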
def parse_net_config(net_config):
str_configs = net_config.split('|')
return [eval(str_config) for str_config in str_configs]
def set_seed(seed):
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
def set_logging(save_path, log_name='log.txt'):
log_format = '%(asctime)s %(message)s'
date_format = '%m/%d %H:%M:%S'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt=date_format)
fh = logging.FileHandler(os.path.join(save_path, log_name))
fh.setFormatter(logging.Formatter(log_format, date_format))
logging.getLogger().addHandler(fh)
def create_save_dir(save_path, job_name):
if job_name != '':
job_name = time.strftime("%Y%m%d-%H%M%S-") + job_name
save_path = os.path.join(save_path, job_name)
create_exp_dir(save_path)
os.system('cp -r ./* '+save_path)
save_path = os.path.join(save_path, 'output')
create_exp_dir(save_path)
else:
save_path = os.path.join(save_path, 'output')
create_exp_dir(save_path)
return save_path, job_name
def latency_measure(module, input_size, batch_size, meas_times, mode='gpu'):
assert mode in ['gpu', 'cpu']
latency = []
module.eval()
input_size = (batch_size,) + tuple(input_size)
input_data = torch.randn(input_size)
if mode=='gpu':
input_data = input_data.cuda()
module.cuda()
    for i in range(meas_times):
        with torch.no_grad():
            start = time.time()
            _ = module(input_data)
            if mode == 'gpu':
                torch.cuda.synchronize()
            # the first 100 iterations are treated as warm-up and not recorded
            if i >= 100:
                latency.append(time.time() - start)
    print(np.mean(latency) * 1e3, 'ms')
    return np.mean(latency) * 1e3
def latency_measure_fw(module, input_data, meas_times):
latency = []
module.eval()
for i in range(meas_times):
with torch.no_grad():
start = time.time()
output_data = module(input_data)
torch.cuda.synchronize()
if i >= 100:
latency.append(time.time() - start)
print(np.mean(latency) * 1e3, 'ms')
return np.mean(latency) * 1e3, output_data
def record_topk(k, rec_list, data, comp_attr, check_attr):
def get_insert_idx(orig_list, data, comp_attr):
start = 0
end = len(orig_list)
while start < end:
mid = (start + end) // 2
if data[comp_attr] < orig_list[mid][comp_attr]:
start = mid + 1
else:
end = mid
return start
if_insert = False
insert_idx = get_insert_idx(rec_list, data, comp_attr)
if insert_idx < k:
rec_list.insert(insert_idx, data)
if_insert = True
while len(rec_list) > k:
rec_list.pop()
return if_insert
def one_hot_tensor(y_batch_tensor, num_classes, device):
y_tensor = torch.cuda.FloatTensor(y_batch_tensor.size(0),
num_classes).fill_(0)
y_tensor[np.arange(len(y_batch_tensor)), y_batch_tensor] = 1.0
return y_tensor
def label_smoothing(y_batch_tensor, num_classes, delta):
y_batch_smooth = (1 - delta - delta / (num_classes - 1)) * \
y_batch_tensor + delta / (num_classes - 1)
return y_batch_smooth
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
class softCrossEntropy(nn.Module):
def __init__(self, reduce=True):
super(softCrossEntropy, self).__init__()
self.reduce = reduce
return
def forward(self, inputs, targets):
"""
:param inputs: predictions
:param targets: target labels in vector form
:return: loss
"""
log_likelihood = -F.log_softmax(inputs, dim=1)
sample_num, class_num = targets.shape
if self.reduce:
loss = torch.sum(torch.mul(log_likelihood, targets)) / sample_num
else:
loss = torch.sum(torch.mul(log_likelihood, targets), 1)
return loss
class CWLoss(nn.Module):
def __init__(self, num_classes, margin=50, reduce=True):
super(CWLoss, self).__init__()
self.num_classes = num_classes
self.margin = margin
self.reduce = reduce
return
def forward(self, logits, targets):
"""
:param inputs: predictions
:param targets: target labels
:return: loss
"""
onehot_targets = one_hot_tensor(targets, self.num_classes,
targets.device)
self_loss = torch.sum(onehot_targets * logits, dim=1)
other_loss = torch.max(
(1 - onehot_targets) * logits - onehot_targets * 1000, dim=1)[0]
loss = -torch.sum(torch.clamp(self_loss - other_loss + self.margin, 0))
if self.reduce:
sample_num = onehot_targets.shape[0]
loss = loss / sample_num
return loss
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Author: Wenbin Li (liwenbin.nju@gmail.com)
Date: April 9, 2019
Version: V0
Citation:
@inproceedings{li2019DN4,
title={Revisiting Local Descriptor based Image-to-Class Measure for Few-shot Learning},
author={Li, Wenbin and Wang, Lei and Xu, Jinglin and Huo, Jing and Gao Yang and Luo, Jiebo},
booktitle={CVPR},
year={2019}
}
"""
from __future__ import print_function
import argparse
import os
import random
import shutil
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import grad
import time
from torch import autograd
from PIL import ImageFile
import scipy as sp
import scipy.stats
import pdb
# ============================ Data & Networks =====================================
from dataset.datasets_csv import Imagefolder_csv
import models.network as DN4Net
# ==================================================================================
ImageFile.LOAD_TRUNCATED_IMAGES = True
os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES']='0'
# Load the pre-trained model
model_trained = './results/DN4_miniImageNet_Conv64F_5Way_5Shot_K3/model_best.pth.tar'
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_dir', default='/Datasets/miniImageNet--ravi', help='/miniImageNet')
parser.add_argument('--data_name', default='miniImageNet', help='miniImageNet|StanfordDog|StanfordCar|CubBird')
parser.add_argument('--mode', default='test', help='train|val|test')
parser.add_argument('--outf', default='./results/DN4')
parser.add_argument('--resume', default=model_trained, type=str, help='path to the lastest checkpoint (default: none)')
parser.add_argument('--basemodel', default='Conv64F', help='Conv64F|ResNet256F')
parser.add_argument('--workers', type=int, default=8)
# Few-shot parameters #
parser.add_argument('--imageSize', type=int, default=84)
parser.add_argument('--episodeSize', type=int, default=1, help='the mini-batch size of training')
parser.add_argument('--testepisodeSize', type=int, default=1, help='one episode is taken as a mini-batch')
parser.add_argument('--epochs', type=int, default=30, help='the total number of training epoch')
parser.add_argument('--episode_train_num', type=int, default=10000, help='the total number of training episodes')
parser.add_argument('--episode_val_num', type=int, default=1000, help='the total number of evaluation episodes')
parser.add_argument('--episode_test_num', type=int, default=600, help='the total number of testing episodes')
parser.add_argument('--way_num', type=int, default=5, help='the number of way/class')
parser.add_argument('--shot_num', type=int, default=5, help='the number of shot')
parser.add_argument('--query_num', type=int, default=15, help='the number of queries')
parser.add_argument('--neighbor_k', type=int, default=3, help='the number of k-nearest neighbors')
parser.add_argument('--lr', type=float, default=0.005, help='learning rate, default=0.005')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', default=True, help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='the number of gpus')
parser.add_argument('--nc', type=int, default=3, help='input image channels')
parser.add_argument('--clamp_lower', type=float, default=-0.01)
parser.add_argument('--clamp_upper', type=float, default=0.01)
parser.add_argument('--print_freq', '-p', default=100, type=int, metavar='N', help='print frequency (default: 100)')
opt = parser.parse_args()
opt.cuda = True
cudnn.benchmark = True
# ======================================= Define functions =============================================
def validate(val_loader, model, criterion, epoch_index, F_txt):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
# switch to evaluate mode
model.eval()
accuracies = []
end = time.time()
for episode_index, (query_images, query_targets, support_images, support_targets) in enumerate(val_loader):
# Convert query and support images
query_images = torch.cat(query_images, 0)
input_var1 = query_images.cuda()
input_var2 = []
for i in range(len(support_images)):
temp_support = support_images[i]
temp_support = torch.cat(temp_support, 0)
temp_support = temp_support.cuda()
input_var2.append(temp_support)
# Deal with the target
target = torch.cat(query_targets, 0)
target = target.cuda()
# Calculate the output
output = model(input_var1, input_var2)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, _ = accuracy(output, target, topk=(1, 3))
losses.update(loss.item(), query_images.size(0))
top1.update(prec1[0], query_images.size(0))
accuracies.append(prec1)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
#============== print the intermediate results ==============#
if episode_index % opt.print_freq == 0 and episode_index != 0:
print('Test-({0}): [{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.3f} ({loss.avg:.3f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
epoch_index, episode_index, len(val_loader), batch_time=batch_time, loss=losses, top1=top1))
print('Test-({0}): [{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.3f} ({loss.avg:.3f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
epoch_index, episode_index, len(val_loader), batch_time=batch_time, loss=losses, top1=top1), file=F_txt)
print(' * Prec@1 {top1.avg:.3f} Best_prec1 {best_prec1:.3f}'.format(top1=top1, best_prec1=best_prec1))
print(' * Prec@1 {top1.avg:.3f} Best_prec1 {best_prec1:.3f}'.format(top1=top1, best_prec1=best_prec1), file=F_txt)
return top1.avg, accuracies
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def mean_confidence_interval(data, confidence=0.95):
a = [1.0*np.array(data[i].cpu()) for i in range(len(data))]
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
    h = se * scipy.stats.t.ppf((1 + confidence) / 2., n - 1)
return m,h
# ======================================== Settings of path ============================================
# saving path
opt.outf = opt.outf+'_'+opt.data_name+'_'+str(opt.basemodel)+'_'+str(opt.way_num)+'Way_'+str(opt.shot_num)+'Shot'+'_K'+str(opt.neighbor_k)
if not os.path.exists(opt.outf):
os.makedirs(opt.outf)
if torch.cuda.is_available() and not opt.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
# save the opt and results to a txt file
txt_save_path = os.path.join(opt.outf, 'Test_results.txt')
F_txt = open(txt_save_path, 'a+')
print(opt)
print(opt, file=F_txt)
# ========================================== Model Config ===============================================
ngpu = int(opt.ngpu)
global best_prec1, epoch_index
best_prec1 = 0
epoch_index = 0
model = DN4Net.define_DN4Net(which_model=opt.basemodel, num_classes=opt.way_num, neighbor_k=opt.neighbor_k, norm='batch',
init_type='normal', use_gpu=opt.cuda)
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = optim.Adam(model.parameters(), lr=opt.lr, betas=(opt.beta1, 0.9))
# optionally resume from a checkpoint
if opt.resume:
if os.path.isfile(opt.resume):
print("=> loading checkpoint '{}'".format(opt.resume))
checkpoint = torch.load(opt.resume)
epoch_index = checkpoint['epoch_index']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})".format(opt.resume, checkpoint['epoch_index']))
print("=> loaded checkpoint '{}' (epoch {})".format(opt.resume, checkpoint['epoch_index']), file=F_txt)
else:
print("=> no checkpoint found at '{}'".format(opt.resume))
print("=> no checkpoint found at '{}'".format(opt.resume), file=F_txt)
if opt.ngpu > 1:
model = nn.DataParallel(model, range(opt.ngpu))
# print the architecture of the network
print(model)
print(model, file=F_txt)
# ============================================ Testing phase ========================================
print('\n............Start testing............')
start_time = time.time()
repeat_num = 5 # repeat running the testing code several times
total_accuracy = 0.0
total_h = np.zeros(repeat_num)
total_accuracy_vector = []
for r in range(repeat_num):
print('===================================== Round %d =====================================' %r)
print('===================================== Round %d =====================================' %r, file=F_txt)
# ======================================= Folder of Datasets =======================================
# image transform & normalization
ImgTransform = transforms.Compose([
transforms.Resize((opt.imageSize, opt.imageSize)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
testset = Imagefolder_csv(
data_dir=opt.dataset_dir, mode=opt.mode, image_size=opt.imageSize, transform=ImgTransform,
episode_num=opt.episode_test_num, way_num=opt.way_num, shot_num=opt.shot_num, query_num=opt.query_num
)
print('Testset: %d-------------%d' %(len(testset), r), file=F_txt)
# ========================================== Load Datasets =========================================
test_loader = torch.utils.data.DataLoader(
testset, batch_size=opt.testepisodeSize, shuffle=True,
num_workers=int(opt.workers), drop_last=True, pin_memory=True
)
# =========================================== Evaluation ==========================================
prec1, accuracies = validate(test_loader, model, criterion, epoch_index, F_txt)
test_accuracy, h = mean_confidence_interval(accuracies)
print("Test accuracy", test_accuracy, "h", h[0])
print("Test accuracy", test_accuracy, "h", h[0], file=F_txt)
total_accuracy += test_accuracy
total_accuracy_vector.extend(accuracies)
total_h[r] = h
aver_accuracy, _ = mean_confidence_interval(total_accuracy_vector)
print("Aver_accuracy:", aver_accuracy, "Aver_h", total_h.mean())
print("Aver_accuracy:", aver_accuracy, "Aver_h", total_h.mean(), file=F_txt)
F_txt.close()
# ============================================== Testing End ==========================================
|
nilq/baby-python
|
python
|
# Copyright 2016 Anselm Binninger, Thomas Maier, Ralph Schaumann
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import logging
from gossip.control import convert
from gossip.util.message import MessageGossipPeerResponse, MessageGossipPeerRequest, MessageGossipPeerInit, \
MessageGossipPeerUpdate, MessageGossipAnnounce
from gossip.util.packing import pack_gossip_peer_response, pack_gossip_peer_request, pack_gossip_peer_init, \
pack_gossip_peer_update, pack_gossip_announce, PEER_UPDATE_TYPE_PEER_LOST, PEER_UPDATE_TYPE_PEER_FOUND
from gossip.util.message_code import MESSAGE_CODE_ANNOUNCE, MESSAGE_CODE_PEER_REQUEST, MESSAGE_CODE_PEER_RESPONSE, \
MESSAGE_CODE_PEER_UPDATE, MESSAGE_CODE_PEER_INIT
from gossip.util.queue_item_types import QUEUE_ITEM_TYPE_SEND_MESSAGE, QUEUE_ITEM_TYPE_CONNECTION_LOST, \
QUEUE_ITEM_TYPE_RECEIVED_MESSAGE, QUEUE_ITEM_TYPE_ESTABLISH_CONNECTION, QUEUE_ITEM_TYPE_NEW_CONNECTION
__author__ = 'Anselm Binninger, Thomas Maier, Ralph Schaumann'
class P2PController(multiprocessing.Process):
def __init__(self, from_p2p_queue, to_p2p_queue, to_api_queue, p2p_connection_pool, p2p_server_address,
announce_message_cache, update_message_cache, api_registration_handler, max_ttl,
bootstrapper_address=None):
""" This controller is responsible for all incoming messages from the P2P layer. If a P2P client sends any
message, this controller handles it in various ways.
:param from_p2p_queue: Used by the P2P layer for incoming messages and commands
:param to_p2p_queue: Messages and commands for the P2P layer are sent through this queue
:param to_api_queue: Messages and commands for the API layer are sent through this queue
:param p2p_connection_pool: Pool which contains all P2P connections/clients/sockets
:param p2p_server_address: The P2P server address for this gossip instance
:param announce_message_cache: Message cache which contains announce messages.
:param update_message_cache: Message cache for peer update messages
:param api_registration_handler: Used for registrations (via NOTIFY message) from API clients
:param max_ttl: Max. amount of hops until messages will be dropped
:param bootstrapper_address: (optional) dict to specify the bootstrapper {'host': <IPv4>: 'port': <int(port)>}
"""
multiprocessing.Process.__init__(self)
self.from_p2p_queue = from_p2p_queue
self.to_p2p_queue = to_p2p_queue
self.to_api_queue = to_api_queue
self.p2p_connection_pool = p2p_connection_pool
self.p2p_server_address = p2p_server_address
self.announce_message_cache = announce_message_cache
self.update_message_cache = update_message_cache
self.api_registration_handler = api_registration_handler
self.max_ttl = max_ttl
self.bootstrapper_address = bootstrapper_address
    def run(self):
        """ Typical run method which is used to handle P2P messages and commands. It reacts to incoming messages by
        changing Gossip's internal state, sending new messages, or establishing new connections. """
logging.info('%s started - PID: %s' % (type(self).__name__, self.pid))
# Bootstrapping part
if self.bootstrapper_address:
bootstrapper_identifier = '%s:%d' % (self.bootstrapper_address['host'], self.bootstrapper_address['port'])
self.to_p2p_queue.put({'type': QUEUE_ITEM_TYPE_ESTABLISH_CONNECTION, 'identifier': bootstrapper_identifier})
self.send_peer_request(bootstrapper_identifier)
# Usual controller part
while True:
queue_item = self.from_p2p_queue.get()
queue_item_type = queue_item['type']
message = queue_item['message']
senders_identifier = queue_item['identifier']
if queue_item_type == QUEUE_ITEM_TYPE_RECEIVED_MESSAGE:
msg_code = message.get_values()['code']
if msg_code == MESSAGE_CODE_ANNOUNCE:
logging.debug('P2PController | Handle received announce (%d): %s' % (MESSAGE_CODE_ANNOUNCE,
message))
                    # Spread the message via the API layer (registered clients only) if it was previously unknown
msg_id = self.announce_message_cache.add_message(message)
if msg_id:
logging.info('P2PController | Spread message (id: %d) through API layer' % msg_id)
# Change ttl and create new announce message
ttl = message.get_values()['TTL']
if ttl > 1 or ttl == 0:
ttl = ttl-1 if ttl > 1 else 0
packed_announce_msg = pack_gossip_announce(ttl, message.get_values()['type'],
message.get_values()['message'])['data']
announce_msg = MessageGossipAnnounce(packed_announce_msg)
# Communication with API clients works with notification messages only. Therefore we have to
# convert the announce message.
notification_msg = convert.from_announce_to_notification(msg_id, announce_msg)
for receiver in self.api_registration_handler.get_registrations(message.data_type):
if receiver != senders_identifier:
self.to_api_queue.put({'type': QUEUE_ITEM_TYPE_SEND_MESSAGE, 'identifier': receiver,
'message': notification_msg})
else:
logging.info('P2PController | Discard message (already known).')
elif msg_code == MESSAGE_CODE_PEER_REQUEST:
# Someone wants to know our known identifiers
logging.debug('P2PController | Handle received peer request (%d): %s' % (MESSAGE_CODE_PEER_REQUEST,
message))
# The peer request message contains the server address of the other peer
peer_server_identifier = message.get_values()['p2p_server_address']
self.p2p_connection_pool.update_connection(senders_identifier, peer_server_identifier)
# Build identifier list BUT exclude the identifier of the requesting peer!
own_p2p_server_identifier = '%s:%d' % (self.p2p_server_address['host'],
self.p2p_server_address['port'])
known_server_identifiers = self.p2p_connection_pool.get_server_identifiers(
identifier_to_exclude=[peer_server_identifier, own_p2p_server_identifier])
# Send the assembled identifier list
packed_data = pack_gossip_peer_response(known_server_identifiers)['data']
peer_response_msg = MessageGossipPeerResponse(packed_data)
self.to_p2p_queue.put({'type': QUEUE_ITEM_TYPE_SEND_MESSAGE, 'identifier': senders_identifier,
'message': peer_response_msg})
logging.debug('P2PController | Answering with peer response (%d): %s' % (MESSAGE_CODE_PEER_RESPONSE,
peer_response_msg))
# We've got the server identifier with the peer request, so spread it to anyone we know
senders_server_identifier = self.p2p_connection_pool.get_server_identifier(senders_identifier)
self.send_peer_update(senders_identifier, senders_server_identifier, self.max_ttl)
elif msg_code == MESSAGE_CODE_PEER_INIT:
# Someone wants to inform us about his server identifier
logging.debug('P2PController | Handle received peer init (%d): %s' % (MESSAGE_CODE_PEER_INIT,
message))
# The peer request message contains the server address of the other peer
peer_server_identifier = message.get_values()['p2p_server_address']
self.p2p_connection_pool.update_connection(senders_identifier, peer_server_identifier)
# We've got the server identifier with the peer init, so spread it to anyone we know
senders_server_identifier = self.p2p_connection_pool.get_server_identifier(senders_identifier)
self.send_peer_update(senders_identifier, senders_server_identifier, self.max_ttl)
elif msg_code == MESSAGE_CODE_PEER_RESPONSE:
# We received the known identifiers of someone
logging.debug('P2PController | Handle received peer response (%d): %s'
% (MESSAGE_CODE_PEER_RESPONSE, message))
# Use the peer response only if there is space for new connections in the pool
if self.p2p_connection_pool.get_capacity() > 0:
received_server_identifiers = message.get_values()['data']
new_identifiers = self.p2p_connection_pool.filter_new_server_identifiers(
received_server_identifiers)
# If the peer response provides new identifiers, we establish a new connection with them
if len(new_identifiers) > 0:
for new_identifier in new_identifiers:
self.to_p2p_queue.put({'type': QUEUE_ITEM_TYPE_ESTABLISH_CONNECTION,
'identifier': new_identifier})
# Send initial message
logging.debug('P2PController | Sending peer init (%d): %s' % (MESSAGE_CODE_PEER_INIT,
message))
own_p2p_server_identifier = '%s:%d' % (self.p2p_server_address['host'],
self.p2p_server_address['port'])
packed_data = pack_gossip_peer_init(own_p2p_server_identifier)['data']
self.to_p2p_queue.put({'type': QUEUE_ITEM_TYPE_SEND_MESSAGE,
'identifier': new_identifier,
'message': MessageGossipPeerInit(packed_data)})
# Stop if the pool is full
if self.p2p_connection_pool.get_capacity() <= 0:
break
else:
logging.debug('P2PController | Discarding message (%d) because pool is full!' % msg_code)
elif msg_code == MESSAGE_CODE_PEER_UPDATE:
# We received a peer update of someone
logging.debug('P2PController | Handle received peer update (%d): %s' % (MESSAGE_CODE_PEER_UPDATE,
message))
new_server_identifier = message.get_values()['address']
update_type = message.get_values()['update_type']
ttl = message.get_values()['ttl']
if ttl < int(self.max_ttl/2):
if update_type == PEER_UPDATE_TYPE_PEER_FOUND:
# Use the peer update only if there is space for a new connection in the pool
if self.p2p_connection_pool.get_capacity() > 0:
new_identifiers = self.p2p_connection_pool.filter_new_server_identifiers(
[new_server_identifier])
# If the peer update provides a new identifier, we establish a new connection with it
for new_identifier in new_identifiers:
self.to_p2p_queue.put({'type': QUEUE_ITEM_TYPE_ESTABLISH_CONNECTION,
'identifier': new_identifier})
# Send initial message
logging.debug('P2PController | Sending peer init (%d): %s' % (MESSAGE_CODE_PEER_INIT,
message))
own_p2p_server_identifier = '%s:%d' % (self.p2p_server_address['host'],
self.p2p_server_address['port'])
packed_data = pack_gossip_peer_init(own_p2p_server_identifier)['data']
self.to_p2p_queue.put({'type': QUEUE_ITEM_TYPE_SEND_MESSAGE,
'identifier': new_identifier,
'message': MessageGossipPeerInit(packed_data)})
else:
logging.debug('P2PController | Discarding message (%d) because pool is full' % msg_code)
elif update_type == PEER_UPDATE_TYPE_PEER_LOST:
# Currently a peer update of type PEER_UPDATE_TYPE_PEER_LOST does not need to be handled
pass
# If we don't know the peer update already, spread it
if ttl > 1:
ttl -= 1
self.send_peer_update(senders_identifier, new_server_identifier, ttl)
elif ttl == 0: # A ttl of 0 means that the message is unstoppable!
self.send_peer_update(senders_identifier, new_server_identifier, ttl)
else:
logging.debug('P2PController | Discarding message (%d)' % msg_code)
elif queue_item_type == QUEUE_ITEM_TYPE_CONNECTION_LOST:
# A connection has been disconnected from this instance
logging.debug('P2PController | One connection lost, try to get a new one %s' % senders_identifier)
random_identifier = self.p2p_connection_pool.get_random_identifier(senders_identifier)
if random_identifier:
self.send_peer_request(random_identifier)
elif queue_item_type == QUEUE_ITEM_TYPE_NEW_CONNECTION:
                # Our instance has learned about a new connection
senders_server_identifier = self.p2p_connection_pool.get_server_identifier(senders_identifier)
# We can inform everyone only if we know the server identifier of the sender
if senders_server_identifier:
self.send_peer_update(senders_identifier, senders_server_identifier, self.max_ttl)
else:
logging.debug('P2PController | Don\'t know the server identifier of the new connection, wait for'
' peer server address of %s' % senders_identifier)
self.exchange_messages(senders_identifier)
def send_peer_update(self, senders_identifier, senders_server_identifier, ttl):
""" Sends peer updates to several peers.
:param senders_identifier: Identifier of the sender we received this update from
:param senders_server_identifier: Server identifier of the changed peer
:param ttl: ttl to set in the new update messages
"""
packed_data = pack_gossip_peer_update(senders_server_identifier, ttl, PEER_UPDATE_TYPE_PEER_FOUND)['data']
peer_update_msg = MessageGossipPeerUpdate(packed_data)
msg_id = self.update_message_cache.add_message(peer_update_msg, valid=True)
if msg_id and senders_server_identifier != '%s:%d' % (self.p2p_server_address['host'],
self.p2p_server_address['port']):
logging.debug('P2PController | Spread information about new connection %s' % senders_identifier)
identifiers = self.p2p_connection_pool.get_identifiers()
for identifier in identifiers:
if identifier not in [senders_identifier, senders_server_identifier]:
self.to_p2p_queue.put({'type': QUEUE_ITEM_TYPE_SEND_MESSAGE, 'identifier': identifier,
'message': peer_update_msg})
def send_peer_request(self, peer_request_identifier):
""" Sends a peer request
        :param peer_request_identifier: The identifier ('host:port') of the receiving peer
"""
own_p2p_server_identifier = '%s:%d' % (self.p2p_server_address['host'], self.p2p_server_address['port'])
packed_msg = pack_gossip_peer_request(own_p2p_server_identifier)
peer_request_msg = MessageGossipPeerRequest(packed_msg['data'])
self.to_p2p_queue.put({'type': QUEUE_ITEM_TYPE_SEND_MESSAGE, 'identifier': peer_request_identifier,
'message': peer_request_msg})
def exchange_messages(self, peer_identifier):
""" Send messages to new connected peer.
:param peer_identifier: Receiving peer
"""
logging.debug('P2PController | Exchanging messages with (%s)' % peer_identifier)
for message in self.announce_message_cache.iterator():
self.to_p2p_queue.put({'type': QUEUE_ITEM_TYPE_SEND_MESSAGE, 'identifier': peer_identifier,
'message': message["message"]})
|
nilq/baby-python
|
python
|
import unittest
import pyarrow
import pymarrow
import pandas as pd
class TestPyMarrow(unittest.TestCase):
def test_add_index(self):
batch = pyarrow.RecordBatch.from_arrays([
[5, 4, 3, 2, 1],
[1, 2, 3, 4, 5]
], ["a", "b"])
actual = pymarrow.add_index(batch, ["a"])
expected = pyarrow.RecordBatch.from_arrays([
pyarrow.array([4, 3, 2, 1, 0], pyarrow.int8()),
[5, 4, 3, 2, 1],
[1, 2, 3, 4, 5]
], ["__marrow_index", "a", "b"], metadata={"_marrow:index": "a"})
pd.testing.assert_frame_equal(actual.to_pandas(), expected.to_pandas())
self.assertTrue(actual.equals(expected))
def test_sort(self):
batch = pyarrow.RecordBatch.from_arrays([
[5, 4, 3, 2, 1],
[1, 2, 3, 4, 5]
], ["a", "b"])
actual = pymarrow.sort(batch, ["a"])
expected = pyarrow.RecordBatch.from_arrays([
[1, 2, 3, 4, 5],
[5, 4, 3, 2, 1]
], ["a", "b"], metadata={"_marrow:index": "a"})
pd.testing.assert_frame_equal(actual.to_pandas(), expected.to_pandas())
self.assertTrue(actual.equals(expected))
def test_merge(self):
batch1 = pyarrow.RecordBatch.from_arrays([
[1, 1, 2, 3, 4, 5],
[6, 5, 4, 3, 2, 1]
], ["a", "b"], metadata={"_marrow:index": "a"})
batch2 = pyarrow.RecordBatch.from_arrays([
[1, 2, 3, 4, 5, 5],
[5, 4, 3, 2, 1, 0]
], ["a", "c"], metadata={"_marrow:index": "a"})
actual = pymarrow.merge(batch1, batch2, on=["a"], how="inner")
expected = pyarrow.RecordBatch.from_arrays([
[1, 1, 2, 3, 4, 5, 5],
[6, 5, 4, 3, 2, 1, 1],
[5, 5, 4, 3, 2, 1, 0]
], ["a", "b", "c"])
pd.testing.assert_frame_equal(actual.to_pandas(), expected.to_pandas())
self.assertTrue(actual.equals(expected))
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from .forms import PDBForm
from .runVis import LoadModel, DOPE, HDXRepr, RepLabels, CustomRes
import urllib
d_obj = {
"1":RepLabels,
"2":HDXRepr,
"3":DOPE,
"4":CustomRes
}
d_desc = {
"1":"Selected residues are shown in red as ball and stick representation.",
"2":{
"folding":"Green: Early, Yellow: Intermediate, Red: Late.",
"stability":"Green: Strong, Yellow: Medium, Red: Weak."
},
"3":"Lighter residues indicate better DOPE scoring regions.",
"4":"Color gradient from white to dark red indicates low -> high scoring."
}
# Create your views here.
def index(request):
if request.method == 'GET':
context = {'form':PDBForm()}
return render(request,"visualise/home.html",context)
if request.method == 'POST':
form = PDBForm(request.POST, request.FILES)
print("Files")
print(request.FILES)
if form.is_valid():
input_data = form.cleaned_data
if 'data' not in request.FILES:
request.FILES['data']=None
obj = d_obj[input_data["choice"]](input_data['PDB'],request.FILES['data'])
url = obj.open_url(open_link=False,print_out=True,data_label=input_data["hdx_opt"])
print(len(url))
context = {'state':"\n".join(obj.state)}
if input_data["choice"] == "2":
desc = d_desc["2"][input_data["hdx_opt"]]
else:
desc = d_desc[input_data["choice"]]
try:
req = urllib.request.urlopen(url)
assert req.getcode()==200
req.close()
return render(request,"visualise/results.html",{'url':url,'desc':desc})
            except Exception:
                # the generated viewer URL could not be reached; fall back to the generic iCn3D page
filename = "%s_state.txt" % input_data['PDB']
content = "\n".join(obj.state)
# response = HttpResponse(content, content_type='text/plain')
response = render(request,"visualise/results.html",{'url':'https://www.ncbi.nlm.nih.gov/Structure/icn3d/full.html','desc':desc})
response['Content-Disposition'] = 'attachment; filename={0}'.format(filename)
print(response)
return response
return HttpResponse("Testing")
def help(request):
return render(request,"visualise/help.html")
def results(request):
return render(request,"visualise/results.html",{'url':'https://www.ncbi.nlm.nih.gov/Structure/icn3d/full.html'})
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from webkitpy.benchmark_runner.generic_factory import GenericFactory
class HTTPServerDriverFactory(GenericFactory):
products = {}
|
nilq/baby-python
|
python
|
# Copyright (c) 2019-2021, Manfred Moitzi
# License: MIT License
from typing import TYPE_CHECKING, Iterable, Tuple, Optional, List, Iterator
import abc
import warnings
from ezdxf.math import Vec3, Vec2
if TYPE_CHECKING:
from ezdxf.math import Vertex, AnyVec
__all__ = ["BoundingBox2d", "BoundingBox", "AbstractBoundingBox"]
class AbstractBoundingBox:
__slots__ = ("extmin", "extmax")
def __init__(self, vertices: Iterable["Vertex"] = None):
self.extmax: Optional["AnyVec"] = None
self.extmin: Optional["AnyVec"] = None
if vertices is not None:
try:
self.extmin, self.extmax = self.extends_detector(vertices)
except ValueError:
# No or invalid data creates an empty BoundingBox
pass
def copy(self):
box = self.__class__()
box.extmin = self.extmin
box.extmax = self.extmax
return box
def __str__(self) -> str:
return f"[{self.extmin}, {self.extmax}]"
def __repr__(self) -> str:
name = self.__class__.__name__
if self.has_data:
return f"{name}({self.__str__()})"
else:
return f"{name}()"
def __iter__(self) -> Iterator["AnyVec"]:
if self.has_data:
yield self.extmin
yield self.extmax
@abc.abstractmethod
def extends_detector(
self, vertices: Iterable["Vertex"]
) -> Tuple["AnyVec", "AnyVec"]:
pass
@property
@abc.abstractmethod
def is_empty(self) -> bool:
...
@abc.abstractmethod
def inside(self, vertex: "Vertex") -> bool:
...
@abc.abstractmethod
def has_intersection(self, other: "AbstractBoundingBox") -> bool:
...
@abc.abstractmethod
def has_overlap(self, other: "AbstractBoundingBox") -> bool:
...
@abc.abstractmethod
def intersection(
self, other: "AbstractBoundingBox"
) -> "AbstractBoundingBox":
...
def contains(self, other: "AbstractBoundingBox") -> bool:
"""Returns ``True`` if the `other` bounding box is completely inside
of this bounding box.
.. versionadded:: 0.17.2
"""
return self.inside(other.extmin) and self.inside(other.extmax)
def any_inside(self, vertices: Iterable["Vertex"]) -> bool:
"""Returns ``True`` if any vertex is inside this bounding box.
Vertices at the box border are inside!
"""
if self.has_data:
return any(self.inside(v) for v in vertices)
return False
def all_inside(self, vertices: Iterable["Vertex"]) -> bool:
"""Returns ``True`` if all vertices are inside this bounding box.
Vertices at the box border are inside!
"""
if self.has_data:
# all() returns True for an empty set of vertices
has_any = False
for v in vertices:
has_any = True
if not self.inside(v):
return False
return has_any
return False
@property
def has_data(self) -> bool:
"""Returns ``True`` if the bonding box has known limits."""
return self.extmin is not None
@property
def size(self):
"""Returns size of bounding box."""
return self.extmax - self.extmin
@property
def center(self):
"""Returns center of bounding box."""
return self.extmin.lerp(self.extmax)
def extend(self, vertices: Iterable["Vertex"]) -> None:
"""Extend bounds by `vertices`.
Args:
vertices: iterable of Vertex objects
"""
v = list(vertices)
if not v:
return
if self.has_data:
v.extend([self.extmin, self.extmax])
self.extmin, self.extmax = self.extends_detector(v)
def union(self, other: "AbstractBoundingBox"):
"""Returns a new bounding box as union of this and `other` bounding
box.
"""
vertices: List["AnyVec"] = []
if self.has_data:
vertices.extend(self)
if other.has_data:
vertices.extend(other)
return self.__class__(vertices)
def rect_vertices(self) -> Tuple[Vec2, ...]:
"""Returns the corners of the bounding box in the xy-plane as
:class:`Vec2` objects.
"""
if self.has_data: # extmin is not None!
x0, y0, *_ = self.extmin # type: ignore
x1, y1, *_ = self.extmax # type: ignore
return Vec2(x0, y0), Vec2(x1, y0), Vec2(x1, y1), Vec2(x0, y1)
else:
raise ValueError("empty bounding box")
def grow(self, value: float) -> None:
"""Grow or shrink the bounding box by an uniform value in x, y and
z-axis. A negative value shrinks the bounding box.
Raises :class:`ValueError` for shrinking the size of the bounding box to
zero or below in any dimension.
"""
if self.has_data:
if value < 0.0:
min_ext = min(self.size)
if -value >= min_ext / 2.0:
raise ValueError("shrinking one or more dimensions <= 0")
self.extmax += Vec3(value, value, value) # type: ignore
self.extmin += Vec3(-value, -value, -value) # type: ignore
class BoundingBox(AbstractBoundingBox):
"""3D bounding box.
Args:
vertices: iterable of ``(x, y, z)`` tuples or :class:`Vec3` objects
"""
__slots__ = ("extmin", "extmax")
@property
def is_empty(self) -> bool:
"""Returns ``True`` if the bounding box is empty. The bounding box has a
size of 0 in any or all dimensions or is undefined.
"""
if self.has_data:
sx, sy, sz = self.size
return sx * sy * sz == 0.0
return True
def extends_detector(
self, vertices: Iterable["Vertex"]
) -> Tuple[Vec3, Vec3]:
return extends3d(vertices)
def inside(self, vertex: "Vertex") -> bool:
"""Returns ``True`` if `vertex` is inside this bounding box.
Vertices at the box border are inside!
"""
if self.extmin is None or self.extmax is None:
return False
x, y, z = Vec3(vertex).xyz
xmin, ymin, zmin = self.extmin.xyz
xmax, ymax, zmax = self.extmax.xyz
return (
(xmin <= x <= xmax) and (ymin <= y <= ymax) and (zmin <= z <= zmax)
)
def has_intersection(self, other: "AbstractBoundingBox") -> bool:
"""Returns ``True`` if this bounding box intersects with `other` but does
not include touching bounding boxes, see also :meth:`has_overlap`::
bbox1 = BoundingBox([(0, 0, 0), (1, 1, 1)])
bbox2 = BoundingBox([(1, 1, 1), (2, 2, 2)])
assert bbox1.has_intersection(bbox2) is False
"""
# Source: https://gamemath.com/book/geomtests.html#intersection_two_aabbs
# Check for a separating axis:
if (
self.extmin is None
or self.extmax is None
or other.extmin is None
or other.extmax is None
):
return False
o_min = Vec3(other.extmin) # could be a 2D bounding box
o_max = Vec3(other.extmax) # could be a 2D bounding box
# Check for a separating axis:
if self.extmin.x >= o_max.x:
return False
if self.extmax.x <= o_min.x:
return False
if self.extmin.y >= o_max.y:
return False
if self.extmax.y <= o_min.y:
return False
if self.extmin.z >= o_max.z:
return False
if self.extmax.z <= o_min.z:
return False
return True
def intersect(self, other: "AbstractBoundingBox") -> bool:
warnings.warn(
"intersect() is deprecated, replaced by has_intersection()",
DeprecationWarning,
)
return self.has_intersection(other)
def has_overlap(self, other: "AbstractBoundingBox") -> bool:
"""Returns ``True`` if this bounding box intersects with `other` but
in contrast to :meth:`has_intersection` includes touching bounding boxes too::
bbox1 = BoundingBox([(0, 0, 0), (1, 1, 1)])
bbox2 = BoundingBox([(1, 1, 1), (2, 2, 2)])
assert bbox1.has_overlap(bbox2) is True
.. versionadded:: 0.17.2
"""
# Source: https://gamemath.com/book/geomtests.html#intersection_two_aabbs
# Check for a separating axis:
if (
self.extmin is None
or self.extmax is None
or other.extmin is None
or other.extmax is None
):
return False
o_min = Vec3(other.extmin) # could be a 2D bounding box
o_max = Vec3(other.extmax) # could be a 2D bounding box
# Check for a separating axis:
if self.extmin.x > o_max.x:
return False
if self.extmax.x < o_min.x:
return False
if self.extmin.y > o_max.y:
return False
if self.extmax.y < o_min.y:
return False
if self.extmin.z > o_max.z:
return False
if self.extmax.z < o_min.z:
return False
return True
def overlap(self, other: "AbstractBoundingBox") -> bool:
warnings.warn(
"overlap() is deprecated, replaced by has_overlap()",
DeprecationWarning,
)
return self.has_overlap(other)
def cube_vertices(self) -> Tuple[Vec3, ...]:
"""Returns the 3D corners of the bounding box as :class:`Vec3` objects."""
if self.extmin is not None and self.extmax is not None:
x0, y0, z0 = self.extmin
x1, y1, z1 = self.extmax
return (
Vec3(x0, y0, z0),
Vec3(x1, y0, z0),
Vec3(x1, y1, z0),
Vec3(x0, y1, z0),
Vec3(x0, y0, z1),
Vec3(x1, y0, z1),
Vec3(x1, y1, z1),
Vec3(x0, y1, z1),
)
else:
raise ValueError("empty bounding box")
def intersection(self, other: "AbstractBoundingBox") -> "BoundingBox":
"""Returns the bounding box of the intersection cube of both
3D bounding boxes. Returns an empty bounding box if the intersection
volume is 0.
"""
new_bbox = self.__class__()
if not self.has_intersection(other):
return new_bbox
s_min_x, s_min_y, s_min_z = Vec3(self.extmin)
o_min_x, o_min_y, o_min_z = Vec3(other.extmin)
s_max_x, s_max_y, s_max_z = Vec3(self.extmax)
o_max_x, o_max_y, o_max_z = Vec3(other.extmax)
new_bbox.extend(
[
(
max(s_min_x, o_min_x),
max(s_min_y, o_min_y),
max(s_min_z, o_min_z),
),
(
min(s_max_x, o_max_x),
min(s_max_y, o_max_y),
min(s_max_z, o_max_z),
),
]
)
return new_bbox
class BoundingBox2d(AbstractBoundingBox):
"""Optimized 2D bounding box.
Args:
vertices: iterable of ``(x, y[, z])`` tuples or :class:`Vec3` objects
"""
__slots__ = ("extmin", "extmax")
@property
def is_empty(self) -> bool:
"""Returns ``True`` if the bounding box is empty. The bounding box has a
size of 0 in any or all dimensions or is undefined.
"""
if self.has_data:
sx, sy = self.size
return sx * sy == 0.0
return True
def extends_detector(
self, vertices: Iterable["Vertex"]
) -> Tuple[Vec2, Vec2]:
return extends2d(vertices)
def inside(self, vertex: "Vertex") -> bool:
"""Returns ``True`` if `vertex` is inside this bounding box.
Vertices at the box border are inside!
"""
if self.extmin is None or self.extmax is None:
return False
v = Vec2(vertex)
min_ = self.extmin
max_ = self.extmax
return (min_.x <= v.x <= max_.x) and (min_.y <= v.y <= max_.y)
def has_intersection(self, other: "AbstractBoundingBox") -> bool:
"""Returns ``True`` if this bounding box intersects with `other` but does
not include touching bounding boxes, see also :meth:`has_overlap`::
bbox1 = BoundingBox2d([(0, 0), (1, 1)])
bbox2 = BoundingBox2d([(1, 1), (2, 2)])
assert bbox1.has_intersection(bbox2) is False
"""
# Source: https://gamemath.com/book/geomtests.html#intersection_two_aabbs
if (
self.extmin is None
or self.extmax is None
or other.extmin is None
or other.extmax is None
):
return False
# Check for a separating axis:
if self.extmin.x >= other.extmax.x:
return False
if self.extmax.x <= other.extmin.x:
return False
if self.extmin.y >= other.extmax.y:
return False
if self.extmax.y <= other.extmin.y:
return False
return True
def intersect(self, other: "AbstractBoundingBox") -> bool:
warnings.warn(
"intersect() is deprecated, replaced by has_intersection()",
DeprecationWarning,
)
return self.has_intersection(other)
def intersection(self, other: "AbstractBoundingBox") -> "BoundingBox2d":
"""Returns the bounding box of the intersection rectangle of both
2D bounding boxes. Returns an empty bounding box if the intersection
area is 0.
"""
new_bbox = self.__class__()
if not self.has_intersection(other):
return new_bbox
s_min_x, s_min_y = Vec2(self.extmin)
o_min_x, o_min_y = Vec2(other.extmin)
s_max_x, s_max_y = Vec2(self.extmax)
o_max_x, o_max_y = Vec2(other.extmax)
new_bbox.extend(
[
(max(s_min_x, o_min_x), max(s_min_y, o_min_y)),
(min(s_max_x, o_max_x), min(s_max_y, o_max_y)),
]
)
return new_bbox
def has_overlap(self, other: "AbstractBoundingBox") -> bool:
"""Returns ``True`` if this bounding box intersects with `other` but
in contrast to :meth:`has_intersection` includes touching bounding boxes too::
bbox1 = BoundingBox2d([(0, 0), (1, 1)])
bbox2 = BoundingBox2d([(1, 1), (2, 2)])
assert bbox1.has_overlap(bbox2) is True
.. versionadded:: 0.17.2
"""
# Source: https://gamemath.com/book/geomtests.html#intersection_two_aabbs
if (
self.extmin is None
or self.extmax is None
or other.extmin is None
or other.extmax is None
):
return False
# Check for a separating axis:
if self.extmin.x > other.extmax.x:
return False
if self.extmax.x < other.extmin.x:
return False
if self.extmin.y > other.extmax.y:
return False
if self.extmax.y < other.extmin.y:
return False
return True
def overlap(self, other: "AbstractBoundingBox") -> bool:
warnings.warn(
"overlap() is deprecated, replaced by has_overlap()",
DeprecationWarning,
)
return self.has_overlap(other)
def extends3d(vertices: Iterable["Vertex"]) -> Tuple[Vec3, Vec3]:
minx, miny, minz = None, None, None
maxx, maxy, maxz = None, None, None
for v in vertices:
v = Vec3(v)
if minx is None:
minx, miny, minz = v.xyz # type: ignore
maxx, maxy, maxz = v.xyz # type: ignore
else:
x, y, z = v.xyz
if x < minx:
minx = x
elif x > maxx:
maxx = x
if y < miny:
miny = y
elif y > maxy:
maxy = y
if z < minz:
minz = z
elif z > maxz:
maxz = z
if minx is None:
raise ValueError("No vertices give.")
return Vec3(minx, miny, minz), Vec3(maxx, maxy, maxz)
def extends2d(vertices: Iterable["Vertex"]) -> Tuple[Vec2, Vec2]:
minx, miny = None, None
maxx, maxy = None, None
for v in vertices:
v = Vec2(v)
x, y = v.x, v.y # type: ignore
if minx is None:
minx = x
maxx = x
miny = y
maxy = y
else:
if x < minx:
minx = x
elif x > maxx:
maxx = x
if y < miny:
miny = y
elif y > maxy:
maxy = y
if minx is None:
raise ValueError("No vertices give.")
return Vec2(minx, miny), Vec2(maxx, maxy)
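# A small usage sketch (illustrative only, not part of the original module),
# derived from the classes defined above:
#
#     bbox = BoundingBox([(0, 0, 0), (10, 10, 10)])
#     bbox.extend([(15, 5, 5)])                 # extmax grows to (15, 10, 10)
#     other = BoundingBox([(5, 5, 5), (20, 20, 20)])
#     assert bbox.has_intersection(other)
#     common = bbox.intersection(other)         # box from (5, 5, 5) to (15, 10, 10)
#     merged = bbox.union(other)                # box from (0, 0, 0) to (20, 20, 20)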
|
nilq/baby-python
|
python
|
#
# Author: Robert Abram <rabram991@gmail.com>
#
# This file is subject to the terms and conditions defined in the
# file 'LICENSE', which is part of this source code package.
#
#
# Signal handlers for model change events, see: proj.settings.appconfig.
# These are a great way to log user activity.
#
from datetime import datetime
from django.apps import apps
from django.core.exceptions import ObjectDoesNotExist
from proj.middleware import get_current_user
# These are apps that should not be remotely logged
LOGGING_EXCLUDED_APPS = (
'auth',
'axes',
'oauth2_provider',
)
# These are models that should not be remotely logged
LOGGING_EXCLUDED_MODELS = (
'AccessLog',
'User',
'SystemActions',
)
# These are model fields that should not be remotely logged
LOGGING_EXCLUDED_FIELDS = (
'id',
'http_passwd',
)
def signal_model_pre_save(sender, instance, **kwargs):
pass
def signal_model_post_save(sender, instance, **kwargs):
pass
def signal_model_pre_delete(sender, instance, **kwargs):
pass
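# Illustrative wiring sketch (an assumption, not shown in this module): per the
# note above, these handlers are connected elsewhere (see proj.settings.appconfig),
# typically in AppConfig.ready() along these lines:
#
#     from django.db.models.signals import pre_save, post_save, pre_delete
#     pre_save.connect(signal_model_pre_save)
#     post_save.connect(signal_model_post_save)
#     pre_delete.connect(signal_model_pre_delete)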
|
nilq/baby-python
|
python
|
from common import *
import collections
import numpy as np
def test_astype(ds_local):
ds = ds_local
ds_original = ds.copy()
    #ds.columns['x'] = (ds.columns['x']*1).copy() # convert to non-big-endian for now
ds['x'] = ds['x'].astype('f4')
assert ds.x.evaluate().dtype == np.float32
assert ds.x.tolist() == ds_original.x.as_numpy().evaluate().astype(np.float32).tolist()
def test_astype_str():
df = vaex.from_arrays(x=['10,010', '-50,0', '11,111'])
df['x'] = df['x'].str.replace(',', '').evaluate()
df['x'] = (df['x'].astype('float')).astype('int64').evaluate()
assert df.columns['x'].dtype == np.int64
assert df.x.dtype == np.int64
def test_astype_dtype():
df = vaex.from_arrays(x=[0, 1])
assert df.x.astype(str).data_type() in [pa.string(), pa.large_string()]
df = vaex.from_arrays(x=[np.nan, 1])
# assert df.x.astype(str).dtype == vaex.column.str_type
assert df.x.astype(str).data_type() in [pa.string(), pa.large_string()]
|
nilq/baby-python
|
python
|
""" File: P3_semi_supervised_topic_modeling.py
Description: Loads a previously created pre-processed chat corpus, then performs
semi-supervised topic modeling utilizing CorEx and GuidedLDA.
INPUT FILES:
0) anchors.txt - anchor/seed words each on their own line
Previously created preprocessed chat corpus from either:
1) wholeChatsFilePOS_N_ADJ_V.txt -- preprocessing keeping nouns, adjectives, and verbs
2) wholeChatsFilePOS_N_ADJ.txt -- preprocessing keeping nouns and adjectives
3) wholeChatsFile.txt -- NO POS preprocessing so all parts of speech
4) onlyQuestionsFile.txt -- Only initial question of chats
OUTPUT FILES:
1) "raw_" text (.txt) file listing topics with each word scored
2) "LDA_" text (.txt) file containing only the text for the
specified number of topics with the specified number of words per topic
Acknowledgements:
    Here we use the CorEx (Correlation Explanation) package available at GitHub:
https://github.com/gregversteeg/corex_topic
    Here we use the GuidedLDA package available at GitHub:
https://github.com/vi3k6i5/GuidedLDA
NOTE: We had difficulty installing GuidedLDA, but we were finally successful
by following the work-around posted at:
https://github.com/dex314/GuidedLDA_WorkAround
"""
import os.path
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from corextopic import corextopic as ct
import pandas as pd
import nltk
from time import time
import re, sys, random, math
import numpy as np
from lda import guidedlda as glda
from lda import glda_datasets as gldad
from collections import Counter
from timeit import default_timer as timer
from P2_utility_functions import *
from P3_utility_functions import *
def main():
print('Welcome to Phase 3 which runs the semi-supervised topic modeling techniques.',
'\n\nYou should have first run Phase 1 to pre-process your chat data.',
'\nIt would generate cleaned chat files varying the parts of speech or question-only.',
'\nFiles generated are: wholeChatsFile.txt, wholeChatsFilePOS_N_ADJ_V.txt,',
'\nwholeChatsFilePOS_N_ADJ.txt, and onlyQuestionsFile.txt.\n\n')
print('\n\nYou could have also run Phase 2 to execute unsupervised topic modeling techniques.',
'\nIt would generate files: possible_2_word_anchors.txt and possible_3_word_anchors.txt which',
'\nyou might use to create a text-file (.txt) with anchors one per line.\n')
prompt = "\nStep 1. Please input the pre-processed (.txt) file." + \
'\n(For example: "wholeChatsFile.txt"):'
fileName = getFileName(prompt)
chats = readChatCorpusFile(fileName)
prompt = "\nStep 2. Please input the anchors/seeds (.txt) file." + \
'\n(For example: "anchors.txt"):'
fileName = getFileName(prompt)
anchorList = readAnchorsFile(fileName)
modelDict = {'GuidedLDA':run_GuidedLDA,'CorEx':run_CorEx}
n_topics = getPositiveInteger('\nStep 3. Please specify the number of topics. (suggested range 10-20)\n')
    n_words_per_topic = getPositiveInteger('\nStep 4. Please specify the number of words per topic. (suggested range 5-10)\n')
for model in modelDict:
print("="*35)
print("\nPerforming", model,"topic modeling -- please wait it might take a couple minutes!")
topicList = modelDict[model](chats, anchorList, n_topics, n_words_per_topic)
averagePMI, averageLCP, averageNZ = calculateTopicCoherenceMetrics(chats, topicList)
print("\nResults for",model," TC-PMI %3.3f, TC-LCP %3.3f, TC-NZ %3.3f:" % (averagePMI, averageLCP, averageNZ))
for topic in topicList:
print(topic)
main()
|
nilq/baby-python
|
python
|
# ==============================================================================
# Imports
# ==============================================================================
import numpy as np
import os, glob
from tqdm import tqdm as tqdm
import tensorflow.compat.v1 as tf
tfq = tf.quantization
import tensorflow_probability as tfp
tfd = tfp.distributions
from binary_io import to_bit_string, from_bit_string
from misc import stateless_normal_sample
# ==============================================================================================
# ==============================================================================================
# ==============================================================================================
#
# Greedy Sampling
#
# ==============================================================================================
# ==============================================================================================
# ==============================================================================================
def code_greedy_sample(t_loc,
t_scale,
p_loc,
p_scale,
n_bits_per_step,
n_steps,
seed,
rho=1.):
n_samples = int(2**n_bits_per_step)
# The scale divisor needs to be square rooted because
# we are dealing with standard deviations and not variances
scale_divisor = np.sqrt(n_steps)
proposal_shard = tfd.Normal(loc=p_loc / n_steps,
scale=rho * p_scale / scale_divisor)
target = tfd.Normal(loc=t_loc,
scale=t_scale)
# Setup greedy sampler for loop
def loop_step(i, sample_index, best_sample):
samples = stateless_normal_sample(loc=proposal_shard.loc,
scale=proposal_shard.scale,
num_samples=n_samples,
seed=1000 * seed + i)
test_samples = tf.tile(tf.expand_dims(best_sample, 0), [n_samples, 1]) + samples
log_probs = tf.reduce_sum(target.log_prob(test_samples), axis=1)
index = tf.argmax(log_probs)
best_sample = test_samples[index, :]
return [i + 1, tf.concat((sample_index, [index]), axis=0), best_sample]
i = tf.constant(0)
best_sample = tf.zeros(tf.shape(p_loc), dtype=tf.float32)
sample_index = tf.cast([], tf.int32)
cond = lambda i, sample_index, best_sample: i < n_steps
_, sample_index, best_sample = tf.while_loop(cond=cond,
body=loop_step,
loop_vars=[i, sample_index, best_sample],
shape_invariants=[i.get_shape(),
tf.TensorShape([None]),
best_sample.get_shape()])
sample_index = tf.map_fn(lambda x: tf.numpy_function(to_bit_string, [x, n_bits_per_step], tf.string),
sample_index,
dtype=tf.string)
sample_index = tf.numpy_function(lambda indices: ''.join([ind.decode('utf-8') for ind in indices]),
[sample_index],
tf.string)
return best_sample, sample_index
def decode_greedy_sample(sample_index,
p_loc,
p_scale,
n_bits_per_step,
n_steps,
seed,
rho=1.):
# Perform a for loop for the below list comprehension
#
# indices = [from_bit_string(sample_index[i:i + n_bits_per_step])
# for i in range(0, n_bits_per_step * n_steps, n_bits_per_step)]
#
i = tf.constant(0, tf.int32)
indices = tf.cast([], tf.int32)
cond = lambda i, indices: i < n_bits_per_step * n_steps
def index_loop_step(i, indices):
index = tf.numpy_function(from_bit_string,
[tf.strings.substr(sample_index, i, n_bits_per_step)],
tf.int64)
index = tf.cast(index, tf.int32)
return [i + n_bits_per_step, tf.concat((indices, [index]), axis=0)]
_, indices = tf.while_loop(cond=cond,
body=index_loop_step,
loop_vars=[i, indices],
shape_invariants=[i.get_shape(),
tf.TensorShape([None])])
# ---------------------------------------------------------------------
    # Recover the sample
# ---------------------------------------------------------------------
# The scale divisor needs to be square rooted because
# we are dealing with standard deviations and not variances
scale_divisor = np.sqrt(n_steps)
proposal_shard = tfd.Normal(loc=p_loc / n_steps,
scale=rho * p_scale / scale_divisor)
n_samples = int(2**n_bits_per_step)
# Loop variables
i = tf.constant(0, tf.int32)
sample = tf.zeros(tf.shape(p_loc), dtype=tf.float32)
# Loop condition
cond = lambda i, indices: i < n_steps
# Loop body
def sample_loop_step(i, sample):
samples = tf.tile(tf.expand_dims(sample, 0), [n_samples, 1])
samples = samples + stateless_normal_sample(loc=proposal_shard.loc,
scale=proposal_shard.scale,
num_samples=n_samples,
seed=1000 * seed + i)
return [i + 1, samples[indices[i], :]]
# Run loop
_, sample = tf.while_loop(cond=cond,
body=sample_loop_step,
loop_vars=[i, sample],
shape_invariants=[i.get_shape(),
sample.get_shape()])
return sample
def code_grouped_greedy_sample(sess,
target,
proposal,
n_steps,
n_bits_per_step,
seed,
max_group_size_bits=12,
adaptive=True,
backfitting_steps=0,
use_log_prob=False,
rho=1.):
# Make sure the distributions have the correct type
if target.dtype is not tf.float32:
raise Exception("Target datatype must be float32!")
if proposal.dtype is not tf.float32:
raise Exception("Proposal datatype must be float32!")
n_bits_per_group = n_bits_per_step * n_steps
num_dimensions = sess.run(tf.reduce_prod(tf.shape(proposal.loc)))
# rescale proposal by the proposal
p_loc = sess.run(tf.reshape(tf.zeros_like(proposal.loc), [-1]))
p_scale = sess.run(tf.reshape(tf.ones_like(proposal.scale), [-1]))
# rescale target by the proposal
t_loc = sess.run(tf.reshape((target.loc - proposal.loc) / proposal.scale, [-1]))
t_scale = sess.run(tf.reshape(target.scale / proposal.scale, [-1]))
kl_divergences = tf.reshape(tfd.kl_divergence(target, proposal), [-1])
# ======================================================================
# Preprocessing step: determine groups for sampling
# ======================================================================
group_start_indices = [0]
group_kls = []
kl_divs = sess.run(kl_divergences)
total_kl_bits = np.sum(kl_divs) / np.log(2)
print("Total KL to split up: {:.2f} bits, "
"maximum bits per group: {}, "
"estimated number of groups: {},"
"coding {} dimensions".format(total_kl_bits,
n_bits_per_group,
total_kl_bits // n_bits_per_group + 1,
num_dimensions
))
current_group_size = 0
current_group_kl = 0
n_nats_per_group = n_bits_per_group * np.log(2) - 1
for idx in range(num_dimensions):
group_bits = np.log(current_group_size + 1) / np.log(2)
if group_bits >= max_group_size_bits or \
current_group_kl + kl_divs[idx] >= n_nats_per_group or \
idx == num_dimensions - 1:
group_start_indices.append(idx)
group_kls.append(current_group_kl / np.log(2))
current_group_size = 1
current_group_kl = kl_divs[idx]
else:
current_group_kl += kl_divs[idx]
current_group_size += 1
# ======================================================================
# Sample each group
# ======================================================================
results = []
group_start_indices += [num_dimensions]
# Get the importance sampling op before looping it to avoid graph construction cost
# The length is variable, hence the shape is [None]
target_loc = tf.placeholder(tf.float32, shape=[None])
target_scale = tf.placeholder(tf.float32, shape=[None])
prop_loc = tf.placeholder(tf.float32, shape=[None])
prop_scale = tf.placeholder(tf.float32, shape=[None])
seed_feed = tf.placeholder(tf.int32)
greedy_op = code_greedy_sample(t_loc=target_loc,
t_scale=target_scale,
p_loc=prop_loc,
p_scale=prop_scale,
n_bits_per_step=n_bits_per_step,
n_steps=n_steps,
seed=seed_feed,
rho=rho)
for i in tqdm(range(len(group_start_indices) - 1)):
start_idx = group_start_indices[i]
end_idx = group_start_indices[i + 1]
result = sess.run(greedy_op, feed_dict={target_loc: t_loc[start_idx:end_idx],
target_scale: t_scale[start_idx:end_idx],
prop_loc: p_loc[start_idx:end_idx],
prop_scale: p_scale[start_idx:end_idx],
seed_feed: seed + i})
results.append(result)
samples, codes = zip(*results)
bitcode = ''.join([c.decode('utf-8') for c in codes])
sample = tf.concat(samples, axis=0)
# Rescale the sample
sample = tf.reshape(proposal.scale, [-1]) * sample + tf.reshape(proposal.loc, [-1])
sample = sess.run(sample)
return sample, bitcode, group_start_indices
def decode_grouped_greedy_sample(sess,
bitcode,
group_start_indices,
proposal,
n_bits_per_step,
n_steps,
seed,
adaptive=True,
rho=1.):
# Make sure the distributions have the correct type
if proposal.dtype is not tf.float32:
raise Exception("Proposal datatype must be float32!")
n_bits_per_group = n_bits_per_step * n_steps
num_dimensions = sess.run(tf.reduce_prod(tf.shape(proposal.loc)))
# ======================================================================
# Decode each group
# ======================================================================
samples = []
group_start_indices += [num_dimensions]
p_loc = sess.run(tf.reshape(tf.zeros_like(proposal.loc), [-1]))
p_scale = sess.run(tf.reshape(tf.ones_like(proposal.scale), [-1]))
# Placeholders
sample_index = tf.placeholder(tf.string)
prop_loc = tf.placeholder(tf.float32, shape=[None])
prop_scale = tf.placeholder(tf.float32, shape=[None])
seed_feed = tf.placeholder(tf.int32)
# Get decoding op
decode_greedy_op = decode_greedy_sample(sample_index=sample_index,
p_loc=prop_loc,
p_scale=prop_scale,
n_bits_per_step=n_bits_per_step,
n_steps=n_steps,
seed=seed_feed,
rho=rho)
for i in tqdm(range(len(group_start_indices) - 1)):
if bitcode[n_bits_per_group * i: n_bits_per_group * (i + 1)] == '':
break
samp = sess.run(decode_greedy_op, feed_dict = {
sample_index: bitcode[n_bits_per_group * i: n_bits_per_group * (i + 1)],
prop_loc: p_loc[group_start_indices[i]:group_start_indices[i + 1]],
prop_scale: p_scale[group_start_indices[i]:group_start_indices[i + 1]],
seed_feed: seed + i
})
samples.append(samp)
sample = tf.concat(samples, axis=0)
# Rescale the sample
sample = tf.reshape(proposal.scale, [-1]) * sample + tf.reshape(proposal.loc, [-1])
return sess.run(sample)
|
nilq/baby-python
|
python
|
from .features import Dictionary, RegexMatches, Stemmed, Stopwords
name = "portuguese"
try:
import enchant
dictionary = enchant.Dict("pt")
except enchant.errors.DictNotFoundError:
raise ImportError("No enchant-compatible dictionary found for 'pt'. " +
"Consider installing 'myspell-pt'.")
dictionary = Dictionary(name + ".dictionary", dictionary.check)
"""
:class:`~revscoring.languages.features.Dictionary` features via
:class:`enchant.Dict` "pt". Provided by `myspell-pt`
"""
try:
from nltk.corpus import stopwords as nltk_stopwords
stopwords = set(nltk_stopwords.words('portuguese'))
except LookupError:
raise ImportError("Could not load stopwords for {0}. ".format(__name__) +
"You may need to install the nltk 'stopwords' " +
"corpora. See http://www.nltk.org/data.html")
stopwords = Stopwords(name + ".stopwords", stopwords)
"""
:class:`~revscoring.languages.features.Stopwords` features provided by
:func:`nltk.corpus.stopwords` "portuguese"
"""
try:
from nltk.stem.snowball import SnowballStemmer
stemmer = SnowballStemmer("portuguese")
except ValueError:
raise ImportError("Could not load stemmer for {0}. ".format(__name__))
stemmed = Stemmed(name + ".stemmed", stemmer.stem)
"""
:class:`~revscoring.languages.features.Stemmed` word features via
:class:`nltk.stem.snowball.SnowballStemmer` "portuguese"
"""
badword_regexes = [
r"baba[ckq](as?|ão|ões|u?i[cçs]s?e)", # douchebag
r"bi(ch|x)as?", # gay man
r"boio(l[ai](tico)?|l[aã]o|lo(go|[gj]i[sx]ta))s?", # gay man
r"bo(qu|k)etes?", # blowjob
r"bo[sx]t(ao?s?|alhao?)", # shit
r"b[uo]s?[cçs]s?et+(a[os]?|inha)?", # pussy (vagina)
r"bu[mn]d((inh)?as?|[ãa]o)", # ass
r"b[uo]rr[oaei](ce|[ius])?", # donkey/jackass
r"[ck]a[csç]s?ete?s?", # bludgeon
# shit
r"[ck]ag(a(r|n?do|dao?|n?ei(r[ao])?|(lh)?a?o|nitas?|dela|lhoto)?|ou)",
r"[ck]ara(l?hl?([ou]?s?|ao|inh[ou]s?)|i([ou]s?)?)", # fuck
r"(ch|x)at[ao]s?", # boring
r"(ch|x)up[aeiou]([dv]a|te|nha|ndo|r|u)?", # blow me
r"[ck]o[ck]ô", # poo
r"[ck]om(er?|i)", # fucked
r"[ck]onas?", # cunt
r"[ck]uz([aã]o|inho)", # asshole
r"doid(inh)?[ao]s?", # crazy
r"fed?(id?[ao]|e|orent[ao])s?", # stinks/stinky
r"fei[ao]s?", # ugly
r"fendi", # ???
r"f[ou]d(a[os]?|e[ru]?|idos?)", # fuck
r"go[sx]tos([ao]s?|ão|ões|onas?)", # hot
r"idiot(a|i[cçs]s?e)s?", # idiot
r"lo(k[oa]s?|u[ck]([oa]s?|ura|a(mente)?))", # crazy
r"maconheir[ao]s?", # bothead
r"m[áa]fia", # mafia
r"maldizentes", # slanderers
r"mecos", # cum ???
r"mentir(a|os[oa])s?", # lie/liar
r"merd(a|[ãa]o|oso|ica)s?", # shit
r"noob", # noob
r"ot[áa]ri[oa]s?", # sucker
r"pari[ou]", # part of "puta que o pariu"
r"pategos", # hick / yokel
r"pau", # dick
r"peid([ao]|[ãa]o|ei|ar(ia)?|ando|aç[oa])s?", # fart
r"p[êe]nis+", # penis
r"pilas?", # dick
r"piroca", # dick
r"porcaria", r"porn[ôo]?", # filth/porn
r"po(rr|h)a", # cum
r"pum", # fart
r"punhet(a|eir[oa])s?", # jack off / masturbate
r"put([ao]|[ao]na|aria|eiro|inha)s?", # bitch/hooker
r"safad([ao]|ona)s?", # shameless
r"te[sz]ão", r"te[sz]ud[oa]s?", # turn-on / horny
r"tran[sz]([aá](r(am)?|n?do)?|ou)", # sex
r"tretas?", # bullshit
r"trou?(ch|x)as?",
r"vadi([ao]s?|agem)", # bitch
r"viad(agem?|[aã]?o|inh[ou])s?", # gay person ("fucker")
r"xixi" # pee
]
badwords = RegexMatches(name + ".badwords", badword_regexes)
"""
:class:`~revscoring.languages.features.RegexMatches` features via a list of
badword detecting regexes.
"""
informal_regexes = [
r"adoro", # love
r"aki", # here
r"amo", # master
r"(?:b+?l+?a+?h*)+", # bla, blah, bbblllaaaahhhhhblah
r"carambas?", # OMG
r"coco", # coconut
r"copie[im]", # I copied
r"delicia", # delicious
r"editei", # edited
r"enfiar?", # to stick (up one's ass)
r"entao", # then
r"estrag(ar|uem)", # spoiled / ruined
r"fixe", # cool
r"gajo", # dude
r"h[aiou](h[aeiou])*", r"h[e](h[aeiou])+", # hi, ha, hehe, hohoho
r"k+?", # k, kkkkkkkkkkkkkkk
r"lindo", # pretty
r"l+[uo][uol]*l", # lol, LOLOLOL, LLLLoOOoLLL
r"mae", # mom
r"mto", # very
r"naum", # no (slang)
r"n[óo]is", # it's us (slang)
r"odeio", # hate
r"oi+", # hi
r"ol[aá]", # hello
r"ratas?", # "rat" -- a snitch
r"(?:rs)+", # lol
r"tava", # was / were (slang)
r"tbm", # also (slang)
r"vao", # vain
r"vcs", r"voce", r"voces", # you
r"xau" # bye
]
informals = RegexMatches(name + ".informals", informal_regexes)
"""
:class:`~revscoring.languages.features.RegexMatches` features via a list of
informal word detecting regexes.
"""
|
nilq/baby-python
|
python
|
# --------------------------------------------------------------------
# Directory syncer by Alexander Sirotin (c) 2016
# Originally created for syncing between home NAS backup and Amazon cloud
# Both are mounted on the host machine (Amazon cloud is mounted using acd_cli)
# This program comes without any warranty, use it at your own risk.
# Feel free to contact me at sirotin@gmail.com
# --------------------------------------------------------------------
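# Example invocation (illustrative; the script name and mount points below are
# placeholders -- only the flags come from configure() at the bottom of the file):
#   python directory_syncer.py --left /mnt/nas/backup --right /mnt/acd/backup --dry_run
# Add --verbose to confirm each copy interactively.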
import os
import sys
import filecmp
import logging
import argparse
import shutil
class DirectorySyncer:
# Note: Recursive function, goes over all the sub-directories as well
def __compareTwoDirectories(self, left, right):
logging.debug("Comparing between '%s' and '%s'" % (left, right))
# Make sure both directories exists
if not os.path.exists(left) or not os.path.isdir(left):
raise Exception, "Provided left directory '%s' does not exist or not a directory!" % left
if not os.path.exists(right) or not os.path.isdir(right):
raise Exception, "Provided right directory '%s' does not exist or not a directory!" % right
# Compare the two directories and create two lists containing the missing parts
result = filecmp.dircmp(left, right)
leftOnly = self.__removeSpecial(result.left_only)
rightOnly = self.__removeSpecial(result.right_only)
# Add full path to the elements
leftOnly = self.__convertToFullPath(left, leftOnly)
rightOnly = self.__convertToFullPath(right, rightOnly)
# Go over all the files and make sure that their sizes match
commonFiles = self.__removeSpecial(result.common_files)
for file in commonFiles:
leftPath = os.path.join(left, file)
leftFileSize = os.path.getsize(leftPath)
rightPath = os.path.join(right, file)
rightFileSize = os.path.getsize(rightPath)
if leftFileSize > rightFileSize:
logging.warn("Problem found: Size of '%s' (%s) is bigger than '%s' (%s)" % (leftPath, self.__formatDiskSpace(leftFileSize), rightPath, self.__formatDiskSpace(rightFileSize)))
leftOnly.append(leftPath)
elif rightFileSize > leftFileSize:
logging.warn("Problem found: Size of '%s' (%s) is bigger than '%s' (%s)" % (rightPath, self.__formatDiskSpace(rightFileSize), leftPath, self.__formatDiskSpace(leftFileSize)))
rightOnly.append(rightPath)
# Get common dirs for recursive call
dirs = self.__removeSpecial(result.common_dirs)
for dir in dirs:
childLeftOnly, childRightOnly = self.__compareTwoDirectories(os.path.join(left, dir), os.path.join(right, dir))
leftOnly.extend(childLeftOnly)
rightOnly.extend(childRightOnly)
return leftOnly, rightOnly
def __removeSpecial(self, list):
return [x for x in list if not x.startswith(".")]
def __convertToFullPath(self, basePath, list):
for i in range(len(list)):
list[i] = os.path.join(basePath, list[i])
return list
def __removeRootLocation(self, path, list):
n = len(path) + 1
for i in range(len(list)):
list[i] = list[i][n:]
return list
def __getSizeStr(self, path):
size = os.path.getsize(path)
if (os.path.isdir(path)):
size += self.__calculateDiskSpace(path, os.listdir(path))
return self.__formatDiskSpace(size)
def __calculateDiskSpace(self, path, list):
diskSpace = 0
for x in list:
fullX = os.path.join(path, x)
diskSpace += os.path.getsize(fullX)
if os.path.isdir(fullX):
content = [os.path.join(fullX, f) for f in os.listdir(fullX)]
diskSpace += self.__calculateDiskSpace(path, content)
return diskSpace
def __askYesNoQuestion(self, message):
yes = set(["yes", "y", ""])
no = set(["no", "n"])
while True:
sys.stdout.write("%s [Y/N] " % message)
choice = raw_input().lower()
if choice in yes:
return True
elif choice in no:
return False
else:
print("Please response with a valid answer.")
def __buildYesNoQuestion(self, fromPath, toPath, file):
f = os.path.join(fromPath, file)
t = os.path.join(toPath, file)
return self.__askYesNoQuestion("Copy from '%s' to '%s' (%s) ? " % (f, t, self.__getSizeStr(f)))
def __verboseSelectFromList(self, fromPath, toPath, list):
return [x for x in list if self.__buildYesNoQuestion(fromPath, toPath, x)]
    # Note: Recursive function, enters each directory and copies each file separately
def __copyMissingFiles(self, fromPath, toPath, list, dryRun):
for file in list:
src = os.path.join(fromPath, file)
dst = os.path.join(toPath, file)
# In case destination file exists, remove it...
if (not dryRun) and os.path.exists(dst):
os.remove(dst)
try:
if os.path.isdir(src):
# Create the destination directory
if not dryRun:
os.mkdir(dst)
# Recursive call to copy all directory content
recursiveList = os.listdir(src)
self.__copyMissingFiles(src, dst, recursiveList, dryRun)
else:
logging.info("Copying '%s' to '%s' (%s)" % (src, dst, self.__getSizeStr(src)))
if not dryRun:
shutil.copy(src, dst)
except Exception as e:
# In case of exception, we want to remove dst in order to avoid partially copied files
if not dryRun:
if os.path.isdir(dst):
shutil.rmtree(dst)
else:
os.remove(dst)
raise e
def __formatDiskSpace(self, space):
KB = 1024.0
MB = 1024 * KB
GB = 1024 * MB
if space < 10 * MB:
return "%.2f KB" % (space / KB)
if (space < GB):
return "%.2f MB" % (space / MB)
return "%.2f GB" % (space / GB)
def __showNeededDiskSpace(self, pointA, pointB, leftOnly, rightOnly):
logging.info("Needed disk space for sync point '%s' is %s" % (pointA, self.__formatDiskSpace(self.__calculateDiskSpace(pointB, rightOnly))))
logging.info("Needed disk space for sync point '%s' is %s" % (pointB, self.__formatDiskSpace(self.__calculateDiskSpace(pointA, leftOnly))))
def sync(self, pointA, pointB, dryRun=False, verbose=False):
if dryRun:
logging.warn("DRY-RUN - No actual copies will occur !!!")
logging.info("Syncing between '%s' and '%s'" % (pointA, pointB))
try:
# Create two lists contains the differences between the given points
leftOnly, rightOnly = self.__compareTwoDirectories(pointA, pointB)
leftOnlyLen = len(leftOnly)
rightOnlyLen = len(rightOnly)
logging.info("Found %d differences (%d are missing in '%s' and %d are missing in '%s')" % (leftOnlyLen + rightOnlyLen, rightOnlyLen, pointA, leftOnlyLen, pointB))
# Remove base path from results
leftOnly = self.__removeRootLocation(pointA, leftOnly)
rightOnly = self.__removeRootLocation(pointB, rightOnly)
# Show needed disk space
self.__showNeededDiskSpace(pointA, pointB, leftOnly, rightOnly)
# In case of verbose flag, ask the user what to do
if (not dryRun) and verbose:
leftOnly = self.__verboseSelectFromList(pointA, pointB, leftOnly)
rightOnly = self.__verboseSelectFromList(pointB, pointA, rightOnly)
# Show needed disk space
self.__showNeededDiskSpace(pointA, pointB, leftOnly, rightOnly)
# Recalculate number of differences
leftOnlyLen = len(leftOnly)
rightOnlyLen = len(rightOnly)
logging.info("Start processing %d differences (%d are missing in '%s' and %d are missing in '%s')" % (leftOnlyLen + rightOnlyLen, rightOnlyLen, pointA, leftOnlyLen, pointB))
self.__copyMissingFiles(pointA, pointB, leftOnly, dryRun)
self.__copyMissingFiles(pointB, pointA, rightOnly, dryRun)
logging.info("Done!")
except Exception as e:
logging.error(e.args[0])
return False
return True
def configure():
# Configure the logger
logging.basicConfig(format="%(asctime)s %(levelname)s: %(message)s", datefmt="%d/%m/%Y %H:%M:%S", level=logging.INFO)
# Read the command line arguments
parser = argparse.ArgumentParser()
parser.add_argument("-l", "--left", required=True)
parser.add_argument("-r", "--right", required=True)
parser.add_argument("-d", "--dry_run", action="store_const", const=True, default=False);
parser.add_argument("-v", "--verbose", action="store_const", const=True, default=False);
args = parser.parse_args()
# Return the arguments
return args
def main():
args = configure()
pointA = os.path.normpath(args.left)
pointB = os.path.normpath(args.right)
syncer = DirectorySyncer()
syncer.sync(pointA, pointB, dryRun=args.dry_run, verbose=args.verbose)
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
'''
@author: yuejl
@application:
@contact: lewyuejian@163.com
@file: wechatApiConf.py
@time: 2021/7/1 0001 11:32
@desc:
'''
class WechatApiConfig:
def __init__(self):
self.url = None
self.init = None
|
nilq/baby-python
|
python
|
#
# Copyright (c) 2018 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import OrderedDict
import attr
from plugincode.post_scan import PostScanPlugin
from plugincode.post_scan import post_scan_impl
from scancode import CommandLineOption
from scancode import POST_SCAN_GROUP
from summarycode import facet
# Tracing flags
TRACE = False
def logger_debug(*args):
pass
if TRACE:
import logging
import sys
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
def logger_debug(*args):
return logger.debug(' '.join(isinstance(a, unicode) and a or repr(a) for a in args))
"""
A plugin to compute a licensing clarity score as designed in ClearlyDefined
"""
# minimum score to consider a license detection as good.
MIN_GOOD_LICENSE_SCORE = 80
@post_scan_impl
class LicenseClarityScore(PostScanPlugin):
"""
Compute a License clarity score at the codebase level.
"""
codebase_attributes = dict(license_score=attr.ib(default=attr.Factory(OrderedDict)))
sort_order = 110
options = [
CommandLineOption(('--license-clarity-score',),
is_flag=True,
default=False,
help='Compute a summary license clarity score at the codebase level.',
help_group=POST_SCAN_GROUP,
required_options=['classify', 'license', 'copyright'],
)
]
def is_enabled(self, license_clarity_score, **kwargs):
return license_clarity_score
def process_codebase(self, codebase, license_clarity_score, **kwargs):
if TRACE:
logger_debug('LicenseClarityScore:process_codebase')
scoring_elements = compute_license_score(codebase, **kwargs)
codebase.attributes.license_score.update(scoring_elements)
def compute_license_score(codebase, min_score=MIN_GOOD_LICENSE_SCORE, **kwargs):
"""
Return a mapping of scoring elements and a license clarity score computed at
the codebase level.
"""
score = 0
scoring_elements = OrderedDict(score=score)
# FIXME: separate the compute of each score element from applying the weights
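    # Summary of the weights applied below (they sum to 100):
    #   top-level declared licenses ............................ 30
    #   file-level license and copyright coverage .............. 25 (scaled by coverage)
    #   consistent key-file vs. file-level licenses ............ 15
    #   all detected licenses are known SPDX licenses .......... 15
    #   full license texts found for all detected licenses ..... 15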
############################################################################
top_level_declared_licenses_weight = 30
has_top_level_declared_licenses = get_top_level_declared_licenses(codebase, min_score)
scoring_elements['has_top_level_declared_licenses'] = bool(has_top_level_declared_licenses)
if has_top_level_declared_licenses:
score += top_level_declared_licenses_weight
if TRACE:
logger_debug(
'compute_license_score:has_top_level_declared_licenses:',
has_top_level_declared_licenses, 'score:', score)
############################################################################
file_level_license_and_copyright_weight = 25
file_level_license_and_copyright_coverage = 0
files_with_lic_copyr, files_count = get_other_licenses_and_copyrights_counts(codebase, min_score)
if TRACE:
logger_debug('compute_license_score:files_with_lic_copyr:',
files_with_lic_copyr, 'files_count:', files_count)
scoring_elements['file_level_license_and_copyright_coverage'] = 0
if files_count:
file_level_license_and_copyright_coverage = files_with_lic_copyr / files_count
score += int(file_level_license_and_copyright_coverage * file_level_license_and_copyright_weight)
scoring_elements['file_level_license_and_copyright_coverage'] = file_level_license_and_copyright_coverage
if TRACE:
logger_debug('compute_license_score:file_level_license_and_copyright_coverage:',
file_level_license_and_copyright_coverage, 'score:', score)
############################################################################
license_consistency_weight = 15
has_consistent_key_and_file_level_license = False
key_files_license_keys, other_files_license_keys = get_unique_licenses(codebase, min_score)
if key_files_license_keys and key_files_license_keys == other_files_license_keys:
has_consistent_key_and_file_level_license = True
scoring_elements['has_consistent_key_and_file_level_license'] = has_consistent_key_and_file_level_license
if has_consistent_key_and_file_level_license:
score += license_consistency_weight
if TRACE:
logger_debug(
'compute_license_score:has_consistent_key_and_file_level_license:',
has_consistent_key_and_file_level_license, 'score:', score)
############################################################################
spdx_standard_licenses_weight = 15
has_all_spdx_licenses = all(has_spdx_licenses(res) for res in codebase.walk() if res.is_file)
scoring_elements['has_all_spdx_licenses'] = has_all_spdx_licenses
if has_all_spdx_licenses:
score += spdx_standard_licenses_weight
if TRACE:
logger_debug(
'compute_license_score:',
'has_all_spdx_licenses:',
has_all_spdx_licenses, 'score:', score)
############################################################################
license_texts_weight = 15
all_keys = key_files_license_keys & other_files_license_keys
keys_with_license_text = get_detected_license_keys_with_full_text(codebase, min_score)
has_all_license_texts = all_keys == keys_with_license_text
scoring_elements['has_all_license_texts'] = has_all_license_texts
if has_all_license_texts:
score += license_texts_weight
scoring_elements['score'] = score
return scoring_elements
def get_top_level_declared_licenses(codebase, min_score=MIN_GOOD_LICENSE_SCORE):
"""
A project has specific key file(s) at the top level of its code hierarchy
such as LICENSE, NOTICE or similar (and/or a package manifest) containing
structured license information such as an SPDX license expression or SPDX
license identifier, and the file(s) contain "clearly defined" declared
license information (a license declaration such as a license expression
and/or a series of license statements or notices).
Note: this ignores facets.
"""
key_files = (res for res in codebase.walk(topdown=True) if is_key_file(res))
detected_good_licenses = []
for resource in key_files:
if resource.scan_errors:
continue
# TODO: should we also ignore or penalize non SPDX licenses?
for detected_license in resource.licenses:
"""
"licenses": [
{
"score": 23.0,
"start_line": 1,
"end_line": 1,
"matched_rule": {
"identifier": "lgpl-2.1_38.RULE",
"license_expression": "lgpl-2.1",
"licenses": [
"lgpl-2.1"
]
},
"""
if detected_license['score'] < min_score:
continue
items = ('path', resource.path,)
items += tuple((k, v) for k, v in detected_license.items()
if (
k in ('score', 'start_line', 'end_line', 'matched_rule',)
)
)
detected_good_licenses.append(items)
return detected_good_licenses
def is_key_file(resource):
"""
Return True if a Resource is considered as a "key file".
"""
return (
resource.is_file
and resource.is_top_level
and (resource.is_readme
or resource.is_legal
or resource.is_manifest)
)
def is_core_facet(resource, core_facet=facet.FACET_CORE):
"""
Return True if the resource is in the core facet.
If we do not have facets, everything is considered as being core by default.
"""
has_facets = hasattr(resource, 'facets')
if not has_facets:
return True
# facets is a list
return not resource.facets or core_facet in resource.facets
def has_good_licenses(resource, min_score=MIN_GOOD_LICENSE_SCORE):
"""
Return True if a Resource licenses are all detected with a score above min_score.
"""
if not resource.licenses:
return False
if resource.scan_errors:
return False
for detected_license in resource.licenses:
if detected_license['score'] < min_score:
return False
return True
def has_spdx_licenses(resource):
"""
Return True if a Resource licenses are all known SPDX licenses.
"""
if resource.scan_errors:
return False
for detected_license in resource.licenses:
if not detected_license.get('spdx_license_key'):
return False
return True
def get_unique_licenses(codebase, min_score=MIN_GOOD_LICENSE_SCORE):
"""
Return a tuple of two sets of license keys found in the codebase with at least min_score:
- the set license found in key files
- the set license found in non-key files
This is only for files in the core facet.
"""
key_license_keys = set()
other_license_keys = set()
for resource in codebase.walk():
# FIXME: consider only text, source-like files for now
if not resource.is_file:
continue
if not (is_key_file(resource) or is_core_facet(resource)):
continue
if is_key_file(resource):
license_keys = key_license_keys
else:
license_keys = other_license_keys
for detected_license in resource.licenses:
if detected_license['score'] < min_score:
continue
license_keys.add(detected_license['key'])
return key_license_keys, other_license_keys
def get_detected_license_keys_with_full_text(codebase, min_score=MIN_GOOD_LICENSE_SCORE):
"""
Return a set of license keys for which at least one detection includes the
full license text.
This is for any files in the core facet or not.
"""
license_keys = set()
for resource in codebase.walk():
# FIXME: consider only text, source-like files for now
if not resource.is_file:
continue
for detected_license in resource.licenses:
if detected_license['score'] < min_score:
continue
if not detected_license['matched_rule']['is_license_text']:
continue
license_keys.add(detected_license['key'])
return license_keys
def get_other_licenses_and_copyrights_counts(codebase, min_score=MIN_GOOD_LICENSE_SCORE):
"""
Return a tuple of (count of files with a license/copyright, total count of
files).
Do files that can contain licensing and copyright information reliably carry
such information? This is based on a percentage of files in the core facet
of the project that have both:
- A license statement such as a text, notice or an SPDX-License-Identifier and,
- A copyright statement in standard format.
Here "reliably" means that these are reliably detected by tool(s) with a
    high level of confidence. This is a progressive element that is computed
based on:
- LICCOP: the number of files with a license notice and copyright statement
- TOT: the total number of files
"""
total_files_count = 0
files_with_good_license_and_copyright_count = 0
for resource in codebase.walk():
# FIXME: consider only text, source-like files for now
if is_key_file(resource) or not resource.is_file:
continue
if not is_core_facet(resource):
continue
total_files_count += 1
if resource.scan_errors:
continue
if not (resource.licenses or resource.copyrights):
continue
if not has_good_licenses(resource, min_score):
continue
files_with_good_license_and_copyright_count += 1
return files_with_good_license_and_copyright_count, total_files_count
|
nilq/baby-python
|
python
|
"""
Copyright (C) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mo.front.common.partial_infer.utils import int64_array
from mo.front.common.replacement import FrontReplacementSubgraph
from mo.front.tf.graph_utils import create_op_node_with_second_input
from mo.graph.graph import Graph
from mo.ops.reshape import Reshape
class NonMaxSuppressionNormalize(FrontReplacementSubgraph):
"""
The transformation converts several inputs of the NonMaxSuppression layer to be 1D instead of 0D with shape [1] to
comply with the layer specification.
"""
enabled = True
def find_and_replace_pattern(self, graph: Graph):
for nms in graph.get_op_nodes(op='NonMaxSuppression'):
# make inputs 2 to 5 to have shape [1] instead of [0] (convert 0D to 1D)
nms_name = nms.soft_get('name', nms.id)
for port_id in range(2, 6):
if port_id in nms.in_ports() and not nms.in_port(port_id).disconnected():
reshape_1d = create_op_node_with_second_input(graph, Reshape, int64_array([1]),
{'name': nms_name + '/Reshape_1D_{}'.format(port_id)})
nms.in_port(port_id).get_connection().insert_node(reshape_1d)
|
nilq/baby-python
|
python
|
import os.path as osp
import pickle
from collections import Counter
import torch
from torch.utils.data import DataLoader
import spacy
from tqdm import tqdm
import lineflow as lf
import lineflow.datasets as lfds
PAD_TOKEN = '<pad>'
UNK_TOKEN = '<unk>'
START_TOKEN = '<s>'
END_TOKEN = '</s>'
IGNORE_INDEX = -100
NLP = spacy.load('en_core_web_sm',
disable=['vectors', 'textcat', 'tagger', 'ner', 'parser'])
def preprocess(x):
tokens = [token.text.lower() for token in NLP(x[0]) if not token.is_space]
return ([START_TOKEN] + tokens + [END_TOKEN], x[1])
def build_vocab(tokens, cache='vocab.pkl', max_size=50000):
if not osp.isfile(cache):
counter = Counter(tokens)
words, _ = zip(*counter.most_common(max_size))
words = [PAD_TOKEN, UNK_TOKEN] + list(words)
token_to_index = dict(zip(words, range(len(words))))
if START_TOKEN not in token_to_index:
token_to_index[START_TOKEN] = len(token_to_index)
words += [START_TOKEN]
if END_TOKEN not in token_to_index:
token_to_index[END_TOKEN] = len(token_to_index)
words += [END_TOKEN]
with open(cache, 'wb') as f:
pickle.dump((token_to_index, words), f)
else:
with open(cache, 'rb') as f:
token_to_index, words = pickle.load(f)
return token_to_index, words
def postprocess(token_to_index,
unk_index):
def f(x):
token_index = [token_to_index.get(token, unk_index) for token in x[0]]
return token_index, x[1]
return f
def get_collate_fn(pad_index):
def f(batch):
indices, labels = zip(*batch)
max_length = max(len(x) for x in indices)
padded = [x + [pad_index] * (max_length - len(x)) for x in indices]
return torch.LongTensor(padded), torch.LongTensor(labels)
return f
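# For example (illustrative): with pad_index == 0, a batch such as
# [([5, 6, 7], 1), ([8, 9], 0)] is padded to the longest sequence in the batch
# and collated into (LongTensor([[5, 6, 7], [8, 9, 0]]), LongTensor([1, 0])).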
if __name__ == '__main__':
print('Reading...')
train = lfds.Imdb('train').map(preprocess)
tokens = lf.flat_map(lambda x: x[0],
train,
lazy=True)
print('Building vocabulary...')
token_to_index, _ = build_vocab(tokens, 'vocab.pkl')
print(f'Vocab Size: {len(token_to_index)}')
pad_index = token_to_index[PAD_TOKEN]
unk_index = token_to_index[UNK_TOKEN]
loader = DataLoader(
train
.map(postprocess(token_to_index, unk_index))
.save('imdb.train.cache'),
batch_size=32,
num_workers=4,
collate_fn=get_collate_fn(pad_index))
for batch in tqdm(loader):
...
del loader
|
nilq/baby-python
|
python
|
import unittest
from helpers.queuehelper import QueueName
#from backend.fcmapp import InfrastructureService
from backend.fcmbus import Bus
class TestBus(unittest.TestCase):
#@classmethod
#def make_bus(self):
# return Bus(InfrastructureService('', '', '', '', '', ''))
def test_bus_get_name_q(self):
#bus = Test_bus.make_bus()
self.assertTrue(Bus.get_queue_name(QueueName.Q_ALERT) == "alert")
def test_bus_get_name_str(self):
#bus = Test_bus.make_bus()
self.assertTrue(Bus.get_queue_name("alert") == "alert")
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
import importlib
import sys
# the following are python opcodes taken from the `opcode` module
# these have been constantized for easier access
# these are the opcodes used by python
# not to be confused with opcodes from neo.VM.OpCode,
# which are the opcodes for the neo vm
POP_TOP = 1
ROT_TWO = 2
ROT_THREE = 3
DUP_TOP = 4
DUP_TOP_TWO = 5
NOP = 9
UNARY_POSITIVE = 10
UNARY_NEGATIVE = 11
UNARY_NOT = 12
UNARY_INVERT = 15
BINARY_MATRIX_MULTIPLY = 16
INPLACE_MATRIX_MULTIPLY = 17
BINARY_POWER = 19
BINARY_MULTIPLY = 20
BINARY_MODULO = 22
BINARY_ADD = 23
BINARY_SUBTRACT = 24
BINARY_SUBSCR = 25
BINARY_FLOOR_DIVIDE = 26
BINARY_TRUE_DIVIDE = 27
INPLACE_FLOOR_DIVIDE = 28
INPLACE_TRUE_DIVIDE = 29
GET_AITER = 50
GET_ANEXT = 51
BEFORE_ASYNC_WITH = 52
INPLACE_ADD = 55
INPLACE_SUBTRACT = 56
INPLACE_MULTIPLY = 57
INPLACE_MODULO = 59
STORE_SUBSCR = 60
DELETE_SUBSCR = 61
BINARY_LSHIFT = 62
BINARY_RSHIFT = 63
BINARY_AND = 64
BINARY_XOR = 65
BINARY_OR = 66
INPLACE_POWER = 67
GET_ITER = 68
GET_YIELD_FROM_ITER = 69
PRINT_EXPR = 70
LOAD_BUILD_CLASS = 71
YIELD_FROM = 72
GET_AWAITABLE = 73
INPLACE_LSHIFT = 75
INPLACE_RSHIFT = 76
INPLACE_AND = 77
INPLACE_XOR = 78
INPLACE_OR = 79
BREAK_LOOP = 80
WITH_CLEANUP_START = 81
WITH_CLEANUP_FINISH = 82
RETURN_VALUE = 83
IMPORT_STAR = 84
YIELD_VALUE = 86
POP_BLOCK = 87
END_FINALLY = 88
POP_EXCEPT = 89
HAVE_ARGUMENT = 90 # Opcodes from here have an argument:
STORE_NAME = 90 # Index in name list
DELETE_NAME = 91 # ""
UNPACK_SEQUENCE = 92 # Number of tuple items
FOR_ITER = 93 # jrel op
UNPACK_EX = 94
STORE_ATTR = 95 # Index in name list
DELETE_ATTR = 96 # ""
STORE_GLOBAL = 97 # ""
DELETE_GLOBAL = 98 # ""
LOAD_CONST = 100 # Index in const list
LOAD_NAME = 101 # Index in name list
BUILD_TUPLE = 102 # Number of tuple items
BUILD_LIST = 103 # Number of list items
BUILD_SET = 104 # Number of set items
BUILD_MAP = 105 # Number of dict entries (up to 255)
LOAD_ATTR = 106 # Index in name list
COMPARE_OP = 107 # Comparison operator
IMPORT_NAME = 108 # Index in name list
IMPORT_FROM = 109 # Index in name list
JUMP_FORWARD = 110 # Number of bytes to skip
JUMP_IF_FALSE_OR_POP = 111 # Target byte offset from beginning of code
JUMP_IF_TRUE_OR_POP = 112 # "jabs op"
JUMP_ABSOLUTE = 113 # "jabs op"
POP_JUMP_IF_FALSE = 114 # "jabs op"
POP_JUMP_IF_TRUE = 115 # "jabs op"
LOAD_GLOBAL = 116 # Index in name list
CONTINUE_LOOP = 119 # Target address jrel
SETUP_LOOP = 120 # Distance to target address jrel
SETUP_EXCEPT = 121 # "jrel"
SETUP_FINALLY = 122 # "jrel"
LOAD_FAST = 124 # Local variable number
STORE_FAST = 125 # Local variable number
DELETE_FAST = 126 # Local variable number
RAISE_VARARGS = 130 # Number of raise arguments (1, 2, or 3)
CALL_FUNCTION = 131 # #args + (#kwargs << 8)
MAKE_FUNCTION = 132 # Number of args with default values
BUILD_SLICE = 133 # Number of items
MAKE_CLOSURE = 134
LOAD_CLOSURE = 135
LOAD_DEREF = 136
STORE_DEREF = 137
DELETE_DEREF = 138
CALL_FUNCTION_VAR = 140 # #args + (#kwargs << 8)
CALL_FUNCTION_KW = 141 # #args + (#kwargs << 8)
CALL_FUNCTION_VAR_KW = 142 # #args + (#kwargs << 8)
SETUP_WITH = 143
LIST_APPEND = 145
SET_ADD = 146
MAP_ADD = 147
LOAD_CLASSDEREF = 148
SETUP_ASYNC_WITH = 154
EXTENDED_ARG = 144
BUILD_LIST_UNPACK = 149
BUILD_MAP_UNPACK = 150
BUILD_MAP_UNPACK_WITH_CALL = 151
BUILD_TUPLE_UNPACK = 152
BUILD_SET_UNPACK = 153
# boa custom ops
FROMALTSTACK = 241
DROP = 242
BR_S = 243
SETITEM = 244
LD_ELEMENT = 245
XSWAP = 246
ROLL = 247
DROP_BODY = 248
LOAD_CLASS_ATTR = 249
DEBUG_OP = 250
# the following is a convenience helper
# for a human readable version of the ops
module = importlib.import_module('boa.code.pyop')
items = dir(sys.modules[__name__])
def to_name(op):
    """
    Look up the constant name for a given opcode value.
    :param op: integer opcode value
    :return: the name of the matching constant, or None if no constant has that value
    """
for item in items:
n = getattr(module, item)
if op == n:
return item
return None
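# Hedged usage sketch (not part of the original module; it assumes the package
# is importable as boa.code.pyop so the module-level lookup above succeeds):
if __name__ == '__main__':
    print(to_name(RETURN_VALUE))  # -> 'RETURN_VALUE'
    print(to_name(100))           # -> 'LOAD_CONST'
    print(to_name(0))             # -> None, since no constant has the value 0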
|
nilq/baby-python
|
python
|
from django.contrib import admin
from testModel.models import Test, Contact, Tag
class TagInline(admin.TabularInline):
    model = Tag
# Register your models here.
class ContactAdmin(admin.ModelAdmin):
    list_display = ('name', 'age', 'email')
    search_fields = ('name',)
    inlines = [TagInline]
    # fields = ("name", "email")
    fieldsets = (
        ["Main", {
            "fields": ("name", "email"),
        }],
        ["Advance", {
            "classes": ("collapse",),
            "fields": ("age",),
        }],
    )
admin.site.register(Contact, ContactAdmin)
admin.site.register([Test, Tag])
|
nilq/baby-python
|
python
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def buildTree(self, inorder: 'List[int]', postorder: 'List[int]') -> 'TreeNode':
if not inorder or not postorder:
return
        root = TreeNode(postorder[-1])  # the last postorder element is the root
        i = 0
        while inorder[i] != postorder[-1]:  # locate the root inside the inorder list
            i += 1
        root.left = self.buildTree(inorder[:i], postorder[:i])
        root.right = self.buildTree(inorder[i+1:], postorder[i:-1])
        return root
# def buildTree(self, inorder: 'List[int]', postorder: 'List[int]') -> 'TreeNode':
# if not inorder or not postorder:
# return []
# root = TreeNode(postorder[-1])
# self._buildTree(root,inorder,postorder)
# return root
# def _buildTree(self, node, inorder, postorder) -> 'TreeNode':
# rootIndex_inorder = inorder.index(postorder[-1])
# lenOfLeftSubTree = rootIndex_inorder
# lenOfRightSubTree = len(inorder)-lenOfLeftSubTree-1
# if lenOfLeftSubTree > 0:
# node.left = TreeNode(postorder[lenOfLeftSubTree-1])
# self._buildTree(node.left,inorder[0:rootIndex_inorder],postorder[0:lenOfLeftSubTree])
# if lenOfRightSubTree > 0:
# node.right = TreeNode(postorder[lenOfLeftSubTree+lenOfRightSubTree-1])
# self._buildTree(node.right,inorder[rootIndex_inorder+1:],postorder[lenOfLeftSubTree:lenOfLeftSubTree+lenOfRightSubTree])
# return
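# Hedged self-check (not part of the LeetCode harness): define the TreeNode
# stub from the comment above and rebuild a small tree to sanity-check the
# recursive construction.
if __name__ == '__main__':
    class TreeNode:
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None
    root = Solution().buildTree([9, 3, 15, 20, 7], [9, 15, 7, 20, 3])
    assert (root.val, root.left.val, root.right.val) == (3, 9, 20)
    print('buildTree sanity check passed')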
|
nilq/baby-python
|
python
|
import argparse
import os.path
import numpy as np
import torch
import torchvision
import torchvision.transforms as T
from sklearn.model_selection import train_test_split
from MIA.Attack.Augmentation import Augmentation
from model import CIFAR
parser = argparse.ArgumentParser()
parser.add_argument("--save_to", default='models', type=str)
parser.add_argument("--name", default='cifar10', type=str)
if __name__ == "__main__":
args = parser.parse_args()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
target = CIFAR(10)
target.to(device)
target.load_state_dict(torch.load(os.path.join(args.save_to, args.name + ".pth")))
train = torchvision.datasets.CIFAR10(root='../data', train=True,
download=True)
test = torchvision.datasets.CIFAR10(root='../data', train=False,
download=True)
X, Y = np.concatenate((train.data, test.data)), np.concatenate((train.targets, test.targets)).astype(np.int64)
target_X, shadow_X, target_Y, shadow_Y = train_test_split(X, Y, test_size=0.5, random_state=42)
transform = T.Compose(
[T.ToTensor(),
T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trans = [T.RandomRotation(10)]
times = [5 for _ in range(len(trans))]
attack_model = Augmentation(device, trans, times, transform=transform)
attack_model.evaluate(target, *train_test_split(target_X, target_Y, test_size=0.7, random_state=42), show=True)
# membership = attack_model(target, target_X, target_Y)
|
nilq/baby-python
|
python
|
"""TrackML scoring metric"""
__authors__ = ['Sabrina Amrouche', 'David Rousseau', 'Moritz Kiehn',
'Ilija Vukotic']
import numpy
import pandas
def _analyze_tracks(truth, submission):
"""Compute the majority particle, hit counts, and weight for each track.
Parameters
----------
truth : pandas.DataFrame
Truth information. Must have hit_id, particle_id, and weight columns.
submission : pandas.DataFrame
Proposed hit/track association. Must have hit_id and track_id columns.
Returns
-------
pandas.DataFrame
Contains track_id, nhits, major_particle_id, major_particle_nhits,
major_nhits, and major_weight columns.
"""
# true number of hits for each particle_id
particles_nhits = truth['particle_id'].value_counts(sort=False)
total_weight = truth['weight'].sum()
# combined event with minimal reconstructed and truth information
event = pandas.merge(truth[['hit_id', 'particle_id', 'weight']],
submission[['hit_id', 'track_id']],
on=['hit_id'], how='left', validate='one_to_one')
event.drop('hit_id', axis=1, inplace=True)
event.sort_values(by=['track_id', 'particle_id'], inplace=True)
# ASSUMPTIONs: 0 <= track_id, 0 <= particle_id
tracks = []
# running sum for the reconstructed track we are currently in
rec_track_id = -1
rec_nhits = 0
# running sum for the particle we are currently in (in this track_id)
cur_particle_id = -1
cur_nhits = 0
cur_weight = 0
# majority particle with most hits up to now (in this track_id)
maj_particle_id = -1
maj_nhits = 0
maj_weight = 0
for hit in event.itertuples(index=False):
# we reached the next track so we need to finish the current one
if (rec_track_id != -1) and (rec_track_id != hit.track_id):
# could be that the current particle is the majority one
if maj_nhits < cur_nhits:
maj_particle_id = cur_particle_id
maj_nhits = cur_nhits
maj_weight = cur_weight
# store values for this track
tracks.append((rec_track_id, rec_nhits, maj_particle_id,
particles_nhits[maj_particle_id], maj_nhits,
maj_weight / total_weight))
# setup running values for next track (or first)
if rec_track_id != hit.track_id:
rec_track_id = hit.track_id
rec_nhits = 1
cur_particle_id = hit.particle_id
cur_nhits = 1
cur_weight = hit.weight
maj_particle_id = -1
maj_nhits = 0
            maj_weight = 0
continue
# hit is part of the current reconstructed track
rec_nhits += 1
# reached new particle within the same reconstructed track
if cur_particle_id != hit.particle_id:
# check if last particle has more hits than the majority one
# if yes, set the last particle as the new majority particle
if maj_nhits < cur_nhits:
maj_particle_id = cur_particle_id
maj_nhits = cur_nhits
maj_weight = cur_weight
            # reset running values for current particle
cur_particle_id = hit.particle_id
cur_nhits = 1
cur_weight = hit.weight
# hit belongs to the same particle within the same reconstructed track
else:
cur_nhits += 1
cur_weight += hit.weight
# last track is not handled inside the loop
if maj_nhits < cur_nhits:
maj_particle_id = cur_particle_id
maj_nhits = cur_nhits
maj_weight = cur_weight
# store values for the last track
tracks.append((rec_track_id, rec_nhits, maj_particle_id,
particles_nhits[maj_particle_id], maj_nhits, maj_weight / total_weight))
cols = ['track_id', 'nhits',
'major_particle_id', 'major_particle_nhits',
'major_nhits', 'major_weight']
return pandas.DataFrame.from_records(tracks, columns=cols)
def score_event(truth, submission):
"""Compute the TrackML event score for a single event.
Parameters
----------
truth : pandas.DataFrame
Truth information. Must have hit_id, particle_id, and weight columns.
submission : pandas.DataFrame
Proposed hit/track association. Must have hit_id and track_id columns.
"""
tracks = _analyze_tracks(truth, submission)
purity_rec = numpy.true_divide(tracks['major_nhits'], tracks['nhits'])
purity_maj = numpy.true_divide(tracks['major_nhits'], tracks['major_particle_nhits'])
good_track = (0.5 < purity_rec) & (0.5 < purity_maj)
return tracks['major_weight'][good_track].sum()
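# Hedged usage sketch (not part of the original TrackML package): score a toy
# event in which every hit of each particle lands on a single track, so the
# event score should be exactly 1.0.
if __name__ == '__main__':
    toy_truth = pandas.DataFrame({
        'hit_id': [1, 2, 3, 4],
        'particle_id': [7, 7, 7, 9],
        'weight': [0.25, 0.25, 0.25, 0.25],
    })
    toy_submission = pandas.DataFrame({
        'hit_id': [1, 2, 3, 4],
        'track_id': [1, 1, 1, 2],
    })
    print(score_event(toy_truth, toy_submission))  # -> 1.0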
|
nilq/baby-python
|
python
|
from crc.api.common import ApiError
from crc.scripts.script import Script
from crc.services.study_service import StudyService
class UpdateStudyAssociates(Script):
argument_error_message = "You must supply at least one argument to the " \
"update_study_associates task, an array of objects in the form " \
"{'uid':'someid', 'role': 'text', 'send_email: 'boolean', " \
"'access':'boolean'} "
def get_description(self):
return """Allows you to associate other users with a study - only 'uid' is required in the
incoming dictionary, but will be useless without other information - all values will default to
false or blank
An empty list will delete the existing Associated list (except owner)
        Each UID will be validated vs ldap and will raise an error if the uva_uid is not found. The supplied list will
        replace any associations already in place.
example : update_study_associates([{'uid':'sbp3ey','role':'Unicorn Herder', 'send_email': False, 'access':True}])
"""
    def validate_arg(self, arg):
        if not isinstance(arg, list):
            raise ApiError("invalid parameter", "This function is expecting a list of dictionaries")
        if len(arg) > 0 and not isinstance(arg[0], dict):
            raise ApiError("invalid parameter", "This function is expecting a list of dictionaries")
def do_task_validate_only(self, task, study_id, workflow_id, *args, **kwargs):
if len(args) == 0:
items = []
else:
items = args[0]
self.validate_arg(items)
return all([x.get('uid', False) for x in items])
def do_task(self, task, study_id, workflow_id, *args, **kwargs):
if len(args) == 0:
access_list = []
else:
access_list = args[0]
self.validate_arg(access_list)
return StudyService.update_study_associates(study_id, access_list)
|
nilq/baby-python
|
python
|
from django.shortcuts import render, redirect
from .models import *
from django.http import Http404
from django.contrib.auth.models import User
from rest_framework import viewsets
from .sheet2 import interest_responses, firstapplication_response
# from .sheet3 import assesment_responses, score_response
# from django.contrib.auth.models import User
# from django.shortcuts import render
# from .filters import UserFilter
# Create your views here.
#class Profileview(viewsets.ModelViewSet):
#queryset= Profile.objects.all()
#serializer_class = ProfileSerializer
def homepage(request):
'''
assuming we make the api call
'''
form_data=interest_responses()
response = firstapplication_response()
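    # de-duplicate interestModel rows: keep only the first record stored per email address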
for email in interestModel.objects.values_list('email', flat=True).distinct():
interestModel.objects.filter(pk__in= interestModel.objects.filter(email=email).values_list('id', flat=True)[1:]).delete()
res= interestModel.objects.all()
return render(request,'interest.html',{'data':res})
# def scorecard(request):
# '''
# Assuming we make the api call
# '''
# # form_data=assesment_responses()
# form_data=assesment_responses()
# response = score_response()
# for email in scoreModel.objects.values_list('email', flat=True).distinct():
# scoreModel.objects.filter(pk__in= scoreModel.objects.filter(email=email).values_list('id', flat=True)[1:]).delete()
# res= scoreModel.objects.all()
# return render(request,'scores.html',{'data':res})
# def search(request):
# user_list = User.objects.all()
# user_filter = UserFilter(request.GET, queryset=user_list)
# return render(request, 'search/user_list.html', {'filter': user_filter})
|
nilq/baby-python
|
python
|
"""The qnap component."""
|
nilq/baby-python
|
python
|
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']
).get_hosts('all')
def test_nginx_service(host):
assert host.service("nginx-podman").is_running
assert host.service("nginx-podman").is_enabled
def test_nginx_listening(host):
assert host.socket("tcp://0.0.0.0:80").is_listening
def test_serve_static_page(host):
assert host.check_output("curl http://localhost") == "Hello World"
|
nilq/baby-python
|
python
|
import unittest
from typing import List, Optional
from swift_cloud_py.common.errors import SafetyViolation
from swift_cloud_py.entities.intersection.intersection import Intersection
from swift_cloud_py.entities.intersection.traffic_light import TrafficLight
from swift_cloud_py.entities.intersection.signalgroup import SignalGroup
from swift_cloud_py.entities.control_output.fixed_time_schedule import FixedTimeSchedule, GreenYellowInterval
from swift_cloud_py.entities.intersection.sg_relations import Conflict
from swift_cloud_py.validate_safety_restrictions.validate_completeness import validate_completeness
class TestValidatingCompleteness(unittest.TestCase):
""" Unittests of the function find_other_sg_relation_matches """
@staticmethod
def get_default_signalgroup(name: str, min_greenyellow: float = 10.0, max_greenyellow: float = 80.0,
min_red: float = 10.0, max_red: float = 80.0) -> SignalGroup:
""" Get a default signalgroup object"""
traffic_light = TrafficLight(capacity=0.5, lost_time=0.0)
return SignalGroup(id=name, traffic_lights=[traffic_light],
min_greenyellow=min_greenyellow, max_greenyellow=max_greenyellow, min_red=min_red,
max_red=max_red, min_nr=1, max_nr=3)
@staticmethod
def get_default_intersection(additional_signalgroups: Optional[List[SignalGroup]] = None
) -> Intersection:
"""
Get a default intersection object with 2 conflicting signal groups "sg1" and "sg2"
        :param additional_signalgroups: signal groups to add to the intersection (besides signal groups 'sg1' and 'sg2')
:return: the intersection object
"""
if additional_signalgroups is None:
additional_signalgroups = []
signalgroup1 = TestValidatingCompleteness.get_default_signalgroup(name="sg1")
signalgroup2 = TestValidatingCompleteness.get_default_signalgroup(name="sg2")
conflict = Conflict(id1="sg1", id2="sg2", setup12=2, setup21=3)
intersection = Intersection(signalgroups=[signalgroup1, signalgroup2] + additional_signalgroups,
conflicts=[conflict])
return intersection
def test_complete(self) -> None:
        # GIVEN
fts = FixedTimeSchedule(greenyellow_intervals=dict(
sg1=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=40),
GreenYellowInterval(start_greenyellow=50, end_greenyellow=70)],
sg2=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=30),
GreenYellowInterval(start_greenyellow=50, end_greenyellow=60)]),
period=100)
intersection = TestValidatingCompleteness.get_default_intersection()
# WHEN
validate_completeness(intersection=intersection, fts=fts)
# THEN no error should be raised
def test_signalgroup_missing(self) -> None:
        # GIVEN
fts = FixedTimeSchedule(greenyellow_intervals=dict(
sg1=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=40),
GreenYellowInterval(start_greenyellow=50, end_greenyellow=70)],
sg2=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=30),
GreenYellowInterval(start_greenyellow=50, end_greenyellow=60)]),
period=100)
signalgroup3 = TestValidatingCompleteness.get_default_signalgroup(name="sg3")
intersection = TestValidatingCompleteness.get_default_intersection(additional_signalgroups=[signalgroup3])
with self.assertRaises(SafetyViolation):
# WHEN
validate_completeness(intersection=intersection, fts=fts)
        # THEN a SafetyViolation should be raised
def test_no_greenyellow_intervals(self) -> None:
        # GIVEN
fts = FixedTimeSchedule(greenyellow_intervals=dict(
sg1=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=40),
GreenYellowInterval(start_greenyellow=50, end_greenyellow=70)],
sg2=[GreenYellowInterval(start_greenyellow=10, end_greenyellow=30),
GreenYellowInterval(start_greenyellow=50, end_greenyellow=60)],
sg3=[]),
period=100)
signalgroup3 = TestValidatingCompleteness.get_default_signalgroup(name="sg3")
intersection = TestValidatingCompleteness.get_default_intersection(additional_signalgroups=[signalgroup3])
with self.assertRaises(SafetyViolation):
# WHEN
validate_completeness(intersection=intersection, fts=fts)
        # THEN a SafetyViolation should be raised
|
nilq/baby-python
|
python
|
import cv2
from PIL import Image
import argparse
import os
import glob
import time
from pathlib import Path
import torch
from config import get_config
from mtcnn import MTCNN
import mxnet as mx
import numpy as np
from Learner import face_learner
from utils import load_facebank, draw_box_name, prepare_facebank
from face_detection.accuracy_evaluation import predict
from face_detection.config_farm import configuration_10_320_20L_5scales_v2 as cfg
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='for face verification')
parser.add_argument("-f", "--folder", help="folder of test images",default='./', type=str)
parser.add_argument("--extension", help="image extension",default='jpg', type=str)
parser.add_argument("-s", "--save_name", help="output file name",default='recording', type=str)
parser.add_argument('-th','--threshold',help='threshold to decide identical faces',default=1.54, type=float)
parser.add_argument("-u", "--update", help="whether perform update the facebank",action="store_true")
parser.add_argument("-tta", "--tta", help="whether test time augmentation",action="store_true")
parser.add_argument("-c", "--score", help="whether show the confidence score",action="store_true")
parser.add_argument("-b", "--begin", help="from when to start detection(in seconds)", default=0, type=int)
parser.add_argument("-d", "--duration", help="perform detection for how long(in seconds)", default=0, type=int)
parser.add_argument("-w", "--weight", help="model path", default='', type=str)
args = parser.parse_args()
conf = get_config(False)
mtcnn = MTCNN()
print('mtcnn loaded')
learner = face_learner(conf, True)
learner.threshold = args.threshold
if conf.device.type == 'cpu':
learner.load_state(conf, 'cpu_final.pth', True, True)
else:
# learner.load_state(conf, 'mobilefacenet.pth', True, True)
learner.load_state(conf, 'ir_se50.pth', True, True, weight=args.weight)
learner.model.eval()
print('learner loaded')
if args.update:
targets, names = prepare_facebank(conf, learner.model, mtcnn, tta = args.tta)
print('facebank updated')
else:
targets, names = load_facebank(conf)
print('facebank loaded')
# cap = cv2.VideoCapture(str(conf.facebank_path/args.file_name))
# cap.set(cv2.CAP_PROP_POS_MSEC, args.begin * 1000)
# fps = cap.get(cv2.CAP_PROP_FPS)
fps = 30
# os.chdir(args.folder)
video_writer = cv2.VideoWriter(str(conf.facebank_path/'{}.avi'.format(args.save_name)),
cv2.VideoWriter_fourcc(*'XVID'), int(fps), (1280,720))
if args.duration != 0:
i = 0
symbol_file_path = 'face_detection/symbol_farm/symbol_10_320_20L_5scales_v2_deploy.json'
model_file_path = 'face_detection/saved_model/configuration_10_320_20L_5scales_v2/train_10_320_20L_5scales_v2_iter_1800000.params'
# self.model = Backbone(conf.net_depth, conf.drop_ratio, conf.net_mode).to(conf.device)
# print('{}_{} model generated'.format(conf.net_mode, conf.net_depth))
ctx = mx.gpu(0)
face_detector = predict.Predict(mxnet=mx,
symbol_file_path=symbol_file_path,
model_file_path=model_file_path,
ctx=ctx,
receptive_field_list=cfg.param_receptive_field_list,
receptive_field_stride=cfg.param_receptive_field_stride,
bbox_small_list=cfg.param_bbox_small_list,
bbox_large_list=cfg.param_bbox_large_list,
receptive_field_center_start=cfg.param_receptive_field_center_start,
num_output_scales=cfg.param_num_output_scales)
for file in glob.glob(args.folder + "/*.{}".format(args.extension)):
print(file)
frame = cv2.imread(file)
image = Image.fromarray(frame[...,::-1]) #bgr to rgb
# image = Image.fromarray(frame)
try:
# bboxes, faces = mtcnn.align_multi(image, conf.face_limit, 16)
# print(faces[0].size)
# backSub = cv2.createBackgroundSubtractorMOG2()
# backSub = cv2.createBackgroundSubtractorKNN()
# test = cv2.resize(frame, dsize=None ,fx=0.25, fy=0.25)
# fgMask = backSub.apply(test)
# cv2.imshow('window_test', test)
# cv2.imshow('window', fgMask)
# if cv2.waitKey(0) == ord('q'):
# break
faces, infer_time = face_detector.predict(frame, resize_scale=0.2, score_threshold=0.6, top_k=10000, \
NMS_threshold=0.2, NMS_flag=True, skip_scale_branch_list=[])
print(len(faces))
bboxes = faces
except Exception as e:
print(e)
bboxes = []
faces = []
if len(bboxes) == 0:
print('no face')
continue
else:
# bboxes = bboxes[:,:-1] #shape:[10,4],only keep 10 highest possibiity faces
# bboxes = bboxes.astype(int)
# bboxes = bboxes + [-1,-1,1,1] # personal choice
img_size = 112
margin = 0
# faces = np.empty((len(bboxes), img_size, img_size, 3))
faces = []
img_h, img_w, _ = np.shape(image)
for i, bbox in enumerate(bboxes):
x1, y1, x2, y2= bbox[0], bbox[1], bbox[2] ,bbox[3]
xw1 = max(int(x1 - margin ), 0)
yw1 = max(int(y1 - margin ), 0)
xw2 = min(int(x2 + margin ), img_w - 1)
yw2 = min(int(y2 + margin ), img_h - 1)
face = cv2.resize(frame[yw1:yw2 + 1, xw1:xw2 + 1], (img_size, img_size))
faces.append(Image.fromarray(face[...,::-1]))
start_time = time.time()
results, score = learner.infer(conf, faces, targets, True)
print('Duration: {}'.format(time.time()-start_time))
for idx,bbox in enumerate(bboxes):
x1, y1, x2, y2= bbox[0], bbox[1], bbox[2] ,bbox[3]
xw1 = max(int(x1 - margin ), 0)
yw1 = max(int(y1 - margin ), 0)
xw2 = min(int(x2 + margin ), img_w - 1)
yw2 = min(int(y2 + margin ), img_h - 1)
bbox = [xw1, yw1, xw2,yw2]
if args.score:
frame = draw_box_name(bbox, names[results[idx] + 1] + '_{:.2f}'.format(score[idx]), frame)
else:
frame = draw_box_name(bbox, names[results[idx] + 1], frame)
frame = cv2.resize(frame, dsize=None ,fx=0.25, fy=0.25)
cv2.imshow('window', frame)
if cv2.waitKey(0) == ord('q'):
break
video_writer.write(frame)
# if args.duration != 0:
# i += 1
# if i % 25 == 0:
# print('{} second'.format(i // 25))
# if i > 25 * args.duration:
# break
# cap.release()
video_writer.release()
|
nilq/baby-python
|
python
|
class PixelNotChangingError(Exception):
pass
|
nilq/baby-python
|
python
|
__author__ = 'kim'
try:
import pyfftw
print('-------------------')
print('| pyFFTW detected |')
print('-------------------')
except ImportError:
print('-------------------------------')
print('* WARNING: No pyFFTW detected *')
print('-------------------------------')
from upsilon.utils import utils
from upsilon.utils.logger import Logger
from upsilon.extract_features.extract_features import ExtractFeatures
from upsilon.extract_features.is_period_alias import is_period_alias as IsPeriodAlias
from upsilon.extract_features.feature_set import get_feature_set
from upsilon.datasets.base import load_rf_model
from upsilon.predict.predict import predict
from upsilon.test.predict import run as test_predict
|
nilq/baby-python
|
python
|
import mysql.connector
from mysql.connector import errorcode
from flask import flash
try:
# Establish a connection with the MySQL database
# Password here is hardcoded for simplicity.
# For all practical purposes, use environment variables or config files.
cnx = mysql.connector.connect(user='test_user', password='password',
host='127.0.0.1',
database='phone_directory')
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
def add_contact(contact):
status = None
cursor = cnx.cursor()
insert_query = "INSERT INTO contacts (name, mobile_no, email) VALUES (%s, %s, %s)"
contact = (contact.get("name"), contact.get(
"mobile_no"), contact.get("email"))
try:
cursor.execute(insert_query, contact)
status = True
except Exception as e:
print(e)
status = False
finally:
cnx.commit()
cursor.close()
return status
def get_contacts():
try:
cursor = cnx.cursor()
cursor.execute("SELECT * FROM contacts")
contacts = cursor.fetchall()
cursor.close()
return contacts
except Exception as e:
print(e)
flash("Error occured while reading from the database")
return None
def delete_contact(contact_id):
    cursor = cnx.cursor()
    try:
        cursor.execute("DELETE FROM contacts WHERE id = %s", (contact_id, ))
        flash("Contact deleted successfully")
    except Exception as e:
        print(e)
        flash("Error occurred while deleting from the database")
    finally:
        # persist the deletion; without the commit the DELETE is not applied
        cnx.commit()
        cursor.close()
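# Hedged schema sketch (inferred from the queries above, not taken from the
# original project): the code assumes a table roughly like
#   CREATE TABLE contacts (
#       id INT AUTO_INCREMENT PRIMARY KEY,
#       name VARCHAR(255),
#       mobile_no VARCHAR(20),
#       email VARCHAR(255)
#   );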
|
nilq/baby-python
|
python
|
"""
Django template tags for configurations.
"""
|
nilq/baby-python
|
python
|
from services import user_service
from viewmodels.shared.viewmodelbase import ViewModelBase
class IndexViewModel(ViewModelBase):
def __init__(self):
super().__init__()
self.user = user_service.get_user_by_id(self.user_id)
def validate(self):
if not self.user_id:
self.errors.append('No user id. ')
if self.user_id and not self.user:
self.errors.append('No user. ')
|
nilq/baby-python
|
python
|
# coding: utf-8
# In[1]:
from flask import Flask,render_template,session,url_for,request,redirect
from flask_pymongo import PyMongo
from flask_bcrypt import Bcrypt
from flask import jsonify,json
import os
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import pprint
import datetime
import argparse
import pickle
from model import create_model
app1 = Flask(__name__)
nn4_small2_pretrained = create_model()
# In[2]:
nn4_small2_pretrained.load_weights('weights/nn4.small2.v1.h5')
# arguments
parser = argparse.ArgumentParser()
parser.add_argument("-c","--course",help="Course ID for attendance")
args = vars(parser.parse_args())
print(args['course'])
# In[3]:
import numpy as np
import os.path
class IdentityMetadata():
def __init__(self, base, name, file):
# dataset base directory
self.base = base
# identity name
self.name = name
# image file name
self.file = file
def __repr__(self):
return self.image_path()
def image_path(self):
return os.path.join(self.base, self.name, self.file)
def load_metadata(path):
metadata = []
for i in os.listdir(path):
for f in os.listdir(os.path.join(path, i)):
# Check file extension. Allow only jpg/jpeg' files.
ext = os.path.splitext(f)[1]
if ext == '.jpg' or ext == '.jpeg' or ext=='.png':
metadata.append(IdentityMetadata(path, i, f))
return np.array(metadata)
metadata = load_metadata('images')
print('metadata created')
print(metadata)
# In[4]:
import cv2
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from align import AlignDlib
# get_ipython().run_line_magic('matplotlib', 'inline')
def load_image(path):
img = cv2.imread(path, 1) #BGR
return img[...,::-1] #RGB
alignment = AlignDlib('shape_predictor_68_face_landmarks.dat')
#combined transformation
def align_image(img):
return alignment.align(96, img, alignment.getLargestFaceBoundingBox(img),
landmarkIndices=AlignDlib.OUTER_EYES_AND_NOSE)
# In[5]:
import pickle
embedded = np.zeros((metadata.shape[0], 128))
real_name = {}
embeddings = open('embeddings.pkl','rb')
embedded = pickle.load(embeddings)
embeddings.close()
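# map each identity (the directory name under 'images/') to its precomputed embedding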
for i, m in enumerate(metadata):
# img = load_image(m.image_path())
# img = align_image(img)
# scale RGB values to interval [0,1]
# if img is not None:
# img = (img / 255.).astype(np.float32)
# obtain embedding vector for image
# embedded[i] = nn4_small2_pretrained.predict(np.expand_dims(img, axis=0))[0]
real_name[os.path.dirname(m.image_path()[7:])] = embedded[i]
print(i)
print(m.name)
# embeddings = open('embeddings.pkl','wb')
# pickle.dump(embedded,embeddings)
# embeddings.close()
def real_names():
return real_name
# In[6]:
def distance(emb1, emb2):
return np.sum(np.square(emb1 - emb2))
# In[7]:
def show_pair(idx1, idx2):
plt.figure(figsize=(6,3))
plt.suptitle(f'Distance = {distance(embedded[idx1], embedded[idx2]):.2f}')
plt.subplot(121)
plt.imshow(load_image(metadata[idx1].image_path()))
plt.subplot(122)
plt.imshow(load_image(metadata[idx2].image_path()));
show_pair(78, 76)
show_pair(78, 17)
# In[8]:
def recognize(embedded):
min_dist = 100
_id = None
for name,emb in real_name.items():
dist = np.sum(np.square(emb - embedded))
if dist < min_dist:
min_dist = dist
_id = name
if min_dist > 0.58:
print(min_dist)
return None
else:
print(min_dist)
return _id
# In[9]:
from sklearn.metrics import f1_score, accuracy_score
distances = [] # squared L2 distance between pairs
identical = [] # 1 if same identity, 0 otherwise
num = len(metadata)
for i in range(num - 1):
    for j in range(i + 1, num):  # compare each distinct pair of images only once
distances.append(distance(embedded[i], embedded[j]))
identical.append(1 if metadata[i].name == metadata[j].name else 0)
distances = np.array(distances)
identical = np.array(identical)
thresholds = np.arange(0.3, 1.0, 0.01)
f1_scores = [f1_score(identical, distances < t) for t in thresholds]
acc_scores = [accuracy_score(identical, distances < t) for t in thresholds]
opt_idx = np.argmax(f1_scores)
# Threshold at maximal F1 score
opt_tau = thresholds[opt_idx]
# Accuracy at maximal F1 score
opt_acc = accuracy_score(identical, distances < opt_tau)
# Plot F1 score and accuracy as function of distance threshold
plt.plot(thresholds, f1_scores, label='F1 score');
plt.plot(thresholds, acc_scores, label='Accuracy');
plt.axvline(x=opt_tau, linestyle='--', lw=1, c='lightgrey', label='Threshold')
plt.title(f'Accuracy at threshold {opt_tau:.2f} = {opt_acc:.3f}');
plt.xlabel('Distance threshold')
plt.legend();
# In[10]:
embedded = np.zeros((1, 128))
def recognize_image(image_path):
img = load_image(image_path)
img = align_image(img)
if img is not None:
img = (img / 255.).astype(np.float32)
# obtain embedding vector for image
embedded = nn4_small2_pretrained.predict(np.expand_dims(np.array(img), axis=0))[0]
name = recognize(embedded)
print(name)
return name
return None
cap = cv2.VideoCapture(1)
# def webcam_recognize():
# while(True):
# ret, frame = cap.read()
# cv2.imwrite('temp.jpg',frame)
# cv2.waitKey(20)
# if 'temp.jpg' is not None:
# name = recognize_image('temp.jpg')
# cv2.imshow('temp',frame)
# print(name)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
# cap.release()
# cv2.destroyAllWindows()
# webcam_recognize()
# recognize_image('.jpg')
#students list for gspread updations
students = []
from mtcnn.mtcnn import MTCNN
import pickle
def multiple_recognize():
while(True):
ret,frame = cap.read()
cv2.imwrite('temp.jpg',frame)
detector = MTCNN()
image = load_image('temp.jpg')
faces = detector.detect_faces(image)
print(faces)
if faces is not None:
for face in faces:
(x,y,w,h) = face['box']
cv2.imwrite('temp.jpg',frame)
                name = None
                if os.path.exists('temp.jpg'):
                    name = recognize_image('temp.jpg')
                cv2.rectangle(frame,(x,y),(x+w,y+h),(0,0,255),5)
                cv2.imshow('Faces',frame)
                if name is not None and name not in students:
                    students.append(name)
                    stud_names = open('present.pickle','wb')
                    pickle.dump(students, stud_names,protocol=2)
                    stud_names.close()
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
# cv2.imshow('parts',image[y:y+h,x:x+w])
multiple_recognize()
# In[11]:
# import datetime
# import os
# import subprocess as s
# s.call("python mark_attendance.py", shell=True)
# today = datetime.date.today()
# formatted_date = today.strftime("%m-%d-%Y")
# print(formatted_date)
# from mark_attendance import mark_attendance
# mark_attendance(students)
import datetime
today = datetime.date.today()
formatted_date = today.strftime("%m-%d-%Y")
print(formatted_date)
from mark_attendance import mark_attendance
mark_attendance(students,args['course'])
|
nilq/baby-python
|
python
|
# Copyright (c) The Diem Core Contributors
# SPDX-License-Identifier: Apache-2.0
from typing import Callable, List, Optional, Tuple
import requests
from .models import (
User,
Address,
Transaction,
Transactions,
RequestForQuote,
Quote,
CreateTransaction,
AccountInfo,
OffChainSequenceInfo,
TransactionId,
FundsTransfer,
PaymentDetails,
PreparePaymentInfoResponse,
)
from .models_fppa import (
FundPullPreApprovalScope,
FundsPullPreApprovalRequest,
FundsPullPreApproval,
FundPullPreApprovalStatus,
)
RequestSender = Callable[[str, str, Optional[dict]], requests.Response]
class ReferenceWalletProxy:
def __init__(self, base_url):
self.base_url = base_url
self.authorization_header = {}
self.funds_pull_preapproval = ReferenceWalletProxyFPPA(self._request_authorized)
def create_new_user(self, username, password):
add_user_json = {"username": username, "password": password}
add_user_response = self._request("POST", "user", json=add_user_json)
self._set_authorization_token(add_user_response.text)
def get_user(self):
user_response = self._request_authorized("GET", "user")
return User.from_json(user_response.text)
def update_user(self, user: User):
self._request_authorized("PUT", "user", json=user.to_dict())
return self.get_user()
def get_account_info(self) -> AccountInfo:
response = self._request_authorized("GET", "account")
return AccountInfo.from_json(response.text)
def get_balance(self, currency):
return sum(
x.balance
for x in self.get_account_info().balances
if x.currency == currency
)
def get_receiving_address(self) -> str:
address_response = self._request_authorized(
"POST", "account/receiving-addresses"
)
address = Address.from_json(address_response.text)
return address.address
def get_transaction_list(self) -> List[Transaction]:
account_transactions_response = self._request_authorized(
"GET", "account/transactions"
)
transactions = Transactions.from_json(account_transactions_response.text)
return transactions.transaction_list
def create_deposit_quote(self, amount: int, currency_pair) -> Quote:
quote_request = RequestForQuote(
action="buy",
amount=amount,
currency_pair=currency_pair,
)
quote_response = self._request_authorized(
"POST", "account/quotes", json=quote_request.to_dict()
)
return Quote.from_json(quote_response.text)
def execute_quote(self, quote_id: str):
self._request_authorized(
"POST", f"account/quotes/{quote_id}/actions/execute", json={}
)
def get_offchain_state(self, reference_id) -> OffChainSequenceInfo:
# TBD: There is no way, at the moment, to get off-chain sequence
# state. Should be implemented.
return OffChainSequenceInfo()
def send_transaction(self, address, amount, currency) -> TransactionId:
tx_request = CreateTransaction(
currency=currency,
amount=amount,
receiver_address=address,
)
send_transaction_response = self._request_authorized(
"POST", "account/transactions", json=tx_request.to_dict()
)
return TransactionId.from_json(send_transaction_response.text)
def create_payment_command_as_sender(
self,
reference_id,
vasp_address,
merchant_name,
action,
currency,
amount,
expiration,
):
request = {
"reference_id": reference_id,
"vasp_address": vasp_address,
"merchant_name": merchant_name,
"action": action,
"currency": currency,
"amount": amount,
"expiration": expiration,
}
self._request_authorized("POST", "offchain/payment_command", json=request)
def get_payment_details(self, reference_id, vasp_address) -> PaymentDetails:
response = self._request_authorized(
"GET",
f"offchain/query/payment_details?"
f"vasp_address={vasp_address}&"
f"reference_id={reference_id}",
)
return PaymentDetails.from_json(response.text) if response.text else None
    def prepare_payment_as_receiver(self, action: str = "charge") -> Tuple[str, str]:
response = self._request_authorized(
"POST", f"/validation/payment_info/{action}"
)
response_object = PreparePaymentInfoResponse.from_json(response.text)
return response_object.reference_id, response_object.address
def approve_payment(self, reference_id: str, init_offchain: bool):
self._request_authorized(
"POST",
f"/offchain/payment/{reference_id}/actions/approve",
json={"init_offchain_required": init_offchain},
)
def approve_payment_command(self, reference_id):
self._request_authorized(
"POST", f"/offchain/payment_command/{reference_id}/actions/approve"
)
def reject_payment_command(self, reference_id):
self._request_authorized(
"POST", f"/offchain/payment_command/{reference_id}/actions/reject"
)
def get_transaction(self, tx_id) -> FundsTransfer:
response = self._request_authorized("GET", f"account/transactions/{tx_id}")
return FundsTransfer.from_json(response.text)
def _set_authorization_token(self, token):
self.authorization_header = {"Authorization": "Bearer " + token}
def _request(self, method, endpoint, json=None):
response = requests.request(
method, url=f"{self.base_url}/{endpoint}", json=json
)
response.raise_for_status()
return response
def _request_authorized(self, method, endpoint, json=None) -> requests.Response:
response = requests.request(
method,
url=f"{self.base_url}/{endpoint}",
json=json,
headers=self.authorization_header,
)
response.raise_for_status()
return response
class ReferenceWalletProxyFPPA:
"""
Sends to the reference wallet funds pull pre-approval related requests.
"""
def __init__(self, request_wallet_authorized: RequestSender):
self._request_authorized = request_wallet_authorized
def get_all_preapprovals(self) -> List[FundsPullPreApproval]:
r = self._request_authorized("GET", "offchain/funds_pull_pre_approvals")
preapprovals = r.json()
return [
FundsPullPreApproval.from_dict(x)
for x in preapprovals["funds_pull_pre_approvals"]
]
def request_preapproval_from_another(
self,
payer_addr_bech32: str,
scope: FundPullPreApprovalScope,
description: str = None,
) -> str:
fppa_request = FundsPullPreApprovalRequest(
payer_address=payer_addr_bech32,
description=description,
scope=scope,
)
r = self._request_authorized(
"POST", "validation/funds_pull_pre_approvals", fppa_request.to_dict()
)
return r.json()["funds_pull_pre_approval_id"]
def create_fppa_request_for_unknown_payer(
self,
scope: FundPullPreApprovalScope,
description: str = None,
    ) -> Tuple[str, str]:
r = self._request_authorized(
"POST",
"validation/funds_pull_pre_approvals",
{"description": description, "scope": scope.to_dict()},
)
return r.json()["funds_pull_pre_approval_id"], r.json()["address"]
def update_preapproval_status(
self, fppa_id: str, status: FundPullPreApprovalStatus
):
self._request_authorized(
"PUT",
f"offchain/funds_pull_pre_approvals/{fppa_id}",
{"status": status.value},
)
def create_and_approve(
self,
biller_address: str,
funds_pull_pre_approval_id: str,
scope: FundPullPreApprovalScope,
description: str,
):
self._request_authorized(
"POST",
"offchain/funds_pull_pre_approvals",
{
"biller_address": biller_address,
"funds_pull_pre_approval_id": funds_pull_pre_approval_id,
"scope": scope.to_dict(),
"description": description,
},
)
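# Hedged usage sketch (not part of the original module; the URL is illustrative,
# a running reference-wallet backend is assumed, and "XUS" is used as a typical
# test currency name):
if __name__ == "__main__":
    wallet = ReferenceWalletProxy("http://localhost:8080/api")
    wallet.create_new_user("demo_user", "Demo_password1")
    print(wallet.get_account_info())
    print("XUS balance:", wallet.get_balance("XUS"))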
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 30 14:05:38 2019
@author: ico
"""
import numpy as np
class MyNeuron:
def training(self,X,Y):
self.W=np.random.random((X.shape[1]+1,1))
X=np.append(np.ones((X.shape[0],1)),X,axis=1)
for j in range(1,21):
i=0
for x in X:
if np.dot(self.W.T,x)>0:
y = 1
else:
y = 0
                self.W=self.W+(Y[i]-y)*x.reshape(-1,1)  # perceptron weight update (reshape input to a column vector)
i=i+1
    #W=np.array([-100,100/6])
    # constructor
    def __init__(self,funcActivation):
        self.funcAct=funcActivation
    # predicts whether the sample passes or not
def predict(self,x):
x=np.append(1,x)
y=np.dot(self.W.T,x.reshape(self.W.shape[0]))
if self.funcAct=="heaviside":
return self.heaviside(y)
if self.funcAct=="tanh":
return self.tanh(y)
if self.funcAct=="sigmoid":
return self.sigmoid(y)
    # activation functions
    def heaviside(self,x):  # step activation used by predict()
        if x>=0:
            return 1  # passed
        else:
            return -1  # not passed
def tanh(self,x):
return np.sinh(x)/np.cosh(x)
def sigmoid(self,x):
return 1/(1+np.exp(-x))
    # transform predictions to 0 or 1
def transformPredictions(self,Y):
Y=np.array(Y)
if self.funcAct=='heaviside':
idxNeg=Y==-1
Y[idxNeg]=0
elif self.funcAct=='tanh':
idxPos = Y >=0
idxNeg = Y<0
Y[idxPos]=1
Y[idxNeg]=0
else:
idxPos = Y>=0.5
idxNeg=Y<0.5
Y[idxPos]=1
Y[idxNeg]=0
return Y
clf=MyNeuron("sigmoid")
import pandas as pd
#datos=pd.read_csv('training.csv')
#X=datos.iloc[:,[0,1]]
#Y=datos.iloc[:,2]
ceros=np.random.uniform(0,0.3,10)  # vector of 10 near-zero values
unos= np.random.uniform(0.7,1.0,10)  # vector of 10 near-one values
# build the dataset
X=np.append(ceros,ceros)
X=np.append(X,unos)
X=np.append(X,unos)
X=np.append(X,ceros)
X=np.append(X,unos)
X=np.append(X,ceros)
X=np.append(X,unos)
X=X.reshape(40,2)
Y=np.zeros((30,1))
Y=np.append(Y,np.ones((10,1)))
Y=Y.reshape(40,1)
clf.training(X,Y)
prueba=pd.read_csv('test.csv')
Xt=prueba.iloc[:,[0,1]]
Yt=prueba.iloc[:,2]
Yp=[]
for i in range(0,Xt.shape[0]):
p=np.array(Xt.iloc[i,:])
Yp.append(clf.predict(p))
"""
import matplotlib.pyplot as plt
idxPos = Y ==1
idxNeg = Y ==0
Xgraf=X[idxPos]
plt.plot(Xgraf.iloc[:,0],Xgraf.iloc[:,1],'bo')
Xgraf=X[idxNeg]
plt.plot(Xgraf.iloc[:,0],Xgraf.iloc[:,1],'ro')
plt.title("Entrenamiento")
plt.show()
for i in range(0,Xt.shape[0]):
p=np.array(Xt.iloc[i,:])
Yp.append(clf.predict(p))
idxPos = Yt ==1
idxNeg = Yt ==0
Xgraf=Xt[idxPos]
plt.plot(Xgraf.iloc[:,0],Xgraf.iloc[:,1],'bo')
Xgraf=Xt[idxNeg]
plt.plot(Xgraf.iloc[:,0],Xgraf.iloc[:,1],'ro')
plt.title("Test")
plt.show()
# compute the confusion matrix
# convert the predictions into classes
Yp=clf.transformPredictions(Yp)
Yt=np.array(Yt)
Yt=Yt.reshape(Yt.shape[0],1)
a=np.sum(np.logical_and(Yp==0,Yt==0))
b=np.sum(np.logical_and(Yp==1,Yt==0))
c=np.sum(np.logical_and(Yp==0,Yt==1))
d=np.sum(np.logical_and(Yp==1,Yt==1))
"""
#cm=np.array([a,b,c,d]).reshape((2,2))
#print(cm)
# print the results
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import argparse
import random
import sys
from mpmath import mp
from common import print_integral_single_input
from common.randomgen import generate_basis_function
parser = argparse.ArgumentParser()
parser.add_argument("--filename", type=str, required=True, help="Output file name")
parser.add_argument("--max-am", type=int, required=True, help="Maximum AM of the basis functions")
parser.add_argument("--alpha-power", type=int, required=True, help="Maximum power of the exponent (range will be 1e-x to 1e+x)")
parser.add_argument("--xyz-power", type=int, required=True, help="Maximum power of the coordinates (range will be -1e+x to 1e+x)")
parser.add_argument("--seed", type=int, required=True, help="Seed to use for the pseudo-random number generator")
parser.add_argument("--ndigits", type=int, required=True, help="Number of digits for the value of the integral")
parser.add_argument("--ncenter", type=int, required=True, help="Number of centers in the integral (typically 2 or 4)")
parser.add_argument("--ntests", type=int, required=True, help="Number of tests to generate")
args = parser.parse_args()
random.seed(args.seed, version=2)
with open(args.filename, 'w') as f:
f.write("# THIS FILE IS GENERATED VIA A SCRIPT. DO NOT EDIT\n")
f.write("#\n")
f.write("# Input parameters for integral generated with:\n")
f.write("# " + " ".join(sys.argv[:]) + "\n")
f.write("#\n")
f.write(str(args.ntests))
f.write("\n")
for i in range(args.ntests):
entry = []
for n in range(args.ncenter):
bf = generate_basis_function(args.max_am, args.alpha_power, args.xyz_power, args.ndigits)
entry.append(bf)
print_integral_single_input(f, entry)
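# Hedged invocation example (the script name and argument values below are
# illustrative only, not taken from the original project):
#   python generate_integral_tests.py --filename out.dat --max-am 2 \
#       --alpha-power 2 --xyz-power 1 --seed 1234 --ndigits 32 \
#       --ncenter 2 --ntests 50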
|
nilq/baby-python
|
python
|
# coding: utf-8
r"""Distance conversions"""
from corelib.units.base import create_code
distances = {"mm": 1e-3, "millimeter": 1e-3, "millimeters": 1e-3, "millimetre": 1e-3, "millimetres": 1e-3,
"cm": 1e-2, "centimeter": 1e-2, "centimeters": 1e-2, "centimetre": 1e-2, "centimetres": 1e-2,
"m": 1., "meter": 1., "meters": 1., "metre": 1., "metres": 1.,
"km": 1000., "kilometer": 1000., "kilometers": 1000., "kilometre": 1000., "kilometres": 1000.,
# "in": 0.0254, # in is a reserved keyword in Python
"inch": 0.0254, "inches": 0.0254,
"ft": 0.3048, "foot": 0.3048, "feet": 0.3048,
"yd": 0.9144, "yard": 0.9144, "yards": 0.9144,
"mi": 1609.344, "mile": 1609.344, "miles": 1609.344,
"ftm": 1.8288, "fathom": 1.8288, "fathoms": 1.8288,
"nm": 1852., "nautical_mile": 1852., "nautical_miles": 1852.}
for k in distances.keys():
# code = fs_units.base.create_code("distances", k)
# exec code in module.__dict__
# g = globals()
exec(create_code("distances", k), globals())
def convert(value, to_unit, from_unit):
r"""Convenience function for cases where the to_unit and the from_unit are
in string form
Parameters
----------
value : float or int
to_unit : str
The desired unit
from_unit : str
The input unit
"""
return globals()[to_unit](**{from_unit: value})
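# Hedged usage sketch (not part of the original module): convert() resolves the
# generated unit function by name and passes the source unit as a keyword.
if __name__ == "__main__":
    print(convert(10, to_unit="mi", from_unit="km"))  # ~6.2137 miles
    print(convert(6, to_unit="m", from_unit="ft"))    # 1.8288 metres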
|
nilq/baby-python
|
python
|
kOk = 0
kNoSuchSession = 6
kNoSuchElement = 7
kNoSuchFrame = 8
kUnknownCommand = 9
kStaleElementReference = 10
kElementNotVisible = 11
kInvalidElementState = 12
kUnknownError = 13
kJavaScriptError = 17
kXPathLookupError = 19
kTimeout = 21
kNoSuchWindow = 23
kInvalidCookieDomain = 24
kUnexpectedAlertOpen = 26
kNoAlertOpen = 27
kScriptTimeout = 28
kInvalidSelector = 32
kSessionNotCreatedException = 33
# Xwalk-specific status codes.
kXwalkNotReachable = 100
kNoSuchExecutionContext = 101
kDisconnected = 102
kForbidden = 103
kTabCrashed = 104
class _DefaultMessageForStatusCode(object):
    """Maps each status code to its default human-readable message."""
Message = {
kOk: "ok",
kNoSuchSession: "no such session",
kNoSuchElement: "no such element",
kNoSuchFrame: "no such frame",
kUnknownCommand: "unknown command",
kStaleElementReference: "stale element reference",
kElementNotVisible: "element not visible",
kInvalidElementState: "invalid element state",
kUnknownError: "unknown error",
kJavaScriptError: "javascript error",
kXPathLookupError: "xpath lookup error",
kTimeout: "timeout",
kNoSuchWindow: "no such window",
kInvalidCookieDomain: "invalid cookie domain",
kUnexpectedAlertOpen: "unexpected alert open",
kNoAlertOpen: "no alert open",
kScriptTimeout: "asynchronous script timeout",
kInvalidSelector: "invalid selector",
kSessionNotCreatedException: "session not created exception",
kNoSuchExecutionContext: "no such execution context",
kXwalkNotReachable: "xwalk not reachable",
kDisconnected: "disconnected",
kForbidden: "forbidden",
kTabCrashed: "tab crashed",
}
class Status(object):
def __init__(self, code=kOk, details=""):
self.code = code
        if isinstance(details, str) and details:
self.msg = _DefaultMessageForStatusCode.Message[code] + ":" + details
else:
self.msg = _DefaultMessageForStatusCode.Message[code]
def Update(self, other):
self.code = other.code
self.msg = other.msg
def IsOk(self):
return self.code == kOk
def IsError(self):
return self.code != kOk
def Code(self):
return self.code
def Message(self):
return self.msg
def AddDetails(self, details):
self.msg += details
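# Hedged usage sketch (not part of the original module): build an error status
# and inspect it the way a caller would.
if __name__ == '__main__':
    status = Status(kNoSuchElement, "selector '#login' matched nothing")
    assert status.IsError() and not status.IsOk()
    print(status.Code(), status.Message())
    # -> 7 no such element:selector '#login' matched nothing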
|
nilq/baby-python
|
python
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library containing helpers for adding post export metrics for evaluation.
These post export metrics can be included in the add_post_export_metrics
parameter of Evaluate to compute them.
"""
from typing import Any, Dict, List, Optional, Tuple
import tensorflow as tf
from tensorflow_model_analysis import types
from tensorflow_model_analysis.post_export_metrics import metric_keys
from tensorflow_model_analysis.post_export_metrics import post_export_metrics
from tensorflow_model_analysis.proto import metrics_for_slice_pb2 as metrics_pb2
from tensorflow_model_analysis.slicer import slicer_lib as slicer
# pylint: disable=protected-access
@post_export_metrics._export('fairness_indicators')
class _FairnessIndicators(post_export_metrics._ConfusionMatrixBasedMetric):
"""Metrics that can be used to evaluate the following fairness metrics.
* Demographic Parity or Equality of Outcomes.
For each slice measure the Positive* Rate, or the percentage of all
examples receiving positive scores.
* Equality of Opportunity
Equality of Opportunity attempts to match the True Positive* rate
(aka recall) of different data slices.
* Equality of Odds
In addition to looking at Equality of Opportunity, looks at equalizing the
False Positive* rates of slices as well.
The choice to focus on these metrics as a starting point is based primarily on
the paper Equality of Opportunity in Supervised Learning and the excellent
visualization created as a companion to the paper.
https://arxiv.org/abs/1610.02413
http://research.google.com/bigpicture/attacking-discrimination-in-ml/
* Note that these fairness formulations assume that a positive prediction is
associated with a positive outcome for the user--in certain contexts such as
abuse, positive predictions translate to non-opportunity. You may want to use
the provided negative rates for comparison instead.
"""
_thresholds = ... # type: List[float]
_example_weight_key = ... # type: str
_labels_key = ... # type: str
_metric_tag = None # type: str
# We could use the same keys as the ConfusionMatrix metrics, but with the way
# that post_export_metrics are currently implemented, if both
# post_export_metrics were specified we would pop the matrices/thresholds in
# the first call, and have issues with the second.
thresholds_key = metric_keys.FAIRNESS_CONFUSION_MATRIX_THESHOLDS
matrices_key = metric_keys.FAIRNESS_CONFUSION_MATRIX_MATRICES
def __init__(self,
thresholds: Optional[List[float]] = None,
example_weight_key: Optional[str] = None,
target_prediction_keys: Optional[List[str]] = None,
labels_key: Optional[str] = None,
metric_tag: Optional[str] = None,
tensor_index: Optional[int] = None) -> None:
if not thresholds:
thresholds = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
# Determine the number of threshold digits to display as part of the metric
# key. We want lower numbers for readability, but allow differentiation
# between close thresholds.
self._key_digits = 2
for t in thresholds:
if len(str(t)) - 2 > self._key_digits:
self._key_digits = len(str(t)) - 2
super().__init__(
thresholds,
example_weight_key,
target_prediction_keys,
labels_key,
metric_tag,
tensor_index=tensor_index)
def get_metric_ops(
self, features_dict: types.TensorTypeMaybeDict,
predictions_dict: types.TensorTypeMaybeDict,
labels_dict: types.TensorTypeMaybeDict
) -> Dict[str, Tuple[types.TensorType, types.TensorType]]:
values, update_ops = self.confusion_matrix_metric_ops(
features_dict, predictions_dict, labels_dict)
# True positive rate is computed by confusion_matrix_metric_ops as 'recall'.
# pytype: disable=unsupported-operands
values['tnr'] = tf.math.divide_no_nan(values['tn'],
values['tn'] + values['fp'])
values['fpr'] = tf.math.divide_no_nan(values['fp'],
values['fp'] + values['tn'])
values['positive_rate'] = tf.math.divide_no_nan(
values['tp'] + values['fp'],
values['tp'] + values['fp'] + values['tn'] + values['fn'])
values['fnr'] = tf.math.divide_no_nan(values['fn'],
values['fn'] + values['tp'])
values['negative_rate'] = tf.math.divide_no_nan(
values['tn'] + values['fn'],
values['tp'] + values['fp'] + values['tn'] + values['fn'])
values['false_discovery_rate'] = tf.math.divide_no_nan(
values['fp'], values['fp'] + values['tp'])
values['false_omission_rate'] = tf.math.divide_no_nan(
values['fn'], values['fn'] + values['tn'])
# pytype: enable=unsupported-operands
update_op = tf.group(update_ops['fn'], update_ops['tn'], update_ops['fp'],
update_ops['tp'])
value_op = tf.transpose(
a=tf.stack([
values['fn'], values['tn'], values['fp'], values['tp'],
values['precision'], values['recall']
]))
output_dict = {
self._metric_key(self.matrices_key): (value_op, update_op),
self._metric_key(self.thresholds_key): (tf.identity(self._thresholds),
tf.no_op()),
}
for i, threshold in enumerate(self._thresholds):
output_dict[self._metric_key(
metric_keys.base_key(
'positive_rate@%.*f' %
(self._key_digits, threshold)))] = (values['positive_rate'][i],
update_op)
output_dict[self._metric_key(
metric_keys.base_key(
'true_positive_rate@%.*f' %
(self._key_digits, threshold)))] = (values['recall'][i],
update_op)
output_dict[self._metric_key(
metric_keys.base_key(
'false_positive_rate@%.*f' %
(self._key_digits, threshold)))] = (values['fpr'][i], update_op)
output_dict[self._metric_key(
metric_keys.base_key(
'negative_rate@%.*f' %
(self._key_digits, threshold)))] = (values['negative_rate'][i],
update_op)
output_dict[self._metric_key(
metric_keys.base_key(
'true_negative_rate@%.*f' %
(self._key_digits, threshold)))] = (values['tnr'][i], update_op)
output_dict[self._metric_key(
metric_keys.base_key(
'false_negative_rate@%.*f' %
(self._key_digits, threshold)))] = (values['fnr'][i], update_op)
output_dict[self._metric_key(
metric_keys.base_key('false_discovery_rate@%.*f' %
(self._key_digits, threshold)))] = (
values['false_discovery_rate'][i], update_op)
output_dict[self._metric_key(
metric_keys.base_key('false_omission_rate@%.*f' %
(self._key_digits, threshold)))] = (
values['false_omission_rate'][i], update_op)
return output_dict # pytype: disable=bad-return-type
def populate_stats_and_pop(
self, unused_slice_key: slicer.SliceKeyType, combine_metrics: Dict[str,
Any],
output_metrics: Dict[str, metrics_pb2.MetricValue]) -> None:
matrices = combine_metrics.pop(self._metric_key(self.matrices_key))
thresholds = combine_metrics.pop(self._metric_key(self.thresholds_key))
# We assume that thresholds are already sorted.
if len(matrices) != len(thresholds):
raise ValueError(
'matrices should have the same length as thresholds, but lengths '
'were: matrices: %d, thresholds: %d' %
(len(matrices), len(thresholds)))
for threshold, raw_matrix in zip(thresholds, matrices):
# Adds confusion matrix table as well as ratios used for fairness metrics.
if isinstance(threshold, types.ValueWithTDistribution):
threshold = threshold.unsampled_value
output_matrix = post_export_metrics._create_confusion_matrix_proto(
raw_matrix, threshold)
(output_metrics[self._metric_key(metric_keys.FAIRNESS_CONFUSION_MATRIX)]
.confusion_matrix_at_thresholds.matrices.add().CopyFrom(output_matrix))
# If the fairness_indicator in enabled, the slicing inside the tfx evaluator
# config will also be added into this metrics as a subgroup key.
# However, handling the subgroup metrics with slices is still TBD.
@post_export_metrics._export('fairness_auc')
class _FairnessAuc(post_export_metrics._PostExportMetric):
"""Metric that computes bounded AUC for predictions in [0, 1].
This metrics calculates the subgroup auc, the background positive subgroup
negative auc and background negative subgroup positive auc. For more
explanation about the concepts of these auc metrics, please refer to paper
[Measuring and Mitigating Unintended Bias in Text
Classification](https://ai.google/research/pubs/pub46743)
"""
_target_prediction_keys = ... # type: List[str]
_labels_key = ... # type: str
_metric_tag = None # type: str
_tensor_index = ... # type: int
def __init__(self,
subgroup_key: str,
example_weight_key: Optional[str] = None,
num_buckets: int = post_export_metrics._DEFAULT_NUM_BUCKETS,
target_prediction_keys: Optional[List[str]] = None,
labels_key: Optional[str] = None,
metric_tag: Optional[str] = None,
tensor_index: Optional[int] = None) -> None:
"""Create a metric that computes fairness auc.
Predictions should be one of:
(a) a single float in [0, 1]
(b) a dict containing the LOGISTIC key
(c) a dict containing the PREDICTIONS key, where the prediction is
in [0, 1]
Label should be a single float that is either exactly 0 or exactly 1
(soft labels, i.e. labels between 0 and 1 are *not* supported).
Args:
      subgroup_key: The feature key indicating whether this example belongs to
        the subgroup. The tensor mapped to this key should contain an
        integer/float value that is either 1 or 0.
example_weight_key: The key of the example weight column in the features
dict. If None, all predictions are given a weight of 1.0.
num_buckets: The number of buckets used for the curve. (num_buckets + 1)
is used as the num_thresholds in tf.metrics.auc().
target_prediction_keys: If provided, the prediction keys to look for in
order.
labels_key: If provided, a custom label key.
metric_tag: If provided, a custom metric tag. Only necessary to
disambiguate instances of the same metric on different predictions.
tensor_index: Optional index to specify class predictions to calculate
metrics on in the case of multi-class models.
"""
self._subgroup_key = subgroup_key
self._example_weight_key = example_weight_key
self._curve = 'ROC'
self._num_buckets = num_buckets
self._metric_name = metric_keys.FAIRNESS_AUC
self._subgroup_auc_metric = self._metric_key(self._metric_name +
'/subgroup_auc/' +
self._subgroup_key)
self._bpsn_auc_metric = self._metric_key(
f'{self._metric_name}/bpsn_auc/{self._subgroup_key}')
self._bnsp_auc_metric = self._metric_key(self._metric_name + '/bnsp_auc/' +
self._subgroup_key)
super().__init__(
target_prediction_keys=target_prediction_keys,
labels_key=labels_key,
metric_tag=metric_tag,
tensor_index=tensor_index)
def check_compatibility(self, features_dict: types.TensorTypeMaybeDict,
predictions_dict: types.TensorTypeMaybeDict,
labels_dict: types.TensorTypeMaybeDict) -> None:
post_export_metrics._check_feature_present(features_dict,
self._example_weight_key)
post_export_metrics._check_feature_present(features_dict,
self._subgroup_key)
self._get_labels_and_predictions(predictions_dict, labels_dict)
def get_metric_ops(
self, features_dict: types.TensorTypeMaybeDict,
predictions_dict: types.TensorTypeMaybeDict,
labels_dict: types.TensorTypeMaybeDict
) -> Dict[str, Tuple[types.TensorType, types.TensorType]]:
# Note that we have to squeeze predictions, labels, weights so they are all
# N element vectors (otherwise some of them might be N x 1 tensors, and
# multiplying a N element vector with a N x 1 tensor uses matrix
# multiplication rather than element-wise multiplication).
predictions, labels = self._get_labels_and_predictions(
predictions_dict, labels_dict)
predictions = post_export_metrics._flatten_to_one_dim(
tf.cast(predictions, tf.float64))
labels = post_export_metrics._flatten_to_one_dim(
tf.cast(labels, tf.float64))
weights = tf.ones_like(predictions)
subgroup = post_export_metrics._flatten_to_one_dim(
tf.cast(features_dict[self._subgroup_key], tf.bool))
if self._example_weight_key:
weights = post_export_metrics._flatten_to_one_dim(
tf.cast(features_dict[self._example_weight_key], tf.float64))
predictions, labels, weights = (
post_export_metrics
._create_predictions_labels_weights_for_fractional_labels(
predictions, labels, weights))
    # Duplicate the subgroup tensor so that its size matches the predictions,
    # labels and weights produced above.
subgroup = tf.concat([subgroup, subgroup], axis=0)
labels_bool = tf.cast(labels, tf.bool)
pos_subgroup = tf.math.logical_and(labels_bool, subgroup)
neg_subgroup = tf.math.logical_and(
tf.math.logical_not(labels_bool), subgroup)
pos_background = tf.math.logical_and(labels_bool,
tf.math.logical_not(subgroup))
neg_background = tf.math.logical_and(
tf.math.logical_not(labels_bool), tf.math.logical_not(subgroup))
bnsp = tf.math.logical_or(pos_subgroup, neg_background)
bpsn = tf.math.logical_or(neg_subgroup, pos_background)
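    # bnsp keeps only background-negative and subgroup-positive examples, and
    # bpsn keeps only background-positive and subgroup-negative examples; these
    # masks define the restricted populations over which the two bias AUCs
    # below are computed.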
ops_dict = {}
# Add subgroup auc.
ops_dict.update(
post_export_metrics._build_auc_metrics_ops(
self._subgroup_auc_metric, labels, predictions,
tf.multiply(weights, tf.cast(subgroup, tf.float64)),
self._num_buckets + 1, self._curve))
    # Add background positive subgroup negative auc.
ops_dict.update(
post_export_metrics._build_auc_metrics_ops(
self._bpsn_auc_metric, labels, predictions,
tf.multiply(weights, tf.cast(bpsn, tf.float64)),
self._num_buckets + 1, self._curve))
    # Add background negative subgroup positive auc.
ops_dict.update(
post_export_metrics._build_auc_metrics_ops(
self._bnsp_auc_metric, labels, predictions,
tf.multiply(weights, tf.cast(bnsp, tf.float64)),
self._num_buckets + 1, self._curve))
return ops_dict
def populate_stats_and_pop(
self, slice_key: slicer.SliceKeyType, combine_metrics: Dict[str, Any],
output_metrics: Dict[str, metrics_pb2.MetricValue]) -> None:
for metrics_key in (self._subgroup_auc_metric, self._bpsn_auc_metric,
self._bnsp_auc_metric):
if slice_key:
combine_metrics.pop(metric_keys.lower_bound_key(metrics_key))
combine_metrics.pop(metric_keys.upper_bound_key(metrics_key))
combine_metrics.pop(metrics_key)
else:
post_export_metrics._populate_to_auc_bounded_value_and_pop(
combine_metrics, output_metrics, metrics_key)
# pylint: enable=protected-access
|
nilq/baby-python
|
python
|
# terrascript/arukas/d.py
|
nilq/baby-python
|
python
|
# Simple NTP daemon for MicroPython using asyncio.
# Copyright (c) 2020 by Thorsten von Eicken
# Based on https://github.com/wieck/micropython-ntpclient by Jan Wieck
# See LICENSE file
try:
import uasyncio as asyncio
from sys import print_exception
except ImportError:
import asyncio
import sys, socket, struct, time, logging
try:
from time import time_us, settime, adjtime
except ImportError:
# (date(2000, 1, 1) - date(1970, 1, 1)).days * 24*60*60
UNIX_DELTA = 946684800
def time_us():
return int((time.time() - UNIX_DELTA) * 1000000)
def settime(usecs):
print("settime(%d) - a step of %d" % (usecs, time_us() - (usecs + UNIX_DELTA)))
def adjtime(usecs):
print("adjtime(%d) - an adjustment of %d" % (usecs, time_us() - (usecs + UNIX_DELTA)))
from asyncio_dgram import connect as dgram_connect
log = logging.getLogger(__name__)
# (date(2000, 1, 1) - date(1900, 1, 1)).days * 24*60*60
NTP_DELTA = 3155673600
# Delta from MP Epoch of 2000/1/1 to NTP Epoch 1 of Feb 7, 2036 06:28:16 UTC
# NTP_DELTA = 1139293696
# Offsets into the NTP packet
OFF_ORIG = 24
OFF_RX = 32
OFF_TX = 40
# Poll and adjust intervals
MIN_POLL = 64  # never poll faster than every 64 seconds
MAX_POLL = 1024 # default maximum poll interval
# ntp2mp converts from NTP seconds+fraction with an Epoch 1 of Feb 7, 2036 06:28:16 UTC
# to MP microseconds with an Epoch of 2000/1/1
def ntp2mp(secs, frac):
usec = (frac * 1000000) >> 32
# print(secs, frac, "->", secs - NTP_DELTA, (secs - NTP_DELTA) * 1000000, usec)
return ((secs - NTP_DELTA) * 1000000) + usec
# mp2ntp converts from MP microseconds to NTP seconds and frac
def mp2ntp(usecs):
(secs, usecs) = divmod(usecs, 1000000)
return (secs + NTP_DELTA, (usecs << 32) // 1000000)
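# Worked example (illustrative, not part of the original module): one and a
# half seconds past the MP epoch round-trips exactly through both helpers:
#   mp2ntp(1500000)                 -> (NTP_DELTA + 1, 0x80000000)
#   ntp2mp(NTP_DELTA + 1, 1 << 31)  -> 1500000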
# ntpclient -
# Class implementing the uasyncio based NTP client
class SNTP:
def __init__(self, host="pool.ntp.org", poll=MAX_POLL, max_step=1):
self._host = host
self._sock = None
self._addr = None
self._send = None
self._recv = None
self._close = None
self._req_poll = poll
self._min_poll = MIN_POLL
self._max_step = int(max_step * 1000000)
self._poll_task = None
def start(self):
self._poll_task = asyncio.get_event_loop().create_task(self._poller())
async def stop(self):
if self._poll_task is not None:
self._poll_task.cancel()
try:
await self._poll_task
except:
pass
self._close()
self._poll_task = None
async def _poll(self):
# We try to stay with the same server as long as possible. Only
# lookup the address on startup or after errors.
if self._sock is None:
self._addr = socket.getaddrinfo(self._host, 123)[0][-1]
log.debug("server %s->%s", self._host, self._addr)
if sys.implementation.name == "micropython":
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._sock.connect(self._addr)
stream = asyncio.StreamReader(self._sock)
async def write_drain(pkt):
stream.write(pkt)
await stream.drain()
self._send = write_drain
self._recv = lambda length: stream.read(length)
self._close = lambda: self._sock.close()
else:
stream = await dgram_connect(self._addr)
async def stream_send(pkt):
return await stream.send(pkt)
self._send = stream_send
async def stream_recv(length):
return (await stream.recv())[0]
self._recv = stream_recv
self._close = lambda: stream.close()
# Send the NTP v3 request to the server
wbuf = bytearray(48)
wbuf[0] = 0b00011011
send_us = time_us()
send_ntp = mp2ntp(send_us)
struct.pack_into("!II", wbuf, OFF_TX, send_ntp[0], send_ntp[1]) # set tx timestamp
await self._send(wbuf)
# Get server reply
while True:
# Raises asyncio.TimeoutError on time-out
rbuf = await asyncio.wait_for(self._recv(48), timeout=1)
recv_us = time_us()
# Verify it's truly a response to our request
orig_ntp = struct.unpack_from("!II", rbuf, OFF_ORIG) # get originate timestamp
if orig_ntp == send_ntp:
break
# Calculate clock step to apply per RFC4330
rx_us = ntp2mp(*struct.unpack_from("!II", rbuf, OFF_RX)) # get server recv timestamp
tx_us = ntp2mp(*struct.unpack_from("!II", rbuf, OFF_TX)) # get server transmit timestamp
delay = (recv_us - send_us) - (tx_us - rx_us)
step = ((rx_us - send_us) + (tx_us - recv_us)) // 2
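        # With T1=send_us (client send), T2=rx_us (server receive), T3=tx_us
        # (server transmit) and T4=recv_us (client receive), these are the
        # standard SNTP formulas: delay = (T4-T1) - (T3-T2) and
        # offset = ((T2-T1) + (T3-T4)) / 2.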
tup = struct.unpack_from("!IIIIII", rbuf, OFF_ORIG)
r = mp2ntp(recv_us)
# log.debug( "orig=[%d,%x] rx=[%d,%x] tx=[%d,%x] recv=[%d,%x] -> delay=%fms step=%dus",
# tup[0], tup[1], tup[2], tup[3], tup[4], tup[5], r[0], r[1], delay / 1000, step)
return (delay, step)
async def _poller(self):
self._status = 0
while True:
# print("\nperforming NTP query")
try:
                self._status = (self._status << 1) & 0xFFFF
(delay_us, step_us) = await self._poll()
if step_us > self._max_step or -step_us > self._max_step:
# print(time.localtime())
                    (tgt_s, tgt_us) = divmod(time_us() + step_us, 1000000)
log.warning("stepping to %s", time.localtime(tgt_s))
settime(tgt_s, tgt_us)
# print(time.localtime())
else:
lvl = logging.DEBUG if abs(step_us) < 10000 else logging.INFO
log.log(lvl, "adjusting by %dus (delay=%dus)", step_us, delay_us)
adjtime(step_us)
                self._status |= 1
await asyncio.sleep(61)
except asyncio.TimeoutError:
log.warning("%s timed out", self._host)
if (self._status & 0x7) == 0:
# Three failures in a row, force fresh DNS look-up
                    self._sock = None
await asyncio.sleep(11)
except OSError as e:
# Most likely DNS lookup failure
log.warning("%s: %s", self._host, e)
                self._sock = None
await asyncio.sleep(11)
except Exception as e:
log.error("%s", e)
print_exception(e)
await asyncio.sleep(121)
def start(mqtt, config):
from utime import tzset
tzset(config.pop("zone", "UTC+0"))
async def on_init(config):
ss = SNTP(**config)
ss.start()
mqtt.on_init(on_init(config))
# if __name__ == "__main__":
#
# logging.basicConfig(level=logging.DEBUG)
#
# async def runner():
# ss = SNTP(host="192.168.0.1")
# ss.start()
# while True:
# await asyncio.sleep(300)
#
# asyncio.run(runner())
|
nilq/baby-python
|
python
|
import re
import random
TOOBIG = -1
TOOSMALL = -2
NOTNEW = -3
EMPTY = -1
class NameJoiner:
def __init__(self, str1, str2):
words = [str1, str2]
random.shuffle(words)
self.fullStartName = words[0]
self.fullEndName = words[1]
self.initVariables()
def initVariables(self):
self.lower_limit = min(len(self.fullStartName), len(self.fullEndName))
self.upper_limit = max(len(self.fullStartName), len(self.fullEndName)) + self.lower_limit - 1
self.firstPositions = self.getKeyVocalsPositions(self.fullStartName)
self.secondPositions = self.getKeyVocalsPositions(self.fullEndName)
def join(self):
res = self.tryToJoin()
        if res == EMPTY:
self.fullStartName, self.fullEndName = self.fullEndName, self.fullStartName
self.initVariables()
res = self.tryToJoin()
            if res == EMPTY:
self.initVariables()
return self.fullStartName+self.fullEndName[self.secondPositions[-1]+1:]
return res
def tryToJoin(self):
firstSplitPlace = self.chooseRandomFirstSplit()
secondSplitPlace = NOTNEW
while secondSplitPlace < 0:
secondSplitPlace = self.chooseRandomSecondSplit(firstSplitPlace)
if secondSplitPlace < 0:
self.handleErrorWithFirstPlace(
secondSplitPlace, firstSplitPlace)
firstSplitPlace = self.chooseRandomFirstSplit()
if firstSplitPlace == EMPTY:
return EMPTY
else:
namex = self.fullStartName[:firstSplitPlace] + \
self.fullEndName[secondSplitPlace:]
if namex in [self.fullStartName, self.fullEndName]:
self.secondPositions = [
i for i in self.secondPositions if i != secondSplitPlace-1]
if len(self.secondPositions) == 0:
self.secondPositions = self.getKeyVocalsPositions(
self.fullEndName)
self.firstPositions = self.erasePlaceEq(
firstSplitPlace)
firstSplitPlace = self.chooseRandomFirstSplit()
if firstSplitPlace == EMPTY:
return EMPTY
secondSplitPlace = NOTNEW
return self.fullStartName[:firstSplitPlace] + self.fullEndName[secondSplitPlace:]
def handleErrorWithFirstPlace(self, error, firstSplitPlace):
if error == TOOBIG: # Need smaller first part
self.firstPositions = self.erasePlacesGreaterEq(firstSplitPlace)
elif error == TOOSMALL: # Need greater first part
self.firstPositions = self.erasePlacesLowerEq(firstSplitPlace)
def erasePlaceEq(self, position):
p = position - 1
res = [i for i in self.firstPositions if i != p]
return res
def erasePlacesLowerEq(self, position):
p = position - 1
res = [i for i in self.firstPositions if i > p]
return res
def erasePlacesGreaterEq(self, position):
p = position - 1
res = [i for i in self.firstPositions if i < p]
return res
def chooseRandomFirstSplit(self):
if len(self.firstPositions) == 0:
print(
f"{self.fullStartName} has been omitted while trying to join with {self.fullEndName}")
            return EMPTY
pos = random.choice(self.firstPositions) + 1
return pos
def getKeyVocalsPositions(self, s):
regex_iter = re.finditer(r'[aeiouy][^aeiou]', s.lower())
positions = [i.start() for i in regex_iter]
return positions
def chooseRandomSecondSplit(self, firstSplitPlace):
minimumCharactersLeft = self.lower_limit - firstSplitPlace
maximumCharactersLeft = self.upper_limit - firstSplitPlace
minimumIndex = len(self.fullEndName) - maximumCharactersLeft
maximumIndex = len(self.fullEndName) - minimumCharactersLeft
filtered_big_positions = [
i for i in self.secondPositions if i <= maximumIndex]
if len(filtered_big_positions) == 0:
            return TOOSMALL
filtered_positions = [
i for i in self.secondPositions if minimumIndex <= i + 1 <= maximumIndex]
if len(filtered_positions) == 0:
            return TOOBIG
return random.choice(filtered_positions) + 1
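# Usage sketch (not part of the original module; the names are arbitrary and
# the output is randomized, so the result shown is just one possibility):
if __name__ == "__main__":
    joiner = NameJoiner("Alexandra", "Benjamin")
    print(joiner.join())  # e.g. "Alexanjamin"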
|
nilq/baby-python
|
python
|
import sys
from PySide2.QtWidgets import QApplication, QWidget, QPushButton, QLineEdit, QTextBrowser, QMainWindow, QTextEdit
from PySide2.QtCore import QFile, Slot
from ui_mainwindow import Ui_MainWindow
class MainWindow(QMainWindow):
def __init__(self, filename : str):
super(MainWindow, self).__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.filename = filename
self.getChildren()
self.rechercher_button.clicked.connect(self.check_if_in_table)
self.inserer_button.clicked.connect(self.add_in_table)
self.supprimer_button.clicked.connect(self.delete_from_table)
self.output_field.setReadOnly(True)
self.output_field.append("Messages :\n")
self.table_visualizer.setReadOnly(True)
self.visualize_table()
self.show()
def getChildren(self):
self.central_widget = self.findChild(QWidget, "centralwidget")
self.inserer_button = self.central_widget.findChild(QPushButton,"inserer_button")
self.rechercher_button = self.central_widget.findChild(QPushButton,"rechercher_button")
self.supprimer_button = self.central_widget.findChild(QPushButton,"supprimer_button")
self.output_field = self.central_widget.findChild(QTextEdit,"output_field")
self.table_visualizer = self.central_widget.findChild(QTextEdit,"table_visualizer")
self.corps_text_input = self.central_widget.findChild(QLineEdit,"corps_text_input")
self.tete_text_input = self.central_widget.findChild(QLineEdit,"tete_text_input")
self.antenne_text_input = self.central_widget.findChild(QLineEdit,"antenne_text_input")
self.bras_text_input = self.central_widget.findChild(QLineEdit,"bras_text_input")
self.jambes_text_input = self.central_widget.findChild(QLineEdit,"jambes_text_input")
self.doigts_text_input = self.central_widget.findChild(QLineEdit,"doigts_text_input")
self.yeux_text_input = self.central_widget.findChild(QLineEdit,"yeux_text_input")
def visualize_table(self):
        self.table_visualizer.setText("Fichier texte \"{}\" :\n".format(self.filename))
with open(self.filename, "r") as table:
for line in table.readlines():
entry_as_list = line.replace("\n", '').split(",")[:-1]
self.table_visualizer.append("{}".format(entry_as_list))
def _get_input_list(self) -> list:
input_list = []
input_list.append(self.corps_text_input.text())
input_list.append(self.tete_text_input.text())
input_list.append(self.antenne_text_input.text())
input_list.append(self.bras_text_input.text())
input_list.append(self.jambes_text_input.text())
input_list.append(self.doigts_text_input.text())
input_list.append(self.yeux_text_input.text())
new_list = []
for val in input_list:
if(val == ''):
new_list.append('0')
else:
new_list.append(val)
return new_list
def check_if_in_table(self, input_list : list = None, standalone : bool = True) -> bool:
"""
        Check whether a given input_list is already in the table.

        Can be called on its own (standalone=True), in which case the boolean
        return value doesn't matter but a message is written to the GUI; or it
        is called by add_in_table() and delete_from_table(), where the boolean
        return value is used and nothing is written to the GUI.
"""
print("In function check_if_in_table()")
if(not input_list):
input_list = self._get_input_list()
print("Input list is {}".format(input_list))
is_numeric = self._is_input_numeric(input_list)
if(not is_numeric and standalone):
print("ERROR, INPUT IS NOT NUMERIC")
self.output_field.append("{} n'est pas valide. Reessayez (chiffres uniquement).".format(input_list))
else:
with open(self.filename, "r") as table:
print("Opening file")
for line in table.readlines():
entry_as_list = line.replace("\n", '').split(",")[:-1]
if(input_list == entry_as_list):
print("{} is present in table".format(input_list))
if(standalone): # Write only when check_if_in_table is called by itself
self.output_field.append("{} est déjà présent dans la table".format(input_list))
return True
if(standalone):
self.output_field.append("{} n'est pas présent dans la table".format(input_list))
return False
def add_in_table(self):
print("In function add_in_table()")
input_list = self._get_input_list()
print("Input list is {}".format(input_list))
if(self.check_if_in_table(input_list, False)):
print("{} already in table. Aborting...".format(input_list))
self.output_field.append("{} est déjà présent dans la table. Abandon...".format(input_list))
else:
with open(self.filename, "a") as table:
status = self._add_one_line(table, input_list)
if(status):
self.output_field.append("{} inséré.".format(input_list))
self.visualize_table()
def _is_input_numeric(self, input_list) -> bool:
write_line = True
# Check if everything is correct with the input list
for input in input_list:
write_line = write_line and input.isnumeric()
return write_line
def _add_one_line(self, file, input_list) -> bool:
# Check if everything is correct with the input list
is_numeric = self._is_input_numeric(input_list)
# If correct, write to file
if(is_numeric):
for input in input_list:
file.write("{},".format(int(input)))
file.write("\n")
print("Inserted {}".format(input_list))
# Else, print error
else:
print("ERROR, INPUT IS NOT NUMERIC")
self.output_field.append("{} n'est pas valide. Reessayez (chiffres uniquement).".format(input_list))
return is_numeric # Report status
def delete_from_table(self):
print("In function delete_from_table()")
input_list = self._get_input_list()
if(not self._is_input_numeric(input_list)):
print("ERROR, INPUT IS NOT NUMERIC")
self.output_field.append("{} n'est pas valide. Reessayez (chiffres uniquement).".format(input_list))
return
all_entries = []
if(self.check_if_in_table(input_list, False)):
print("Deleting {}".format(input_list))
self.output_field.append("{} supprimé.".format(input_list))
# Reading all line from file and converting them to a list
with open(self.filename, "r") as table:
for line in table.readlines():
all_entries.append(line.replace("\n", '').split(",")[:-1])
print(line)
# Re-opening the same file and writing back every item except
# the input_list that we want to delete
with open(self.filename, "w") as table:
for entry in all_entries:
if(entry != input_list):
print("Writing {} to file".format(entry))
self._add_one_line(table, entry)
self.visualize_table()
else:
print("{} not in file".format(input_list))
self.output_field.append("{} n'est PAS présent dans la table. Abandon...".format(input_list))
if __name__ == "__main__":
app = QApplication(sys.argv)
# filename = sys.argv[1]
filename = 'tables.txt'
window = MainWindow(filename)
sys.exit(app.exec_())
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""AstroThings library: imagination and Universe."""
def main():
pass
if __name__ == '__main__':
main()
__version__ = '0.1.0.dev1'
|
nilq/baby-python
|
python
|
from unittest import TestCase
from tilapia.lib.provider.chains.btc.sdk import transaction
class TestTransaction(TestCase):
def test_calculate_vsize(self):
self.assertEqual(79, transaction.calculate_vsize(["P2WPKH"], []))
self.assertEqual(176, transaction.calculate_vsize(["P2WPKH"], ["P2WPKH", "P2PKH", "P2WPKH-P2SH"]))
self.assertEqual(255, transaction.calculate_vsize(["P2PKH"], ["P2WPKH", "P2PKH", "P2WPKH-P2SH"]))
self.assertEqual(199, transaction.calculate_vsize(["P2WPKH-P2SH"], ["P2WPKH", "P2PKH", "P2WPKH-P2SH"]))
self.assertEqual(246, transaction.calculate_vsize([], [], op_return="a" * 200))
|
nilq/baby-python
|
python
|
from applauncher.event import EventManager, KernelReadyEvent, ConfigurationReadyEvent
class TestClass:
def test_events(self):
em = EventManager()
class KernelCounter:
c = 0
@staticmethod
def inc(event):
KernelCounter.c += 1
@staticmethod
def dec(event):
KernelCounter.c -= 1
class OtherCounter:
c = 0
@staticmethod
def inc(event):
OtherCounter.c += 1
@staticmethod
def dec(event):
OtherCounter.c -= 1
assert KernelCounter.c == 0
assert OtherCounter.c == 0
em.add_listener(KernelReadyEvent, KernelCounter.inc)
em.add_listener(ConfigurationReadyEvent, OtherCounter.inc)
assert KernelCounter.c == 0
em.dispatch(KernelReadyEvent())
assert KernelCounter.c == 1
assert OtherCounter.c == 0
em.dispatch(KernelReadyEvent())
assert KernelCounter.c == 2
assert OtherCounter.c == 0
em.dispatch(ConfigurationReadyEvent({"config": "config"}))
assert KernelCounter.c == 2
assert OtherCounter.c == 1
def test_event_content(self):
em = EventManager()
class OtherCounter:
config = None
@staticmethod
def event(event):
OtherCounter.config = event.configuration
assert OtherCounter.config is None
em.dispatch(ConfigurationReadyEvent({"config": "config"}))
assert OtherCounter.config is None
em.add_listener(ConfigurationReadyEvent, OtherCounter.event)
em.dispatch(ConfigurationReadyEvent({"config": "config"}))
assert OtherCounter.config == {"config": "config"}
def test_string_event(self):
"""The same as above but using a string value instead of the event"""
em = EventManager()
class OtherCounter:
config = None
@staticmethod
def event(event):
OtherCounter.config = event.configuration
assert OtherCounter.config is None
em.dispatch(ConfigurationReadyEvent({"config": "config"}))
assert OtherCounter.config is None
em.add_listener(ConfigurationReadyEvent.event_name, OtherCounter.event)
em.dispatch(ConfigurationReadyEvent({"config": "config"}))
assert OtherCounter.config == {"config": "config"}
|
nilq/baby-python
|
python
|
from flask_wtf import FlaskForm
from wtforms import BooleanField, SelectField, IntegerField
from wtforms.validators import Required, Optional
from .vcconnect import get_main_area_dropdown, get_service_dropdown, get_client_group_dropdown
class OrgSearchForm(FlaskForm):
main_area_id = SelectField('Area',
coerce=int,
choices=get_main_area_dropdown(),
validators=[Optional()])
service_id = SelectField('Service',
coerce=int,
choices=get_service_dropdown(),
validators=[Optional()])
client_group_id = SelectField('Client Group',
coerce=int,
choices=get_client_group_dropdown(),
validators=[Optional()])
class VenueSearchForm(FlaskForm):
area_id = SelectField('Area',
coerce=int,
choices=get_main_area_dropdown(),
validators=[Optional()])
venue_car_parking = BooleanField('Car Parking')
disabled = BooleanField('Disabled Parking')
catering = BooleanField('Catering')
event_management = BooleanField('Event Management')
hearing = BooleanField('Hearing Loop')
photocopy = BooleanField('Photocopying')
refreshments = BooleanField('Refreshments')
wheelchair = BooleanField('Wheelchair Access')
max_capacity = IntegerField('Room Capacity', validators=[Optional()])
|
nilq/baby-python
|
python
|
from airflow.hooks.base_hook import BaseHook
def get_conn(conn_id):
# get connection by name from BaseHook
conn = BaseHook.get_connection(conn_id)
return conn
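# Usage sketch (assumes a connection named "my_postgres" has been configured in
# Airflow; the attributes below are the standard Connection fields):
#
#   conn = get_conn("my_postgres")
#   uri = "postgresql://{c.login}:{c.password}@{c.host}:{c.port}/{c.schema}".format(c=conn)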
|
nilq/baby-python
|
python
|
import sys
import torch.nn as nn
from net6c import ClusterNet6c, ClusterNet6cTrunk
from vgg import VGGNet
__all__ = ["ClusterNet6cTwoHead"]
class ClusterNet6cTwoHeadHead(nn.Module):
def __init__(self, config, output_k, semisup=False):
super(ClusterNet6cTwoHeadHead, self).__init__()
self.batchnorm_track = config.batchnorm_track
self.cfg = ClusterNet6c.cfg
num_features = self.cfg[-1][0]
self.semisup = semisup
# Features are downsampled three times by MaxPool layers, size is halved every time, so each dimension is
# effectively divided by 8
if config.input_sz == [24, 24]:
features_sp_size = [3, 3]
elif config.input_sz == [64, 64]:
features_sp_size = [8, 8]
elif config.input_sz == [64, 216]:
features_sp_size = [8, 27]
else:
raise NotImplementedError("input images have to be of size 24x24, 64x64 or 64x216")
if not semisup:
self.num_subheads = config.num_subheads
# is default (used for iid loss)
# use multi heads
# include softmax
self.heads = nn.ModuleList([
nn.Sequential(
nn.Linear(num_features * features_sp_size[0] * features_sp_size[1], output_k),
nn.Softmax(dim=1)
)
                for _ in range(self.num_subheads)
])
else:
self.head = nn.Linear(num_features * features_sp_size[0] * features_sp_size[1], output_k)
def forward(self, x, kmeans_use_features=False):
if not self.semisup:
results = []
            for i in range(self.num_subheads):
if kmeans_use_features:
results.append(x) # duplicates
else:
results.append(self.heads[i](x))
return results
else:
return self.head(x)
class ClusterNet6cTwoHead(VGGNet):
cfg = [(64, 1), ('M', None), (128, 1), ('M', None), (256, 1), ('M', None), (512, 1)]
def __init__(self, config):
super(ClusterNet6cTwoHead, self).__init__()
assert len(config.output_ks) == 2
self.batchnorm_track = config.batchnorm_track
self.trunk = ClusterNet6cTrunk(config)
self.head_A = ClusterNet6cTwoHeadHead(config, output_k=config.output_ks[0])
semisup = (hasattr(config, "semisup") and config.semisup)
print("semisup: %s" % semisup)
self.head_B = ClusterNet6cTwoHeadHead(config, output_k=config.output_ks[1], semisup=semisup)
self._initialize_weights()
def forward(self, x, head_idx=1, kmeans_use_features=False, trunk_features=False, penultimate_features=False):
if penultimate_features:
print("Not needed/implemented for this arch")
exit(1)
# default is index 1 (for head B) for use by eval code
# training script switches between A (index 0) and B
x = self.trunk(x)
if trunk_features: # for semisup
return x
# returns list or single
if head_idx == 0:
x = self.head_A(x, kmeans_use_features=kmeans_use_features)
elif head_idx == 1:
x = self.head_B(x, kmeans_use_features=kmeans_use_features)
else:
assert False, "Index too high for TwoHead architecture"
return x
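# Usage sketch (assumes a config namespace like the one used by the training
# scripts; the attribute values below are illustrative, not canonical):
#
#   config.input_sz = [64, 64]; config.output_ks = [70, 10]
#   config.num_subheads = 5; config.batchnorm_track = True
#   net = ClusterNet6cTwoHead(config)
#   outs = net(images, head_idx=1)  # list of num_subheads softmax tensors, each [N, 10]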
|
nilq/baby-python
|
python
|
import webbrowser
import click
from ghutil.types import Repository
@click.command()
@Repository.argument('repo')
def cli(repo):
""" Open a repository in a web browser """
webbrowser.open_new(repo.data["html_url"])
|
nilq/baby-python
|
python
|
from __future__ import print_function
import os
import json
from typtop.dbaccess import (
UserTypoDB, get_time, on_wrong_password,
on_correct_password, logT, auxT,
FREQ_COUNTS, INDEX_J, WAITLIST_SIZE,
WAIT_LIST, pkdecrypt,
NUMBER_OF_ENTRIES_TO_ALLOW_TYPO_LOGIN,
    call_check
)
import typtop.config as config
import typtop.dbaccess as dbaccess
import yaml
import pytest
import time
import pwd
dbaccess.WARM_UP_CACHE = False
NN = 5
secretAuxSysT = "SecretAuxData"
ORIG_PW_ID = 'OrgPwID'
dbaccess.NUMBER_OF_ENTRIES_TO_ALLOW_TYPO_LOGIN = 30
dbaccess.WARM_UP_CACHE = 0
@pytest.fixture(autouse=True)
def no_requests(monkeypatch):
monkeypatch.setattr("typtop.config.TEST", True)
def get_username():
user = pwd.getpwuid(os.getuid()).pw_name
return user
def DB_path():
    # TODO: for some reason it doesn't work
user = get_username()
db = UserTypoDB(user, debug_mode=True)
return db.get_db_path()
#return "/home/{}/{}.db".format(get_username(), DB_NAME)
def remove_DB():
print(DB_path())
os.remove(DB_path())
def start_DB():
remove_DB()
db = UserTypoDB(get_username(), debug_mode=True)
db.init_typtop(get_pw(), allow_typo_login=True)
return db
def test_warmed_cache():
t1, dbaccess.WARM_UP_CACHE = dbaccess.WARM_UP_CACHE, 1
t2, dbaccess.NUMBER_OF_ENTRIES_TO_ALLOW_TYPO_LOGIN = dbaccess.NUMBER_OF_ENTRIES_TO_ALLOW_TYPO_LOGIN, 0
typoDB = start_DB()
assert typoDB.check(pws[1]), pws[1]
assert typoDB.check(pws[0]), pws[0]
dbaccess.WARM_UP_CACHE, dbaccess.NUMBER_OF_ENTRIES_TO_ALLOW_TYPO_LOGIN = t1, t2
def count_real_typos_in_cache(t_db, PW_CHANGE=False):
flist_ctx = t_db.get_from_auxtdb(FREQ_COUNTS) # , yaml.load)
f_list_all = json.loads(pkdecrypt(t_db._sk, flist_ctx))
f_list = [f for f in f_list_all if f>0]
return len(f_list), sum(f_list)
def test_login_settings():
typoDB = start_DB()
#db = typoDB.getdb()
assert typoDB.is_allowed_login()
typoDB.allow_login(allow=False)
assert not typoDB.is_allowed_login()
typoDB.allow_login()
assert typoDB.is_allowed_login()
@pytest.mark.skip(reason='Root is allowed now')
def test_root_login():
with pytest.raises(AssertionError):
db = UserTypoDB('root', debug_mode=True)
def test_db_not_readable():
import stat
db = start_DB()
on_correct_password(db, get_pw())
on_wrong_password(db, get_pw()+'1')
s = os.stat(db.get_db_path()).st_mode
assert not ((stat.S_IROTH | stat.S_IWOTH) & s)
remove_DB()
def test_waitlist(isStandAlone=True):
typoDB = start_DB()
pwset = set(pws[:4])
for i in range(4):
typoDB.check(pws[i])
typos_in_waitlist = set()
install_id = typoDB.get_installation_id()
for typo_ctx in typoDB.get_from_auxtdb(WAIT_LIST):
typo_txt = pkdecrypt(typoDB._sk, typo_ctx)
typo, ts = yaml.safe_load(typo_txt)
if not typo.startswith(install_id):
typos_in_waitlist.add(typo)
assert not (typos_in_waitlist - pwset) and not (pwset - typos_in_waitlist)
def test_unintialized_exceptions():
db = UserTypoDB(get_username(), debug_mode=True)
assert not call_check(0, get_username(), get_pw())
assert call_check(1, get_username(), get_pw())
db.init_typtop(get_pw())
assert call_check(0, get_username(), get_pw()) == 0
assert call_check(0, get_username(), pws[1]) == 0
assert call_check(0, get_username(), get_pw()) == 0
def test_typtop_id():
db = start_DB()
oid = db.get_installation_id()
db.reinit_typtop(pws[0])
nid = db.get_installation_id()
assert oid == nid
def test_add_to_cache(isStandAlone=True):
typoDB = start_DB()
indexj = typoDB.get_from_auxtdb(INDEX_J) # , int)
typoDB.check(pws[0])
typoDB.check(pws[0])
typoDB.check(pws[1])
typoDB.check(pws[5])
typoDB.check(pws[2])
assert (typoDB.get_from_auxtdb(INDEX_J) - indexj) % WAITLIST_SIZE == 5
typoDB.check(get_pw())
# ntypo, fcount = count_real_typos_in_cache(typoDB)
# assert ntypo == 3
# assert fcount > 5
    # No idea what the following is doing.
# sk_dict1, isIn_t1 = typoDB.fetch_from_cache(pws[0], False, False)
# t1_h,_ = sk_dict1.popitem()
# assert isIn_t1
# assert hash_t.count(H_typo=t1_h) == 1
# assert
# assert hash_t.count(H_typo=t5_h) == 1
if isStandAlone:
remove_DB()
else:
return typoDB
def test_alt_typo(isStandAlone = True):
typoDB = test_add_to_cache(False)
# assert count_real_typos_in_cache(typoDB) > 0
    for _ in range(30):
typoDB.check_login_count(update=True)
for _ in range(5):
typoDB.check(pws[4])
## print("added 5 typos to waitlist")
assert typoDB.check(get_pw())
assert typoDB.check(pws[4])
if isStandAlone:
remove_DB()
else:
return typoDB
def test_many_entries(isStandAlone = True):
print("TEST MANY ENTRIES")
BIG = 60
config.WARM_UP_CACHE = True
typoDB = start_DB()
log_t = typoDB.getdb('Log')
assert all(a['ts'] == -1 for a in log_t)
assert len(log_t)> 0 and len(log_t) <= len(config.warm_up_with(get_pw()))
print("start log:{}".format(len(log_t)))
for typ in listOfOneDist(BIG):
typoDB.check(typ)
typoDB.check(get_pw())
print("log len:{}".format(len(log_t)))
# print("hash len:{}".format(count_real_typos_in_cache(typoDB)))
assert(len(log_t) >= WAITLIST_SIZE + 1) # plus the original password
# realIn = min(BIG, NN)
# tcnt, fcnt = count_real_typos_in_cache(typoDB)
config.WARM_UP_CACHE = False
if isStandAlone:
remove_DB()
else:
return typoDB
def test_deleting_logs(isStandAlone = True):
typoDB = start_DB()
insert = 10
for i in range(10):
typoDB.check(pws[i%len(pws)])
typoDB.check(get_pw())
log_t = typoDB.getdb('Log')
assert len(log_t) >= 11 # because that's the length of the log so far
to_send, log_iter = typoDB.get_last_unsent_logs_iter()
assert not to_send
typoDB.update_last_log_sent_time('0')
to_send,log_iter = typoDB.get_last_unsent_logs_iter()
count = len(list(log_iter))
now = get_time()
typoDB.update_last_log_sent_time(now)
typoDB.update_last_log_sent_time(now,delete_old_logs=True)
assert len(log_t) == 0
if isStandAlone:
remove_DB()
else:
return typoDB
def test_pw_change(isStandAlone = True):
typoDB = test_alt_typo(isStandAlone = False)
db = typoDB._db
typoDB.reinit_typtop(new_pw())
# assert count_real_typos_in_cache(typoDB,True)[0] == 1
# assert len(db[logT]) == 0
assert len(db[auxT][WAIT_LIST]) == WAITLIST_SIZE
for newTypo in listOfOneDist(5):
typoDB.check(newTypo)
typoDB.check(new_pw())
# ntypo, ftypo = count_real_typos_in_cache(typoDB, True)
# assert ntypo == 1
for newTypo in listOfOneDist(5):
typoDB.check(newTypo)
assert not typoDB.check(get_pw())
if isStandAlone:
remove_DB()
else:
return typoDB
def test_edit_dist_entropy_cap(is_stand_alone=True):
typodb = start_DB()
typodb.allow_login()
on_correct_password(typodb, get_pw())
on_wrong_password(typodb, '')
on_wrong_password(typodb, ' ')
log = typodb._db[logT]
assert all((l['edit_dist'] <= 5 for l in log))
assert all((-10 <= l['rel_entropy'] <= 10 for l in log))
if is_stand_alone:
remove_DB()
else:
return typodb
# TODO: assert some property of logT
def test_logT(is_stand_alone=True):
typoDB = start_DB()
typoDB.allow_login()
assert typoDB.is_allowed_login()
assert not on_wrong_password(typoDB, pws[0])
assert on_correct_password(typoDB, get_pw()) # 1
assert not on_wrong_password(typoDB, pws[0]) # not enough login count
for _ in range(dbaccess.NUMBER_OF_ENTRIES_TO_ALLOW_TYPO_LOGIN-1):
# on_wrong_password(typoDB, pws[0]) # not enough login count
assert typoDB.check(get_pw())
# on_correct_password(typoDB, get_pw())
assert on_wrong_password(typoDB, pws[0]) # now it should work
typoDB.allow_login(allow=False)
assert not on_wrong_password(typoDB, pws[0]) # now it should work
assert on_correct_password(typoDB, get_pw())
typoDB.allow_login(allow=True)
assert on_wrong_password(typoDB, pws[0])
assert set(typoDB._db[logT][0].keys()) == set(config.logT_cols)
if is_stand_alone:
remove_DB()
else:
return typoDB
# TODO: assert some property of logT
# this test takes a bit longer
def test_disabling_first_30_times(isStandAlone = True):
# checks that entry with a typo is allowed
# only after the real pw was entered more than 30 times
typoDB = start_DB()
assert not on_wrong_password(typoDB, pws[0])
assert not on_wrong_password(typoDB, pws[1])
assert on_correct_password(typoDB, get_pw())
# count = 1
# 29 left
    for i in range(29):
print("{}th try".format(i))
assert not on_wrong_password(typoDB, pws[0])
assert not on_wrong_password(typoDB, pws[1])
assert on_correct_password(typoDB, get_pw())
# 30 entries have been done
assert on_wrong_password(typoDB,pws[0])
assert on_wrong_password(typoDB,pws[1])
if isStandAlone:
remove_DB()
else:
return typoDB
def add_pw(pw, correct=False):
db = UserTypoDB(get_username(), debug_mode=False)
if correct:
on_correct_password(db, pw)
else:
on_wrong_password(db, pw)
def test_profile():
typoDB = start_DB()
time_to_add, time_to_delete = 0, 0
    for t in range(10):
t0 = time.time()
        for i in range(10):
add_pw(pws[i%len(pws)], correct=False)
time_to_add += (time.time() - t0)/(i+1)
t0 = time.time()
add_pw(get_pw(), correct=True)
time_to_delete += time.time() - t0
time_to_delete /= (t+1)
time_to_add /= (t+1)
assert time_to_add < 0.06 and time_to_delete < 0.07
remove_DB()
def get_pw():
return 'GoldApp&3'
def new_pw():
return "Beetle*Juice94"
pws = [
'goldApp&3', # 0, lower initial
'gOLDaPP&3', # 1, caps
'GoldApp3', # 2, dropped 1 char, too low entropy
'GoldApp&2', # 3, 1 edit distance
'GoldApp&35', # 4, 1 edit distance
'G0ldAppp&3' # 5, 2 edit dist
]
def listOfOneDist(length):
# using only lower letters
# to avoid shift --> 2 edit dist
m = ord('a')
M = ord('z') + 1 - m
for ii in range(length):
        col = ii // M + 1
newC = chr(ii%M + m)
typo = get_pw()[:col]+newC+get_pw()[col:]
yield typo
# profile()
# pytest.main([__file__])
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import argparse
import os
import sys
from functools import partial
from glob import iglob as glob
from itertools import chain
from dautil.IO import makedirs
from dautil.util import map_parallel
PY2 = sys.version_info[0] == 2
if PY2:
import cPickle as pickle
else:
import pickle
__version__ = '0.1'
def convert(basedir, output, in_path, protocol=2):
out_path = in_path.replace(basedir, output, 1)
if os.path.isfile(out_path):
print('{} existed, skip.'.format(out_path))
return
makedirs(os.path.dirname(out_path))
with open(in_path, 'rb') as f:
data = pickle.load(f) if PY2 else pickle.load(f, encoding='latin1')
with open(out_path, 'wb') as f:
        pickle.dump(data, f, protocol=protocol)
return
def main(args):
_glob = partial(glob, recursive=True) if args.recursive else glob
in_paths = chain(*(_glob(os.path.join(args.basedir, glob_i))
for glob_i in args.glob))
_convert = partial(convert, args.basedir, args.output, protocol=args.protocol)
Nones = map_parallel(
_convert,
in_paths,
mode=('mpi' if args.mpi else 'multiprocessing'),
processes=args.processes
)
if args.verbose:
        print('Finished converting {} pickle files.'.format(len(Nones)))
def cli():
parser = argparse.ArgumentParser(description='Convert pickle to pickle in a certain protocol.')
parser.add_argument('basedir',
help='Base directory of input pickle files.')
parser.add_argument('-o', '--output', required=True,
help='Base directory of output pickle files.')
parser.add_argument('--glob', required=True, nargs='+',
help='Glob pattern from BASEDIR.')
parser.add_argument('-R', '--recursive', action='store_true',
help='If specified, recursive globbing, Python 3 only.')
parser.add_argument('--protocol', type=int, default=2,
help='Output pickle procotol. Default: 2.')
parser.add_argument('-v', '--version', action='version',
version='%(prog)s {}'.format(__version__))
parser.add_argument('-V', '--verbose', action='store_true',
                        help='If specified, print a summary of how many files were converted.')
parser.add_argument('--mpi', action='store_true',
help='If specified, use MPI.')
parser.add_argument('-p', '--processes', type=int, default=1,
help='use p processes with multiprocessing. Hint: use total no. of threads available.')
args = parser.parse_args()
main(args)
if __name__ == "__main__":
cli()
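# Example invocation (the script name and paths are hypothetical):
#   python convert_pickle.py /data/in -o /data/out --glob '**/*.pkl' -R -p 4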
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from scrapy import signals
from scrapy.exporters import CsvItemExporter
class CompanyListStorePipeline:
exporter = None
@classmethod
def from_crawler(cls, crawler):
pipeline = cls()
crawler.signals.connect(pipeline.spider_opened, signals.spider_opened)
crawler.signals.connect(pipeline.spider_closed, signals.spider_closed)
return pipeline
def spider_opened(self, spider):
self.file = open(spider.name + '.csv', 'wb')
self.exporter = CsvItemExporter(self.file)
self.exporter.start_exporting()
def spider_closed(self, spider):
self.exporter.finish_exporting()
self.file.close()
def process_item(self, item, spider):
self.exporter.export_item(item)
return item
|
nilq/baby-python
|
python
|
"""
:mod:`cookie`
-------------
This is a cookie authentication implementation for Pando.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
from .. import auth
from ..utils import to_rfc822, utcnow
from ..website import THE_PAST
MINUTE = datetime.timedelta(seconds=60)
HOUR = 60 * MINUTE
DAY = 24 * HOUR
WEEK = 7 * DAY
TIMEOUT = 2 * HOUR
# Public config knobs
# ===================
# Feel free to modify for your application.
NAME = "auth"
DOMAIN = None
PATH = "/"
HTTPONLY = "Yes, please."
# Hooks
# =====
def inbound_early(request):
"""Authenticate from a cookie.
"""
if 'user' not in request.context:
token = None
if NAME in request.headers.cookie:
token = request.headers.cookie[NAME].value
token = token.decode('US-ASCII')
request.context['user'] = auth.User(token)
def outbound(response):
"""Set outbound auth cookie.
"""
if 'user' not in response.request.context:
# XXX When does this happen? When auth.inbound_early hasn't run, eh?
raise # XXX raise what?
user = response.request.context['user']
if not isinstance(user, auth.User):
raise Exception("If you define 'user' in a simplate it has to be an "
"instance of pando.auth.User.")
if NAME not in response.request.headers.cookie:
# no cookie in the request, don't set one on response
return
elif user.ANON:
# user is anonymous, instruct browser to delete any auth cookie
cookie_value = ''
cookie_expires = THE_PAST
else:
# user is authenticated, keep it rolling for them
cookie_value = user.token
cookie_expires = to_rfc822(utcnow() + TIMEOUT)
# Configure outgoing cookie.
# ==========================
response.headers.cookie[NAME] = cookie_value # creates a cookie object?
cookie = response.headers.cookie[NAME] # loads a cookie object?
cookie['expires'] = cookie_expires
if DOMAIN is not None:
# Browser default is the domain of the resource requested.
# Pando default is the browser default.
cookie['domain'] = DOMAIN
if PATH is not None:
# XXX What's the browser default? Probably /? Or current dir?
# Pando default is "/".
cookie['path'] = PATH
if HTTPONLY is not None:
# Browser default is to allow access from JavaScript.
# Pando default is to prevent access from JavaScript.
cookie['httponly'] = HTTPONLY
|
nilq/baby-python
|
python
|
from query_generator.query import Query
from utils.contracts import Operator, Schema
class UnionOperator(Operator):
def __init__(self, schema: Schema, leftSubQuery: Query, rightSubQuery: Query):
super().__init__(schema)
self._leftSubQuery = leftSubQuery
self._rightSubQuery = rightSubQuery
def generate_code(self) -> str:
return f"{self._leftSubQuery.generate_code()}.unionWith({self._rightSubQuery.generate_code()})"
|
nilq/baby-python
|
python
|