max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
py3plex/algorithms/infomap/examples/python/example-simple.py | awesome-archive/Py3plex | 1 | 16800 | <reponame>awesome-archive/Py3plex<filename>py3plex/algorithms/infomap/examples/python/example-simple.py
from infomap import infomap
# Build a small directed example network, run two-level Infomap on it and
# print the detected module assignment for every leaf node.
infomap_wrapper = infomap.Infomap("--two-level")

# Add weight as an optional third argument
links = [
    (0, 1), (0, 2), (0, 3),
    (1, 0), (1, 2),
    (2, 1), (2, 0),
    (3, 0), (3, 4), (3, 5),
    (4, 3), (4, 5),
    (5, 4), (5, 3),
]
for source, target in links:
    infomap_wrapper.addLink(source, target)

infomap_wrapper.run()

tree = infomap_wrapper.tree

print("Found %d modules with codelength: %f" % (tree.numTopModules(), tree.codelength()))

print("\n#node module")
for node in tree.leafIter():
    print("%d %d" % (node.physIndex, node.moduleIndex()))
| 2.8125 | 3 |
api/tests.py | everett-toews/metaslacker | 90 | 16801 | <filename>api/tests.py
import unittest
class MainTestCase(unittest.TestCase):
    """Sanity check that basic integer arithmetic behaves as expected."""

    def test_two_and_two(self):
        """2 + 2 equals 4 and differs from a few wrong candidates."""
        total = 2 + 2
        self.assertEqual(total, 4)
        for wrong in (5, 6, 22):
            self.assertNotEqual(total, wrong)
# Allow running this module directly: `python tests.py`.
if __name__ == '__main__':
    unittest.main()
| 2.8125 | 3 |
tools/modules/verify.py | andscha/containerization-for-sap-s4hana | 6 | 16802 | <filename>tools/modules/verify.py
# ------------------------------------------------------------------------
# Copyright 2020, 2021 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
""" Verify settings in configuration YAML file (helper functions) """
# Global modules
# None
# Local modules
from modules.command import (
CmdShell,
CmdSsh
)
from modules.constants import getConstants
from modules.exceptions import RpmFileNotFoundException
from modules.ocp import ocLogin
from modules.tools import (
refSystemIsStandard,
areContainerMemResourcesValid,
getRpmFileForPackage,
strBold,
getHdbCopySshCommand
)
# Functions for formatting the output
def showMsgOk(text):
    """ print text with header """
    # Success marker; the bracketed prefix keeps output columns aligned.
    print("[Ok ] " + text)
def showMsgErr(text):
    """ print text with header """
    # Error marker; 'Error' is emphasized via ANSI bold (strBold helper).
    print('[' + strBold('Error') + '] ' + text)
def showMsgInd(text):
    """ print text with header """
    # Indented continuation line belonging to a preceding Ok/Error message.
    print("[.....] " + text)
# Classes
class Verify():
    """ Verify various configuration settings

    Each _verify* method prints its findings via showMsgOk/showMsgErr and
    returns True on success, so verify() can aggregate an overall result
    while still reporting every individual problem.
    """

    def __init__(self, ctx):
        self._ctx = ctx
        # One dedicated, non-reused SSH connection per target host.
        self._cmdSshNfs = CmdSsh(ctx, ctx.cf.nfs.host.name, ctx.cr.nfs.user,
                                 reuseCon=False)
        self._cmdSshNws4 = CmdSsh(ctx, ctx.cf.refsys.nws4.host.name, ctx.cr.refsys.nws4.sidadm,
                                  reuseCon=False)
        self._cmdSshHdb = CmdSsh(ctx, ctx.cf.refsys.hdb.host.name, ctx.cr.refsys.hdb.sidadm,
                                 reuseCon=False)

    # Public methods

    def verify(self):
        """ Verify various configuration settings """
        # Deliberately '<check>() and success' (not 'success and <check>()')
        # so every check runs and reports even after an earlier failure.
        success = True
        success = self._verifyOcp() and success
        success = self._verifyImages() and success
        success = self._verifyNws4() and success
        success = self._verifyHdb() and success
        success = self._verifyNfs() and success
        success = self._verifySapSystem() and success
        success = self.verifyNfsToHdbSshConnection() and success
        return success

    def verifyNfsToHdbSshConnection(self, doPrint=True):
        """ Verify SSH connection from NFS host to HDB host """
        hdbUser = self._ctx.cr.refsys.hdb.sidadm
        hdbHost = self._ctx.cf.refsys.hdb.host
        testSsh, testSshSecrets = getHdbCopySshCommand(self._ctx, withLogin=True, reuseCon=False)
        # set dummy command; 'true' exercises the login without side effects
        testSsh = testSsh + " true"
        result = self._cmdSshNfs.run(testSsh, testSshSecrets)
        success = result.rc == 0
        if doPrint:
            nfsUser = self._ctx.cr.nfs.user
            nfsHost = self._ctx.cf.nfs.host.name
            if success:
                showMsgOk(f"SSH connection to HDB host '{hdbHost.name}' "
                          f"from NFS host '{nfsHost}' was successful.")
            else:
                showMsgErr(f"Cannot establish ssh connection '{nfsUser.name}@{nfsHost}"
                           f" → '{hdbUser.name}@{hdbHost.ip}' ('{hdbUser.name}@{hdbHost.name}').")
                showMsgInd(f"Error message: '{result.out}'")
                showMsgInd("Check the ssh connection"
                           f" '{nfsUser.name}@{nfsHost}' → '{hdbUser.name}@{hdbHost.ip}'.")
        return success

    # Private methods

    def _verifyOcp(self):
        """ Verify OCP settings """
        # pylint: disable=too-many-statements

        def isDomainNameValid(loginAnsw):
            return 'no such host' not in loginAnsw

        def isCredentialsValid(loginAnsw):
            condFail1 = (loginAnsw.startswith('Login failed')
                         and 'Verify you have provided correct credentials' in loginAnsw)
            condFail2 = not (loginAnsw.startswith('Logged into')
                             or loginAnsw.startswith('Login successful'))
            return not (condFail1 or condFail2)

        def isProjectValid(project):
            # Assumes that an 'oc login' has been performed beforehand
            cmd = f'oc get project {project} -o custom-columns=NAME:.metadata.name --no-headers'
            # The command behaves as follows:
            # - If the project exists in the OpenShift cluster its name is printed to stdout.
            # - If it does not exist nothing is printed to stdout and an error message is printed
            #   to stderr
            return project in CmdShell().run(cmd).out

        def areResourcesValid(ocp, containerType):
            return areContainerMemResourcesValid(ocp, containerType)

        def isSecretExisting(secret):
            # Assumes that an 'oc login' has been performed beforehand
            cmd = f'oc describe secret {secret}'
            out = CmdShell().run(cmd).err
            return not out.startswith('Error from server')

        def verifySetup(ocp, loginAnsw):
            # 'user' is taken from the enclosing method scope (closure).
            success = True
            if isDomainNameValid(loginAnsw):
                showMsgOk("OCP domain name is valid.")
                if isCredentialsValid(loginAnsw):
                    showMsgOk("OCP user and password are valid.")
                    if isProjectValid(ocp.project):
                        showMsgOk("OCP project is valid.")
                    else:
                        showMsgErr(f"OCP project '{ocp.project}' does not exist.")
                        success = False
                else:
                    showMsgErr(f"OCP user '{user.name}' and/or password are invalid.")
                    success = False
            else:
                showMsgErr(f"OCP domain name '{ocp.domain}' is invalid.")
                success = False
            return success

        def verifyResources(ocp):
            success = True
            for containerType in self._ctx.config.getContainerFlavors():
                if containerType == 'init':
                    continue
                if areResourcesValid(ocp, containerType):
                    showMsgOk("OCP memory resources for container type "
                              f"'{containerType}' are valid.")
                else:
                    showMsgErr(f"OCP memory limit for container type '{containerType}' "
                               "is less than the value specified for requested memory.")
                    success = False
            return success

        def verifySecret(ocp):
            # A secret with the SAP HANA DB credentials is only required for
            # non-standard (distributed) reference systems.
            success = True
            if not refSystemIsStandard(self._ctx):
                secret = ocp.containers.di.secret
                if secret:
                    if isSecretExisting(secret):
                        showMsgOk(f"OCP secret '{secret}' exists.")
                    else:
                        showMsgErr(f"Specified OCP secret '{secret}' "
                                   "was not found in OCP cluster.")
                        showMsgInd("Make sure the secret exists and is "
                                   "created in the right project.")
                        success = False
                else:
                    showMsgErr("Reference system is a distributed system.")
                    showMsgInd("You must specify the name of an OCP secret in the config.yaml file")
                    showMsgInd("containing the information about the "
                               "SAP HANA DB user and password.")
                    success = False
            return success

        ocp = self._ctx.cf.ocp
        user = self._ctx.cr.ocp.user
        success = verifySetup(ocp, ocLogin(self._ctx, user))
        success = success and verifyResources(ocp)
        success = success and verifySecret(ocp)
        return success

    def _verifyImages(self):
        """ verify Settings for images """

        def _isRpmFileForPackageAvailable(packageName, path):
            try:
                getRpmFileForPackage(packageName, path)
                return True
            except RpmFileNotFoundException as exp:
                print(exp.errorText)
                return False

        def _getImageTypes(ctx):
            return list(ctx.cf.images.__dict__)

        success = True
        defaultPackagesDir = getConstants().defaultPackagesDir
        for flavor in _getImageTypes(self._ctx):
            if flavor == "init":
                continue
            packages = getattr(self._ctx.cf.images, flavor).packages
            for package in packages:
                if package.dnfInstallable:
                    showMsgOk(f"Package {package.packageName} installable via dnf install.")
                else:
                    if _isRpmFileForPackageAvailable(package.packageName, defaultPackagesDir):
                        showMsgOk(f"Package {package.packageName} installable via rpm.")
                    else:
                        # Fix: the second literal was missing the 'f' prefix,
                        # so '{defaultPackagesDir}' was printed verbatim.
                        showMsgErr(f"Package {package.packageName} not found "
                                   f"in {defaultPackagesDir}.")
                        success = False
        return success

    def _verifyNfs(self):
        """ Verify NFS settings """
        nfs = self._ctx.cf.nfs
        user = self._ctx.cr.nfs.user
        success = True
        if self._isHostNameValid(self._cmdSshNfs):
            showMsgOk("NFS host is valid.")
            if self._isUserValid(self._cmdSshNfs):
                showMsgOk("NFS user is valid.")
            else:
                showMsgErr(f"NFS user '{user.name}' is invalid "
                           f"or ssh is not set up correctly.")
                showMsgInd(f"Check first the existence of '{user.name}' on '{nfs.host.name}'.")
                showMsgInd(f"If exists, check the ssh connection by executing: "
                           f"ssh {user.name}@{nfs.host.name}")
                success = False
        else:
            showMsgErr(f"NFS host '{nfs.host.name}' is invalid.")
            success = False
        return success

    def _verifyNws4(self):
        """ Verify settings for reference system component 'nws4' """
        return self._verifyRefSys('nws4', self._cmdSshNws4)

    def _verifyHdb(self):
        """ Verify settings for reference system component 'hdb' """
        # Fix: use the HDB SSH connection here. The NWS4 connection was passed
        # before, which checks the wrong host/user for distributed systems.
        success = self._verifyRefSys('hdb', self._cmdSshHdb)
        if success:
            if self._isHdbBaseDirValid():
                showMsgOk("HDB base directory is valid.")
            else:
                showMsgErr(f"HDB base directory '{self._ctx.cf.refsys.hdb.base}' is invalid.")
                success = False
        return success

    def _verifyRefSys(self, component, cmdSsh):
        """ Verify settings for given component' """
        compUp = component.upper()
        sidU = getattr(self._ctx.cf.refsys, component).sidU
        hostname = getattr(self._ctx.cf.refsys, component).host.name
        user = getattr(self._ctx.cr.refsys, component).sidadm
        success = True
        if self._isHostNameValid(cmdSsh):
            showMsgOk(f"{compUp} host is valid.")
            if self._isUserValid(cmdSsh):
                showMsgOk(f"{compUp} user is valid.")
                if self._isSidInUsrSapServices(cmdSsh, sidU):
                    showMsgOk(f"{compUp} SAP system ID is valid.")
                else:
                    showMsgErr(f"{compUp} SAP system ID is invalid.")
                    success = False
            else:
                showMsgErr(f"{compUp} user '{user.name}' is invalid "
                           f"or ssh is not set up correctly.")
                showMsgInd(f"Check first the existence of '{user.name}' on '{hostname}'.")
                showMsgInd(f"If exists, check the ssh connection by executing: "
                           f"ssh {user.name}@{hostname}")
                success = False
        else:
            showMsgErr(f"{compUp} host '{hostname}' is invalid.")
            success = False
        return success

    def _verifySapSystem(self):
        """ Verify SAP system setup """
        success = True
        if refSystemIsStandard(self._ctx):
            if not self._ctx.cf.refsys.nws4.host.name == self._ctx.cf.refsys.hdb.host.name:
                success = False
                showMsgErr(f"The HANADB database '{self._ctx.cf.refsys.hdb.sidU}' "
                           "must run on the same host as the NWS4 SAP System.")
            if not self._isHdbSidInDefaultPfl():
                showMsgErr("You must not use a different HANADB SAP System "
                           f"than specified for the NWS4 SAP System '{self._ctx.cf.refsys.nws4.sidU}'.")
                success = False
        return success

    def _isHostNameValid(self, cmdSsh):
        # ssh writes resolution failures to stderr.
        out = self._checkSshLogin(cmdSsh)
        return 'Could not resolve hostname' not in out

    def _isUserValid(self, cmdSsh):
        out = self._checkSshLogin(cmdSsh)
        return 'Permission denied' not in out and 'Connection reset' not in out

    def _checkSshLogin(self, cmdSsh):
        # 'true' succeeds on any login; only stderr is interesting here.
        return cmdSsh.run('true').err

    def _isSidInUsrSapServices(self, cmdSsh, sidU):
        # Fix: 'wc -l' writes its count to stdout, so read .out. Reading .err
        # (as before) made this check succeed even when the SID was absent.
        out = cmdSsh.run(f' grep {sidU} /usr/sap/sapservices | wc -l').out
        return not out.startswith('0')

    def _isDirValid(self, cmdSsh, directory):
        # 'ls' reports a missing path on stderr.
        out = cmdSsh.run(f' ls {directory}').err
        return 'No such file or directory' not in out

    def _isHdbBaseDirValid(self):
        # A valid HDB base directory contains the data, log and shared trees.
        out = self._cmdSshHdb.run(f' ls {self._ctx.cf.refsys.hdb.base}').out
        return 'data' in out and 'log' in out and 'shared' in out

    def _isHdbSidInDefaultPfl(self):
        defaultPfl = f'/usr/sap/{self._ctx.cf.refsys.nws4.sidU}/SYS/profile/DEFAULT.PFL'
        out = self._cmdSshNws4.run(f' grep dbs/hdb/dbname {defaultPfl}').out
        return self._ctx.cf.refsys.hdb.sidU in out
class VerifyOcp():
    """ Verify various ocp settings

    Cluster-level checks (SCCs, service account, SELinux, CRI-O pids_limit)
    that require admin credentials and SSH access to the worker nodes via
    the helper node.
    """

    def __init__(self, ctx):
        self._ctx = ctx
        ocLogin(ctx, ctx.cr.ocp.admin)
        # Names of all worker nodes, one per line from the go-template output.
        self._workerNodes = CmdShell().run(
            'oc get nodes'
            + ' --selector="node-role.kubernetes.io/worker"'
            + " -o template --template"
            + " '{{range .items}}{{.metadata.name}}{{"+r'"\n"'+"}}{{end}}'"
        ).out.split()

    # Public methods

    def verify(self):
        """ Verify various ocp settings """
        success = True
        success = self._verifySccForProject() and success
        success = self._verifyOcpServiceAccount() and success
        if not self._workerNodes:
            showMsgErr("Could not retrieve list of worker nodes.")
            showMsgInd("SELinux and pid limit settings cannot be verified!")
            success = False
        else:
            success = self._verifySeLinux() and success
            success = self._verifyPidLimit() and success
        return success

    # Private methods

    def _runSshJumpCmd(self, worker, cmd):
        """ Run cmd on a worker node, jumping via the helper node.

        Returns the command's stdout, or the sentinel 'SSH CONNECT ERROR'
        if the connection failed.
        """
        ctx = self._ctx
        innerSshCmd = 'ssh'
        if ctx.cr.ocp.helper.user.sshid:
            # Fix: literal was missing the 'f' prefix, so ssh received the
            # verbatim string '{ctx.cr.ocp.helper.user.sshid}' as identity file.
            innerSshCmd += f' -i {ctx.cr.ocp.helper.user.sshid}'
        innerSshCmd += ' -o StrictHostKeyChecking=no'
        innerSshCmd += f' core@{worker} {cmd}'

        helperHost = ctx.cf.ocp.helper.host
        helperUser = ctx.cr.ocp.helper.user
        res = CmdSsh(ctx, helperHost.name, helperUser, reuseCon=False).run(innerSshCmd)
        rval = res.out
        if res.rc != 0:
            showMsgErr(f"Could not execute SSH command on worker node '{worker}'"
                       f" as user '{helperUser.name}' on helper node '{helperHost.name}'")
            showMsgInd(f"({res.err})")
            rval = 'SSH CONNECT ERROR'
        return rval

    def _verifySccForProject(self):
        ocp = self._ctx.cf.ocp
        out = CmdShell().run(
            'oc adm policy who-can use scc anyuid'
            " -o template --template='{{range .groups}}{{.}}{{"+r'"\n"'+"}}{{end}}'"
        ).out.split()
        if f'system:serviceaccounts:{ocp.project}' in out:
            showMsgOk("Security Context Constraint 'anyuid' is valid.")
            return True
        showMsgErr(f"Project '{ocp.project}' does not have "
                   "the 'anyuid' Security Context Constraint permission.")
        showMsgInd("Logon as kube:admin and execute:")
        showMsgInd("  oc adm policy add-scc-to-group anyuid"
                   f' "system:serviceaccounts:{ocp.project}"\n')
        return False

    def _verifyOcpServiceAccount(self):
        ocp = self._ctx.cf.ocp
        out = CmdShell().run(
            'oc adm policy who-can use scc hostmount-anyuid'
            " -o template --template='{{range .users}}{{.}}{{"+r'"\n"'+"}}{{end}}'"
        ).out.split()
        if f'system:serviceaccount:{ocp.project}:{ocp.project}-sa' in out:
            showMsgOk("Security Context Constraint 'hostmount-anyuid' is valid.")
            return True
        showMsgErr(f"Service account {ocp.project}-sa does not have "
                   "the 'hostmount-anyuid' Security Context Constraint.")
        showMsgInd("Logon as kube:admin, create the service account and execute:")
        showMsgInd("  oc adm policy add-scc-to-user hostmount-anyuid"
                   f' "system:serviceaccount:{ocp.project}:{ocp.project}-sa"\n')
        return False

    def _verifySeLinux(self):
        success = True
        for worker in self._workerNodes:
            enforceState = self._runSshJumpCmd(worker, 'getenforce')
            if enforceState in ('Permissive', 'Disabled'):
                showMsgOk(f"SELinux setting for worker {worker} is valid.")
            else:
                # Also reached for the 'SSH CONNECT ERROR' sentinel.
                showMsgErr(f"Invalid SELinux setting '{enforceState}' for worker {worker}.")
                success = False
        return success

    def _verifyPidLimit(self):
        success = True
        for worker in self._workerNodes:
            pidsLimit = self._runSshJumpCmd(worker, 'crio config | grep pids_limit')
            # Robustness fix: unexpected output (e.g. the SSH error sentinel)
            # used to crash the whole verification with an IndexError.
            try:
                pidsLimitValue = int(pidsLimit.split('=')[1])
            except (IndexError, ValueError):
                showMsgErr(f"Could not determine CRI-O pids_limit for worker {worker}.")
                success = False
                continue
            if pidsLimitValue >= 8192:
                showMsgOk(f"CRI-O pids_limit setting for worker {worker} is valid.")
            else:
                showMsgErr(f"CRI-O pids_limit setting for worker {worker} "
                           "is too low, must be >= 8192.")
                success = False
        return success
| 2.140625 | 2 |
rainbow/rainbow.py | jaxzin/adafruit-voice-docker | 0 | 16803 | <gh_stars>0
import time
import board
import adafruit_dotstar
import atexit
import signal
# Flag polled by the animation loop; set by the exit handler to stop it.
kill_now = False

# DotStar strip wiring: data on D5, clock on D6, 3 pixels at half brightness.
DOTSTAR_DATA = board.D5
DOTSTAR_CLOCK = board.D6
dots = adafruit_dotstar.DotStar(DOTSTAR_CLOCK, DOTSTAR_DATA, 3, brightness=0.5)
def exit_handler(*args):
    """Stop the animation loop and blank all pixels.

    Registered both with atexit (called with no arguments) and as a signal
    handler (called with (signum, frame)) — hence the *args signature; the
    previous zero-argument version raised TypeError on SIGINT/SIGTERM.
    """
    # Fix: without 'global' the assignment created a local variable and the
    # main loop never saw the shutdown request.
    global kill_now
    kill_now = True
    # turn off the pixel dots
    for i in range(3):
        dots[i] = (0, 0, 0)
    dots.show()

atexit.register(exit_handler)
signal.signal(signal.SIGINT, exit_handler)
signal.signal(signal.SIGTERM, exit_handler)
def wheel(pos):
    """Map a position 0-255 onto an RGB colour wheel.

    The colours transition r -> g -> b -> back to r; positions outside
    the 0-255 range yield black.
    """
    if not 0 <= pos <= 255:
        return (0, 0, 0)
    if pos < 85:
        # red fading out, green fading in
        return (255 - pos * 3, pos * 3, 0)
    if pos < 170:
        # green fading out, blue fading in
        offset = pos - 85
        return (0, 255 - offset * 3, offset * 3)
    # blue fading out, red fading in
    offset = pos - 170
    return (offset * 3, 0, 255 - offset * 3)
# Animate a rainbow across the 3 pixels until the exit handler fires.
while not kill_now:
    for j in range(255):
        for i in range(3):
            # Offset each pixel a third of the wheel apart; j advances the
            # animation 5 wheel steps per frame, masked back into 0-255.
            rc_index = (i * 256 // 3) + j * 5
            dots[i] = wheel(rc_index & 255)
        dots.show()
        time.sleep(0.01)
| 2.984375 | 3 |
src/iranlowo/corpus/__init__.py | Niger-Volta-LTI/iranlowo | 17 | 16804 | <reponame>Niger-Volta-LTI/iranlowo<filename>src/iranlowo/corpus/__init__.py
from .corpus import Corpus, DirectoryCorpus
from .loaders import OweLoader, YorubaBlogCorpus, BBCCorpus, BibeliCorpus | 0.976563 | 1 |
seq2seq.py | frozen86/SeqLite | 1 | 16805 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
from masked_cross_entropy import *
from preprocess import *
from parameter import *
import time
# # Training
def train(input_batches, input_lengths, target_batches, target_lengths, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, max_length=MAX_LENGTH):
    """Run one teacher-forced training step over a single batch.

    Args:
        input_batches: source token indices (time-major; transposed to
            batch x seq only for the loss).
        input_lengths: true lengths of the source sequences.
        target_batches: target token indices (time-major).
        target_lengths: true lengths of the target sequences.
        encoder, decoder: the two model halves.
        encoder_optimizer, decoder_optimizer: their optimizers.
        criterion: unused; the loss comes from masked_cross_entropy.
        max_length: maximum sequence length (module-level MAX_LENGTH).

    Returns:
        (loss value, encoder grad norm, decoder grad norm).
    """
    batch_size = BATCH_SIZE
    clip = CLIP

    # Zero gradients of both optimizers
    encoder_optimizer.zero_grad()
    decoder_optimizer.zero_grad()
    loss = 0 # Added onto for each word

    # Run through encoder
    encoder_outputs, encoder_hidden = encoder(
        input_batches, input_lengths, None)

    # Initialize decoder input
    decoder_input = torch.LongTensor([SOS_index] * batch_size)

    # Use last (forward) hidden state from encoder
    # encoder_hidden size: num_layers * num_directions(=2), batch, hidden_size
    # decoder_hidden size: num_layers, batch, hidden_size
    decoder_hidden = encoder_hidden[:decoder.n_layers]

    # Find the max length
    max_target_length = max(target_lengths)

    # Initialize decoder output
    all_decoder_outputs = torch.zeros(
        max_target_length, batch_size, decoder.output_size)

    # Move new Variables to CUDA
    if USE_CUDA:
        decoder_input = decoder_input.cuda()
        all_decoder_outputs = all_decoder_outputs.cuda()

    # Run through decoder one time step at a time
    # (teacher forcing: the next input is always the ground-truth token)
    for t in range(max_target_length):
        decoder_output, decoder_hidden, decoder_attn = decoder(
            decoder_input, decoder_hidden, encoder_outputs
        )

        all_decoder_outputs[t] = decoder_output
        decoder_input = target_batches[t] # Next input is current target

    # Loss calculation and backpropagation
    # loss_cal = nn.BCELoss()
    # loss = loss_cal(all_decoder_outputs, target_batches)
    # print("target:", target_batches.size())
    # print("output:", all_decoder_outputs.size())
    loss = masked_cross_entropy(
        all_decoder_outputs.transpose(0, 1).contiguous(), # -> batch x seq
        target_batches.transpose(0, 1).contiguous(), # -> batch x seq
        target_lengths
    )
    loss.backward()

    # Clip gradient norms (returns the pre-clip norms for monitoring)
    ec = torch.nn.utils.clip_grad_norm_(encoder.parameters(), clip)
    dc = torch.nn.utils.clip_grad_norm_(decoder.parameters(), clip)

    # Update parameters with optimizers
    encoder_optimizer.step()
    decoder_optimizer.step()

    return loss.item(), ec, dc
# # Evaluating the network
# def evaluate(input_seq, max_length=MAX_LENGTH):
def evaluate(input_batches, input_lengths, input_lang, output_lang, encoder, decoder, max_length=MAX_LENGTH):
    """Greedy-decode one input sequence without gradient tracking.

    Returns:
        (list of output token indices, attention matrix cropped to the
        decoded length x encoder output length).
    """
    # Set to not-training mode to disable dropout
    encoder.train(False)
    decoder.train(False)

    # Run through encoder
    encoder_outputs, encoder_hidden = encoder(
        input_batches, input_lengths, None)

    # Inference only, no back propagation
    with torch.no_grad():
        # Initialize decoder input
        decoder_input = torch.LongTensor([SOS_index])
        # Use last (forward) hidden state from encoder
        decoder_hidden = encoder_hidden[:decoder.n_layers]

        if USE_CUDA:
            decoder_input = decoder_input.cuda()

        # Store output words and attention states
        output_sindices = []
        decoder_attentions = torch.zeros(max_length + 1, max_length + 1)

        # Run through decoder, greedily picking the top-scoring token each step
        for di in range(max_length):
            decoder_output, decoder_hidden, decoder_attn = decoder(
                decoder_input, decoder_hidden, encoder_outputs
            )
            decoder_attentions[di, :decoder_attn.size(
                2)] += decoder_attn.squeeze(0).squeeze(0).cpu().data

            # Choose top word from output
            topv, topi = decoder_output.data.topk(1)
            ni = topi[0][0]
            # Extract number from pytorch variable
            ni = ni.item()
            output_sindices.append(ni)
            if ni == EOS_index:
                break

            # Next input is chosen word
            decoder_input = torch.LongTensor([ni])
            if USE_CUDA:
                decoder_input = decoder_input.cuda()

    # Set back to training mode
    encoder.train(True)
    decoder.train(True)

    return output_sindices, decoder_attentions[:di + 1, :len(encoder_outputs)]
def evaluate_and_show_attention(input_sentence, input_length, input_lang, output_lang,
                                target_batches, encoder, decoder, epoch):
    """Decode one example, save its attention plot and return the sentences.

    Returns:
        (input sentence, decoded output sentence, target sentence) as strings.
    """
    sindices, attentions = evaluate(
        input_sentence, input_length, input_lang, output_lang, encoder, decoder)
    # Convert index sequences back to human-readable sentences.
    input_sentence = indices_to_sentence(input_lang, input_sentence)
    output_sentence = indices_to_sentence(output_lang, sindices)
    target_sentence = indices_to_sentence(output_lang, target_batches)
    # NOTE(review): print_summary is built but never printed or returned —
    # looks like a missing print(print_summary); confirm intent.
    print_summary = 'Evaluation:'+'\n'
    print_summary += '  in/src:' + input_sentence + '\n'
    print_summary += '  out:' + output_sentence + '\n'
    if target_sentence is not None:
        print_summary += '  tgt:' + target_sentence + '\n'
    show_attention(input_sentence, output_sentence, attentions, epoch)
    return input_sentence, output_sentence, target_sentence
def show_attention(input_sentence, output_sentence, attentions, epoch):
    """Render the attention matrix as a heat map and save it to PLOT_PATH.

    Saves both an epoch-numbered file and 'last.png' (always the most
    recent plot).
    """
    # Set up figure with colorbar
    # print(attentions)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    cax = ax.matshow(attentions.numpy(), cmap='bone')
    fig.colorbar(cax)

    # Set up axes: input words along x, output words along y.
    # The leading '' accounts for matplotlib's first (origin) tick.
    ax.set_xticklabels([''] + input_sentence.split(' '), rotation=90)
    ax.set_yticklabels([''] + output_sentence.split(' '))

    # Show label at every tick
    ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
    ax.yaxis.set_major_locator(ticker.MultipleLocator(1))

    fig.savefig(PLOT_PATH + '/epoch-%d.png' % epoch)
    fig.savefig(PLOT_PATH + '/last.png')
    # plt.show(block=True)
    # plt.close()
| 2.734375 | 3 |
ANNarchy_future/__init__.py | vitay/ANNarchy_future | 2 | 16806 | <gh_stars>1-10
from .api import *
__version__ = "5.0.0" | 1.15625 | 1 |
pycqed/tests/analysis_v2/test_simple_analysis.py | nuttamas/PycQED_py3 | 60 | 16807 | import unittest
import pycqed as pq
import os
import matplotlib.pyplot as plt
from pycqed.analysis_v2 import measurement_analysis as ma
class Test_SimpleAnalysis(unittest.TestCase):
    """Smoke tests for the analysis_v2 basic analysis classes.

    All tests run against recorded measurement data shipped under
    pycqed/tests/test_data, selected by timestamp and scan label.
    """

    @classmethod
    def tearDownClass(self):
        # Close all figures the analyses opened to avoid leaking into
        # other test modules.
        plt.close('all')

    @classmethod
    def setUpClass(self):
        # NOTE(review): classmethods conventionally name the first argument
        # 'cls'; 'self' works here but is misleading.
        self.datadir = os.path.join(pq.__path__[0], 'tests', 'test_data')
        ma.a_tools.datadir = self.datadir

    def test_1D_analysis_multi_file(self):
        # A timestamp range should pick up several 'flipping' datasets.
        a = ma.Basic1DAnalysis(t_start='20170726_164507',
                               t_stop='20170726_164845',
                               options_dict={'scan_label': 'flipping'})
        self.assertTrue(len(a.timestamps) > 5)

    def test_1D_analysis_single_file(self):
        # giving only a single file
        a = ma.Basic1DAnalysis(t_start='20170726_164845',
                               options_dict={'scan_label': 'flipping'})
        self.assertEqual(a.timestamps, ['20170726_164845'])

    def test_2D_analysis_multi_file(self):

        # N.B. by setting x2, x2_label and x2_unit in the options dict
        # the values can be plotted versus the varied parameter between
        # the linecuts
        a = ma.Basic2DAnalysis(t_start='20170726_164521',
                               t_stop='20170726_164845',
                               options_dict={'scan_label': 'flipping'})
        self.assertTrue(len(a.timestamps) > 5)

    def test_2D_interpolated(self):
        a=ma.Basic2DInterpolatedAnalysis(t_start='20180522_030206')
        fig_keys = list(a.figs.keys())
        exp_list_keys = ['Cost function value', 'Conditional phase',
                         'offset difference']
        self.assertEqual(fig_keys, exp_list_keys)

    @unittest.skip('FIXME: disabled, see PR #643')
    def test_1D_binned_analysis(self):
        a=ma.Basic1DBinnedAnalysis(label='120543_Single_qubit_GST_QL')
| 2.453125 | 2 |
CS2/1275_turtle_recursion/2499_koch_snowflake/alternate_snowflake.py | nealholt/python_programming_curricula | 7 | 16808 | import turtle
'''http://www.algorithm.co.il/blogs/computer-science/fractals-in-10-minutes-no-6-turtle-snowflake/
This would be a good introduction to recursion. I don't see how students
would invent this on their own, but they could modify it and see what
other fractals they could generate.
'''
# Create the pen and move it left (pen up, so no line is drawn) so the
# 500-unit-wide snowflake fits on screen; speed(0) is the fastest setting.
pen = turtle.Turtle()
pen.penup()
pen.goto(-200,0)
pen.pendown()
pen.speed(0)
def fractal(pen, length, depth):
    """Draw one Koch-curve segment of the given length with *pen*.

    At depth 0 the segment is a straight line; otherwise it is four
    recursively drawn sub-segments of a third of the length, with
    60/-120/60 degree turns between them.
    """
    # Base case: a straight stroke.
    if depth == 0:
        pen.forward(length)
        return
    # Recursive case: _/\_ shape — four sub-curves separated by turns.
    sub_length = length / 3
    fractal(pen, sub_length, depth - 1)
    for turn, angle in ((pen.right, 60), (pen.left, 120), (pen.right, 60)):
        turn(angle)
        fractal(pen, sub_length, depth - 1)
#Draw the fractal (depth 4 Koch segment, 500 units wide)
fractal(pen, 500, 4)
# Keep the window open until the user closes it.
turtle.done()
| 4.5625 | 5 |
test/test_edge.py | jbschwartz/spatial | 1 | 16809 | <gh_stars>1-10
import unittest
from spatial import Edge, Vector3
class TestEdge(unittest.TestCase):
    """Unit tests for spatial.Edge construction, equality and geometry."""

    def setUp(self) -> None:
        # An edge through the origin; self.middle is its midpoint.
        self.start = Vector3(1, 2, 3)
        self.end = Vector3(-1, -2, -3)
        self.middle = Vector3(0, 0, 0)

        self.edge = Edge(self.start, self.end)

    def test__init__accepts_endpoints(self) -> None:
        self.assertEqual(self.edge.start, self.start)
        self.assertEqual(self.edge.end, self.end)

    def test__eq__returns_true_for_edges_regardless_of_direction(self) -> None:
        # Edges are undirected: swapping start and end yields an equal edge.
        same_edge = Edge(self.start, self.end)
        self.assertEqual(self.edge, same_edge)

        opposite_edge = Edge(self.end, self.start)
        self.assertEqual(self.edge, opposite_edge)

        other_edge = Edge(self.start, self.middle)
        self.assertNotEqual(other_edge, self.edge)

    def test__eq__returns_notimplemented_for_incompatible_types(self) -> None:
        self.assertTrue(self.edge.__eq__(2) == NotImplemented)
        self.assertTrue(self.edge.__eq__("string") == NotImplemented)

    def test_length_returns_the_length_of_the_edge(self) -> None:
        self.assertEqual(self.edge.length, (self.start - self.end).length())

    def test_vector_returns_the_vector_between_the_edges_endpoints(self) -> None:
        self.assertEqual(self.edge.vector, self.end - self.start)
| 3.21875 | 3 |
wdae/wdae/user_queries/urls.py | iossifovlab/gpf | 0 | 16810 | from django.urls import re_path
from user_queries.views import UserQuerySaveView, UserQueryCollectView
# URL routes for the user_queries app; both paths accept an optional
# trailing slash.
urlpatterns = [
    # Route for saving a user's query (see UserQuerySaveView).
    re_path(r"^/save/?$", UserQuerySaveView.as_view(), name="user-save-query"),
    # Route for retrieving the collected queries (see UserQueryCollectView).
    re_path(
        r"^/collect/?$",
        UserQueryCollectView.as_view(),
        name="user-collect-queries",
    ),
]
| 1.914063 | 2 |
src/login/migrations/0017_auto_20191006_1716.py | vandana0608/Pharmacy-Managament | 0 | 16811 | <reponame>vandana0608/Pharmacy-Managament
# Generated by Django 2.0.7 on 2019-10-06 11:46
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: do not edit by hand.

    Alters login.Login.logout to default to the datetime.datetime.now
    callable (evaluated when a row is created, not at migration time).
    """

    dependencies = [
        ('login', '0016_auto_20191006_1715'),
    ]

    operations = [
        migrations.AlterField(
            model_name='login',
            name='logout',
            field=models.DateTimeField(default=datetime.datetime.now),
        ),
    ]
| 1.65625 | 2 |
InfoGain.py | gsndr/AIDA | 4 | 16812 | import pandas as pd
from math import log
class InfoGain():
    """Compute entropy and information gain over a CSV data set.

    The path passed to the constructor is the CSV file name *without* the
    '.csv' extension; extractVariables() loads it via pandas.
    """

    def __init__(self, path):
        self._path = path

    def extractVariables(self):
        """Load '<path>.csv' and cache its column names and row counts."""
        self._df = pd.read_csv(self._path + ".csv")
        # put the original column names in a python list
        self._categories = list(self._df.columns.values)
        print(self._categories)
        # Per-column count of non-null rows (a pandas Series).
        self._totalRows = self._df.count()

    def splitCategories(self):
        # NOTE(review): currently just aliases the full frame; presumably a
        # placeholder for filtering the "normal" rows — confirm intent.
        self._dfNormal = self._df

    @staticmethod
    def entropy(pi):
        """Return the Shannon entropy (base 2) of the class counts in *pi*.

        entropy(p) = -SUM(Pi * log2(Pi)). It is 0 for a pure distribution
        and maximal (1 for two classes) for a uniform one.

        *pi* is a list of per-class counts, e.g. [9, 5].
        """
        total = 0
        class_total = sum(pi)  # hoisted: was recomputed on every iteration
        for p in pi:
            p = p / class_total
            # p == 0 contributes nothing (lim p*log(p) -> 0).
            if p != 0:
                total += p * log(p, 2)
        return -total

    @staticmethod
    def gain(d, a):
        """Return the information gain of splitting *d* by attribute *a*.

        gain(D, A) = entropy(D) - SUM(|Di| / |D| * entropy(Di)),
        where *d* is the overall class-count list and *a* is a list of
        class-count lists, one per attribute value.
        """
        total = 0
        d_total = sum(d)
        for v in a:
            total += sum(v) / d_total * InfoGain.entropy(v)
        return InfoGain.entropy(d) - total
| 3.703125 | 4 |
# Locale table mapping a language code to its UI strings. English ("en") is
# the fallback locale consulted by get_string() and must always be present.
strings = {
    "en": {
        "error_no_reply": "This command must be sent as a reply to one's message!",
        "error_report_admin": "Whoa! Don't report admins 😈",
        "error_restrict_admin": "You cannot restrict an admin.",
        "report_date_format": "%d.%m.%Y at %H:%M",
        "report_message": '👆 Sent {time} (server time)\n'
                          '<a href="{msg_url}">Go to message</a>',
        "report_note": "\n\nNote: {note}",
        "report_sent": "<i>Report sent</i>",
        "action_del_msg": "Delete message",
        "action_del_and_ban": "Delete and ban",
        "action_deleted": "\n\n🗑 <b>Deleted</b>",
        "action_deleted_banned": "\n\n🗑❌ <b>Deleted, user banned</b>",
        "action_deleted_partially": "Some messages couldn't be found or deleted",
        "readonly_forever": "🙊 <i>User set to read-only mode forever</i>",
        "readonly_temporary": "🙊 <i>User set to read-only mode until {time} (server time)</i>",
        "nomedia_forever": "🖼 <i>User set to text-only mode forever</i>",
        "nomedia_temporary": "🖼 <i>User set to text-only mode until {time} (server time)</i>",
        "need_admins_attention": 'Dear admins, your presence in chat is needed!\n\n'
                                 '<a href="{msg_url}">Go to chat</a>',
    },
    "ru": {
        "error_no_reply": "Эта команда должна быть ответом на какое-либо сообщение!",
        "error_report_admin": "Админов репортишь? Ай-ай-ай 😈",
        "error_restrict_admin": "Невозможно ограничить администратора.",
        "report_date_format": "%d.%m.%Y в %H:%M",
        "report_message": '👆 Отправлено {time} (время серверное)\n'
                          '<a href="{msg_url}">Перейти к сообщению</a>',
        "report_note": "\n\nПримечание: {note}",
        "report_sent": "<i>Жалоба отправлена администраторам</i>",
        "action_del_msg": "Удалить сообщение",
        "action_del_and_ban": "Удалить и забанить",
        "action_deleted": "\n\n🗑 <b>Удалено</b>",
        "action_deleted_banned": "\n\n🗑❌ <b>Удалено, юзер забанен</b>",
        "action_deleted_partially": "Не удалось найти или удалить некоторые сообщения",
        "readonly_forever": "🙊 <i>Пользователь переведён в режим «только чтение» навсегда</i>",
        "readonly_temporary": "🙊 <i>Пользователь переведён в режим «только чтение» до {time} (время серверное)</i>",
        "nomedia_forever": "🖼 <i>Пользователю запрещено отправлять медиафайлы навсегда</i>",
        "nomedia_temporary": "🖼 <i>Пользователю запрещено отправлять медиафайлы до {time} (время серверное)</i>",
        "need_admins_attention": 'Товарищи админы, в чате нужно ваше присутствие!\n\n'
                                 '<a href="{msg_url}">Перейти к чату</a>',
    },
    "de": {
        "error_no_reply": "Dieser Befehl kann nur als Antwort gesendet werden!",
        "error_report_admin": "Whoa! Du kannst Admins nicht melden 😈",
        "error_restrict_admin": "Du kannst keine Admins einschränken.",
        "report_date_format": "%d.%m.%Y um %H:%M Uhr",
        "report_message": '👆 Gesendet {time} (server time)\n'
                          '<a href="{msg_url}">Zur Nachricht</a>',
        "report_note": "\n\nNotiz: {note}",
        "report_sent": "<i>Gemeldet</i>",
        "action_del_msg": "Nachricht löschen",
        "action_del_and_ban": "Löschen und Sperren",
        "action_deleted": "\n\n🗑 <b>Löschen</b>",
        "action_deleted_banned": "\n\n🗑❌ <b>Gelöscht, Nutzer gesperrt!</b>",
        "action_deleted_partially": "Einige Nachrichten wurden nicht gefunden zum löschen",
        "readonly_forever": "🙊 <i>Nutzer ist für immer stumm</i>",
        "readonly_temporary": "🙊 <i>Nutzer bis {time} stumm. (server time)</i>",
        "nomedia_forever": "🖼 <i>Nutzer für immer im Nur-Text-Modus.</i>",
        "nomedia_temporary": "🖼 <i>Nutzer bis {time} im nur Text-Modus. (server time)</i>",
        "need_admins_attention": 'Liebe Admins, ich sehne euch herbei!\n\n'
                                 '<a href="{msg_url}">Zum Chat</a>',
    }
}
@@ -64,7 +89,7 @@ def get_string(lang: str, key: str):
lang = strings.get(lang)
if not lang:
if not strings.get("en"):
raise KeyError(f'Neither "{lang}" nor "en" locales found')
raise KeyError(f'Weder "{lang}" noch "en" gefunden.')
else:
lang = strings.get("en")
try:
return lang[key]
except KeyError:
return strings.get("en").get(key, "ERR_NO_STRING")
| 1.789063 | 2 |
swagger_server/models/linecode_r_matrix.py | garagonc/simulation-engine | 3 | 16814 | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server.models.impedance import Impedance # noqa: F401,E501
from swagger_server import util
class LinecodeRMatrix(Model):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    def __init__(self, phase_r: Impedance=None, phase_s: Impedance=None, phase_t: Impedance=None):  # noqa: E501
        """LinecodeRMatrix - a model defined in Swagger

        :param phase_r: The phase_r of this LinecodeRMatrix.  # noqa: E501
        :type phase_r: Impedance
        :param phase_s: The phase_s of this LinecodeRMatrix.  # noqa: E501
        :type phase_s: Impedance
        :param phase_t: The phase_t of this LinecodeRMatrix.  # noqa: E501
        :type phase_t: Impedance
        """
        # Declared attribute types; presumably consumed by
        # util.deserialize_model (see from_dict) — confirm against swagger_server.util.
        self.swagger_types = {
            'phase_r': Impedance,
            'phase_s': Impedance,
            'phase_t': Impedance
        }

        # Maps Python attribute names to their JSON/wire field names.
        self.attribute_map = {
            'phase_r': 'phase_R',
            'phase_s': 'phase_S',
            'phase_t': 'phase_T'
        }
        self._phase_r = phase_r
        self._phase_s = phase_s
        self._phase_t = phase_t

    @classmethod
    def from_dict(cls, dikt) -> 'LinecodeRMatrix':
        """Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The Linecode_R_Matrix of this LinecodeRMatrix.  # noqa: E501
        :rtype: LinecodeRMatrix
        """
        return util.deserialize_model(dikt, cls)

    @property
    def phase_r(self) -> Impedance:
        """Gets the phase_r of this LinecodeRMatrix.


        :return: The phase_r of this LinecodeRMatrix.
        :rtype: Impedance
        """
        return self._phase_r

    @phase_r.setter
    def phase_r(self, phase_r: Impedance):
        """Sets the phase_r of this LinecodeRMatrix.


        :param phase_r: The phase_r of this LinecodeRMatrix.
        :type phase_r: Impedance
        """

        self._phase_r = phase_r

    @property
    def phase_s(self) -> Impedance:
        """Gets the phase_s of this LinecodeRMatrix.


        :return: The phase_s of this LinecodeRMatrix.
        :rtype: Impedance
        """
        return self._phase_s

    @phase_s.setter
    def phase_s(self, phase_s: Impedance):
        """Sets the phase_s of this LinecodeRMatrix.


        :param phase_s: The phase_s of this LinecodeRMatrix.
        :type phase_s: Impedance
        """

        self._phase_s = phase_s

    @property
    def phase_t(self) -> Impedance:
        """Gets the phase_t of this LinecodeRMatrix.


        :return: The phase_t of this LinecodeRMatrix.
        :rtype: Impedance
        """
        return self._phase_t

    @phase_t.setter
    def phase_t(self, phase_t: Impedance):
        """Sets the phase_t of this LinecodeRMatrix.


        :param phase_t: The phase_t of this LinecodeRMatrix.
        :type phase_t: Impedance
        """

        self._phase_t = phase_t
| 2.125 | 2 |
workshop/serializers.py | shivammaniharsahu/django_api | 0 | 16815 | <filename>workshop/serializers.py
from rest_framework import serializers
from .models import Register
class RegisterSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer exposing Register records over the REST API.

    NOTE(review): 'password' and 'confirm_password' are listed in ``fields``
    and so are serialized in responses as well — consider marking them
    write-only; confirm intended behavior with the API consumers.
    """

    class Meta:
        model = Register
        fields = ('id', 'name', 'email', 'contact', 'password', 'confirm_password')
| 1.898438 | 2 |
experiments/twitter_event_data_2019/evaluation/groundtruth_processor.py | HHansi/WhatsUp | 0 | 16816 | # Created by Hansi at 3/16/2020
import os
from algo.data_process.data_preprocessor import data_cleaning_flow
from algo.utils.file_utils import delete_create_folder
def extract_gt_tokens(text):
    """
    Extract GT labels from a GT string.

    The GT string must follow the Twitter-Event-Data-2019 format:
    duplicate definitions are separated by "|", each label is wrapped in
    "[...]", and elements inside a label are comma separated.

    parameters
    -----------
    :param text: str
    :return: list
        List of GT labels corresponding to a single event; one entry per
        duplicate definition, each entry being a list of token lists.
    """
    parsed = []
    for definition in text.split("|"):
        label_groups = []
        for chunk in definition.split("["):
            if not chunk:
                continue
            cleaned = chunk.replace("\n", "").replace("]", "")
            label_groups.append(cleaned.split(","))
        parsed.append(label_groups)
    return parsed
def load_gt(folder_path):
    """
    Read GT data into a dictionary formatted as {time-window: labels}.

    parameters
    -----------
    :param folder_path: str
        Path to folder which contains GT data
    :return: object
        Dictionary of GT data keyed by file name (without extension)
    """
    gt = dict()
    for root, dirs, files in os.walk(folder_path):
        for file in files:
            file_name = os.path.splitext(file)[0]
            # Join with the directory actually being walked (`root`), not the
            # top-level folder, so files in nested sub-directories resolve.
            # `with` guarantees the handle is closed even if parsing fails.
            with open(os.path.join(root, file), 'r', encoding='utf-8') as f:
                events = []
                for line in f:
                    events.append(extract_gt_tokens(line))
            gt[file_name] = events
    return gt
def generate_gt_string(tokens):
    """
    Given a list of GT labels corresponding to a single event, convert them to
    a string formatted according to the Twitter-Event-Data-2019 GT format.

    parameters
    -----------
    :param tokens: list
        One entry per duplicate definition; each entry is a list of labels,
        each label a list of string elements.
    :return: str
    """
    # Renamed accumulator from `str` to `result`: the original shadowed the
    # builtin `str` type inside this function.
    result = ""
    for duplicate in tokens:
        # Separate consecutive duplicate definitions with "|".
        if result and result[-1] == "]":
            result = result + "|"
        for label in duplicate:
            result = result + "["
            for element in label:
                if result[-1] == "[":
                    result = result + element
                else:
                    result = result + "," + element
            result = result + "]"
    return result
def get_combined_gt(gt):
    """
    Combine the GT labels of multiple events available at a time frame into a
    single event representation (cross-product over duplicate definitions).

    parameters
    -----------
    :param gt: object
        Dictionary of GT returned by load_gt
    :return: object
        Dictionary of combined GT
    """
    combined_gt = dict()
    for time_frame, gt_events in gt.items():
        merged = gt_events[0]
        for event in gt_events[1:]:
            # Concatenate every already-merged duplicate with every duplicate
            # of the next event (duplicate-major order, matching evaluation).
            merged = [prefix + duplicate
                      for duplicate in event
                      for prefix in merged]
        # Even though there is a single combined event, wrap it in a list to
        # stay consistent with the general evaluation methods.
        combined_gt[time_frame] = [merged]
    return combined_gt
def preprocess_gt(input_filepath, output_filepath):
    """
    Preprocess ground truth data in input_file and append to the output_file.

    parameters
    -----------
    :param input_filepath: str (.txt file path)
        Ground truth file formatted as Twitter-Event-Data-2019
    :param output_filepath: str (.txt file path)
        Opened in append mode; existing content is preserved.
    :return:
    """
    # Context managers guarantee both handles are closed even if cleaning
    # raises (the original leaked the input handle entirely).
    with open(input_filepath, 'r') as input_file:
        events = [extract_gt_tokens(line) for line in input_file]

    # Clean every element while preserving the nested
    # event -> duplicate -> label -> element structure.
    new_events = []
    for event in events:
        new_duplicates = []
        for duplicate in event:
            new_labels = []
            for label in duplicate:
                new_elements = [data_cleaning_flow(element) for element in label]
                new_labels.append(new_elements)
            new_duplicates.append(new_labels)
        new_events.append(new_duplicates)

    with open(output_filepath, 'a', encoding='utf-8') as output_file:
        for event in new_events:
            output_file.write(generate_gt_string(event))
            output_file.write("\n")
def preprocess_gt_bulk(input_folder_path, output_folder_path):
    """
    Preprocess ground truth data in all files in input_folder and save to the
    output_folder.

    parameters
    -----------
    :param input_folder_path: str
        Path to folder which contains GT data files
    :param output_folder_path: str
        Path to folder to save preprocessed GT data (recreated from scratch)
    :return:
    """
    # Delete if there already exists a folder and create a new one.
    delete_create_folder(output_folder_path)

    for root, dirs, files in os.walk(input_folder_path):
        for file in files:
            # Read from the directory actually being walked (`root`) so files
            # in nested sub-directories resolve correctly; output stays flat.
            input_filepath = os.path.join(root, file)
            output_filepath = os.path.join(output_folder_path, file)
            preprocess_gt(input_filepath, output_filepath)
metaflow/plugins/env_escape/configurations/test_lib_impl/test_lib.py | RobBlumberg/metaflow | 5,821 | 16817 | <filename>metaflow/plugins/env_escape/configurations/test_lib_impl/test_lib.py
import functools
class MyBaseException(Exception):
    # Root of the fixture exception hierarchy used by these tests.
    pass
class SomeException(MyBaseException):
    # Concrete fixture exception raised by TestClass3.raiseSomething.
    pass
class TestClass1(object):
    """Fixture class exercising attributes, properties, static and class
    methods, and custom __str__/__repr__."""

    # Class-level attribute returned by somethingclass().
    cls_object = 25

    def __init__(self, value):
        self._value = value
        self._value2 = 123

    def unsupported_method(self):
        # Intentionally empty; presumably exercises methods that are not
        # proxied/supported — confirm against the env_escape configuration.
        pass

    def print_value(self):
        return self._value

    def __str__(self):
        return "My str representation is %s" % str(self._value)

    def __repr__(self):
        return "My repr representation is %s" % str(self._value)

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, value):
        self._value = value

    def to_class2(self, count, stride=1):
        # Build a TestClass2 sequence starting at this instance's value.
        return TestClass2(self._value, stride, count)

    @staticmethod
    def somethingstatic(val):
        return 42 + val

    @classmethod
    def somethingclass(cls):
        return cls.cls_object

    @property
    def override_value(self):
        return self._value2

    @override_value.setter
    def override_value(self, value):
        self._value2 = value
class TestClass2(object):
    """Iterable fixture producing an arithmetic progression of `count`
    values starting at `value` with step `stride`."""

    def __init__(self, value, stride, count):
        self._mylist = []
        for i in range(count):
            self._mylist.append(value + stride * i)

    def something(self, val):
        return "In Test2 with %s" % val

    def __iter__(self):
        # Iteration restarts from the beginning each time.
        self._pos = 0
        return self

    def __next__(self):
        try:
            item = self._mylist[self._pos]
        except IndexError:
            raise StopIteration
        self._pos += 1
        return item
class TestClass3(object):
    """Fixture exercising console output, exception raising and indirect
    attribute assignment through a bound partial."""

    def __init__(self):
        print("I am Class3")

    def thirdfunction(self, val):
        print("Got value: %s" % val)

    def raiseSomething(self):
        raise SomeException("Something went wrong")

    def __hidden(self, name, value):
        # Name-mangled helper; only reachable via weird_indirection.
        setattr(self, name, value)

    def weird_indirection(self, name):
        # Returns a callable that sets attribute `name` to its argument.
        return functools.partial(self.__hidden, name)
def test_func(*args, **kwargs):
    # Fixture: accepts any arguments and returns a constant marker string.
    return "In test func"


# Module-level fixture value.
test_value = 1
| 2.5625 | 3 |
venv/Lib/site-packages/aniso8601/tests/test_interval.py | GabrielSilva2y3d/api_atividade-sqlalchemy | 0 | 16818 | <gh_stars>0
# -*- coding: utf-8 -*-
# Copyright (c) 2019, <NAME>
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
import unittest
import aniso8601
from aniso8601.exceptions import ISOFormatError
from aniso8601.interval import (_parse_interval, parse_interval,
parse_repeating_interval)
from aniso8601.tests.compat import mock
class TestIntervalParserFunctions(unittest.TestCase):
    """Tests for parse_interval: tuple fixtures pair an ISO 8601 interval
    string with the argument structure expected to reach the time builder;
    the builder itself is mocked, so only parsing is exercised."""

    def test_parse_interval(self):
        testtuples = (('P1M/1981-04-05T01:01:00',
                       {'end': (('1981', '04', '05', None, None, None, 'date'),
                                ('01', '01', '00', None, 'time'), 'datetime'),
                        'duration': (None, '1', None, None, None, None, None,
                                     'duration')}),
                      ('P1M/1981-04-05',
                       {'end': ('1981', '04', '05', None, None, None, 'date'),
                        'duration': (None, '1', None, None, None, None, None,
                                     'duration')}),
                      ('P1.5Y/2018-03-06',
                       {'end': ('2018', '03', '06', None, None, None, 'date'),
                        'duration': ('1.5', None, None, None, None, None, None,
                                     'duration')}),
                      ('PT1H/2014-11-12',
                       {'end': ('2014', '11', '12', None, None, None, 'date'),
                        'duration': (None, None, None, None, '1', None, None,
                                     'duration')}),
                      ('PT4H54M6.5S/2014-11-12',
                       {'end': ('2014', '11', '12', None, None, None, 'date'),
                        'duration': (None, None, None, None, '4', '54', '6.5',
                                     'duration')}),
                      ('PT10H/2050-03-01T13:00:00Z',
                       {'end': (('2050', '03', '01',
                                 None, None, None, 'date'),
                                ('13', '00', '00',
                                 (False, True, None, None,
                                  'Z', 'timezone'), 'time'), 'datetime'),
                        'duration': (None, None, None,
                                     None, '10', None, None, 'duration')}),
                      #Make sure we truncate, not round
                      #https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
                      ('PT0.0000001S/2018-03-06',
                       {'end': ('2018', '03', '06', None, None, None, 'date'),
                        'duration': (None, None, None,
                                     None, None, None,
                                     '0.0000001', 'duration')}),
                      ('PT2.0000048S/2018-03-06',
                       {'end': ('2018', '03', '06', None, None, None, 'date'),
                        'duration': (None, None, None,
                                     None, None, None,
                                     '2.0000048', 'duration')}),
                      ('1981-04-05T01:01:00/P1M1DT1M',
                       {'start': (('1981', '04', '05',
                                   None, None, None, 'date'),
                                  ('01', '01', '00', None, 'time'),
                                  'datetime'),
                        'duration': (None, '1', None,
                                     '1', None, '1', None, 'duration')}),
                      ('1981-04-05/P1M1D',
                       {'start': ('1981', '04', '05',
                                  None, None, None, 'date'),
                        'duration': (None, '1', None,
                                     '1', None, None, None, 'duration')}),
                      ('2018-03-06/P2.5M',
                       {'start': ('2018', '03', '06',
                                  None, None, None, 'date'),
                        'duration': (None, '2.5', None,
                                     None, None, None, None, 'duration')}),
                      ('2014-11-12/PT1H',
                       {'start': ('2014', '11', '12',
                                  None, None, None, 'date'),
                        'duration': (None, None, None,
                                     None, '1', None, None, 'duration')}),
                      ('2014-11-12/PT4H54M6.5S',
                       {'start': ('2014', '11', '12',
                                  None, None, None, 'date'),
                        'duration': (None, None, None,
                                     None, '4', '54', '6.5', 'duration')}),
                      ('2050-03-01T13:00:00Z/PT10H',
                       {'start': (('2050', '03', '01',
                                   None, None, None, 'date'),
                                  ('13', '00', '00',
                                   (False, True, None, None,
                                    'Z', 'timezone'), 'time'), 'datetime'),
                        'duration': (None, None, None,
                                     None, '10', None, None, 'duration')}),
                      #Make sure we truncate, not round
                      #https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
                      ('2018-03-06/PT0.0000001S',
                       {'start': ('2018', '03', '06',
                                  None, None, None, 'date'),
                        'duration': (None, None, None,
                                     None, None, None,
                                     '0.0000001', 'duration')}),
                      ('2018-03-06/PT2.0000048S',
                       {'start': ('2018', '03', '06',
                                  None, None, None, 'date'),
                        'duration': (None, None, None,
                                     None, None, None,
                                     '2.0000048', 'duration')}),
                      ('1980-03-05T01:01:00/1981-04-05T01:01:00',
                       {'start': (('1980', '03', '05',
                                   None, None, None, 'date'),
                                  ('01', '01', '00',
                                   None, 'time'), 'datetime'),
                        'end': (('1981', '04', '05',
                                 None, None, None, 'date'),
                                ('01', '01', '00',
                                 None, 'time'), 'datetime')}),
                      ('1980-03-05T01:01:00/1981-04-05',
                       {'start': (('1980', '03', '05',
                                   None, None, None, 'date'),
                                  ('01', '01', '00',
                                   None, 'time'), 'datetime'),
                        'end': ('1981', '04', '05',
                                None, None, None, 'date')}),
                      ('1980-03-05/1981-04-05T01:01:00',
                       {'start': ('1980', '03', '05',
                                  None, None, None, 'date'),
                        'end': (('1981', '04', '05',
                                 None, None, None, 'date'),
                                ('01', '01', '00',
                                 None, 'time'), 'datetime')}),
                      ('1980-03-05/1981-04-05',
                       {'start': ('1980', '03', '05',
                                  None, None, None, 'date'),
                        'end': ('1981', '04', '05',
                                None, None, None, 'date')}),
                      ('1981-04-05/1980-03-05',
                       {'start': ('1981', '04', '05',
                                  None, None, None, 'date'),
                        'end': ('1980', '03', '05',
                                None, None, None, 'date')}),
                      ('2050-03-01T13:00:00Z/2050-05-11T15:30:00Z',
                       {'start': (('2050', '03', '01',
                                   None, None, None, 'date'),
                                  ('13', '00', '00',
                                   (False, True, None, None,
                                    'Z', 'timezone'), 'time'), 'datetime'),
                        'end': (('2050', '05', '11',
                                 None, None, None, 'date'),
                                ('15', '30', '00',
                                 (False, True, None, None,
                                  'Z', 'timezone'), 'time'), 'datetime')}),
                      #Make sure we truncate, not round
                      #https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
                      ('1980-03-05T01:01:00.0000001/'
                       '1981-04-05T14:43:59.9999997',
                       {'start': (('1980', '03', '05',
                                   None, None, None, 'date'),
                                  ('01', '01', '00.0000001',
                                   None, 'time'), 'datetime'),
                        'end': (('1981', '04', '05',
                                 None, None, None, 'date'),
                                ('14', '43', '59.9999997', None, 'time'),
                                'datetime')}))

        for testtuple in testtuples:
            with mock.patch.object(aniso8601.builder.PythonTimeBuilder,
                                   'build_interval') as mockBuildInterval:
                mockBuildInterval.return_value = testtuple[1]

                result = parse_interval(testtuple[0])

                self.assertEqual(result, testtuple[1])
                mockBuildInterval.assert_called_once_with(**testtuple[1])

        #Test different separators
        with mock.patch.object(aniso8601.builder.PythonTimeBuilder,
                               'build_interval') as mockBuildInterval:
            expectedargs = {'start': (('1980', '03', '05', None, None, None,
                                       'date'),
                                      ('01', '01', '00', None, 'time'),
                                      'datetime'),
                            'end':(('1981', '04', '05', None, None, None,
                                    'date'),
                                   ('01', '01', '00', None, 'time'),
                                   'datetime')}

            mockBuildInterval.return_value = expectedargs

            result = parse_interval('1980-03-05T01:01:00--1981-04-05T01:01:00',
                                    intervaldelimiter='--')

            self.assertEqual(result, expectedargs)
            mockBuildInterval.assert_called_once_with(**expectedargs)

        with mock.patch.object(aniso8601.builder.PythonTimeBuilder,
                               'build_interval') as mockBuildInterval:
            expectedargs = {'start': (('1980', '03', '05', None, None, None,
                                       'date'),
                                      ('01', '01', '00', None, 'time'),
                                      'datetime'),
                            'end':(('1981', '04', '05', None, None, None,
                                    'date'),
                                   ('01', '01', '00', None, 'time'),
                                   'datetime')}

            mockBuildInterval.return_value = expectedargs

            result = parse_interval('1980-03-05 01:01:00/1981-04-05 01:01:00',
                                    datetimedelimiter=' ')

            self.assertEqual(result, expectedargs)
            mockBuildInterval.assert_called_once_with(**expectedargs)

    def test_parse_interval_mockbuilder(self):
        mockBuilder = mock.Mock()

        expectedargs = {'end': (('1981', '04', '05', None, None, None, 'date'),
                                ('01', '01', '00', None, 'time'), 'datetime'),
                        'duration':(None, '1', None, None, None, None, None,
                                    'duration')}

        mockBuilder.build_interval.return_value = expectedargs

        result = parse_interval('P1M/1981-04-05T01:01:00', builder=mockBuilder)

        self.assertEqual(result, expectedargs)
        mockBuilder.build_interval.assert_called_once_with(**expectedargs)

        mockBuilder = mock.Mock()

        expectedargs = {'start': ('2014', '11', '12', None, None, None,
                                  'date'),
                        'duration': (None, None, None, None, '1', None, None,
                                     'duration')}

        mockBuilder.build_interval.return_value = expectedargs

        result = parse_interval('2014-11-12/PT1H', builder=mockBuilder)

        self.assertEqual(result, expectedargs)
        mockBuilder.build_interval.assert_called_once_with(**expectedargs)

        mockBuilder = mock.Mock()

        expectedargs = {'start': (('1980', '03', '05', None, None, None,
                                   'date'),
                                  ('01', '01', '00', None, 'time'),
                                  'datetime'),
                        'end': (('1981', '04', '05', None, None, None,
                                 'date'),
                                ('01', '01', '00', None, 'time'),
                                'datetime')}

        mockBuilder.build_interval.return_value = expectedargs

        result = parse_interval('1980-03-05T01:01:00/1981-04-05T01:01:00',
                                builder=mockBuilder)

        self.assertEqual(result, expectedargs)
        mockBuilder.build_interval.assert_called_once_with(**expectedargs)

    def test_parse_interval_relative(self):
        with mock.patch.object(aniso8601.builder.RelativeTimeBuilder,
                               'build_interval') as mockBuildInterval:
            expectedargs = {'end': (('1981', '04', '05', None, None, None,
                                     'date'),
                                    ('01', '01', '00', None, 'time'),
                                    'datetime'),
                            'duration': (None, '1', None, None, None, None,
                                         None, 'duration')}

            mockBuildInterval.return_value = expectedargs

            result = parse_interval('P1M/1981-04-05T01:01:00', relative=True)

            self.assertEqual(result, expectedargs)
            mockBuildInterval.assert_called_once_with(**expectedargs)

        with mock.patch.object(aniso8601.builder.RelativeTimeBuilder,
                               'build_interval') as mockBuildInterval:
            expectedargs = {'start': ('2014', '11', '12', None, None, None,
                                      'date'),
                            'duration': (None, None, None, None, '1', None,
                                         None, 'duration')}

            mockBuildInterval.return_value = expectedargs

            result = parse_interval('2014-11-12/PT1H', relative=True)

            self.assertEqual(result, expectedargs)
            mockBuildInterval.assert_called_once_with(**expectedargs)

        with mock.patch.object(aniso8601.builder.RelativeTimeBuilder,
                               'build_interval') as mockBuildInterval:
            expectedargs = {'start': (('1980', '03', '05', None, None, None,
                                       'date'),
                                      ('01', '01', '00', None, 'time'),
                                      'datetime'),
                            'end': (('1981', '04', '05', None, None, None,
                                     'date'),
                                    ('01', '01', '00', None, 'time'),
                                    'datetime')}

            mockBuildInterval.return_value = expectedargs

            result = parse_interval('1980-03-05T01:01:00/1981-04-05T01:01:00',
                                    relative=True)

            self.assertEqual(result, expectedargs)
            mockBuildInterval.assert_called_once_with(**expectedargs)

    def test_parse_interval_repeating(self):
        #Parse interval can't parse repeating intervals
        with self.assertRaises(ISOFormatError):
            parse_interval('R3/1981-04-05/P1D')

        with self.assertRaises(ISOFormatError):
            parse_interval('R3/1981-04-05/P0003-06-04T12:30:05.5')

        with self.assertRaises(ISOFormatError):
            parse_interval('R/PT1H2M/1980-03-05T01:01:00')

    def test_parse_interval_suffixgarbage(self):
        #Don't allow garbage after the duration
        #https://bitbucket.org/nielsenb/aniso8601/issues/9/durations-with-trailing-garbage-are-parsed
        with self.assertRaises(ValueError):
            parse_interval('2001/P1Dasdf', builder=None)

        with self.assertRaises(ValueError):
            parse_interval('P1Dasdf/2001', builder=None)

        with self.assertRaises(ValueError):
            parse_interval('2001/P0003-06-04T12:30:05.5asdfasdf', builder=None)

        with self.assertRaises(ValueError):
            parse_interval('P0003-06-04T12:30:05.5asdfasdf/2001', builder=None)
class TestRepeatingIntervalParserFunctions(unittest.TestCase):
    """Tests for parse_repeating_interval and the internal _parse_interval
    helper; the time builder is mocked, so only parsing is exercised."""

    def test_parse_repeating_interval(self):
        with mock.patch.object(aniso8601.builder.PythonTimeBuilder,
                               'build_repeating_interval') as mockBuilder:
            expectedargs = {'R': False, 'Rnn': '3',
                            'interval': (('1981', '04', '05', None, None, None,
                                          'date'),
                                         None,
                                         (None, None, None, '1', None, None,
                                          None, 'duration'),
                                         'interval')}

            mockBuilder.return_value = expectedargs

            result = parse_repeating_interval('R3/1981-04-05/P1D')

            self.assertEqual(result, expectedargs)
            mockBuilder.assert_called_once_with(**expectedargs)

        with mock.patch.object(aniso8601.builder.PythonTimeBuilder,
                               'build_repeating_interval') as mockBuilder:
            expectedargs = {'R': False, 'Rnn': '11',
                            'interval': (None,
                                         (('1980', '03', '05', None, None,
                                           None, 'date'),
                                          ('01', '01', '00', None, 'time'),
                                          'datetime'),
                                         (None, None, None, None, '1', '2',
                                          None, 'duration'),
                                         'interval')}

            mockBuilder.return_value = expectedargs

            result = parse_repeating_interval('R11/PT1H2M/1980-03-05T01:01:00')

            self.assertEqual(result, expectedargs)
            mockBuilder.assert_called_once_with(**expectedargs)

        with mock.patch.object(aniso8601.builder.PythonTimeBuilder,
                               'build_repeating_interval') as mockBuilder:
            expectedargs = {'R': False, 'Rnn': '2',
                            'interval': ((('1980', '03', '05', None, None,
                                           None, 'date'),
                                          ('01', '01', '00', None, 'time'),
                                          'datetime'),
                                         (('1981', '04', '05', None, None,
                                           None, 'date'),
                                          ('01', '01', '00', None, 'time'),
                                          'datetime'),
                                         None,
                                         'interval')}

            mockBuilder.return_value = expectedargs

            result = parse_repeating_interval('R2--1980-03-05T01:01:00--'
                                              '1981-04-05T01:01:00',
                                              intervaldelimiter='--')

            self.assertEqual(result, expectedargs)
            mockBuilder.assert_called_once_with(**expectedargs)

        with mock.patch.object(aniso8601.builder.PythonTimeBuilder,
                               'build_repeating_interval') as mockBuilder:
            expectedargs = {'R': False, 'Rnn': '2',
                            'interval': ((('1980', '03', '05', None, None,
                                           None, 'date'),
                                          ('01', '01', '00', None, 'time'),
                                          'datetime'),
                                         (('1981', '04', '05', None, None,
                                           None, 'date'),
                                          ('01', '01', '00', None, 'time'),
                                          'datetime'),
                                         None,
                                         'interval')}

            mockBuilder.return_value = expectedargs

            result = parse_repeating_interval('R2/'
                                              '1980-03-05 01:01:00/'
                                              '1981-04-05 01:01:00',
                                              datetimedelimiter=' ')

            self.assertEqual(result, expectedargs)
            mockBuilder.assert_called_once_with(**expectedargs)

        with mock.patch.object(aniso8601.builder.PythonTimeBuilder,
                               'build_repeating_interval') as mockBuilder:
            expectedargs = {'R': True, 'Rnn': None,
                            'interval': (None,
                                         (('1980', '03', '05', None, None,
                                           None, 'date'),
                                          ('01', '01', '00', None, 'time'),
                                          'datetime'),
                                         (None, None, None, None, '1', '2',
                                          None, 'duration'),
                                         'interval')}

            mockBuilder.return_value = expectedargs

            result = parse_repeating_interval('R/PT1H2M/1980-03-05T01:01:00')

            self.assertEqual(result, expectedargs)
            mockBuilder.assert_called_once_with(**expectedargs)

    def test_parse_repeating_interval_mockbuilder(self):
        mockBuilder = mock.Mock()

        args = {'R': False, 'Rnn': '3',
                'interval': (('1981', '04', '05', None, None, None,
                              'date'),
                             None,
                             (None, None, None, '1', None, None,
                              None, 'duration'),
                             'interval')}

        mockBuilder.build_repeating_interval.return_value = args

        result = parse_repeating_interval('R3/1981-04-05/P1D',
                                          builder=mockBuilder)

        self.assertEqual(result, args)
        mockBuilder.build_repeating_interval.assert_called_once_with(**args)

        mockBuilder = mock.Mock()

        args = {'R': False, 'Rnn': '11',
                'interval': (None,
                             (('1980', '03', '05', None, None, None,
                               'date'),
                              ('01', '01', '00', None, 'time'),
                              'datetime'),
                             (None, None, None, None, '1', '2',
                              None, 'duration'),
                             'interval')}

        mockBuilder.build_repeating_interval.return_value = args

        result = parse_repeating_interval('R11/PT1H2M/1980-03-05T01:01:00',
                                          builder=mockBuilder)

        self.assertEqual(result, args)
        mockBuilder.build_repeating_interval.assert_called_once_with(**args)

        mockBuilder = mock.Mock()

        args = {'R': True, 'Rnn': None,
                'interval': (None,
                             (('1980', '03', '05', None, None, None,
                               'date'),
                              ('01', '01', '00', None, 'time'),
                              'datetime'),
                             (None, None, None, None, '1', '2',
                              None, 'duration'),
                             'interval')}

        mockBuilder.build_repeating_interval.return_value = args

        result = parse_repeating_interval('R/PT1H2M/1980-03-05T01:01:00',
                                          builder=mockBuilder)

        self.assertEqual(result, args)
        mockBuilder.build_repeating_interval.assert_called_once_with(**args)

    def test_parse_repeating_interval_relative(self):
        with mock.patch.object(aniso8601.builder.RelativeTimeBuilder,
                               'build_repeating_interval') as mockBuild:
            expectedargs = {'R': False, 'Rnn': '3',
                            'interval': (('1981', '04', '05', None, None, None,
                                          'date'),
                                         None,
                                         (None, None, None, '1', None, None,
                                          None, 'duration'),
                                         'interval')}

            mockBuild.return_value = expectedargs

            result = parse_repeating_interval('R3/1981-04-05/P1D', relative=True)

            self.assertEqual(result, expectedargs)
            mockBuild.assert_called_once_with(**expectedargs)

        with mock.patch.object(aniso8601.builder.RelativeTimeBuilder,
                               'build_repeating_interval') as mockBuild:
            expectedargs = {'R': False, 'Rnn': '11',
                            'interval': (None,
                                         (('1980', '03', '05', None, None,
                                           None, 'date'),
                                          ('01', '01', '00', None, 'time'),
                                          'datetime'),
                                         (None, None, None, None, '1', '2',
                                          None, 'duration'),
                                         'interval')}

            mockBuild.return_value = expectedargs

            result = parse_repeating_interval('R11/'
                                              'PT1H2M/'
                                              '1980-03-05T01:01:00',
                                              relative=True)

            self.assertEqual(result, expectedargs)
            mockBuild.assert_called_once_with(**expectedargs)

        with mock.patch.object(aniso8601.builder.RelativeTimeBuilder,
                               'build_repeating_interval') as mockBuild:
            expectedargs = {'R': True, 'Rnn': None,
                            'interval': (None,
                                         (('1980', '03', '05', None, None,
                                           None, 'date'),
                                          ('01', '01', '00', None, 'time'),
                                          'datetime'),
                                         (None, None, None, None, '1', '2',
                                          None, 'duration'),
                                         'interval')}

            mockBuild.return_value = expectedargs

            result = parse_repeating_interval('R/'
                                              'PT1H2M/'
                                              '1980-03-05T01:01:00',
                                              relative=True)

            self.assertEqual(result, expectedargs)
            mockBuild.assert_called_once_with(**expectedargs)

    def test_parse_repeating_interval_suffixgarbage(self):
        #Don't allow garbage after the duration
        #https://bitbucket.org/nielsenb/aniso8601/issues/9/durations-with-trailing-garbage-are-parsed
        with self.assertRaises(ISOFormatError):
            parse_repeating_interval('R3/1981-04-05/P1Dasdf', builder=None)

        with self.assertRaises(ISOFormatError):
            parse_repeating_interval('R3/'
                                     '1981-04-05/'
                                     'P0003-06-04T12:30:05.5asdfasdf',
                                     builder=None)

    def test_parse_interval_internal(self):
        #Test the internal _parse_interval function
        testtuples = (('P1M/1981-04-05T01:01:00',
                       {'end': (('1981', '04', '05', None, None, None, 'date'),
                                ('01', '01', '00', None, 'time'), 'datetime'),
                        'duration': (None, '1', None, None, None, None, None,
                                     'duration')}),
                      ('P1M/1981-04-05',
                       {'end': ('1981', '04', '05', None, None, None, 'date'),
                        'duration': (None, '1', None, None, None, None, None,
                                     'duration')}),
                      ('P1.5Y/2018-03-06',
                       {'end': ('2018', '03', '06', None, None, None, 'date'),
                        'duration': ('1.5', None, None, None, None, None, None,
                                     'duration')}),
                      ('PT1H/2014-11-12',
                       {'end': ('2014', '11', '12', None, None, None, 'date'),
                        'duration': (None, None, None, None, '1', None, None,
                                     'duration')}),
                      ('PT4H54M6.5S/2014-11-12',
                       {'end': ('2014', '11', '12', None, None, None, 'date'),
                        'duration': (None, None, None, None, '4', '54', '6.5',
                                     'duration')}),
                      #Make sure we truncate, not round
                      #https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
                      ('PT0.0000001S/2018-03-06',
                       {'end': ('2018', '03', '06', None, None, None, 'date'),
                        'duration': (None, None, None,
                                     None, None, None,
                                     '0.0000001', 'duration')}),
                      ('PT2.0000048S/2018-03-06',
                       {'end': ('2018', '03', '06', None, None, None, 'date'),
                        'duration': (None, None, None,
                                     None, None, None,
                                     '2.0000048', 'duration')}),
                      ('1981-04-05T01:01:00/P1M1DT1M',
                       {'start': (('1981', '04', '05',
                                   None, None, None, 'date'),
                                  ('01', '01', '00', None, 'time'),
                                  'datetime'),
                        'duration': (None, '1', None,
                                     '1', None, '1', None, 'duration')}),
                      ('1981-04-05/P1M1D',
                       {'start': ('1981', '04', '05',
                                  None, None, None, 'date'),
                        'duration': (None, '1', None,
                                     '1', None, None, None, 'duration')}),
                      ('2018-03-06/P2.5M',
                       {'start': ('2018', '03', '06',
                                  None, None, None, 'date'),
                        'duration': (None, '2.5', None,
                                     None, None, None, None, 'duration')}),
                      ('2014-11-12/PT1H',
                       {'start': ('2014', '11', '12',
                                  None, None, None, 'date'),
                        'duration': (None, None, None,
                                     None, '1', None, None, 'duration')}),
                      ('2014-11-12/PT4H54M6.5S',
                       {'start': ('2014', '11', '12',
                                  None, None, None, 'date'),
                        'duration': (None, None, None,
                                     None, '4', '54', '6.5', 'duration')}),
                      #Make sure we truncate, not round
                      #https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
                      ('2018-03-06/PT0.0000001S',
                       {'start': ('2018', '03', '06',
                                  None, None, None, 'date'),
                        'duration': (None, None, None,
                                     None, None, None,
                                     '0.0000001', 'duration')}),
                      ('2018-03-06/PT2.0000048S',
                       {'start': ('2018', '03', '06',
                                  None, None, None, 'date'),
                        'duration': (None, None, None,
                                     None, None, None,
                                     '2.0000048', 'duration')}),
                      ('1980-03-05T01:01:00/1981-04-05T01:01:00',
                       {'start': (('1980', '03', '05',
                                   None, None, None, 'date'),
                                  ('01', '01', '00',
                                   None, 'time'), 'datetime'),
                        'end': (('1981', '04', '05',
                                 None, None, None, 'date'),
                                ('01', '01', '00',
                                 None, 'time'), 'datetime')}),
                      ('1980-03-05T01:01:00/1981-04-05',
                       {'start': (('1980', '03', '05',
                                   None, None, None, 'date'),
                                  ('01', '01', '00',
                                   None, 'time'), 'datetime'),
                        'end': ('1981', '04', '05',
                                None, None, None, 'date')}),
                      ('1980-03-05/1981-04-05T01:01:00',
                       {'start': ('1980', '03', '05',
                                  None, None, None, 'date'),
                        'end': (('1981', '04', '05',
                                 None, None, None, 'date'),
                                ('01', '01', '00',
                                 None, 'time'), 'datetime')}),
                      ('1980-03-05/1981-04-05',
                       {'start': ('1980', '03', '05',
                                  None, None, None, 'date'),
                        'end': ('1981', '04', '05',
                                None, None, None, 'date')}),
                      ('1981-04-05/1980-03-05',
                       {'start': ('1981', '04', '05',
                                  None, None, None, 'date'),
                        'end': ('1980', '03', '05',
                                None, None, None, 'date')}),
                      #Make sure we truncate, not round
                      #https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
                      ('1980-03-05T01:01:00.0000001/'
                       '1981-04-05T14:43:59.9999997',
                       {'start': (('1980', '03', '05',
                                   None, None, None, 'date'),
                                  ('01', '01', '00.0000001',
                                   None, 'time'), 'datetime'),
                        'end': (('1981', '04', '05',
                                 None, None, None, 'date'),
                                ('14', '43', '59.9999997', None, 'time'),
                                'datetime')}))

        for testtuple in testtuples:
            mockBuilder = mock.Mock()
            mockBuilder.build_interval.return_value = testtuple[1]

            result = _parse_interval(testtuple[0], mockBuilder)

            self.assertEqual(result, testtuple[1])
            mockBuilder.build_interval.assert_called_once_with(**testtuple[1])

        #Test different separators
        expectedargs = {'start': (('1980', '03', '05',
                                   None, None, None,
                                   'date'),
                                  ('01', '01', '00',
                                   None, 'time'),
                                  'datetime'),
                        'end': (('1981', '04', '05',
                                 None, None, None,
                                 'date'),
                                ('01', '01', '00',
                                 None, 'time'),
                                'datetime')}

        mockBuilder = mock.Mock()
        mockBuilder.build_interval.return_value = expectedargs

        result = _parse_interval('1980-03-05T01:01:00--1981-04-05T01:01:00',
                                 mockBuilder,
                                 intervaldelimiter='--')

        self.assertEqual(result, expectedargs)
        mockBuilder.build_interval.assert_called_once_with(**expectedargs)

        expectedargs = {'start': (('1980', '03', '05',
                                   None, None, None,
                                   'date'),
                                  ('01', '01', '00',
                                   None, 'time'),
                                  'datetime'),
                        'end': (('1981', '04', '05',
                                 None, None, None,
                                 'date'),
                                ('01', '01', '00',
                                 None, 'time'),
                                'datetime')}

        mockBuilder = mock.Mock()
        mockBuilder.build_interval.return_value = expectedargs

        #BUGFIX: the return value was previously discarded, so the assertion
        #below silently re-checked the stale `result` from the prior sub-test.
        result = _parse_interval('1980-03-05 01:01:00/1981-04-05 01:01:00',
                                 mockBuilder,
                                 datetimedelimiter=' ')

        self.assertEqual(result, expectedargs)
        mockBuilder.build_interval.assert_called_once_with(**expectedargs)
| 2.421875 | 2 |
cool/core/utils.py | 007gzs/django-cool | 11 | 16819 | <reponame>007gzs/django-cool
# encoding: utf-8
import operator
from functools import reduce
from django.core.exceptions import FieldDoesNotExist
from django.db.models import Q
from django.db.models.constants import LOOKUP_SEP
def split_camel_name(name, fall=False):
    """Split a camel-case name into its component words.

    GenerateURLs => [Generate, URLs]
    generateURLsLite => [generate, URLs, Lite]

    When ``fall`` is true, an additional split point is placed one
    position before every upper-to-lower transition.
    """
    if not name:
        return []
    boundaries = []
    prev_upper = name[0].isupper()
    for pos, char in enumerate(name):
        is_upper = char.isupper()
        if is_upper and not prev_upper:
            # rising edge: lower -> upper
            boundaries.append(pos)
        elif fall and prev_upper and not is_upper:
            # falling edge: upper -> lower (only when requested)
            boundaries.append(pos - 1)
        prev_upper = is_upper
    pieces = []
    start = 0
    for end in boundaries:
        if name[start:end]:
            pieces.append(name[start:end])
        start = end
    if name[start:]:
        pieces.append(name[start:])
    return pieces
def construct_search(queryset, field_name):
    """Translate a search field spec into an ORM lookup string.

    A ``^`` prefix means "starts with", ``=`` means exact match.  If
    ``field_name`` already ends in a valid query lookup (e.g.
    ``name__iexact``) it is returned unchanged; otherwise a
    case-insensitive ``__icontains`` lookup is built.
    """
    if field_name.startswith('^'):
        return "%s__istartswith" % field_name[1:]
    elif field_name.startswith('='):
        return "%s__iexact" % field_name[1:]
    # Use field_name if it includes a lookup.
    opts = queryset.model._meta
    lookup_fields = field_name.split(LOOKUP_SEP)
    # Go through the fields, following all relations.
    prev_field = None
    for path_part in lookup_fields:
        if path_part == 'pk':
            # 'pk' is an alias for the model's primary key field.
            path_part = opts.pk.name
        try:
            field = opts.get_field(path_part)
        except FieldDoesNotExist:
            # Use valid query lookups.
            if prev_field and prev_field.get_lookup(path_part):
                return field_name
        else:
            prev_field = field
            if hasattr(field, 'get_path_info'):
                # Update opts to follow the relation.
                opts = field.get_path_info()[-1].to_opts
    # Otherwise, use the field with icontains.
    return "%s__icontains" % field_name
def get_search_results(queryset, search_term, search_fields, model):
    """
    Return a tuple containing a queryset to implement the search
    and a boolean indicating if the results may contain duplicates.
    """
    # Name changed across Django versions; support both spellings.
    try:
        from django.contrib.admin.utils import (
            lookup_needs_distinct as lookup_spawns_duplicates,
        )
    except ImportError:
        from django.contrib.admin.utils import lookup_spawns_duplicates
    use_distinct = False
    if search_fields and search_term:
        orm_lookups = [construct_search(queryset, str(search_field)) for search_field in search_fields]
        # Each whitespace-separated term must match at least one field (AND of ORs).
        for bit in search_term.split():
            or_queries = [Q(**{orm_lookup: bit}) for orm_lookup in orm_lookups]
            queryset = queryset.filter(reduce(operator.or_, or_queries))
        # Joins across multi-valued relations can duplicate rows.
        use_distinct |= any(lookup_spawns_duplicates(model._meta, search_spec) for search_spec in orm_lookups)
    return queryset, use_distinct
setup.py | atait/klayout-gadgets | 13 | 16820 | <gh_stars>10-100
from setuptools import setup
def readme():
    """Return the text of README.md (used as the long description)."""
    with open('README.md', 'r') as readme_file:
        return readme_file.read()
# Package metadata and entry points for the lygadgets distribution.
# The console scripts expose the klayout linking helpers on the command line.
setup(name='lygadgets',
      version='0.1.31',
      description='Tools to make klayout, the standalone, and python environments work better together',
      long_description=readme(),
      long_description_content_type='text/markdown',
      author='<NAME>',
      author_email='<EMAIL>',
      license='MIT',
      packages=['lygadgets'],
      install_requires=['future', 'xmltodict'],
      package_data={'': ['*.lym']},  # ship the KLayout macro files
      include_package_data=True,
      entry_points={'console_scripts': [
          'lygadgets_link=lygadgets.command_line:cm_link_any',
          'lygadgets_unlink=lygadgets.command_line:cm_unlink_any',
      ]},
      )
| 1.539063 | 2 |
Python/Zelle/Chapter10_DefiningClasses/ProgrammingExercises/16_CannonballTarget/inputDialog.py | jeffvswanson/CodingPractice | 0 | 16821 | <filename>Python/Zelle/Chapter10_DefiningClasses/ProgrammingExercises/16_CannonballTarget/inputDialog.py
# inputDialog.py
""" Provides a window to get input values
from the user to animate a cannonball."""
from graphics import GraphWin, Entry, Text, Point
from button import Button
class InputDialog:
    """ A custom window for getting simulation values (angle, velocity,
    and height) from the user."""
    def __init__(self, angle, vel, height):
        """ Build and display the input window, pre-filled with the
        given angle, velocity and height values. """
        self.win = win = GraphWin("Initial Values", 200, 300)
        # Inverted y-axis so row 1 is at the top of the window.
        win.setCoords(0, 4.5, 4, 0.5)
        Text(Point(1, 1), "Angle").draw(win)
        self.angle = Entry(Point(3, 1), 5).draw(win)
        self.angle.setText(str(angle))
        Text(Point(1, 2), "Velocity").draw(win)
        self.vel = Entry(Point(3, 2), 5).draw(win)
        self.vel.setText(str(vel))
        Text(Point(1, 3), "Height").draw(win)
        self.height = Entry(Point(3, 3), 5).draw(win)
        self.height.setText(str(height))
        self.fire = Button(win, Point(1, 4), 1.25, 0.5, "Fire!")
        self.fire.activate()
        self.quit = Button(win, Point(3, 4), 1.25, 0.5, "Quit")
        self.quit.activate()
    def interact(self):
        """ wait for user to click Quit or Fire button
        Returns a string ("Quit" or "Fire!") indicating which button
        was clicked
        """
        while True:
            pt = self.win.getMouse()
            if self.quit.clicked(pt):
                return "Quit"
            if self.fire.clicked(pt):
                return "Fire!"
    def getValues(self):
        """ return input values as floats: (angle, velocity, height) """
        a = float(self.angle.getText())
        v = float(self.vel.getText())
        h = float(self.height.getText())
        return a, v, h
    def close(self):
        """ close the input window """
        self.win.close()
gym_envs/envs/reacher_done.py | gautams3/reacher-done | 1 | 16822 | <reponame>gautams3/reacher-done
import gym
from gym import error, spaces, utils
from gym.utils import seeding
from gym.envs.mujoco.reacher import ReacherEnv
import numpy as np
class ReacherDoneEnv(ReacherEnv):
    """Reacher variant that ends the episode once the fingertip is
    within 0.04 distance units of the target, adding a success bonus
    and a constant per-step time penalty to the reward."""
    metadata = {'render.modes': ['human']}
    # def __init__(self):
    # ...
    def step(self, action):
        """Advance the simulation one step.

        Returns (observation, reward, done, info).  The reward sums the
        negative fingertip-to-target distance, a control cost, the time
        penalty, and a bonus when the episode terminates.
        """
        self.do_simulation(action, self.frame_skip)
        vec = self.get_body_com("fingertip")-self.get_body_com("target")
        dist = np.linalg.norm(vec)
        reward_dist = - dist
        reward_ctrl = - 0.3 * np.square(action).sum()
        reward_time = -0.2 # 5 times larger, to see the effect of time reward
        done = dist < 0.04 # done if it's close enough
        done_reward = 2
        # `done` is a bool, so the bonus is added only on success.
        reward = reward_dist + reward_ctrl + reward_time + done*done_reward
        ob = self._get_obs()
        info = dict(reward_dist=reward_dist, reward_ctrl=reward_ctrl, dist=dist)
        return ob, reward, done, info
    # def reset(self):
    #     super().reset()
    # def render(self, mode='human'):
    # ...
    # def close(self):
    # ...
stackalytics/get_metric.py | yaoice/python_demo | 0 | 16823 | #/usr/bin/env python
import httplib2
import json
import sys
from prettytable import PrettyTable
from config import field
class BaseStackalytics(object):
    """Singleton base class: every instantiation returns one shared object.

    NOTE(review): forwards *args/**kwargs to object.__new__, which only
    works on Python 2 (this module uses py2 print statements elsewhere).
    """
    # Shared singleton instance, created lazily on first construction.
    _instance = None
    def __new__(cls, *args, **kwargs):
        if not cls._instance:
            cls._instance = super(BaseStackalytics, cls).__new__(cls, *args, **kwargs)
        return cls._instance
class Stackalytics(BaseStackalytics):
    """Thin HTTP client for the stackalytics.com REST API (Python 2)."""
    def __init__(self, prefix):
        """``prefix`` is the base URL, e.g. 'http://stackalytics.com'."""
        super(Stackalytics, self).__init__()
        self._prefix = prefix
        self._http_instance = self.get_http_instance()
    def get_http_instance(self):
        # ".cache" enables httplib2's on-disk response cache.
        return httplib2.Http(".cache")
    def get_metrics(self, url):
        """GET ``prefix + url`` and return the (response, content) pair.

        Exits the process with status 1 if the server cannot be resolved.
        """
        try:
            return self._http_instance.request(self._prefix + url,
                                               "GET",
                                               headers={'Accept': 'application/json'})
        except httplib2.ServerNotFoundError:
            print "Url {} not found".format(url)
            sys.exit(1)
def main():
    """Fetch Stackalytics metrics and print summary tables.

    Collects per-company and per-engineer statistics for every
    (project_type, metric) pair configured in ``field`` and renders them
    with PrettyTable.  A missing API result defaults to a zero metric.
    """
    company_statistics = {}
    engineer_statistics = {}
    stackalytics = Stackalytics("http://stackalytics.com")
    # --- gather per-company statistics ---
    for project_type in field['project_type']:
        company_statistics[project_type] = {}
        for company in field['company']:
            company_statistics[project_type][company] = {}
            for metric in field['metric']:
                company_statistics[project_type][company][metric] = {}
                url = "/api/1.0/stats/companies?release={}&metric={}&project_type={}&company={}".format(field['release'],
                                                                                                       metric,
                                                                                                       project_type,
                                                                                                       company)
                resp, content = stackalytics.get_metrics(url)
                stats = json.loads(content)['stats']
                try:
                    metric_dict = stats[0]
                except IndexError:
                    # No data returned for this combination; record zero.
                    metric_dict = {'id': company, 'metric': 0}
                company_statistics[project_type][company][metric] = metric_dict
    # --- gather per-engineer statistics ---
    for project_type in field['project_type']:
        engineer_statistics[project_type] = {}
        for engineer in field['engineers']['ids']:
            engineer_statistics[project_type][engineer] = {}
            for metric in field['metric']:
                engineer_statistics[project_type][engineer][metric] = {}
                engineers_url = "/api/1.0/stats/engineers?&release={}&metric={}"\
                                "&project_type={}&company={}&user_id={}".format(field['release'],
                                                                                metric,
                                                                                project_type,
                                                                                field['engineers']['owercompany'],
                                                                                engineer)
                engineers_resp, engineers_content = stackalytics.get_metrics(engineers_url)
                engineers_stats = json.loads(engineers_content)['stats']
                try:
                    engineers_metric_dict = engineers_stats[0]
                except IndexError:
                    engineers_metric_dict = {'id': engineer, 'metric': 0}
                engineer_statistics[project_type][engineer][metric] = engineers_metric_dict
    # --- render engineer tables (one per project type) ---
    engineer_table_field = ['metric'] + [engineer for engineer in field['engineers']['ids']]
    for project_type in field['project_type']:
        print "{} {} project by tencent individual:".format(field['release'], project_type)
        table = PrettyTable(engineer_table_field)
        for metric in field['metric']:
            table.add_row([metric] + [engineer_statistics[project_type][engineer][metric]['metric'] for engineer in field['engineers']['ids']])
        print table
    # --- render company tables (undo the %20 URL escaping in headers) ---
    table_field = ['metric'] + [company.replace('%20', ' ') for company in field['company']]
    for project_type in field['project_type']:
        print "{} {} project by company:".format(field['release'], project_type)
        table = PrettyTable(table_field)
        for metric in field['metric']:
            table.add_row([metric] + [company_statistics[project_type][company][metric]['metric'] for company in field['company']])
        print table
    # print company_statistics
# print company_statistics
if __name__ == '__main__':
sys.exit(main())
| 2.40625 | 2 |
modules/worker.py | strangest-quark/iConsent | 10 | 16824 | import logging
from queue import Queue
from threading import Thread
from time import time
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class Worker(Thread):
    """Daemon-style worker that consumes (video, txnId) jobs from a queue
    and pushes each generated video part onto an output queue."""

    def __init__(self, queue, out_que):
        Thread.__init__(self)
        self.queue = queue
        self.out_que = out_que

    def run(self):
        # Loop forever; the owning process is expected to mark the
        # thread as daemon so it dies with the main thread.
        while True:
            video, txnId = self.queue.get()
            try:
                self.out_que.put(video.generate_video_part(txnId))
            finally:
                # Always acknowledge the job so queue.join() can return.
                self.queue.task_done()
def main(video_obj_arr, txnId, n):
    """Fan video-part generation out to a small worker-thread pool.

    Returns the queue holding the generated parts.
    NOTE(review): the loop is range(1, n), so only video_obj_arr[0..n-2]
    (n-1 items) are queued — confirm this off-by-one is intentional.
    """
    ts = time()
    # Create a queue to communicate with the worker threads
    queue = Queue()
    out_que = Queue()
    # Create two worker threads
    for x in range(2):
        worker = Worker(queue, out_que)
        # Setting daemon to True will let the main thread exit even though the workers are blocking
        worker.daemon = True
        worker.start()
    # Put the tasks into the queue as a tuple
    for i in range(1, n):
        logger.info('Queueing {}'.format(i))
        queue.put((video_obj_arr[i-1], txnId))
    # Causes the main thread to wait for the queue to finish processing all the tasks
    queue.join()
    logging.info('Took %s', time() - ts)
    return out_que
if __name__ == '__main__':
main()
| 3.140625 | 3 |
src/saml2/extension/pefim.py | cnelson/pysaml2 | 5,079 | 16825 | #!/usr/bin/env python
import saml2
from saml2 import SamlBase
from saml2.xmldsig import KeyInfo
NAMESPACE = 'urn:net:eustix:names:tc:PEFIM:0.0:assertion'
class SPCertEncType_(SamlBase):
    """The urn:net:eustix:names:tc:PEFIM:0.0:assertion:SPCertEncType element """
    c_tag = 'SPCertEncType'
    c_namespace = NAMESPACE
    # Copy the schema metadata so class-level mutation below does not
    # leak into SamlBase itself.
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    c_children['{http://www.w3.org/2000/09/xmldsig#}KeyInfo'] = ('key_info',
                                                                 [KeyInfo])
    c_cardinality['key_info'] = {"min": 1}
    c_attributes['VerifyDepth'] = ('verify_depth', 'unsignedByte', False)
    c_child_order.extend(['key_info'])
    def __init__(self,
                 key_info=None,
                 x509_data=None,
                 verify_depth='1',
                 text=None,
                 extension_elements=None,
                 extension_attributes=None):
        """Build the element; ``x509_data`` is a convenience used to
        wrap raw certificate data in a KeyInfo when no ``key_info`` is
        given.  ``verify_depth`` maps to the VerifyDepth attribute."""
        SamlBase.__init__(self,
                          text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes)
        if key_info:
            self.key_info = key_info
        elif x509_data:
            self.key_info = KeyInfo(x509_data=x509_data)
        else:
            self.key_info = []
        self.verify_depth = verify_depth
        #self.x509_data = x509_data
def spcertenc_type__from_string(xml_string):
    """Deserialize an ``SPCertEncType`` element from an XML string."""
    return saml2.create_class_from_xml_string(SPCertEncType_, xml_string)
class SPCertEnc(SPCertEncType_):
    """The urn:net:eustix:names:tc:PEFIM:0.0:assertion:SPCertEnc element """
    c_tag = 'SPCertEnc'
    c_namespace = NAMESPACE
    c_children = SPCertEncType_.c_children.copy()
    c_attributes = SPCertEncType_.c_attributes.copy()
    c_child_order = SPCertEncType_.c_child_order[:]
    c_cardinality = SPCertEncType_.c_cardinality.copy()
def spcertenc_from_string(xml_string):
    """Deserialize an ``SPCertEnc`` element from an XML string."""
    return saml2.create_class_from_xml_string(SPCertEnc, xml_string)
# Registries mapping element tags to parser functions / element classes.
ELEMENT_FROM_STRING = {
    SPCertEnc.c_tag: spcertenc_from_string,
    SPCertEncType_.c_tag: spcertenc_type__from_string,
}
ELEMENT_BY_TAG = {
    'SPCertEnc': SPCertEnc,
    'SPCertEncType': SPCertEncType_,
}
def factory(tag, **kwargs):
    """Instantiate the element class registered under ``tag``."""
    return ELEMENT_BY_TAG[tag](**kwargs)
ontask/migrations/0004_remove_old_migration_refs.py | pinheiroo27/ontask_b | 33 | 16826 | # Generated by Django 2.2.4 on 2019-08-24 06:02
from django.db import connection as con, migrations
from psycopg2 import sql
def remove_old_migration_refs(apps, schema_editor):
    """Delete django_migrations rows belonging to the merged-away apps.

    The listed apps were consolidated into the single ``ontask`` app, so
    their stale migration-history records must be removed.
    """
    __sql_delete_migration_ref = 'DELETE FROM django_migrations WHERE app={0}'
    old_apps = [
        'action', 'core', 'dataops', 'logs', 'oauth', 'ontask_oauth',
        'profiles', 'scheduler', 'table', 'workflow']
    with con.cursor() as cursor:
        for app_name in old_apps:
            # sql.Literal safely quotes the app name for PostgreSQL.
            cursor.execute(
                sql.SQL(__sql_delete_migration_ref).format(
                    sql.Literal(app_name)))
class Migration(migrations.Migration):
    """Cleanup migration: drop migration records of the merged-away apps."""
    dependencies = [
        ('ontask', '0003_transfer_siteprefs'),
    ]
    operations = [
        # No reverse_code is supplied, so this migration is irreversible.
        migrations.RunPython(code=remove_old_migration_refs),
    ]
| 1.960938 | 2 |
scripts/tfloc_summary.py | lldelisle/bx-python | 122 | 16827 | <filename>scripts/tfloc_summary.py
#!/usr/bin/env python
"""
Read TFLOC output from stdin and write out a summary in which the nth line
contains the number of sites found in the nth alignment of the input.
TODO: This is very special case, should it be here?
"""
import sys
from collections import defaultdict
# Per-alignment site counter; unseen indices default to zero.
counts = defaultdict(int)
max_index = -1
for line in sys.stdin:
    if line[0].isdigit():
        # A line starting with a digit announces the current alignment index.
        current_index = int(line)
        max_index = max(current_index, max_index)
    elif line[0] == "'":
        # A quoted line is one TFLOC site found in the current alignment.
        counts[current_index] += 1
    else:
        raise ValueError("Invalid input line " + line)
# Emit one count per alignment, including alignments with zero sites.
for i in range(max_index + 1):
    print(counts.get(i, 0))
| 3.796875 | 4 |
plugins/data/bAbI/digitsDataPluginBAbI/data.py | Linda-liugongzi/DIGITS-digits-py3 | 0 | 16828 | # Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
import os
from digits.utils import subclass, override, constants
from digits.extensions.data.interface import DataIngestionInterface
from .forms import DatasetForm, InferenceForm
from . import utils
from flask_babel import lazy_gettext as _
DATASET_TEMPLATE = "templates/dataset_template.html"
INFERENCE_TEMPLATE = "templates/inference_template.html"
@subclass
class DataIngestion(DataIngestionInterface):
    """
    A data ingestion extension for the bAbI dataset
    """
    def __init__(self, is_inference_db=False, **kwargs):
        super(DataIngestion, self).__init__(**kwargs)
        self.userdata['is_inference_db'] = is_inference_db
        if 'train_text_data' not in self.userdata:
            # get task ID
            try:
                task_id = int(self.task_id)
            # NOTE(review): bare except hides all errors — consider
            # catching (TypeError, ValueError) explicitly.
            except:
                task_id = None
            self.userdata['task_id'] = task_id
            # get data - this doesn't scale well to huge datasets but this makes it
            # straightforard to create a mapping of words to indices and figure out max
            # dimensions of stories and sentences
            self.userdata['train_text_data'] = utils.parse_folder_phase(
                self.story_folder, task_id, train=True)
            self.userdata['stats'] = utils.get_stats(self.userdata['train_text_data'])
    @override
    def encode_entry(self, entry):
        # Encode one sample using the word map and size limits computed
        # from the training data.
        stats = self.userdata['stats']
        return utils.encode_sample(entry, stats['word_map'], stats['sentence_size'], stats['story_size'])
    @staticmethod
    @override
    def get_category():
        return "Text"
    @staticmethod
    @override
    def get_id():
        return "text-babi"
    @staticmethod
    @override
    def get_dataset_form():
        return DatasetForm()
    @staticmethod
    @override
    def get_dataset_template(form):
        """
        parameters:
        - form: form returned by get_dataset_form(). This may be populated
          with values if the job was cloned
        return:
        - (template, context) tuple
          - template is a Jinja template to use for rendering dataset creation
          options
          - context is a dictionary of context variables to use for rendering
          the form
        """
        extension_dir = os.path.dirname(os.path.abspath(__file__))
        template = open(os.path.join(extension_dir, DATASET_TEMPLATE), "r").read()
        context = {'form': form}
        return (template, context)
    @override
    def get_inference_form(self):
        return InferenceForm()
    @staticmethod
    @override
    def get_inference_template(form):
        # Same (template, context) contract as get_dataset_template().
        extension_dir = os.path.dirname(os.path.abspath(__file__))
        template = open(os.path.join(extension_dir, INFERENCE_TEMPLATE), "r").read()
        context = {'form': form}
        return (template, context)
    @staticmethod
    @override
    def get_title():
        return _("bAbI")
    @override
    def itemize_entries(self, stage):
        """Return the raw entries for the given DB stage.

        Training data is split so the first pct_val percent goes to the
        validation DB; inference DBs parse the user-provided snippet.
        """
        entries = []
        if not self.userdata['is_inference_db']:
            data = self.userdata['train_text_data']
            n_val_entries = int(len(data)*self.pct_val/100)
            if stage == constants.TRAIN_DB:
                entries = data[n_val_entries:]
            elif stage == constants.VAL_DB:
                entries = data[:n_val_entries]
        elif stage == constants.TEST_DB:
            if not bool(self.snippet):
                raise ValueError("You must write a story and a question")
            entries = utils.parse_lines(str(self.snippet).splitlines())
        return entries
| 2.109375 | 2 |
13_TransparentOrigami/fold2.py | dandrianneDEL/PyAdventOfCode2021 | 0 | 16829 | <filename>13_TransparentOrigami/fold2.py<gh_stars>0
import filehelper
fileResult = filehelper.readfile()
class Matrix:
    """Boolean dot grid for the AoC 2021 day-13 transparent-paper puzzle.

    ``cells[y][x]`` is True where a dot is present.  Note the grid is
    allocated one larger than maxX/maxY so those values are valid
    indices, matching the puzzle's inclusive coordinates.
    """
    cells: list[list[bool]]
    maxX: int
    maxY: int
    def __init__(self, sizeX:int, sizeY:int) -> None:
        self.cells = []
        self.maxX = sizeX
        self.maxY = sizeY
        # print(f"INIT matrix {sizeX}x{sizeY}")
        for y in range(sizeY+1):
            row = [False] * (sizeX+1)
            self.cells.append(row)
    def fill_coords(self, coords:list[list[int]]) -> None:
        """Mark every [x, y] pair in ``coords`` as a dot."""
        for carthesianCoordinate in coords:
            x = carthesianCoordinate[0]
            y = carthesianCoordinate[1]
            self.cells[y][x] = True
    def subselect(self, xStart:int, yStart:int, xMax:int, yMax:int, translateX: int, translateY: int) -> 'Matrix':
        """Return the inclusive sub-grid [xStart..xMax] x [yStart..yMax],
        shifting each kept dot by (-translateX, -translateY)."""
        print(f"x={xStart}-{xMax}, y={yStart}-{yMax}")
        newMatrix = Matrix(xMax-xStart, yMax-yStart)
        coords = []
        for x in range(xStart,xMax+1):
            for y in range(yStart, yMax+1):
                if self.cells[y][x]:
                    coords.append([x-translateX, y-translateY])
        print(f"part coords(translateY={translateY}): {coords}")
        newMatrix.fill_coords(coords)
        return newMatrix
    def merge_y(self, half2:'Matrix')->'Matrix':
        """Overlay ``half2`` flipped vertically onto self (fold along y)."""
        merged = Matrix(self.maxX, self.maxY-1)
        coords = []
        # populate cell if either folds are populated
        for x in range(self.maxX+1):
            for y in range(self.maxY):
                if self.cells[y][x] or half2.cells[half2.maxY-y][x]:
                    coords.append([x,y])
        merged.fill_coords(coords)
        return merged
    def merge_x(self, half2:'Matrix')->'Matrix':
        """Overlay ``half2`` flipped horizontally onto self (fold along x)."""
        merged = Matrix(self.maxX-1, self.maxY)
        coords = []
        for x in range(self.maxX):
            for y in range(self.maxY+1):
                if self.cells[y][x] or half2.cells[y][half2.maxX-x]:
                    coords.append([x,y])
        merged.fill_coords(coords)
        return merged
    def fold(self, fold) -> 'Matrix':
        """Apply one fold instruction ('x'|'y', axis) and return the
        merged grid; prints the grid before and after for debugging."""
        if fold[0] == 'y':
            yAxisToFold = fold[1]
            self.print(yAxisToFold, -1)
            merged = self.fold_y(yAxisToFold)
        else:
            xAxisToFold = fold[1]
            self.print(-1, xAxisToFold)
            merged = self.fold_x(xAxisToFold)
        merged.print(-1, -1)
        return merged
    def fold_y(self, y:int) -> 'Matrix':
        # Split along row y (the fold line itself is never dotted).
        half1 = self.subselect(0, 0, self.maxX, y, 0, 0)
        half2 = self.subselect(0, y, self.maxX, self.maxY, 0, y)
        return half1.merge_y(half2)
    def fold_x(self, x:int) -> 'Matrix':
        # Split along column x and merge the mirrored right half.
        half1 = self.subselect(0, 0, x, self.maxY, 0, 0)
        half2 = self.subselect(x, 0, self.maxX, self.maxY, x, 0)
        return half1.merge_x(half2)
    def print(self, splitY:int, splitX:int) -> None:
        """Render the grid; row ``splitY`` / column ``splitX`` are drawn
        as the fold line ('-' / '|'); pass -1 to draw no line."""
        for y in range(len(self.cells)):
            row = self.cells[y]
            txt = ""
            for x in range(len(row)):
                flag = row[x]
                if y == splitY:
                    txt += "-"
                elif x == splitX:
                    txt += "|"
                elif flag:
                    txt += "#"
                else:
                    txt += f"."
            print(txt)
# ******************************************
# PART 2 - Fold plastic transparent sheet
# Finish folding the transparent paper according to the instructions. The manual says the code is always eight capital letters.
# What code do you use to activate the infrared thermal imaging camera system?
# ******************************************
# Build the dot grid from the parsed input and apply every fold; the
# final grid printed by fold() spells out the activation code.
matrix = Matrix(fileResult.maxX, fileResult.maxY)
matrix.fill_coords(fileResult.coords)
# Perform folds
for fold in fileResult.folds:
    print(f"performing fold {fold}")
    matrix = matrix.fold(fold)
complete/01 - 10/Problem1/main.py | this-jacob/project-euler | 0 | 16830 | <reponame>this-jacob/project-euler
def main():
total = 0
for i in range(0, 1000):
if i % 3 == 0:
total += i
elif i % 5 == 0:
total += i
print(total)
if __name__ == '__main__':
main()
| 3.515625 | 4 |
reward/batcher/transforms/base_transform.py | lgvaz/torchrl | 5 | 16831 | <filename>reward/batcher/transforms/base_transform.py
class BaseTransform:
    """Identity base class for batcher transforms.

    Subclasses override the hooks below; the defaults pass data through
    unchanged and log nothing.
    """

    def transform_s(self, s, training=True):
        """Return the state ``s`` unchanged."""
        return s

    def transform_batch(self, batch, training=True):
        """Return the ``batch`` unchanged."""
        return batch

    def write_logs(self, logger):
        """No-op logging hook for subclasses to override."""
| 2.015625 | 2 |
webapp/scan_comments.py | ctrl-meta-f/ngk | 0 | 16832 | <reponame>ctrl-meta-f/ngk<filename>webapp/scan_comments.py
import logging
import time
import requests
import lxml.etree
import re
import os
from schema import ScopedSession, SyncState
# Log to the path given by LOG_FILE (defaults to ../logs/scan_comments.log).
logging.basicConfig(
    filename=os.getenv("LOG_FILE", "../logs/scan_comments.log"),
    format="%(asctime)s %(levelname)s %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=logging.DEBUG)
COMMENTS_URL = "http://govnokod.ru/comments"
# Polling cadence: poll every FAST_DELAY seconds for FAST_TO_SLOW_STEPS
# rounds after new activity, then fall back to SLOW_DELAY seconds.
FAST_DELAY = 15
SLOW_DELAY = 60
FAST_TO_SLOW_STEPS = 20
def fetch_latest_comments():
    """Scrape the comments page and return [(post_id, comment_id), ...].

    Raises requests.HTTPError on a non-2xx response.
    """
    logging.debug("Fetching comments...")
    r = requests.get(COMMENTS_URL)
    r.raise_for_status()
    root = lxml.etree.HTML(r.content)
    comments = []
    for link in root.xpath('//a[@class="comment-link"]'):
        # hrefs look like ".../<post_id>#comment<comment_id>"
        m = re.search("/([0-9]+)#comment([0-9]+)", link.get("href"))
        post_id = int(m.group(1))
        comment_id = int(m.group(2))
        comments.append((post_id, comment_id))
    return comments
def update_sync_states(comments):
    """Record newly seen comments in the SyncState table.

    For each (post_id, comment_id) pair, create or update the post's
    sync row, marking it pending with elevated priority.  Returns True
    if anything new was seen.
    """
    has_updates = False
    with ScopedSession() as session:
        for post_id, comment_id in comments:
            state = session.query(SyncState).filter(SyncState.post_id == post_id).one_or_none()
            if not state:
                logging.info("Got new comment %d for new post %d", comment_id, post_id)
                has_updates = True
                state = SyncState(post_id=post_id, last_comment_id=comment_id, pending=True, priority=SyncState.PRIORITY_HAS_COMMENTS)
                session.add(state)
            else:
                # Only advance when the comment id is strictly newer.
                if state.last_comment_id is None or comment_id > state.last_comment_id:
                    logging.info("Got new comment %d for post %d", comment_id, post_id)
                    has_updates = True
                    state.last_comment_id = comment_id
                    state.pending = True
                    state.priority=SyncState.PRIORITY_HAS_COMMENTS
    return has_updates
logging.info("=== started ===")
# Main polling loop: after any update (or error-free activity) poll fast
# for FAST_TO_SLOW_STEPS rounds, otherwise fall back to the slow delay.
fast_requests = 0
while True:
    try:
        comments = fetch_latest_comments()
        has_updates = update_sync_states(comments)
        if has_updates:
            fast_requests = FAST_TO_SLOW_STEPS
    except Exception as e:
        # Network/parse errors are logged and polling drops to slow mode.
        logging.exception(e)
        fast_requests = 0
    if fast_requests > 0:
        delay = FAST_DELAY
        fast_requests -= 1
    else:
        delay = SLOW_DELAY
    logging.debug("Sleeping for %d seconds (%d fast requests left)...", delay, fast_requests)
    time.sleep(delay)
| 2.359375 | 2 |
s3prl/upstream/example/hubconf.py | hhhaaahhhaa/s3prl | 856 | 16833 | from .expert import UpstreamExpert as _UpstreamExpert
def customized_upstream(*args, **kwargs):
    """
    To enable your customized pretrained model, you only need to implement
    upstream/example/expert.py and leave this file as is. This file is
    used to register the UpstreamExpert in upstream/example/expert.py

    The following is a brief introduction of the registration mechanism.
    The s3prl/hub.py will collect all the entries registered in this file
    (callable variables without the underscore prefix) as a centralized
    upstream factory. One can pick up this upstream from the factory via

    1.
    from s3prl.hub import customized_upstream
    model = customized_upstream(ckpt, model_config)

    2.
    model = torch.hub.load(
        'your_s3prl_path',
        'customized_upstream',
        ckpt,
        model_config,
        source='local',
    )

    Our run_downstream.py and downstream/runner.py follows the first usage

    All positional and keyword arguments are forwarded verbatim to
    UpstreamExpert (typically a checkpoint path and a model config);
    returns the initialized UpstreamExpert instance.
    """
    return _UpstreamExpert(*args, **kwargs)
| 2.4375 | 2 |
g-code-testing/g_code_parsing/g_code_functionality_defs/thermocycler/set_ramp_rate_g_code_functionality_def.py | Opentrons/protocol_framework | 0 | 16834 | from typing import Dict
from g_code_parsing.g_code_functionality_defs.g_code_functionality_def_base import (
GCodeFunctionalityDefBase,
)
class SetRampRateGCodeFunctionalityDef(GCodeFunctionalityDefBase):
    """Explanation provider for the thermocycler 'set ramp rate' G-code."""
    @classmethod
    def _generate_command_explanation(cls, g_code_args: Dict[str, str]) -> str:
        """Return a fixed explanation; ``g_code_args`` is ignored
        because the underlying firmware feature is unimplemented."""
        return (
            "Setting thermocycler ramp rate."
            "\nNote: This is an unimplemented feature, setting this does nothing"
        )
| 2.390625 | 2 |
src/mrio.py | ElcoK/MRIA_Argentina | 0 | 16835 | import os,sys
import pandas as pd
import numpy as np
import subprocess
from tqdm import tqdm
from ras_method import ras_method
import warnings
warnings.filterwarnings('ignore')
def est_trade_value(x, output_new, sector):
    """Estimate the trade value between two regions for one sector.

    The incoming ``x.gdp`` is a trade share; it is scaled by the smaller
    of the two regions' total sector output, i.e. trade is capped by the
    lesser trading partner.

    Parameters
    ----------
    x : pandas.Series
        Row with fields ``reg1``, ``reg2`` and ``gdp`` (a share).
        Mutated in place: ``gdp`` is overwritten with the scaled value.
    output_new : pandas.DataFrame
        MRIO table indexed by a (region, sector) MultiIndex; rows are
        summed to obtain total sectoral output per region.
    sector : str
        Sector code; the placeholder sectors 'other1'/'other2' map onto
        the imports row 'IMP'.

    Returns
    -------
    pandas.Series
        The mutated row ``x``.
    """
    # Former `sector is not 'other1'` identity checks replaced with real
    # equality tests: `is` on str literals is a CPython interning
    # artifact and a SyntaxWarning on Python >= 3.8.
    lookup = sector if sector not in ('other1', 'other2') else 'IMP'
    # Total output per (region, sector) row; computed once instead of twice.
    row_totals = output_new.sum(axis=1)
    sec_output = row_totals.loc[row_totals.index.get_level_values(1) == lookup].reset_index()
    # Cap the trade value by the smaller of the two regional outputs.
    x['gdp'] = x.gdp * min(
        sec_output.loc[sec_output.region == x.reg1].values[0][2],
        sec_output.loc[sec_output.region == x.reg2].values[0][2])
    return x
def estimate(table='INDEC',year=2015,print_output=False,print_progress=True):
"""
Function to create a province-level MRIO table, based on a national IO table. The default is the INDEC table.
"""
data_path = os.path.join('..','data')
# load sector data
sectors = list(pd.read_excel(os.path.join(data_path,'other_sources',
'industry_high_level_classification.xlsx'))['SEC_CODE'].values)
# load provincial mappers
reg_mapper = pd.read_excel(os.path.join(data_path,'INDEC','sh_cou_06_16.xls'),sheet_name='reg_mapper',header=None).iloc[:,:2]
reg_mapper = dict(zip(reg_mapper[0],reg_mapper[1]))
# load provincial data
prov_data = pd.read_excel(os.path.join(data_path,'INDEC','PIB_provincial_06_17.xls'),sheet_name='VBP',
skiprows=3,index_col=[0],header=[0],nrows=71)
prov_data = prov_data.loc[[x.isupper() for x in prov_data.index],:]
prov_data.columns = [x.replace(' ','_') for x in ['Ciudad de Buenos Aires', 'Buenos Aires', 'Catamarca', 'Cordoba',
'Corrientes', 'Chaco', 'Chubut', 'Entre Rios', 'Formosa', 'Jujuy',
'La Pampa', 'La Rioja', 'Mendoza', 'Misiones', 'Neuquen', 'Rio Negro',
'Salta', 'San Juan', 'San Luis', 'Santa Cruz', 'Santa Fe',
'Santiago del Estero', 'Tucuman', 'Tierra del Fuego',
'No distribuido', 'Total']]
region_names = list(prov_data.columns)[:-2]
prov_data.index = sectors+['TOTAL']
prov_data = prov_data.replace(0, 1)
### Create proxy data for first iteration
sectors+['other1','other2']
# proxy level 2
proxy_reg_arg = pd.DataFrame(prov_data.iloc[-1,:24]/prov_data.iloc[-1,:24].sum()).reset_index()
proxy_reg_arg['year'] = 2016
proxy_reg_arg = proxy_reg_arg[['year','index','TOTAL']]
proxy_reg_arg.columns = ['year','id','gdp']
proxy_reg_arg.to_csv(os.path.join('..','mrio_downscaling','proxy_reg_arg.csv'),index=False)
# proxy level 4
for iter_,sector in enumerate(sectors+['other1','other2']):
if (sector is not 'other1') & (sector is not 'other2'):
proxy_sector = pd.DataFrame(prov_data.iloc[iter_,:24]/prov_data.iloc[iter_,:24].sum()).reset_index()
proxy_sector['year'] = 2016
proxy_sector['sector'] = 'sec{}'.format(sector)
proxy_sector = proxy_sector[['year','sector','index',sector]]
proxy_sector.columns = ['year','sector','region','gdp']
proxy_sector.to_csv(os.path.join('..','mrio_downscaling','proxy_sec{}.csv'.format(sector)),index=False)
else:
proxy_sector = pd.DataFrame(prov_data.iloc[-1,:24]/prov_data.iloc[-1,:24].sum()).reset_index()
proxy_sector['year'] = 2016
proxy_sector['sector'] = sector+'1'
proxy_sector = proxy_sector[['year','sector','index','TOTAL']]
proxy_sector.columns = ['year','sector','region','gdp']
proxy_sector.to_csv(os.path.join('..','mrio_downscaling','proxy_{}.csv'.format(sector)),index=False)
# proxy level 18
def change_name(x):
if x in sectors:
return 'sec'+x
elif x == 'other1':
return 'other11'
else:
return 'other21'
mi_index = pd.MultiIndex.from_product([sectors+['other1','other2'], region_names, sectors+['other1','other2'], region_names],
names=['sec1', 'reg1','sec2','reg2'])
for iter_,sector in enumerate(sectors+['other1','other2']):
if (sector is not 'other1') & (sector is not 'other2'):
proxy_trade = pd.DataFrame(columns=['year','gdp'],index= mi_index).reset_index()
proxy_trade['year'] = 2016
proxy_trade['gdp'] = 0
proxy_trade = proxy_trade.query("reg1 != reg2")
proxy_trade = proxy_trade.loc[proxy_trade.sec1 == sector]
proxy_trade['sec1'] = proxy_trade.sec1.apply(change_name)
proxy_trade['sec2'] = proxy_trade.sec2.apply(change_name)
proxy_trade = proxy_trade[['year','sec1','reg1','sec2','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','sector','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade_sec{}.csv'.format(sector)),index=False)
else:
proxy_trade = pd.DataFrame(columns=['year','gdp'],index= mi_index).reset_index()
proxy_trade['year'] = 2016
proxy_trade['gdp'] = 0
proxy_trade = proxy_trade.query("reg1 != reg2")
proxy_trade = proxy_trade.loc[proxy_trade.sec1 == sector]
proxy_trade['sec1'] = proxy_trade.sec1.apply(change_name)
proxy_trade['sec2'] = proxy_trade.sec2.apply(change_name)
proxy_trade = proxy_trade[['year','sec1','reg1','sec2','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','sector','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade_{}.csv'.format(sector)),index=False)
"""
Create first version of MRIO for Argentina, without trade
"""
### save basetable for disaggregation usin the specific source:
basetable = pd.read_csv(os.path.join(data_path,'national_tables','{}_{}.csv'.format(year,table)),index_col=[0])
basetable.to_csv(os.path.join('..','mrio_downscaling','basetable.csv'),header=False,index=False)
### run libmrio
p = subprocess.Popen([r'..\mrio_downscaling\mrio_disaggregate', 'settings_notrade.yml'],
cwd=os.path.join('..','mrio_downscaling'))
p.wait()
### load data and reorder
region_names_list = [item for sublist in [[x]*(len(sectors)+2) for x in region_names]
for item in sublist]
rows = ([x for x in sectors+['VA','IMP']])*len(region_names)
cols = ([x for x in sectors+['FD','EXP']])*len(region_names)
index_mi = pd.MultiIndex.from_arrays([region_names_list, rows], names=('region', 'row'))
column_mi = pd.MultiIndex.from_arrays([region_names_list, cols], names=('region', 'col'))
MRIO = pd.read_csv(os.path.join('..','mrio_downscaling','output1.csv'),header=None,index_col=None)
MRIO.index = index_mi
MRIO.columns = column_mi
# create predefined index and col, which is easier to read
sector_only = [x for x in sectors]*len(region_names)
col_only = ['FD']*len(region_names)
region_col = [item for sublist in [[x]*len(sectors) for x in region_names] for item in sublist] + \
[item for sublist in [[x]*1 for x in region_names] for item in sublist]
column_mi_reorder = pd.MultiIndex.from_arrays(
[region_col, sector_only+col_only], names=('region', 'col'))
# sum va and imports
valueA = MRIO.xs('VA', level=1, axis=0).sum(axis=0)
valueA.drop('FD', level=1,axis=0,inplace=True)
valueA.drop('EXP', level=1,axis=0,inplace=True)
imports = MRIO.xs('IMP', level=1, axis=0).sum(axis=0)
imports.drop('FD', level=1,axis=0,inplace=True)
imports.drop('EXP', level=1,axis=0,inplace=True)
FinalD = MRIO.xs('FD', level=1, axis=1).sum(axis=1)
FinalD.drop('VA', level=1,axis=0,inplace=True)
FinalD.drop('IMP', level=1,axis=0,inplace=True)
Export = MRIO.xs('EXP', level=1, axis=1).sum(axis=1)
Export.drop('VA', level=1,axis=0,inplace=True)
Export.drop('IMP', level=1,axis=0,inplace=True)
output_new = MRIO.copy()
"""
Balance first MRIO version
"""
# convert to numpy matrix
X0 = MRIO.as_matrix()
# get sum of rows and columns
u = X0.sum(axis=1)
v = X0.sum(axis=0)
# and only keep T
v[:(len(u)-2)] = u[:-2]
# apply RAS method to rebalance the table
X1 = ras_method(X0, u, v, eps=1e-5,print_out=print_output)
#translate to pandas dataframe
output_new = pd.DataFrame(X1)
output_new.index = index_mi
output_new.columns = column_mi
if print_progress:
print('NOTE : Balanced MRIO table without trade finished using {} data'.format(table))
"""
Create second version of MRIO for Argentina, with trade
"""
### Load OD matrix
od_matrix_total = pd.DataFrame(pd.read_excel(os.path.join(data_path,'OD_data','province_ods.xlsx'),
sheet_name='total',index_col=[0,1],usecols =[0,1,2,3,4,5,6,7])).unstack(1).fillna(0)
od_matrix_total.columns.set_levels(['A','G','C','D','B','I'],level=0,inplace=True)
od_matrix_total.index = od_matrix_total.index.map(reg_mapper)
od_matrix_total = od_matrix_total.stack(0)
od_matrix_total.columns = od_matrix_total.columns.map(reg_mapper)
od_matrix_total = od_matrix_total.swaplevel(i=-2, j=-1, axis=0)
od_matrix_total = od_matrix_total.loc[:, od_matrix_total.columns.notnull()]
### Create proxy data
# proxy level 14
mi_index = pd.MultiIndex.from_product([sectors+['other1','other2'], region_names, region_names],
names=['sec1', 'reg1','reg2'])
for iter_,sector in enumerate((sectors+['other1','other2'])):
if sector in ['A','G','C','D','B','I']:
proxy_trade = (od_matrix_total.sum(level=1).divide(od_matrix_total.sum(level=1).sum(axis=1),axis='rows')).stack(0).reset_index()
proxy_trade.columns = ['reg1','reg2','gdp']
proxy_trade['year'] = 2016
proxy_trade = proxy_trade.apply(lambda x: est_trade_value(x,output_new,sector),axis=1)
proxy_trade['sec1'] = 'sec{}'.format(sector)
proxy_trade = proxy_trade[['year','sec1','reg1','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade14_sec{}.csv'.format(sector)),index=False)
elif (sector is not 'other1') & (sector is not 'other2') & (sector not in ['A','G','C','D','B','I']): # & (sector not in ['L','M','N','O','P']):
proxy_trade = (od_matrix_total.sum(level=1).divide(od_matrix_total.sum(level=1).sum(axis=1),axis='rows')).stack(0).reset_index()
#proxy_trade[0].loc[(proxy_trade.origin_province == proxy_trade.destination_province)] = 0.9
#proxy_trade[0].loc[~(proxy_trade.origin_province == proxy_trade.destination_province)] = 0.1
proxy_trade.columns = ['reg1','reg2','gdp']
proxy_trade['year'] = 2016
proxy_trade = proxy_trade.apply(lambda x: est_trade_value(x,output_new,sector),axis=1)
proxy_trade['sec1'] = 'sec{}'.format(sector)
proxy_trade = proxy_trade[['year','sec1','reg1','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade14_sec{}.csv'.format(sector)),index=False)
else:
proxy_trade = (od_matrix_total.sum(level=1).divide(od_matrix_total.sum(level=1).sum(axis=1),axis='rows')).stack(0).reset_index()
proxy_trade.columns = ['reg1','reg2','gdp']
proxy_trade['year'] = 2016
proxy_trade = proxy_trade.apply(lambda x: est_trade_value(x,output_new,sector),axis=1)
proxy_trade['sec1'] = sector+'1'
proxy_trade = proxy_trade[['year','sec1','reg1','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade14_{}.csv'.format(sector)),index=False)
# proxy level 18
mi_index = pd.MultiIndex.from_product([sectors+['other1','other2'], region_names, sectors+['other1','other2'], region_names],
names=['sec1', 'reg1','sec2','reg2'])
for iter_,sector in enumerate((sectors+['other1','other2'])):
if (sector is not 'other1') & (sector is not 'other2'):
proxy_trade = pd.DataFrame(columns=['year','gdp'],index= mi_index).reset_index()
proxy_trade['year'] = 2016
proxy_trade['gdp'] = 0
proxy_trade = proxy_trade.query("reg1 != reg2")
proxy_trade = proxy_trade.loc[proxy_trade.sec1 == sector]
proxy_trade = proxy_trade.loc[proxy_trade.sec2.isin(['L','M','N','O','P'])]
proxy_trade['sec1'] = proxy_trade.sec1.apply(change_name)
proxy_trade['sec2'] = proxy_trade.sec2.apply(change_name)
proxy_trade = proxy_trade.query("reg1 == reg2")
proxy_trade = proxy_trade[['year','sec1','reg1','sec2','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','sector','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade_sec{}.csv'.format(sector)),index=False)
else:
proxy_trade = pd.DataFrame(columns=['year','gdp'],index= mi_index).reset_index()
proxy_trade['year'] = 2016
proxy_trade['gdp'] = 0
proxy_trade = proxy_trade.query("reg1 != reg2")
proxy_trade = proxy_trade.loc[proxy_trade.sec1 == sector]
proxy_trade = proxy_trade.loc[proxy_trade.sec2.isin(['L','M','N','O','P'])]
proxy_trade['sec1'] = proxy_trade.sec1.apply(change_name)
proxy_trade['sec2'] = proxy_trade.sec2.apply(change_name)
proxy_trade = proxy_trade.query("reg1 == reg2")
proxy_trade = proxy_trade[['year','sec1','reg1','sec2','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','sector','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade_{}.csv'.format(sector)),index=False)
### run libmrio
p = subprocess.Popen([r'..\mrio_downscaling\mrio_disaggregate', 'settings_trade.yml'],
cwd=os.path.join('..','mrio_downscaling'))
p.wait()
# load data and reorder
region_names_list = [item for sublist in [[x]*(len(sectors)+2) for x in region_names]
for item in sublist]
rows = ([x for x in sectors+['VA','IMP']])*len(region_names)
cols = ([x for x in sectors+['FD','EXP']])*len(region_names)
index_mi = pd.MultiIndex.from_arrays([region_names_list, rows], names=('region', 'row'))
column_mi = pd.MultiIndex.from_arrays([region_names_list, cols], names=('region', 'col'))
MRIO = pd.read_csv(os.path.join('..','mrio_downscaling','output2.csv'),header=None,index_col=None)
MRIO.index = index_mi
MRIO.columns = column_mi
# create predefined index and col, which is easier to read
sector_only = [x for x in sectors]*len(region_names)
col_only = ['FD','EXP']*len(region_names)
region_col = [item for sublist in [[x]*len(sectors) for x in region_names] for item in sublist] + \
[item for sublist in [[x]*2 for x in region_names] for item in sublist]
column_mi_reorder = pd.MultiIndex.from_arrays(
[region_col, sector_only+col_only], names=('region', 'col'))
# sum va and imports
valueA = pd.DataFrame(MRIO.loc[MRIO.index.get_level_values(1) == 'VA'].sum(axis='index'))
valueA.columns = pd.MultiIndex.from_product([['Total'],['ValueA']],names=['region','row'])
IMP = pd.DataFrame(MRIO.loc[MRIO.index.get_level_values(1) == 'IMP'].sum(axis='index'))
IMP.columns = pd.MultiIndex.from_product([['Total'],['IMP']],names=['region','row'])
output = pd.concat([MRIO.loc[~MRIO.index.get_level_values(1).isin(['FD','EXP'])]])
output = output.drop(['VA','IMP'], level=1)
output = pd.concat([output,valueA.T,IMP.T])
output = output.reindex(column_mi_reorder, axis='columns')
mrio_arg = ras_method(np.array(output).T,np.array(list(output.sum(axis=1))[:384]+list(output.sum(axis=0)[-48:])),
np.array(list(output.sum(axis=1))[:384]+[output.loc[('Total','ValueA'),:].sum(),output.loc[('Total','IMP'),:].sum()]),
eps=1e-3,print_out=print_output)
mrio_argentina = pd.DataFrame(mrio_arg.T,index=output.index,columns=output.columns)
mrio_argentina.to_csv(os.path.join(data_path,'MRIO','MRIO_Argentina_{}_{}.csv'.format(table,year)))
if print_progress:
print('NOTE : Balanced MRIO table with trade finished using {} data'.format(table))
def prepare_table_mria(table='INDEC',year='2015',print_output=True):
    """
    Convert MRIO table to an excel file in which all elements of the table are disaggregated.

    Reads the balanced multi-regional IO table produced by estimate() from
    ``../data/MRIO/MRIO_Argentina_{table}_{year}.csv`` and writes an .xlsx
    workbook (one sheet per component: T, FD, ExpROW, VA plus label sheets)
    in the layout expected by the MRIA model.

    Parameters
    ----------
    table : str
        Source dataset name used in the input/output file names ('INDEC' or 'GTAP').
    year : str
        Year used in the input/output file names.
    print_output : bool
        When True, print a completion message.
    """
    data_path = os.path.join('..','data')
    # load table
    MRIO = pd.read_csv(os.path.join(data_path,'MRIO','MRIO_Argentina_{}_{}.csv'.format(table,year)),index_col=[0,1],header=[0,1])
    Xnew = MRIO.copy()
    # Add a tiny epsilon everywhere so downstream optimization never sees exact zeros.
    Xnew = Xnew+1e-6
    # write to excel
    writer = pd.ExcelWriter(os.path.join(data_path,'MRIO', 'mrio_argentina_disaggregated_{}_{}.xlsx'.format(table,year)))
    # write T
    # NOTE(review): 384 appears to be regions x sectors (e.g. 24 x 16) -- the
    # counts are defined elsewhere in this module; confirm before changing.
    df_T = Xnew.iloc[:384, :384]
    df_T.columns = df_T.columns.droplevel()
    df_labels_T = pd.DataFrame(df_T.reset_index()[['region', 'row']])
    df_T.reset_index(inplace=True, drop=True)
    df_T.to_excel(writer, 'T', index=False, header=False)
    df_labels_T.to_excel(writer, 'labels_T', index=False, header=False)
    # write FD
    # Final-demand columns: everything past column 384 whose 2nd column level is 'FD'.
    df_FD = Xnew.iloc[:384, 384:].iloc[:, Xnew.iloc[:384, 384:].columns.get_level_values(1)=='FD']
    df_labels_FD = pd.DataFrame(list(df_FD.columns))
    df_FD.columns = df_FD.columns.droplevel()
    df_FD.reset_index(inplace=True, drop=True)
    df_FD.to_excel(writer, 'FD', index=False, header=False)
    df_labels_FD.to_excel(writer, 'labels_FD', index=False, header=False)
    # write ExpROW
    # Rest-of-world exports: sum of all 'EXP' columns per row.
    df_ExpROW = pd.DataFrame(Xnew.iloc[:384, 384:].iloc[:, Xnew.iloc[:384, 384:].columns.get_level_values(1)=='EXP'].sum(axis=1))
    df_labels_ExpROW = pd.DataFrame(['Export'])
    df_ExpROW.reset_index(inplace=True, drop=True)
    df_ExpROW.to_excel(writer, 'ExpROW', index=False, header=False)
    df_labels_ExpROW.reset_index(inplace=True, drop=True)
    df_labels_ExpROW.columns = ['Export']
    df_labels_ExpROW.to_excel(writer, 'labels_ExpROW', index=False, header=False)
    # write VA
    # NOTE(review): value added is sliced to :409 columns but imports use the
    # full width (:) -- verify this asymmetry is intentional.
    df_VA = pd.DataFrame(Xnew.iloc[384:, :409].T[('Total', 'ValueA')])
    df_VA.columns = ['VA']
    df_VA['imports'] = pd.DataFrame(Xnew.iloc[384:, :].T[('Total', 'IMP')])
    df_VA.reset_index(inplace=True, drop=True)
    df_VA.to_excel(writer, 'VA', index=False, header=False)
    df_labels_VA = pd.DataFrame(['Import', 'VA']).T
    df_labels_VA.to_excel(writer, 'labels_VA', index=False, header=False)
    # save excel
    writer.save()
    if print_output:
        print('NOTE : MRIO table ready to use for MRIA model using {} data'.format(table))
if __name__ == "__main__":
    # Build the trade-enabled Argentina MRIO table (GTAP 2014 vintage), then
    # export the disaggregated workbook consumed by the MRIA model.
    # (Removed a stray dataset-metadata artifact that was fused onto the last
    # line and broke parsing.)
    estimate(table='GTAP', year='2014', print_output=True)
    prepare_table_mria(table='GTAP', year='2014', print_output=True)
from django import forms
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import (
AuthenticationForm,
UserCreationForm,
UsernameField,
)
User = get_user_model()
class UserLoginForm(AuthenticationForm):
    """Login form with Tailwind-styled username/password widgets.

    The previous ``__init__`` override only delegated to
    ``super().__init__`` and has been removed; behavior is unchanged.
    """

    # UsernameField applies the same NFKC normalization as the stock form.
    username = UsernameField(widget=forms.TextInput(
        attrs={'class': 'bg-gray-100 rounded-lg p-2'}))
    password = forms.CharField(widget=forms.PasswordInput(
        attrs={
            'class': 'bg-gray-100 rounded-lg p-2',
        }
    ))
class UserSignUpForm(UserCreationForm):
    """Sign-up form with Tailwind-styled username/password fields.

    The previous ``__init__`` override only delegated to
    ``super().__init__`` and has been removed; behavior is unchanged.

    NOTE(review): no ``Meta`` is declared, so the model comes from
    ``UserCreationForm.Meta`` (the stock auth User), while the module-level
    ``User = get_user_model()`` is unused -- confirm this is intended if the
    project configures a custom user model.
    """

    username = forms.CharField(
        widget=forms.TextInput(attrs={"class": "bg-gray-100 rounded-lg p-2"})
    )
    password1 = forms.CharField(
        widget=forms.PasswordInput(
            attrs={
                "class": "bg-gray-100 rounded-lg p-2",
            }
        )
    )
    password2 = forms.CharField(
        widget=forms.PasswordInput(
            attrs={
                "class": "bg-gray-100 rounded-lg p-2",
            }
        )
    )
| 2.515625 | 3 |
from __future__ import annotations
from typing import Any, Dict, Optional
from boa3.model.method import Method
from boa3.model.property import Property
from boa3.model.type.classes.classarraytype import ClassArrayType
from boa3.model.variable import Variable
class OracleType(ClassArrayType):
    """
    A class used to represent Oracle class

    Interop type that exposes the native Oracle contract to compiled
    smart contracts.  Only class-level methods are provided
    (``get_price`` and ``request``); instances carry no state of their own.
    """

    def __init__(self):
        super().__init__('Oracle')
        # Intentionally empty: all functionality is exposed via class methods.
        self._variables: Dict[str, Variable] = {}
        self._class_methods: Dict[str, Method] = {}
        self._constructor: Method = None

    @property
    def instance_variables(self) -> Dict[str, Variable]:
        # Copy so callers cannot mutate the internal mapping.
        return self._variables.copy()

    @property
    def class_variables(self) -> Dict[str, Variable]:
        return {}

    @property
    def properties(self) -> Dict[str, Property]:
        return {}

    @property
    def static_methods(self) -> Dict[str, Method]:
        return {}

    @property
    def class_methods(self) -> Dict[str, Method]:
        # avoid recursive import
        from boa3.model.builtin.interop.oracle.oraclegetpricemethod import OracleGetPriceMethod
        from boa3.model.builtin.interop.oracle.oraclerequestmethod import OracleRequestMethod

        # Lazily populate on first access, then reuse the cached mapping.
        if len(self._class_methods) == 0:
            self._class_methods = {
                'get_price': OracleGetPriceMethod(),
                'request': OracleRequestMethod()
            }
        return self._class_methods

    @property
    def instance_methods(self) -> Dict[str, Method]:
        return {}

    def constructor_method(self) -> Optional[Method]:
        # Always None: OracleType has no user-callable constructor.
        return self._constructor

    @classmethod
    def build(cls, value: Any = None) -> OracleType:
        # NOTE(review): falls through to an implicit None when *value* is
        # neither None nor an OracleType, despite the annotation -- confirm
        # callers expect that.
        if value is None or cls._is_type_of(value):
            return _Oracle

    @classmethod
    def _is_type_of(cls, value: Any):
        return isinstance(value, OracleType)
# Module-level singleton returned by OracleType.build().
_Oracle = OracleType()
| 2.46875 | 2 |
from rest_framework.pagination import PageNumberPagination


class StandardPagination(PageNumberPagination):
    """Default page-number pagination for the API.

    Returns 30 items per page; clients may override the size with
    ``?page_size=<n>``, capped at ``max_page_size``.
    (Removed dataset-metadata artifacts fused onto the first and last
    lines, which broke parsing.)
    """

    page_size = 30
    page_size_query_param = 'page_size'
    max_page_size = 1000
wiiu.py | RN-JK/UBIART-Texture-Decoder | 0 | 16839 | import os, glob
try:
os.mkdir("output")
except:
pass
wiiudir="input/wiiu"
try:
os.makedirs(wiiudir)
print('The directories have been made.')
input('Insert your textures in input/wiiu and then run the tool again to convert it.')
except:
pass
dir = 'input/temp'
try:
os.makedirs(dir)
except:
pass
try:
for ckdtextures in os.listdir(wiiudir):
with open(wiiudir+'/'+ckdtextures,'rb') as f:
f.read(44)
data = f.read()
dds=open('input/temp/'+ckdtextures.replace('.tga.ckd','.gtx').replace('.png.ckd','.gtx'),'wb')
dds.write(data)
dds.close()
except:
pass
try:
for gtx in os.listdir(dir):
print('making '+gtx.replace(".gtx","")+'...')
os.system("texconv2 -i input/temp/"+gtx+" -o output/"+gtx.replace(".gtx",".dds"))
except:
pass
filelist = glob.glob(os.path.join(dir, "*"))
for f in filelist:
os.remove(f)
os.rmdir(dir) | 2.734375 | 3 |
jp.atcoder/abc046/arc062_a/8984820.py | kagemeka/atcoder-submissions | 1 | 16840 | import sys
# Number of (T, A) ratio reports on the first line of stdin.
n = int(sys.stdin.readline().rstrip())
# Remaining tokens are the reports; zipping one iterator with itself
# pairs consecutive values into (T, A) tuples.
ab = map(int, sys.stdin.read().split())
ab = list(zip(ab, ab))
def main(reports=None):
    """Return the minimum final T + A consistent with every ratio report.

    Each report (t, a) fixes the ratio T:A at that moment and vote counts
    never decrease.  For every report we jump to the smallest multiple
    k*(t, a) dominating the current counts using exact integer ceiling
    division.  The original implementation compared float ratios
    (``c_a / c_b != ratio``) and incremented by one -- both incorrect for
    large values (float equality may never hold) and O(answer) slow.

    Args:
        reports: optional list of (t, a) tuples; defaults to the pairs
            read from stdin at module import (backward compatible).

    Returns:
        The minimal possible value of T + A after the last report.
    """
    data = ab if reports is None else reports
    c_a, c_b = data[0]
    for t, a_votes in data[1:]:
        # Smallest k with k*t >= c_a and k*a_votes >= c_b (ceiling division).
        k = max(-(-c_a // t), -(-c_b // a_votes))
        c_a, c_b = k * t, k * a_votes
    ans = c_a + c_b
    return ans
# Script entry point: solve using the stdin data read at import time.
if __name__ == "__main__":
    ans = main()
    print(ans)
| 2.96875 | 3 |
pycsw/pycsw/plugins/profiles/profile.py | Geosoft2/Geosoftware-II-AALLH | 118 | 16841 | # -*- coding: utf-8 -*-
# =================================================================
#
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# Copyright (c) 2015 <NAME>
# Copyright (c) 2015 <NAME>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import os
import warnings
class Profile(object):
    """Base class for pycsw application profiles.

    A profile plugs an additional information model (typename and output
    schema) into the core CSW service; construction mutates the shared
    ``model`` and namespace dictionaries in place.
    """

    def __init__(self, name, version, title, url,
                 namespace, typename, outputschema, prefixes, model, core_namespaces,
                 added_namespaces, repository):
        """Record profile attributes and register them with the core model."""
        self.name = name
        self.version = version
        self.title = title
        self.url = url
        self.namespace = namespace
        self.typename = typename
        self.outputschema = outputschema
        self.prefixes = prefixes
        self.repository = repository

        operations = model['operations']

        # DescribeRecord and Harvest are optional operations; only extend
        # them when the deployment has them enabled.
        if 'DescribeRecord' in operations:
            operations['DescribeRecord']['parameters']['typeName']['values'].append(self.typename)

        getrecords_params = operations['GetRecords']['parameters']
        getrecords_params['outputSchema']['values'].append(self.outputschema)
        getrecords_params['typeNames']['values'].append(self.typename)

        operations['GetRecordById']['parameters']['outputSchema']['values'].append(self.outputschema)

        if 'Harvest' in operations:
            operations['Harvest']['parameters']['ResourceType']['values'].append(self.outputschema)

        # Merge the profile's namespaces into the core namespace set.
        core_namespaces.update(added_namespaces)

        # Expose the profile's typename through the shared repository map.
        model['typenames'][self.typename] = self.repository

    def extend_core(self, model, namespaces, config):
        """Extend config.model and config.namespaces."""
        raise NotImplementedError

    def check_parameters(self):
        """Perform extra parameter checking.

        Return a dict with keys "locator", "code", "text" or None.
        """
        raise NotImplementedError

    def get_extendedcapabilities(self):
        """Return the ExtendedCapabilities child as lxml.etree.Element."""
        raise NotImplementedError

    def get_schemacomponents(self):
        """Return schema components as a list of lxml.etree.Element."""
        raise NotImplementedError

    def check_getdomain(self, kvp):
        """Perform extra profile-specific checks in the GetDomain request."""
        raise NotImplementedError

    def write_record(self, result, esn, outputschema, queryables):
        """Return the csw:SearchResults child as lxml.etree.Element."""
        raise NotImplementedError

    def transform2dcmappings(self, queryables):
        """Transform information-model mappings into csw:Record mappings."""
        raise NotImplementedError
def load_profiles(path, cls, profiles):
    ''' load CSW profiles, return dict by class name

    path: filesystem path to the profiles package (os.sep separated).
    cls: base class to look for (subclasses are collected as plugins).
    profiles: comma-separated list of profile names to load.
    Returns {'plugins': {class_name: class}, 'loaded': {}}.
    '''
    def look_for_subclass(modulename):
        # Import the dotted module and walk down to its innermost namespace.
        module = __import__(modulename)
        dmod = module.__dict__
        for modname in modulename.split('.')[1:]:
            dmod = dmod[modname].__dict__
        # Register every subclass of cls found in the module (skipping cls
        # itself); issubclass raises TypeError for non-class entries.
        for key, entry in dmod.items():
            if key == cls.__name__:
                continue
            try:
                if issubclass(entry, cls):
                    aps['plugins'][key] = entry
            except TypeError:
                continue
    aps = {}
    aps['plugins'] = {}
    # NOTE(review): 'loaded' is initialized but never populated here --
    # presumably filled in by callers; confirm.
    aps['loaded'] = {}
    for prof in profiles.split(','):
        # fgdc, atom, dif, gm03 are supported in core
        # no need to specify them explicitly anymore
        # provide deprecation warning
        # https://github.com/geopython/pycsw/issues/118
        if prof in ['fgdc', 'atom', 'dif', 'gm03']:
            warnings.warn('%s is now a core module, and does not need to be'
                          ' specified explicitly. So you can remove %s from '
                          'server.profiles' % (prof, prof))
        else:
            # Expected module layout: <path>.<prof>.<prof>
            modulename='%s.%s.%s' % (path.replace(os.sep, '.'), prof, prof)
            look_for_subclass(modulename)
    return aps
| 1.789063 | 2 |
#!/usr/bin/env python
# Copyright 2017 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import errno
import json
import os
import subprocess
import sys
import tempfile
# Default on-device destination root for pushed files.
DEFAULT_DST_ROOT = '/system'
# Default host build-output directory containing build products.
DEFAULT_OUT_DIR = 'out/debug-x64'
def netaddr_cmd(out_dir, device):
    """Return the argv list that resolves *device* to its Fuchsia address.

    The netaddr host tool lives in the zircon build tree next to *out_dir*.
    """
    tool_path = os.path.join(out_dir, '../build-zircon/tools/netaddr')
    return [tool_path, '--fuchsia', device]
def mkdir_p(path):
    """Create *path* like ``mkdir -p``: no error if it already exists."""
    try:
        os.makedirs(path)
    except OSError as exc:
        # Ignore only "already exists as a directory"; re-raise everything
        # else (permissions, existing regular file, ...).
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
def parse_package_manifest(paths, dst_root):
    """Parse ``dst=src`` manifest files into [dst, src] pairs.

    Each line maps a device path (made relative and joined under
    *dst_root*) to a host source path.  Raises ValueError on any line
    that is not exactly ``dst=src``.
    """
    entries = []
    for manifest_path in paths:
        with open(manifest_path) as manifest:
            for line in manifest:
                pieces = line.rstrip().split('=')
                if len(pieces) != 2:
                    raise ValueError('Malformed manifest entry: ' + line)
                destination, source = pieces
                entries.append(
                    [os.path.join(dst_root, destination.lstrip('/')), source])
    return entries
def update_device(device, batch_file, verbose, out_dir):
    # Push the sftp batch in *batch_file* to *device* (a netaddr node name),
    # resolving its IPv6 address first.  Returns 0 on success, non-zero on
    # failure.  NOTE: Python 2 script (uses `print >>` syntax).
    ssh_config_path = os.path.join(out_dir, 'ssh-keys', 'ssh_config')
    try:
        netaddr = netaddr_cmd(out_dir, device)
        # Bracket the address for use as an sftp host (IPv6 literal form).
        ipv6 = '[' + subprocess.check_output(netaddr).strip() + ']'
    except subprocess.CalledProcessError:
        # netaddr prints its own errors, no need to add another one here.
        return 1
    with open(os.devnull, 'w') as devnull:
        # Silence sftp's per-file output unless the caller asked for it.
        status = subprocess.call(
            ['sftp', '-F', ssh_config_path, '-b', batch_file, ipv6],
            stdout=sys.stdout if verbose else devnull)
    if status != 0:
        print >> sys.stderr, 'error: sftp failed'
    return status
def scp_everything(devices, package_data, out_dir, name_filter, verbose):
    # Write one sftp batch file covering every (dst, src) pair, then replay
    # it on each device.  NOTE: Python 2 script (uses `print >>` / `print`).
    # Temporary file for sftp
    count = 0
    with tempfile.NamedTemporaryFile() as f:
        # Emit one rm+put pair per manifest entry into the sftp batch file.
        for entry in package_data:
            dst_path = entry[0]
            src_path = entry[1]
            # Optional substring filter on the destination file name.
            if name_filter is not None and name_filter not in os.path.basename(
                    dst_path):
                continue
            # must "rm" the file first because memfs requires it
            print >> f, '-rm %s' % dst_path
            print >> f, 'put -P %s %s' % (src_path, dst_path)
            count += 1
        f.flush()
        for device in devices:
            if update_device(device, f.name, verbose, out_dir) == 0:
                print 'Updated %d files on "%s".' % (count, device)
            else:
                print 'Update FAILED on "%s"' % device
    # Always reports success overall, even if individual devices failed.
    return 0
def main():
    """Parse arguments and push every manifest entry to the target devices."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        'package_files',
        nargs='+',
        help='Files containing manifest data. For example, ' \
        '(e.g. out/debug-x64/package/modular*/system_manifest)')
    arg_parser.add_argument('-d', '--device', default=[':'], help='Device to update')
    arg_parser.add_argument(
        '-o',
        '--out-dir',
        metavar='DIR',
        default=DEFAULT_OUT_DIR,
        help='Directory containing build products')
    arg_parser.add_argument(
        '-t',
        '--dst-root',
        metavar='PATH',
        default=DEFAULT_DST_ROOT,
        help='Path on device to the directory to copy package products')
    arg_parser.add_argument(
        '-f',
        '--filter',
        metavar='FILTER',
        help='Push products with a name that contains FILTER')
    arg_parser.add_argument(
        '-v', '--verbose', action='store_true', help='Display copy filenames')
    options = arg_parser.parse_args()

    # The "or" also guards against explicitly empty string arguments.
    out_dir = options.out_dir or DEFAULT_OUT_DIR
    dst_root = options.dst_root or DEFAULT_DST_ROOT
    package_data = parse_package_manifest(options.package_files, dst_root)
    return scp_everything(options.device, package_data, out_dir,
                          options.filter, options.verbose)
| 2.046875 | 2 |
tests/e2e/registry/test_registry_image_push_pull.py | OdedViner/ocs-ci | 0 | 16843 | <gh_stars>0
import logging
import pytest
from ocs_ci.framework.testlib import workloads, E2ETest, ignore_leftovers
from ocs_ci.ocs import ocp, registry, constants
from ocs_ci.framework import config
from ocs_ci.ocs.exceptions import UnexpectedBehaviour
# Module-level logger named after this test module.
logger = logging.getLogger(__name__)
class TestRegistryImagePullPush(E2ETest):
    """
    Test to check Image push and pull worked with registry backed by OCS

    Exercises the OpenShift internal image registry (backed by OCS storage):
    grants registry roles to the test user, pulls a public image with podman,
    pushes it into the internal registry and verifies it is listed there.
    """
    @workloads
    @ignore_leftovers
    @pytest.mark.polarion_id("OCS-1080")
    @pytest.mark.skip("Skip this test due to https://github.com/red-hat-storage/ocs-ci/issues/1547")
    def test_registry_image_pull_push(self):
        """
        Test case to validate registry image pull and push with OCS backend
        """
        image_url = 'docker.io/library/busybox'
        # Get openshift registry route and certificate access
        registry.enable_route_and_create_ca_for_registry_access()
        # Add roles to user so that user can perform image pull and push to registry
        role_type = ['registry-viewer', 'registry-editor',
                     'system:registry', 'admin', 'system:image-builder']
        for role in role_type:
            registry.add_role_to_user(role_type=role, user=config.RUN['username'])
        # Provide write access to registry (clear the read-only maintenance
        # env var on the image-registry deployment)
        ocp_obj = ocp.OCP()
        read_only_cmd = (
            f"set env deployment.apps/image-registry"
            f" REGISTRY_STORAGE_MAINTENANCE_READONLY- -n "
            f"{constants.OPENSHIFT_IMAGE_REGISTRY_NAMESPACE}"
        )
        ocp_obj.exec_oc_cmd(read_only_cmd)
        # Pull image using podman
        registry.image_pull(image_url=image_url)
        # Push image to registry using podman
        registry.image_push(
            image_url=image_url, namespace=constants.OPENSHIFT_IMAGE_REGISTRY_NAMESPACE
        )
        # List the images in registry
        img_list = registry.image_list_all()
        logger.info(f"Image list {img_list}")
        # Check either image present in registry or not
        validate = registry.check_image_exists_in_registry(image_url=image_url)
        if not validate:
            raise UnexpectedBehaviour("Image URL not present in registry")
        # Remove user roles from User (cleanup)
        for role in role_type:
            registry.remove_role_from_user(role_type=role, user=config.RUN['username'])
| 2.09375 | 2 |
tOYOpy/settings.py | fkab/tOYO | 0 | 16844 | <gh_stars>0
# Tag-name -> replacement/suffix mapping.
# NOTE(review): the empty string for 'em' and '<br/>' for 'blockquote'
# suggest per-tag substitutions applied by the consumer of this setting --
# confirm against the code that reads `elements`.
elements = {
    'em': '',
    'blockquote': '<br/>'
}
| 1.179688 | 1 |
1.6.py | kevrodg/pynet | 0 | 16845 | <reponame>kevrodg/pynet<gh_stars>0
#!/usr/bin/env python
import json
import yaml
# Sample nested structure (scalars, strings and a dict) used to
# demonstrate JSON vs YAML serialization of the same data.
my_list = [0, 1, 2, 3, 'whatever', 'hello', {'attribs': [0, 1, 2, 3, 4], 'ip_addr': '10.10.10.239'}]
# Serialize to JSON.
with open("my_file.json", "w") as f:
    json.dump(my_list, f)
# Serialize to YAML in block (non-flow) style for readability.
with open("my_file.yaml", "w") as f:
    f.write(yaml.dump(my_list, default_flow_style=False))
| 2.78125 | 3 |
jp.atcoder/abc069/arc080_a/11903517.py | kagemeka/atcoder-submissions | 1 | 16846 | <reponame>kagemeka/atcoder-submissions<gh_stars>1-10
import sys
# First stdin token is n; the remaining n tokens are the sequence values.
n, *a = map(int, sys.stdin.read().split())
def main(values=None):
    """Decide whether the sequence can be permuted so every adjacent product
    is a multiple of 4; prints and returns "Yes" or "No".

    Counting argument: with c4 = multiples of 4 and c2 = remaining even
    numbers, an arrangement exists iff c4 >= n // 2 (fours can separate
    every pair) or 2*c4 + c2 >= n (equivalently c4 >= number of odds).

    Generalized (backward-compatibly) to accept an explicit sequence and
    to return the answer in addition to printing it, so the logic is
    testable without stdin.

    Args:
        values: optional explicit sequence; defaults to the globals read
            from stdin at import time.

    Returns:
        "Yes" or "No" (also printed, as before).
    """
    xs = a if values is None else values
    total = n if values is None else len(xs)
    c4 = sum(1 for x in xs if x % 4 == 0)
    c2 = sum(1 for x in xs if x % 2 == 0 and x % 4 != 0)
    ans = "Yes" if c4 >= total // 2 or c4 * 2 + c2 >= total else "No"
    print(ans)
    return ans
# Script entry point: solve using the stdin data read at import time.
if __name__ == "__main__":
    main()
| 2.734375 | 3 |
tests/test_url_enc_dec.py | FWidm/poe-profile | 1 | 16847 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import logging
import sys
import unittest
from src.util.tree_codec import encode_hashes, decode_url
url = '<KEY>' \
'<KEY>' \
'3xrj6vp7c-uJO8n7zqvk_AZsT2xq7MvM9-0B_Tj9P72L3ZXtl82mLfsONq5FHqGOvu7IPsiu8O7-vwH_JF8933MvfX-Ov56PrS_Ev-Cv5U_oH-jw=='
decoded = (4, 3, 3, 1, [1031, 1203, 1609, 2292, 3533, 3644, 3676, 4397, 5152, 5233, 5415, 5743, 6230, 6237, 6289,
6712,
7374, 7388, 9386, 10031, 10490, 11088, 11420, 13009, 13714, 14057, 14930, 14936, 15073,
15405,
16775, 17412, 17735, 17790, 17821, 19635, 19897, 19939, 20551, 21330, 21958, 23027, 23185,
24383,
26270, 26523, 26725, 26740, 27203, 27308, 27386, 27611, 29199, 29353, 30380, 31875, 32245,
33287,
33479, 33631, 33740, 33755, 34144, 34880, 36017, 36542, 36634, 36915, 36949, 37569, 39085,
39648,
39818, 41472, 42583, 42668, 43133, 43716, 44184, 44202, 44429, 44529, 44606, 44967, 46910,
47251,
48287, 48362, 48719, 49254, 50422, 50862, 52412, 53118, 53279, 54159, 54267, 55485, 55646,
55676,
55906, 57264, 58218, 58449, 59928, 60398, 60547, 60554, 61198, 61419, 61471, 62021, 62429,
63282,
63447, 63723, 63976, 64210, 64587, 65034, 65108, 65153, 65167])
class BasicTestSuite(unittest.TestCase):
    """Basic test cases.

    Round-trip checks for the passive-tree URL codec: encoding the known
    decoded tuple must reproduce the reference URL, and decoding the URL
    must reproduce the tuple.
    """
    def test_encode(self):
        # decoded = (version?, char class, ascendancy, bandit?, node hashes)
        # -- the exact field meanings come from tree_codec; confirm there.
        result = encode_hashes(decoded[0],decoded[1],decoded[2],decoded[3],decoded[4])
        print(result)
        print(url)
        self.assertEqual(result,url)
    def test_decode(self):
        result = decode_url(url)
        self.assertEqual(result,decoded)
# When run directly, mirror debug logging to stdout before delegating
# to the unittest runner.
if __name__ == '__main__':
    logger = logging.getLogger()
    logger.level = logging.DEBUG
    stream_handler = logging.StreamHandler(sys.stdout)
    logger.addHandler(stream_handler)
    unittest.main()
| 1.921875 | 2 |
# source http://itasuke.hatenablog.com/entry/2018/01/08/133510
import winreg
# Create (or open, if it already exists) the per-user registry key
# HKCU\Software\__javacommons__\abc, then close the handle.
newkey = winreg.CreateKeyEx(winreg.HKEY_CURRENT_USER, r'Software\__javacommons__\abc')
newkey.Close()
# Delete the key again, leaving the registry unchanged (round-trip demo).
winreg.DeleteKeyEx(winreg.HKEY_CURRENT_USER, r'Software\__javacommons__\abc')
| 1.546875 | 2 |
import inspect
import re
import textwrap
import pytest
import pkg_resources
from .test_resources import Metadata
def strip_comments(s):
    """Drop blank lines and full-line '#' comments from *s*, keeping the
    surviving lines (with their original indentation) joined by newlines."""
    kept = []
    for line in s.split('\n'):
        stripped = line.strip()
        if stripped and not stripped.startswith('#'):
            kept.append(line)
    return '\n'.join(kept)
def parse_distributions(s):
    '''
    Parse a series of distribution specs of the form:
    {project_name}-{version}
        [optional, indented requirements specification]

    Example:

        foo-0.2
        bar-1.0
            foo>=3.0
            [feature]
            baz

    yield 2 distributions:
        - project_name=foo, version=0.2
        - project_name=bar, version=1.0,
          requires=['foo>=3.0', 'baz; extra=="feature"']
    '''
    s = s.strip()
    # Split at newlines followed by a non-whitespace character: each chunk is
    # one distribution header plus its indented requirement lines.
    # (Raw string fixes the invalid "\s" escape in the old plain literal.)
    for spec in re.split(r'\n(?=[^\s])', s):
        if not spec:
            continue
        fields = spec.split('\n', 1)
        assert 1 <= len(fields) <= 2
        name, version = fields.pop(0).split('-')
        if fields:
            # The remainder is an indented requires.txt body.
            requires = textwrap.dedent(fields.pop(0))
            metadata = Metadata(('requires.txt', requires))
        else:
            metadata = None
        dist = pkg_resources.Distribution(project_name=name,
                                          version=version,
                                          metadata=metadata)
        yield dist
class FakeInstaller(object):
    """Callable standing in for an installer: given a requirement, return
    the first installable distribution satisfying it, else None."""

    def __init__(self, installable_dists):
        self._installable_dists = installable_dists

    def __call__(self, req):
        for dist in self._installable_dists:
            if dist in req:
                return dist
        return None
def parametrize_test_working_set_resolve(*test_list):
    """Expand textual scenario blocks into pytest parameters.

    Each scenario is a docstring with six blank-line-separated sections:
    id, installed dists, installable dists, requirements, expected result,
    and expected result with replace_conflicting=True.  Every scenario
    yields two parameter sets (one per replace_conflicting value); a bare
    identifier in an "expected" section names a pkg_resources exception
    class, anything else parses as a distribution list.
    """
    idlist = []
    argvalues = []
    for test in test_list:
        (
            name,
            installed_dists,
            installable_dists,
            requirements,
            expected1, expected2
        ) = [
            strip_comments(s.lstrip()) for s in
            textwrap.dedent(test).lstrip().split('\n\n', 5)
        ]
        installed_dists = list(parse_distributions(installed_dists))
        installable_dists = list(parse_distributions(installable_dists))
        requirements = list(pkg_resources.parse_requirements(requirements))
        for id_, replace_conflicting, expected in (
            (name, False, expected1),
            (name + '_replace_conflicting', True, expected2),
        ):
            idlist.append(id_)
            expected = strip_comments(expected.strip())
            # Raw string fixes the invalid "\w" escape in the old literal.
            if re.match(r'\w+$', expected):
                expected = getattr(pkg_resources, expected)
                assert issubclass(expected, Exception)
            else:
                expected = list(parse_distributions(expected))
            argvalues.append(pytest.param(installed_dists, installable_dists,
                                          requirements, replace_conflicting,
                                          expected))
    return pytest.mark.parametrize('installed_dists,installable_dists,'
                                   'requirements,replace_conflicting,'
                                   'resolved_dists_or_exception',
                                   argvalues, ids=idlist)
@parametrize_test_working_set_resolve(
'''
# id
noop
# installed
# installable
# wanted
# resolved
# resolved [replace conflicting]
''',
'''
# id
already_installed
# installed
foo-3.0
# installable
# wanted
foo>=2.1,!=3.1,<4
# resolved
foo-3.0
# resolved [replace conflicting]
foo-3.0
''',
'''
# id
installable_not_installed
# installed
# installable
foo-3.0
foo-4.0
# wanted
foo>=2.1,!=3.1,<4
# resolved
foo-3.0
# resolved [replace conflicting]
foo-3.0
''',
'''
# id
not_installable
# installed
# installable
# wanted
foo>=2.1,!=3.1,<4
# resolved
DistributionNotFound
# resolved [replace conflicting]
DistributionNotFound
''',
'''
# id
no_matching_version
# installed
# installable
foo-3.1
# wanted
foo>=2.1,!=3.1,<4
# resolved
DistributionNotFound
# resolved [replace conflicting]
DistributionNotFound
''',
'''
# id
installable_with_installed_conflict
# installed
foo-3.1
# installable
foo-3.5
# wanted
foo>=2.1,!=3.1,<4
# resolved
VersionConflict
# resolved [replace conflicting]
foo-3.5
''',
'''
# id
not_installable_with_installed_conflict
# installed
foo-3.1
# installable
# wanted
foo>=2.1,!=3.1,<4
# resolved
VersionConflict
# resolved [replace conflicting]
DistributionNotFound
''',
'''
# id
installed_with_installed_require
# installed
foo-3.9
baz-0.1
foo>=2.1,!=3.1,<4
# installable
# wanted
baz
# resolved
foo-3.9
baz-0.1
# resolved [replace conflicting]
foo-3.9
baz-0.1
''',
'''
# id
installed_with_conflicting_installed_require
# installed
foo-5
baz-0.1
foo>=2.1,!=3.1,<4
# installable
# wanted
baz
# resolved
VersionConflict
# resolved [replace conflicting]
DistributionNotFound
''',
'''
# id
installed_with_installable_conflicting_require
# installed
foo-5
baz-0.1
foo>=2.1,!=3.1,<4
# installable
foo-2.9
# wanted
baz
# resolved
VersionConflict
# resolved [replace conflicting]
baz-0.1
foo-2.9
''',
'''
# id
installed_with_installable_require
# installed
baz-0.1
foo>=2.1,!=3.1,<4
# installable
foo-3.9
# wanted
baz
# resolved
foo-3.9
baz-0.1
# resolved [replace conflicting]
foo-3.9
baz-0.1
''',
'''
# id
installable_with_installed_require
# installed
foo-3.9
# installable
baz-0.1
foo>=2.1,!=3.1,<4
# wanted
baz
# resolved
foo-3.9
baz-0.1
# resolved [replace conflicting]
foo-3.9
baz-0.1
''',
'''
# id
installable_with_installable_require
# installed
# installable
foo-3.9
baz-0.1
foo>=2.1,!=3.1,<4
# wanted
baz
# resolved
foo-3.9
baz-0.1
# resolved [replace conflicting]
foo-3.9
baz-0.1
''',
'''
# id
installable_with_conflicting_installable_require
# installed
foo-5
# installable
foo-2.9
baz-0.1
foo>=2.1,!=3.1,<4
# wanted
baz
# resolved
VersionConflict
# resolved [replace conflicting]
baz-0.1
foo-2.9
''',
'''
# id
conflicting_installables
# installed
# installable
foo-2.9
foo-5.0
# wanted
foo>=2.1,!=3.1,<4
foo>=4
# resolved
VersionConflict
# resolved [replace conflicting]
VersionConflict
''',
'''
# id
installables_with_conflicting_requires
# installed
# installable
foo-2.9
dep==1.0
baz-5.0
dep==2.0
dep-1.0
dep-2.0
# wanted
foo
baz
# resolved
VersionConflict
# resolved [replace conflicting]
VersionConflict
''',
'''
# id
installables_with_conflicting_nested_requires
# installed
# installable
foo-2.9
dep1
dep1-1.0
subdep<1.0
baz-5.0
dep2
dep2-1.0
subdep>1.0
subdep-0.9
subdep-1.1
# wanted
foo
baz
# resolved
VersionConflict
# resolved [replace conflicting]
VersionConflict
''',
)
def test_working_set_resolve(installed_dists, installable_dists, requirements,
                             replace_conflicting, resolved_dists_or_exception):
    """Resolve *requirements* against a WorkingSet seeded with
    *installed_dists*, letting a fake installer supply *installable_dists*,
    and compare the outcome with the parametrized expectation (either a
    list of dists or an exception class to be raised)."""
    ws = pkg_resources.WorkingSet([])
    for dist in installed_dists:
        ws.add(dist)

    # A named helper instead of the original `resolve_call = lambda: ...`
    # (PEP 8 E731: do not assign a lambda to a name).
    def resolve_call():
        return ws.resolve(
            requirements, installer=FakeInstaller(installable_dists),
            replace_conflicting=replace_conflicting,
        )

    if inspect.isclass(resolved_dists_or_exception):
        # The expectation is an exception class (e.g. VersionConflict).
        with pytest.raises(resolved_dists_or_exception):
            resolve_call()
    else:
        assert sorted(resolve_call()) == sorted(resolved_dists_or_exception)
| 2.625 | 3 |
from setuptools import find_packages, setup

# Read the runtime dependency pins.  The context manager guarantees the file
# handle is closed deterministically (the original left it open for the GC).
with open('requirements.txt') as requirements_file:
    install_requires = [dep.strip() for dep in requirements_file]

setup(
    name='yolo_tf2',
    version='1.5',
    packages=find_packages(),
    url='https://github.com/schissmantics/yolo-tf2',
    license='MIT',
    author='schismantics',
    author_email='<EMAIL>',
    description='yolo(v3/v4) implementation in keras and tensorflow 2.5',
    setup_requires=['numpy==1.19.5'],
    install_requires=install_requires,
    python_requires='>=3.7',
    entry_points={
        'console_scripts': [
            'yolotf2=yolo_tf2.cli:execute',
        ],
    },
)
| 1.484375 | 1 |
def rate_diff_percentage(previous_rate, current_rate, percentage=False):
    """Return the relative change from *previous_rate* to *current_rate*.

    By default the result is a fraction (e.g. 0.5 for +50%); pass
    percentage=True to scale it by 100.  Raises ZeroDivisionError when
    previous_rate is 0.
    """
    diff_percentage = (current_rate - previous_rate) / previous_rate
    if percentage:
        return diff_percentage * 100
    return diff_percentage
nngeometry/object/__init__.py | amyami187/nngeometry | 103 | 16852 | from .pspace import (PMatDense, PMatBlockDiag, PMatDiag,
PMatLowRank, PMatImplicit,
PMatKFAC, PMatEKFAC, PMatQuasiDiag)
from .vector import (PVector, FVector)
from .fspace import (FMatDense,)
from .map import (PushForwardDense, PushForwardImplicit,
PullBackDense)
| 1.164063 | 1 |
vkwave/bots/core/dispatching/dp/middleware/middleware.py | YorkDW/vkwave | 0 | 16853 | <filename>vkwave/bots/core/dispatching/dp/middleware/middleware.py
from abc import ABC, abstractmethod
from typing import List, NewType
from vkwave.bots.core.dispatching.events.base import BaseEvent
# Flag returned by middleware pre-processing; a falsy value aborts further
# processing of the event (see MiddlewareManager.execute_pre_process_event).
MiddlewareResult = NewType("MiddlewareResult", bool)
class BaseMiddleware(ABC):
    """Interface for middlewares that run before an event reaches handlers."""

    @abstractmethod
    async def pre_process_event(self, event: BaseEvent) -> MiddlewareResult:
        """Inspect *event*; return a falsy MiddlewareResult to stop dispatch."""
        ...
class MiddlewareManager:
    """Holds an ordered chain of middlewares and runs them over events."""

    def __init__(self):
        # Middlewares are executed in registration order.
        self.middlewares: List[BaseMiddleware] = []

    def add_middleware(self, middleware: BaseMiddleware):
        """Append *middleware* to the end of the chain."""
        self.middlewares.append(middleware)

    async def execute_pre_process_event(self, event: BaseEvent) -> MiddlewareResult:
        """Run every middleware over *event*; stop at the first falsy result."""
        for current in self.middlewares:
            allowed = await current.pre_process_event(event)
            if not allowed:
                return MiddlewareResult(False)
        return MiddlewareResult(True)
| 2.46875 | 2 |
plot_scripts/try_networkx.py | gabrielasuchopar/arch2vec | 35 | 16854 | import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
def node_match(n1, n2):
    """Nodes are considered equal iff they carry the same 'op' attribute.

    Replaces the redundant `if cond: return True / else: return False`
    with a direct boolean return (same result for the string labels used).
    """
    return n1['op'] == n2['op']
def edge_match(e1, e2):
    # All edges are treated as equivalent for graph-edit-distance purposes.
    return True
def gen_graph(adj, ops):
    """Build a DiGraph from an adjacency matrix and per-node op labels.

    Node k carries attribute 'op' = ops[k]; an edge (row, col) is added for
    every strictly-upper-triangular entry of *adj* that is positive.
    """
    G = nx.DiGraph()
    for idx, op in enumerate(ops):
        G.add_node(idx, op=op)
    assert adj.shape[0] == adj.shape[1] == len(ops)
    size = len(ops)
    for src in range(size):
        for dst in range(src + 1, size):
            if adj[src, dst] > 0:
                G.add_edge(src, dst)
    return G
def preprocess_adj_op(adj, op):
    """Trim trailing all-zero rows/columns from a padded (adj, one-hot op)
    pair and decode the remaining ops into their string names.

    Returns (trimmed_adj, op_names).  The trailing-zero counts of adj's
    columns, adj's rows and op's rows must be mutually consistent
    (zero cols == zero op rows == zero adj rows - 1).
    """

    def trailing_zero_count(flags):
        # Number of consecutive False entries at the end of *flags*.
        total = 0
        for flag in reversed(list(flags)):
            if flag:
                break
            total += 1
        return total

    idx2op = {0: 'input', 1: 'conv1x1-bn-relu', 2: 'conv3x3-bn-relu',
              3: 'maxpool3x3', 4: 'output'}

    adj = np.array(adj).astype(int)
    op = np.array(op).astype(int)
    assert op.shape[0] == adj.shape[0] == adj.shape[1]
    zero_cols = trailing_zero_count(adj.any(axis=0))
    zero_rows = trailing_zero_count(adj.any(axis=1))
    zero_ops = trailing_zero_count(op.any(axis=1))
    assert zero_cols == zero_ops == zero_rows - 1, \
        'Inconsistant result {}={}={}'.format(zero_cols, zero_ops, zero_rows - 1)
    keep = op.shape[0] - zero_cols
    trimmed_adj = adj[:keep, :keep]
    op_names = [idx2op[idx] for idx in op[:keep].argmax(axis=1)]
    return trimmed_adj, op_names
if __name__ == '__main__':
    # Small demo: build four example cell graphs, draw them side by side and
    # report graph edit distances between them.
    adj1 = np.array([[0, 1, 1, 1, 0],
                     [0, 0, 1, 0, 0],
                     [0, 0, 0, 0, 1],
                     [0, 0, 0, 0, 1],
                     [0, 0, 0, 0, 0]])
    op1 = ['in', 'conv1x1', 'conv3x3', 'mp3x3', 'out']
    adj2 = np.array([[0, 1, 1, 1, 0],
                     [0, 0, 0, 1, 0],
                     [0, 0, 0, 0, 1],
                     [0, 0, 0, 0, 1],
                     [0, 0, 0, 0, 0]])
    op2 = ['in', 'conv1x1', 'mp3x3', 'conv3x3', 'out']
    adj3 = np.array([[0, 1, 1, 1, 0, 0],
                     [0, 0, 1, 0, 0, 0],
                     [0, 0, 0, 0, 1, 0],
                     [0, 0, 0, 0, 1, 0],
                     [0, 0, 0, 0, 0, 1],
                     [0, 0, 0, 0, 0, 0]])
    op3 = ['in', 'conv1x1', 'conv3x3', 'mp3x3', 'out', 'out2']
    # adj4/op4 are deliberately padded with trailing zero rows/cols to
    # exercise preprocess_adj_op's trimming.
    adj4 = np.array([[0, 1, 1, 1, 0, 0],
                     [0, 0, 1, 0, 0, 0],
                     [0, 0, 0, 0, 1, 0],
                     [0, 0, 0, 0, 1, 0],
                     [0, 0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0, 0]])
    op4 = np.array([[1, 0, 0, 0, 0],
                    [0, 1, 0, 0, 0],
                    [0, 0, 1, 0, 0],
                    [0, 0, 0, 1, 0],
                    [0, 0, 0, 0, 1],
                    [0, 0, 0, 0, 0]])
    adj4, op4 = preprocess_adj_op(adj4, op4)

    G1 = gen_graph(adj1, op1)
    G2 = gen_graph(adj2, op2)
    G3 = gen_graph(adj3, op3)
    G4 = gen_graph(adj4, op4)

    plt.subplot(141)
    nx.draw(G1, with_labels=True, font_weight='bold')
    plt.subplot(142)
    nx.draw(G2, with_labels=True, font_weight='bold')
    plt.subplot(143)
    nx.draw(G3, with_labels=True, font_weight='bold')
    plt.subplot(144)
    nx.draw(G4, with_labels=True, font_weight='bold')

    # BUG FIX: the original computed these distances but discarded the
    # results and never displayed the figure; print and show them instead.
    print('GED(G1, G2) =',
          nx.graph_edit_distance(G1, G2, node_match=node_match, edge_match=edge_match))
    print('GED(G2, G3) =',
          nx.graph_edit_distance(G2, G3, node_match=node_match, edge_match=edge_match))
    plt.show()
endpoints/v2/errors.py | giuseppe/quay | 2,027 | 16855 | import bitmath
class V2RegistryException(Exception):
    """Base error for the V2 registry protocol.

    Carries the protocol error code, a machine-readable detail payload and
    the HTTP status to return, plus optional repository/scopes context used
    for auth challenges and a read-only marker.
    """

    def __init__(self, error_code_str, message, detail, http_status_code=400,
                 repository=None, scopes=None, is_read_only=False):
        super(V2RegistryException, self).__init__(message)
        self.http_status_code = http_status_code
        self.repository = repository
        self.scopes = scopes
        self.is_read_only = is_read_only
        self._error_code_str = error_code_str
        self._detail = detail

    def as_dict(self):
        """Serialize into the registry error-response shape."""
        detail = self._detail if self._detail is not None else {}
        payload = {
            "code": self._error_code_str,
            "message": str(self),
            "detail": detail,
        }
        if self.is_read_only:
            payload["is_readonly"] = True
        return payload
class BlobUnknown(V2RegistryException):
    """404: the requested blob digest is not known to the registry."""

    def __init__(self, detail=None):
        super(BlobUnknown, self).__init__(
            "BLOB_UNKNOWN", "blob unknown to registry", detail, http_status_code=404
        )
class BlobUploadInvalid(V2RegistryException):
    """400: the blob upload session is in an invalid state."""

    def __init__(self, detail=None):
        super(BlobUploadInvalid, self).__init__("BLOB_UPLOAD_INVALID", "blob upload invalid", detail)
class BlobUploadUnknown(V2RegistryException):
    """404: the referenced blob upload session does not exist."""

    def __init__(self, detail=None):
        super(BlobUploadUnknown, self).__init__(
            "BLOB_UPLOAD_UNKNOWN", "blob upload unknown to registry", detail, http_status_code=404
        )
class DigestInvalid(V2RegistryException):
    """400: the provided digest does not match the uploaded content."""

    def __init__(self, detail=None):
        super(DigestInvalid, self).__init__(
            "DIGEST_INVALID", "provided digest did not match uploaded content", detail
        )
class ManifestBlobUnknown(V2RegistryException):
    """400: a blob referenced by the manifest is not known to the registry."""

    def __init__(self, detail=None):
        super(ManifestBlobUnknown, self).__init__(
            "MANIFEST_BLOB_UNKNOWN", "manifest blob unknown to registry", detail
        )
class ManifestInvalid(V2RegistryException):
    """Manifest failed validation; the HTTP status is caller-selectable."""

    def __init__(self, detail=None, http_status_code=400):
        super(ManifestInvalid, self).__init__("MANIFEST_INVALID", "manifest invalid", detail, http_status_code)
class ManifestUnknown(V2RegistryException):
    """404: the requested manifest does not exist."""

    def __init__(self, detail=None):
        super(ManifestUnknown, self).__init__(
            "MANIFEST_UNKNOWN", "manifest unknown", detail, http_status_code=404
        )
class TagExpired(V2RegistryException):
    """404: the requested tag exists but has expired."""

    def __init__(self, message=None, detail=None):
        text = message or "Tag has expired"
        super(TagExpired, self).__init__("TAG_EXPIRED", text, detail, http_status_code=404)
class ManifestUnverified(V2RegistryException):
    """400: signature verification of the manifest failed."""

    def __init__(self, detail=None):
        super(ManifestUnverified, self).__init__(
            "MANIFEST_UNVERIFIED", "manifest failed signature verification", detail
        )
class NameInvalid(V2RegistryException):
    """400: the repository name is syntactically invalid."""

    def __init__(self, detail=None, message=None):
        text = message or "invalid repository name"
        super(NameInvalid, self).__init__("NAME_INVALID", text, detail)
class NameUnknown(V2RegistryException):
    """404: the repository name does not exist in this registry."""

    def __init__(self, detail=None):
        super(NameUnknown, self).__init__(
            "NAME_UNKNOWN", "repository name not known to registry", detail, http_status_code=404
        )
class SizeInvalid(V2RegistryException):
    """400: the declared content length does not match the uploaded size."""

    def __init__(self, detail=None):
        super(SizeInvalid, self).__init__(
            "SIZE_INVALID", "provided length did not match content length", detail
        )
class TagAlreadyExists(V2RegistryException):
    """409: the tag was already pushed and cannot be pushed again."""

    def __init__(self, detail=None):
        super(TagAlreadyExists, self).__init__(
            "TAG_ALREADY_EXISTS", "tag was already pushed", detail, http_status_code=409
        )
class TagInvalid(V2RegistryException):
    """400: the manifest's tag does not match the tag in the request URI."""

    def __init__(self, detail=None):
        super(TagInvalid, self).__init__(
            "TAG_INVALID", "manifest tag did not match URI", detail
        )
class LayerTooLarge(V2RegistryException):
    """413: an uploaded blob exceeds the registry's configured size limit.

    When both sizes are known, the detail payload and message include the
    human-readable uploaded/maximum sizes (formatted via bitmath).
    """

    def __init__(self, uploaded=None, max_allowed=None):
        detail = {}
        message = "Uploaded blob is larger than allowed by this registry"
        if uploaded is not None and max_allowed is not None:
            detail = {
                "reason": "%s is greater than maximum allowed size %s" % (uploaded, max_allowed),
                "max_allowed": max_allowed,
                "uploaded": uploaded,
            }

            up_str = bitmath.Byte(uploaded).best_prefix().format("{value:.2f} {unit}")
            max_str = bitmath.Byte(max_allowed).best_prefix().format("{value:.2f} {unit}")
            message = "Uploaded blob of %s is larger than %s allowed by this registry" % (
                up_str,
                max_str,
            )

        # BUG FIX: the original never called the base-class initializer, so the
        # exception carried none of the protocol fields (code, detail, HTTP
        # status) that as_dict()/handlers rely on.  413 Payload Too Large with
        # the BLOB_UPLOAD_INVALID code matches the other upload errors here —
        # NOTE(review): confirm the code/status choice against the API handlers.
        super(LayerTooLarge, self).__init__("BLOB_UPLOAD_INVALID", message, detail, 413)
class Unauthorized(V2RegistryException):
    """401: caller lacks authorization; carries repository/scopes context
    used to build the auth challenge."""

    def __init__(self, detail=None, repository=None, scopes=None):
        super(Unauthorized, self).__init__(
            "UNAUTHORIZED",
            "access to the requested resource is not authorized",
            detail,
            http_status_code=401,
            repository=repository,
            scopes=scopes,
        )
class Unsupported(V2RegistryException):
    """405: the requested operation is not supported by this registry."""

    def __init__(self, detail=None, message=None):
        text = message or "The operation is unsupported."
        super(Unsupported, self).__init__("UNSUPPORTED", text, detail, http_status_code=405)
class InvalidLogin(V2RegistryException):
    """401: the supplied credentials failed authentication."""

    def __init__(self, message=None):
        text = message or "Specified credentials are invalid"
        super(InvalidLogin, self).__init__("UNAUTHORIZED", text, {}, http_status_code=401)
class InvalidRequest(V2RegistryException):
    """400: the request was malformed or otherwise invalid."""

    def __init__(self, message=None):
        text = message or "Invalid request"
        super(InvalidRequest, self).__init__("INVALID_REQUEST", text, {}, http_status_code=400)
class NamespaceDisabled(V2RegistryException):
    """405: operations are blocked because the namespace is disabled."""

    def __init__(self, message=None):
        text = message or "This namespace is disabled. Please contact your system administrator."
        super(NamespaceDisabled, self).__init__("DENIED", text, {}, http_status_code=405)
class BlobDownloadGeoBlocked(V2RegistryException):
    """403: the pull was denied because the client's region is geo-IP blocked."""

    def __init__(self, detail=None):
        text = (
            "The region from which you are pulling has been geo-ip blocked. "
            + "Please contact the namespace owner."
        )
        super(BlobDownloadGeoBlocked, self).__init__("DENIED", text, detail, http_status_code=403)
class ReadOnlyMode(V2RegistryException):
    """405: the registry is in read-only mode; all writes are suspended."""

    def __init__(self, detail=None):
        text = (
            "System is currently read-only. Pulls will succeed but all write operations "
            + "are currently suspended."
        )
        super(ReadOnlyMode, self).__init__(
            "DENIED", text, detail, http_status_code=405, is_read_only=True
        )
| 2.375 | 2 |
scoreboard.py | TheLurkingCat/scoreboard | 0 | 16856 | '''
LICENSE: MIT license
This module can help us know about who can ask when
we have troubles in some buggy codes while solving problems.
'''
from asyncio import gather, get_event_loop
from pandas import DataFrame, set_option
from online_judge import Online_Judge
# Shared event loop for the concurrent submission crawls in Scoreboard.update.
loop = get_event_loop()
# BUG FIX: -1 for "no truncation" was deprecated in pandas 1.0 and rejected by
# modern pandas; None is the documented replacement with the same meaning.
set_option('display.max_colwidth', None)
class Scoreboard:
    '''Handles a dataframe to build up a scoreboard.

    Attributes:
        problems: (list) A list of problem ids which we are tracking.
        problem_name: (dict) Maps problem id (str) to its display name.
        scoreboard: (DataFrame) A pandas.DataFrame that saves user attempts,
            indexed by student id.
        online_judge: (Online_Judge) An FOJ api wrapper.
    '''
    def __init__(self, token, problems, problem_name):
        self.problems = problems
        self.problem_name = problem_name
        self.online_judge = Online_Judge(token)
        self.scoreboard = DataFrame()
    def update(self):
        '''Update scoreboard using web crawler.

        Since the API returns a JSON message, we can use it to rebuild the
        scoreboard, then derive 'Total' (solved count) and 'Penalty'
        (attempts on solved problems) columns and sort by rank order.
        '''
        tasks = []
        async def crawl(problem_id):
            # Run the blocking HTTP fetch in the default executor so all
            # problems are crawled concurrently on the module-level loop.
            return await loop.run_in_executor(None, self.online_judge.get_submission, problem_id)
        for problem_id in self.problems:
            task = loop.create_task(crawl(problem_id))
            tasks.append(task)
        # gather() preserves task order, so results align with self.problems.
        temp = dict(
            zip(self.problems, loop.run_until_complete(gather(*tasks))))
        self.scoreboard = DataFrame.from_dict(temp)
        self.scoreboard.index.name = 'Student_ID'
        # `x == x` is False only for NaN cells (no attempt); verdict 10 is
        # "AC" per the verdict table in visualize().
        # NOTE(review): DataFrame.applymap is deprecated in pandas >= 2.1
        # (renamed to DataFrame.map) — consider migrating.
        self.scoreboard['Total'] = self.scoreboard.applymap(
            lambda x: x == x and x['verdict'] == 10).sum(axis=1)
        # Penalty sums attempt counts, but only for solved problems.
        self.scoreboard['Penalty'] = self.scoreboard.applymap(
            lambda x: x['penalty'] if isinstance(x, dict) and x['verdict'] == 10 else 0).sum(axis=1)
        # Rank: most solved first, then lowest penalty, then student id.
        self.scoreboard.sort_values(
            by=['Total', 'Penalty', 'Student_ID'], inplace=True, ascending=[False, True, True])
    def visualize(self):
        '''
        Make scoreboard table.

        Returns:
            (str) A html page (inline CSS + table) to be rendered.
        '''
        def make_verdict_string(x):
            # Render one cell: colored verdict span for attempts, gray "N/A"
            # for NaN cells (`x == x` is the NaN check).
            verdict = {4: 'CE', 5: 'RE', 6: 'MLE',
                       7: 'TLE', 8: 'OLE', 9: 'WA', 10: 'AC'}
            if x == x:
                return '<span class="{}" title="Attempted: {}">{}</span>'.format("right" if x['verdict'] == 10 else "wrong", x['penalty'], verdict[x['verdict']])
            else:
                return '<span class="none" title="Not Attempt">N/A</span>'
        css = """<style type="text/css">
        html,body{
            margin:0;
            padding:0;
            height:100%;
            width:100%;
        }
        .row_heading {width:70px}
        .wrong {background-color:red}
        .right {background-color:green}
        .none {background-color:gray}
        span{
            text-align:center;
            display:block;
            width:60px;
        }
        th, td{
            text-align:center;
            width:60px;
        }
        a{
            text-decoration:none;
            color:black;
        }
        </style>
        """
        # Drop the derived columns; only per-problem cells are displayed.
        scoreboard = self.scoreboard.drop(columns=['Total', 'Penalty']).applymap(
            make_verdict_string)
        scoreboard.index.name = None
        # Student ids become links to that student's submission list.
        scoreboard.index = scoreboard.index.map(
            '<a href="https://oj.nctu.me/groups/11/submissions/?name={0}" target="_blank">{0}</a>'.format)
        # Column headers become links to the problem page, titled with the
        # human-readable problem name.
        scoreboard.rename(lambda x: '<a href="https://oj.nctu.me/problems/{1}/" target="_blank" <span title="{0}">{1}</span></a>'.format(self.problem_name[str(x)], x),
                          axis='columns', inplace=True)
        return css + scoreboard.to_html(border=0, max_cols=None, max_rows=None, escape=False)
| 3 | 3 |
custom_transforms.py | zyxu1996/Efficient-Transformer | 22 | 16857 | <filename>custom_transforms.py<gh_stars>10-100
import torch
import random
import numpy as np
import cv2
import os
import torch.nn as nn
from torchvision import transforms
class RandomHorizontalFlip(object):
    """With probability 0.5, mirror both image and label left-right."""

    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        if random.random() < 0.5:
            # flipCode=1: flip around the vertical axis (horizontal mirror).
            image, label = cv2.flip(image, 1), cv2.flip(label, 1)
        return {'image': image, 'label': label}
class RandomVerticalFlip(object):
    """With probability 0.5, mirror both image and label top-bottom."""

    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        if random.random() < 0.5:
            # flipCode=0: flip around the horizontal axis (vertical mirror).
            image, label = cv2.flip(image, 0), cv2.flip(label, 0)
        return {'image': image, 'label': label}
class RandomScaleCrop(object):
    """Randomly rescale an {'image', 'label'} pair, pad if needed, then take
    a random crop of *crop_size*.

    The short edge is scaled to one of {0.5, 0.75, 1.0, 1.25, 1.5} x
    *base_size*; both arrays use [H, W] shape convention.
    """

    def __init__(self, base_size=None, crop_size=None, fill=0):
        """shape [H, W]"""
        if base_size is None:
            base_size = [512, 512]
        if crop_size is None:
            crop_size = [512, 512]
        self.base_size = np.array(base_size)
        self.crop_size = np.array(crop_size)
        self.fill = fill

    def __call__(self, sample):
        img = sample['image']
        mask = sample['label']
        # random scale (short edge)
        short_size = random.choice([self.base_size * 0.5, self.base_size * 0.75, self.base_size,
                                    self.base_size * 1.25, self.base_size * 1.5])
        # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the documented, behavior-identical replacement.
        short_size = short_size.astype(int)
        h, w = img.shape[0:2]
        # Scale so the shorter side matches short_size, preserving aspect.
        if h > w:
            ow = short_size[1]
            oh = int(1.0 * h * ow / w)
        else:
            oh = short_size[0]
            ow = int(1.0 * w * oh / h)
        #img = img.resize((ow, oh), Image.BILINEAR)
        #mask = mask.resize((ow, oh), Image.NEAREST)
        img = cv2.resize(img, (ow, oh), interpolation=cv2.INTER_LINEAR)
        mask = cv2.resize(mask, (ow, oh), interpolation=cv2.INTER_NEAREST)
        # pad crop
        if short_size[0] < self.crop_size[0] or short_size[1] < self.crop_size[1]:
            padh = self.crop_size[0] - oh if oh < self.crop_size[0] else 0
            padw = self.crop_size[1] - ow if ow < self.crop_size[1] else 0
            #img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)
            #mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=self.fill)
            # NOTE(review): BORDER_DEFAULT reflects border pixels instead of
            # filling with self.fill like the commented-out PIL code did —
            # confirm the reflection padding is intentional.
            img = cv2.copyMakeBorder(img, 0, padh, 0, padw, borderType=cv2.BORDER_DEFAULT)
            mask = cv2.copyMakeBorder(mask, 0, padh, 0, padw, borderType=cv2.BORDER_DEFAULT)
        # random crop crop_size
        h, w = img.shape[0:2]
        x1 = random.randint(0, w - self.crop_size[1])
        y1 = random.randint(0, h - self.crop_size[0])
        img = img[y1:y1+self.crop_size[0], x1:x1+self.crop_size[1], :]
        mask = mask[y1:y1+self.crop_size[0], x1:x1+self.crop_size[1]]
        return {'image': img, 'label': mask}
class ImageSplit(nn.Module):
    """Split every sample of a batch into a grid of equally-sized tiles.

    numbers is [rows, cols]; tiles are emitted left-to-right, top-to-bottom.
    A 4-D input (B, C, H, W) yields (B, rows*cols, C, H/rows, W/cols); a
    3-D input (B, H, W) is treated as single-channel and the channel axis
    is squeezed back out of the result.
    """

    def __init__(self, numbers=None):
        super(ImageSplit, self).__init__()
        self.num = [2, 2] if numbers is None else numbers

    def forward(self, x):
        squeeze_channel = len(x.shape) == 3
        if squeeze_channel:
            x = x.unsqueeze(dim=1)
        b, c, h, w = x.shape
        rows, cols = self.num[0], self.num[1]
        assert h % rows == 0 and w % cols == 0
        tile_h, tile_w = h // rows, w // cols
        per_sample = []
        for sample in range(b):
            tiles = [
                x[sample][:, r * tile_h:(r + 1) * tile_h,
                          col * tile_w:(col + 1) * tile_w].unsqueeze(dim=0)
                for r in range(rows) for col in range(cols)
            ]
            per_sample.append(torch.cat(tiles, dim=0).unsqueeze(dim=0))
        out = torch.cat(per_sample, dim=0).contiguous()
        if squeeze_channel:
            out = out.squeeze(dim=2)
        return out
class ToTensor(object):
    """Convert the ndarrays of an {'image', 'label'} sample to Tensors,
    normalizing the image and optionally adding a Canny edge channel."""
    def __init__(self, add_edge=True):
        """Uses the standard ImageNet channel mean/std for normalization."""
        self.normalize = transforms.Normalize((.485, .456, .406), (.229, .224, .225))
        self.add_edge = add_edge
    def get_edge(self, img, edge_width=3):
        # Canny edges on the blurred grayscale image, dilated to edge_width
        # and scaled to 0/1, returned as a (1, H, W) float tensor.
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (11, 11), 0)
        edge = cv2.Canny(gray, 50, 150)
        # cv2.imshow('edge', edge)
        # cv2.waitKey(0)
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (edge_width, edge_width))
        edge = cv2.dilate(edge, kernel)
        edge = edge / 255
        edge = torch.from_numpy(edge).unsqueeze(dim=0).float()
        return edge
    def __call__(self, sample):
        # swap color axis because
        # numpy image: H x W x C
        # torch image: C X H X W
        img = sample['image']
        mask = sample['label']
        mask = np.expand_dims(mask, axis=2)
        img = np.array(img).astype(np.float32).transpose((2, 0, 1))
        mask = np.array(mask).astype(np.int64).transpose((2, 0, 1))
        # Scale to [0, 1] before ImageNet normalization.
        img = torch.from_numpy(img).float().div(255)
        img = self.normalize(img)
        mask = torch.from_numpy(mask).float()
        if self.add_edge:
            # The edge map is added onto every normalized channel; the edge is
            # computed from the ORIGINAL (un-normalized) image.
            edge = self.get_edge(sample['image'])
            img = img + edge
        return {'image': img, 'label': mask}
class RGBGrayExchange():
    """Convert segmentation maps between RGB color coding and gray class ids.

    palette[i] (RGB order) is the color of class id i.  If *path* is given,
    the conversion methods load the image from disk instead of using the
    *image* argument.
    """

    def __init__(self, path=None, palette=None):
        self.palette = palette
        # Palette colors are in RGB format.
        if palette is None:
            self.palette = [[255, 255, 255], [0, 0, 255], [0, 255, 255],
                            [0, 255, 0], [255, 255, 0], [255, 0, 0]]
        self.path = path

    def read_img(self):
        """Load the image at self.path; color images are converted BGR->RGB."""
        img = cv2.imread(self.path, cv2.IMREAD_UNCHANGED)
        if len(img.shape) == 3:
            img = img[:, :, ::-1]
        return img

    def RGB_to_Gray(self, image=None):
        """Map each palette color to its class index; unmatched pixels stay 0."""
        # IMPROVED: PEP 8 `is not None` instead of `not ... is None`.
        if self.path is not None:
            image = self.read_img()
        Gray = np.zeros(shape=[image.shape[0], image.shape[1]], dtype=np.uint8)
        for i in range(len(self.palette)):
            # IMPROVED: one np.all over the channel axis replaces the original
            # chained per-channel `== False` masking (identical result).
            match = np.all(image == np.array(self.palette[i]), axis=-1)
            Gray[match] = i
        print('unique pixels:{}'.format(np.unique(Gray)))
        return Gray

    def Gray_to_RGB(self, image=None):
        """Inverse of RGB_to_Gray: paint each class index with its color."""
        if self.path is not None:
            image = self.read_img()
        RGB = np.zeros(shape=[image.shape[0], image.shape[1], 3], dtype=np.uint8)
        for i in range(len(self.palette)):
            RGB[image == i] = np.array(self.palette[i])
        print('unique pixels:{}'.format(np.unique(RGB)))
        return RGB
class Mixup(nn.Module):
    """Mixup training wrapper: blends pairs of samples within a batch and
    returns the correspondingly blended loss.

    alpha parameterizes the Beta distribution the mixing weight is drawn
    from; use_edge additionally supervises the model's last output against
    edge_contour targets (criterion is then a [seg_loss, edge_loss] pair).
    """
    def __init__(self, alpha=1.0, use_edge=False):
        super(Mixup, self).__init__()
        self.alpha = alpha
        self.use_edge = use_edge
    def criterion(self, lam, outputs, targets_a, targets_b, criterion):
        # Standard mixup loss: convex combination of the two target losses.
        return lam * criterion(outputs, targets_a) + (1 - lam) * criterion(outputs, targets_b)
    def forward(self, inputs, targets, criterion, model):
        # Mixing weight lam ~ Beta(alpha, alpha); alpha <= 0 disables mixing.
        if self.alpha > 0:
            lam = np.random.beta(self.alpha, self.alpha)
        else:
            lam = 1
        batch_size = inputs.size(0)
        # Random permutation pairs each sample with another from the batch.
        # NOTE(review): .cuda() assumes GPU training — confirm for CPU runs.
        index = torch.randperm(batch_size).cuda()
        mix_inputs = lam*inputs + (1-lam)*inputs[index, :]
        targets_a, targets_b = targets, targets[index]
        outputs = model(mix_inputs)
        losses = 0
        if isinstance(outputs, (list, tuple)):
            # Multi-head model: sum the mixup loss over heads.
            if self.use_edge:
                # All heads but the last are segmentation outputs...
                for i in range(len(outputs) - 1):
                    loss = self.criterion(lam, outputs[i], targets_a, targets_b, criterion[0])
                    losses += loss
                # ...and the last head is supervised on edge maps derived
                # from the (unmixed) targets.
                edge_targets_a = edge_contour(targets).long()
                edge_targets_b = edge_targets_a[index]
                loss2 = self.criterion(lam, outputs[-1], edge_targets_a, edge_targets_b, criterion[1])
                losses += loss2
            else:
                for i in range(len(outputs)):
                    loss = self.criterion(lam, outputs[i], targets_a, targets_b, criterion)
                    losses += loss
        else:
            losses = self.criterion(lam, outputs, targets_a, targets_b, criterion)
        return losses
def edge_contour(label, edge_width=3):
    """Return a dilated 0/1 edge map marking boundaries between label regions.

    label: (B, H, W) integer tensor; value 255 is treated as "ignore" and
    never produces edges.  The result is a float tensor of the same shape,
    moved back to CUDA if the input was on CUDA.
    """
    import cv2
    cuda_type = label.is_cuda
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin int is the documented, behavior-identical replacement.
    label = label.cpu().numpy().astype(int)
    b, h, w = label.shape
    edge = np.zeros(label.shape)

    # Each block marks pixels whose label differs from a neighbor in one
    # direction (right / up / diagonals), skipping the 255 ignore value.
    # right
    edge_right = edge[:, 1:h, :]
    edge_right[(label[:, 1:h, :] != label[:, :h - 1, :]) & (label[:, 1:h, :] != 255)
               & (label[:, :h - 1, :] != 255)] = 1

    # up
    edge_up = edge[:, :, :w - 1]
    edge_up[(label[:, :, :w - 1] != label[:, :, 1:w])
            & (label[:, :, :w - 1] != 255)
            & (label[:, :, 1:w] != 255)] = 1

    # upright
    edge_upright = edge[:, :h - 1, :w - 1]
    edge_upright[(label[:, :h - 1, :w - 1] != label[:, 1:h, 1:w])
                 & (label[:, :h - 1, :w - 1] != 255)
                 & (label[:, 1:h, 1:w] != 255)] = 1

    # bottomright
    edge_bottomright = edge[:, :h - 1, 1:w]
    edge_bottomright[(label[:, :h - 1, 1:w] != label[:, 1:h, :w - 1])
                     & (label[:, :h - 1, 1:w] != 255)
                     & (label[:, 1:h, :w - 1] != 255)] = 1

    # Thicken the one-pixel edges to edge_width via morphological dilation.
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (edge_width, edge_width))
    for i in range(edge.shape[0]):
        edge[i] = cv2.dilate(edge[i], kernel)

    # edge[edge == 1] = 255 # view edge
    # import random
    # cv2.imwrite(os.path.join('./edge', '{}.png'.format(random.random())), edge[0])
    if cuda_type:
        edge = torch.from_numpy(edge).cuda()
    else:
        edge = torch.from_numpy(edge)
    return edge
if __name__ == '__main__':
    # Smoke test for edge_contour: run it over every label map in the
    # Vaihingen annotation folder (paths assume the repo's data layout).
    path = './data/vaihingen/annotations/labels'
    filelist = os.listdir(path)
    for file in filelist:
        print(file)
        img = cv2.imread(os.path.join(path, file), cv2.IMREAD_UNCHANGED)
        # Fake a batch dimension of 2, since edge_contour expects (B, H, W).
        img = torch.from_numpy(img).unsqueeze(dim=0).repeat(2, 1, 1)
        img = edge_contour(img)
        # cv2.imwrite(os.path.join(save_path, os.path.splitext(file)[0] + '.png'), gray)
| 2.40625 | 2 |
ui/staff.py | AryaStarkSakura/Stylized-Neural-Painting | 0 | 16858 | <filename>ui/staff.py<gh_stars>0
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'staff.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(800, 600)
MainWindow.setStyleSheet("QListWidget, QListView, QTreeWidget, QTreeView,QFrame {\n"
" outline: 0px;\n"
"}\n"
"/*设置左侧选项的最小最大宽度,文字颜色和背景颜色*/\n"
"QListWidget {\n"
" min-width: 200px;\n"
" max-width: 200px;\n"
" color: white;\n"
" background-color:#2f4050\n"
"}\n"
"#head\n"
"{\n"
"background:white;\n"
"border-radius:30px;\n"
"}\n"
"#head_2\n"
"{\n"
"background:#CCFFCC;\n"
"border:1px solid;\n"
"border-color:#CCFFCC;\n"
"border-radius:60px;\n"
"}\n"
"#Search\n"
"{\n"
"border-radius:5px;\n"
"background:#293846;\n"
"border:0.5px solid;\n"
"border-color:white;\n"
"\n"
"}\n"
"QListWidget::item\n"
"{\n"
"height:60;\n"
"background-color:#293846;\n"
"}\n"
"#frame\n"
"{\n"
"background-color:#2f4050\n"
"\n"
"}\n"
"/*被选中时的背景颜色和左边框颜色*/\n"
"QListWidget::item:selected {\n"
" background: rgb(52, 52, 52);\n"
" border-left: 2px solid rgb(9, 187, 7);\n"
"}\n"
"/*鼠标悬停颜色*/\n"
"HistoryPanel::item:hover {\n"
" background: rgb(52, 52, 52);\n"
"}\n"
"/*右侧的层叠窗口的背景颜色*/\n"
"QStackedWidget {\n"
" background: white;\n"
"}\n"
"/*模拟的页面*/\n"
"#frame > QLabel\n"
"{\n"
"color:white;\n"
"}\n"
"#frame_2\n"
"{\n"
"background-color:#CCFFCC;\n"
"}\n"
"#page_2 > QLineEdit,QDateEdit\n"
"{\n"
"border-radius:5px;\n"
"background:#FFFFFF;\n"
"border:1px solid;\n"
"border-color:#6699CC;\n"
"}\n"
"#page_4 > QLineEdit\n"
"{\n"
"border-radius:5px;\n"
"background:#FFFFFF;\n"
"border:1px solid;\n"
"border-color:#6699CC;\n"
"}\n"
"QLineEdit\n"
"{\n"
"border-radius:5px;\n"
"background:#FFFFFF;\n"
"border:1px solid;\n"
"border-color:#6699CC;\n"
"}\n"
"\n"
"\n"
"")
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.stackedWidget = QtWidgets.QStackedWidget(self.centralwidget)
self.stackedWidget.setGeometry(QtCore.QRect(190, 0, 611, 601))
self.stackedWidget.setStyleSheet("background-color:#FFFFFF\n"
"")
self.stackedWidget.setObjectName("stackedWidget")
self.page = QtWidgets.QWidget()
self.page.setObjectName("page")
self.split = QtWidgets.QFrame(self.page)
self.split.setGeometry(QtCore.QRect(10, 210, 600, 2))
self.split.setStyleSheet("color:#CCFFCC;\n"
"border-color:#CCFFCC;\n"
"background-color:#CCFFCC")
self.split.setFrameShape(QtWidgets.QFrame.HLine)
self.split.setFrameShadow(QtWidgets.QFrame.Raised)
self.split.setObjectName("split")
self.head_2 = QtWidgets.QToolButton(self.page)
self.head_2.setGeometry(QtCore.QRect(260, 30, 121, 121))
self.head_2.setText("")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("./pictures/staff3.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.head_2.setIcon(icon)
self.head_2.setIconSize(QtCore.QSize(100, 100))
self.head_2.setObjectName("head_2")
self.name = QtWidgets.QLabel(self.page)
self.name.setGeometry(QtCore.QRect(260, 160, 131, 31))
self.name.setAlignment(QtCore.Qt.AlignCenter)
self.name.setObjectName("name")
self.label = QtWidgets.QLabel(self.page)
self.label.setGeometry(QtCore.QRect(190, 240, 61, 51))
font = QtGui.QFont()
font.setFamily("幼圆")
font.setPointSize(10)
self.label.setFont(font)
self.label.setObjectName("label")
self.label_3 = QtWidgets.QLabel(self.page)
self.label_3.setGeometry(QtCore.QRect(190, 290, 51, 51))
font = QtGui.QFont()
font.setFamily("幼圆")
font.setPointSize(10)
self.label_3.setFont(font)
self.label_3.setObjectName("label_3")
self.label_4 = QtWidgets.QLabel(self.page)
self.label_4.setGeometry(QtCore.QRect(190, 340, 71, 51))
font = QtGui.QFont()
font.setFamily("幼圆")
font.setPointSize(10)
self.label_4.setFont(font)
self.label_4.setObjectName("label_4")
self.label_5 = QtWidgets.QLabel(self.page)
self.label_5.setGeometry(QtCore.QRect(190, 390, 61, 51))
font = QtGui.QFont()
font.setFamily("幼圆")
font.setPointSize(10)
self.label_5.setFont(font)
self.label_5.setObjectName("label_5")
self.label_6 = QtWidgets.QLabel(self.page)
self.label_6.setGeometry(QtCore.QRect(190, 440, 71, 51))
font = QtGui.QFont()
font.setFamily("幼圆")
font.setPointSize(10)
self.label_6.setFont(font)
self.label_6.setObjectName("label_6")
self.label_7 = QtWidgets.QLabel(self.page)
self.label_7.setGeometry(QtCore.QRect(190, 490, 81, 51))
font = QtGui.QFont()
font.setFamily("幼圆")
font.setPointSize(10)
self.label_7.setFont(font)
self.label_7.setObjectName("label_7")
self.sname = QtWidgets.QLabel(self.page)
self.sname.setGeometry(QtCore.QRect(300, 250, 131, 31))
font = QtGui.QFont()
font.setFamily("幼圆")
font.setPointSize(10)
self.sname.setFont(font)
self.sname.setObjectName("sname")
self.ssex = QtWidgets.QLabel(self.page)
self.ssex.setGeometry(QtCore.QRect(300, 300, 81, 31))
font = QtGui.QFont()
font.setFamily("幼圆")
font.setPointSize(10)
self.ssex.setFont(font)
self.ssex.setObjectName("ssex")
self.stime = QtWidgets.QLabel(self.page)
self.stime.setGeometry(QtCore.QRect(300, 350, 91, 31))
font = QtGui.QFont()
font.setFamily("幼圆")
font.setPointSize(10)
self.stime.setFont(font)
self.stime.setObjectName("stime")
self.srole = QtWidgets.QLabel(self.page)
self.srole.setGeometry(QtCore.QRect(300, 400, 81, 31))
font = QtGui.QFont()
font.setFamily("幼圆")
font.setPointSize(10)
self.srole.setFont(font)
self.srole.setObjectName("srole")
self.sphone = QtWidgets.QLabel(self.page)
self.sphone.setGeometry(QtCore.QRect(300, 450, 141, 31))
font = QtGui.QFont()
font.setFamily("幼圆")
font.setPointSize(10)
self.sphone.setFont(font)
self.sphone.setObjectName("sphone")
self.sidcard = QtWidgets.QLabel(self.page)
self.sidcard.setGeometry(QtCore.QRect(300, 500, 181, 31))
font = QtGui.QFont()
font.setFamily("幼圆")
font.setPointSize(10)
self.sidcard.setFont(font)
self.sidcard.setObjectName("sidcard")
self.label_8 = QtWidgets.QLabel(self.page)
self.label_8.setGeometry(QtCore.QRect(190, 540, 81, 51))
font = QtGui.QFont()
font.setFamily("幼圆")
font.setPointSize(10)
self.label_8.setFont(font)
self.label_8.setObjectName("label_8")
self.sidcard_2 = QtWidgets.QLabel(self.page)
self.sidcard_2.setGeometry(QtCore.QRect(300, 550, 181, 31))
font = QtGui.QFont()
font.setFamily("幼圆")
font.setPointSize(10)
self.sidcard_2.setFont(font)
self.sidcard_2.setObjectName("sidcard_2")
self.stackedWidget.addWidget(self.page)
self.page_3 = QtWidgets.QWidget()
self.page_3.setObjectName("page_3")
self.searchTable = QtWidgets.QTableWidget(self.page_3)
self.searchTable.setGeometry(QtCore.QRect(0, 240, 611, 361))
self.searchTable.setStyleSheet("")
self.searchTable.setObjectName("searchTable")
self.searchTable.setColumnCount(9)
self.searchTable.setRowCount(0)
item = QtWidgets.QTableWidgetItem()
self.searchTable.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.searchTable.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.searchTable.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.searchTable.setHorizontalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.searchTable.setHorizontalHeaderItem(4, item)
item = QtWidgets.QTableWidgetItem()
self.searchTable.setHorizontalHeaderItem(5, item)
item = QtWidgets.QTableWidgetItem()
self.searchTable.setHorizontalHeaderItem(6, item)
item = QtWidgets.QTableWidgetItem()
self.searchTable.setHorizontalHeaderItem(7, item)
item = QtWidgets.QTableWidgetItem()
self.searchTable.setHorizontalHeaderItem(8, item)
self.frame_2 = QtWidgets.QFrame(self.page_3)
self.frame_2.setGeometry(QtCore.QRect(10, 30, 611, 211))
self.frame_2.setStyleSheet("background-color:rgb(255, 249, 246)")
self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_2.setObjectName("frame_2")
self.searchName = QtWidgets.QLineEdit(self.frame_2)
self.searchName.setGeometry(QtCore.QRect(170, 40, 181, 41))
self.searchName.setStyleSheet("border-radius:10px;\n"
"background:#FFFFFF;\n"
"border:1px solid;\n"
"border-color:#CCCCFF;\n"
"")
self.searchName.setObjectName("searchName")
self.searchNB = QtWidgets.QToolButton(self.frame_2)
self.searchNB.setGeometry(QtCore.QRect(370, 40, 101, 41))
self.searchNB.setStyleSheet("background-color:rgb(255, 249, 246);\n"
"border:0px;\n"
"\n"
"border-radius:5px")
self.searchNB.setText("")
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap("./pictures/search.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.searchNB.setIcon(icon1)
self.searchNB.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
self.searchNB.setObjectName("searchNB")
self.label_74 = QtWidgets.QLabel(self.frame_2)
self.label_74.setGeometry(QtCore.QRect(310, 149, 151, 40))
self.label_74.setStyleSheet("font: 9pt \"FontAwesome\";")
self.label_74.setObjectName("label_74")
self.modifyvalue = QtWidgets.QLineEdit(self.frame_2)
self.modifyvalue.setGeometry(QtCore.QRect(430, 160, 111, 21))
self.modifyvalue.setStyleSheet("border-radius:5px")
self.modifyvalue.setText("")
self.modifyvalue.setObjectName("modifyvalue")
self.commitTableModify = QtWidgets.QPushButton(self.frame_2)
self.commitTableModify.setGeometry(QtCore.QRect(170, 155, 121, 31))
self.commitTableModify.setStyleSheet("#commitTableModify{background:#CCFFCC;\n"
"border-radius:8px}\n"
"#commitTableModify:hover\n"
"{\n"
"background:#CCFF99\n"
"}")
self.commitTableModify.setObjectName("commitTableModify")
self.label_78 = QtWidgets.QLabel(self.frame_2)
self.label_78.setGeometry(QtCore.QRect(360, 10, 231, 21))
font = QtGui.QFont()
font.setPointSize(8)
self.label_78.setFont(font)
self.label_78.setObjectName("label_78")
self.commitTableDel = QtWidgets.QPushButton(self.frame_2)
self.commitTableDel.setGeometry(QtCore.QRect(170, 110, 121, 31))
self.commitTableDel.setStyleSheet("#commitTableDel{background:#CCFFCC;\n"
"border-radius:8px}\n"
"#commitTableDel:hover\n"
"{\n"
"background:#CCFF99\n"
"}")
self.commitTableDel.setObjectName("commitTableDel")
self.split_3 = QtWidgets.QFrame(self.page_3)
self.split_3.setGeometry(QtCore.QRect(10, 30, 600, 2))
self.split_3.setStyleSheet("color:#CCFFCC;\n"
"border-color:#CCFFCC;\n"
"background-color:#CCFFCC")
self.split_3.setFrameShape(QtWidgets.QFrame.HLine)
self.split_3.setFrameShadow(QtWidgets.QFrame.Raised)
self.split_3.setObjectName("split_3")
self.toolButton_2 = QtWidgets.QToolButton(self.page_3)
self.toolButton_2.setGeometry(QtCore.QRect(20, 0, 101, 31))
font = QtGui.QFont()
font.setFamily("FontAwesome")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.toolButton_2.setFont(font)
self.toolButton_2.setStyleSheet("border:none")
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap("./pictures/search1.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton_2.setIcon(icon2)
self.toolButton_2.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
self.toolButton_2.setObjectName("toolButton_2")
self.line = QtWidgets.QFrame(self.page_3)
self.line.setGeometry(QtCore.QRect(10, 230, 601, 16))
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.stackedWidget.addWidget(self.page_3)
self.page_2 = QtWidgets.QWidget()
self.page_2.setObjectName("page_2")
self.label_9 = QtWidgets.QLabel(self.page_2)
self.label_9.setGeometry(QtCore.QRect(100, 60, 101, 40))
self.label_9.setStyleSheet("font: 9pt \"FontAwesome\";")
self.label_9.setObjectName("label_9")
self.split_2 = QtWidgets.QFrame(self.page_2)
self.split_2.setGeometry(QtCore.QRect(10, 30, 600, 2))
self.split_2.setStyleSheet("color:#CCFFCC;\n"
"border-color:#CCFFCC;\n"
"background-color:#CCFFCC")
self.split_2.setFrameShape(QtWidgets.QFrame.HLine)
self.split_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.split_2.setObjectName("split_2")
self.label_10 = QtWidgets.QLabel(self.page_2)
self.label_10.setGeometry(QtCore.QRect(100, 260, 101, 41))
self.label_10.setStyleSheet("font: 9pt \"FontAwesome\";")
self.label_10.setObjectName("label_10")
self.label_11 = QtWidgets.QLabel(self.page_2)
self.label_11.setGeometry(QtCore.QRect(100, 110, 101, 41))
self.label_11.setStyleSheet("font: 9pt \"FontAwesome\";")
self.label_11.setObjectName("label_11")
self.label_12 = QtWidgets.QLabel(self.page_2)
self.label_12.setGeometry(QtCore.QRect(100, 310, 101, 41))
self.label_12.setStyleSheet("font: 9pt \"FontAwesome\";")
self.label_12.setObjectName("label_12")
self.label_13 = QtWidgets.QLabel(self.page_2)
self.label_13.setGeometry(QtCore.QRect(100, 160, 101, 41))
self.label_13.setStyleSheet("font: 9pt \"FontAwesome\";")
self.label_13.setObjectName("label_13")
self.label_14 = QtWidgets.QLabel(self.page_2)
self.label_14.setGeometry(QtCore.QRect(100, 360, 101, 41))
self.label_14.setStyleSheet("font: 9pt \"FontAwesome\";")
self.label_14.setObjectName("label_14")
self.label_15 = QtWidgets.QLabel(self.page_2)
self.label_15.setGeometry(QtCore.QRect(100, 210, 101, 41))
self.label_15.setStyleSheet("font: 9pt \"FontAwesome\";")
self.label_15.setObjectName("label_15")
self.label_16 = QtWidgets.QLabel(self.page_2)
self.label_16.setGeometry(QtCore.QRect(100, 410, 101, 41))
self.label_16.setStyleSheet("font: 9pt \"FontAwesome\";")
self.label_16.setObjectName("label_16")
self.label_17 = QtWidgets.QLabel(self.page_2)
self.label_17.setGeometry(QtCore.QRect(100, 460, 101, 41))
self.label_17.setStyleSheet("font: 9pt \"FontAwesome\";")
self.label_17.setObjectName("label_17")
self.inputsid = QtWidgets.QLineEdit(self.page_2)
self.inputsid.setGeometry(QtCore.QRect(220, 70, 221, 21))
self.inputsid.setObjectName("inputsid")
self.inputname = QtWidgets.QLineEdit(self.page_2)
self.inputname.setGeometry(QtCore.QRect(220, 120, 221, 21))
self.inputname.setObjectName("inputname")
self.inputuser = QtWidgets.QLineEdit(self.page_2)
self.inputuser.setGeometry(QtCore.QRect(220, 270, 221, 21))
self.inputuser.setObjectName("inputuser")
self.inputpwd = QtWidgets.QLineEdit(self.page_2)
self.inputpwd.setGeometry(QtCore.QRect(220, 320, 221, 21))
self.inputpwd.setObjectName("inputpwd")
self.inputrole = QtWidgets.QLineEdit(self.page_2)
self.inputrole.setGeometry(QtCore.QRect(220, 370, 221, 21))
self.inputrole.setObjectName("inputrole")
self.inputidcard = QtWidgets.QLineEdit(self.page_2)
self.inputidcard.setGeometry(QtCore.QRect(220, 420, 221, 21))
self.inputidcard.setObjectName("inputidcard")
self.inputphone = QtWidgets.QLineEdit(self.page_2)
self.inputphone.setGeometry(QtCore.QRect(220, 470, 221, 21))
self.inputphone.setObjectName("inputphone")
self.toolButton_3 = QtWidgets.QToolButton(self.page_2)
self.toolButton_3.setGeometry(QtCore.QRect(20, 0, 111, 31))
font = QtGui.QFont()
font.setFamily("FontAwesome")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.toolButton_3.setFont(font)
self.toolButton_3.setStyleSheet("border:none\n"
"")
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap("./pictures/insert.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton_3.setIcon(icon3)
self.toolButton_3.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
self.toolButton_3.setObjectName("toolButton_3")
self.commitAdd = QtWidgets.QPushButton(self.page_2)
self.commitAdd.setGeometry(QtCore.QRect(200, 530, 211, 31))
self.commitAdd.setStyleSheet("#commitAdd{background:#CCFFCC;\n"
"border-radius:8px}\n"
"#commitAdd:hover\n"
"{\n"
"background:#CCFF99\n"
"}")
self.commitAdd.setObjectName("commitAdd")
self.inputdate = QtWidgets.QDateEdit(self.page_2)
self.inputdate.setGeometry(QtCore.QRect(220, 220, 221, 22))
self.inputdate.setDateTime(QtCore.QDateTime(QtCore.QDate(2020, 1, 1), QtCore.QTime(0, 0, 0)))
self.inputdate.setObjectName("inputdate")
self.inputfemale = QtWidgets.QRadioButton(self.page_2)
self.inputfemale.setGeometry(QtCore.QRect(320, 170, 115, 19))
self.inputfemale.setObjectName("inputfemale")
self.inputmale = QtWidgets.QRadioButton(self.page_2)
self.inputmale.setGeometry(QtCore.QRect(220, 170, 81, 19))
self.inputmale.setObjectName("inputmale")
self.stackedWidget.addWidget(self.page_2)
self.page_4 = QtWidgets.QWidget()
self.page_4.setObjectName("page_4")
self.split_4 = QtWidgets.QFrame(self.page_4)
self.split_4.setGeometry(QtCore.QRect(10, 30, 600, 2))
self.split_4.setStyleSheet("color:#CCFFCC;\n"
"border-color:#CCFFCC;\n"
"background-color:#CCFFCC")
self.split_4.setFrameShape(QtWidgets.QFrame.HLine)
self.split_4.setFrameShadow(QtWidgets.QFrame.Raised)
self.split_4.setObjectName("split_4")
self.toolButton_4 = QtWidgets.QToolButton(self.page_4)
self.toolButton_4.setGeometry(QtCore.QRect(20, 0, 111, 31))
font = QtGui.QFont()
font.setFamily("FontAwesome")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.toolButton_4.setFont(font)
self.toolButton_4.setStyleSheet("border:none\n"
"")
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap("./pictures/delete.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolButton_4.setIcon(icon4)
self.toolButton_4.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
self.toolButton_4.setObjectName("toolButton_4")
self.deleteTable = QtWidgets.QTableWidget(self.page_4)
self.deleteTable.setGeometry(QtCore.QRect(10, 260, 601, 341))
self.deleteTable.setStyleSheet("")
self.deleteTable.setObjectName("deleteTable")
self.deleteTable.setColumnCount(9)
self.deleteTable.setRowCount(0)
item = QtWidgets.QTableWidgetItem()
self.deleteTable.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.deleteTable.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.deleteTable.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.deleteTable.setHorizontalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.deleteTable.setHorizontalHeaderItem(4, item)
item = QtWidgets.QTableWidgetItem()
self.deleteTable.setHorizontalHeaderItem(5, item)
item = QtWidgets.QTableWidgetItem()
self.deleteTable.setHorizontalHeaderItem(6, item)
item = QtWidgets.QTableWidgetItem()
self.deleteTable.setHorizontalHeaderItem(7, item)
item = QtWidgets.QTableWidgetItem()
self.deleteTable.setHorizontalHeaderItem(8, item)
self.desid = QtWidgets.QLineEdit(self.page_4)
self.desid.setGeometry(QtCore.QRect(250, 90, 221, 21))
self.desid.setObjectName("desid")
self.label_18 = QtWidgets.QLabel(self.page_4)
self.label_18.setGeometry(QtCore.QRect(150, 80, 91, 40))
self.label_18.setStyleSheet("font: 9pt \"FontAwesome\";")
self.label_18.setObjectName("label_18")
self.dename = QtWidgets.QLineEdit(self.page_4)
self.dename.setGeometry(QtCore.QRect(250, 130, 221, 21))
self.dename.setObjectName("dename")
self.label_19 = QtWidgets.QLabel(self.page_4)
self.label_19.setGeometry(QtCore.QRect(150, 120, 91, 41))
self.label_19.setStyleSheet("font: 9pt \"FontAwesome\";")
self.label_19.setObjectName("label_19")
self.deidcard = QtWidgets.QLineEdit(self.page_4)
self.deidcard.setGeometry(QtCore.QRect(250, 170, 221, 21))
self.deidcard.setObjectName("deidcard")
self.label_20 = QtWidgets.QLabel(self.page_4)
self.label_20.setGeometry(QtCore.QRect(150, 160, 81, 41))
self.label_20.setStyleSheet("font: 9pt \"FontAwesome\";")
self.label_20.setObjectName("label_20")
self.commitDe = QtWidgets.QPushButton(self.page_4)
self.commitDe.setGeometry(QtCore.QRect(240, 210, 93, 28))
self.commitDe.setStyleSheet("#commitDe{background:#CCFFCC;\n"
"border-radius:8px}\n"
"#commitDe:hover\n"
"{\n"
"background:#CCFF99\n"
"}")
self.commitDe.setObjectName("commitDe")
self.label_21 = QtWidgets.QLabel(self.page_4)
self.label_21.setGeometry(QtCore.QRect(210, 35, 211, 31))
font = QtGui.QFont()
font.setFamily("FontAwesome")
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.label_21.setFont(font)
self.label_21.setObjectName("label_21")
self.stackedWidget.addWidget(self.page_4)
self.listWidget = QtWidgets.QListWidget(self.centralwidget)
self.listWidget.setGeometry(QtCore.QRect(0, 200, 204, 400))
self.listWidget.setObjectName("listWidget")
item = QtWidgets.QListWidgetItem()
font = QtGui.QFont()
font.setFamily("FontAwesome")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
item.setFont(font)
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap("./pictures/staff5.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
item.setIcon(icon5)
self.listWidget.addItem(item)
item = QtWidgets.QListWidgetItem()
font = QtGui.QFont()
font.setFamily("FontAwesome")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
item.setFont(font)
icon6 = QtGui.QIcon()
icon6.addPixmap(QtGui.QPixmap("./pictures/staff2.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
item.setIcon(icon6)
self.listWidget.addItem(item)
item = QtWidgets.QListWidgetItem()
font = QtGui.QFont()
font.setFamily("FontAwesome")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
item.setFont(font)
icon7 = QtGui.QIcon()
icon7.addPixmap(QtGui.QPixmap("./pictures/staff4.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
item.setIcon(icon7)
self.listWidget.addItem(item)
item = QtWidgets.QListWidgetItem()
font = QtGui.QFont()
font.setFamily("FontAwesome")
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
item.setFont(font)
item.setIcon(icon5)
self.listWidget.addItem(item)
self.frame = QtWidgets.QFrame(self.centralwidget)
self.frame.setGeometry(QtCore.QRect(0, 0, 204, 211))
self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setObjectName("frame")
self.head = QtWidgets.QToolButton(self.frame)
self.head.setGeometry(QtCore.QRect(60, 20, 60, 60))
self.head.setText("")
self.head.setIcon(icon)
self.head.setIconSize(QtCore.QSize(60, 60))
self.head.setObjectName("head")
self.welcome = QtWidgets.QLabel(self.frame)
self.welcome.setGeometry(QtCore.QRect(30, 90, 110, 20))
self.welcome.setText("")
self.welcome.setAlignment(QtCore.Qt.AlignCenter)
self.welcome.setObjectName("welcome")
self.label_2 = QtWidgets.QLabel(self.frame)
self.label_2.setGeometry(QtCore.QRect(40, 140, 121, 16))
font = QtGui.QFont()
font.setPointSize(8)
self.label_2.setFont(font)
self.label_2.setObjectName("label_2")
self.Search = QtWidgets.QLineEdit(self.frame)
self.Search.setGeometry(QtCore.QRect(20, 170, 145, 25))
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(7)
self.Search.setFont(font)
self.Search.setStyleSheet("")
self.Search.setObjectName("Search")
self.toolButton = QtWidgets.QToolButton(self.frame)
self.toolButton.setGeometry(QtCore.QRect(170, 170, 21, 20))
self.toolButton.setStyleSheet("background-color:#2f4050;\n"
"border:0px;\n"
"\n"
"border-radius:5px")
self.toolButton.setText("")
self.toolButton.setIcon(icon1)
self.toolButton.setIconSize(QtCore.QSize(15, 15))
self.toolButton.setObjectName("toolButton")
self.role = QtWidgets.QLabel(self.frame)
self.role.setGeometry(QtCore.QRect(30, 120, 110, 15))
font = QtGui.QFont()
font.setPointSize(7)
self.role.setFont(font)
self.role.setText("")
self.role.setAlignment(QtCore.Qt.AlignCenter)
self.role.setObjectName("role")
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
self.stackedWidget.setCurrentIndex(1)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Install every user-visible string of the window.

        Qt-Designer-generated companion of setupUi (which calls it once).
        All literals are routed through QCoreApplication.translate so the
        UI can be localised; the captions themselves are Chinese
        (user-management screens: search / add / delete users).
        """
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        # Personal-information page: static labels and the sample values
        # shown in the s* display fields.
        self.name.setText(_translate("MainWindow", "csa "))
        self.label.setText(_translate("MainWindow", "姓名:"))
        self.label_3.setText(_translate("MainWindow", "性别:"))
        self.label_4.setText(_translate("MainWindow", "申请时间:"))
        self.label_5.setText(_translate("MainWindow", "权限:"))
        self.label_6.setText(_translate("MainWindow", "手机号:"))
        self.label_7.setText(_translate("MainWindow", "身份证号:"))
        self.sname.setText(_translate("MainWindow", "邵嘉毅"))
        self.ssex.setText(_translate("MainWindow", "男"))
        self.stime.setText(_translate("MainWindow", "2019-12-12"))
        self.srole.setText(_translate("MainWindow", "1"))
        self.sphone.setText(_translate("MainWindow", "2332121323"))
        self.sidcard.setText(_translate("MainWindow", "1111111111111111111"))
        self.label_8.setText(_translate("MainWindow", "用户号:"))
        self.sidcard_2.setText(_translate("MainWindow", "1"))
        # Column headers of the user-search result table (9 columns).
        item = self.searchTable.horizontalHeaderItem(0)
        item.setText(_translate("MainWindow", "用户编号"))
        item = self.searchTable.horizontalHeaderItem(1)
        item.setText(_translate("MainWindow", "姓名"))
        item = self.searchTable.horizontalHeaderItem(2)
        item.setText(_translate("MainWindow", "性别"))
        item = self.searchTable.horizontalHeaderItem(3)
        item.setText(_translate("MainWindow", "登记申请时间"))
        item = self.searchTable.horizontalHeaderItem(4)
        item.setText(_translate("MainWindow", "账户名"))
        item = self.searchTable.horizontalHeaderItem(5)
        item.setText(_translate("MainWindow", "密码"))
        item = self.searchTable.horizontalHeaderItem(6)
        item.setText(_translate("MainWindow", "权限"))
        item = self.searchTable.horizontalHeaderItem(7)
        item.setText(_translate("MainWindow", "身份证号"))
        item = self.searchTable.horizontalHeaderItem(8)
        item.setText(_translate("MainWindow", "手机号"))
        # Search page controls: search box, modify / delete buttons.
        self.searchName.setPlaceholderText(_translate("MainWindow", "搜索用户姓名"))
        self.label_74.setText(_translate("MainWindow", "选中部分修改为:"))
        self.modifyvalue.setPlaceholderText(_translate("MainWindow", "修改值"))
        self.commitTableModify.setText(_translate("MainWindow", "确认修改"))
        self.label_78.setText(_translate("MainWindow", "*选中表格内可以进行修改和删除操作"))
        self.commitTableDel.setText(_translate("MainWindow", "确认删除"))
        self.toolButton_2.setText(_translate("MainWindow", "查询用户"))
        # Add-user page: field labels and input placeholders.
        self.label_9.setText(_translate("MainWindow", "用户编号:"))
        self.label_10.setText(_translate("MainWindow", "账户名:"))
        self.label_11.setText(_translate("MainWindow", "用户姓名:"))
        self.label_12.setText(_translate("MainWindow", "密码:"))
        self.label_13.setText(_translate("MainWindow", "用户性别:"))
        self.label_14.setText(_translate("MainWindow", "权限:"))
        self.label_15.setText(_translate("MainWindow", "登记入职时间:"))
        self.label_16.setText(_translate("MainWindow", "身份证:"))
        self.label_17.setText(_translate("MainWindow", "手机号:"))
        self.inputsid.setPlaceholderText(_translate("MainWindow", "编号"))
        self.inputname.setPlaceholderText(_translate("MainWindow", "姓名"))
        self.inputuser.setPlaceholderText(_translate("MainWindow", "账号名"))
        self.inputpwd.setPlaceholderText(_translate("MainWindow", "密码"))
        self.inputrole.setPlaceholderText(_translate("MainWindow", "权限"))
        self.inputidcard.setPlaceholderText(_translate("MainWindow", "身份证"))
        self.inputphone.setPlaceholderText(_translate("MainWindow", "手机号"))
        self.toolButton_3.setText(_translate("MainWindow", "增添用户"))
        self.commitAdd.setText(_translate("MainWindow", "确认录入"))
        self.inputfemale.setText(_translate("MainWindow", "女"))
        self.inputmale.setText(_translate("MainWindow", "男"))
        self.toolButton_4.setText(_translate("MainWindow", "删除用户"))
        # Column headers of the delete-user table (same 9 columns).
        item = self.deleteTable.horizontalHeaderItem(0)
        item.setText(_translate("MainWindow", "用户编号"))
        item = self.deleteTable.horizontalHeaderItem(1)
        item.setText(_translate("MainWindow", "姓名"))
        item = self.deleteTable.horizontalHeaderItem(2)
        item.setText(_translate("MainWindow", "性别"))
        item = self.deleteTable.horizontalHeaderItem(3)
        item.setText(_translate("MainWindow", "登记入职时间"))
        item = self.deleteTable.horizontalHeaderItem(4)
        item.setText(_translate("MainWindow", "账户名"))
        item = self.deleteTable.horizontalHeaderItem(5)
        item.setText(_translate("MainWindow", "密码"))
        item = self.deleteTable.horizontalHeaderItem(6)
        item.setText(_translate("MainWindow", "权限"))
        item = self.deleteTable.horizontalHeaderItem(7)
        item.setText(_translate("MainWindow", "身份证号"))
        item = self.deleteTable.horizontalHeaderItem(8)
        item.setText(_translate("MainWindow", "手机号"))
        # Delete-user form: lookup fields and the confirm button.
        self.desid.setPlaceholderText(_translate("MainWindow", "编号"))
        self.label_18.setText(_translate("MainWindow", "用户编号:"))
        self.dename.setPlaceholderText(_translate("MainWindow", "姓名"))
        self.label_19.setText(_translate("MainWindow", "用户姓名:"))
        self.deidcard.setPlaceholderText(_translate("MainWindow", "身份证"))
        self.label_20.setText(_translate("MainWindow", "身份证:"))
        self.commitDe.setText(_translate("MainWindow", "确认删除"))
        self.label_21.setText(_translate("MainWindow", "选择要删除的用户:"))
        # Left-hand navigation list; sorting is suspended while the item
        # captions are (re)assigned so the visual order is preserved.
        __sortingEnabled = self.listWidget.isSortingEnabled()
        self.listWidget.setSortingEnabled(False)
        item = self.listWidget.item(0)
        item.setText(_translate("MainWindow", "  个人信息"))
        item = self.listWidget.item(1)
        item.setText(_translate("MainWindow", "  查询用户*"))
        item = self.listWidget.item(2)
        item.setText(_translate("MainWindow", "  增添用户*"))
        item = self.listWidget.item(3)
        item.setText(_translate("MainWindow", "  删除用户*"))
        self.listWidget.setSortingEnabled(__sortingEnabled)
        self.label_2.setText(_translate("MainWindow", "*表示需要最高权限"))
        self.Search.setPlaceholderText(_translate("MainWindow", "搜索"))
| 1.789063 | 2 |
dataset.py | ceyzaguirre4/mac-network-pytorch | 4 | 16859 | import os
import pickle
import numpy as np
from PIL import Image
import torch
from torch.utils.data import Dataset
from torchvision import transforms
import h5py
from transforms import Scale
class CLEVR(Dataset):
    """CLEVR question-answering dataset backed by precomputed image features.

    Items are read from a pickled index (``features/<split>.pkl``) and an
    HDF5 feature store (``features/<split>_features.hdf5``).  Each item
    yields the feature tensor, the tokenized question, its length, the
    answer, the question-family id and the sample index.
    """

    def __init__(self, root, split='train', transform=None):
        features_path = os.path.join(root, 'features')
        with open('{}/{}.pkl'.format(features_path, split), 'rb') as f:
            self.data = pickle.load(f)

        self.root = root
        self.split = split
        # NOTE: `transform` is accepted for API compatibility but is not
        # applied — items are served from precomputed features, not images.
        self.h = h5py.File('{}/{}_features.hdf5'.format(features_path, split), 'r')
        self.img = self.h['data']

    def close(self):
        """Release the underlying HDF5 file handle."""
        self.h.close()

    def __getitem__(self, index):
        imgfile, question, answer, family = self.data[index]
        # The numeric image id is the last '_'-separated token of the
        # filename with its 4-character extension stripped
        # (e.g. "..._000123.png" -> 123); it indexes the HDF5 feature array.
        image_id = int(imgfile.rsplit('_', 1)[1][:-4])
        features = torch.from_numpy(self.img[image_id])

        return features, question, len(question), answer, family, index

    def __len__(self):
        return len(self.data)
# Augmentation pipeline for raw CLEVR images: resize to 224x224, pad by 4,
# random-crop back to 224x224, then convert to a tensor normalised from
# [0, 1] to [-1, 1] per channel (mean 0.5, std 0.5).
_pipeline = [
    Scale([224, 224]),
    transforms.Pad(4),
    transforms.RandomCrop([224, 224]),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5],
                         std=[0.5, 0.5, 0.5]),
]
transform = transforms.Compose(_pipeline)
def collate_data(batch):
    """Collate CLEVR samples into one batch, sorted by question length.

    Samples are ordered by descending question length (as required by
    packed-sequence RNN consumers) and questions are zero-padded on the
    right to the longest question in the batch.

    :param batch: list of (image, question, length, answer, family, index)
    :return: (stacked images, padded question LongTensor, lengths,
              answer LongTensor, families, indices), all in sorted order
    """
    longest = max(len(sample[1]) for sample in batch)
    padded = np.zeros((len(batch), longest), dtype=np.int64)
    ordered = sorted(batch, key=lambda sample: len(sample[1]), reverse=True)

    images, lengths, answers, families, idxs = [], [], [], [], []
    for row, (image, question, _, answer, family, idx) in enumerate(ordered):
        qlen = len(question)
        padded[row, :qlen] = question
        images.append(image)
        lengths.append(qlen)
        answers.append(answer)
        families.append(family)
        idxs.append(idx)

    return (torch.stack(images), torch.from_numpy(padded),
            lengths, torch.LongTensor(answers), families, idxs)
| 2.296875 | 2 |
tests/distributions/test_log_normal.py | thomasaarholt/xgboost-distribution | 17 | 16860 | import pytest
import numpy as np
import pandas as pd
from xgboost_distribution.distributions import LogNormal
@pytest.fixture
def lognormal():
    """Provide a fresh LogNormal distribution instance for each test."""
    return LogNormal()
def test_target_validation(lognormal):
    """Strictly positive targets must pass check_target without raising."""
    strictly_positive = np.array([0.5, 1, 4, 5, 10])
    lognormal.check_target(strictly_positive)
@pytest.mark.parametrize(
    "invalid_target",
    [np.array([0, 1.2]), pd.Series([-1.1, 0.4, 2.3])],
)
def test_target_validation_raises(lognormal, invalid_target):
    """Targets containing zero or negative values must be rejected.

    Covers both numpy arrays and pandas Series as target containers.
    """
    with pytest.raises(ValueError):
        lognormal.check_target(invalid_target)
@pytest.mark.parametrize(
    "y, params, natural_gradient, expected_grad",
    [
        # Natural-gradient path.
        (
            np.array([1, 1]),
            np.array([[np.log(1), 2], [1, 0]]),
            True,
            np.array([[0, 0.5], [1, 0]]),
        ),
        # Plain-gradient path: same inputs, different expected gradient.
        (
            np.array([1, 1]),
            np.array([[np.log(1), 2], [1, 0]]),
            False,
            np.array([[0, 1], [1, 0]]),
        ),
    ],
)
def test_gradient_calculation(lognormal, y, params, natural_gradient, expected_grad):
    """Gradients match hand-computed values for both gradient modes."""
    grad, hess = lognormal.gradient_and_hessian(
        y, params, natural_gradient=natural_gradient
    )
    np.testing.assert_array_equal(grad, expected_grad)
def test_loss(lognormal):
    """y = 0 lies outside log-normal support, so the loss is infinite."""
    loss_name, loss_value = lognormal.loss(
        # fmt: off
        y=np.array([0, ]),
        params=np.array([[1, 0], ]),
    )
    assert loss_name == "LogNormalError"
    assert loss_value == np.inf
| 2.53125 | 3 |
script/forecasting/forecaster.py | bialesdaniel/noisepage | 0 | 16861 | #!/usr/bin/env python3
"""
Main script for workload forecasting.
Example usage:
- Generate data (runs OLTP benchmark on the built database) and perform training, and save the trained model
./forecaster --gen_data --models=LSTM --model_save_path=model.pickle
- Use the trained models (LSTM) to generate predictions.
./forecaster --model_load_path=model.pickle --test_file=test_query.csv --test_model=LSTM
TODO:
- Better metrics for training and prediction (currently not focusing on models' accuracy yet)
- Multiple models (currently only simple-one-layer-untuned LSTM used)
- API and interaction with Pilot
"""
import argparse
import json
import pickle
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ..testing.self_driving.constants import (DEFAULT_ITER_NUM,
DEFAULT_QUERY_TRACE_FILE,
DEFAULT_TPCC_WEIGHTS,
DEFAULT_WORKLOAD_PATTERN)
from ..testing.self_driving.forecast import gen_oltp_trace
from ..testing.util.constants import LOG
from .cluster import QueryCluster
from .data_loader import DataLoader
from .models import ForecastModel, get_models
# Interval duration for time-series aggregation, in microseconds.
INTERVAL_MICRO_SEC = 500000
# Number of microseconds per second.
MICRO_SEC_PER_SEC = 1000000
# Number of data points in a sequence (a 10-second window of intervals).
SEQ_LEN = 10 * MICRO_SEC_PER_SEC // INTERVAL_MICRO_SEC
# Number of data points in the horizon (a 30-second look-ahead).
HORIZON_LEN = 30 * MICRO_SEC_PER_SEC // INTERVAL_MICRO_SEC
# Number of data points in the testing set (two sequences plus one horizon).
EVAL_DATA_SIZE = 2 * SEQ_LEN + HORIZON_LEN
# Command-line interface.  Fix: the --horizon_len and --eval_size help
# strings were garbled ("how many further in the a sequence ...",
# "Length of the ... data set length ...").
argp = argparse.ArgumentParser(description="Query Load Forecaster")

# Generation stage related options
argp.add_argument(
    "--gen_data",
    default=False,
    action="store_true",
    help="If specified, OLTP benchmark would be downloaded and built to generate the query trace data")
argp.add_argument(
    "--tpcc_weight",
    type=str,
    default=DEFAULT_TPCC_WEIGHTS,
    help="Workload weights for the TPCC")
argp.add_argument(
    "--tpcc_rates",
    nargs="+",
    default=DEFAULT_WORKLOAD_PATTERN,
    help="Rate array for the TPCC workload")
argp.add_argument(
    "--pattern_iter",
    type=int,
    default=DEFAULT_ITER_NUM,
    help="Number of iterations the DEFAULT_WORKLOAD_PATTERN should be run")
argp.add_argument("--trace_file", default=DEFAULT_QUERY_TRACE_FILE,
                  help="Path to the query trace file", metavar="FILE")

# Model specific
argp.add_argument("--models", nargs='+', type=str, help="Models to use")
argp.add_argument("--models_config", type=str, metavar="FILE",
                  help="Models and init arguments JSON config file")
argp.add_argument("--seq_len", type=int, default=SEQ_LEN,
                  help="Length of one sequence in number of data points")
argp.add_argument(
    "--horizon_len",
    type=int,
    default=HORIZON_LEN,
    help="Length of the horizon in number of data points, "
         "i.e. how far ahead of a sequence the prediction target lies")

# Training stage related options
argp.add_argument("--model_save_path", metavar="FILE",
                  help="Where the model trained will be stored")
argp.add_argument(
    "--eval_size",
    type=int,
    default=EVAL_DATA_SIZE,
    help="Length of the evaluation data set in number of data points")
argp.add_argument("--lr", type=float, default=0.001, help="Learning rate")
argp.add_argument("--epochs", type=int, default=10,
                  help="Number of epochs for training")

# Testing stage related options
argp.add_argument(
    "--model_load_path",
    default="model.pickle",
    metavar="FILE",
    help="Where the model should be loaded from")
argp.add_argument(
    "--test_file",
    help="Path to the test query trace file",
    metavar="FILE")
argp.add_argument(
    "--test_model",
    type=str,
    help="Model to be used for forecasting"
)
class Forecaster:
    """
    A wrapper around various ForecastModels that prepares training and
    evaluation data from a query trace and drives fit / eval / predict.
    """
    # Indexes into the (train, test) tuples held in self._cluster_data.
    TRAIN_DATA_IDX = 0
    TEST_DATA_IDX = 1

    def __init__(
            self,
            trace_file: str,
            interval_us: int = INTERVAL_MICRO_SEC,
            test_mode: bool = False,
            eval_size: int = EVAL_DATA_SIZE,
            seq_len: int = SEQ_LEN,
            horizon_len: int = HORIZON_LEN) -> None:
        """
        Initializer
        :param trace_file: trace file for the forecaster
        :param interval_us: number of microseconds for the time-series interval
        :param test_mode: True if the loader is for testing
        :param eval_size: number of data points used for evaluation (testing)
        :param seq_len: length of a sequence
        :param horizon_len: horizon length
        """
        self._seq_len = seq_len
        self._horizon_len = horizon_len
        self._test_mode = test_mode
        self._eval_data_size = eval_size

        # Per-instance memoization of generated sequences, keyed by
        # (cluster_id, test_mode, with_label).  A plain dict replaces the
        # previous functools.lru_cache on the method: lru_cache on an
        # instance method hashes `self` and keeps every instance alive for
        # the lifetime of the cache (memory leak, flake8-bugbear B019).
        self._seq_cache: Dict[Tuple[int, bool, bool], List] = {}

        self._data_loader = DataLoader(
            query_trace_file=trace_file,
            interval_us=interval_us)

        self._make_clusters()

    def _make_clusters(self) -> None:
        """
        Extract data from the DataLoader and put it into different clusters.
        :return: None
        """
        # FIXME:
        # Assuming all the queries in the current trace file are from
        # the same cluster for now. A future TODO would have a clustering
        # process that separates traces into multiple clusters
        self._clusters = [QueryCluster(self._data_loader.get_ts_data())]

        self._cluster_data = []
        for cluster in self._clusters:
            # Aggregated time-series from the cluster
            data = cluster.get_timeseries()
            train_raw_data, test_raw_data = self._split_data(data)
            self._cluster_data.append((train_raw_data, test_raw_data))

    def _split_data(self, data: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """
        Split the raw data into a training set and a testing (evaluation) set.
        :param data: all the raw data
        :return: training and testing raw data sets
        :raises ValueError: if there are fewer data points than the
            requested evaluation size
        """
        if self._test_mode:
            # In test mode the whole trace is the evaluation set.
            self._test_set_size = len(data)
        else:
            self._test_set_size = self._eval_data_size
        if self._test_set_size > len(data):
            raise ValueError(
                "Eval data size is too small. Not enough data points.")
        split_idx = len(data) - self._test_set_size

        # First part as the training set
        train_raw_data = data[:split_idx]
        # Last part as the testing set
        test_raw_data = data[split_idx:]
        return train_raw_data, test_raw_data

    def _make_seqs(self,
                   input_data: np.ndarray,
                   start: int,
                   end: int,
                   with_label: bool = False) -> List[Union[Tuple[np.ndarray,
                                                                 np.ndarray],
                                                           np.ndarray]]:
        """
        Create time-series sequences of fixed sequence length from a
        continuous range of time-series data.
        :param input_data: input time-series
        :param start: start index (inclusive) of the first sequence to be made
        :param end: end index (exclusive) of the last sequence to be made
        :param with_label: True if the label in a certain horizon is added
        :return: sequences of fixed length if with_label is False, or a list
            of (sequence, label) pairs if with_label is True
        :raises IndexError: if the range is too short to produce any sequence
        """
        seq_len = self._seq_len
        horizon = self._horizon_len

        seq_start = start
        if with_label:
            # Reserve space for the horizon so the label stays in range
            seq_end = end - seq_len - horizon
        else:
            # Use all data for prediction
            seq_end = end - seq_len

        if seq_end <= seq_start:
            # Informative message (the original raise used an f-string with
            # no placeholders).
            raise IndexError(
                f"Not enough data points to make sequences: start={start}, "
                f"end={end}, seq_len={seq_len}, horizon={horizon}, "
                f"with_label={with_label}")

        seqs = []
        for i in range(seq_start, seq_end):
            seq = input_data[i:i + seq_len].reshape(-1, 1)

            # Look beyond the horizon to get the label
            if with_label:
                label_i = i + seq_len + horizon
                label = input_data[label_i: label_i + 1].reshape(1, -1)
                seqs.append((seq, label))
            else:
                seqs.append(seq)

        return seqs

    def _cluster_seqs(self,
                      cluster_id: int,
                      test_mode: bool = False,
                      with_label: bool = False) -> List[Union[Tuple[np.ndarray,
                                                                    np.ndarray],
                                                              np.ndarray]]:
        """
        Memoized wrapper over _make_seqs for a cluster's train or test data.
        :param cluster_id: cluster id
        :param test_mode: True to use the test dataset, False for training
        :param with_label: True if labels (horizon values) are also added
        :return: sequences of fixed length if with_label is False, or a list
            of (sequence, label) pairs if with_label is True
        """
        key = (cluster_id, test_mode, with_label)
        if key not in self._seq_cache:
            if test_mode:
                input_data = self._cluster_data[cluster_id][self.TEST_DATA_IDX]
            else:
                input_data = self._cluster_data[cluster_id][self.TRAIN_DATA_IDX]
            self._seq_cache[key] = self._make_seqs(
                input_data,
                0,
                len(input_data),
                with_label=with_label)
        return self._seq_cache[key]

    def train(self, models_kwargs: Dict) -> List[Dict[str, ForecastModel]]:
        """
        Fit one set of models per cluster, evaluating each after fitting.
        :param models_kwargs: a dictionary of models' init arguments
        :return: for each cluster, a dict mapping model name -> fitted model
            (the previous List[List[ForecastModel]] annotation contradicted
            the actual return value)
        """
        models = []
        for cid in range(len(self._cluster_data)):
            cluster_models = get_models(models_kwargs)
            train_seqs = self._cluster_seqs(
                cid, test_mode=False, with_label=True)

            # The model name is not needed while fitting, only the model.
            for model in cluster_models.values():
                model.fit(train_seqs)
                self.eval(cid, model)
            models.append(cluster_models)
        return models

    def eval(self, cid: int, model: ForecastModel) -> None:
        """
        Evaluate a fitted model on the test dataset.
        :param cid: cluster id
        :param model: model to evaluate
        """
        eval_seqs = self._cluster_seqs(cid, test_mode=True, with_label=True)
        preds = []
        gts = []
        for seq, label in eval_seqs:
            pred = model.predict(seq)
            preds.append(pred)
            gts.append(label.item())

        # FIXME:
        # simple L2 norm for comparing the prediction and results
        l2norm = np.linalg.norm(np.array(preds) - np.array(gts))
        LOG.info(
            f"[{model.name}] has L2 norm(prediction, ground truth) = {l2norm}")

    def predict(self, cid: int, model: ForecastModel) -> Dict:
        """
        Output predictions on the test dataset and segregate the predicted
        cluster time-series into individual queries.
        :param cid: cluster id
        :param model: model to use
        :return: dict of {query_id -> time-series}
        """
        test_seqs = self._cluster_seqs(cid, test_mode=True, with_label=False)
        preds = [model.predict(seq) for seq in test_seqs]
        query_preds = self._clusters[cid].segregate(preds)
        return query_preds
def parse_model_config(model_names: Optional[List[str]],
                       models_config: Optional[str]) -> Dict:
    """
    Build the merged model-kwargs mapping from CLI names and a JSON file.

    :param model_names: List of model names to train with default (empty)
        kwargs; may be None, which is treated as an empty list.
    :param models_config: Path to a JSON file mapping model names to their
        init kwargs, or None.  Entries here override the empty defaults.
    :return: Merged model config Dict of {model_name -> kwargs dict}
    :raises ValueError: if no model ends up selected.
    """
    # BUGFIX: the annotation allows model_names to be None, but iterating
    # None raised TypeError.  Treat None the same as an empty list so the
    # "no models" case falls through to the explicit ValueError below.
    model_kwargs = {model_name: {} for model_name in (model_names or [])}
    if models_config is not None:
        with open(models_config, 'r') as f:
            custom_config = json.load(f)
        # Simple and non-recursive merging of options
        model_kwargs.update(custom_config)
    if len(model_kwargs) < 1:
        raise ValueError("At least 1 model needs to be used.")
    return model_kwargs
if __name__ == "__main__":
    args = argp.parse_args()
    if args.test_file is None:
        # Training mode: parse model arguments, optionally generate an
        # OLTP trace, train per-cluster models and optionally pickle them.
        models_kwargs = parse_model_config(args.models, args.models_config)
        # Generate OLTP trace file
        if args.gen_data:
            gen_oltp_trace(
                tpcc_weight=args.tpcc_weight,
                tpcc_rates=args.tpcc_rates,
                pattern_iter=args.pattern_iter)

            trace_file = DEFAULT_QUERY_TRACE_FILE
        else:
            trace_file = args.trace_file

        forecaster = Forecaster(
            trace_file=trace_file,
            interval_us=INTERVAL_MICRO_SEC,
            seq_len=args.seq_len,
            eval_size=args.eval_size,
            horizon_len=args.horizon_len)

        models = forecaster.train(models_kwargs)

        # Save the model
        if args.model_save_path:
            with open(args.model_save_path, "wb") as f:
                pickle.dump(models, f)
    else:
        # Inference mode: load previously pickled models and forecast on
        # the supplied test trace.
        # Do inference on a trained model
        with open(args.model_load_path, "rb") as f:
            models = pickle.load(f)

        forecaster = Forecaster(
            trace_file=args.test_file,
            test_mode=True,
            interval_us=INTERVAL_MICRO_SEC,
            seq_len=args.seq_len,
            eval_size=args.eval_size,
            horizon_len=args.horizon_len)

        # FIXME:
        # Assuming all the queries in the current trace file are from
        # the same cluster for now
        query_pred = forecaster.predict(0, models[0][args.test_model])

        # TODO:
        # How are we consuming predictions?
        for qid, ts in query_pred.items():
            LOG.info(f"[Query: {qid}] pred={ts[:10]}")
# | 2.40625 | 2 |
# tests/test_master/test_jobtypes_api.py | guidow/pyfarm-master | 0 | 16862 | <gh_stars>0
# No shebang line, this module is meant to be imported
#
# Copyright 2013 <NAME>
# Copyright 2014 Ambient Entertainment GmbH & Co. KG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from json import dumps
# test class must be loaded first
from pyfarm.master.testutil import BaseTestCase
BaseTestCase.build_environment()
from pyfarm.master.application import get_api_blueprint
from pyfarm.master.entrypoints import load_api
from pyfarm.models.jobtype import JobType, JobTypeVersion
code = """from pyfarm.jobtypes.core.jobtype import JobType
class TestJobType(JobType):
def get_command(self):
return "/usr/bin/touch"
def get_arguments(self):
return [os.path.join(
self.assignment_data["job"]["data"]["path"],
"%04d" % self.assignment_data[\"tasks\"][0][\"frame\"])]
"""
class TestJobTypeAPI(BaseTestCase):
    """Integration tests for the /api/v1/jobtypes/ REST endpoints."""

    def setup_app(self):
        # Register the API blueprint on the test application so the
        # /api/v1 routes under test are actually routable.
        super(TestJobTypeAPI, self).setup_app()
        self.api = get_api_blueprint()
        self.app.register_blueprint(self.api)
        load_api(self.app, self.api)
    def test_jobtype_schema(self):
        """GET /jobtypes/schema returns the merged JobType + JobTypeVersion schema."""
        response = self.client.get("/api/v1/jobtypes/schema")
        self.assert_ok(response)
        schema = JobType.to_schema()
        schema.update(JobTypeVersion.to_schema())
        self.assertEqual(response.json, schema)
    def test_jobtype_post(self):
        """POST a jobtype, then GET it back both by name and by id."""
        response1 = self.client.post(
            "/api/v1/jobtypes/",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 1,
                    "code": code
                    }))
        self.assert_created(response1)
        id = response1.json['id']

        response2 = self.client.get("/api/v1/jobtypes/TestJobType")
        self.assert_ok(response2)
        self.assertEqual(
            response2.json, {
                "batch_contiguous": True,
                "no_automatic_start_time": False,
                "classname": None,
                "code": code,
                "description": "Jobtype for testing inserts and queries",
                "id": id,
                "max_batch": 1,
                "name": "TestJobType",
                "software_requirements": [],
                "version": 1,
                "fail_body": None,
                "fail_subject": None,
                "success_body": None,
                "success_subject": None,
                "supports_tiling": False
                })

        response3 = self.client.get("/api/v1/jobtypes/%s" % id)
        self.assert_ok(response3)
        self.assertEqual(
            response3.json, {
                "batch_contiguous": True,
                "no_automatic_start_time": False,
                "classname": None,
                "code": code,
                "description": "Jobtype for testing inserts and queries",
                "id": id,
                "max_batch": 1,
                "name": "TestJobType",
                "software_requirements": [],
                "version": 1,
                "fail_body": None,
                "fail_subject": None,
                "success_body": None,
                "success_subject": None,
                "supports_tiling": False
                })
    def test_jobtype_post_empty_max_batch(self):
        """POSTing max_batch=None is accepted and round-trips as null."""
        response1 = self.client.post(
            "/api/v1/jobtypes/",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": None,
                    "code": code
                    }))
        self.assert_created(response1)
        id = response1.json['id']

        response2 = self.client.get("/api/v1/jobtypes/TestJobType")
        self.assert_ok(response2)
        self.assertEqual(
            response2.json, {
                "batch_contiguous": True,
                "no_automatic_start_time": False,
                "classname": None,
                "code": code,
                "description": "Jobtype for testing inserts and queries",
                "id": id,
                "max_batch": None,
                "name": "TestJobType",
                "software_requirements": [],
                "version": 1,
                "fail_body": None,
                "fail_subject": None,
                "success_body": None,
                "success_subject": None,
                "supports_tiling": False
                })
    def test_jobtype_post_with_requirements(self):
        """POST a jobtype with software requirements and verify they round-trip."""
        response1 = self.client.post(
            "/api/v1/software/",
            content_type="application/json",
            data=dumps({
                    "software": "foo",
                    "versions": [
                        {"version": "1.0"},
                        {"version": "1.1"}
                    ]
                }))
        self.assert_created(response1)
        software_id = response1.json['id']
        software_min_version_id = response1.json["versions"][0]["id"]
        software_max_version_id = response1.json["versions"][1]["id"]

        response2 = self.client.post(
            "/api/v1/jobtypes/",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 1,
                    "code": code,
                    "software_requirements": [{
                        "software": "foo",
                        "min_version": "1.0",
                        "max_version": "1.1"
                        }]
                    }))
        self.assert_created(response2)
        id = response2.json['id']

        response3 = self.client.get("/api/v1/jobtypes/TestJobType")
        self.assert_ok(response3)
        self.assertEqual(
            response3.json, {
                "batch_contiguous": True,
                "classname": None,
                "no_automatic_start_time": False,
                "code": code,
                "description": "Jobtype for testing inserts and queries",
                "id": id,
                "max_batch": 1,
                "name": "TestJobType",
                "software_requirements": [{
                    'max_version': '1.1',
                    'max_version_id': software_max_version_id,
                    'min_version': '1.0',
                    'min_version_id': software_min_version_id,
                    'software': 'foo',
                    'software_id': software_id
                    }],
                "version": 1,
                "fail_body": None,
                "fail_subject": None,
                "success_body": None,
                "success_subject": None,
                "supports_tiling": False
                })
    def test_jobtype_post_with_bad_requirements(self):
        """Malformed or unknown software requirements are rejected (400/404)."""
        response1 = self.client.post(
            "/api/v1/jobtypes/",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 1,
                    "code": code,
                    "software_requirements": [{
                        "hardware": "bar"
                        }]
                    }))
        self.assert_bad_request(response1)

        response2 = self.client.post(
            "/api/v1/jobtypes/",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 1,
                    "code": code,
                    "software_requirements": [{
                        "software": "unknown_software"
                        }]
                    }))
        self.assert_not_found(response2)
    def test_jobtype_post_conflict(self):
        """POSTing the same jobtype name twice yields 409 Conflict."""
        response1 = self.client.post(
            "/api/v1/jobtypes/",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 1,
                    "code": code
                    }))
        self.assert_created(response1)
        response2 = self.client.post(
            "/api/v1/jobtypes/",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 1,
                    "code": code
                    }))
        self.assert_conflict(response2)
    def test_jobtypes_list(self):
        """GET /jobtypes/ lists registered jobtypes as {id, name} entries."""
        response1 = self.client.post(
            "/api/v1/jobtypes/",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "code": code
                    }))
        self.assert_created(response1)
        id = response1.json['id']

        response2 = self.client.get("/api/v1/jobtypes/")
        self.assert_ok(response2)
        self.assertEqual(
            response2.json, [
                {
                    "id": id,
                    "name": "TestJobType"
                }
            ])
    def test_jobtype_post_with_no_name(self):
        """POST without a name is a 400 Bad Request."""
        response1 = self.client.post(
            "/api/v1/jobtypes/",
            content_type="application/json",
            data=dumps({
                    "description": "Jobtype for testing inserts and queries",
                    "code": code
                    }))
        self.assert_bad_request(response1)
    def test_jobtype_post_with_no_code(self):
        """POST without code is a 400 Bad Request."""
        response1 = self.client.post(
            "/api/v1/jobtypes/",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries"
                    }))
        self.assert_bad_request(response1)
    def test_jobtype_post_with_additional_keys(self):
        """POST with unexpected keys is a 400 Bad Request."""
        response1 = self.client.post(
            "/api/v1/jobtypes/",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "code": code,
                    "unknown_key": 42
                    }))
        self.assert_bad_request(response1)
    def test_jobtype_get_unknown(self):
        """GET of a nonexistent jobtype is a 404."""
        response1 = self.client.get("/api/v1/jobtypes/unknown_jobtype")
        self.assert_not_found(response1)
    def test_jobtype_put(self):
        """PUT creates a jobtype when the name does not exist yet."""
        response1 = self.client.put(
            "/api/v1/jobtypes/TestJobType",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 1,
                    "code": code
                    }))
        self.assert_created(response1)
        id = response1.json['id']

        response2 = self.client.get("/api/v1/jobtypes/TestJobType")
        self.assert_ok(response2)
        self.assertEqual(
            response2.json, {
                "batch_contiguous": True,
                "no_automatic_start_time": False,
                "classname": None,
                "code": code,
                "description": "Jobtype for testing inserts and queries",
                "id": id,
                "max_batch": 1,
                "name": "TestJobType",
                "software_requirements": [],
                "version": 1,
                "fail_body": None,
                "fail_subject": None,
                "success_body": None,
                "success_subject": None,
                "supports_tiling": False
                })
    def test_jobtype_put_overwrite(self):
        """A second PUT to the same jobtype creates a new version (2)."""
        response1 = self.client.put(
            "/api/v1/jobtypes/TestJobType",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 1,
                    "code": code
                    }))
        self.assert_created(response1)
        id = response1.json['id']

        response2 = self.client.put(
            "/api/v1/jobtypes/%s" % id,
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing (updated)",
                    "max_batch": 1,
                    "code": code
                    }))
        self.assert_created(response2)

        response3 = self.client.get("/api/v1/jobtypes/%s" % id)
        self.assert_ok(response3)
        self.assertEqual(
            response3.json, {
                "batch_contiguous": True,
                "no_automatic_start_time": False,
                "classname": None,
                "code": code,
                "description": "Jobtype for testing (updated)",
                "id": id,
                "max_batch": 1,
                "name": "TestJobType",
                "software_requirements": [],
                "version": 2,
                "fail_body": None,
                "fail_subject": None,
                "success_body": None,
                "success_subject": None,
                "supports_tiling": False
                })
    def test_jobtype_put_unknown_keys(self):
        """PUT with unexpected keys is a 400 Bad Request."""
        response1 = self.client.put(
            "/api/v1/jobtypes/TestJobType",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 1,
                    "code": code,
                    "unknown_key": 42
                    }))
        self.assert_bad_request(response1)
    def test_jobtype_put_with_no_name(self):
        """PUT without a name is a 400 Bad Request."""
        response1 = self.client.put(
            "/api/v1/jobtypes/TestJobType",
            content_type="application/json",
            data=dumps({
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 1,
                    "code": code,
                    "unknown_key": 42
                    }))
        self.assert_bad_request(response1)
    def test_jobtype_put_with_requirements(self):
        """PUT a jobtype with software requirements and verify them on GET."""
        response1 = self.client.post(
            "/api/v1/software/",
            content_type="application/json",
            data=dumps({
                    "software": "foo",
                    "versions": [
                        {"version": "1.0"},
                        {"version": "1.1"}
                    ]
                }))
        self.assert_created(response1)
        software_id = response1.json['id']
        software_min_version_id = response1.json["versions"][0]["id"]
        software_max_version_id = response1.json["versions"][1]["id"]

        response2 = self.client.put(
            "/api/v1/jobtypes/TestJobType",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 1,
                    "code": code,
                    "software_requirements": [
                        {
                            "software": "foo",
                            "min_version": "1.0",
                            "max_version": "1.1"
                        }
                    ]
                }))
        self.assert_created(response2)
        id = response2.json['id']

        response3 = self.client.get("/api/v1/jobtypes/TestJobType")
        self.assert_ok(response3)
        self.assertEqual(
            response3.json, {
                "batch_contiguous": True,
                "no_automatic_start_time": False,
                "classname": None,
                "code": code,
                "description": "Jobtype for testing inserts and queries",
                "id": id,
                "max_batch": 1,
                "name": "TestJobType",
                "software_requirements": [
                    {
                        'max_version': '1.1',
                        'max_version_id': software_max_version_id,
                        'min_version': '1.0',
                        'min_version_id': software_min_version_id,
                        'software': 'foo',
                        'software_id': software_id
                    }
                ],
                "version": 1,
                "fail_body": None,
                "fail_subject": None,
                "success_body": None,
                "success_subject": None,
                "supports_tiling": False
                })
    def test_jobtype_put_with_requirements_not_list(self):
        """software_requirements must be a list; other types are a 400."""
        response1 = self.client.put(
            "/api/v1/jobtypes/TestJobType",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 1,
                    "code": code,
                    "software_requirements": 42
                }))
        self.assert_bad_request(response1)
    def test_jobtype_put_with_requirement_not_dict(self):
        """Each software requirement must be a dict; other types are a 400."""
        response1 = self.client.put(
            "/api/v1/jobtypes/TestJobType",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 1,
                    "code": code,
                    "software_requirements": [42]
                }))
        self.assert_bad_request(response1)
    def test_jobtype_put_with_requirement_unknown_software(self):
        """Requirements referencing unregistered software yield a 404."""
        response1 = self.client.put(
            "/api/v1/jobtypes/TestJobType",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 1,
                    "code": code,
                    "software_requirements": [
                        {
                            "software": "foo",
                            "min_version": "1.0",
                            "max_version": "1.1"
                        }
                    ]
                }))
        self.assert_not_found(response1)
    def test_jobtype_put_with_requirements_unknown_sw_version(self):
        """Requirements with unknown min/max software versions yield a 404."""
        response1 = self.client.post(
            "/api/v1/software/",
            content_type="application/json",
            data=dumps({
                    "software": "foo"
                }))
        self.assert_created(response1)

        response2 = self.client.put(
            "/api/v1/jobtypes/TestJobType",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 1,
                    "code": code,
                    "software_requirements": [
                        {
                            "software": "foo",
                            "min_version": "1.1"
                        }
                    ]
                }))
        self.assert_not_found(response2)

        response3 = self.client.put(
            "/api/v1/jobtypes/TestJobType",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 1,
                    "code": code,
                    "software_requirements": [
                        {
                            "software": "foo",
                            "max_version": "1.1"
                        }
                    ]
                }))
        self.assert_not_found(response3)
    def test_jobtype_put_with_requirements_unknown_keys(self):
        """A requirement dict with unexpected keys is a 400."""
        response1 = self.client.post(
            "/api/v1/software/",
            content_type="application/json",
            data=dumps({
                    "software": "foo"
                }))
        self.assert_created(response1)

        response2 = self.client.put(
            "/api/v1/jobtypes/TestJobType",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 1,
                    "code": code,
                    "software_requirements": [
                        {
                            "software": "foo",
                            "unknown_key": 42
                        }
                    ]
                }))
        self.assert_bad_request(response2)
    def test_jobtype_put_with_requirements_missing_keys(self):
        """An empty requirement dict (no software key) is a 400."""
        response1 = self.client.put(
            "/api/v1/jobtypes/TestJobType",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 1,
                    "code": code,
                    "software_requirements": [
                        {}
                    ]
                }))
        self.assert_bad_request(response1)
    def test_jobtype_put_retain_requirements(self):
        """A PUT omitting software_requirements carries them over to the new version."""
        response1 = self.client.post(
            "/api/v1/software/",
            content_type="application/json",
            data=dumps({
                    "software": "foo",
                    "versions": [
                        {"version": "1.0"},
                        {"version": "1.1"}
                    ]
                }))
        self.assert_created(response1)
        software_id = response1.json['id']
        software_min_version_id = response1.json["versions"][0]["id"]
        software_max_version_id = response1.json["versions"][1]["id"]

        response2 = self.client.put(
            "/api/v1/jobtypes/TestJobType",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 1,
                    "code": code,
                    "software_requirements": [
                        {
                            "software": "foo",
                            "min_version": "1.0",
                            "max_version": "1.1"
                        }
                    ]
                }))
        self.assert_created(response2)
        id = response2.json['id']

        response3 = self.client.put(
            "/api/v1/jobtypes/TestJobType",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing (updated)",
                    "max_batch": 1,
                    "code": code
                }))
        self.assert_created(response3)

        response4 = self.client.get("/api/v1/jobtypes/TestJobType")
        self.assert_ok(response4)
        self.assertEqual(
            response4.json, {
                "batch_contiguous": True,
                "no_automatic_start_time": False,
                "classname": None,
                "code": code,
                "description": "Jobtype for testing (updated)",
                "id": id,
                "max_batch": 1,
                "name": "TestJobType",
                "software_requirements": [
                    {
                        'max_version': '1.1',
                        'max_version_id': software_max_version_id,
                        'min_version': '1.0',
                        'min_version_id': software_min_version_id,
                        'software': 'foo',
                        'software_id': software_id
                    }
                ],
                "version": 2,
                "fail_body": None,
                "fail_subject": None,
                "success_body": None,
                "success_subject": None,
                "supports_tiling": False
                })
    def test_jobtype_delete(self):
        """DELETE by name removes the jobtype; subsequent GETs are 404."""
        response1 = self.client.post(
            "/api/v1/software/",
            content_type="application/json",
            data=dumps({
                    "software": "foo",
                    "versions": [
                        {"version": "1.0"},
                        {"version": "1.1"}
                    ]
                }))
        self.assert_created(response1)

        response2 = self.client.put(
            "/api/v1/jobtypes/TestJobType",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 1,
                    "code": code,
                    "software_requirements": [
                        {
                            "software": "foo",
                            "min_version": "1.0",
                            "max_version": "1.1"
                        }
                    ]
                }))
        self.assert_created(response2)
        id = response2.json['id']

        response3 = self.client.delete("/api/v1/jobtypes/TestJobType")
        self.assert_no_content(response3)

        response4 = self.client.get("/api/v1/jobtypes/TestJobType")
        self.assert_not_found(response4)
        response5 = self.client.get("/api/v1/jobtypes/%s" % id)
        self.assert_not_found(response5)
    def test_jobtype_delete_by_id(self):
        """DELETE by numeric id removes the jobtype; subsequent GETs are 404."""
        response1 = self.client.post(
            "/api/v1/software/",
            content_type="application/json",
            data=dumps({
                    "software": "foo",
                    "versions": [
                        {"version": "1.0"},
                        {"version": "1.1"}
                    ]
                }))
        self.assert_created(response1)

        response2 = self.client.put(
            "/api/v1/jobtypes/TestJobType",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 1,
                    "code": code,
                    "software_requirements": [
                        {
                            "software": "foo",
                            "min_version": "1.0",
                            "max_version": "1.1"
                        }
                    ]
                }))
        self.assert_created(response2)
        id = response2.json['id']

        response3 = self.client.delete("/api/v1/jobtypes/%s" % id)
        self.assert_no_content(response3)

        response4 = self.client.get("/api/v1/jobtypes/TestJobType")
        self.assert_not_found(response4)
        response5 = self.client.get("/api/v1/jobtypes/%s" % id)
        self.assert_not_found(response5)
    def test_jobtype_list_versions(self):
        """GET .../versions/ lists version numbers, by name and by id."""
        response1 = self.client.put(
            "/api/v1/jobtypes/TestJobType",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 1,
                    "code": code
                    }))
        self.assert_created(response1)
        id = response1.json['id']

        response2 = self.client.put(
            "/api/v1/jobtypes/%s" % id,
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing (updated)",
                    "max_batch": 1,
                    "code": code
                    }))
        self.assert_created(response2)

        response3 = self.client.get("/api/v1/jobtypes/TestJobType/versions/")
        self.assert_ok(response3)
        self.assertEqual(response3.json, [1, 2])

        response4 = self.client.get("/api/v1/jobtypes/%s/versions/" % id)
        self.assert_ok(response4)
        self.assertEqual(response4.json, [1, 2])
    def test_jobtype_list_versions_unknown_jobtype(self):
        """Listing versions of an unknown jobtype is a 404."""
        response1 = self.client.get("/api/v1/jobtypes/UnknownJobType/versions/")
        self.assert_not_found(response1)
    def test_jobtype_get_versioned(self):
        """GET of a specific version returns that version's data, by name and id."""
        response1 = self.client.put(
            "/api/v1/jobtypes/TestJobType",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 1,
                    "code": code,
                    "software_requirements": []
                    }))
        self.assert_created(response1)
        id = response1.json['id']

        response2 = self.client.put(
            "/api/v1/jobtypes/TestJobType",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 2,
                    "code": code,
                    "software_requirements": []
                    }))
        self.assert_created(response2)
        id = response2.json['id']

        response3 = self.client.get("/api/v1/jobtypes/TestJobType/versions/1")
        self.assert_ok(response3)
        self.assertEqual(
            response3.json, {
                "batch_contiguous": True,
                "no_automatic_start_time": False,
                "classname": None,
                "code": code,
                "description": "Jobtype for testing inserts and queries",
                "id": id,
                "max_batch": 1,
                "name": "TestJobType",
                "software_requirements": [],
                "version": 1,
                "fail_body": None,
                "fail_subject": None,
                "success_body": None,
                "success_subject": None,
                "supports_tiling": False
                })

        response4 = self.client.get("/api/v1/jobtypes/%s/versions/1" % id)
        self.assert_ok(response4)
        self.assertEqual(
            response4.json, {
                "batch_contiguous": True,
                "no_automatic_start_time": False,
                "classname": None,
                "code": code,
                "description": "Jobtype for testing inserts and queries",
                "id": id,
                "max_batch": 1,
                "name": "TestJobType",
                "software_requirements": [],
                "version": 1,
                "fail_body": None,
                "fail_subject": None,
                "success_body": None,
                "success_subject": None,
                "supports_tiling": False
                })

        response5 = self.client.get("/api/v1/jobtypes/%s/versions/2" % id)
        self.assert_ok(response5)
        self.assertEqual(
            response5.json, {
                "batch_contiguous": True,
                "no_automatic_start_time": False,
                "classname": None,
                "code": code,
                "description": "Jobtype for testing inserts and queries",
                "id": id,
                "max_batch": 2,
                "name": "TestJobType",
                "software_requirements": [],
                "version": 2,
                "fail_body": None,
                "fail_subject": None,
                "success_body": None,
                "success_subject": None,
                "supports_tiling": False
                })
    def test_jobtype_get_unknown_version(self):
        """GET of a nonexistent version number is a 404."""
        response1 = self.client.put(
            "/api/v1/jobtypes/TestJobType",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 1,
                    "code": code,
                    "software_requirements": []
                    }))
        self.assert_created(response1)

        response2 = self.client.get("/api/v1/jobtypes/TestJobType/versions/42")
        self.assert_not_found(response2)
    def test_jobtype_delete_version(self):
        """DELETE of a single version (by name) leaves earlier versions intact."""
        response1 = self.client.put(
            "/api/v1/jobtypes/TestJobType",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 1,
                    "code": code,
                    "software_requirements": []
                    }))
        self.assert_created(response1)
        id = response1.json['id']

        response2 = self.client.put(
            "/api/v1/jobtypes/TestJobType",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 2,
                    "code": code,
                    "software_requirements": []
                    }))
        self.assert_created(response2)

        response3 = self.client.delete("/api/v1/jobtypes/TestJobType/versions/2")
        self.assert_no_content(response3)

        response4 = self.client.get("/api/v1/jobtypes/TestJobType/versions/2")
        self.assert_not_found(response4)

        response5 = self.client.get("/api/v1/jobtypes/TestJobType")
        self.assert_ok(response5)
        self.assertEqual(
            response5.json, {
                "batch_contiguous": True,
                "no_automatic_start_time": False,
                "classname": None,
                "code": code,
                "description": "Jobtype for testing inserts and queries",
                "id": id,
                "max_batch": 1,
                "name": "TestJobType",
                "software_requirements": [],
                "version": 1,
                "fail_body": None,
                "fail_subject": None,
                "success_body": None,
                "success_subject": None,
                "supports_tiling": False
                })
    def test_jobtype_by_id_delete_version(self):
        """DELETE of a single version (by id) leaves earlier versions intact."""
        response1 = self.client.put(
            "/api/v1/jobtypes/TestJobType",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 1,
                    "code": code,
                    "software_requirements": []
                    }))
        self.assert_created(response1)
        id = response1.json['id']

        response2 = self.client.put(
            "/api/v1/jobtypes/TestJobType",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 2,
                    "code": code,
                    "software_requirements": []
                    }))
        self.assert_created(response2)

        response3 = self.client.delete("/api/v1/jobtypes/%s/versions/2" % id)
        self.assert_no_content(response3)

        response4 = self.client.get("/api/v1/jobtypes/TestJobType/versions/2")
        self.assert_not_found(response4)

        response5 = self.client.get("/api/v1/jobtypes/TestJobType")
        self.assert_ok(response5)
        self.assertEqual(
            response5.json, {
                "batch_contiguous": True,
                "no_automatic_start_time": False,
                "classname": None,
                "code": code,
                "description": "Jobtype for testing inserts and queries",
                "id": id,
                "max_batch": 1,
                "name": "TestJobType",
                "software_requirements": [],
                "version": 1,
                "fail_body": None,
                "fail_subject": None,
                "success_body": None,
                "success_subject": None,
                "supports_tiling": False
                })
    def test_jobtype_get_code(self):
        """GET .../versions/1/code returns the raw source, by name and by id."""
        response1 = self.client.put(
            "/api/v1/jobtypes/TestJobType",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 1,
                    "code": code,
                    "software_requirements": []
                    }))
        self.assert_created(response1)
        id = response1.json['id']

        response2 = self.client.get(
            "/api/v1/jobtypes/TestJobType/versions/1/code")
        self.assert_ok(response2)
        self.assertEqual(response2.data.decode(), code)

        response3 = self.client.get(
            "/api/v1/jobtypes/%s/versions/1/code" % id)
        self.assert_ok(response3)
        self.assertEqual(response3.data.decode(), code)
    def test_jobtype_get_code_not_found(self):
        """Requesting code for an unknown jobtype is a 404."""
        response1 = self.client.get(
            "/api/v1/jobtypes/UnknownJobType/versions/1/code")
        self.assert_not_found(response1)
    def test_jobtype_list_requirements(self):
        """GET .../software_requirements/ lists requirements, by name and by id+version."""
        response1 = self.client.post(
            "/api/v1/software/",
            content_type="application/json",
            data=dumps({
                    "software": "foo",
                    "versions": [
                        {"version": "1.0"},
                        {"version": "1.1"}
                    ]
                }))
        self.assert_created(response1)
        software_id = response1.json['id']
        software_min_version_id = response1.json["versions"][0]["id"]
        software_max_version_id = response1.json["versions"][1]["id"]

        response2 = self.client.put(
            "/api/v1/jobtypes/TestJobType",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 1,
                    "code": code,
                    "software_requirements": [
                        {
                            "software": "foo",
                            "min_version": "1.0",
                            "max_version": "1.1"
                        }
                    ]
                }))
        self.assert_created(response2)
        id = response2.json['id']

        response3 = self.client.get(
            "/api/v1/jobtypes/TestJobType/software_requirements/")
        self.assert_ok(response3)
        self.assertEqual(response3.json, [
                {
                    "software": {
                        "software": "foo",
                        "id": software_id
                        },
                    "max_version": {
                        "version": "1.1",
                        "id": software_max_version_id
                        },
                    "min_version": {
                        "version": "1.0",
                        "id": software_min_version_id
                        },
                    "jobtype_version": {
                        "version": 1,
                        "jobtype": "TestJobType",
                        }
                }
            ])

        response4 = self.client.get(
            "/api/v1/jobtypes/%s/versions/1/software_requirements/" % id)
        self.assert_ok(response4)
        self.assertEqual(response4.json, [
                {
                    "software": {
                        "software": "foo",
                        "id": software_id
                        },
                    "max_version": {
                        "version": "1.1",
                        "id": software_max_version_id
                        },
                    "min_version": {
                        "version": "1.0",
                        "id": software_min_version_id
                        },
                    "jobtype_version": {
                        "version": 1,
                        "jobtype": "TestJobType",
                        }
                }
            ])
    def test_jobtype_list_requirements_unknown_jobtype(self):
        """Listing requirements of an unknown jobtype is a 404."""
        response1 = self.client.get(
            "/api/v1/jobtypes/UnknownJobType/software_requirements/")
        self.assert_not_found(response1)
    def test_jobtype_list_requirements_unknown_version(self):
        """Listing requirements of a nonexistent version is a 404."""
        response1 = self.client.put(
            "/api/v1/jobtypes/TestJobType",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 1,
                    "code": code
                    }))
        self.assert_created(response1)
        id = response1.json['id']

        response2 = self.client.get(
            "/api/v1/jobtypes/TestJobType/versions/100/software_requirements/")
        self.assert_not_found(response2)
    def test_jobtype_post_requirement(self):
        """POSTing a requirement bumps the jobtype to a new version (2)."""
        response1 = self.client.put(
            "/api/v1/jobtypes/TestJobType",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 1,
                    "code": code
                    }))
        self.assert_created(response1)
        id = response1.json['id']

        response2 = self.client.post(
            "/api/v1/software/",
            content_type="application/json",
            data=dumps({
                    "software": "foo",
                    "versions": [
                        {"version": "1.0"},
                        {"version": "1.1"}
                    ]
                }))
        self.assert_created(response2)
        software_id = response2.json['id']
        software_min_version_id = response2.json["versions"][0]["id"]
        software_max_version_id = response2.json["versions"][1]["id"]

        response3 = self.client.post(
            "/api/v1/jobtypes/TestJobType/software_requirements/",
            content_type="application/json",
            data=dumps({
                    "software" : "foo",
                    "min_version": "1.0",
                    "max_version": "1.1"}))
        self.assert_created(response3)

        response4 = self.client.get(
            "/api/v1/jobtypes/TestJobType/software_requirements/")
        self.assert_ok(response4)
        self.assertEqual(response4.json, [
                {
                    "software": {
                        "software": "foo",
                        "id": software_id
                        },
                    "max_version": {
                        "version": "1.1",
                        "id": software_max_version_id
                        },
                    "min_version": {
                        "version": "1.0",
                        "id": software_min_version_id
                        },
                    "jobtype_version": {
                        "version": 2,
                        "jobtype": "TestJobType",
                        }
                }
            ])
    def test_jobtype_by_id_post_requirement(self):
        """POSTing a requirement addressed by jobtype id is accepted."""
        response1 = self.client.put(
            "/api/v1/jobtypes/TestJobType",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 1,
                    "code": code
                    }))
        self.assert_created(response1)
        id = response1.json['id']

        response2 = self.client.post(
            "/api/v1/software/",
            content_type="application/json",
            data=dumps({
                    "software": "foo",
                    "versions": []
                }))
        self.assert_created(response2)

        response3 = self.client.post(
            "/api/v1/jobtypes/%s/software_requirements/" % id,
            content_type="application/json",
            data=dumps({"software" : "foo"}))
        self.assert_created(response3)
    def test_jobtype_versioned_post_requirement(self):
        """POSTing a requirement to a specific version is not allowed (405)."""
        response1 = self.client.put(
            "/api/v1/jobtypes/TestJobType",
            content_type="application/json",
            data=dumps({
                    "name": "TestJobType",
                    "description": "Jobtype for testing inserts and queries",
                    "max_batch": 1,
                    "code": code
                    }))
        self.assert_created(response1)

        response2 = self.client.post(
            "/api/v1/software/",
            content_type="application/json",
            data=dumps({
                    "software": "foo",
                    "versions": [
                        {"version": "1.0"},
                        {"version": "1.1"}
                    ]
                }))
        self.assert_created(response2)

        response3 = self.client.post(
            "/api/v1/jobtypes/TestJobType/versions/1/software_requirements/",
            content_type="application/json",
            data=dumps({"software" : "foo"}))
        self.assert_method_not_allowed(response3)
    def test_jobtype_post_requirement_unknown_jobtype(self):
        """POSTing a requirement to an unknown jobtype is a 404."""
        response1 = self.client.post(
            "/api/v1/software/",
            content_type="application/json",
            data=dumps({
                    "software": "foo",
                    "versions": []
                }))
        self.assert_created(response1)

        response2 = self.client.post(
            "/api/v1/jobtypes/UnknownJobType/software_requirements/",
            content_type="application/json",
            data=dumps({"software" : "foo"}))
        self.assert_not_found(response2)
def test_jobtype_post_requirement_no_versions(self):
response1 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code
}))
self.assert_created(response1)
response2 = self.client.delete("/api/v1/jobtypes/TestJobType/versions/1")
self.assert_no_content(response2)
response3 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({"software": "foo"}))
self.assert_created(response3)
response4 = self.client.post(
"/api/v1/jobtypes/TestJobType/software_requirements/",
content_type="application/json",
data=dumps({"software" : "foo"}))
self.assert_not_found(response4)
def test_jobtype_post_requirement_bad_software(self):
response1 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code
}))
self.assert_created(response1)
response2 = self.client.post(
"/api/v1/jobtypes/TestJobType/software_requirements/",
content_type="application/json",
data=dumps({}))
self.assert_bad_request(response2)
response3 = self.client.post(
"/api/v1/jobtypes/TestJobType/software_requirements/",
content_type="application/json",
data=dumps({"software": 42}))
self.assert_bad_request(response3)
def test_jobtype_post_requirement_unknown_software(self):
response1 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code
}))
self.assert_created(response1)
response2 = self.client.post(
"/api/v1/jobtypes/TestJobType/software_requirements/",
content_type="application/json",
data=dumps({"software": "unknown software"}))
self.assert_not_found(response2)
def test_jobtype_post_requirement_with_existing(self):
response1 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({"software": "foo",
"versions": [
{"version": "1.0"},
{"version": "1.1"}
]
}))
self.assert_created(response1)
response2 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code,
"software_requirements": [{
"software" : "foo",
"min_version": "1.0",
"max_version": "1.1"}]
}))
self.assert_created(response2)
response2 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({"software": "bar",
"versions": [
{"version": "0.1"},
{"version": "0.2"}
]
}))
self.assert_created(response2)
response3 = self.client.post(
"/api/v1/jobtypes/TestJobType/software_requirements/",
content_type="application/json",
data=dumps({"software" : "bar",
"min_version": "0.1",
"max_version": "0.2"}))
self.assert_created(response3)
response4 = self.client.get(
"/api/v1/jobtypes/TestJobType/software_requirements/")
self.assert_ok(response4)
self.assertEqual(len(response4.json), 2)
def test_jobtype_post_requirement_conflict(self):
response1 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({"software": "foo"}))
self.assert_created(response1)
response2 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code,
"software_requirements": [{"software" : "foo"}]
}))
self.assert_created(response2)
response3 = self.client.post(
"/api/v1/jobtypes/TestJobType/software_requirements/",
content_type="application/json",
data=dumps({"software" : "foo"}))
self.assert_conflict(response3)
def test_jobtype_post_requirement_bad_min_version(self):
response1 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({"software": "foo"}))
self.assert_created(response1)
response2 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code
}))
self.assert_created(response2)
response3 = self.client.post(
"/api/v1/jobtypes/TestJobType/software_requirements/",
content_type="application/json",
data=dumps({"software": "foo",
"min_version": 42}))
self.assert_bad_request(response3)
def test_jobtype_post_requirement_unknown_min_version(self):
response1 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({"software": "foo"}))
self.assert_created(response1)
response2 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code
}))
self.assert_created(response2)
response3 = self.client.post(
"/api/v1/jobtypes/TestJobType/software_requirements/",
content_type="application/json",
data=dumps({"software": "foo",
"min_version": "1.0"}))
self.assert_not_found(response3)
def test_jobtype_post_requirement_bad_max_version(self):
response1 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({"software": "foo"}))
self.assert_created(response1)
response2 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code
}))
self.assert_created(response2)
response3 = self.client.post(
"/api/v1/jobtypes/TestJobType/software_requirements/",
content_type="application/json",
data=dumps({"software": "foo",
"max_version": 42}))
self.assert_bad_request(response3)
def test_jobtype_post_requirement_unknown_max_version(self):
response1 = self.client.post(
"/api/v1/software/",
content_type="application/json",
data=dumps({"software": "foo"}))
self.assert_created(response1)
response2 = self.client.put(
"/api/v1/jobtypes/TestJobType",
content_type="application/json",
data=dumps({
"name": "TestJobType",
"description": "Jobtype for testing inserts and queries",
"max_batch": 1,
"code": code
}))
self.assert_created(response2)
response3 = self.client.post(
"/api/v1/jobtypes/TestJobType/software_requirements/",
content_type="application/json",
data=dumps({"software": "foo",
"max_version": "1.0"}))
self.assert_not_found(response3)
# --- GET /software_requirements/<software> behaviour -----------------------

# A single requirement is retrievable by software name, and the payload
# echoes back the software/version ids created earlier; the jobtype can be
# addressed by name or by numeric id.
def test_jobtype_get_single_requirement(self):
    response1 = self.client.post(
        "/api/v1/software/",
        content_type="application/json",
        data=dumps({
            "software": "foo",
            "versions": [
                {"version": "1.0"},
                {"version": "1.1"}
            ]
        }))
    self.assert_created(response1)
    software_id = response1.json['id']
    software_min_version_id = response1.json["versions"][0]["id"]
    software_max_version_id = response1.json["versions"][1]["id"]
    response2 = self.client.put(
        "/api/v1/jobtypes/TestJobType",
        content_type="application/json",
        data=dumps({
            "name": "TestJobType",
            "description": "Jobtype for testing inserts and queries",
            "max_batch": 1,
            "code": code,
            "software_requirements": [
                {
                    "software": "foo",
                    "min_version": "1.0",
                    "max_version": "1.1"
                }
            ]
        }))
    self.assert_created(response2)
    id = response2.json['id']
    response3 = self.client.get(
        "/api/v1/jobtypes/TestJobType/software_requirements/foo")
    self.assert_ok(response3)
    self.assertEqual(
        response3.json, {
            "software": {
                "software": "foo",
                "id": software_id
            },
            "max_version": {
                "version": "1.1",
                "id": software_max_version_id
            },
            "min_version": {
                "version": "1.0",
                "id": software_min_version_id
            },
            "jobtype_version": {
                "version": 1,
                "jobtype": "TestJobType",
            }
        })
    response4 = self.client.get(
        "/api/v1/jobtypes/%s/software_requirements/foo" % id)
    self.assert_ok(response4)

# 404 when the jobtype itself does not exist.
def test_jobtype_single_requirement_unknown_jobtype(self):
    response1 = self.client.get(
        "/api/v1/jobtypes/UnknownJobType/software_requirements/1")
    self.assert_not_found(response1)

# 404 when the jobtype exists but has no versions left.
def test_jobtype_single_requirement_no_versions(self):
    response1 = self.client.put(
        "/api/v1/jobtypes/TestJobType",
        content_type="application/json",
        data=dumps({
            "name": "TestJobType",
            "description": "Jobtype for testing inserts and queries",
            "max_batch": 1,
            "code": code
        }))
    self.assert_created(response1)
    response2 = self.client.delete("/api/v1/jobtypes/TestJobType/versions/1")
    self.assert_no_content(response2)
    response3 = self.client.get(
        "/api/v1/jobtypes/TestJobType/software_requirements/1")
    self.assert_not_found(response3)

# 404 when the jobtype exists but has no such requirement.
def test_jobtype_single_requirement_not_found(self):
    response1 = self.client.put(
        "/api/v1/jobtypes/TestJobType",
        content_type="application/json",
        data=dumps({
            "name": "TestJobType",
            "description": "Jobtype for testing inserts and queries",
            "max_batch": 1,
            "code": code
        }))
    self.assert_created(response1)
    response2 = self.client.get(
        "/api/v1/jobtypes/TestJobType/software_requirements/1")
    self.assert_not_found(response2)
# --- DELETE /software_requirements/<software> behaviour --------------------

# Deleting one requirement removes it (idempotently: a second DELETE is
# still 204) while other requirements stay intact.
def test_jobtype_delete_requirement(self):
    response1 = self.client.post(
        "/api/v1/software/",
        content_type="application/json",
        data=dumps({
            "software": "foo",
            "versions": [
                {"version": "1.0"},
                {"version": "1.1"}
            ]
        }))
    self.assert_created(response1)
    response2 = self.client.post(
        "/api/v1/software/",
        content_type="application/json",
        data=dumps({
            "software": "bar",
            "versions": [
                {"version": "0.1"},
                {"version": "0.2"}
            ]
        }))
    self.assert_created(response2)
    response3 = self.client.put(
        "/api/v1/jobtypes/TestJobType",
        content_type="application/json",
        data=dumps({
            "name": "TestJobType",
            "description": "Jobtype for testing inserts and queries",
            "max_batch": 1,
            "code": code,
            "software_requirements": [
                {
                    "software": "foo",
                    "min_version": "1.0",
                    "max_version": "1.1"
                },
                {
                    "software": "bar",
                    "min_version": "0.1",
                    "max_version": "0.2"
                }
            ]
        }))
    self.assert_created(response3)
    id = response3.json['id']
    response4 = self.client.delete(
        "/api/v1/jobtypes/TestJobType/software_requirements/foo")
    self.assert_no_content(response4)
    # Second delete of the same requirement is still a 204 (idempotent).
    response5 = self.client.delete(
        "/api/v1/jobtypes/TestJobType/software_requirements/foo")
    self.assert_no_content(response5)
    response6 = self.client.get(
        "/api/v1/jobtypes/TestJobType/software_requirements/foo")
    self.assert_not_found(response6)
    response7 = self.client.get(
        "/api/v1/jobtypes/TestJobType/software_requirements/bar")
    self.assert_ok(response7)

# The requirement can also be deleted with the jobtype addressed by id.
def test_jobtype_by_id_delete_requirement(self):
    response1 = self.client.post(
        "/api/v1/software/",
        content_type="application/json",
        data=dumps({"software": "foo"}))
    self.assert_created(response1)
    response2 = self.client.put(
        "/api/v1/jobtypes/TestJobType",
        content_type="application/json",
        data=dumps({
            "name": "TestJobType",
            "description": "Jobtype for testing inserts and queries",
            "max_batch": 1,
            "code": code,
            "software_requirements": [
                {"software": "foo"}
            ]
        }))
    self.assert_created(response2)
    id = response2.json['id']
    response3 = self.client.delete(
        "/api/v1/jobtypes/%s/software_requirements/foo" % id)
    self.assert_no_content(response3)
    response4 = self.client.get(
        "/api/v1/jobtypes/TestJobType/software_requirements/")
    self.assertEqual(len(response4.json), 0)

# DELETE on an unknown jobtype is a 404.
def test_jobtype_delete_requirement_unknown_jobtype(self):
    response1 = self.client.delete(
        "/api/v1/jobtypes/UnknownJobType/software_requirements/1")
    self.assert_not_found(response1)

# DELETE on a jobtype with no versions left is a 404.
def test_jobtype_delete_requirement_no_versions(self):
    response1 = self.client.put(
        "/api/v1/jobtypes/TestJobType",
        content_type="application/json",
        data=dumps({
            "name": "TestJobType",
            "description": "Jobtype for testing inserts and queries",
            "max_batch": 1,
            "code": code
        }))
    self.assert_created(response1)
    response2 = self.client.delete("/api/v1/jobtypes/TestJobType/versions/1")
    self.assert_no_content(response2)
    response3 = self.client.delete(
        "/api/v1/jobtypes/TestJobType/software_requirements/1")
    self.assert_not_found(response3)
| 1.875 | 2 |
imagekit/hashers.py | radicalgraphics/django-imagekit | 0 | 16863 | from copy import copy
from hashlib import md5
from pickle import Pickler, MARK, DICT
from types import DictionaryType
from .lib import StringIO
class CanonicalizingPickler(Pickler):
    """Pickler that serializes sets and dicts in sorted order.

    Because iteration order of sets/dicts is not canonical, pickling the
    same logical value twice can yield different byte streams; this
    subclass sorts elements/items first so equal values always pickle to
    identical bytes (and therefore hash identically in ``pickle()`` below).

    NOTE(review): Python 2 only — relies on ``obj.iteritems()`` and
    ``types.DictionaryType``.
    """

    # Copy the dispatch table so the overrides don't mutate Pickler's own.
    dispatch = copy(Pickler.dispatch)

    def save_set(self, obj):
        # Reduce the set as protocol 0 would, but with its elements sorted.
        rv = obj.__reduce_ex__(0)
        rv = (rv[0], (sorted(rv[1][0]),), rv[2])
        self.save_reduce(obj=obj, *rv)
    dispatch[set] = save_set

    def save_dict(self, obj):
        # Re-implementation of Pickler.save_dict that writes the items in
        # sorted key order instead of dict iteration order.
        write = self.write
        write(MARK + DICT)
        self.memoize(obj)
        self._batch_setitems(sorted(obj.iteritems()))
    dispatch[DictionaryType] = save_dict
def pickle(obj):
    """Return the md5 hex digest of *obj*'s canonical (order-stable) pickle."""
    buf = StringIO()
    pickler = CanonicalizingPickler(buf, 0)
    pickler.dump(obj)
    return md5(buf.getvalue()).hexdigest()
| 2.796875 | 3 |
sun.py | funxiun/AstroAlgorithms4Python | 7 | 16864 | '''Meeus: Astronomical Algorithms (2nd ed.), chapter 25'''
import math
from nutation_ecliptic import ecliptic
from constants import AU
def coordinates(jd):
    """Equatorial coordinates (RA, Dec) of the Sun in degrees for Julian day *jd*."""
    lon_rad = math.radians(longitude(jd))
    eps_rad = math.radians(ecliptic(jd))
    sin_lon = math.sin(lon_rad)
    # Standard ecliptic -> equatorial conversion (Meeus eq. 25.6 / 25.7).
    ra = math.atan2(math.cos(eps_rad) * sin_lon, math.cos(lon_rad))
    dec = math.asin(math.sin(eps_rad) * sin_lon)
    return math.degrees(ra), math.degrees(dec)
def longitude(jd):
    """True (geometric) longitude of the Sun in degrees (Meeus eq. 25.2),
    not normalized to [0, 360)."""
    T = (jd - 2451545) / 36525.
    # Mean longitude L0 and mean anomaly M of the Sun.
    mean_lon = 280.46646 + 36000.76983 * T + 0.0003032 * T ** 2
    mean_anom = math.radians(357.52911 + 35999.05029 * T - 0.0001537 * T ** 2)
    # Equation of the center C, in degrees.
    center = ((1.914602 - 0.004817 * T - 0.000014 * T ** 2) * math.sin(mean_anom)
              + (0.019993 - 0.000101 * T) * math.sin(2 * mean_anom)
              + 0.000289 * math.sin(3 * mean_anom))
    return mean_lon + center
def distance(jd, km=True):
    """Earth-Sun distance for Julian day *jd* (Meeus eq. 25.5).

    Returns kilometres by default; astronomical units when ``km=False``.
    """
    T = (jd - 2451545) / 36525.
    ecc = 0.016708634 - 0.000042037 * T - 0.0000001267 * T ** 2
    mean_anom = math.radians(357.52911 + 35999.05029 * T - 0.0001537 * T ** 2)
    center = math.radians(
        (1.914602 - 0.004817 * T - 0.000014 * T ** 2) * math.sin(mean_anom)
        + (0.019993 - 0.000101 * T) * math.sin(2 * mean_anom)
        + 0.000289 * math.sin(3 * mean_anom))
    true_anom = mean_anom + center
    # Radius vector of an ellipse, in AU.
    radius = 1.000001018 * (1 - ecc ** 2) / (1 + ecc * math.cos(true_anom))
    return radius * AU if km else radius
| 3.4375 | 3 |
test/paths.py | cychitivav/kobuki_navigation | 0 | 16865 | #!/usr/bin/python
import numpy as np
import cv2
from matplotlib import pyplot as plt
import networkx as nx
def rotate_image(image, angle, margin=100, occupied_thresh=43.0):
    """Rotate a grayscale occupancy-grid map and clean up the result.

    After rotating by *angle* degrees, pixels inside a *margin*-pixel border
    are reset to 205 (the "unknown" gray of map_server PGMs), and interior
    pixels with at least 5 of 8 occupied neighbours are forced to 0 (wall),
    closing gaps the cubic interpolation opened.

    BUG FIX: the original passed ``image.shape[0:2]`` — i.e. (rows, cols) —
    to cv2, which expects the centre as (x, y) and dsize as (width, height).
    That is only accidentally correct for square images; the axes are now
    swapped explicitly.  The 100/924 bounds are likewise generalized from a
    1024x1024 map to ``margin`` / ``size - margin`` (identical behaviour for
    the original map with the default ``margin=100``).

    :param image: 2-D grayscale map (uint8).
    :param angle: rotation angle in degrees (counter-clockwise).
    :param margin: width of the border cleared to "unknown".
    :param occupied_thresh: gray levels below this count as occupied.
    :return: the rotated, cleaned copy.
    """
    rows, cols = image.shape[0:2]
    image_center = (cols / 2, rows / 2)          # cv2 wants (x, y)
    rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1)
    vertical = cv2.warpAffine(image, rot_mat, (cols, rows),  # dsize = (w, h)
                              flags=cv2.INTER_CUBIC)

    im = vertical.copy()
    neighbor_offsets = ((1, 0), (-1, 0), (1, -1), (1, 1),
                        (-1, 1), (-1, -1), (0, 1), (0, -1))
    for i in range(rows):
        for j in range(cols):
            if i < margin or j < margin or j > cols - margin or i > rows - margin:
                im[i, j] = 205  # blank out the border
            else:
                # Count occupied 8-neighbours in the *rotated* image.
                neighbor = sum(
                    1 for di, dj in neighbor_offsets
                    if vertical[i + di, j + dj] < occupied_thresh)
                if neighbor >= 5:
                    im[i, j] = 0
    return im
if __name__ == "__main__":
    # Pipeline: rotate/clean the map, skeletonize the free space, detect
    # skeleton corners as graph nodes, connect mutually-visible nodes, then
    # plan a shortest path from a fixed start to a random goal.
    image = cv2.imread('map/map.pgm', 0)
    rotated = rotate_image(image, -7.66)
    #cv2.imwrite('map/rotated.pgm', rotated)
    # Binarize free space and open it to remove speckle before thinning.
    _, th = cv2.threshold(rotated, 245, 255, cv2.THRESH_BINARY)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
    op = cv2.morphologyEx(th, cv2.MORPH_OPEN, kernel)
    skel = cv2.ximgproc.thinning(op)
    plt.figure()
    plt.subplot(1,3,1)
    plt.imshow(image, cmap='gray')
    plt.axis('off')
    plt.title('Original')
    plt.subplot(1,3,2)
    plt.imshow(rotated, cmap='gray')
    plt.axis('off')
    plt.title('Rotada')
    plt.subplot(1,3,3)
    plt.imshow(skel, cmap='gray')
    plt.axis('off')
    plt.title('Adelgazada')
    # Thicken the skeleton; corners of it become the roadmap nodes.
    base = cv2.dilate(skel, None, iterations=12)
    path = cv2.cvtColor(base, cv2.COLOR_GRAY2RGB)
    corners = cv2.cornerHarris(skel,7,7,0.04)
    corners = cv2.dilate(corners, None)
    _, corners = cv2.threshold(corners,0.001,255,cv2.THRESH_BINARY)
    corners = np.uint8(corners)
    contours, _ = cv2.findContours(corners,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    path[corners>0.0]=[0,255,0]
    cv2.drawContours(path,contours,-1,(255,0,0),1)
    G = nx.Graph()
    points = []
    for i, c in enumerate(contours):
        # calculate moments for each contour
        M = cv2.moments(c)
        # calculate x,y coordinate of center
        cX = int(round(M["m10"] / M["m00"]))
        cY = int(round(M["m01"] / M["m00"]))
        path[cY,cX]=[0,0,255]
        G.add_node(i, pos=(cX,cY))
        points.append((cX,cY))
        font = cv2.FONT_HERSHEY_SIMPLEX
        fontScale = 0.4
        fontColor = (0,0,255)
        thickness = 1
        path = cv2.putText(path, str(i), (cX,cY), font, fontScale, fontColor, thickness)
    plt.figure()
    plt.subplot(1,2,1)
    plt.imshow(base,cmap='gray')
    plt.axis('off')
    plt.title('Imagen base')
    plt.subplot(1,2,2)
    plt.imshow(path)
    plt.axis('off')
    plt.title('Esquinas')
    # Visibility check: if drawing the segment adds no new "black" pixels,
    # the segment lies entirely on the dilated skeleton -> add a graph edge.
    noBlack = cv2.countNonZero(cv2.cvtColor(path,cv2.COLOR_BGR2GRAY))
    for i, p1 in enumerate(points):
        for j, p2 in enumerate(points):
            if p1 == p2: continue
            test_img = cv2.line(path.copy(), p1, p2, (234,0,234), 1)
            # Recount to see if the images are the same
            if cv2.countNonZero(cv2.cvtColor(test_img,cv2.COLOR_BGR2GRAY)) == noBlack:
                # path = cv2.line(path, p1, p2, (234,0,234), 1)
                G.add_edge(i,j,weight=np.hypot(p1[0]-p2[0], p1[1]-p2[1]))
    plt.figure()
    nx.draw(G,with_labels=True)
    # Fixed start; random goal inside the mapped region.
    x_0, y_0 = [492,500]
    x_f = np.random.randint(487) + 277
    y_f = np.random.randint(448) + 368
    path[y_0+1,x_0+1] = (255,0,0)
    path[y_f+1,x_f+1] = (255,0,0)
    # Connect start/goal to every roadmap node they can "see" through free
    # space (eroded to keep a safety clearance from the walls).
    _, th = cv2.threshold(rotated, 245, 255, cv2.THRESH_BINARY)
    ero = cv2.erode(th,None,iterations=10)
    th = ero.copy()
    noBlack = cv2.countNonZero(th)
    for i, p in enumerate(points):
        test_img = cv2.line(th.copy(), (x_0,y_0), p, 234, 1)
        # Recount to see if the images are the same
        if cv2.countNonZero(test_img) == noBlack:
            # path = cv2.line(path, p1, p2, (234,0,234), 1)
            G.add_edge('p_0',i,weight=np.hypot(p[0]-x_0, y_0-p[1]))
    for i, p in enumerate(points):
        test_img = cv2.line(th.copy(), (x_f,y_f), p, 234, 1)
        # Recount to see if the images are the same
        if cv2.countNonZero(test_img) == noBlack:
            # path = cv2.line(path, p1, p2, (234,0,234), 1)
            G.add_edge('p_f',i,weight=np.hypot(p[0]-x_f, y_f-p[1]))
    plan = nx.shortest_path(G,'p_0','p_f')
    # NOTE(review): Python-2-only print statement; use print(plan) for py2/py3.
    print plan
    # Draw the planned path segment by segment (start/goal need the raw
    # coordinates, interior waypoints come from the roadmap points).
    for i in range(len(plan)-1):
        if i == 0:
            path = cv2.line(path, (x_0,y_0), points[plan[i+1]], (251,229,78), 1)
        elif i == len(plan)-2:
            path = cv2.line(path, points[plan[i]], (x_f,y_f), (251,229,78), 1)
        else:
            path = cv2.line(path, points[plan[i]], points[plan[i+1]], (251,229,78), 1)
    plt.figure()
    plt.imshow(ero,cmap='gray')
    plt.axis('off')
    plt.title('Imagen erosionada')
    plt.show()
| 2.734375 | 3 |
intro-to-programming/python-for-everyone/3-variables-expressions-statements/exercise-4.py | udpsunil/computer-science | 0 | 16866 | # Assume that we execute the following assignment statements
# width = 17
# height = 12.0
width = 17
height = 12.0

# Evaluate each expression once, then report value and type uniformly.
value_1 = width // 2    # floor division of ints -> int
value_2 = width / 2.0   # true division with a float -> float
value_3 = height / 3    # float / int -> float
value_4 = 1 + 2 * 5     # operator precedence: 1 + (2 * 5)

for _name, _value in (("value_1", value_1), ("value_2", value_2),
                      ("value_3", value_3), ("value_4", value_4)):
    print(f"{_name} is {_value} and it's type is {type(_value)}")
| 4.15625 | 4 |
apps/weapons/admin.py | tufbel/wFocus | 0 | 16867 | from django.contrib import admin
# Register your models here.
from apps.weapons.models import Weapon
# Expose the Weapon model in the Django admin with the default ModelAdmin.
admin.site.register(Weapon)
| 1.3125 | 1 |
modules/kubrick/apps/awards/models.py | Lab-Quatro/aposcar | 3 | 16868 | from django.db import models
class Nominee(models.Model):
    """A candidate (person or film) that can receive award indications."""
    name = models.TextField()
    # Uploaded images land under MEDIA_ROOT/nominees/.
    picture_url = models.ImageField(upload_to="nominees/")
    description = models.TextField(max_length=350)

    class Meta:
        verbose_name_plural = "nominees"

    def __str__(self):
        return self.name
class Category(models.Model):
    """An award category (e.g. Best Picture), with a URL-friendly slug field."""
    name = models.CharField(max_length=40)
    # Slug-like identifier used in URLs; kept separate from the display name.
    url_field = models.CharField(max_length=40)

    class Meta:
        verbose_name_plural = "categories"

    def __str__(self):
        return self.name
class Indication(models.Model):
    """A nomination of a Nominee in a Category for a given year."""
    nominated = models.ForeignKey(Nominee, on_delete=models.CASCADE)
    category = models.ForeignKey(
        Category, on_delete=models.CASCADE, related_name="indications"
    )
    year = models.IntegerField()
    # Optional free-form note about the nomination.
    annotation = models.TextField(blank=True)
    # True for the indication that actually won the category that year.
    is_winner = models.BooleanField(default=False)

    def __str__(self):
        return f'"{self.nominated.name}" on "{self.category.name}"'
| 2.171875 | 2 |
scholarly_citation_finder/apps/citation/search/PublicationDocumentExtractor.py | citationfinder/scholarly_citation_finder | 1 | 16869 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
from datetime import datetime
from scholarly_citation_finder import config
from scholarly_citation_finder.apps.parser.Parser import Parser
from scholarly_citation_finder.apps.core.models import PublicationUrl
from scholarly_citation_finder.tools.extractor.grobid.GrobidExtractor import GrobidExtractor
from scholarly_citation_finder.lib.file import download_file_pdf, DownloadFailedException, UnexpectedContentTypeException
from scholarly_citation_finder.lib.process import ProcessException
from scholarly_citation_finder.apps.parser.Exceptions import ParserRollbackError
from scholarly_citation_finder.lib.string import normalize_string
from scholarly_citation_finder.tools.extractor.grobid.TeiParser import TeiParserNoDocumentTitle,\
TeiParserNoReferences
from scholarly_citation_finder.tools.nameparser.StringMatching import nearly_match
logger = logging.getLogger(__name__)
class PublicationDocumentExtractor:
    '''
    Downloads a publication's PDF, extracts its head metadata and
    references with Grobid, and stores the results via the Parser.
    '''

    # Documents with fewer extracted references are rejected as likely
    # mis-extracted / wrong document.
    NUM_MINIMUM_REFERENCES = 3

    def __init__(self, database='default'):
        '''
        Create object.

        :param database: Database name the Parser stores results into.
        '''
        self.extractor = GrobidExtractor()  # used to extract documents
        self.parser = Parser(database=database)  # used to store results

    def extract_and_store(self, publication, url):
        '''
        Extract the publication from the given URL and store the result.

        All expected failure modes (download, extraction, storage) are
        caught and logged; the return value signals overall success.

        :param publication: Publication model instance to extract for
        :param url: URL of the candidate PDF
        :return: True on success, False on any handled failure
        :raise ExtractorNotAvaiableException:
        '''
        try:
            document_meta, references = self.extract(publication.title, publication.id, url=url)  # raises ExtractorNotAvaiableException
            if document_meta and references:
                self.__store_document_meta(publication=publication, document_meta=document_meta)
                self.__store_references(publication=publication, url=url, references=references)
                return True
        # Download failed
        except(DownloadFailedException, UnexpectedContentTypeException) as e:
            logger.info('{}: {}'.format(type(e).__name__, str(e)))
        # Extractor failed
        except(ProcessException) as e:
            logger.info('{}: {}'.format(type(e).__name__, str(e)))
        # Storage failed
        except(ParserRollbackError) as e:
            logger.warn(e, exc_info=True)
        return False

    def extract(self, publication_title, publication_id, url):
        '''
        Try to download the document from the given URL and extract it.

        NOTE(review): the temporary PDF written to DOWNLOAD_TMP_DIR is never
        deleted here; the per-publication filename means re-runs overwrite
        it, but files accumulate over time — consider cleanup in a finally.

        :param publication_title: Title of the publication to check, if it's the correct document
        :param publication_id: ID of the publication. Used for the filename of the temporary stored document
        :param url: Document URL
        :return: Document meta object, references array
                 False, False if (a) the extracted document has no usable
                 title/references, (b) the title does not match, or (c) too
                 few references were found
        :raise ProcessException: Extractor failed
        :raise ExtractorNotAvaiableException: Extractor is not available
        :raise DownloadFailedException: Download failed
        :raise UnexpectedContentTypeException: File for given URL has the wrong content type
        '''
        try:
            filename = download_file_pdf(url, path=config.DOWNLOAD_TMP_DIR, name='{}_tmp.pdf'.format(publication_id))
            document_meta, references = self.extractor.extract_file(filename, completely=True)
            # Check title: reject when the extracted title is not (nearly)
            # the expected one — the URL pointed at a different paper.
            document_meta_title = document_meta['publication']['title'].lower().strip()
            if not nearly_match(document_meta_title, publication_title):
                logger.info('Wrong title! Is "%s", should "%s"' % (document_meta_title, publication_title))
                return False, False
            # Check number of references
            if len(references) < self.NUM_MINIMUM_REFERENCES:
                logger.info('Not enough references')
                return False, False
            return document_meta, references
        # Tei failed (invalid document)
        except(TeiParserNoDocumentTitle, TeiParserNoReferences) as e:
            logger.info('{}: {}'.format(type(e).__name__, str(e)))
            return False, False

    def __store_references(self, publication, references, url):
        '''
        Store the URL and the references.

        :param publication: Publication that was extracted
        :param references: References list, extracted from the document
        :param url: URL of the document that was extracted (truncated to 200 chars)
        :raise ParserRollbackError: Storage (database commit) of the references failed
        '''
        publication_url = publication.publicationurl_set.create(url=url[:200],
                                                                type=PublicationUrl.MIME_TYPE_PDF,
                                                                extraction_date=datetime.now())
        for reference in references:
            # TODO: check if paper already exists (!)
            reference['reference']['publication_id'] = publication.id
            reference['reference']['source_id'] = publication_url.id
            reference['publication']['source'] = '{}:{}'.format(reference['publication']['source'], publication_url.id)
            self.parser.parse(**reference)
        self.parser.commit()  # raises ParserRollbackError

    def __store_document_meta(self, publication, document_meta):
        '''
        Store the extracted head meta data (currently only keywords).

        :param publication: Publication object
        :param document_meta: Extracted head meta data
        '''
        if 'keywords' in document_meta:
            for keyword in document_meta['keywords']:
                keyword = normalize_string(keyword)
                # Column limit: silently skip over-long keywords (logged).
                if len(keyword) <= 100:
                    publication.publicationkeyword_set.get_or_create(name=keyword)
                else:
                    logger.info('keyword "%s" is too long' % keyword)
| 1.921875 | 2 |
webapp/web.py | thunderz99/azure_image_caption | 1 | 16870 | import sys
import os
import json
import urllib
from PIL import Image
from flask import Flask, request, redirect, url_for
from flask import send_from_directory, render_template
from werkzeug.utils import secure_filename
from datetime import datetime
from caption_service import CaptionService
from translation_service import TranslationService
sys.path.append(os.curdir) # カレントファイルをインポートするための設定
UPLOAD_FOLDER = '/tmp/uploads'
os.makedirs(UPLOAD_FOLDER, exist_ok=True)
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
app = Flask(__name__, static_url_path='/static', static_folder='assets/static')
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
cs = CaptionService()
ts = TranslationService()
def allowed_file(filename):
    """Return True if *filename* has an extension in ALLOWED_EXTENSIONS.

    The comparison is case-insensitive, so e.g. 'photo.JPG' is accepted
    (the original compared the raw extension and rejected upper-case ones).
    """
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/flask/uploader', methods=['POST'])
def upload_file():
    """Accept multi-file uploads, caption each image, return JSON results.

    NOTE(review): allowed_file() is defined in this module but never called
    here, so any file type is accepted; secure_filename() may also return an
    empty string for unusual names — both unvalidated.
    """
    # check if the post request has the file part
    # create a special subfolder for the files uploaded this time
    # to avoid overwrite
    subdir = datetime.now().strftime('%Y%m%d_%H%M%S')
    current_files_dir = os.path.join(UPLOAD_FOLDER, subdir)
    os.makedirs(current_files_dir, exist_ok=True)
    upload_files = request.files.getlist('file[]')
    ret = []
    for file in upload_files:
        image = {}
        print('filename is', file.filename)
        filename = secure_filename(file.filename)
        image['filename'] = filename
        filepath = os.path.join(current_files_dir, filename)
        print('file saving to ', filepath)
        file.save(filepath)
        # URL handled by uploaded_file() below; filename must be URL-safe.
        image['url'] = '/flask/uploads/{}/{}'.format(
            subdir, urllib.parse.quote_plus(filename))
        print('begin predict', filepath)
        caption_en, caption_ja = get_caption(filepath)
        image['result'] = caption_ja
        ret.append(image)
    return json.dumps(ret)
@app.route('/flask/uploads/<path:filepath>')
def uploaded_file(filepath):
    """Serve a previously uploaded file back from UPLOAD_FOLDER.

    *filepath* is '<timestamp-subdir>/<quoted-filename>' as produced by
    upload_file(); the filename component is percent-decoded before lookup.
    """
    print("filepath is {}".format(filepath))
    filename = os.path.basename(filepath)
    if not filename:
        return ""
    path = os.path.dirname(filepath)
    print("path is {}, filename is {}".format(path, filename))
    image_folder = os.path.join(UPLOAD_FOLDER, path)
    # send_from_directory guards against escaping image_folder.
    return send_from_directory(image_folder,
                               urllib.parse.unquote_plus(filename))
@app.route('/')
def serve_index():
    # Root path serves the SPA entry point.
    return send_from_directory('assets', 'index.html')


# NOTE(review): the defaults= mapping is ineffective here because 'filename'
# appears in the rule itself — requests must always supply it; '/' is handled
# by serve_index() above.
@app.route('/<filename>', defaults={'filename': 'index.html'})
def serve_assets(filename):
    # Serve top-level static assets (e.g. /favicon.ico) from ./assets.
    return send_from_directory('assets', filename)
def get_caption(filepath):
    """Caption the image at *filepath* and return (english, japanese) text.

    Uses the module-level CaptionService (cs) for the English caption and
    TranslationService (ts) to translate it to Japanese.
    """
    print('getting caption', filepath)
    caption_en = cs.get_caption(filepath)
    caption_ja = ts.get_translation(caption_en)
    return caption_en, caption_ja
if __name__ == '__main__':
    # BUG FIX: os.environ values are strings, so when PORT is set in the
    # environment (e.g. on a PaaS) the original passed a str port to
    # app.run(); convert explicitly to int.
    port = int(os.environ.get('PORT', 5000))
    app.run(host='0.0.0.0', port=port)
| 2.515625 | 3 |
map2loop/m2l_map_checker.py | Leguark/map2loop | 0 | 16871 | import geopandas as gpd
from shapely.geometry import LineString, Polygon,MultiLineString
import os.path
from map2loop import m2l_utils
import warnings
import numpy as np
import pandas as pd
#explodes polylines and modifies objectid for exploded parts
def explode_polylines(indf, c_l, dst_crs):
    """Explode MultiLineString rows into one row per LineString part.

    Each exploded part gets a unique object id: the original id from column
    c_l['o'] with '_<part-index>' appended (a warning is printed per part).
    Plain LineString rows are copied through unchanged.

    NOTE(review): DataFrame.append was deprecated in pandas 1.4 and removed
    in 2.0, and iterating/indexing a MultiLineString directly is deprecated
    in shapely >= 1.8 (use .geoms) — both need attention before upgrading.

    :param indf: input GeoDataFrame of (Multi)LineStrings
    :param c_l: column-name mapping dict; c_l['o'] is the object-id column
    :param dst_crs: CRS for the output GeoDataFrame
    :return: exploded GeoDataFrame
    """
    #indf = gpd.GeoDataFrame.from_file(indata)
    outdf = gpd.GeoDataFrame(columns=indf.columns, crs=dst_crs)
    for idx, row in indf.iterrows():
        if type(row.geometry) == LineString:
            outdf = outdf.append(row,ignore_index=True)
        if type(row.geometry) == MultiLineString:
            multdf = gpd.GeoDataFrame(columns=indf.columns, crs=dst_crs)
            recs = len(row.geometry)
            # Duplicate the source row once per part, then overwrite each
            # copy's geometry and id below.
            multdf = multdf.append([row]*recs,ignore_index=True)
            i=0
            for geom in range(recs):
                multdf.loc[geom,'geometry'] = row.geometry[geom]
                multdf.loc[geom,c_l['o']]=str(multdf.loc[geom,c_l['o']])+'_'+str(i)
                print('map2loop warning: Fault_'+multdf.loc[geom,c_l['o']],'is one of a set of duplicates, so renumbering')
                i=i+1
            outdf = outdf.append(multdf,ignore_index=True)
    return outdf
def check_map(structure_file,geology_file,fault_file,mindep_file,fold_file,tmp_path,bbox,c_l,dst_crs,local_paths,drift_prefix,polygo):
    """Validate, repair and clip all map2loop input layers.

    Checks the orientation, geology, fold, fault and mineral-deposit layers
    for missing files, missing or blank columns and duplicate unique IDs,
    filling repairable gaps with defaults (collected as warnings), clips the
    layers to *polygo*, explodes multi-part fault/fold polylines, and writes
    the cleaned shapefiles to *tmp_path*.

    Returns the updated (structure_file, geol_file, fault_file, mindep_file,
    fold_file, c_l) tuple; *c_l* may have been modified where column codes
    had to be added or renamed.  Raises NameError when any unrepairable
    problem was collected in the error list.
    """
    #y_point_list = [bbox[1], bbox[1], bbox[3], bbox[3], bbox[1]]
    #x_point_list = [bbox[0], bbox[2], bbox[2], bbox[0], bbox[0]]

    #bbox_geom = Polygon(zip(x_point_list, y_point_list))
    #polygo = gpd.GeoDataFrame(index=[0], crs=dst_crs, geometry=[bbox_geom])
    m2l_errors=[]
    m2l_warnings=[]
    # With local inputs every file must exist before we try to read it.
    if(local_paths):
        for file_name in (structure_file,geology_file,fault_file,mindep_file,fold_file):
            if not os.path.isfile(file_name):
                m2l_errors.append('file '+file_name+' not found')

    # Process orientation points

    if (os.path.isfile(structure_file) or not local_paths):
        orientations2 = gpd.read_file(structure_file,bbox=bbox)
        # Rename the structure-type field if it clashes with the geology
        # description field so the two can coexist after joins.
        if(c_l['sf']==c_l['ds']):
            new_code='NEW_'+c_l['sf']
            new_code=new_code[:10]
            orientations=orientations2.rename(columns={c_l['sf']:new_code}, errors="raise")
            m2l_warnings.append('To avoid conflict with geology field of same name, orientation field named "'+str(c_l['sf'])+'" renamed to "'+new_code+'"')
            c_l['sf']=new_code
        else:
            new_code=''
            orientations=orientations2.copy()
        if(c_l['bo']==c_l['ds'] and not new_code==''):
            c_l['bo']=new_code
        if(len(orientations)<2):
            m2l_errors.append('not enough orientations to complete calculations (need at least 2)')

        orientations = orientations.replace(r'^\s+$', np.nan, regex=True)
        orientations = orientations[orientations[c_l['d']]!=-999]
        for code in ('sf','d','dd','gi'):
            if not c_l[code] in orientations.columns:
                if(code=='sf'):
                    orientations[c_l[code]]='Bed'
                    m2l_warnings.append('field named "'+str(c_l[code])+'" added with default value "Bed"')
                elif(not code=='gi'):
                    m2l_errors.append('"'+c_l[code]+'" field needed')
                else:
                    m2l_warnings.append('field named "'+str(c_l[code])+'" added with default value')
                    orientations[c_l[code]] = np.arange(len(orientations))
            else:
                nans=orientations[c_l[code]].isnull().sum()
                if(nans>0):
                    m2l_warnings.append(''+str(nans)+' NaN/blank found in column "'+str(c_l[code])+'" of orientations file, replacing with 0')
                    orientations[c_l[code]].fillna("0", inplace = True)
        unique_o=set(orientations[c_l['gi']])

        if(not len(unique_o) == len(orientations)):
            m2l_warnings.append('duplicate orientation point unique IDs')
        show_metadata(orientations,"orientations layer")

    # Process geology polygons

    if (os.path.isfile(geology_file) or not local_paths):
        geology = gpd.read_file(geology_file,bbox=bbox)
        if not c_l['o'] in geology.columns:
            geology = geology.reset_index()
            geology[c_l['o']]=geology.index
        unique_g=set(geology[c_l['o']])

        if(not len(unique_g) == len(geology)):
            m2l_warnings.append('duplicate geology polygon unique IDs')

        nans=geology[c_l['c']].isnull().sum()
        if(nans>0):
            m2l_errors.append(''+str(nans)+' NaN/blank found in column "'+str(c_l['c'])+'" of geology file, please fix')

        if(c_l['g']=='No_col' or not c_l['g'] in geology.columns):
            m2l_warnings.append('No secondary strat coding for geology polygons')
            c_l['g']='group'
            geology[c_l['g']]="Top"

        geology = geology.replace(r'^\s+$', np.nan, regex=True)
        geology[c_l['g']].fillna(geology[c_l['g2']], inplace=True)
        geology[c_l['g']].fillna(geology[c_l['c']], inplace=True)

        if(c_l['r1']=='No_col' or not c_l['r1'] in geology.columns):
            m2l_warnings.append('No extra litho for geology polygons')
            c_l['r1']='r1'
            geology[c_l['r1']]='Nope'

        if(c_l['r2']=='No_col' or not c_l['r2'] in geology.columns):
            m2l_warnings.append('No more extra litho for geology polygons')
            c_l['r2']='r2'
            geology[c_l['r2']]='Nope'

        if(c_l['min']=='No_col' or not c_l['min'] in geology.columns):
            m2l_warnings.append('No min age for geology polygons')
            c_l['min']='min'
            geology[c_l['min']]=0

        if(c_l['max']=='No_col' or not c_l['max'] in geology.columns):
            m2l_warnings.append('No max age for geology polygons')
            c_l['max']='max'
            geology[c_l['max']]=100

        if(c_l['c']=='No_col' or not c_l['c'] in geology.columns):
            m2l_errors.append('Must have primary strat coding field for geology polygons')

        # Strat code fields must not contain separators that later break
        # the generated csv/graph files.
        for code in ('c','g','g2','ds','u','r1'):
            if(c_l[code] in geology.columns):
                geology[c_l[code]].str.replace(","," ")

                if(code == 'c' or code =='g' or code=='g2'):
                    geology[c_l[code]].str.replace(" ","_")
                    geology[c_l[code]].str.replace("-","_")
                    geology[c_l[code]].str.replace(",","_")

                nans=geology[c_l[code]].isnull().sum()
                if(nans>0):
                    m2l_warnings.append(''+str(nans)+' NaN/blank found in column "'+str(c_l[code])+'" of geology file, replacing with 0')
                    geology[c_l[code]].fillna("0", inplace = True)

        # Drop surficial (drift) units whose unit name starts with a prefix.
        for drift in drift_prefix:
            geology=geology[~geology[c_l['u']].str.startswith(drift)]
        show_metadata(geology,"geology layer")

    # Process fold polylines

    if (os.path.isfile(fold_file) or not local_paths):
        folds = gpd.read_file(fold_file,bbox=bbox)
        # BUG FIX: make sure folds_clip is always defined, even when the fold
        # layer is empty; previously an empty layer caused a NameError when
        # len(folds_clip) was tested in the save step below.
        folds_clip = folds
        if(len(folds)>0):
            if not c_l['o'] in folds.columns:
                folds = folds.reset_index()
                folds[c_l['o']]=folds.index
            unique_g=set(folds[c_l['o']])

            if(not len(unique_g) == len(folds)):
                m2l_warnings.append('duplicate fold polyline unique IDs')

            folds = folds.replace(r'^\s+$', np.nan, regex=True)

            for code in ('ff','t'):
                if(c_l['ff']=='No_col' or not c_l['ff'] in folds.columns):
                    m2l_warnings.append('No fold code for fold polylines')
                    c_l['ff']='ff'
                    folds[c_l['ff']]=c_l['fold']

                if(c_l['t']=='No_col' or not c_l['t'] in folds.columns):
                    m2l_warnings.append('No fold polarity for fold polylines')
                    c_l['t']='t'
                    folds[c_l['t']]='None'

                if(c_l[code] in folds.columns):
                    folds[c_l[code]].str.replace(","," ")

                    nans=folds[c_l[code]].isnull().sum()
                    if(nans>0):
                        m2l_warnings.append(''+str(nans)+' NaN/blank found in column "'+str(c_l[code])+'" of folds file, replacing with 0')
                        folds[c_l[code]].fillna("0", inplace = True)

            folds_clip=m2l_utils.clip_shp(folds,polygo)
            if(len(folds_clip) > 0):
                folds_explode = explode_polylines(folds_clip, c_l, dst_crs)
                if(len(folds_explode) > len(folds_clip)):
                    m2l_warnings.append(
                        'some folds are MultiPolyLines, and have been split')
                folds_explode.crs = dst_crs
                show_metadata(folds_clip,"fold layer")
            else:
                print('No folds in area')

    # Process fault polylines

    if (os.path.isfile(fault_file) or not local_paths):
        faults_folds = gpd.read_file(fault_file,bbox=bbox)

        faults = faults_folds[faults_folds[c_l['f']].str.contains(c_l['fault'])]
        faults = faults.replace(r'^\s+$', np.nan, regex=True)

        if not c_l['o'] in faults.columns:
            m2l_warnings.append('field named "'+str(c_l['o'])+'" added with default value')
            faults[c_l['o']] = np.arange(len(faults))

        for code in ('f','o','fdip','fdipdir','fdipest'):
            if(c_l['f']=='No_col' or not c_l['f'] in faults.columns ):
                m2l_warnings.append('No fault type for fault polylines')
                c_l['f']='ftype'
                faults[c_l['f']]=c_l['fault']

            if(c_l['fdip']=='No_col' or not c_l['fdip'] in faults.columns ):
                m2l_warnings.append('No fault dip for fault polylines')
                c_l['fdip']='fdip'
                faults[c_l['fdip']]=c_l['fdipnull']

            if(c_l['fdipdir']=='No_col' or not c_l['fdipdir'] in faults.columns ):
                m2l_warnings.append('No fault dip direction for fault polylines')
                c_l['fdipdir']='fdipdir'
                faults[c_l['fdipdir']]=0

            if(c_l['fdipest']=='No_col' or not c_l['fdipest'] in faults.columns ):
                m2l_warnings.append('No fault dip estimate for fault polylines')
                c_l['fdipest']='fdipest'
                faults[c_l['fdipest']]='None'

            if(c_l['fdipest_vals']=='No_col' or not c_l['fdipest_vals'] in faults.columns ):
                m2l_warnings.append('No fault dip estimate text for fault polylines')
                c_l['fdipest_vals']='fdipest_vals'
                faults[c_l['fdipest_vals']]='None'

            if(c_l['n']=='No_col' or not c_l['n'] in faults.columns ):
                m2l_warnings.append('No fault name for fault polylines')
                c_l['n']='fname'
                faults[c_l['n']]='None'

            if not c_l[code] in faults.columns:
                m2l_errors.append('field named "'+str(c_l[code])+'" not found in fault/fold file')

            if(c_l[code] in faults.columns):
                nans=faults[c_l[code]].isnull().sum()
                if(nans>0):
                    m2l_warnings.append(''+str(nans)+' NaN/blank found in column "'+str(c_l[code])+'" of fault file, replacing with -999')
                    faults[c_l[code]].fillna("-999", inplace = True)

        unique_f=set(faults[c_l['o']])

        if(not len(unique_f) == len(faults)):
            m2l_errors.append('duplicate fault/fold polyline unique IDs')

        faults = faults.replace(r'^\s+$', np.nan, regex=True)

        faults_clip=m2l_utils.clip_shp(faults,polygo)

        if(len(faults_clip)>0):
            faults_explode=explode_polylines(faults_clip,c_l,dst_crs)
            if(len(faults_explode)>len(faults_clip)):
                m2l_warnings.append('some faults are MultiPolyLines, and have been split')
            faults_explode.crs = dst_crs

            show_metadata(faults_explode,"fault layer")
        else:
            #fault_file='None'
            print('No faults in area')

    # Process mindep points

    if (os.path.isfile(mindep_file) or not local_paths):
        mindeps = gpd.read_file(mindep_file,bbox=bbox)
        if(len(mindeps)==0):
            m2l_warnings.append('no mindeps for analysis')
        else:
            mindeps = mindeps.replace(r'^\s+$', np.nan, regex=True)

            for code in ('msc','msn','mst','mtc','mscm','mcom'):
                if(c_l[code]=='No_col'):
                    mindeps[c_l[code]]='No_col'
                if not c_l[code] in mindeps.columns:
                    m2l_errors.append('field named "'+str(c_l[code])+'" not found in mineral deposits file')
                else:
                    nans=mindeps[c_l[code]].isnull().sum()
                    if(nans>0):
                        m2l_warnings.append(str(nans)+' NaN/blank found in column '+str(c_l[code])+' of mindep file, replacing with 0')
                        mindeps[c_l[code]].fillna("0", inplace = True)

            show_metadata(mindeps,"mindeps layer")

        # explode fault/fold multipolylines
        # sometimes faults go off map and come back in again which after clipping creates multipolylines

    # Report everything that was collected; errors are fatal.
    if(len(m2l_warnings)>0):
        print("\nWarnings:")
        warnings.warn('The warnings listed above were issued')
        for w in m2l_warnings:
            print("    ",w)
    if(len(m2l_errors)>0):
        print("\nErrors:")
        warnings.warn('The errors listed above must be fixed prior to rerunning map2loop')
        for e in m2l_errors:
            print("    ",e)
        raise NameError('map2loop error: Fix errors before running again')

    # Save the cleaned / clipped layers to tmp_path and return their paths.
    if(len(m2l_errors)==0):
        if(len(folds_clip)>0):
            fold_file=tmp_path+'folds_clip.shp'
            folds_explode=folds_explode.dropna(subset=['geometry'])
            folds_explode.to_file(fold_file)
        else:
            # NOTE(review): 'fold_clip.shp' (singular) kept as in the
            # original; the file is never written in this branch.
            fold_file=tmp_path+'fold_clip.shp'
            print("\nFold layer metadata\n--------------------")
            print("No folds found")

        if(len(faults_clip)>0):
            fault_file=tmp_path+'faults_clip.shp'
            faults_explode.crs=dst_crs
            faults_explode=faults_explode.dropna(subset=['geometry'])
            faults_explode.to_file(fault_file)
        else:
            fault_file=tmp_path+'faults_clip.shp'
            print("\nFault layer metadata\n--------------------")
            print("No faults found")

        geol_clip=gpd.overlay(geology, polygo, how='intersection')
        if(len(geol_clip)>0):
            geol_clip.crs=dst_crs
            geol_file=tmp_path+'geol_clip.shp'
            geol_clip.to_file(geol_file)

        if(len(orientations)>0):
            structure_file=tmp_path+'structure_clip.shp'
            orientations.crs=dst_crs
            orientations[c_l['dd']] = pd.to_numeric(orientations[c_l['dd']])
            orientations[c_l['d']] = pd.to_numeric(orientations[c_l['d']])
            orientations.to_file(structure_file)

        if(len(mindeps)>0):
            mindep_file=tmp_path+'mindeps_clip.shp'
            mindeps.crs=dst_crs
            mindeps.to_file(mindep_file)
        print('\nNo errors found, clipped and updated files saved to tmp')

        return(structure_file,geol_file,fault_file,mindep_file,fold_file,c_l)
def show_metadata(gdf, name):
    """Print a short summary (bounds, CRS, row count, geometry types) of *gdf*."""
    print("\n", name, " metadata\n--------------------")
    if len(gdf) > 0:
        print("    bbox", gdf.total_bounds)
        print("    CRS", gdf.crs)
        print("    # items", len(gdf))
        geom_types = []
        for _, feature in gdf.iterrows():
            if feature.geometry.type not in geom_types:
                geom_types.append(feature.geometry.type)
        print("    Data types", geom_types)
    else:
        print("    empty file, check contents")
print(" empty file, check contents") | 2.40625 | 2 |
DeepBrainSeg/readers/nib.py | JasperHG90/DeepBrainSeg | 130 | 16872 | <reponame>JasperHG90/DeepBrainSeg
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# author: <NAME>
# contact: <EMAIL>
# MIT License
# Copyright (c) 2020 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import tempfile
from time import time
import datetime
import numpy as np
import nibabel as nib
class nib_loader(object):
    """Thin wrapper around nibabel for loading and saving NIfTI volumes.

    The affine of the last loaded volume is remembered so that a
    subsequently written volume reuses the same spatial transform.
    """
    def __init__(self):
        pass

    def load_vol(self, path):
        """Load the NIfTI volume at *path*.

        path : patient data path
        returns numpy array of patient data

        Side effect: stores the loaded image and its affine on self for a
        later write_vol call.
        NOTE(review): nibabel's get_data() is deprecated in newer releases —
        confirm the pinned nibabel version before switching to get_fdata(),
        which changes the returned dtype.
        """
        self.patient = nib.load(path)
        self.affine  = self.patient.affine
        return self.patient.get_data()

    def write_vol(self, path, volume):
        """Save *volume* (cast to uint8) to *path* using the stored affine.

        path : path to write the data
        volume : modified volume
        return: True on success, False otherwise
        """
        try:
            volume = np.uint8(volume)
            volume = nib.Nifti1Image(volume, self.affine)
            volume.set_data_dtype(np.uint8)
            nib.save(volume, path)
            return True
        # FIX: a bare `except:` also swallowed KeyboardInterrupt/SystemExit;
        # only genuine runtime failures should map to the False return.
        except Exception:
            return False
| 2.046875 | 2 |
tensorflow/intro/main.py | donutloop/machine_learning_examples | 1 | 16873 | import os
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
x1 = tf.constant(5)
x2 = tf.constant(6)
result = tf.multiply(x1, x2)
print(result)
sess = tf.Session()
with tf.Session() as sess:
output = sess.run(result)
print(output)
| 2.640625 | 3 |
twitter-bots/auto_liker.py | debasish-dutta/Python-projects | 0 | 16874 | import auth_key
import tweepy
import time
auth = tweepy.OAuthHandler(auth_key.API_key, auth_key.API_secret_key)
auth.set_access_token(auth_key.Access_token, auth_key.Access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
user = api.me()
indId = 2282863
india_trend = api.trends_place(indId)
tweetNo = 5
a =[]
trndInd = api.trends_place(indId)
for trend in trndInd[0]['trends']:
a.append(trend['name'])
for item in a:
print(item)
for tweet in tweepy.Cursor(api.search, item).items(tweetNo):
try:
print("tweet liked & retweeted")
tweet.favorite()
tweet.retweet()
time.sleep(10)
except tweepy.TweepError as e:
print(e.reason)
except StopIteration:
break
| 2.703125 | 3 |
ds_discovery/engines/distributed_mesh/domain_products/controller/src/controller.py | project-hadron/discovery-transition-ds | 2 | 16875 | from ds_discovery import Controller
import os
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=DeprecationWarning)

__author__ = '<NAME>'


def domain_controller():
    """Build a Controller from the environment and run its controller loop.

    All configuration comes from HADRON_* environment variables; unset
    variables fall back to None and the Controller defaults.
    """
    # Controller
    repo = os.environ.get('HADRON_PM_REPO', None)
    controller = Controller.from_env(uri_pm_repo=repo, default_save=False, has_contract=True)
    controller.run_controller(
        run_book=os.environ.get('HADRON_CONTROLLER_RUNBOOK', None),
        repeat=os.environ.get('HADRON_CONTROLLER_REPEAT', None),
        sleep=os.environ.get('HADRON_CONTROLLER_SLEEP', None),
    )


if __name__ == '__main__':
    domain_controller()
| 1.960938 | 2 |
utils_test.py | lostsquirrel/words | 0 | 16876 | import json
import unittest
from utils import CustomEncoder, Paging, ValidationError, generate_uuid, Validator
class UtilsTest(unittest.TestCase):
    """Unit tests for the helpers in utils (uuid, Validator, Paging, encoder)."""

    def test_uuid(self):
        # A generated uuid is the 32-char hex form (no dashes).
        self.assertEqual(len(generate_uuid()), 32)

    def test_valiate(self):
        # NOTE: method name typo kept so existing test selections still match.
        form = dict(
            a=1,
            b=2,
            c=3
        )
        v = Validator().rule("a").rule("b").rule("c").rule("d", False, 4)
        _a, _b, _c, _d = v.validate_form(form)
        self.assertEqual(_a, 1)
        self.assertEqual(_b, 2)
        self.assertEqual(_c, 3)
        # 'd' is optional and absent, so the default 4 is returned.
        self.assertEqual(_d, 4)

    def test_validate_none_form(self):
        v = Validator().rule("page", False, 1).rule("per_page", False, 10)
        page, per_page = v.validate_form(None)
        self.assertEqual(page, 1)
        self.assertEqual(per_page, 10)

    def test_validate_none_form_required(self):
        # FIX: the original caught ValidationError and merely printed it, so
        # the test silently passed even when no error was raised at all.
        v = Validator().rule("page")
        with self.assertRaises(ValidationError):
            v.validate_form(None)
        with self.assertRaises(ValidationError):
            v.validate_form(dict(size=2))

    def test_extend(self):
        # list.extend(None) must raise TypeError (None is not iterable).
        with self.assertRaises(TypeError):
            [].extend(None)

    def test_paging(self):
        # Paging instances must be JSON-serialisable via their __dict__.
        p = Paging(101, 1, 10)
        json.dumps(p.__dict__)

    def test_json_encode(self):
        # CustomEncoder must be able to encode a Paging object directly.
        p = Paging(101, 1, 10)
        CustomEncoder().encode(p)
| 2.640625 | 3 |
src/pyclean/cli.py | uranusjr/pyclean-py | 0 | 16877 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import logging
import os
import sys
from . import entries, meta
logger = logging.getLogger(__name__)


def build_parser():
    """Create the command-line parser for the pyclean tool.

    The program name is normalised to "pyclean" unless the script was
    actually invoked under one of its canonical names.
    """
    prog_name = os.path.basename(sys.argv[0])
    if prog_name not in ("pyclean", "pyclean.py"):
        prog_name = "pyclean"
    parser = argparse.ArgumentParser(prog=prog_name)
    parser.add_argument("entries", nargs="+", metavar="DIR_OR_FILE")
    parser.add_argument(
        "-v", "--verbose",
        dest="verbose", action="store_true", help="be verbose",
    )
    parser.add_argument(
        "--version", action="version",
        version="%(prog)s, version {}".format(meta.__version__),
    )
    return parser
def parse_args(argv):
    """Parse *argv* (or sys.argv when None) into an options namespace."""
    return build_parser().parse_args(argv)
def setup_logging(options):
    """Configure root logging from the parsed *options*.

    Verbose mode enables DEBUG with a detailed location format; otherwise
    INFO with bare messages.
    """
    if options.verbose:
        level = logging.DEBUG
        form = "%(levelname).1s: %(module)s:%(lineno)d: %(message)s"
    else:
        level = logging.INFO
        form = "%(message)s"
    logging.root.setLevel(level)
    logging.basicConfig(format=form)
def main(argv=None):
    """Entry point: parse arguments, configure logging, clean the entries."""
    opts = parse_args(argv)
    setup_logging(opts)
    if opts.verbose:
        logger.debug("options: %s", opts.__dict__)
    entries.clean(opts.entries)


if __name__ == '__main__':
    main()
| 2.46875 | 2 |
app.py | ZhongxuanWang/simple_web_remainder-python | 0 | 16878 | from flask import Flask, render_template, url_for, redirect, request
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
from dateutil.relativedelta import relativedelta
from demail import demail
__author__ = '<NAME>'
__doc__ = 'Never Forget online remainder'

# Flask application backed by a local SQLite database.
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///list.db'

# Remember, every time you make changes to the column (such as adding one col or removing one col, change the value),
# you have to do the following: open terminal from pycharm, python3.7, from app import db, db.create_all() and exit.
db = SQLAlchemy(app)
# NOTE(review): create_all() runs before the TODO model below is declared,
# so at this point no model tables exist yet — confirm it is intended to rely
# on the manual shell procedure described above.
db.create_all()

# Format used for all due/creation timestamps stored as strings.
datetime_format = '%b-%d-%Y %H:%M'

'''
This part requires your email information in order to receive email notifications. (This is left blank intentionally)
'''
email_account = ''
email_password = ''
# TODO send email warning if the due time is so soon and still incomplete,
class TODO(db.Model):
    """A single reminder task with a due time and optional e-mail warning."""

    id = db.Column(db.Integer, primary_key=True)
    content = db.Column(db.String(500), nullable=False)
    # Kept for backward compatibility; this value is frozen at import time.
    time_created_str = datetime.now().strftime("%B-%d-%Y %H:%M:%S")
    # BUG FIX: the default must be a *callable* so each row records its own
    # creation time; a plain string default stamped every row with the
    # application's start-up time.
    time_created = db.Column(
        db.String,
        default=lambda: datetime.now().strftime("%B-%d-%Y %H:%M:%S"))
    time_due = db.Column(db.String(500), nullable=False)

    # By default, the email warning is disabled
    email_warning = db.Column(db.Integer, default=0)

    def __repr__(self):
        # BUG FIX: __repr__ must return a string; returning the raw int id
        # raised TypeError whenever a task was printed or interpolated.
        return str(self.id)

    def __str__(self):
        return self.__repr__()

    def get_time_color(self):
        """Map the remaining time before due to a colour hex string.

        As the deadline approaches (under ~12 h) an e-mail notification is
        also triggered via send_email.
        """
        time_dif = self.get_time_difference()
        if time_dif['days'] < 0 or time_dif['seconds'] < 0:
            return 'black'
        elif time_dif['days'] > 30:
            return "#0000ff"
        elif time_dif['days'] > 7:
            return "#0080ff"
        elif time_dif['days'] > 2:
            return '#00ff00'
        elif time_dif['days'] >= 1:
            return '#bfff00'
        # >Half day
        elif time_dif['seconds'] >= 43200:
            return "#ffff00"
        # >3h
        elif time_dif['seconds'] >= 10800:
            send_email(self)
            return "#ffbf00"
        # >1h
        elif time_dif['seconds'] >= 3600:
            send_email(self)
            return "#ff8000"
        else:
            send_email(self)
            return "#ff0000"

    def get_time_difference(self):
        """Return {'days', 'seconds'} remaining until this task's due time."""
        return get_time_difference(datetime.strptime(self.time_due.__str__(), datetime_format))
def get_time(**time):
    """Return 'now' shifted by the given offsets, formatted as datetime_format.

    Accepted keyword offsets: hour, minute, day, month, year; any omitted
    offset defaults to 0.
    """
    # Fill in missing units with 0 (resolves the old TODO about simplifying
    # the membership-test loop).
    for unit in ('hour', 'minute', 'day', 'month', 'year'):
        time.setdefault(unit, 0)
    shifted = datetime.now() + relativedelta(hours=time['hour'], minutes=time['minute'], days=time['day'],
                                             months=time['month'], years=time['year'])
    return shifted.strftime(datetime_format)
def get_time_difference(time):
    """Return the delta from now to *time* as a {'days', 'seconds'} dict.

    'days' may be negative when *time* is in the past; 'seconds' is the
    sub-day remainder (always 0..86399).
    """
    delta = time - datetime.now().replace(microsecond=0)
    return {'days': delta.days, 'seconds': delta.seconds}
@app.route('/', methods=['GET', 'POST'])
def index():
    """Render the task list; POST is redirected away, other verbs rejected."""
    if request.method == 'GET':
        tasks = TODO.query.order_by(TODO.time_created).all()
        now_str = datetime.now().strftime(datetime_format)
        return render_template("index.html", tasks=tasks, mintime=now_str,
                               maxtime=get_time(year=100),
                               display_time=get_time(hour=3))
    if request.method == 'POST':
        # NOTE(review): this redirects to a template-like path rather than
        # rendering it — confirm whether render_template was intended.
        return redirect('issues/404.html')
    return "Invalid method: " + request.method
@app.route('/addTask/<content>/<due_date>', methods=['POST'])
def addTask(content, due_date):
    """Create a new task from URL parts and redirect back to the index.

    The due date is validated against datetime_format; an invalid date is
    reported on stdout but the task is still stored (original behaviour).
    """
    if request.method == 'POST':
        try:
            datetime.strptime(due_date, datetime_format)
        # FIX: only strptime's ValueError should be treated as a bad format;
        # the bare `except:` also masked unrelated failures.
        except ValueError:
            print("The time is not in correct format")
        task = TODO(content=content, time_due=due_date)
        # Add to database
        try:
            db.session.add(task)
            db.session.commit()
            return redirect('/')
        except Exception:
            # FIX: previously this path returned None (HTTP 500 with no body);
            # surface the failure to the user instead.
            print("Unable to add the task")
            return render_template('issues/unable_to.html', issue='add the task')
    else:
        return render_template('issues/unable_to.html', issue="method not applicable")
@app.route('/editTask/<int:tid>/<content>/<due_date>/<email_warning>', methods=['POST'])
def editTask(tid, content, due_date, email_warning):
    """Update an existing task's fields and redirect back to the index."""
    task = TODO.query.get_or_404(tid)

    # Accessing through form in edit
    task.content = content
    task.time_due = due_date
    task.email_warning = email_warning

    try:
        db.session.commit()
        return redirect('/')
    except Exception:
        # FIX: the bare `except:` fell through and returned None (HTTP 500);
        # narrow the exception and return an explicit error page.
        print("Unable to edit the task")
        return render_template('issues/unable_to.html', issue='edit the task')
@app.route('/editTask/<int:tid>', methods=['GET'])
def edit_task_jump(tid):
    """Render the edit form for the task with id *tid* (404 if missing)."""
    task = TODO.query.get_or_404(tid)
    return render_template('edit.html', task=task, maxtime=get_time(year=100))
@app.route('/cmTask/<int:tid>', methods=['GET'])
def cmTask(tid):
    """Mark task *tid* as complete by deleting it, then return to the index."""
    if request.method == 'GET':
        task = TODO.query.get_or_404(tid)
        try:
            db.session.delete(task)
            db.session.commit()
            return redirect('/')
        # FIX: narrowed from a bare `except:` so programming errors and
        # interrupts are no longer silently converted to an error page.
        except Exception:
            return render_template('issues/unable_to.html', issue='complete the task')
    else:
        return render_template('issues/unable_to.html', issue="method not applicable")
@app.route('/setting/<email_add>', methods=['POST'])
def setting(email_add):
    """Persist the notification e-mail address to the config file."""
    write_file('email.cfg', email_add)
    return ''
@app.route('/setting/', methods=['GET'])
def setting_redirect():
    """Render the settings page pre-filled with the stored e-mail address."""
    stored_email = read_file('email.cfg')
    return render_template('setting.html', email=stored_email)
def read_file(filename):
    """Return the first line of *filename*.

    If the file cannot be read it is (re)created with a placeholder default
    address, and that default is returned.
    """
    try:
        with open(filename) as f:
            return f.readline()
    except IOError:
        print("IO ERROR Raised. Reading file failed,")
        # BUG FIX: the original wrote '<EMAIL>' to the file but then returned
        # the unrelated literal 'content'; return what was actually written.
        # Also use a with-block so the handle is closed even on write errors.
        default = '<EMAIL>'
        with open(filename, "w") as f:
            f.write(default)
        return default
def write_file(filename, file_content):
    """Overwrite *filename* with *file_content*; always returns ''.

    I/O failures are reported on stdout and otherwise ignored.
    """
    try:
        with open(filename, 'w') as fh:
            fh.write(file_content)
    except IOError:
        print("IO ERROR Raised. Writing file failed,")
    return ''
def send_email(todo_object):
    # Intentionally a no-op: the e-mail code below is shipped disabled and
    # must be enabled by hand after filling in email_account/email_password
    # at the top of this module.
    pass
    # THIS FUNCTION MUST BE ENABLED MANUALLY
    # THIS FUNCTION MUST BE ENABLED MANUALLY
    # THIS FUNCTION MUST BE ENABLED MANUALLY
    # assert isinstance(todo_object, TODO)
    # sendto = read_file('email.cfg')
    # email_obj = demail(email_account, email_password, sendto)
    # email_content = f'''
    # Subject: Your task is about to due
    # Hello, this is automatic remainder that reminds you your task {todo_object.content} will due soon''' + '''
    # ({todo_object.get_time_difference()['days']}days and {todo_object.get_time_difference()['seconds']} seconds) '''
    # email_obj.send(email_content)
    # return ''


if __name__ == '__main__':
    app.run(debug=False)
| 2.75 | 3 |
homeassistant/components/solaredge/__init__.py | DavidDeSloovere/core | 4 | 16879 | """The solaredge integration."""
from __future__ import annotations
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from .const import DOMAIN
CONFIG_SCHEMA = cv.deprecated(DOMAIN)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up the solaredge config entry by forwarding it to the sensor platform."""
    forward = hass.config_entries.async_forward_entry_setup(entry, "sensor")
    hass.async_create_task(forward)
    return True
| 1.648438 | 2 |
main.py | eteq/door_beeper | 0 | 16880 | <gh_stars>0
import uos
import utime
import machine
from machine import Pin, PWM
import utils
# Built-in defaults; each key becomes a module-level global below.
default_config = dict(
    sleep_time_ms = 250,
    freezer_delay_ms = 1000,
    fridge_delay_ms = 1000,
    write_battery_voltage = True,
    piezo_plus_pin_num = 12,
    piezo_min_pin_num = 33,
    freezer_switch_pin_num = 23,
    fridge_switch_pin_num = 21
)

# Overlay config.py values (if present) on top of the defaults.
# NOTE(review): execfile is not a Python 3 builtin — this relies on the
# MicroPython port providing it; confirm on the target firmware.
try:
    config_dct = {}
    execfile('config.py', config_dct)
except Exception as e:
    print("Could not run config file, using defaults:", default_config, '. File error:')
    print(e)
    globals().update(default_config)
else:
    # Promote each known setting to a global, preferring the config file's
    # value and falling back to the default.
    for varnm in default_config.keys():
        if varnm in config_dct:
            globals()[varnm] = config_dct[varnm]
            print('Loaded config value for', varnm, ':', config_dct[varnm])
        else:
            globals()[varnm] = default_config[varnm]
            print('Using default config value for', varnm, ':', default_config[varnm])
# setup pins
led_pin = Pin(13, Pin.OUT)
piezo_min_pin = Pin(piezo_min_pin_num, Pin.OUT)
# Door switches are pulled up: reads 1 when the door is open.
freezer_switch_pin = Pin(freezer_switch_pin_num, Pin.IN, Pin.PULL_UP)
fridge_switch_pin = Pin(fridge_switch_pin_num, Pin.IN, Pin.PULL_UP)

#set initial state of pins
piezo_min_pin.value(0)
led_pin.value(0)

# set up PWM
# The PWM is created once and immediately de-initialised; the alarm code
# re-enables it when a tone is needed.
piezo_plus_pwm = PWM(Pin(piezo_plus_pin_num), duty=512)
piezo_plus_pwm.deinit()

# how often to write out the battery status. None means don't do it at all
battery_time_spacing_secs = 600
def check_open(pin, name, open_times_dct, piezo_args, delay_for_alarm_ms):
    """Poll one door switch and sound the alarm if it stays open too long.

    pin: pulled-up switch pin (reads 1 when the door is open).
    name: label used for prints and as the key into open_times_dct.
    open_times_dct: maps name -> ticks_ms when the door was first seen open,
        or None while closed (mutated in place).
    piezo_args: extra arguments forwarded to utils.piezo_multitone.
    delay_for_alarm_ms: grace period before the alarm sounds.
    """
    led_pin.value(0)
    if pin.value() == 1:
        print(name, 'open...')
        led_pin.value(1)
        # BUG FIX: use the open_times_dct parameter instead of reaching for
        # the global open_times dict — the parameter was silently ignored.
        if open_times_dct[name] is None:
            open_times_dct[name] = utime.ticks_ms()
        else:
            # ticks_diff handles the tick counter wrapping around.
            dt = utime.ticks_diff(utime.ticks_ms(), open_times_dct[name])
            if dt > delay_for_alarm_ms:
                print(name, 'has been open for more than', delay_for_alarm_ms, 'ms!')
                utils.piezo_multitone(piezo_plus_pwm, *piezo_args)
    else:
        if open_times_dct[name] is not None:
            print(name, 'closed.')
        open_times_dct[name] = None
# Per-door timestamps of when each door was first seen open (None = closed).
last_battery_time = None
open_times = {'Freezer': None, 'Fridge': None}
# Main polling loop: check both doors, then optionally log battery voltage.
while True:
    check_open(freezer_switch_pin, 'Freezer', open_times, ([1300,1000], 10, 500), freezer_delay_ms)
    check_open(fridge_switch_pin, 'Fridge', open_times, ([1200,900], 10, 500), fridge_delay_ms)

    utime.sleep_ms(sleep_time_ms)

    # write out battery status if desired
    if battery_time_spacing_secs is not None:
        if last_battery_time is None:
            last_battery_time = utime.time()
        else:
            if (utime.time() - last_battery_time) > battery_time_spacing_secs:
                voltage = utils.read_battery_voltage()
                print('Battery level:', voltage, 'V')
                if write_battery_voltage:
                    # Append "<epoch> <voltage>" records to the log file.
                    with open('battery_voltage', 'a') as f:
                        f.write(str(utime.time()))
                        f.write(' ')
                        f.write(str(voltage))
                        f.write('\n')
                last_battery_time = utime.time()
| 2.390625 | 2 |
modules/backend.py | Uncle-Yuanl/model_zoo | 0 | 16881 | import os, sys
from distutils.util import strtobool
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.python.util import nest, tf_inspect
from tensorflow.python.eager import tape
# from tensorflow.python.ops.custom_gradient import graph_mode_decorator
# Whether gradient recomputation (checkpointing) is enabled, controlled by
# the RECOMPUTE environment variable (default off).
do_recompute = strtobool(os.environ.get('RECOMPUTE', '0'))
# Background: https://zhuanlan.zhihu.com/p/349492378
# Paper: https://arxiv.53yu.com/pdf/1606.08415.pdf
def gelu_erf(x):
    """Exact GELU activation computed directly from the error function."""
    # np.sqrt yields a float64 constant (numpy defaults to 64-bit, tf to 32).
    return 0.5 * x * (1.0 + tf.math.erf(x / np.sqrt(2.0)))
def gelu_tanh(x):
    """Tanh-based approximation of the GELU activation."""
    inner = np.sqrt(2 / np.pi) * (x + 0.044715 * K.pow(x, 3))
    phi = 0.5 * (1 + K.tanh(inner))
    return x * phi
def set_gelu(version):
    """Register the chosen GELU implementation ('erf' or 'tanh') under 'gelu'."""
    version = version.lower()
    assert version in ['erf', 'tanh'], 'gelu version must in erf or tanh'
    impl = gelu_erf if version == 'erf' else gelu_tanh
    tf.keras.utils.get_custom_objects()['gelu'] = impl
def align(tensor, axes, ndim=None):
    """Realign *tensor* to a new rank (a batched expand_dims).

    The i-th axis of *tensor* becomes axis ``axes[i]`` of the result; every
    other axis of the *ndim*-dimensional result is a new singleton axis.
    E.g. a (3, 4) tensor with axes=[0, -1] and ndim=3 becomes (3, 1, 4).
    """
    assert len(axes) == K.ndim(tensor)
    target_rank = ndim or max(axes)
    index = [None for _ in range(target_rank)]
    for axis in axes:
        index[axis] = slice(None)
    return tensor[index]
def sequence_masking(x, mask, value=0, axis=None):
    """Mask the sequence dimension of *x* conditionally.

    Parameters
    ----------
    x : tensor
        Input tensor.
    mask : tensor
        0/1 matrix shaped (batch_size, seq_len); 1 keeps a position,
        0 replaces it with *value*.
    value : float or str
        Replacement for masked positions; the strings 'inf' and '-inf'
        map to the module's configured infinity constant.
    axis : int
        Axis of the sequence dimension (defaults to 1; must be > 0).
    """
    if mask is None:
        return x
    # Remember x's dtype so boolean inputs can be restored at the end;
    # bool does not support the arithmetic below.
    x_dtype = K.dtype(x)
    if x_dtype == 'bool':
        x = K.cast(x, 'int32')
    # The mask must share x's dtype for the blend arithmetic.
    if K.dtype(mask) != K.dtype(x):
        mask = K.cast(mask, K.dtype(x))
    # BUG FIX: K.infinity is registered as a *function* at the bottom of this
    # module (K.infinity = infinity), so it must be called to obtain the
    # numeric constant; negating/casting the function object raised TypeError.
    if value == '-inf':
        value = -K.infinity()
    if value == 'inf':
        value = K.infinity()
    value = K.cast(value, K.dtype(x))
    # Normalise axis to a positive index.
    if axis is None:
        axis = 1
    if axis < 0:
        axis = K.ndim(x) + axis
    assert axis > 0, 'axis must be greater than 0'
    # Broadcast mask up to x's rank: reserve dim 0 for batch_size, insert
    # singleton dims before the sequence axis, then pad trailing dims.
    for _ in range(axis - 1):
        mask = K.expand_dims(mask, 1)
    for _ in range(K.ndim(x) - K.ndim(mask)):
        mask = K.expand_dims(mask, K.ndim(mask))
    # Blend: keep x where mask==1, substitute value where mask==0.
    x = x * mask + value * (1 - mask)
    # Restore the original dtype for boolean inputs.
    if x_dtype == 'bool':
        x = K.cast(x, x_dtype)
    return x
def recompute_grad(call):
    """Recompute-gradient decorator for a Keras layer's ``call`` method.

    Saves memory by not recording the forward pass on the gradient tape and
    instead re-running ``call`` during backprop (gradient checkpointing).
    Paper: https://arxiv.org/abs/1604.06174

    Only active when the RECOMPUTE environment flag is set (see
    ``do_recompute`` above); otherwise the original ``call`` is returned
    unchanged.
    """
    if not do_recompute:
        return call

    def inner(self, inputs, **kwargs):
        # In TF 2.x tf.nest.flatten does not flatten numpy arrays or tensors
        # themselves, only the surrounding structure.
        flat_inputs = nest.flatten(inputs)
        call_args = tf_inspect.getfullargspec(call).args
        # Drop kwargs that the wrapped call signature does not accept.
        for key in ['mask', 'training']:
            if key not in call_args and key in kwargs:
                del kwargs[key]

        def kernel_call():
            """Run the forward computation."""
            return call(self, inputs, **kwargs)

        def call_and_grad(*inputs):
            """Run the forward pass untaped and build its custom grad_fn."""
            # stop_recording keeps the forward pass off the tape so its
            # intermediates are not retained.
            with tape.stop_recording():
                outputs = kernel_call()
                outputs = tf.identity(outputs)

            def grad_fn(doutputs, variables=None):
                # Re-run the forward pass under a fresh tape to obtain
                # gradients w.r.t. both inputs and variables.
                watches = list(inputs)
                if variables is not None:
                    watches += list(variables)
                with tf.GradientTape() as t:
                    t.watch(watches)
                    with tf.control_dependencies([doutputs]):
                        outputs = kernel_call()
                grads = t.gradient(
                    outputs, watches, output_gradients=[doutputs]
                )
                del t
                # Split gradients back into (input grads, variable grads).
                return grads[:len(inputs)], grads[len(inputs):]

            return outputs, grad_fn

        outputs, grad_fn = call_and_grad(*flat_inputs)
        flat_outputs = nest.flatten(outputs)

        def actual_grad_fn(*doutputs):
            grads = grad_fn(*doutputs, variables=self.trainable_weights)
            return grads[0] + grads[1]

        # Manually record the op so the outer tape invokes actual_grad_fn
        # during backprop.
        watches = flat_inputs + self.trainable_weights
        watches = [tf.convert_to_tensor(x) for x in watches]
        tape.record_operation(
            call.__name__, flat_outputs, watches, actual_grad_fn
        )
        return outputs

    return inner
def infinity():
    """Return the value currently used to represent infinity (default 1e12)."""
    registry = tf.keras.utils.get_custom_objects()
    return registry.get('infinity', 1e12)
def set_infinity(value):
    """Override the value used to represent infinity."""
    registry = tf.keras.utils.get_custom_objects()
    registry['infinity'] = value
# Attach the helpers to keras.backend so they can be used like K.epsilon(),
# and make the patched module importable under its usual name.
K.infinity = infinity
K.set_infinity = set_infinity
sys.modules['tensorflow.keras.backend'] = K

# Register the GELU variants with Keras ('gelu' defaults to the exact erf form).
custom_objects = {
    'gelu_erf': gelu_erf,
    'gelu_tanh': gelu_tanh,
    'gelu': gelu_erf,
}

tf.keras.utils.get_custom_objects().update(custom_objects)

if __name__ == '__main__':
    # Run the embedded doctests (e.g. in align) when executed directly.
    import doctest
    doctest.testmod()
| 2.203125 | 2 |
apod_daily.py | gultugaydemir/apod_daily | 0 | 16882 | import datetime
import os
import requests
import tweepy
from PIL import Image
# Get your own keys from developer.twitter.com
# You can find a detailed tutorial about authenticating accounts from github.com/gultugaydemir/Twitter_OAuth1.0a
consumer_key = ''
consumer_secret = ''
access_token = ''
access_token_secret = ''
# Authenticate against the Twitter API with OAuth 1.0a.
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# You can get your own API key from api.nasa.gov. However simply writing "DEMO_KEY" works too, as it can be seen on the website.
response = requests.get("https://api.nasa.gov/planetary/apod?api_key=DEMO_KEY") #This link contains the data we needed about the photo of the day.
data = response.json() # Converts the data to JSON format so that we can retrieve data from it.
description = data["title"] # Getting the title of the photo.
date = datetime.datetime.now().strftime("%y%m%d") # We need the {yymmdd} format for the source link.
source = "https://apod.nasa.gov/apod/ap{date}.html".format(date=date) # Creating the source link for the posted photo.
message = '"' + description + '" \n' + source # The status format for the image tweets.
message_video = '"' + description + '" \n' # The status format for the YouTube tweets.
try:
    image = data["hdurl"] # The image URL from API.
except KeyError: # Code throws KeyError if a video is posted that day, since API doesn't include a "hdurl" element.
    # APOD posted a video instead of an image: convert the embed URL into a
    # normal YouTube watch URL, tweet it as text, and stop the script here
    # (quit() prevents the image-tweeting code below from running).
    image = data["url"]
    image = image.replace("embed/", "watch?v=")
    api.update_status(status = message_video+ source + ' \n'+ image) # Bot only tweets the YouTube link and not a picture.
    print("Video tweeted successfully.")
    quit()
# Tweepy's "update_with_media" function only allows us to tweet an image from the local directory.
# Since posting the picture from a URL would be more practical, I'm using a function that will complete this step for me automatically.
def tweet_image(url, message):
    """Download the image at *url* to a local file and tweet it with *message*.

    Tweepy's ``update_with_media`` only accepts a local file path, so the
    image is first streamed to disk. If Twitter rejects the upload (e.g. the
    file exceeds the media size limit), the image is shrunk marginally and
    the upload retried until it succeeds.
    """
    photo = 'photo.jpg'
    # Stream the download to disk chunk-by-chunk instead of buffering the
    # whole image in memory; the file handle is closed via the context manager.
    request = requests.get(url, stream=True)
    if request.status_code == 200:
        with open(photo, 'wb') as media:
            for chunk in request.iter_content(chunk_size=8192):
                media.write(chunk)
    tweeted = False
    while not tweeted:
        try:
            api.update_with_media(photo, status=message)
            print("Image tweeted successfully.")
            tweeted = True
        except tweepy.error.TweepError:
            # Upload rejected -- most likely the file is too large. Shrink the
            # image very slightly (the tiny factor trims ~1px per dimension per
            # attempt) and retry. Context manager closes the image handle,
            # fixing the handle leak in the previous implementation.
            print("Resizing image...")
            with Image.open(photo) as im:
                width, height = im.size
                im_resize = im.resize(
                    (int(width * 0.99999999999), int(height * 0.99999999999)),
                    Image.ANTIALIAS)
                im_resize.save(photo)
# Image day: download and tweet the APOD picture (only reached when the
# video branch above did not call quit()).
tweet_image(image, message) # Tweeting the picture with the status. Image URL and the status message are used as parameters.
| 3.578125 | 4 |
datasets/dad.py | LivingSkyTechnologies/Document_Layout_Segmentation | 4 | 16883 | import pickle
import os
import tensorflow as tf
from glob import glob
import utils.DataLoaderUtils as dlu
from utils.AnnotationUtils import write_dad_masks
# Static Dataset Config Options
# Full set of tag (class) names that can appear in the DAD annotation JSONs.
TAG_NAMES = {'highlights',
             'urls_to_supplementary',
             'abbreviation',
             'abstract',
             'additional_file',
             'affiliation',
             'appendice',
             'author_bio',
             'author_contribution',
             'author_name',
             'availability_of_data',
             'caption',
             'conflict_int',
             'contact_info',
             'copyright',
             'core_text',
             'date',
             'doi',
             'figure',
             'funding_info',
             'index',
             'keywords',
             'list',
             'math_formula',
             'note',
             'publisher_note',
             'reference',
             'section_heading',
             'subheading',
             'table',
             'title',
             'nomenclature',
             'code',
             'publisher',
             'journal',
             'corresponding_author',
             'editor',
             'ethics',
             'consent_publication',
             'MSC',
             'article_history',
             'acknowledgment',
             'background'}
# Tags that are collapsed into coarser classes before mask generation: most
# rare metadata tags map to 'background'; 'subheading' is merged into
# 'section_heading'.
TAG_MAPPING = {'abbreviation': 'background',
               'acknowledgment': 'background',
               'additional_file': 'background',
               'affiliation': 'background',
               'article_history': 'background',
               'author_contribution': 'background',
               'availability_of_data': 'background',
               'code': 'background',
               'conflict_int': 'background',
               'consent_publication': 'background',
               'corresponding_author': 'background',
               'date': 'background',
               'ethics': 'background',
               'index': 'background',
               'journal': 'background',
               'nomenclature': 'background',
               'publisher_note': 'background',
               'urls_to_supplementary': 'background',
               'msc': 'background',
               'MSC': 'background',
               'highlights': 'background',
               'subheading': 'section_heading'}
# Cache file for the (used_tags, class_mapping) result of write_masks().
SAVED_PKL_FILE = 'saved_dad_paths.pkl'
# Shuffle buffer size for the training tf.data pipeline.
BUFFER_SIZE = 500
# Sub-directory names inside the dataset root.
MASKS_DIR = "masks"
DOCUMENTS_DIR = "documents"
ANNOTATIONS_DIR = "annotations"
def write_masks(dataset_dir, border_buffer=6):
    """Generate segmentation masks for every annotation JSON under *dataset_dir*.

    Results are cached in SAVED_PKL_FILE: when that file exists the cached
    (all_used_tags, class_mapping) pair is returned without regenerating any
    masks.

    Args:
        dataset_dir: dataset root containing the ANNOTATIONS_DIR subfolder.
        border_buffer: pixel buffer passed through to write_dad_masks.

    Returns:
        Tuple (all_used_tags, class_mapping) where all_used_tags maps a page
        path to the tags used on it, and class_mapping is the mapping produced
        by write_dad_masks (empty if no annotation files were found).
    """
    anno_dir = os.path.join(dataset_dir, ANNOTATIONS_DIR)
    anno_paths = glob(anno_dir + "/*/*json")

    if os.path.exists(SAVED_PKL_FILE):
        # Reuse the cached result of a previous full generation run.
        with open(SAVED_PKL_FILE, 'rb') as cache_file:
            all_used_tags, class_mapping = pickle.load(cache_file)
    else:
        print("Running full mask generation, this may take a bit.")
        all_used_tags = {}
        # Guard: stays an empty dict (instead of raising NameError on return)
        # when no annotation files are found.
        class_mapping = {}
        for anno_json in anno_paths:
            # write_dad_masks returns the same class_mapping each call; the
            # last one wins, which is fine because it is path-independent.
            _, class_mapping, used_tags = write_dad_masks(anno_json,
                                                          ANNOTATIONS_DIR,
                                                          DOCUMENTS_DIR,
                                                          MASKS_DIR,
                                                          tag_names=TAG_NAMES,
                                                          tag_mapping=TAG_MAPPING,
                                                          buffer_size=border_buffer,
                                                          force=True)
            all_used_tags.update(used_tags)
        # Persist for future runs; 'with' guarantees the handle is closed.
        with open(SAVED_PKL_FILE, 'wb') as cache_file:
            pickle.dump((all_used_tags, class_mapping), cache_file)

    return all_used_tags, class_mapping
def build_dad_dataset(dataset_dir, img_size, batch_size, seed, debug=False):
    """Build train/validation/test tf.data pipelines for the DAD dataset.

    Pages are split 90/5/5 (train/valid/test) with stratified sampling so the
    class distribution is preserved across splits.

    Args:
        dataset_dir: dataset root (see write_masks).
        img_size: square side length images/masks are resized/padded to.
        batch_size: batch size for all three pipelines.
        seed: RNG seed used for both the splits and the shuffle buffer.
        debug: forwarded to the stratified splitter.

    Returns:
        Tuple (train, valid, test, class_mapping) of three tf.data.Dataset
        objects plus the class-id mapping from write_masks.
    """
    all_used_tags, class_mapping = write_masks(dataset_dir)

    # Drop pages that contain no active classes (helpful when experimenting
    # with different sets of active classes).
    filtered_used_tags = {path: tags for path, tags in all_used_tags.items() if tags}

    # Stratified 90/10 split, then split the held-out 10% in half for
    # validation and test.
    train_paths, test_paths = dlu.stratify_train_test_split(filtered_used_tags, 0.10, seed=seed, debug=debug)
    test_used_tags = {path: tags for path, tags in filtered_used_tags.items() if path in test_paths}
    test_paths, valid_paths = dlu.stratify_train_test_split(test_used_tags, 0.50, seed=seed, debug=debug)

    train = _build_pipeline(train_paths, img_size, batch_size, training=True, seed=seed)
    valid = _build_pipeline(valid_paths, img_size, batch_size, training=False)
    test = _build_pipeline(test_paths, img_size, batch_size, training=False)

    return train, valid, test, class_mapping


def _build_pipeline(paths, img_size, batch_size, training, seed=None):
    """Turn a list of image paths into a batched, prefetched tf.data pipeline.

    Training pipelines use the augmenting loader and a shuffle buffer; eval
    pipelines use the deterministic loader. Batches are padded to
    (img_size, img_size, 3) images, (img_size, img_size, 1) masks and
    variable-length (None, 4) box tensors.
    """
    ds = tf.data.Dataset.from_tensor_slices(paths)
    ds = ds.map(lambda x: dlu.parse_image(x, 0, MASKS_DIR), num_parallel_calls=tf.data.experimental.AUTOTUNE)
    if training:
        ds = ds.map(lambda x: dlu.load_image_train(x, img_size), num_parallel_calls=tf.data.experimental.AUTOTUNE)
        ds = ds.shuffle(buffer_size=BUFFER_SIZE, seed=seed, reshuffle_each_iteration=True)
    else:
        ds = ds.map(lambda x: dlu.load_image_test(x, img_size), num_parallel_calls=tf.data.experimental.AUTOTUNE)
    ds = ds.padded_batch(batch_size, drop_remainder=True, padded_shapes=([img_size, img_size, 3], [img_size, img_size, 1], [None, 4]))
    return ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
| 1.710938 | 2 |
terrascript/resource/ddelnano/mikrotik.py | mjuenema/python-terrascript | 507 | 16884 | <filename>terrascript/resource/ddelnano/mikrotik.py
# terrascript/resource/ddelnano/mikrotik.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:21:43 UTC)
import terrascript
class mikrotik_bgp_instance(terrascript.Resource):
    """Terraform resource ``mikrotik_bgp_instance`` (auto-generated binding)."""
    pass
class mikrotik_bgp_peer(terrascript.Resource):
    """Terraform resource ``mikrotik_bgp_peer`` (auto-generated binding)."""
    pass
class mikrotik_dhcp_lease(terrascript.Resource):
    """Terraform resource ``mikrotik_dhcp_lease`` (auto-generated binding)."""
    pass
class mikrotik_dns_record(terrascript.Resource):
    """Terraform resource ``mikrotik_dns_record`` (auto-generated binding)."""
    pass
class mikrotik_pool(terrascript.Resource):
    """Terraform resource ``mikrotik_pool`` (auto-generated binding)."""
    pass
class mikrotik_scheduler(terrascript.Resource):
    """Terraform resource ``mikrotik_scheduler`` (auto-generated binding)."""
    pass
class mikrotik_script(terrascript.Resource):
    """Terraform resource ``mikrotik_script`` (auto-generated binding)."""
    pass
# Public API of this generated module: one class per Terraform resource type.
__all__ = [
    "mikrotik_bgp_instance",
    "mikrotik_bgp_peer",
    "mikrotik_dhcp_lease",
    "mikrotik_dns_record",
    "mikrotik_pool",
    "mikrotik_scheduler",
    "mikrotik_script",
]
| 1.601563 | 2 |
test/threaddd.py | liaohongdong/IPProxy | 0 | 16885 | import time
import queue
import threading
def aaa(i):
    """Worker loop for thread *i*: drain the shared queue ``q`` until a
    ``None`` sentinel is received, acknowledging each real item."""
    done = False
    while not done:
        task = q.get()
        if task is None:
            # Sentinel item: this worker may shut down now.
            print("线程%s发现了一个None,可以休息了^-^" % i)
            done = True
        else:
            time.sleep(0.01)
            print('aaaaa -> ' + str(i) + " ---> " + str(task))
            q.task_done()
if __name__ == '__main__':
    num_of_threads = 5
    # Work items 1..20 to be processed by the worker pool.
    source = [i for i in range(1, 21)]
    q = queue.Queue()
    threads = []
    # Start the worker threads; each runs aaa() and blocks on q.get().
    for i in range(1, num_of_threads + 1):
        t = threading.Thread(target=aaa, args=(i,))
        threads.append(t)
        t.start()
    # Feed the queue, then wait until every item has been task_done()'d.
    for item in source:
        time.sleep(0.01)
        q.put(item)
    q.join()
    # print("-----工作都完成了-----")
    # # 停止工作线程
    # Shut the pool down: one None sentinel per worker makes each exit its loop.
    for i in range(num_of_threads):
        q.put(None)
    # for t in threads:
    #     t.join()
    # print(threads)
| 3.390625 | 3 |
Codes/Liam/203_remove_linked_list_elements.py | liuxiaohui1221/algorithm | 256 | 16886 | <filename>Codes/Liam/203_remove_linked_list_elements.py
# 执行用时 : 68 ms
# 内存消耗 : 16.6 MB
# 方案:哨兵结点 sentinel,插入在head结点之前
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    """LeetCode 203: remove all nodes holding a given value from a linked list."""

    def removeElements(self, head: ListNode, val: int) -> ListNode:
        """Return the list with every node whose value equals *val* unlinked.

        A dummy (sentinel) node placed in front of *head* lets the original
        head be removed with the same logic as any interior node.
        """
        dummy = ListNode(0)
        dummy.next = head
        previous = dummy
        node = head
        while node is not None:
            if node.val == val:
                # Skip this node: link the previous node straight past it.
                previous.next = node.next
            else:
                previous = node
            node = node.next
        return dummy.next
| 3.359375 | 3 |
bailleurs/migrations/0001_initial.py | MTES-MCT/appel | 0 | 16887 | # Generated by Django 3.2.5 on 2021-07-06 14:18
import uuid
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration for the ``bailleurs`` app: creates the Bailleur model.

    Auto-generated by Django; do not hand-edit applied migrations.
    """
    initial = True
    dependencies = []
    operations = [
        migrations.CreateModel(
            name="Bailleur",
            fields=[
                ("id", models.AutoField(primary_key=True, serialize=False)),
                # Stable external identifier, generated automatically.
                ("uuid", models.UUIDField(default=uuid.uuid4, editable=False)),
                ("nom", models.CharField(max_length=255)),
                ("siret", models.CharField(max_length=14)),
                ("capital_social", models.CharField(max_length=255)),
                ("siege", models.CharField(max_length=255)),
                ("dg_nom", models.CharField(max_length=255)),
                ("dg_fonction", models.CharField(max_length=255)),
                ("dg_date_deliberation", models.DateField()),
                ("operation_exceptionnelle", models.TextField()),
                # Audit timestamps, maintained automatically by Django.
                ("cree_le", models.DateTimeField(auto_now_add=True)),
                ("mis_a_jour_le", models.DateTimeField(auto_now=True)),
            ],
            options={
                "permissions": (
                    ("can_edit_bailleur", "Créer ou mettre à jour un bailleur"),
                ),
            },
        ),
    ]
| 1.960938 | 2 |
tests/primitives/flow/probe_tcpip_extended_unibiflow_test.py | kjerabek/netexp | 0 | 16888 | <reponame>kjerabek/netexp
from tests.primitives.flow import probe_tcpip_extended_biflow_test
from netexp.primitives.flow import TCPIPFlowExtendedUniBiFlowInfo
from netexp.common import naming
class TestTCPIPExtendedUniBiFlow(probe_tcpip_extended_biflow_test.TestTCPIPExtendedBiFlow):
    """Re-runs the extended bi-flow probe tests against the uni/bi-flow class.

    Inherits the fixtures and remaining tests from TestTCPIPExtendedBiFlow;
    only the flow class under test differs.
    """
    # Flow-info class exercised by the inherited fixtures/tests.
    flow_class = TCPIPFlowExtendedUniBiFlowInfo
    def test_short_single_uni_flow_stats(self, probe_short_flow):
        """Check per-direction (AB/BA) per-packet stats for a short TCP flow.

        Expected values are golden numbers captured from a known pcap.
        """
        probe_short_flow.run()
        # The probe emits the processed flow as the first positional arg of
        # output.send(); inspect the last call.
        processed_flow = self.output.send.call_args.args[0]
        stats = processed_flow.to_dict()
        # Direction A->B: 7 packets.
        assert stats[naming.TIMESTAMP_AB] == [1590076139670363, 1590076139673838, 1590076139676297, 1590076139696210,
                                              1590076139696270, 1590076141095061, 1590076141098597]
        assert stats[naming.L3_HEADER_LENGTH_AB] == [20, 20, 20, 20, 20, 20, 20]
        assert stats[naming.L4_HEADER_LENGTH_AB] == [40, 32, 32, 32, 32, 32, 32]
        assert stats[naming.L4_PAYSIZE_AB] == [0, 0, 517, 0, 0, 0, 0]
        assert stats[naming.TCP_FLAG_PSH_AB] == [0, 0, 1, 0, 0, 0, 0]
        assert stats[naming.TCP_FLAG_RST_AB] == [0, 0, 0, 0, 0, 0, 0]
        assert stats[naming.TCP_FLAG_ACK_AB] == [0, 1, 1, 1, 1, 1, 1]
        assert stats[naming.TCP_FLAG_FIN_AB] == [0, 0, 0, 0, 0, 1, 0]
        assert stats[naming.TCP_FLAG_SYN_AB] == [1, 0, 0, 0, 0, 0, 0]
        # Direction B->A: 5 packets.
        assert stats[naming.TIMESTAMP_BA] == [1590076139673781, 1590076139679702, 1590076139696191, 1590076139696249,
                                              1590076141098561]
        assert stats[naming.L3_HEADER_LENGTH_BA] == [20, 20, 20, 20, 20]
        assert stats[naming.L4_HEADER_LENGTH_BA] == [40, 32, 32, 32, 32]
        assert stats[naming.L4_PAYSIZE_BA] == [0, 0, 1418, 1740, 0]
        assert stats[naming.TCP_FLAG_PSH_BA] == [0, 0, 0, 1, 0]
        assert stats[naming.TCP_FLAG_RST_BA] == [0, 0, 0, 0, 0]
        assert stats[naming.TCP_FLAG_ACK_BA] == [1, 1, 1, 1, 1]
        assert stats[naming.TCP_FLAG_FIN_BA] == [0, 0, 0, 0, 1]
        assert stats[naming.TCP_FLAG_SYN_BA] == [1, 0, 0, 0, 0]
| 1.898438 | 2 |
optionstrader/database.py | Zaitsev11/Optionstrader | 6 | 16889 | import time
import mysql.connector
from optionstrader.customlogging import CustomLog
from optionstrader.parser import Parser
# Address of the MySQL server this module connects to.
# NOTE(review): hard-coded LAN address -- consider moving to configuration.
MYSQL_IP_ADDR = '192.168.1.10'
# Used to debug via logs; when True, save_option_chain_to_table prints each
# generated SQL query.
DEBUG = False
class Database:
    """Thin wrapper around a single mysql.connector connection.

    Owns the connection for the whole object lifetime (opened in __init__,
    released via close_connection) and provides helpers to create the schema,
    persist option-chain records and query them back for analysis.

    NOTE(review): all SQL here is built with str.format() rather than
    parameterized placeholders, so any externally-sourced value would be an
    SQL-injection risk -- confirm every input is trusted before widening use.
    """
    def __init__(self):
        """
        There's some confusion with database vs table.
        We will have separate environments for Dev/Stage and Prd,
        so we will want to ensure that the databases are separate.
        TODO: Ensure that the Dev/Stage and Prod environments are fully seggregated
        with their own databases. This will allows us to migrate the databases when
        the time comes.
        environment = 'dev' ('dev', 'stage', 'production')
        database = "algotrader_".format(environment)
        table = ('accounts', 'optionchainanalysis', 'optionchains', 'stocks')
        """
        # initiate the connection when the database object is created
        # Standard procedure will be to open the connection,
        # perform the action, then close the connection
        self.log = CustomLog()
        self.parser = Parser()
        self.connection = self.connect_to_database()
        # CONFIGURATION
        # Possible Values: "Dev", "Stage", "Production"
        # Change below code when a config file exists
        self.environment = "Dev"
        self.database_name = "algotrader_dev"
        # Below is used to determine how far back in seconds the analyzer tool should go
        # The reason behind this is because we do not want to delete stock market date
        # Instead, we would rather query the database and only select the records that
        # are within the threshold
    def connect_to_database(self):
        """Open and return a mysql.connector connection to MYSQL_IP_ADDR.

        NOTE(review): credentials are hard-coded here -- move to secure
        configuration before production use.
        """
        # try:
        # Using loopback for testing purposes. Might use socket level later.
        return mysql.connector.connect(user='optionstrader_service_account', password='<PASSWORD>',
                                       host=MYSQL_IP_ADDR,
                                       port='3306')
        #database='algotrader_data'
        #mysql.connector.errors.InterfaceError: 2003: Can't connect to MySQL server on 'localwhost:3306'
        # except Exception as e:
        #     msg = "Error! Please check the MySQL database connection: {error}".format(error=e)
        #     self.log.debug(msg)
    def configure_database(self):
        """Create the dev database plus its four tables; returns True."""
        database_name = "algotrader_dev"
        self.create_database(database_name)
        table_columns = "(account_number TEXT, account_type TEXT, balance FLOAT, total_deposits FLOAT, total_withdrawls FLOAT)"
        table_name = "accounts"
        self.create_table(database_name, table_name, table_columns)
        table_columns = "(symbol TEXT, company_name TEXT)"
        table_name = "stocks"
        self.create_table(database_name, table_name, table_columns)
        table_columns = "(symbol TEXT)"
        table_name = "optionchains"
        self.create_table(database_name, table_name, table_columns)
        table_columns = "(symbol TEXT)"
        table_name = "optionchainanalysis"
        self.create_table(database_name, table_name, table_columns)
        # self.parse_symbols_and_add_to_db()
        self.log.debug("Database has been configured")
        return True
    def create_database(self, database_name):
        """CREATE DATABASE *database_name*; returns True on success.

        NOTE(review): the bare except swallows every failure (including a
        pre-existing database) and only logs it; callers get None back.
        """
        try:
            cursor = self.connection.cursor()
            query = ("CREATE DATABASE {database_name}").format(database_name=database_name)
            cursor.execute(query)
            output = self.connection.commit()
            cursor.close()
            msg = "Database `{database_name}` created.".format(
                database_name=database_name)
            self.log.debug(msg)
            return True
        except:
            msg = "Database `{database_name}` can't be created.".format(
                database_name=database_name)
            self.log.debug(msg)
    def create_table(self, database_name, table_name, table_columns):
        """CREATE TABLE database.table with *table_columns* DDL; True on success.

        NOTE(review): same bare-except behaviour as create_database.
        """
        try:
            cursor = self.connection.cursor()
            query = "CREATE TABLE {database_name}.{table_name} {table_columns}".format(
                database_name=database_name,
                table_name=table_name,
                table_columns=table_columns)
            cursor.execute(query)
            output = self.connection.commit()
            cursor.close()
            msg = "Table `{table_name} created in database `{database_name}`.".format(
                database_name=database_name,
                table_name=table_name)
            self.log.debug(msg)
            return True
        except:
            msg = "Table `{table_name}` can't be created.".format(
                table_name=table_name)
            self.log.debug(msg)
    def close_connection(self):
        """Close the underlying MySQL connection."""
        self.connection.close()
    # ====================================
    # ====================================
    # === Code used for Account Class ====
    # ====================================
    # ====================================
    def update_account(self, balance, account_type):
        """Set the balance of the account row matching *account_type*."""
        cursor = self.connection.cursor()
        query = ("UPDATE {db}.accounts SET balance={balance} WHERE account_type=\'{account_type}\'".format(
            db=self.database_name,
            balance=balance,
            account_type=account_type))
        cursor.execute(query)
        self.connection.commit()
        cursor.close()
    def get_recommended_option_purchase(self):
        """Return analysis rows matching a fixed set of screening criteria.

        NOTE(review): the table name and expiration date are hard-coded
        (optionchainanalysisDev / 2017-03-03) -- presumably stale; verify
        before relying on this query.
        """
        # TODO
        results_table_cursor = self.connection.cursor()
        #query = ("SELECT balance FROM accounts{env} where account_type='checking'".format(env=self.environment))
        _query = ("SELECT * FROM optionchainanalysisDev ",
        "WHERE `total_price_paid_1x` BETWEEN 0 and 100 AND ",
        "`potential_profit_1x` BETWEEN 50 and 100 AND ",
        "`stock_price_increase` < 3.5 AND ",
        "`magic_number` BETWEEN 3 and 10 AND ",
        "`expiration_date` LIKE '2017-03-03' AND ",
        "`risk_percentage_1x` BETWEEN 0 and 18 ",
        "ORDER BY `timestamp` DESC")
        query = "".join(_query)
        log_msg = query
        #
        #
        self.connection.commit()
        result = results_table_cursor.execute(query)
        results_table = []
        for record in results_table_cursor:
            results_table.append(record)
        return results_table
        #for record in results_table:
        #    return record
    def get_list_of_tickers(self, query_type='default'):
        """Return a list of distinct ticker symbols.

        query_type: 'default' (all stocks), 'options_only' (symbols that have
        option chains), or 'one_option_only' (arbitrary single symbol).

        NOTE(review): the branches are chained `if`s, not `elif`s, and the
        final `else` only pairs with the last `if`; an unrecognised
        query_type leaves `query` unbound and raises NameError -- confirm
        intended behaviour.
        """
        # TODO Implement the following:
        # We will want to stream data from external to the database then stream the symbols from the database
        # as they're made available.
        table = 'optionchains'
        if query_type == 'default':
            # Run the normal code here
            query = "SELECT DISTINCT symbol FROM {db}.stocks WHERE symbol is not Null".format(
                db=self.database_name)
        if query_type == 'options_only':
            # Run the code to only retrieve symbols which have had stock options in the past
            query = "SELECT DISTINCT underlying FROM {db}.{table} WHERE underlying is not Null".format(
                db=self.database_name,
                table=table,
                env=self.environment)
        if query_type == 'one_option_only':
            # Arbritrary first option only.
            # Usually used for testing purposes
            query = "SELECT DISTINCT underlying FROM {db}.{table} WHERE underlying is not Null LIMIT 1".format(
                db=self.database_name,
                table=table,
                env=self.environment)
        else:
            # Run a special SQL query here, which returns the symbols in a specific order
            pass
        cursor = self.connection.cursor()
        # As of 2/11/17, there are 3078 total results from this query
        self.connection.commit()
        result = cursor.execute(query)
        print(result)
        list_of_tickers = list()
        for ticker in cursor:
            #print(ticker[0])
            list_of_tickers.append(ticker[0])
        # Return type is a python list [u'AAPL', ..., u'GOOG']
        return list_of_tickers
    def get_current_stock_price(self, symbol):
        """Return the most recent 'last_' price stored for *symbol*.

        Returns None implicitly when no row matches.
        """
        # We want to make sure that the 'last_' price is within reason. We don't want to
        # pay 100x the average price of the item.
        cursor = self.connection.cursor(dictionary=True)
        query = "SELECT * FROM {db}.stocks WHERE symbol LIKE \'{symbol}\' ORDER BY `timestamp` DESC LIMIT 1".format(
            db=self.database_name,
            symbol=symbol)
        self.connection.commit()
        result = cursor.execute(query)
        for stock_data in cursor:
            return stock_data['last_']
    def get_example_option_chains(self, num_chains_limit=1):
        """Return a cursor over up to *num_chains_limit* arbitrary option rows.

        NOTE(review): everything after the first `return cursor` below is
        unreachable dead code left from an earlier revision.
        """
        # This function has a much less accurate query than query_option_chains_for_analysis
        # This function is typically used for testing purposes
        cursor = self.connection.cursor(dictionary=True, buffered=True)
        query = ("SELECT * from {db}.optionchains LIMIT {num_chains_limit}".format(
            db=self.database_name,
            num_chains_limit=num_chains_limit))
        self.connection.commit()
        cursor.execute(query)
        self.log.debug("****Type:{0}".format(type(cursor)))
        return cursor
        # Only iterate once
        #for option_chain in cursor:
        #    return option_chain, cursor[option_chain]
        # list_of_option_chains is all of the option chains for the ticker
        # therefore, we need to select and return the most recent one.
        cursor = self.connection.cursor()
        # As of 2/11/17, there are 3078 total results from this query
        query = "SELECT * from {db}.optionchains LIMIT 1".format(
            db=self.database_name)
        self.connection.commit()
        option_chain = cursor.execute(query)
        return option_chain
    def query_option_chains_for_analysis(self,
                                         ticker=None, current_timestamp=int(time.time()), time_threshold=30000,
                                         max_num_option_chains=40):
        """Return a buffered dict-cursor of recent 'call' option rows.

        When *ticker* is None all underlyings are considered; otherwise rows
        are restricted to that underlying. Only rows newer than
        (current_timestamp - time_threshold) are returned, newest first.

        NOTE(review): the default current_timestamp=int(time.time()) is
        evaluated once at import time, not per call -- long-running processes
        will use a stale timestamp unless callers pass it explicitly.
        """
        # This function has a more precise query than get_example_option_chains
        # If no tickers are specified, retrieve the most recent option_chains
        if ticker == None:
            cursor = self.connection.cursor(dictionary=True, buffered=True)
            query_1 = "SELECT * FROM {db}.optionchains WHERE type LIKE 'option' and ".format(
                db=self.database_name)
            query_2 = "timestamp > ({current_timestamp}-{time_threshold}) and ".format(
                time_threshold=time_threshold,
                current_timestamp=current_timestamp)
            query_3 = "option_type LIKE 'call' ORDER BY `timestamp` DESC LIMIT {max_num_option_chains}".format(max_num_option_chains=max_num_option_chains)
            query = (query_1 + query_2 + query_3)
            self.log.debug(query)
            result = cursor.execute(query)
            self.log.debug(cursor.fetchone())
            self.connection.commit()
        # If a ticker is specified, retrieve the most recent option_chains
        else:
            # We want to return the dictionary type
            # we need a MySQL buffered response
            cursor = self.connection.cursor(dictionary=True, buffered=True)
            query_1 = "SELECT * FROM {db}.optionchains WHERE type LIKE 'option' and ".format(
                db=self.database_name)
            query_2 = "timestamp > ({current_timestamp}-{time_threshold}) and underlying LIKE '{ticker}' and ".format(ticker=ticker,
                time_threshold=time_threshold,
                current_timestamp=current_timestamp)
            query_3 = "option_type LIKE 'call' ORDER BY `timestamp` DESC LIMIT {max_num_option_chains}".format(max_num_option_chains=max_num_option_chains)
            query = (query_1 + query_2 + query_3)
            result = cursor.execute(query)
            self.connection.commit()
        """
        # cursor is a MySQLCursorDict object.
        # cursor is a MySQLCursorDict: SELECT * FROM optionchainsDev WHERE type..
        # retrieve results using cursor.fetchall()
        """
        return cursor
        # DEPRICATED
        #result = cursor.execute(query)
        # Iterate over all options in the option chains in the database for that ticker.
        # Sorted by time in descending order
        #all_options = []
        #for option_chain in cursor:
        #    all_options.append(option_chain)
        #return all_options
    def sanitize_field_names(self, field_name):
        """Rename field names that collide with reserved SQL words.

        'change', 'close', 'open' and 'last' get a trailing underscore;
        anything else is returned unchanged (stringified).
        """
        sanitized_field_names_pairs = {
            'change': 'change_',
            'close': 'close_',
            'open': 'open_',
            'last': 'last_'
        }
        field_name = str(field_name)
        for name in sanitized_field_names_pairs.keys():
            if field_name == name:
                sanitized_field_name = sanitized_field_names_pairs[name]
                return sanitized_field_name
        return field_name
    def save_option_chain_to_table(self, option_chain, table='optionchains'):
        """Insert *option_chain* (a dict) as a row of *table*; returns True.

        A 'timestamp' key is stamped onto the dict (mutating the caller's
        object). If the INSERT fails because a column does not exist yet, a
        column is added for every key and the insert is retried exactly once.
        """
        # PLEASE NOTE:
        # If a new keyword (column) is detected, then the INSERT INTO command will fail
        # The next time that the option chain is attempted to be saved, the record
        # will update.
        attempt_number = 0
        while True:
            try:
                # add timestamp here
                option_chain['timestamp']=int(time.time())
                cursor = self.connection.cursor()
                #"{} {}".format(str(a.keys()).replace("'", ""), str(a.values()).replace("'", ""))
                #option_chain.keys(), option_chain.values()
                KEYS = [self.sanitize_field_names(i) for i in option_chain.keys()]
                VALUES = [str(i) for i in option_chain.values()]
                # Should never have the single character apostrophy.
                # Error out, if it contains once
                keys_error = [str(i).find("'") for i in option_chain.keys()]
                values_error = [str(i).find("'") for i in option_chain.values()]
                if max(max(keys_error), max(values_error)) != -1:
                    log_msg = ""
                    log_msg = "Error: single character apostrophy located in option_chain!"
                keys_formatted = str("(" + str(KEYS)[1:-1] + ")").replace("'", "")
                values_formatted = str("(" + str(VALUES)[1:-1] + ")")
                query = ("INSERT INTO {db}.{table} {keys} VALUES {values}").format(
                    db=self.database_name,
                    table=table,
                    keys=keys_formatted,
                    values=values_formatted)
                log_msg = "~~~~-----------------~~~"
                query = query.replace("'None'", 'NULL')
                if DEBUG is True:
                    print(query)
                cursor.execute(query)
                self.connection.commit()
                cursor.close()
                # Break the while loop
                break
            except mysql.connector.ProgrammingError:
                # This means that the fields don't exist on the database
                # time to add the fields to the database
                log_msg = "Warning. Trying to update the database with fields which don't yet exist in the table."
                # Unsure which key is the problem one.
                # Try to create a field with each key.
                # if the key is already a field on the database, then pass without error
                for field_name in KEYS:
                    # mySQL database needs specific table names to be off limits
                    try:
                        # Sanitized names may not exist in the dict; fall back
                        # to the un-suffixed original key below.
                        field_type = self.type_conversion(option_chain[field_name])
                    except:
                        field_type = self.type_conversion(option_chain[field_name[:-1]])
                    try:
                        self.add_new_column_to_table(field_name, field_type, table=table)
                    except mysql.connector.ProgrammingError:
                        pass
                log_msg = "Information. The fields were updated in table '{0}'.".format(table)
                if attempt_number == 1:
                    log_msg = "Error: Unable to update SQL table"
                    break
                else:
                    log_msg = "Retrying the update to the table"
                    attempt_number += 1
        return True
    def update_option_chain_with_analysis(self, percentage_increase_analysis):
        """Persist a percentage-increase analysis row; returns True."""
        # This is the analysis done for the percentage increase (1,2,5 percent)
        # of an underlyer
        result = self.save_option_chain_to_table(percentage_increase_analysis, table='optionchainanalysis')
        return True
    def add_new_column_to_table(self, column_name, data_type, table):
        """ALTER TABLE to add *column_name* of *data_type*; returns True."""
        cursor = self.connection.cursor()
        env = self.environment
        query = "ALTER TABLE {db}.{table} ADD {column_name} {data_type}".format(
            db=self.database_name,
            table=table,
            column_name=column_name,
            data_type=data_type)
        cursor.execute(query)
        self.connection.commit()
        return True
    def add_money_to_account(self, amount_of_money, account_type):
        """Increase the checking balance by *amount_of_money*.

        NOTE(review): get_checking_account_balance / update_checking_account
        are not defined on this class -- this method raises AttributeError
        as written; the *account_type* argument is also unused.
        """
        current_balance = self.get_checking_account_balance()
        output = str(current_balance + amount_of_money)
        self.update_checking_account(output)
        print(self.get_checking_account_balance())
    def subtract_money_from_account(self, amount_of_money, account_type):
        """Decrease the checking balance by *amount_of_money*.

        NOTE(review): same undefined-helper problem as add_money_to_account.
        """
        current_balance = self.get_checking_account_balance()
        output = str(current_balance - amount_of_money)
        self.update_checking_account(output)
        print(self.get_checking_account_balance())
    def add_field_to_table(self, field, _type):
        """ALTER TABLE stocks to add a column *field* of SQL type *_type*."""
        cursor = self.connection.cursor()
        #query = ("ALTER TABLE stocks ADD %s %s") % (field, type)
        query = "ALTER TABLE {db}.stocks ADD {field} {type}".format(
            db=self.database_name,
            field=field,
            type=_type)
        cursor.execute(query)
        self.connection.commit()
        cursor.close()
    def insert_values_into_table(self, column_string, value_string):
        """INSERT a row into stocks; both arguments are pre-formatted SQL
        fragments like "(symbol, company_name)" / '("AAPL", "Apple")'."""
        cursor = self.connection.cursor()
        query = "INSERT INTO {db}.stocks {column_string} VALUES {value_string}".format(
            db=self.database_name,
            column_string=column_string,
            value_string=value_string)
        self.log.debug(query)
        cursor.execute(query)
        self.connection.commit()
        cursor.close()
    def type_conversion(self, object_item):
        """Map a Python value's type name to a MySQL column type string."""
        # We need to convert the types so that the sql database knows what to do
        # The names of the types differs between python and mysql
        # Examples: unicode, NoneType, int, float
        obj_type = type(object_item)
        #self.log.debug(object_item)
        #self.log.debug(obj_type)
        # str(type(x)) looks like "<class 'int'>"; take the quoted name.
        obj_type_str = str(obj_type).split("'")[1]
        if obj_type_str == 'unicode':
            return "text"
        if obj_type_str == 'float':
            return "float"
        if obj_type_str == 'NoneType':
            return "text"
        if obj_type_str == 'int':
            return "bigint(20)"
        else:
            return "text"
    def parse_symbols_and_add_to_db(self):
        """Parse (symbol, company_name) pairs and insert each into stocks.

        The first parser result is skipped (assumed header row). Returns the
        raw parser output.
        """
        # technically this should go in a separate test_parser module... TODO.
        results = self.parser.extract_symbols()
        for symbol_and_name in results[1:]:
            column_string = "(symbol, company_name)"
            value_string = "(\"{symbol}\", \"{company_name}\")".format(
                symbol=symbol_and_name[0],company_name=symbol_and_name[1])
            self.insert_values_into_table(column_string, value_string)
        msg = "Symbols parsed and added to database"
        self.log.debug(msg)
        return results
| 2.625 | 3 |
backend/api/migrations/0001_initial.py | leowotzak/ljwe-db | 0 | 16890 | # Generated by Django 3.2.9 on 2021-11-24 02:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: the `symbol` master table plus one OHLCV bar-data
    table per timeframe (1min/5min/15min/30min/1h/daily/weekly/monthly).

    Hand-refactored from the auto-generated migration: the eight bar tables
    had identical, copy-pasted field definitions, so they are now built in
    a loop. The resulting `operations` list (model names, fields, options
    and order) is unchanged.
    """

    initial = True

    dependencies = [
    ]

    def _bar_data_fields():
        # Each CreateModel needs its own freshly-constructed field
        # instances, hence a factory instead of a shared list.
        return [
            ('timestamp', models.DateTimeField(primary_key=True, serialize=False)),
            ('open_price', models.FloatField()),
            ('high_price', models.FloatField()),
            ('low_price', models.FloatField()),
            ('close_price', models.FloatField()),
            ('adj_close_price', models.FloatField(blank=True, null=True)),
            ('volume', models.IntegerField()),
            ('dividend_amount', models.FloatField(blank=True, null=True)),
            ('split_coeff', models.FloatField(blank=True, null=True)),
            ('created_date', models.DateTimeField(blank=True, null=True)),
            ('last_updated_date', models.DateTimeField(blank=True, null=True)),
            ('symbol', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='api.symbol')),
        ]

    operations = [
        migrations.CreateModel(
            name='Symbol',
            fields=[
                ('symbol_id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=200)),
                ('ticker', models.CharField(max_length=30)),
                ('description', models.TextField(blank=True, null=True)),
                ('sector', models.CharField(blank=True, max_length=30, null=True)),
                ('asset_type', models.CharField(blank=True, max_length=30, null=True)),
                ('created_date', models.DateTimeField(blank=True, null=True)),
                ('last_updated_date', models.DateTimeField(blank=True, null=True)),
            ],
            options={
                'db_table': 'symbol',
                'managed': True,
            },
        ),
    ]

    # bar tables, in the same order the original auto-generated file used
    for _model_name, _db_table in [
        ('BarDataWeekly', 'bar_data_weekly'),
        ('BarDataMonthly', 'bar_data_monthly'),
        ('BarDataDaily', 'bar_data_daily'),
        ('BarData5Min', 'bar_data_5min'),
        ('BarData30Min', 'bar_data_30min'),
        ('BarData1Min', 'bar_data_1min'),
        ('BarData1H', 'bar_data_1h'),
        ('BarData15Min', 'bar_data_15min'),
    ]:
        operations.append(
            migrations.CreateModel(
                name=_model_name,
                fields=_bar_data_fields(),
                options={
                    'db_table': _db_table,
                    'managed': True,
                    'unique_together': {('timestamp', 'symbol')},
                },
            )
        )

    # keep the class namespace identical to a generated migration
    del _model_name, _db_table, _bar_data_fields
| 1.78125 | 2 |
Ch2_Linked_Lists/test/test_CTCI_Ch2_Ex6.py | mtrdazzo/CTCI | 0 | 16891 | from unittest import TestCase
from CTCI.Ch2_Linked_Lists.common.SinglyLinkedList import Empty, Node
from CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6 import PalindromeSinglyLinkedList, is_palindrome_brute_force
from CTCI.Ch2_Linked_Lists.exercises.CTCI_Ch2_Ex6 import is_palindrome_reverse
class TestPalindromeSinglyLinkedList(TestCase):
    """Exercises PalindromeSinglyLinkedList.is_palindrome over edge cases."""

    def setUp(self):
        self.pll = PalindromeSinglyLinkedList()

    def tearDown(self):
        self.pll = None

    def _push_all(self, values):
        # helper: append every value, in order, to the list under test
        for value in values:
            self.pll.add(value)

    def test_empty_list(self):
        with self.assertRaises(Empty):
            self.pll.is_palindrome()

    def test_single_element(self):
        self._push_all([1])
        self.assertTrue(self.pll.is_palindrome())

    def test_two_elements(self):
        self._push_all([1, 1])
        self.assertTrue(self.pll.is_palindrome())
        self.pll.remove(1)
        self.pll.add(2)
        self.assertFalse(self.pll.is_palindrome())

    def test_more_than_two_elements_even(self):
        self._push_all([1, 2, 2, 2])
        self.assertFalse(self.pll.is_palindrome())
        self.pll.remove(2)
        self.pll.add(1)
        self.assertTrue(self.pll.is_palindrome())

    def test_more_than_two_elements_odd(self):
        self._push_all([1, 2, 2])
        self.assertFalse(self.pll.is_palindrome())
        self.pll.remove(2)
        self.pll.add(1)
        self.assertTrue(self.pll.is_palindrome())
class TestPalindromeBruteForce(TestCase):
    """Unit tests for the brute-force palindrome check on raw Node chains.

    Cleanup vs. the original: the no-op setUp/tearDown overrides were
    removed (TestCase already provides them), and the local variable was
    renamed from `list` to `head` to stop shadowing the builtin.
    """

    def test_empty_linked_list(self):
        self.assertIsNone(is_palindrome_brute_force(None))

    def test_single_element(self):
        head = Node(1)
        self.assertTrue(is_palindrome_brute_force(head))

    def test_two_elements(self):
        head = Node(1)
        head.next = Node(2)
        self.assertFalse(is_palindrome_brute_force(head))
        head.next = Node(1)
        self.assertTrue(is_palindrome_brute_force(head))

    def test_odd_elements(self):
        head = Node(1)
        head.next = Node(2)
        head.next.next = Node(2)
        self.assertFalse(is_palindrome_brute_force(head))
        head.next.next = Node(1)
        self.assertTrue(is_palindrome_brute_force(head))

    def test_even_elements(self):
        head = Node(1)
        head.next = Node(2)
        head.next.next = Node(2)
        head.next.next.next = Node(3)
        self.assertFalse(is_palindrome_brute_force(head))
        head.next.next.next = Node(1)
        self.assertTrue(is_palindrome_brute_force(head))
class TestPalindromeReverse(TestCase):
    """Unit tests for the reverse-and-compare palindrome check.

    Bug fix vs. the original: test_even_nodes assigned `l_list.next` three
    times in a row, so each assignment overwrote the previous one and the
    "even" case actually tested a 2-node (then 3-node) list. The chain is
    now built correctly: 1 -> 2 -> 2 -> 3 (four nodes).
    """

    def test_empty_node(self):
        self.assertIsNone(is_palindrome_reverse(None))

    def test_single_node(self):
        self.assertTrue(is_palindrome_reverse(Node(1)))

    def test_two_nodes(self):
        l_list = Node(1)
        l_list.next = Node(2)
        self.assertFalse(is_palindrome_reverse(l_list))
        l_list.next = Node(1)
        self.assertTrue(is_palindrome_reverse(l_list))

    def test_odd_nodes(self):
        l_list = Node(1)
        l_list.next = Node(2)
        l_list.next.next = Node(3)
        self.assertFalse(is_palindrome_reverse(l_list))
        l_list.next.next = Node(1)
        self.assertTrue(is_palindrome_reverse(l_list))

    def test_even_nodes(self):
        l_list = Node(1)
        l_list.next = Node(2)
        l_list.next.next = Node(2)
        l_list.next.next.next = Node(3)
        self.assertFalse(is_palindrome_reverse(l_list))
        l_list.next.next.next = Node(1)
        self.assertTrue(is_palindrome_reverse(l_list))
| 3.109375 | 3 |
alphafold2_pytorch/utils.py | nilbot/alphafold2 | 1 | 16892 | # utils for working with 3d-protein structures
import os
import numpy as np
import torch
from functools import wraps
from einops import rearrange, repeat
# import torch_sparse # only needed for sparse nth_deg adj calculation
# bio
from Bio import SeqIO
import itertools
import string
# sidechainnet
from sidechainnet.utils.sequence import ProteinVocabulary, ONE_TO_THREE_LETTER_MAP
from sidechainnet.utils.measure import GLOBAL_PAD_CHAR
from sidechainnet.structure.build_info import NUM_COORDS_PER_RES, BB_BUILD_INFO, SC_BUILD_INFO
from sidechainnet.structure.StructureBuilder import _get_residue_build_iter
# build vocabulary
# shared int <-> one-letter amino-acid vocabulary used by the helpers below
VOCAB = ProteinVocabulary()
# constants
import alphafold2_pytorch.constants as constants
# helpers
def exists(val):
    """Return True when `val` is anything other than None."""
    if val is None:
        return False
    return True
# constants: same as in alphafold2.py
# distogram bucket edges: DISTOGRAM_BUCKETS evenly spaced cutoffs over [2, 20]
DISTANCE_THRESHOLDS = torch.linspace(2, 20, steps = constants.DISTOGRAM_BUCKETS)
# distance binning function
def get_bucketed_distance_matrix(coords, mask, num_buckets = constants.DISTOGRAM_BUCKETS, ignore_index = -100):
    """Discretize the pairwise coordinate distance map into distogram buckets.

    Pairs where either position is masked out are set to `ignore_index` so a
    cross-entropy loss can skip them.
    """
    # pairwise euclidean distances, then bucket indices via the linear thresholds
    pairwise = torch.cdist(coords, coords, p = 2)
    cutoffs = torch.linspace(2, 20, steps = num_buckets, device = coords.device)
    buckets = torch.bucketize(pairwise, cutoffs[:-1])
    # a pair is valid only when both of its endpoints are unmasked
    pair_mask = mask[..., None] & mask[..., None, :]
    buckets.masked_fill_(~pair_mask, ignore_index)
    return buckets
# decorators
def set_backend_kwarg(fn):
    """Decorator: resolve backend='auto' into 'torch' or 'numpy'.

    The choice inspects the first positional argument; torch tensors select
    the torch backend, anything else falls back to numpy. The resolved
    backend is forwarded to `fn` as a keyword argument.
    """
    @wraps(fn)
    def inner(*args, backend = 'auto', **kwargs):
        resolved = backend
        if resolved == 'auto':
            resolved = 'torch' if isinstance(args[0], torch.Tensor) else 'numpy'
        kwargs['backend'] = resolved
        return fn(*args, **kwargs)
    return inner
return inner
def expand_dims_to(t, length = 3):
    """Left-pad the shape of `t` with `length` leading singleton axes.

    Works for both torch tensors and numpy arrays (both expose `.reshape`).
    """
    if length == 0:
        return t
    lead = (1,) * length
    return t.reshape(*lead, *t.shape)
def expand_arg_dims(dim_len = 3):
    """ pack here for reuse.
        turns input into (B x D x N)

    Decorator factory: both tensor arguments of the wrapped binary function
    are left-padded with singleton axes until they have `dim_len` dims.
    """
    def outer(fn):
        @wraps(fn)
        def inner(x, y, **kwargs):
            assert len(x.shape) == len(y.shape), "Shapes of A and B must match."
            missing = dim_len - len(x.shape)
            return fn(expand_dims_to(x, length = missing),
                      expand_dims_to(y, length = missing),
                      **kwargs)
        return inner
    return outer
def invoke_torch_or_numpy(torch_fn, numpy_fn):
    """Decorator factory: route a call to a torch or numpy implementation.

    The wrapped `fn` returns the positional args (optionally ending with a
    kwargs dict) to forward; the mandatory `backend` keyword - typically
    injected by `set_backend_kwarg` - selects which backend fn runs.
    """
    def outer(fn):
        @wraps(fn)
        def inner(*args, **kwargs):
            backend = kwargs.pop('backend')
            forwarded = list(fn(*args, **kwargs))
            fwd_kwargs = forwarded.pop() if isinstance(forwarded[-1], dict) else {}
            chosen = torch_fn if backend == 'torch' else numpy_fn
            return chosen(*forwarded, **fwd_kwargs)
        return inner
    return outer
return outer
# preprocess data
def get_atom_ids_dict():
    """ Get's a dict mapping each atom to a token.

    Collects every atom name appearing in sidechainnet's build info (plus
    the backbone atoms and the empty-slot name) and assigns stable integer
    tokens by sorted order.
    """
    names = {"", "N", "CA", "C", "O"}
    for info in SC_BUILD_INFO.values():
        names.update(info["atom-names"])
    return {name: idx for idx, name in enumerate(sorted(names))}
def make_cloud_mask(aa):
    """Occupancy mask over the 14 atom slots of amino acid `aa`.

    Padding token "_" yields all zeros; otherwise the first
    4 (backbone) + n_sidechain slots are set to 1.
    """
    mask = np.zeros(14)
    # padding token occupies no atoms
    if aa == "_":
        return mask
    # backbone (N, CA, C, O) plus this residue's sidechain atoms
    num_atoms = 4 + len(SC_BUILD_INFO[ONE_TO_THREE_LETTER_MAP[aa]]["atom-names"])
    mask[:num_atoms] = 1
    return mask
def make_atom_id_embedds(aa, atom_ids):
    """ Return the tokens for each atom in the aa.

    Inputs:
    * aa: one-letter amino acid code, or "_" for the padding token.
    * atom_ids: dict mapping atom names to integer tokens.
    Output: (14,) array with the token of each atom slot (0 for unused).
    """
    mask = np.zeros(14)
    # early stop if padding token
    if aa == "_":
        return mask
    # backbone atoms first, then the residue-specific sidechain atoms
    atom_list = ["N", "CA", "C", "O"] + SC_BUILD_INFO[ ONE_TO_THREE_LETTER_MAP[aa] ]["atom-names"]
    for i, atom in enumerate(atom_list):
        # bug fix: use the `atom_ids` argument - the parameter was previously
        # ignored in favor of the module-level ATOM_IDS global
        mask[i] = atom_ids[atom]
    return mask
# atom-name -> token-id table shared by every residue
ATOM_IDS = get_atom_ids_dict()
# per-residue precomputed data: which of the 14 atom slots are occupied, and
# the atom-token ids for those slots ("_" is the padding token)
CUSTOM_INFO = {k: {"cloud_mask": make_cloud_mask(k),
                   "atom_id_embedd": make_atom_id_embedds(k, atom_ids=ATOM_IDS),
                   } for k in "ARNDCQEGHILKMFPSTWYV_"}
# common utils
# parsing to pdb for easier visualization - other example from sidechainnet is:
# https://github.com/jonathanking/sidechainnet/tree/master/sidechainnet/structure
def download_pdb(name, route):
    """ Downloads a PDB entry from the RCSB PDB.
        Inputs:
        * name: str. the PDB entry id. 4 characters, capitalized.
        * route: str. route of the destin file. usually ".pdb" extension
        Output: route of destin file
    """
    import subprocess
    # security fix: use an argument list instead of a shell-interpolated
    # string so a malformed `name`/`route` cannot inject shell commands
    url = "https://files.rcsb.org/download/{0}.pdb".format(name)
    with open(route, "wb") as destin:
        # check=False mirrors the old os.system call, which ignored exit codes
        subprocess.run(["curl", url], stdout=destin, check=False)
    return route
def clean_pdb(name, route=None, chain_num=None):
    """ Cleans the structure to only leave the important part.
        Inputs:
        * name: str. route of the input .pdb file
        * route: str. route of the output. will overwrite input if not provided
        * chain_num: int. index of chain to select (1-indexed as pdb files)
        Output: route of destin file.
    """
    import mdtraj
    destin = name if route is None else route
    raw_prot = mdtraj.load_pdb(name)
    # gather atom indices chain by chain, optionally restricted to one chain
    idxs = []
    for chain in raw_prot.topology.chains:
        if chain_num is not None and chain_num != chain.index:
            continue
        idxs.extend(raw_prot.topology.select(f"chainid == {str(chain.index)}").tolist())
    # topology and xyz selections must share the same (sorted) ordering
    idxs = sorted(idxs)
    # build a trajectory restricted to the selected atoms and save it
    prot = mdtraj.Trajectory(xyz=raw_prot.xyz[:, idxs],
                             topology=raw_prot.topology.subset(idxs))
    prot.save(destin)
    return destin
def custom2pdb(coords, proteinnet_id, route):
    """ Takes a custom representation and turns into a .pdb file.
        Inputs:
        * coords: array/tensor of shape (3 x N) or (N x 3). in Angstroms.
                  same order as in the proteinnnet is assumed (same as raw pdb file)
        * proteinnet_id: str. proteinnet id format (<class>#<pdb_id>_<chain_number>_<chain_id>)
                         see: https://github.com/aqlaboratory/proteinnet/
        * route: str. destin route.
        Output: tuple of routes: (original, generated) for the structures.
    """
    import mdtraj
    # convert to numpy
    if isinstance(coords, torch.Tensor):
        coords = coords.detach().cpu().numpy()
    # ensure (1, N, 3) as mdtraj expects (frames, atoms, xyz)
    # bug fix: a (3, N) layout has 3 in dim 0, so that is the case needing a
    # transpose (the old check transposed the already-correct (N, 3) layout)
    if coords.shape[0] == 3:
        coords = coords.T
    # bug fix: np.newaxis is an alias (not callable); add the frame dim properly
    coords = np.expand_dims(coords, axis=0)
    # get pdb id and chain num
    pdb_name, chain_num = proteinnet_id.split("#")[-1].split("_")[:-1]
    pdb_destin = "/".join(route.split("/")[:-1])+"/"+pdb_name+".pdb"
    # download pdb file and select appropiate
    download_pdb(pdb_name, pdb_destin)
    clean_pdb(pdb_destin, chain_num=chain_num)
    # load trajectory scaffold and replace coordinates - assumes same order
    scaffold = mdtraj.load_pdb(pdb_destin)
    scaffold.xyz = coords
    scaffold.save(route)
    return pdb_destin, route
def coords2pdb(seq, coords, cloud_mask, prefix="", name="af2_struct.pdb"):
    """ Turns coordinates into PDB files ready to be visualized.
        Inputs:
        * seq: (L,) tensor of ints (sidechainnet aa-key pairs)
        * coords: coords of the occupied atoms (one row of 3 per True in cloud_mask)
        * cloud_mask: (L, C) boolean mask of occupied spaces in scn format
        * prefix: str. directory to save files.
        * name: str. name of destin file (ex: pred1.pdb)
    """
    # bug fix: torch.zeros takes sizes, not a shape tuple plus an extra int -
    # unpack cloud_mask.shape so the scaffold is (L, C, 3)
    scaffold = torch.zeros(*cloud_mask.shape, 3)
    scaffold[cloud_mask] = coords.cpu().float()
    # build structures and save
    # NOTE(review): `scn` is not imported in this module - presumably
    # `import sidechainnet as scn` is expected; verify before use
    pred = scn.StructureBuilder( seq, crd=scaffold )
    pred.to_pdb(prefix+name)
# adapted from https://github.com/facebookresearch/esm
def remove_insertions(sequence: str) -> str:
    """ Removes any insertions into the sequence. Needed to load aligned sequences in an MSA. """
    # lowercase letters, '.' and '*' all denote insertions in aligned MSAs
    drop = dict.fromkeys(string.ascii_lowercase)
    drop.update({".": None, "*": None})
    return sequence.translate(str.maketrans(drop))
def read_msa(filename: str, nseq: int):
    """ Reads the first nseq sequences from an MSA file, automatically removes insertions."""
    records = itertools.islice(SeqIO.parse(filename, "fasta"), nseq)
    return [(record.description, remove_insertions(str(record.seq)))
            for record in records]
# sidechainnet / MSA / other data utils
def ids_to_embed_input(x):
    """ Returns the amino acid string input for calculating the ESM and MSA transformer embeddings
        Inputs:
        * x: any deeply nested list of integers that correspond with amino acid id
    """
    assert isinstance(x, list), 'input must be a list'
    id2aa = VOCAB._int2char
    out = []
    for item in x:
        if isinstance(item, list):
            # recurse into nested lists (e.g. MSA of sequences)
            out.append(ids_to_embed_input(item))
        elif isinstance(item, int):
            out.append(id2aa[item])
        else:
            raise TypeError('type must be either list or character')
    # a flat list of residue chars collapses into a single (label, seq) pair
    if all(isinstance(entry, str) for entry in out):
        return (None, ''.join(out))
    return out
def get_msa_embedd(msa, embedd_model, batch_converter, device = None):
    """ Returns the MSA_tr embeddings for a protein.
        Inputs:
        * msa: ( (b,) n_seqs, L) tensor of ints (in sidechainnet int-char convention)
        * embedd_model: MSA_tr model (see train_end2end.py for an example)
        * batch_converter: MSA_tr batch converter (see train_end2end.py for an example)
        * device: torch device to run on. defaults to the embedding model's device.
        Outputs: tensor of (batch, n_seqs, L, embedd_dim)
        * n_seqs: number of sequences in the MSA
        * embedd_dim: number of embedding dimensions. 768 for MSA_Transformer
    """
    # use MSA transformer
    REPR_LAYER_NUM = 12
    # bug fix: the `device` argument used to be ignored (unconditionally
    # overwritten); only fall back to the model's device when not passed
    if device is None:
        device = embedd_model.device
    embedd_inputs = ids_to_embed_input(msa.cpu().tolist())
    msa_batch_labels, msa_batch_strs, msa_batch_tokens = batch_converter(embedd_inputs)
    with torch.no_grad():
        results = embedd_model(msa_batch_tokens.to(device), repr_layers=[REPR_LAYER_NUM], return_contacts=False)
    # index 0 is for start token. so take from 1 one
    token_reps = results["representations"][REPR_LAYER_NUM][..., 1:, :]
    return token_reps
def get_esm_embedd(seq, embedd_model, batch_converter, msa_data=None):
    """ Returns the ESM embeddings for a protein.
        Inputs:
        * seq: ( (b,) L,) tensor of ints (in sidechainnet int-char convention)
        * embedd_model: ESM model (see train_end2end.py for an example)
        * batch_converter: ESM batch converter (see train_end2end.py for an example)
        * msa_data: unused. kept for signature compatibility with get_msa_embedd.
        Outputs: tensor of (batch, n_seqs, L, embedd_dim)
        * n_seqs: number of sequences in the MSA. 1 for ESM-1b
        * embedd_dim: number of embedding dimensions. 1280 for ESM-1b
    """
    # use ESM transformer
    device = embedd_model.device
    REPR_LAYER_NUM = 33
    embedd_inputs = ids_to_embed_input(seq.cpu().tolist())
    batch_labels, batch_strs, batch_tokens = batch_converter(embedd_inputs)
    with torch.no_grad():
        results = embedd_model(batch_tokens.to(device), repr_layers=[REPR_LAYER_NUM], return_contacts=False)
    # index 0 is for start token. so take from 1 one; unsqueeze adds n_seqs=1
    token_reps = results["representations"][REPR_LAYER_NUM][..., 1:, :].unsqueeze(dim=1)
    return token_reps
def get_all_protein_ids(dataloader, verbose=False):
    """ Given a sidechainnet dataloader for a CASP version,
        Returns all the ids belonging to proteins.
        Inputs:
        * dataloader: a sidechainnet dataloader for a CASP version
        * verbose: bool. print skipped (non-protein) entries.
        Outputs: a set containing the ids for all protein entries.
    """
    # store ids here
    ids = set()
    # bug fixes vs. the original: iterate the `dataloader` argument (the old
    # code walked a global `dataloaders['train']` and required an unimported
    # tqdm), use a distinct inner loop variable, and drop the dead
    # try/StopIteration (a `for` loop never propagates StopIteration).
    for batch in dataloader:
        for j in range(batch.int_seqs.shape[0]):
            # check if all fragments are : 4_LETTER_PDB + NUM + CHAIN
            max_len_10 = len(batch.pids[j]) < 10
            fragments = [len(x) <= 4 for x in batch.pids[j].split("_")]
            fragments_under_4 = sum(fragments) == len(fragments)  # AND CONDITION
            # record id
            if max_len_10 and fragments_under_4:
                ids.add(batch.pids[j])
            elif verbose:
                print("skip:", batch.pids[j], "under 4", fragments)
    # returns set of ids
    return ids
def scn_cloud_mask(scn_seq, boolean=True, coords=None):
    """ Gets the boolean mask atom positions (not all aas have same atoms).
        Inputs:
        * scn_seq: (batch, length) sequence as provided by Sidechainnet package
        * boolean: whether to return as array of idxs or boolean values
        * coords: optional .(batch, lc, 3). sidechainnet coords.
                  returns the true mask (solves potential atoms that might not be provided)
        Outputs: (batch, length, NUM_COORDS_PER_RES) boolean mask
    """
    # ensure a leading batch dim on an unbatched sequence
    scn_seq = expand_dims_to(scn_seq, 2 - len(scn_seq.shape))
    # early check for coords mask
    if coords is not None:
        # an atom slot counts as present when not all 3 of its coordinates are
        # exactly zero (presumably zero-vectors are padding - verify upstream)
        batch_mask = ( rearrange(coords, '... (l c) d -> ... l c d', c=14) == 0 ).sum(dim=-1) < coords.shape[-1]
        if boolean:
            return batch_mask.bool()
        else:
            return batch_mask.nonzero()
    # do loop in cpu
    device = scn_seq.device
    batch_mask = []
    scn_seq = scn_seq.cpu().tolist()
    for i, seq in enumerate(scn_seq):
        # get masks for each prot (points for each aa) via the precomputed
        # CUSTOM_INFO lookup table keyed by one-letter residue code
        batch_mask.append( torch.tensor([CUSTOM_INFO[VOCAB.int2char(aa)]['cloud_mask'] \
                                         for aa in seq]).bool().to(device).unsqueeze(0) )
    # concat in last dim
    batch_mask = torch.cat(batch_mask, dim=0)
    # return mask (boolean or indexes)
    if boolean:
        return batch_mask.bool()
    else:
        return batch_mask.nonzero()
def scn_backbone_mask(scn_seq, boolean=True, n_aa=3):
    """ Gets the boolean mask for N and CA positions.
        Inputs:
        * scn_seq: sequence(s) as provided by Sidechainnet package (int tensor/s)
        * boolean: whether to return boolean masks or index tensors
        * n_aa: number of atoms in a backbone. (may include cbeta as 4th pos)
        Outputs: (N_mask, CA_mask, C_mask)
    """
    # label the first three atom slots of every residue: 1=N, 2=CA, 3=C
    labels = torch.zeros(*scn_seq.shape, n_aa).to(scn_seq.device)
    for slot in range(3):
        labels[..., slot] = slot + 1
    # flatten the (length, n_aa) atom grid into one axis
    labels = labels.reshape(*labels.shape[:-2], labels.shape[-2] * labels.shape[-1])
    masks = tuple(labels == tag for tag in (1, 2, 3))
    if boolean:
        return masks
    return tuple(torch.nonzero(m) for m in masks)
def scn_atom_embedd(scn_seq):
    """ Returns the token for each atom in the aa.
        Inputs:
        * scn_seq: sequence(s) as provided by Sidechainnet package (int tensor/s)
    """
    device = scn_seq.device
    # token lookup runs on cpu; each row is moved back to the source device
    rows = []
    for seq in scn_seq.cpu():
        tokens = [CUSTOM_INFO[VOCAB.int2char(aa.item())]["atom_id_embedd"]
                  for aa in seq]
        rows.append(torch.tensor(tokens).long().to(device).unsqueeze(0))
    return torch.cat(rows, dim=0)
def nth_deg_adjacency(adj_mat, n=1, sparse=False):
    """ Calculates the n-th degree adjacency matrix.
        Performs mm of adj_mat and adds the newly added.
        Default is dense. Mods for sparse version are done when needed.
        Inputs:
        * adj_mat: (N, N) adjacency tensor
        * n: int. degree of the output adjacency
        * sparse: bool. whether to use torch-sparse module
        Outputs:
        * new_adj_mat: adjacency at the n-th power (0/1 float)
        * attr_mat: degree of connectivity (1 for neighs, 2 for neighs^2, ... )
    """
    adj_mat = adj_mat.float()
    attr_mat = torch.zeros_like(adj_mat)
    new_adj_mat = adj_mat.clone()

    for i in range(n):
        if i == 0:
            attr_mat += adj_mat
            continue

        if i == 1 and sparse:
            # lazily build the sparse representation on the first power step
            idxs = adj_mat.nonzero().t()
            vals = adj_mat[idxs[0], idxs[1]]
            new_idxs = idxs.clone()
            new_vals = vals.clone()
            m, k, n = 3 * [adj_mat.shape[0]]  # (m, n) * (n, k) , but adj_mats are squared: m=n=k

        if sparse:
            # NOTE(review): requires the optional torch_sparse dependency
            new_idxs, new_vals = torch_sparse.spspmm(new_idxs, new_vals, idxs, vals, m=m, k=k, n=n)
            new_vals = new_vals.bool().float()
            new_adj_mat = torch.zeros_like(attr_mat)
            new_adj_mat[new_idxs[0], new_idxs[1]] = new_vals
            # sparse to dense is slower
            # torch.sparse.FloatTensor(idxs, vals).to_dense()
        else:
            new_adj_mat = (new_adj_mat @ adj_mat).bool().float()

        # bug fix: masked_fill is out-of-place, so degrees > 1 were silently
        # discarded. Use the in-place variant, and only tag pairs that become
        # reachable at this degree (never overwrite a lower recorded degree).
        newly_reached = new_adj_mat.bool() & ~attr_mat.bool()
        attr_mat.masked_fill_(newly_reached, i + 1)

    return new_adj_mat, attr_mat
def prot_covalent_bond(seqs, adj_degree=1, cloud_mask=None, mat=True):
    """ Returns the idxs of covalent bonds for a protein.
        Inputs
        * seqs: (b, n) torch long.
        * adj_degree: int. adjacency degree
        * cloud_mask: mask selecting the present atoms.
        * mat: whether to return as indexes or matrices.
               for indexes, only 1 seq is supported
        Outputs: edge_idxs, edge_attrs
    """
    device = seqs.device
    # get starting poses for every aa
    adj_mat = torch.zeros(seqs.shape[0], seqs.shape[1]*14, seqs.shape[1]*14)
    # not needed to device since it's only for indices.
    scaff = torch.zeros(seqs.shape[1], 14)
    scaff[:, 0] = 1
    # NOTE(review): nonzero() on the 2d scaff yields interleaved (row, col)
    # pairs after reshape(-1), not the flat per-residue bases 0, 14, 28, ...
    # one would expect here - verify this indexing is intended
    idxs = torch.nonzero(scaff).reshape(-1)
    for s,seq in enumerate(seqs):
        for i,idx in enumerate(idxs):
            if i >= seq.shape[0]:
                break
            # offset by pos in chain ( intra-aa bonds + with next aa )
            bonds = idx + torch.tensor( constants.AA_DATA[VOCAB.int2char(seq[i].item())]['bonds'] + [[2, 14]] ).t()
            # delete link with next if final AA in seq
            if i == idxs.shape[0]-1:
                bonds = bonds[:, :-1]
            # modify adj mat
            adj_mat[s, bonds[0], bonds[1]] = 1
        # convert to undirected
        adj_mat[s] = adj_mat[s] + adj_mat[s].t()
    # do N_th degree adjacency
    adj_mat, attr_mat = nth_deg_adjacency(adj_mat, n=adj_degree, sparse=False) # True
    if mat:
        # matrix form: boolean adjacency plus the degree-valued attribute matrix
        return attr_mat.bool().to(seqs.device), attr_mat.to(device)
    else:
        # index form: only the first sequence of the batch is considered
        edge_idxs = attr_mat[0].nonzero().t().long()
        edge_attrs = attr_mat[0, edge_idxs[0], edge_idxs[1]]
        return edge_idxs.to(seqs.device), edge_attrs.to(seqs.device)
def nerf_torch(a, b, c, l, theta, chi):
    """ Custom Natural extension of Reference Frame.
        Inputs:
        * a: (batch, 3) or (3,). point(s) of the plane, not connected to d
        * b: (batch, 3) or (3,). point(s) of the plane, not connected to d
        * c: (batch, 3) or (3,). point(s) of the plane, connected to d
        * l: (batch,) or float. bond length(s) between c and the new point
        * theta: (batch,) or (float). angle(s) between b-c-d
        * chi: (batch,) or float. dihedral angle(s) between the a-b-c and b-c-d planes
        Outputs: d (batch, 3) or (3,). the next point in the sequence, linked to c
    """
    # safety check
    if not ((-np.pi <= theta) * (theta <= np.pi)).all().item():
        raise ValueError(f"theta(s) must be in radians and in [-pi, pi]. theta(s) = {theta}")
    # frame vectors
    ab = b - a
    bc = c - b
    # orthonormal frame built from the plane normal(s)
    normal = torch.cross(ab, bc, dim=-1)
    in_plane = torch.cross(normal, bc, dim=-1)
    basis = torch.stack([bc, in_plane, normal], dim=-1)
    basis = basis / torch.norm(basis, dim=-2, keepdim=True)
    # spherical-coordinate prototype point, rotated into the frame
    proto = torch.stack([-torch.cos(theta),
                         torch.sin(theta) * torch.cos(chi),
                         torch.sin(theta) * torch.sin(chi)], dim=-1).unsqueeze(-1)
    # extend from c along the rotated direction, scaled by the bond length
    return c + l.unsqueeze(-1) * torch.matmul(basis, proto).squeeze()
def sidechain_container(backbones, n_aa, cloud_mask=None, place_oxygen=False,
                        n_atoms=NUM_COORDS_PER_RES, padding=GLOBAL_PAD_CHAR):
    """ Gets a backbone of the protein, returns the whole coordinates
        with sidechains (same format as sidechainnet). Keeps differentiability.
        Inputs:
        * backbones: (batch, L*3, 3): assume batch=1 (could be extended later).
                     Coords for (N-term, C-alpha, C-term) of every aa.
        * n_aa: int. number of points for each aa in the backbones.
        * cloud_mask: (batch, l, c). optional. cloud mask from scn_cloud_mask`.
                      returns point outside to 0. if passed, else c_alpha
        * place_oxygen: whether to claculate the oxygen of the
                        carbonyl group via NeRF
        * n_atoms: int. n of atom positions / atom. same as in sidechainnet: 14
        * padding: int. padding token. same as in sidechainnet: 0
        Outputs: whole coordinates of shape (batch, L, n_atoms, 3)
    """
    device = backbones.device
    batch, length = backbones.shape[0], backbones.shape[1] // n_aa
    # build scaffold from (N, CA, C, CB)
    new_coords = torch.zeros(batch, length, NUM_COORDS_PER_RES, 3).to(device)
    predicted = rearrange(backbones, 'b (l back) d -> b l back d', l=length)
    # set backbone positions
    new_coords[:, :, :3] = predicted[:, :, :3]
    # set rest of positions to c_beta if present, else c_alpha
    if n_aa == 4:
        new_coords[:, :, 4:] = repeat(predicted[:, :, -1], 'b l d -> b l scn d', scn=10)
    else:
        new_coords[:, :, 4:] = repeat(new_coords[:, :, 1], 'b l d -> b l scn d', scn=10)
    # zero out the atom slots the cloud mask marks as absent
    if cloud_mask is not None:
        new_coords[torch.logical_not(cloud_mask)] = 0.
    # hard-calculate oxygen position of carbonyl group with parallel version of NERF
    if place_oxygen:
        # build (=O) position of revery aa in each chain
        for s in range(batch):
            # dihedrals phi=f(c-1, n, ca, c) & psi=f(n, ca, c, n+1)
            # phi = get_dihedral_torch(*backbone[s, i*3 - 1 : i*3 + 3]) if i>0 else None
            # NOTE(review): get_dihedral_torch is defined elsewhere in the
            # package - confirm it is in scope when place_oxygen=True
            psis = torch.tensor([ get_dihedral_torch(*backbones[s, i*3 + 0 : i*3 + 4] )if i < length-1 else np.pi*5/4 \
                                  for i in range(length) ])
            # the angle for placing oxygen is opposite to psi of current res.
            # psi not available for last one so pi/4 taken for now
            bond_lens = repeat(torch.tensor(BB_BUILD_INFO["BONDLENS"]["c-o"]), ' -> b', b=length).to(psis.device)
            bond_angs = repeat(torch.tensor(BB_BUILD_INFO["BONDANGS"]["ca-c-o"]), ' -> b', b=length).to(psis.device)
            correction = repeat(torch.tensor(-np.pi), ' -> b', b=length).to(psis.device)
            # NOTE(review): this writes every batch element on each pass of
            # the s-loop although psis comes from sample s only - verify the
            # behaviour for batch > 1
            new_coords[:, :, 3] = nerf_torch(new_coords[:, :, 0],
                                             new_coords[:, :, 1],
                                             new_coords[:, :, 2],
                                             bond_lens, bond_angs, psis + correction)
    else:
        # init oxygen to carbonyl
        new_coords[:, :, 3] = predicted[:, :, 2]
    return new_coords
# distance utils (distogram to dist mat + masking)
def center_distogram_torch(distogram, bins=DISTANCE_THRESHOLDS, min_t=1., center="mean", wide="std"):
    """ Returns the central estimate of a distogram. Median for now.
        Inputs:
        * distogram: (batch, N, N, B) where B is the number of buckets.
        * bins: (B,) containing the cutoffs for the different buckets
        * min_t: float. lower bound for distances.
          NOTE(review): min_t is currently unused in the body - verify.
        * center: 'mean' or 'median'. statistic used as the point estimate
        * wide: 'var', 'std' or anything else (no dispersion). weighting basis
        Outputs:
        * central: (batch, N, N)
        * weights: (batch, N, N)
    """
    shape, device = distogram.shape, distogram.device
    # threshold to weights and find mean value of each bin
    n_bins = ( bins - 0.5 * (bins[2] - bins[1]) ).to(device)
    n_bins[0] = 1.5
    n_bins[-1] = 1.33*bins[-1] # above last threshold is ignored
    max_bin_allowed = torch.tensor(n_bins.shape[0]-1).to(device).long()
    # calculate measures of centrality and dispersion -
    magnitudes = distogram.sum(dim=-1)
    if center == "median":
        cum_dist = torch.cumsum(distogram, dim=-1)
        medium = 0.5 * cum_dist[..., -1:]
        central = torch.searchsorted(cum_dist, medium).squeeze()
        central = n_bins[ torch.min(central, max_bin_allowed) ]
    elif center == "mean":
        central = (distogram * n_bins).sum(dim=-1) / magnitudes
    # create mask for last class - (IGNORE_INDEX)
    mask = (central <= bins[-2].item()).float()
    # mask diagonal to 0 dist - don't do masked filling to avoid inplace errors
    diag_idxs = np.arange(shape[-2])
    central = expand_dims_to(central, 3 - len(central.shape))
    central[:, diag_idxs, diag_idxs] *= 0.
    # provide weights
    if wide == "var":
        dispersion = (distogram * (n_bins - central.unsqueeze(-1))**2).sum(dim=-1) / magnitudes
    elif wide == "std":
        dispersion = ((distogram * (n_bins - central.unsqueeze(-1))**2).sum(dim=-1) / magnitudes).sqrt()
    else:
        dispersion = torch.zeros_like(central, device=device)
    # rescale to 0-1. lower std / var --> weight=1. set potential nan's to 0
    # (x != x is the classic elementwise NaN test)
    weights = mask / (1+dispersion)
    weights[weights != weights] *= 0.
    weights[:, diag_idxs, diag_idxs] *= 0.
    return central, weights
# distance matrix to 3d coords: https://github.com/scikit-learn/scikit-learn/blob/42aff4e2e/sklearn/manifold/_mds.py#L279
def mds_torch(pre_dist_mat, weights=None, iters=10, tol=1e-5, eigen=False, verbose=2):
    """ Gets distance matrix. Outputs 3d. See below for wrapper.
        Assumes (for now) distogram is (N x N) and symmetric
        Inputs:
        * pre_dist_mat: (batch, N, N) or (N, N) target pairwise distances
        * weights: optional (batch, N, N) relative weight per pair
        * iters: int. max number of SMACOF refinement iterations
        * tol: float. stop once relative stress improvement <= tol
        * eigen: bool. if True (and no weights) return the closed-form
                 eigendecomposition embedding without iterating
        * verbose: int. 0 silent, 1 stop messages, >=2 per-iteration logs
        Outs:
        * best_3d_coords: (batch x 3 x N)
        * historic_stresses: (batch x steps)
    """
    device, dtype = pre_dist_mat.device, pre_dist_mat.type()
    # ensure batched MDS
    pre_dist_mat = expand_dims_to(pre_dist_mat, length = ( 3 - len(pre_dist_mat.shape) ))
    # start
    batch, N, _ = pre_dist_mat.shape
    diag_idxs = np.arange(N)
    # stress history starts at +inf so the first iteration always improves
    his = [torch.tensor([np.inf]*batch, device=device)]
    # initialize by eigendecomposition: https://www.lptmc.jussieu.fr/user/lesne/bioinformatics.pdf
    # follow : https://www.biorxiv.org/content/10.1101/2020.11.27.401232v1.full.pdf
    D = pre_dist_mat**2
    # Gram matrix anchored at point 0 (classical MDS double-centering shortcut)
    M = 0.5 * (D[:, :1, :] + D[:, :, :1] - D)
    # do loop svd bc it's faster: (2-3x in CPU and 1-2x in GPU)
    # https://discuss.pytorch.org/t/batched-svd-lowrank-being-much-slower-than-loop-implementation-both-cpu-and-gpu/119336
    svds = [torch.svd_lowrank(mi) for mi in M]
    u = torch.stack([svd[0] for svd in svds], dim=0)
    s = torch.stack([svd[1] for svd in svds], dim=0)
    v = torch.stack([svd[2] for svd in svds], dim=0)
    # embedding = U * sqrt(S); keep the top-3 dimensions as coordinates
    best_3d_coords = torch.bmm(u, torch.diag_embed(s).sqrt())[..., :3]
    # only eigen - way faster but not weights
    if weights is None and eigen==True:
        return torch.transpose( best_3d_coords, -1, -2), torch.zeros_like(torch.stack(his, dim=0))
    elif eigen==True:
        if verbose:
            print("Can't use eigen flag if weights are active. Fallback to iterative")
    # continue the iterative way
    if weights is None:
        weights = torch.ones_like(pre_dist_mat)
    # iterative updates:
    for i in range(iters):
        # compute distance matrix of coords and stress
        best_3d_coords = best_3d_coords.contiguous()
        dist_mat = torch.cdist(best_3d_coords, best_3d_coords, p=2).clone()
        stress = ( weights * (dist_mat - pre_dist_mat)**2 ).sum(dim=(-1,-2)) * 0.5
        # perturb - update X using the Guttman transform - sklearn-like
        dist_mat[ dist_mat <= 0 ] += 1e-7   # avoid division by zero below
        ratio = weights * (pre_dist_mat / dist_mat)
        B = -ratio
        B[:, diag_idxs, diag_idxs] += ratio.sum(dim=-1)
        # update
        coords = (1. / N * torch.matmul(B, best_3d_coords))
        # normalize stress by the coordinate magnitude (scale-invariant stress)
        dis = torch.norm(coords, dim=(-1, -2))
        if verbose >= 2:
            print('it: %d, stress %s' % (i, stress))
        # update metrics if relative improvement above tolerance
        if (his[-1] - stress / dis).mean() <= tol:
            if verbose:
                print('breaking at iteration %d with stress %s' % (i,
                      stress / dis))
            break
        best_3d_coords = coords
        his.append( stress / dis )
    return torch.transpose(best_3d_coords, -1,-2), torch.stack(his, dim=0)
def mds_numpy(pre_dist_mat, weights=None, iters=10, tol=1e-5, eigen=False, verbose=2):
    """ Gets distance matrix. Outputs 3d. See below for wrapper.
        Assumes (for now) distrogram is (N x N) and symmetric
        Inputs:
        * pre_dist_mat: (batch, N, N) or (N, N) target pairwise distances
        * weights: optional (batch, N, N) relative weight per pair
        * iters: int. max number of SMACOF refinement iterations
        * tol: float. stop once relative stress improvement <= tol
        * eigen: accepted for torch-API parity but unused here
        * verbose: int. 0 silent, 1 stop messages, >=2 per-iteration logs
        Out:
        * best_3d_coords: (batch x 3 x N)
        * historic_stress
    """
    if weights is None:
        weights = np.ones_like(pre_dist_mat)
    # ensure batched MDS
    pre_dist_mat = expand_dims_to(pre_dist_mat, length = ( 3 - len(pre_dist_mat.shape) ))
    # start
    batch, N, _ = pre_dist_mat.shape
    his = [np.inf]
    # init random coords in [-1, 1) (no eigen warm-start in the numpy path)
    best_stress = np.inf * np.ones(batch)
    best_3d_coords = 2*np.random.rand(batch, 3, N) - 1
    # iterative updates:
    for i in range(iters):
        # compute distance matrix of coords and stress
        dist_mat = np.linalg.norm(best_3d_coords[:, :, :, None] - best_3d_coords[:, :, None, :], axis=-3)
        stress = (( weights * (dist_mat - pre_dist_mat) )**2).sum(axis=(-1, -2)) * 0.5
        # perturb - update X using the Guttman transform - sklearn-like
        dist_mat[dist_mat == 0] = 1e-7   # avoid division by zero below
        ratio = weights * (pre_dist_mat / dist_mat)
        B = -ratio
        B[:, np.arange(N), np.arange(N)] += ratio.sum(axis=-1)
        # update - double transpose. TODO: consider fix
        # NOTE(review): multiplies (3,N) @ B; relies on B being symmetric
        # (it is, since pre_dist_mat and dist_mat are) — confirm.
        coords = (1. / N * np.matmul(best_3d_coords, B))
        # normalize stress by coordinate magnitude (scale-invariant stress)
        dis = np.linalg.norm(coords, axis=(-1, -2))
        if verbose >= 2:
            print('it: %d, stress %s' % (i, stress))
        # update metrics if relative improvement above tolerance
        if (best_stress - stress / dis).mean() <= tol:
            if verbose:
                print('breaking at iteration %d with stress %s' % (i,
                      stress / dis))
            break
        best_3d_coords = coords
        best_stress = stress / dis
        his.append(best_stress)
    # NOTE(review): `his` mixes a scalar (np.inf) with (batch,) arrays, so
    # np.array(his) only stacks cleanly in narrow cases — confirm callers.
    return best_3d_coords, np.array(his)
def get_dihedral_torch(c1, c2, c3, c4):
    """ Returns the dihedral angle (radians) defined by four points.
        Uses the atan2 formulation from:
        https://en.wikipedia.org/wiki/Dihedral_angle#In_polymer_physics
        (torch.dot is avoided because it does not broadcast)
        Inputs:
        * c1, c2, c3, c4: (batch, 3) or (3,) point coordinates
        Output: (batch,) or scalar tensor of angles in (-pi, pi]
    """
    # consecutive bond vectors
    b1 = c2 - c1
    b2 = c3 - c2
    b3 = c4 - c3
    # plane normals on either side of the central bond
    n1 = torch.cross(b1, b2, dim=-1)
    n2 = torch.cross(b2, b3, dim=-1)
    # atan2(y, x) with y scaled by |b2| keeps the sign of the torsion
    y = ((torch.norm(b2, dim=-1, keepdim=True) * b1) * n2).sum(dim=-1)
    x = (n1 * n2).sum(dim=-1)
    return torch.atan2(y, x)
def get_dihedral_numpy(c1, c2, c3, c4):
    """ Returns the dihedral angle (radians) defined by four points.
        Uses the atan2 formulation from:
        https://en.wikipedia.org/wiki/Dihedral_angle#In_polymer_physics
        Inputs:
        * c1, c2, c3, c4: (batch, 3) or (3,) point coordinates
        Output: (batch,) or scalar array of angles in (-pi, pi]
    """
    # consecutive bond vectors
    b1 = c2 - c1
    b2 = c3 - c2
    b3 = c4 - c3
    # plane normals on either side of the central bond
    n1 = np.cross(b1, b2, axis=-1)
    n2 = np.cross(b2, b3, axis=-1)
    # atan2(y, x) with y scaled by |b2| keeps the sign of the torsion
    y = ((np.linalg.norm(b2, axis=-1, keepdims=True) * b1) * n2).sum(axis=-1)
    x = (n1 * n2).sum(axis=-1)
    return np.arctan2(y, x)
def calc_phis_torch(pred_coords, N_mask, CA_mask, C_mask=None,
                    prop=True, verbose=0):
    """ Filters mirrors selecting the 1 with most N of negative phis.
        Used as part of the MDScaling wrapper if arg is passed. See below.
        Angle Phi between planes: (Cterm{-1}, N, Ca{0}) and (N{0}, Ca{+1}, Cterm{+1})
        Inputs:
        * pred_coords: (batch, 3, N) predicted coordinates
        * N_mask: (batch, N) boolean mask for N-term positions
        * CA_mask: (batch, N) boolean mask for C-alpha positions
        * C_mask: (batch, N) or None. boolean mask for C-term positions or
                  automatically calculate from N_mask and CA_mask if None.
        * prop: bool. whether to return as a proportion of negative phis.
        * verbose: bool. verbosity level
        Output: (batch, N) containing the phi angles or (batch,) containing
                the proportions.
        Note: use [0] since all prots in batch have same backbone
    """
    # detach gradients for angle calculation - mirror selection
    pred_coords_ = torch.transpose(pred_coords.detach(), -1 , -2).cpu()
    # ensure dims
    N_mask = expand_dims_to( N_mask, 2-len(N_mask.shape) )
    CA_mask = expand_dims_to( CA_mask, 2-len(CA_mask.shape) )
    if C_mask is not None:
        C_mask = expand_dims_to( C_mask, 2-len(C_mask.shape) )
    else:
        # anything that is neither N nor CA is treated as the backbone C
        C_mask = torch.logical_not(torch.logical_or(N_mask,CA_mask))
    # select points - index with mask[0] since the batch shares one backbone
    n_terms = pred_coords_[:, N_mask[0].squeeze()]
    c_alphas = pred_coords_[:, CA_mask[0].squeeze()]
    c_terms = pred_coords_[:, C_mask[0].squeeze()]
    # compute phis for every protein in the batch
    phis = [get_dihedral_torch(c_terms[i, :-1],
                               n_terms[i, 1:],
                               c_alphas[i, 1:],
                               c_terms[i, 1:]) for i in range(pred_coords.shape[0])]
    # return percentage of lower than 0
    if prop:
        return torch.tensor( [(x<0).float().mean().item() for x in phis] )
    return phis
def calc_phis_numpy(pred_coords, N_mask, CA_mask, C_mask=None,
                    prop=True, verbose=0):
    """ Filters mirrors selecting the 1 with most N of negative phis.
        Used as part of the MDScaling wrapper if arg is passed. See below.
        Angle Phi between planes: (Cterm{-1}, N, Ca{0}) and (N{0}, Ca{+1}, Cterm{+1})
        Inputs:
        * pred_coords: (batch, 3, N) predicted coordinates
        * N_mask: (N, ) boolean mask for N-term positions
        * CA_mask: (N, ) boolean mask for C-alpha positions
        * C_mask: (N, ) or None. boolean mask for C-term positions or
                  automatically calculate from N_mask and CA_mask if None.
        * prop: bool. whether to return as a proportion of negative phis.
        * verbose: bool. verbosity level
        Output: (batch, N) containing the phi angles or (batch,) containing
                the proportions.
    """
    # detach gradients for angle calculation - mirror selection
    pred_coords_ = np.transpose(pred_coords, (0, 2, 1))
    n_terms = pred_coords_[:, N_mask.squeeze()]
    c_alphas = pred_coords_[:, CA_mask.squeeze()]
    # select c_term auto if not passed
    if C_mask is not None:
        # NOTE(review): unlike the other selections this does not .squeeze()
        # the mask — presumably callers pass a flat (N,) mask here; confirm.
        c_terms = pred_coords_[:, C_mask]
    else:
        # anything that is neither N nor CA is treated as the backbone C
        c_terms = pred_coords_[:, (np.ones_like(N_mask)-N_mask-CA_mask).squeeze().astype(bool) ]
    # compute phis for every protein in the batch
    phis = [get_dihedral_numpy(c_terms[i, :-1],
                               n_terms[i, 1:],
                               c_alphas[i, 1:],
                               c_terms[i, 1:]) for i in range(pred_coords.shape[0])]
    # return percentage of lower than 0
    if prop:
        return np.array( [(x<0).mean() for x in phis] )
    return phis
# alignment by centering + rotation to compute optimal RMSD
# adapted from : https://github.com/charnley/rmsd/
def kabsch_torch(X, Y, cpu=True):
    """ Kabsch alignment of X into Y.
        Assumes X,Y are both (Dims x N_points). See below for wrapper.
        * X, Y: (D, N) point clouds
        * cpu: bool. run the SVD on CPU (the covariance is tiny, D x D)
        Returns (X_rotated_centered, Y_centered).
    """
    device = X.device
    # center X and Y to the origin
    X_ = X - X.mean(dim=-1, keepdim=True)
    Y_ = Y - Y.mean(dim=-1, keepdim=True)
    # calculate covariance matrix (for each prot in the batch)
    C = torch.matmul(X_, Y_.t()).detach()
    if cpu:
        C = C.cpu()
    # Optimal rotation matrix via SVD.
    # Compare (major, minor) so torch >= 2.x takes the torch.linalg path;
    # the previous check read only the minor version and misclassified 2.0+.
    version = tuple(int(part) for part in torch.__version__.split(".")[:2])
    if version < (1, 8):
        # before 1.8, torch.svd returns V (not V^T): transpose to match
        V, S, W = torch.svd(C)
        W = W.t()
    else:
        V, S, W = torch.linalg.svd(C)
    # determinant sign for direction correction (avoid improper rotations)
    d = (torch.det(V) * torch.det(W)) < 0.0
    if d:
        S[-1] = S[-1] * (-1)
        V[:, -1] = V[:, -1] * (-1)
    # Create Rotation matrix U
    U = torch.matmul(V, W).to(device)
    # calculate rotations
    X_ = torch.matmul(X_.t(), U).t()
    # return centered and aligned
    return X_, Y_
def kabsch_numpy(X, Y):
    """ Kabsch alignment of X into Y (numpy backend).
        Assumes X, Y are both (Dims x N_points). See below for wrapper.
        Returns (X_rotated_centered, Y_centered).
    """
    # shift both point clouds onto their centroids
    X_ = X - X.mean(axis=-1, keepdims=True)
    Y_ = Y - Y.mean(axis=-1, keepdims=True)
    # covariance between the centered clouds
    cov = np.dot(X_, Y_.transpose())
    # SVD gives the optimal rotation (numpy returns W already transposed)
    V, S, W = np.linalg.svd(cov)
    # flip the last singular vector when the raw solution is a reflection
    if (np.linalg.det(V) * np.linalg.det(W)) < 0.0:
        S[-1] = S[-1] * (-1)
        V[:, -1] = V[:, -1] * (-1)
    rot = np.dot(V, W)
    # rotate the centered X into Y's frame
    X_ = np.dot(X_.T, rot).T
    return X_, Y_
# metrics - more formulas here: http://predictioncenter.org/casp12/doc/help.html
def distmat_loss_torch(X=None, Y=None, X_mat=None, Y_mat=None, p=2, q=2, custom=None, distmat_mask=None):
    """ Calculates a loss on the distance matrix - no need to align structs.
        Inputs:
        * X: (N, d) tensor. the predicted structure. One of (X, X_mat) is needed.
        * X_mat: (N, N) tensor. the predicted distance matrix. Optional ()
        * Y: (N, d) tensor. the true structure. One of (Y, Y_mat) is needed.
        * Y_mat: (N, N) tensor. the true distance matrix. Optional ()
        * p: int. power for the distance calculation (2 for euclidean)
        * q: float. power for the scaling of the loss (2 for MSE, 1 for MAE, ...).
             Only applies to the built-in power loss, not to `custom`.
        * custom: func or None. custom ELEMENTWISE loss over distance matrices,
             returning an (N, N) tensor so the mask can select entries.
             ex: lambda x,y: 1 - 1/ (1 + ((x-y))**2) (1 is very bad. 0 is good)
        * distmat_mask: (N, N) mask (boolean or weights for each ij pos). optional.
    """
    assert (X is not None or X_mat is not None) and \
           (Y is not None or Y_mat is not None), "The true and predicted coords or dist mats must be provided"
    # calculate distance matrices
    if X_mat is None:
        X_mat = torch.cdist(X, X, p=p)
    if Y_mat is None:
        Y_mat = torch.cdist(Y, Y, p=p)
    if distmat_mask is None:
        distmat_mask = torch.ones_like(Y_mat).bool()
    if custom is not None:
        # keep the elementwise matrix; the previous code reduced with .mean()
        # here and then crashed when masking the resulting scalar below
        loss = custom(X_mat, Y_mat)
    else:
        # **2 ensures always positive. Later scale back to desired power
        loss = ( X_mat - Y_mat )**2
        if q != 2:
            loss = loss**(q/2)
    return loss[distmat_mask].mean()
def rmsd_torch(X, Y):
    """ Root-mean-square deviation over the last two dims.
        Assumes x,y are both (B x D x N). See below for wrapper. """
    sq_err = (X - Y) ** 2
    return sq_err.mean(dim=(-1, -2)).sqrt()
def rmsd_numpy(X, Y):
    """ Root-mean-square deviation over the last two axes.
        Assumes x,y are both (B x D x N). See below for wrapper. """
    sq_err = (X - Y) ** 2
    return np.sqrt(sq_err.mean(axis=(-1, -2)))
def gdt_torch(X, Y, cutoffs, weights=None):
    """ Global distance test. Assumes x,y are both (B x D x N).
        See below for wrapper.
        * cutoffs is a list of `K` thresholds
        * weights is a list of `K` weights (1 x each threshold); uniform if None
        Output: (B,) weighted mean fraction of points within each cutoff.
    """
    device = X.device
    if weights is None:
        # allocate on X's device; previously this stayed on CPU and broke
        # the final multiply for CUDA inputs
        weights = torch.ones(1, len(cutoffs), device=device)
    else:
        weights = torch.tensor([weights]).to(device)
    # set zeros and fill with values
    GDT = torch.zeros(X.shape[0], len(cutoffs), device=device)
    # per-point euclidean deviation
    dist = ((X - Y)**2).sum(dim=1).sqrt()
    # fraction of points within each threshold
    for i, cutoff in enumerate(cutoffs):
        GDT[:, i] = (dist <= cutoff).float().mean(dim=-1)
    # weighted mean over thresholds
    return (GDT * weights).mean(-1)
def gdt_numpy(X, Y, cutoffs, weights=None):
    """ Global distance test (numpy backend). Assumes x,y are both (B x D x N).
        See below for wrapper.
        * cutoffs is a list of `K` thresholds
        * weights is a list of `K` weights (1 x each threshold)
    """
    if weights is None:
        weights = np.ones((1, len(cutoffs)))
    else:
        weights = np.array([weights])
    # per-point euclidean deviation
    dist = np.sqrt(((X - Y) ** 2).sum(axis=1))
    # fraction of points within each threshold, stacked as (B, K)
    GDT = np.stack([(dist <= cutoff).mean(axis=-1) for cutoff in cutoffs], axis=-1)
    # weighted mean over thresholds
    return (GDT * weights).mean(-1)
def tmscore_torch(X, Y):
    """ TM-score of superposed coordinate sets.
        Assumes x,y are both (B x D x N). See below for wrapper. """
    n_points = X.shape[-1]
    # length-dependent normalization constant
    d0 = 1.24 * np.cbrt(n_points - 15) - 1.8
    # per-point euclidean deviation
    deviation = ((X - Y) ** 2).sum(dim=1).sqrt()
    # formula (see wrapper for source):
    return (1 / (1 + (deviation / d0) ** 2)).mean(dim=-1)
def tmscore_numpy(X, Y):
    """ TM-score of superposed coordinate sets (numpy backend).
        Assumes x,y are both (B x D x N). See below for wrapper. """
    n_points = X.shape[-1]
    # length-dependent normalization constant
    d0 = 1.24 * np.cbrt(n_points - 15) - 1.8
    # per-point euclidean deviation
    deviation = np.sqrt(((X - Y) ** 2).sum(axis=1))
    # formula (see wrapper for source):
    return (1 / (1 + (deviation / d0) ** 2)).mean(axis=-1)
def mdscaling_torch(pre_dist_mat, weights=None, iters=10, tol=1e-5,
                    fix_mirror=True, N_mask=None, CA_mask=None, C_mask=None,
                    eigen=False, verbose=2):
    """ Protein-aware wrapper around mds_torch: runs batched MDS, then flips
        the Z axis of any structure whose backbone looks mirror-inverted
        (i.e. fewer than half of its phi dihedrals are negative). """
    # batched MDS for full parallelism
    coords, stress_hist = mds_torch(pre_dist_mat, weights=weights, iters=iters,
                                    tol=tol, eigen=eigen, verbose=verbose)
    if not fix_mirror:
        return coords, stress_hist
    # no need to generate multiple mirrors - just correct the Z axis
    neg_phi_props = calc_phis_torch(coords, N_mask, CA_mask, C_mask, prop=True)
    flip_idxs = torch.nonzero((neg_phi_props < 0.5)).view(-1)
    # flip Z for structures with more (+) than (-) phi angles
    coords[flip_idxs, -1] = (-1) * coords[flip_idxs, -1]
    if verbose == 2:
        print("Corrected mirror idxs:", flip_idxs)
    return coords, stress_hist
def mdscaling_numpy(pre_dist_mat, weights=None, iters=10, tol=1e-5,
                    fix_mirror=True, N_mask=None, CA_mask=None, C_mask=None, verbose=2):
    """ Handles the specifics of MDS for proteins (mirrors, ...).
        Runs batched MDS, then flips the Z axis of any structure whose
        backbone looks mirror-inverted (fewer than half negative phis). """
    # batched mds for full parallel
    preds, stresses = mds_numpy(pre_dist_mat, weights=weights, iters=iters,
                                tol=tol, verbose=verbose)
    if not fix_mirror:
        return preds, stresses
    # no need to calculate multiple mirrors - just correct Z axis
    # (one proportion of negative phis per structure in the batch)
    phi_ratios = calc_phis_numpy(preds, N_mask, CA_mask, C_mask, prop=True)
    for i, pred in enumerate(preds):
        # fix mirrors by (-1)*Z if more (+) than (-) phi angles.
        # index per structure: the previous `phi_ratios < 0.5` compared the
        # whole array, which raises ValueError for batch > 1
        if phi_ratios[i] < 0.5:
            preds[i, -1] = (-1) * preds[i, -1]
            if verbose == 2:
                print("Corrected mirror in struct no.", i)
    return preds, stresses
def lddt_ca_torch(true_coords, pred_coords, cloud_mask, r_0=15.):
    """ Computes the lddt score for each C_alpha.
        https://academic.oup.com/bioinformatics/article/29/21/2722/195896
        Inputs:
        * true_coords: (b, l, c, d) in sidechainnet format.
        * pred_coords: (b, l, c, d) in sidechainnet format.
        * cloud_mask : (b, l, c) adapted for scn format.
        * r_0: float. maximum inclusion radius in reference struct.
        Outputs:
        * (b, l) lddt for c_alpha scores (ranging between 0 and 1)
        See wrapper below.
    """
    device, dtype = true_coords.device, true_coords.type()
    # the four standard lDDT tolerance thresholds (angstroms)
    thresholds = torch.tensor([0.5, 1, 2, 4], device=device).type(dtype)
    # adapt masks
    cloud_mask = cloud_mask.bool().cpu()
    c_alpha_mask = torch.zeros(cloud_mask.shape[1:], device=device).bool() # doesn't have batch dim
    c_alpha_mask[..., 1] = True   # atom slot 1 is C-alpha in scn layout
    # container for c_alpha scores (between 0,1)
    wrapper = torch.zeros(true_coords.shape[:2], device=device).type(dtype)
    for bi, seq in enumerate(true_coords):
        # select atoms for study
        c_alphas = cloud_mask[bi]*c_alpha_mask # only pick c_alpha positions
        selected_pred = pred_coords[bi, c_alphas, :]
        selected_target = true_coords[bi, c_alphas, :]
        # get number under distance
        dist_mat_pred = torch.cdist(selected_pred, selected_pred, p=2)
        dist_mat_target = torch.cdist(selected_target, selected_target, p=2)
        # only pairs within r_0 in the REFERENCE structure are scored
        under_r0_target = dist_mat_target < r_0
        compare_dists = torch.abs(dist_mat_pred - dist_mat_target)[under_r0_target]
        # measure diff below threshold
        score = torch.zeros_like(under_r0_target).float()
        max_score = torch.zeros_like(under_r0_target).float()
        max_score[under_r0_target] = 4.
        # score = number of thresholds the deviation stays under (0..4)
        score[under_r0_target] = thresholds.shape[0] - \
                                 torch.bucketize( compare_dists, boundaries=thresholds ).float()
        # don't include diagonal: each self-pair contributes exactly
        # thresholds.shape[0] to both sums, so subtract it from each
        l_mask = c_alphas.float().sum(dim=-1).bool()
        wrapper[bi, l_mask] = ( score.sum(dim=-1) - thresholds.shape[0] ) / \
                              ( max_score.sum(dim=-1) - thresholds.shape[0] )
    return wrapper
################
### WRAPPERS ###
################
@set_backend_kwarg
@invoke_torch_or_numpy(mdscaling_torch, mdscaling_numpy)
def MDScaling(pre_dist_mat, **kwargs):
    """ Gets distance matrix (-ces). Outputs 3d.
        Assumes (for now) distrogram is (N x N) and symmetric.
        For support of distograms: see `center_distogram_torch()`
        Inputs:
        * pre_dist_mat: (1, N, N) distance matrix.
        * weights: optional. (N x N) pairwise relative weights .
        * iters: number of iterations to run the algorithm on
        * tol: relative tolerance at which to stop the algorithm if no better
               improvement is achieved
        * backend: one of ["numpy", "torch", "auto"] for backend choice
        * fix_mirror: int. number of iterations to run the 3d generation and
                      pick the best mirror (highest number of negative phis)
        * N_mask: indexing array/tensor for indices of backbone N.
                  Only used if fix_mirror > 0.
        * CA_mask: indexing array/tensor for indices of backbone C_alpha.
                   Only used if fix_mirror > 0.
        * verbose: whether to print logs
        Outputs:
        * best_3d_coords: (3 x N)
        * historic_stress: (timesteps, )
    """
    # this body only normalizes the arguments; the decorator chain picks the
    # torch/numpy implementation and invokes it with what is returned here
    pre_dist_mat = expand_dims_to(pre_dist_mat, 3 - len(pre_dist_mat.shape))
    return pre_dist_mat, kwargs
@expand_arg_dims(dim_len = 2)
@set_backend_kwarg
@invoke_torch_or_numpy(kabsch_torch, kabsch_numpy)
def Kabsch(A, B):
    """ Returns Kabsch-rotated matrices resulting
        from aligning A into B.
        Adapted from: https://github.com/charnley/rmsd/
        * Inputs:
            * A,B are (3 x N)
            * backend: one of ["numpy", "torch", "auto"] for backend choice
        * Outputs: tensor/array of shape (3 x N)
    """
    # run calcs - pick the 0th bc an additional dim was created
    # (the decorator chain dispatches to the backend implementation)
    return A, B
@expand_arg_dims()
@set_backend_kwarg
@invoke_torch_or_numpy(rmsd_torch, rmsd_numpy)
def RMSD(A, B):
    """ Returns RMSD score as defined here (lower is better):
        https://en.wikipedia.org/wiki/
        Root-mean-square_deviation_of_atomic_positions
        * Inputs:
            * A,B are (B x 3 x N) or (3 x N)
            * backend: one of ["numpy", "torch", "auto"] for backend choice
        * Outputs: tensor/array of size (B,)
    """
    # body only forwards args; the decorator chain dispatches to the backend
    return A, B
@expand_arg_dims()
@set_backend_kwarg
@invoke_torch_or_numpy(gdt_torch, gdt_numpy)
def GDT(A, B, *, mode="TS", cutoffs=None, weights=None):
    """ Returns GDT score as defined here (higher is better):
        Supports both TS and HA modes.
        http://predictioncenter.org/casp12/doc/help.html
        * Inputs:
            * A,B are (B x 3 x N) (np.array or torch.tensor)
            * mode: "TS" (cutoffs 1,2,4,8) or "HA" (cutoffs 0.5,1,2,4)
            * cutoffs: optional explicit thresholds for gdt; when given they
              override the mode defaults (previously this argument was
              silently discarded)
            * weights: list containing the weights (one per cutoff)
        * Outputs: tensor/array of size (B,)
    """
    # define default cutoffs for each type of gdt only when not supplied
    if cutoffs is None:
        cutoffs = [0.5, 1, 2, 4] if mode in ["HA", "ha"] else [1, 2, 4, 8]
    # calculate GDT (the decorator chain dispatches to the backend)
    return A, B, cutoffs, {'weights': weights}
@expand_arg_dims()
@set_backend_kwarg
@invoke_torch_or_numpy(tmscore_torch, tmscore_numpy)
def TMscore(A, B):
    """ Returns TMscore as defined here (higher is better):
        >0.5 (likely) >0.6 (highly likely) same folding.
        = 0.2. https://en.wikipedia.org/wiki/Template_modeling_score
        Warning! It's not exactly the code in:
        https://zhanglab.ccmb.med.umich.edu/TM-score/TMscore.cpp
        but will suffice for now.
        Inputs:
            * A,B are (B x 3 x N) (np.array or torch.tensor)
            * backend: one of ["numpy", "torch", "auto"] for backend choice
        Outputs: tensor/array of size (B,)
    """
    # body only forwards args; the decorator chain dispatches to the backend
    return A, B
| 1.835938 | 2 |
src/clean_property_file.py | wmaciel/van-crime | 2 | 16893 | <gh_stars>1-10
__author__ = 'walthermaciel'
import pandas as pd
import numpy as np
def load_csv(path):
    """Load a yearly property-tax report and average values per address.

    Reads the CSV at ``path``, keeps only the identifying/valuation columns,
    drops rows with missing data, and averages the numeric columns for
    properties that share the same street name + civic number.

    :param path: path to a property_tax_report CSV file.
    :return: DataFrame with columns STREET_NAME, STREET_NUMBER, PID, YEAR, VALUE.
    """
    # Load (print() works on both Python 2.7 and 3; the old statements were py2-only)
    print('Loading', path)
    df = pd.read_csv(path)

    # Keep only the columns we need, under shorter names
    print('Dropping unwanted columns')
    df = df[['PID', 'TAX_ASSESSMENT_YEAR', 'CURRENT_LAND_VALUE', 'STREET_NAME', 'TO_CIVIC_NUMBER']]
    df.columns = ['PID', 'YEAR', 'VALUE', 'STREET_NAME', 'STREET_NUMBER']

    # Treat empty strings as missing and drop incomplete rows
    print('Removing null rows')
    df.replace('', np.nan, inplace=True)
    df.dropna(inplace=True)

    # Properties at the same address appear once per assessment; average them
    print('Computing average value for same address properties')
    g_df = df.groupby(['STREET_NAME', 'STREET_NUMBER']).mean()
    df = g_df.reset_index()

    return df
def main():
    """Clean each yearly property-tax report (2006-2015) and write the
    per-address averages alongside the source data."""
    for year in range(2006, 2016):   # range works on py2 and py3 (xrange is py2-only)
        print(year)
        path_in = '../data/property_tax_06_15/property_tax_report_csv' + str(year) + '.csv'
        df = load_csv(path_in)

        path_out = '../data/property_tax_06_15/avg_property_tax_' + str(year) + '.csv'
        print('Saving', path_out)
        df.to_csv(path_or_buf=path_out, index=False)
        print('\n')


if __name__ == '__main__':
    main()
| 3.078125 | 3 |
homeassistant/components/sensor/verisure.py | beschouten/home-assistant | 1 | 16894 | """
Interfaces with Verisure sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/verisure/
"""
import logging
from homeassistant.components.verisure import HUB as hub
from homeassistant.const import TEMP_CELSIUS
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the Verisure platform."""
    devices = []

    if int(hub.config.get('thermometers', '1')):
        hub.update_climate()
        for status in hub.climate_status.values():
            if hasattr(status, 'temperature') and status.temperature:
                devices.append(VerisureThermometer(status.id))

    if int(hub.config.get('hygrometers', '1')):
        hub.update_climate()
        for status in hub.climate_status.values():
            if hasattr(status, 'humidity') and status.humidity:
                devices.append(VerisureHygrometer(status.id))

    if int(hub.config.get('mouse', '1')):
        hub.update_mousedetection()
        for status in hub.mouse_status.values():
            if hasattr(status, 'amountText') and status.amountText:
                devices.append(VerisureMouseDetection(status.deviceLabel))

    add_devices(devices)
class VerisureThermometer(Entity):
    """Representation of a Verisure thermometer."""

    def __init__(self, device_id):
        """Initialize the sensor."""
        self._id = device_id

    @property
    def name(self):
        """Return the name of the device."""
        location = hub.climate_status[self._id].location
        return '{} {}'.format(location, "Temperature")

    @property
    def state(self):
        """Return the state of the device."""
        reading = hub.climate_status[self._id].temperature
        return reading[:-1]  # strip the trailing degree character

    @property
    def available(self):
        """Return True if entity is available."""
        return hub.available

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity."""
        return TEMP_CELSIUS

    def update(self):
        """Update the sensor."""
        hub.update_climate()
class VerisureHygrometer(Entity):
    """Representation of a Verisure hygrometer."""

    def __init__(self, device_id):
        """Initialize the sensor."""
        self._id = device_id

    @property
    def name(self):
        """Return the name of the sensor."""
        location = hub.climate_status[self._id].location
        return '{} {}'.format(location, "Humidity")

    @property
    def state(self):
        """Return the state of the sensor."""
        reading = hub.climate_status[self._id].humidity
        return reading[:-1]  # strip the trailing % character

    @property
    def available(self):
        """Return True if entity is available."""
        return hub.available

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this sensor."""
        return "%"

    def update(self):
        """Update the sensor."""
        hub.update_climate()
class VerisureMouseDetection(Entity):
    """Representation of a Verisure mouse detector."""

    def __init__(self, device_id):
        """Initialize the sensor."""
        self._id = device_id

    @property
    def name(self):
        """Return the name of the sensor."""
        location = hub.mouse_status[self._id].location
        return '{} {}'.format(location, "Mouse")

    @property
    def state(self):
        """Return the state of the sensor."""
        return hub.mouse_status[self._id].count

    @property
    def available(self):
        """Return True if entity is available."""
        return hub.available

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this sensor."""
        return "Mice"

    def update(self):
        """Update the sensor."""
        hub.update_mousedetection()
| 2.734375 | 3 |
articles/blogs/tests/factories.py | MahmoudFarid/articles | 0 | 16895 | import factory
from factory.django import DjangoModelFactory as Factory
from django.contrib.auth.models import Permission
from ..models import Blog
from articles.users.tests.factories import UserFactory
class Blogfactory(Factory):
    """Factory producing Blog instances with randomized-but-valid fields."""

    # previously `user = user = ...` (duplicated assignment); single binding is enough
    user = factory.SubFactory(UserFactory)
    title = factory.Faker('sentence', nb_words=3)
    description = factory.Faker('paragraph', nb_sentences=5)
    content = factory.Faker('paragraph', nb_sentences=10)
    gdoc_link = 'https://docs.google.com/document/d/1NcF8_6ZMraTXp7H7DVzR6pbqzJgNIyg3gYLUUoFoYe8/edit'
    # pick a random status value from the model's declared choices
    status = factory.Faker('random_element', elements=[status[0] for status in Blog.STATUS_CHOICES])

    class Meta:
        model = Blog
def create_user_writer_with_permission():
    """Create and return a user holding the ``can_write_blogs`` permission."""
    writer = UserFactory()
    perm = Permission.objects.filter(codename='can_write_blogs').first()
    writer.user_permissions.add(perm)
    return writer
def create_editor_user_with_permission():
    """Create and return a user holding the ``can_review_blogs`` permission."""
    editor = UserFactory()
    perm = Permission.objects.filter(codename='can_review_blogs').first()
    editor.user_permissions.add(perm)
    return editor
| 2.4375 | 2 |
api_formatter/serializers.py | RockefellerArchiveCenter/argo | 0 | 16896 | <filename>api_formatter/serializers.py
from datetime import datetime
from django.urls import reverse
from rest_framework import serializers
from .view_helpers import description_from_notes
class ExternalIdentifierSerializer(serializers.Serializer):
    """Serializes an identifier assigned by an external system."""
    identifier = serializers.CharField()
    source = serializers.CharField()
class DateSerializer(serializers.Serializer):
    """Serializes a structured date: a display expression plus a machine range."""
    expression = serializers.CharField()
    begin = serializers.DateField()
    end = serializers.CharField(allow_null=True)
    # NOTE(review): `label` as a DateField (while `end` is a CharField) looks
    # inconsistent with `begin`; date labels are typically strings such as
    # "creation" — confirm against the index mapping.
    label = serializers.DateField()
    type = serializers.CharField()
class ExtentSerializer(serializers.Serializer):
    """Serializes a physical/digital extent as a numeric value plus its unit type."""
    value = serializers.FloatField()
    type = serializers.CharField()
class LanguageSerializer(serializers.Serializer):
    """Serializes a language as a human-readable expression plus its code."""
    expression = serializers.CharField()
    identifier = serializers.CharField()
class SubnoteSerializer(serializers.Serializer):
    """Serializes a single subnote, coercing its content into a JSON list."""
    type = serializers.CharField()
    content = serializers.SerializerMethodField()

    def get_content(self, obj):
        """Content may be a lazy iterable; force it to a plain list for JSON."""
        return list(obj.content)
class NoteSerializer(serializers.Serializer):
    """Serializes a descriptive note and its nested subnotes."""
    type = serializers.CharField()
    title = serializers.CharField()
    source = serializers.CharField()
    subnotes = SubnoteSerializer(many=True)
class RightsGrantedSerializer(serializers.Serializer):
    """Serializes a single act granted/restricted within a rights statement."""
    act = serializers.CharField()
    begin = serializers.DateField()
    end = serializers.DateField()
    restriction = serializers.CharField()
    notes = NoteSerializer(many=True, allow_null=True)
class RightsStatementSerializer(serializers.Serializer):
    """Serializes a PREMIS-style rights statement and its granted acts."""
    determination_date = serializers.DateField()
    type = serializers.CharField()
    rights_type = serializers.CharField()
    begin = serializers.DateField()
    end = serializers.DateField()
    copyright_status = serializers.CharField(allow_null=True)
    other_basis = serializers.CharField(allow_null=True)
    jurisdiction = serializers.CharField(allow_null=True)
    notes = NoteSerializer(many=True, allow_null=True)
    rights_granted = RightsGrantedSerializer(many=True)
class GroupSerializer(serializers.Serializer):
    """Serializes the collection group a record belongs to."""
    identifier = serializers.CharField()
    title = serializers.CharField()
class ReferenceSerializer(serializers.Serializer):
    """Compact reference to another record, with a resolved detail-route URI."""
    title = serializers.CharField()
    type = serializers.CharField(allow_null=True)
    online = serializers.SerializerMethodField()
    hit_count = serializers.IntegerField(allow_null=True)
    online_hit_count = serializers.IntegerField(allow_null=True)
    uri = serializers.SerializerMethodField()
    dates = serializers.CharField(allow_null=True)
    description = serializers.CharField(allow_null=True)
    group = GroupSerializer(allow_null=True)

    def get_online(self, obj):
        """Default to False when the source document has no `online` field."""
        return getattr(obj, "online", False)

    def get_uri(self, obj):
        """Resolve the detail URI, collapsing subtypes onto their base route."""
        explicit = getattr(obj, "uri", None)
        if explicit:
            return explicit
        agent_types = {"person", "organization", "family", "software"}
        term_types = {"cultural_context", "function", "geographic",
                      "genre_form", "occupation", "style_period", "technique",
                      "temporal", "topical"}
        route = obj.type
        if route in agent_types:
            route = "agent"
        elif route in term_types:
            route = "term"
        url = reverse('{}-detail'.format(route), kwargs={"pk": obj.identifier})
        return url.rstrip("/")
class BaseListSerializer(serializers.Serializer):
    """Minimal list-view representation shared by all record types."""
    uri = serializers.SerializerMethodField()
    type = serializers.CharField()
    title = serializers.CharField()
    dates = DateSerializer(many=True, allow_null=True)

    def get_uri(self, obj):
        """Build the detail route from the view's basename (or the doc type)."""
        route = self.context.get('view').basename or obj.type
        url = reverse('{}-detail'.format(route), kwargs={"pk": obj.meta.id})
        return url.rstrip("/")
class BaseDetailSerializer(serializers.Serializer):
    """Common detail-view fields shared by agent/collection/object/term serializers."""
    uri = serializers.SerializerMethodField()
    title = serializers.CharField()
    type = serializers.CharField()
    category = serializers.CharField(allow_null=True)
    offset = serializers.IntegerField(allow_null=True)
    group = GroupSerializer()
    external_identifiers = ExternalIdentifierSerializer(many=True)

    def get_uri(self, obj):
        """Build the detail route from the view's basename (or the doc type)."""
        route = self.context.get('view').basename or obj.type
        url = reverse('{}-detail'.format(route), kwargs={"pk": obj.meta.id})
        return url.rstrip("/")
class AgentSerializer(BaseDetailSerializer):
    """Full representation of an agent (person, organization, family, software)."""
    agent_type = serializers.CharField()
    description = serializers.CharField(allow_null=True)
    dates = DateSerializer(many=True, allow_null=True)
    notes = NoteSerializer(many=True, allow_null=True)
class AgentListSerializer(BaseListSerializer):
    """List-view representation of agents (inherits all base list fields)."""
    pass
class CollectionSerializer(BaseDetailSerializer):
    """Full representation of an archival collection, including linked
    agents, creators, terms and rights information."""
    level = serializers.CharField()
    parent = serializers.CharField(allow_null=True)
    languages = LanguageSerializer(many=True, allow_null=True)
    description = serializers.SerializerMethodField()
    extents = ExtentSerializer(many=True)
    formats = serializers.ListField()
    online = serializers.BooleanField()
    dates = DateSerializer(many=True, allow_null=True)
    notes = NoteSerializer(many=True, allow_null=True)
    rights_statements = RightsStatementSerializer(many=True, allow_null=True)
    agents = ReferenceSerializer(many=True, allow_null=True)
    creators = ReferenceSerializer(many=True, allow_null=True)
    terms = ReferenceSerializer(many=True, allow_null=True)

    def get_description(self, obj):
        """Derive the display description from the record's notes, if any."""
        notes = getattr(obj, "notes", [])
        return description_from_notes(notes)
class CollectionListSerializer(BaseListSerializer):
    """List-view representation of collections (inherits all base list fields)."""
    pass
class ObjectSerializer(BaseDetailSerializer):
    """Full representation of an archival object, including linked agents,
    terms and rights information."""
    languages = LanguageSerializer(many=True, allow_null=True)
    parent = serializers.CharField(allow_null=True)
    description = serializers.SerializerMethodField()
    extents = ExtentSerializer(many=True, allow_null=True)
    formats = serializers.ListField()
    online = serializers.BooleanField()
    dates = DateSerializer(many=True, allow_null=True)
    notes = NoteSerializer(many=True, allow_null=True)
    rights_statements = RightsStatementSerializer(many=True, allow_null=True)
    agents = ReferenceSerializer(many=True, allow_null=True)
    terms = ReferenceSerializer(many=True, allow_null=True)

    def get_description(self, obj):
        """Derive the display description from the record's notes, if any."""
        notes = getattr(obj, "notes", [])
        return description_from_notes(notes)
class ObjectListSerializer(BaseListSerializer):
    """List-view representation of objects (inherits all base list fields)."""
    pass
class TermSerializer(BaseDetailSerializer):
    """Full representation of a controlled-vocabulary term and its usages."""
    term_type = serializers.CharField()
    collections = ReferenceSerializer(many=True, allow_null=True)
    objects = ReferenceSerializer(many=True, allow_null=True)
class TermListSerializer(BaseListSerializer):
    """List-view representation of terms (inherits all base list fields)."""
    pass
class CollectionHitSerializer(serializers.Serializer):
    """Serializes data for collapsed hits.

    Most fields are sourced from the hit's ``group`` attribute; ``dates``,
    ``uri`` and ``creators`` are computed by the methods below.
    """

    category = serializers.CharField(source="group.category")
    dates = serializers.SerializerMethodField()
    hit_count = serializers.IntegerField()
    online_hit_count = serializers.IntegerField(allow_null=True)
    title = serializers.CharField(source="group.title")
    uri = serializers.SerializerMethodField()
    creators = serializers.SerializerMethodField()

    def get_dates(self, obj):
        """Return the group's dates as plain dicts via each date's to_dict()."""
        return [d.to_dict() for d in obj.group.dates]

    def get_creators(self, obj):
        """Return the titles of the group's creators, or [] when absent/empty."""
        if getattr(obj.group, "creators", None):
            return [c.title for c in obj.group.creators]
        else:
            return []

    def get_uri(self, obj):
        """Return the group identifier with any trailing slashes removed."""
        return obj.group.identifier.rstrip("/")
class FacetSerializer(serializers.Serializer):
    """Serializes aggregation facets into a flat response mapping."""

    def to_representation(self, instance):
        """Flatten the instance's aggregations into a plain dict."""
        flattened = {}
        for name, agg in instance.aggregations.to_dict().items():
            if "buckets" in agg:
                flattened[name] = agg["buckets"]
            elif "name" in agg:
                # Lift nested aggregation buckets up one level.
                flattened[name] = agg["name"]["buckets"]
            elif name in ("max_date", "min_date"):
                # Timestamps arrive in milliseconds; expose only the year.
                raw = agg["value"]
                flattened[name] = {
                    "value": datetime.fromtimestamp(raw / 1000.0).year if raw else None
                }
            else:
                flattened[name] = agg
        return flattened
class AncestorsSerializer(serializers.Serializer):
    """Provides a nested dictionary representation of ancestors."""

    def serialize_ancestors(self, ancestor_list, tree, idx):
        """Fold ancestors from position ``idx`` outward around the subtree ``tree``.

        Each ancestor's serialized data is merged over the accumulated
        subtree; every step except the outermost wraps the result under a
        ``"child"`` key.
        """
        last = len(ancestor_list) - 1
        for position in range(idx, len(ancestor_list)):
            merged = {**ReferenceSerializer(ancestor_list[position]).data, **tree}
            tree = merged if position == last else {"child": merged}
        return tree

    def to_representation(self, instance):
        """Return the nested ancestor tree, or {} when there are no ancestors."""
        if not instance:
            return {}
        return self.serialize_ancestors(instance, {}, 0)
| 2.375 | 2 |
cmz/cms_news/migrations/0004_auto_20160923_1958.py | inmagik/cmz | 1 | 16897 | <reponame>inmagik/cmz
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-09-23 19:58
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated migration (Django 1.9): adds News.date and
    # NewsTranslation.title.

    dependencies = [
        ('cms_news', '0003_auto_20160923_1956'),
    ]

    operations = [
        migrations.AddField(
            model_name='news',
            name='date',
            # auto_now_add fills new rows automatically; the fixed datetime
            # default only back-fills rows existing at migration time and is
            # dropped afterwards (preserve_default=False).
            field=models.DateField(auto_now_add=True, default=datetime.datetime(2016, 9, 23, 19, 58, 10, 395979, tzinfo=utc)),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='newstranslation',
            name='title',
            # The placeholder default back-fills existing rows only
            # (preserve_default=False).
            field=models.CharField(default='Hello cmz', max_length=300),
            preserve_default=False,
        ),
    ]
| 1.820313 | 2 |
mopidy/audio/utils.py | grdorin/mopidy | 6,700 | 16898 | <filename>mopidy/audio/utils.py
from mopidy import httpclient
from mopidy.internal.gi import Gst
def calculate_duration(num_samples, sample_rate):
    """Determine duration of samples using GStreamer helper for precise
    math.

    :param num_samples: number of audio samples.
    :param sample_rate: sample rate of the audio.
    :returns: duration expressed in GStreamer clock time.
    """
    # Equivalent to num_samples * Gst.SECOND / sample_rate, computed with
    # GStreamer's integer scaling helper to avoid precision loss.
    return Gst.util_uint64_scale(num_samples, Gst.SECOND, sample_rate)
def create_buffer(data, timestamp=None, duration=None):
    """Wrap ``data`` in a new GStreamer buffer.

    Mainly intended to keep gst imports out of non-audio modules.

    :param data: payload for the buffer; must be non-empty.
    :param timestamp: optional presentation timestamp to set on the buffer.
    :param duration: optional duration to set on the buffer.
    :raises ValueError: if ``data`` is empty.

    .. versionchanged:: 2.0
        ``capabilites`` argument was removed.
    """
    if not data:
        raise ValueError("Cannot create buffer without data")
    gst_buffer = Gst.Buffer.new_wrapped(data)
    # Only set the optional attributes that were actually supplied.
    for attr, value in (("pts", timestamp), ("duration", duration)):
        if value is not None:
            setattr(gst_buffer, attr, value)
    return gst_buffer
def millisecond_to_clocktime(value):
    """Convert a millisecond time to internal GStreamer time.

    :param value: time in milliseconds.
    :returns: the same time expressed in GStreamer clock units.
    """
    return value * Gst.MSECOND
def clocktime_to_millisecond(value):
    """Convert an internal GStreamer time to millisecond time.

    :param value: time in GStreamer clock units.
    :returns: the same time in whole milliseconds (floor division truncates
        any sub-millisecond remainder).
    """
    return value // Gst.MSECOND
def supported_uri_schemes(uri_schemes):
    """Determine which URIs we can actually support from provided whitelist.

    :param uri_schemes: list/set of URIs to check support for.
    :type uri_schemes: list or set or URI schemes as strings.
    :rtype: set of URI schemes we can support via this GStreamer install.
    """
    registry = Gst.Registry.get()
    # Collect every protocol advertised by an installed element factory
    # that also appears in the requested whitelist.
    return {
        scheme
        for factory in registry.get_feature_list(Gst.ElementFactory)
        for scheme in factory.get_uri_protocols()
        if scheme in uri_schemes
    }
def setup_proxy(element, config):
    """Configure a GStreamer element with proxy settings.

    Does nothing when the element lacks a ``proxy`` property or when no
    proxy hostname is configured.

    :param element: element to setup proxy in.
    :type element: :class:`Gst.GstElement`
    :param config: proxy settings to use.
    :type config: :class:`dict`
    """
    element_supports_proxy = hasattr(element.props, "proxy")
    if element_supports_proxy and config.get("hostname"):
        element.set_property("proxy", httpclient.format_proxy(config, auth=False))
        element.set_property("proxy-id", config.get("username"))
        element.set_property("proxy-pw", config.get("password"))
class Signals:
    """Helper for tracking gobject signal registrations."""

    def __init__(self):
        # Maps (element, event) -> handler id returned by element.connect().
        self._ids = {}

    def connect(self, element, event, func, *args):
        """Connect a function + args to signal event on an element.

        Each event may only be handled by one callback in this implementation.
        """
        key = (element, event)
        if key in self._ids:
            raise AssertionError
        self._ids[key] = element.connect(event, func, *args)

    def disconnect(self, element, event):
        """Disconnect whatever handler we have for an element+event pair.

        Does nothing it the handler has already been removed.
        """
        handler_id = self._ids.pop((element, event), None)
        if handler_id is None:
            return
        element.disconnect(handler_id)

    def clear(self):
        """Clear all registered signal handlers."""
        for key in list(self._ids):
            # key[0] is the element the handler was registered on.
            key[0].disconnect(self._ids.pop(key))
| 2.40625 | 2 |
day09/part1.py | mtn/advent16 | 0 | 16899 | #!/usr/bin/env python3
import re


def decompress(text):
    """Expand one layer of ``(AxB)`` repetition markers in *text*.

    A marker ``(AxB)`` means: repeat the next A characters B times.
    Markers occurring inside a repeated span are emitted verbatim, not
    expanded further (part-1 semantics).

    :param text: compressed input string.
    :returns: the decompressed string.
    :raises ValueError: if a marker's closing ``)`` is missing.
    """
    pieces = []
    i = 0
    while i < len(text):
        if text[i] == "(":
            # str.index with a start offset replaces the original
            # text[i:].find(")") which copied the tail slice per marker
            # (accidentally quadratic on marker-heavy input).
            end = text.index(")", i)
            chars, times = map(int, text[i + 1:end].split("x"))
            pieces.append(text[end + 1:end + 1 + chars] * times)
            i = end + 1 + chars
        else:
            pieces.append(text[i])
            i += 1
    # Single join instead of repeated string += (quadratic growth).
    return "".join(pieces)


if __name__ == "__main__":
    with open("input.txt") as f:
        content = f.read().strip()
    print(len(decompress(content)))
| 3.328125 | 3 |