text stringlengths 38 1.54M |
|---|
from pathlib import Path
from postgis_helpers import PostgreSQL
def import_shapefiles(folder: Path, db: PostgreSQL):
    """Import every shapefile found under *folder* (recursively) into SQL.

    Args:
        folder: directory searched recursively for ``.shp`` / ``.SHP`` files.
        db: PostgreSQL helper used to load each shapefile.
    """
    endings = [".shp", ".SHP"]
    seen = set()
    for ending in endings:
        for shp_path in folder.rglob(f"*{ending}"):
            # On case-insensitive filesystems both patterns match the same
            # file; dedupe so nothing is imported twice.
            if shp_path in seen:
                continue
            seen.add(shp_path)
            print(shp_path)
            # Table name: filename without extension, lowercased,
            # spaces replaced by underscores.
            pg_name = shp_path.stem.replace(" ", "_").lower()
            db.import_geodata(pg_name, shp_path, if_exists="replace")
|
# Public exception API of this module.
__all__ = ["AMFError", "RTMPError", "RTMPTimeoutError"]


class AMFError(Exception):
    """Raised on AMF (Action Message Format) encoding/decoding problems."""
    pass


class RTMPError(IOError):
    """Base error for RTMP stream I/O failures."""
    pass


class RTMPTimeoutError(RTMPError):
    """Raised when an RTMP operation exceeds its time limit."""
    pass
|
from bird_interface import BirdInterface
from swimming_interface import SwimmingBirdInterface


class Penguin(BirdInterface, SwimmingBirdInterface):
    """A bird that can eat and swim, satisfying both interfaces."""

    def eat(self):
        # Implements BirdInterface.eat.
        return 'I can eat!'

    def swim(self):
        # Implements SwimmingBirdInterface.swim.
        return 'I can swim!'
#!/usr/bin/python
import logging
logging.basicConfig(level=logging.INFO, format='%(message)s')
import os
import sys
sys.path.extend([os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')])
try:
from grow import submodules
submodules.fix_imports()
except ImportError:
pass
from google.apputils import appcommands
from grow import commands
def main(argv):
    """Entry point invoked by appcommands: registers all grow subcommands."""
    commands.add_commands()


if __name__ == '__main__':
    # appcommands.Run() parses flags/argv and dispatches to main().
    appcommands.Run()
|
import torch
from torch.nn.functional import conv1d, mse_loss
import torch.nn.functional as F
import torch.nn as nn
from nnAudio import Spectrogram
from .constants import *
from model.utils import Normalization
batchNorm_momentum = 0.1
num_instruments = 1
class block(nn.Module):
def __init__(self, inp, out, ksize, pad, ds_ksize, ds_stride):
super(block, self).__init__()
self.conv1 = nn.Conv2d(inp,out, kernel_size=ksize, padding=pad)
self.bn1 = nn.BatchNorm2d(out, momentum= batchNorm_momentum)
self.conv2 = nn.Conv2d(out, out, kernel_size=ksize, padding=pad)
self.bn2 = nn.BatchNorm2d(out, momentum= batchNorm_momentum)
self.skip = nn.Conv2d(inp, out, kernel_size=1, padding=0)
self.ds = nn.Conv2d(out, out, kernel_size=ds_ksize, stride=ds_stride, padding=0)
def forward(self, x):
x11 = F.leaky_relu(self.bn1(self.conv1(x)))
x12 = F.leaky_relu(self.bn2(self.conv2(x11)))
x12 += self.skip(x)
xp = self.ds(x12)
return xp, xp, x12.size()
class d_block(nn.Module):
    """Decoder building block: upsample, optionally concatenate a skip
    connection, then two transposed convs with BatchNorm + leaky ReLU.
    The last stage (isLast=True) emits the raw conv output instead."""

    def __init__(self, inp, out, isLast, ksize, pad, ds_ksize, ds_stride):
        super(d_block, self).__init__()
        half = int(inp / 2)
        self.conv2d = nn.ConvTranspose2d(inp, half, kernel_size=ksize, padding=pad)
        self.bn2d = nn.BatchNorm2d(half, momentum=batchNorm_momentum)
        self.conv1d = nn.ConvTranspose2d(half, out, kernel_size=ksize, padding=pad)
        # Layer creation order kept identical to preserve seeded init.
        if not isLast:
            self.bn1d = nn.BatchNorm2d(out, momentum=batchNorm_momentum)
            self.us = nn.ConvTranspose2d(inp - out, inp - out, kernel_size=ds_ksize, stride=ds_stride)
        else:
            self.us = nn.ConvTranspose2d(inp, inp, kernel_size=ds_ksize, stride=ds_stride)

    def forward(self, x, size=None, isLast=None, skip=None):
        # output_size pins the upsample to the encoder's recorded size.
        up = self.us(x, output_size=size)
        if isLast:
            hidden = F.leaky_relu(self.bn2d(self.conv2d(up)))
            return self.conv1d(hidden)
        merged = torch.cat((up, skip), 1)
        hidden = F.leaky_relu(self.bn2d(self.conv2d(merged)))
        return F.leaky_relu(self.bn1d(self.conv1d(hidden)))
class Encoder(nn.Module):
    """Four-stage downsampling encoder.

    forward returns the bottleneck features, the four pre-downsample sizes,
    and per-stage feature maps (passed through an extra conv) that the
    decoder uses as skip connections.
    """

    def __init__(self, ds_ksize, ds_stride):
        super(Encoder, self).__init__()
        self.block1 = block(1, 16, (3, 3), (1, 1), ds_ksize, ds_stride)
        self.block2 = block(16, 32, (3, 3), (1, 1), ds_ksize, ds_stride)
        self.block3 = block(32, 64, (3, 3), (1, 1), ds_ksize, ds_stride)
        self.block4 = block(64, 128, (3, 3), (1, 1), ds_ksize, ds_stride)
        self.conv1 = nn.Conv2d(64, 64, kernel_size=(3, 3), padding=(1, 1))
        self.conv2 = nn.Conv2d(32, 32, kernel_size=(3, 3), padding=(1, 1))
        self.conv3 = nn.Conv2d(16, 16, kernel_size=(3, 3), padding=(1, 1))

    def forward(self, x):
        h1, _, size1 = self.block1(x)
        h2, _, size2 = self.block2(h1)
        h3, _, size3 = self.block3(h2)
        h4, _, size4 = self.block4(h3)
        skip3 = self.conv1(h3)
        skip2 = self.conv2(h2)
        skip1 = self.conv3(h1)
        return h4, [size1, size2, size3, size4], [skip3, skip2, skip1, h1]
class Decoder(nn.Module):
    """Four-stage upsampling decoder mirroring Encoder.

    forward args:
        x: bottleneck features from the encoder.
        s: the four pre-downsample sizes recorded by the encoder, used to
           target the transposed convolutions' output sizes.
        c: optional per-stage skip-connection feature maps.
    """

    def __init__(self, ds_ksize, ds_stride):
        super(Decoder, self).__init__()
        self.d_block1 = d_block(192, 64, False, (3, 3), (1, 1), ds_ksize, ds_stride)
        self.d_block2 = d_block(96, 32, False, (3, 3), (1, 1), ds_ksize, ds_stride)
        self.d_block3 = d_block(48, 16, False, (3, 3), (1, 1), ds_ksize, ds_stride)
        self.d_block4 = d_block(16, num_instruments, True, (3, 3), (1, 1), ds_ksize, ds_stride)

    # Fix: the default for `c` was a mutable list ([None]*4), a Python
    # anti-pattern (shared across calls). A tuple indexes identically.
    def forward(self, x, s, c=(None, None, None, None)):
        x = self.d_block1(x, s[3], False, c[0])
        x = self.d_block2(x, s[2], False, c[1])
        x = self.d_block3(x, s[1], False, c[2])
        x = self.d_block4(x, s[0], True, c[3])
        # No sigmoid here: callers apply it where needed.
        return x
class Prestack(nn.Module):
    """One U-net pass: encode, then decode with skip connections."""

    def __init__(self, ds_ksize, ds_stride, complexity=4):
        super().__init__()
        # `complexity` is accepted for interface compatibility but unused.
        self.Unet1_encoder = Encoder(ds_ksize, ds_stride)
        self.Unet1_decoder = Decoder(ds_ksize, ds_stride)

    def forward(self, x):
        # U-net 1: encoder produces features, sizes, and skip maps.
        encoded, sizes, skips = self.Unet1_encoder(x)
        return self.Unet1_decoder(encoded, sizes, skips)
class Prestack_Model(nn.Module):
    """U-net front end followed by a ResNet scoring 88 piano keys.

    run_on_batch converts audio to a normalized log-mel spectrogram, pads
    it and slides a 25-frame window over time, then scores each window
    independently through the U-net + ResNet stack.
    """

    def __init__(self, model='resnet18'):
        super().__init__()
        unet = Prestack((3, 3), (1, 1))
        resnet = torch.hub.load('pytorch/vision:v0.9.0', model, pretrained=False)
        # Adapt the ResNet: single-channel spectrogram input, 88 outputs.
        resnet.conv1 = torch.nn.Conv1d(1, 64, (7, 7), (2, 2), (3, 3), bias=False)
        resnet.fc = torch.nn.Linear(512, 88, bias=True)
        self.prestack_model = nn.Sequential(unet, resnet)
        self.spectrogram = Spectrogram.MelSpectrogram(sr=SAMPLE_RATE, win_length=WINDOW_LENGTH, n_mels=N_BINS,
                                                      hop_length=HOP_LENGTH, fmin=MEL_FMIN, fmax=MEL_FMAX,
                                                      trainable_mel=False, trainable_STFT=False)
        self.normalize = Normalization('imagewise')

    def forward(self, x):
        return self.prestack_model(x)

    def run_on_batch(self, batch, batch_ul=None, VAT=False):
        audio_label = batch['audio']
        onset_label = batch['onset']  # read to enforce batch schema; predictions reuse frame output
        frame_label = batch['frame']
        if frame_label.dim() == 2:
            frame_label = frame_label.unsqueeze(0)
        # Audio -> mel spectrogram (drop last sample so frame counts align).
        spec = self.spectrogram(audio_label.reshape(-1, audio_label.shape[-1])[:, :-1])
        # Log compression keeps the dynamic range manageable.
        spec = torch.log(spec + 1e-5)
        spec = self.normalize.transform(spec)
        # Pad 12 frames per side, then extract 25-frame windows (stride 1).
        spec_padded = torch.nn.functional.pad(spec, (12, 12))
        spec_padded = spec_padded.unfold(2, 25, 1)
        spec_padded = spec_padded.transpose(1, 2).reshape(-1, 229, 25)
        spec_padded = spec_padded.unsqueeze(1)  # single CNN channel
        frame_pred = torch.zeros(spec_padded.shape[0], 88).to(spec_padded.device)
        # Score one window at a time to bound memory usage.
        for idx, window in enumerate(spec_padded):
            frame_pred[idx] = self(window.unsqueeze(0)).squeeze(0)
        frame_pred = torch.sigmoid(frame_pred)
        predictions = {
            'onset': frame_pred,
            'frame': frame_pred,
            'r_adv': None
        }
        # Fix: initialize `losses` so the return below cannot hit a
        # NameError when the loss computation fails (the original used a
        # bare `except:` and then referenced an unbound name).
        losses = {}
        try:
            losses = {
                'loss/train_frame': F.binary_cross_entropy(predictions['frame'].squeeze(1), frame_label.reshape(-1, 88)),
            }
        except (RuntimeError, ValueError):
            # binary_cross_entropy raises if inputs leave [0, 1] or shapes mismatch.
            print('The prediction contains negative values')
            print(f'frame_pred min = {frame_pred.min()}')
            print(f'frame_pred max = {frame_pred.max()}')
        return predictions, losses, spec.squeeze(1)
#Cagil Benibol
#2212637 , cagil.benibol@metu.edu.tr, cagil.benibol@gmail.com
#python 3.7.4
import numpy as np
import math as m
import matplotlib.pyplot as plt
#funcs
#ODE
def f(t, v):
    """ODE right-hand side: v'(t) = 1 - 2*v^2 - t."""
    return 1 - 2*(v**2) - t


def f2nd(t, v):
    """Analytic second derivative: v''(t) = -4*v*v' - 1."""
    return -4*v*(f(t, v)) - 1


def f3rd(t, v):
    """Analytic third derivative: v'''(t) = -4*(v')^2 - 4*v*v''.

    Fix: the original returned -4*v' - 4*v*v'', dropping the square on the
    first term; differentiating v'' = -4*v*v' - 1 gives -4*(v')^2 - 4*v*v''.
    """
    return -4*(f(t, v))**2 - 4*v*(f2nd(t, v))
# Limits: 0.0 <= t <= 1.0
a = 0.0
b = 1.0
# IV's: (t0, v0)
IV = (0.0, 1.0)
# steps
N = 100
# step size
h = (b-a)/N  # h=0.01
# arrays holding the time grid and the three approximations
t = [0]
w = [0]       # first-order (Euler) values; w[0] is set from the IC below
ww = [1.0]    # second-order Taylor values, seeded with v(0)
www = [1.0]   # third-order Taylor values, seeded with v(0)
# IC's
t[0], w[0] = IV
# Euler's method: build the time grid first
for i in range(1, N+1):
    t.append(h*i)  # t_i = 0.01*i
for i in range(1, N+1):  # with first order derivative
    w.append(w[i-1] + h * f(t[i-1], w[i-1]))
for i in range(1, N+1):  # with second order derivative
    # w[i] already equals w[i-1] + h*f(...), so adding the h^2/2 term gives
    # the full second-order Taylor step based at (t[i-1], w[i-1]).
    ww.append(w[i] + (h**2)*0.5*f2nd(t[i-1], w[i-1]))
for i in range(1, N+1):  # with third order derivative
    # Likewise ww[i] carries the lower-order terms of the expansion.
    www.append(ww[i] + ((h**3)/6) * f3rd(t[i-1], w[i-1]))
# plot all three approximations on a single figure
plt.title('Euler apprx')
plt.xlabel('t')
plt.ylabel('v')
plt.plot(t, w, 'r-', label="apprx with 1st order")
plt.plot(t, ww, 'b-', label="apprx with 2nd order")
plt.plot(t, www, 'g-', label="apprx with 3rd order")
plt.grid(True)
plt.legend()
plt.savefig('euler.png')
plt.show()
|
#!/usr/bin/env python
# -*- coding=utf-8 -*-
import web
# URL routing: a bare filename shows highlighted source; /static/ serves
# raw files. Both capture "name" or "name.ext" style path segments.
urls = (
    r'/([\w\-]+(?:\.\w+)?)$', 'ShowCode',
    r'/static/([\w\-]+(?:\.\w+)?)$', 'StaticFile'
)
app = web.application(urls, globals())
# for template render: names exposed to templates
common_globals = {
    'str': str,
}
render = web.template.render('template/', globals=common_globals)
class CacheFileMeta(type):
    """Metaclass that memoizes GET/POST handler results by file name.

    Every class built with this metaclass gets a class-level `_cache` dict
    (shared by all instances); wrapped handlers consult it before calling
    the original method.
    """

    def __new__(mcls, name, base, attr):
        attr['_cache'] = {}

        def enable_cache(f):
            def ret(self, fname):
                cache = self._cache
                if fname not in cache:
                    cache[fname] = f(self, fname)
                return cache[fname]
            return ret

        for verb in ('GET', 'POST'):
            if verb in attr:
                attr[verb] = enable_cache(attr[verb])
        return type.__new__(mcls, name, base, attr)
class ShowCode(object):
    """Render a source file's lines; results cached via CacheFileMeta."""
    # NOTE(review): `__metaclass__` is Python 2 syntax; it has no effect on
    # Python 3 — confirm which interpreter this app targets.
    __metaclass__ = CacheFileMeta

    def GET(self, fname):
        try:
            # Fix: use a context manager so the file handle is closed
            # promptly (the original leaked it until GC).
            with open(fname) as src:
                flines = src.readlines()
        except IOError:
            raise web.notfound("Source file not found")
        return render.show_code(fname, flines)
class StaticFile(object):
    """Serve a raw file from static/; results cached via CacheFileMeta."""
    __metaclass__ = CacheFileMeta  # Python 2 metaclass syntax (see ShowCode)

    def GET(self, fname):
        try:
            # Fix: close the file handle deterministically instead of
            # relying on garbage collection.
            with open('static/' + fname) as src:
                return src.read()
        except IOError:
            raise web.notfound("File not found")
if __name__ == "__main__":
app.run()
|
from django.db import models
# Create your models here.
class Subjects(models.Model):
    """Per-subject integer marks making up one mark sheet."""
    tamil = models.IntegerField()
    telugu = models.IntegerField()
    english = models.IntegerField()
    maths = models.IntegerField()
    science = models.IntegerField()
    social = models.IntegerField()
class Staff(models.Model):
    """A staff member and the subject groups they teach."""
    staff_name = models.CharField(max_length=50)
    # Many-to-many: one staff member teaches many subject groups and vice versa.
    classes = models.ManyToManyField(Subjects)

    def __str__(self):
        return self.staff_name
class Students(models.Model):
    """A student, their one-to-one mark sheet, and their class year."""
    # Fix: on_delete is required on OneToOneField since Django 2.0.
    # CASCADE matches the implicit pre-2.0 default behavior.
    subjects = models.OneToOneField(Subjects, on_delete=models.CASCADE)
    student_name = models.CharField(max_length=50)
    year_of_class = models.IntegerField()

    def __str__(self):
        return self.student_name
|
from __future__ import annotations
import ctypes
import ctypes.util
import sys
import traceback
from functools import partial
from itertools import count
from threading import Lock, Thread
from typing import Any, Callable, Generic, TypeVar
import outcome
RetT = TypeVar("RetT")
def _to_os_thread_name(name: str) -> bytes:
# ctypes handles the trailing \00
return name.encode("ascii", errors="replace")[:15]
# used to construct the method used to set os thread name, or None, depending on platform.
# called once on import
def get_os_thread_name_func() -> Callable[[int | None, str], None] | None:
    """Build a platform-appropriate "set OS thread name" callable.

    Returns a function taking (thread ident, name), or None when the
    platform has no usable pthread_setname_np (e.g. Windows).
    """
    def namefunc(
        setname: Callable[[int, bytes], int], ident: int | None, name: str
    ) -> None:
        # Thread.ident is None "if it has not been started". Unclear if that can happen
        # with current usage.
        if ident is not None:  # pragma: no cover
            setname(ident, _to_os_thread_name(name))

    # namefunc on Mac also takes an ident, even if pthread_setname_np doesn't/can't use it
    # so the caller don't need to care about platform.
    def darwin_namefunc(
        setname: Callable[[bytes], int], ident: int | None, name: str
    ) -> None:
        # I don't know if Mac can rename threads that hasn't been started, but default
        # to no to be on the safe side.
        if ident is not None:  # pragma: no cover
            setname(_to_os_thread_name(name))

    # find the pthread library
    # this will fail on windows
    libpthread_path = ctypes.util.find_library("pthread")
    if not libpthread_path:
        return None

    # Sometimes windows can find the path, but gives a permission error when
    # accessing it. Catching a wider exception in case of more esoteric errors.
    # https://github.com/python-trio/trio/issues/2688
    try:
        libpthread = ctypes.CDLL(libpthread_path)
    except Exception:  # pragma: no cover
        return None

    # get the setname method from it
    # afaik this should never fail
    pthread_setname_np = getattr(libpthread, "pthread_setname_np", None)
    if pthread_setname_np is None:  # pragma: no cover
        return None

    # specify function prototype
    pthread_setname_np.restype = ctypes.c_int

    # on mac OSX pthread_setname_np does not take a thread id,
    # it only lets threads name themselves, which is not a problem for us.
    # Just need to make sure to call it correctly
    if sys.platform == "darwin":
        pthread_setname_np.argtypes = [ctypes.c_char_p]
        return partial(darwin_namefunc, pthread_setname_np)

    # otherwise assume linux parameter conventions. Should also work on *BSD
    pthread_setname_np.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
    return partial(namefunc, pthread_setname_np)
# construct os thread name method
set_os_thread_name = get_os_thread_name_func()
# The "thread cache" is a simple unbounded thread pool, i.e., it automatically
# spawns as many threads as needed to handle all the requests its given. Its
# only purpose is to cache worker threads so that they don't have to be
# started from scratch every time we want to delegate some work to a thread.
# It's expected that some higher-level code will track how many threads are in
# use to avoid overwhelming the system (e.g. the limiter= argument to
# trio.to_thread.run_sync).
#
# To maximize sharing, there's only one thread cache per process, even if you
# have multiple calls to trio.run.
#
# Guarantees:
#
# It's safe to call start_thread_soon simultaneously from
# multiple threads.
#
# Idle threads are chosen in LIFO order, i.e. we *don't* spread work evenly
# over all threads. Instead we try to let some threads do most of the work
# while others sit idle as much as possible. Compared to FIFO, this has better
# memory cache behavior, and it makes it easier to detect when we have too
# many threads, so idle ones can exit.
#
# This code assumes that 'dict' has the following properties:
#
# - __setitem__, __delitem__, and popitem are all thread-safe and atomic with
# respect to each other. This is guaranteed by the GIL.
#
# - popitem returns the most-recently-added item (i.e., __setitem__ + popitem
# give you a LIFO queue). This relies on dicts being insertion-ordered, like
# they are in py36+.
# How long a thread will idle waiting for new work before gives up and exits.
# This value is pretty arbitrary; I don't think it matters too much.
IDLE_TIMEOUT = 10 # seconds
name_counter = count()
class WorkerThread(Generic[RetT]):
    """A cached worker thread that runs one job at a time.

    Jobs are handed over via an "inverted" Lock (see _worker_lock below);
    when idle the thread parks itself back in the ThreadCache. The exact
    statement order in _handle_job/_work is load-bearing for thread-safety —
    do not reorder without understanding the handshake.
    """

    def __init__(self, thread_cache: ThreadCache) -> None:
        # The pending job: (fn, deliver, optional thread name), or None.
        self._job: tuple[
            Callable[[], RetT],
            Callable[[outcome.Outcome[RetT]], object],
            str | None,
        ] | None = None
        self._thread_cache = thread_cache
        # This Lock is used in an unconventional way.
        #
        # "Unlocked" means we have a pending job that's been assigned to us;
        # "locked" means that we don't.
        #
        # Initially we have no job, so it starts out in locked state.
        self._worker_lock = Lock()
        self._worker_lock.acquire()
        self._default_name = f"Trio thread {next(name_counter)}"
        self._thread = Thread(target=self._work, name=self._default_name, daemon=True)
        if set_os_thread_name:
            # ident is still None here (thread not started); the setter
            # no-ops in that case — see get_os_thread_name_func.
            set_os_thread_name(self._thread.ident, self._default_name)
        self._thread.start()

    def _handle_job(self) -> None:
        """Run the assigned job, then mark this worker idle and deliver."""
        # Handle job in a separate method to ensure user-created
        # objects are cleaned up in a consistent manner.
        assert self._job is not None
        fn, deliver, name = self._job
        self._job = None
        # set name
        if name is not None:
            self._thread.name = name
            if set_os_thread_name:
                set_os_thread_name(self._thread.ident, name)
        # Capture the job's result or exception as an outcome.Outcome.
        result = outcome.capture(fn)
        # reset name if it was changed
        if name is not None:
            self._thread.name = self._default_name
            if set_os_thread_name:
                set_os_thread_name(self._thread.ident, self._default_name)
        # Tell the cache that we're available to be assigned a new
        # job. We do this *before* calling 'deliver', so that if
        # 'deliver' triggers a new job, it can be assigned to us
        # instead of spawning a new thread.
        self._thread_cache._idle_workers[self] = None
        try:
            deliver(result)
        except BaseException as e:
            # 'deliver' must not raise; all we can do is report the failure.
            print("Exception while delivering result of thread", file=sys.stderr)
            traceback.print_exception(type(e), e, e.__traceback__)

    def _work(self) -> None:
        """Thread main loop: wait for a job, or time out and retire."""
        while True:
            if self._worker_lock.acquire(timeout=IDLE_TIMEOUT):
                # We got a job
                self._handle_job()
            else:
                # Timeout acquiring lock, so we can probably exit. But,
                # there's a race condition: we might be assigned a job *just*
                # as we're about to exit. So we have to check.
                try:
                    del self._thread_cache._idle_workers[self]
                except KeyError:
                    # Someone else removed us from the idle worker queue, so
                    # they must be in the process of assigning us a job - loop
                    # around and wait for it.
                    continue
                else:
                    # We successfully removed ourselves from the idle
                    # worker queue, so no more jobs are incoming; it's safe to
                    # exit.
                    return
class ThreadCache:
    """Unbounded LIFO pool of WorkerThreads (one cache per process)."""

    def __init__(self) -> None:
        # Used as an insertion-ordered set: popitem() returns the most
        # recently parked worker (LIFO), and __setitem__/__delitem__/popitem
        # are atomic under the GIL — both properties are relied on.
        self._idle_workers: dict[WorkerThread[Any], None] = {}

    def start_thread_soon(
        self,
        fn: Callable[[], RetT],
        deliver: Callable[[outcome.Outcome[RetT]], object],
        name: str | None = None,
    ) -> None:
        """Assign (fn, deliver) to an idle worker, spawning one if needed."""
        worker: WorkerThread[RetT]
        try:
            # EAFP on purpose: popitem() is atomic, so concurrent callers
            # cannot grab the same worker; a check-then-pop would race.
            worker, _ = self._idle_workers.popitem()
        except KeyError:
            worker = WorkerThread(self)
        worker._job = (fn, deliver, name)
        # Releasing the "inverted" lock hands the job to the worker thread.
        worker._worker_lock.release()
THREAD_CACHE = ThreadCache()
def start_thread_soon(
    fn: Callable[[], RetT],
    deliver: Callable[[outcome.Outcome[RetT]], object],
    name: str | None = None,
) -> None:
    """Runs ``deliver(outcome.capture(fn))`` in a worker thread.

    Generally ``fn`` does some blocking work, and ``deliver`` delivers the
    result back to whoever is interested.

    This is a low-level, no-frills interface, very similar to using
    `threading.Thread` to spawn a thread directly. The main difference is
    that this function tries to re-use threads when possible, so it can be
    a bit faster than `threading.Thread`.

    Worker threads have the `~threading.Thread.daemon` flag set, which means
    that if your main thread exits, worker threads will automatically be
    killed. If you want to make sure that your ``fn`` runs to completion, then
    you should make sure that the main thread remains alive until ``deliver``
    is called.

    It is safe to call this function simultaneously from multiple threads.

    Args:
        fn (sync function): Performs arbitrary blocking work.
        deliver (sync function): Takes the `outcome.Outcome` of ``fn``, and
          delivers it. *Must not block.*

    Because worker threads are cached and reused for multiple calls, neither
    function should mutate thread-level state, like `threading.local` objects
    — or if they do, they should be careful to revert their changes before
    returning.

    Note:
        The split between ``fn`` and ``deliver`` serves two purposes. First,
        it's convenient, since most callers need something like this anyway.
        Second, it avoids a small race condition that could cause too many
        threads to be spawned. Consider a program that wants to run several
        jobs sequentially on a thread, so the main thread submits a job, waits
        for it to finish, submits another job, etc. In theory, this program
        should only need one worker thread. But what could happen is:

        1. Worker thread: First job finishes, and calls ``deliver``.
        2. Main thread: receives notification that the job finished, and calls
           ``start_thread_soon``.
        3. Main thread: sees that no worker threads are marked idle, so spawns
           a second worker thread.
        4. Original worker thread: marks itself as idle.

        To avoid this, threads mark themselves as idle *before* calling
        ``deliver``.

        Is this potential extra thread a major problem? Maybe not, but it's
        easy enough to avoid, and we figure that if the user is trying to
        limit how many threads they're using then it's polite to respect that.
    """
    # Delegate to the process-wide cache so threads are shared across runs.
    THREAD_CACHE.start_thread_soon(fn, deliver, name)
|
import threading
import time
# The argument sets how many threads may hold the semaphore (use the resource) at once
semaphore = threading.Semaphore(3)
def func():
    """Worker: hold one of the semaphore's slots while printing twice."""
    # Fix: use `with` so the slot is always released, even if an exception
    # occurs mid-loop (the original's manual acquire/release leaked it).
    # `.name` replaces the deprecated getName() — same value.
    with semaphore:
        for i in range(2):
            print(threading.current_thread().name + "get semapore")
            time.sleep(5)
    print(threading.current_thread().name + "release semaphore")
def func2():
    """Demo task for the Timer: runs ~3 seconds, then reports completion."""
    print("I am running.....")
    time.sleep(3)
    print("I an done.......")  # (sic) original message kept verbatim
class MyThread(threading.Thread):
    """Increments the shared counter `num` under `mutex` and prints it."""

    def run(self):
        global num
        time.sleep(1)
        # Fix: the original acquired the non-reentrant Lock a second time
        # before releasing it, which deadlocks every thread after the first.
        # Acquire once and always release via try/finally.
        mutex.acquire(1)
        try:
            num = num + 1
            msg = self.name + " set num to " + str(num)
            print(msg)
        finally:
            mutex.release()
def test():
    """Spawn five MyThread workers; each bumps the shared counter once."""
    for _ in range(5):
        worker = MyThread()
        worker.start()
if __name__ == "__main__":
    t2 = threading.Timer(6, func2)  # start func2 once, 6 seconds from now
    t2.start()
    i = 0
    # NOTE(review): this loop never terminates, so everything after it —
    # the semaphore demo, the num/mutex setup, and test() — is unreachable
    # dead code. Confirm whether a break/limit was intended.
    while True:
        print("{0}*********".format(i))
        time.sleep(3)
        i += 1
    for i in range(8):
        t1 = threading.Thread(target=func, args=())
        t1.start()
    num = 0
    mutex = threading.Lock()
    test()
|
from nsepy import get_history
from datetime import date
# data = get_history(symbol="SBIN", start=date(2015,1,1), end=date(2015,1,31))

# Last trading day of March (financial-year end) for each year 2009-2020.
FY_END_DATES = [
    date(2009, 3, 31),
    date(2010, 3, 31),
    date(2011, 3, 31),
    date(2012, 3, 30),
    date(2013, 3, 28),
    date(2014, 3, 31),
    date(2015, 3, 31),
    date(2016, 3, 31),
    date(2017, 3, 31),
    date(2018, 3, 28),
    date(2019, 3, 29),
    date(2020, 3, 31),
]

# Fetch and print the NIFTY index snapshot for each year-end date.
# (Collapses twelve copy-pasted fetch/print pairs into one loop.)
for fy_date in FY_END_DATES:
    nifty_pe = get_history(symbol="NIFTY", start=fy_date, end=fy_date, index=True)
    print(nifty_pe)
# Read the number of judges, then presentations until the sentinel 'Finish';
# print each presentation's average and the overall final grade.
judges_count = int(input())
presentation_name = input()
all_grades = 0
presentations_count = 0
while presentation_name != 'Finish':
    presentation_grades = 0
    presentations_count += 1
    for _ in range(judges_count):
        grade = float(input())
        presentation_grades += grade
        all_grades += grade
    presentation_avrg_grade = presentation_grades / judges_count
    print(f"{presentation_name} - {presentation_avrg_grade:.2f}.")
    presentation_name = input()
# Fix: guard against ZeroDivisionError when 'Finish' is entered before any
# presentation (presentations_count == 0 in the original crashed here).
if presentations_count:
    final_grade = all_grades / (presentations_count * judges_count)
    print(f"Student's final assessment is {final_grade:.2f}.")
|
from numpy import *
n = array([[1, 2, 3], [4, 5, 6]])
# In-place reshape: 2x3 -> 6x1 column (element count unchanged).
n.shape = (6, 1)
# reshape() returns a new 1x6 view; n itself keeps shape (6, 1).
m = n.reshape(1, 6)
print(m)
print()
a = arange(24)
a.shape = (6, 4)
print(a)
b = array([1, 2, 3, 4, 5])
# Bytes per element of a (platform default int — typically 8 on 64-bit).
print(a.itemsize)
c = zeros(5)  # dtype defaults to float64
print(c)
c = zeros(5, int)
print(c)
c = ones(5, int)
print(c)
c = ones((3, 3), int)
print(c)
d = array([1, 2, 3, 4, 5, 6.0])  # one float promotes the whole array to float
print(d.size)
d = array([1, 2, 3, 4, 5, 6])
print(d.dtype)
|
import tqdm
from scipy.spatial import distance
from skimage.transform import resize
import tensorflow as tf
import numpy as np
from pathlib import Path
import imageio
from src.facenet.facenet.src import facenet
from src.facenet.facenet.src.align import detect_face
MODEL_DIR = str(Path('/media/neuroscout-data/scratch/face_priming_save/models/20180402-114759/').absolute())
def chunks(l, n):
    """Split *l* into consecutive slices of length *n* (last may be shorter)."""
    pieces = []
    for start in range(0, len(l), n):
        pieces.append(l[start:start + n])
    return pieces
def get_embeddings(images, batch_size=2000):
    """Compute FaceNet embeddings for a stack of cropped face images.

    Args:
        images: cropped face images to embed.
        batch_size: number of images fed to the network per session run.

    Returns:
        Numpy array of images x embeddings.
    """
    with tf.Graph().as_default():
        with tf.compat.v1.Session() as sess:
            # Load the pretrained FaceNet model into this graph.
            facenet.load_model(MODEL_DIR)
            graph = tf.compat.v1.get_default_graph()
            # Resolve the graph's input/output tensors by name.
            input_tensor = graph.get_tensor_by_name("input:0")
            embedding_tensor = graph.get_tensor_by_name("embeddings:0")
            phase_train = graph.get_tensor_by_name("phase_train:0")
            print('Calculating embeddings...')
            batch_results = []
            # Feed the network batch by batch to bound memory usage.
            for batch in tqdm.tqdm_notebook(chunks(images, batch_size)):
                feed = {input_tensor: batch, phase_train: False}
                batch_results.append(sess.run(embedding_tensor, feed_dict=feed))
            return np.vstack(batch_results)
def crop_face(img, bounding_box, margin=44, target_size=160):
    """Crop a detected face (with margin), resize it, and prewhiten it."""
    img_h, img_w = np.asarray(img.shape)[0:2]
    det = np.squeeze(bounding_box[0:4])
    # Expand the box by margin/2 on each side, clipped to the image bounds;
    # assignment into the int32 array truncates to integers.
    bb = np.zeros(4, dtype=np.int32)
    bb[0] = np.maximum(det[0] - margin / 2, 0)
    bb[1] = np.maximum(det[1] - margin / 2, 0)
    bb[2] = np.minimum(det[2] + margin / 2, img_w)
    bb[3] = np.minimum(det[3] + margin / 2, img_h)
    face = img[bb[1]:bb[3], bb[0]:bb[2], :]
    scaled = resize(face, (target_size, target_size), mode='constant', anti_aliasing=False)
    return facenet.prewhiten(scaled)
def _load_detect_nets():
    """Build the three MTCNN face-detection stages in a fresh TF graph."""
    with tf.Graph().as_default():
        session = tf.compat.v1.Session(
            config=tf.compat.v1.ConfigProto(log_device_placement=False))
        with session.as_default():
            return detect_face.create_mtcnn(session, None)
def _detect_faces(img, nets, minsize=20, threshold=(0.6, 0.7, 0.7), factor=0.709):
    """Detect faces in *img* with an initialized MTCNN.

    Args:
        img: input image array.
        nets: (pnet, rnet, onet) tuple from _load_detect_nets().
        minsize: minimum face size in pixels.
        threshold: per-stage detection thresholds. Fix: default changed from
            a list to a tuple — mutable default arguments are shared across
            calls (a known Python pitfall); indexing behavior is unchanged.
        factor: image-pyramid scale factor.

    Returns:
        Bounding boxes array as produced by detect_face.detect_face.
    """
    pnet, rnet, onet = nets
    bounding_boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
    return bounding_boxes
def load_detect_crop(image_paths, target_size=160, margin=44, **kwargs):
    """Load images, detect faces, and return the cropped/aligned face stack
    together with the source path for each detected face."""
    print('Creating networks and loading parameters')
    nets = _load_detect_nets()
    print('Loading, cropping, and aligning')
    faces = []
    face_sources = []
    for path in tqdm.tqdm_notebook(image_paths):
        img = imageio.imread(path)
        # One image may contain several faces; record the path per face.
        for box in _detect_faces(img, nets, **kwargs):
            faces.append(crop_face(img, box, margin=margin))
            face_sources.append(path)
    return np.stack(faces), face_sources
|
import pygame
class Window:
    """Pygame window paired with a lower-resolution drawing surface.

    Client code draws on the surface from get(); render() stretches it up
    to the real window size and flips the display.
    """

    def __init__(self, title, width, height, scale=1, flags=0):
        self.title = title
        self.width = width
        self.height = height
        self.scale = scale
        # NOTE: the 'heigthScaled' (sic) attribute name is kept — it is part
        # of this class's public attribute API.
        self.widthScaled = width // scale
        self.heigthScaled = height // scale
        self.flags = flags
        self.content = None
        self.scaledContent = None
        self.frame = None
        self.create()

    def create(self):
        """(Re)create the display window and the scaled draw surface."""
        self.content = pygame.display.set_mode((self.width, self.height), flags=self.flags)
        self.scaledContent = pygame.Surface([self.widthScaled, self.heigthScaled]).convert()
        pygame.display.set_caption(self.title)
        return self.scaledContent

    def get(self):
        """Return the surface client code should draw on."""
        return self.scaledContent

    def render(self):
        """Upscale the draw surface to the window size and flip the display."""
        upscaled = pygame.transform.scale(self.scaledContent, (self.width, self.height))
        self.content.blit(upscaled, upscaled.get_rect())
        pygame.display.flip()
from flask import Flask, redirect, url_for, request,render_template
app = Flask(__name__)
@app.route('/log/<sum>')
def log(sum):
    """Render log.html with the value captured from the URL.

    Fix: the route variable is named 'sum', so Flask passes it as the
    keyword argument 'sum'; the original signature `def log(name)` raised
    a TypeError on every request. (The parameter shadows the builtin `sum`
    only within this tiny view.)
    """
    return render_template('log.html', name=sum)
@app.route('/login', methods=['POST', 'GET'])
def login():
    """Add the two posted numbers and redirect to the /log/<sum> page."""
    if request.method == 'POST':
        user = request.form['nm']
        user1 = request.form['nm1']
        total = int(user) + int(user1)
        # Fix: url_for must use the route's variable name ('sum'); the
        # original passed 'name', which raises a BuildError because the
        # required 'sum' URL parameter is missing.
        return redirect(url_for('log', sum=total))
    else:
        user = request.args.get('nm')
        # NOTE(review): no 'success' endpoint is defined in this file, so
        # this branch raises a BuildError — confirm the intended target.
        return redirect(url_for('success', name=user))
if __name__ == '__main__':
app.run(debug = True)
|
import sys
import cv
import math
import numpy
#import time
import bSpline
import argparse
import os # for retriving live picture
import pygame
global inputParser # just a reminder, it's used as a global variable
global inputArgs # just a reminder, it's used as a global variable
global screen # just a reminder, it's used as a global variable
#global imgTransMat
def parseInput():
    """Parse command-line options into the module-global `inputArgs`.

    Also stores the parser itself in the module-global `inputParser`.
    """
    global inputParser
    global inputArgs
    inputParser = argparse.ArgumentParser(description='Apply radial transformation to images.')
    # Geometry of the annulus to unwrap.
    inputParser.add_argument('-c', '--center', dest='center', action='store', default=0, type=int, nargs=2, help='annulus center in the image: x y')
    inputParser.add_argument('-o', '--offset', dest='offset', action='store', default=0, type=float, help='offset transformation starting angle: rad')
    inputParser.add_argument('-r', '--insideR', dest='insideR', action='store', default=1, type=int, help='annulus inside radius: pixels')
    inputParser.add_argument('-R', '--outsideR', dest='outsideR', action='store', default=10, type=int, help='annulus outside radius: pixels')
    # Processing pipeline selection.
    inputParser.add_argument('-p', '--pProcess', dest='pProcess', action='store', default="lp2c", help='image processing: type (p2c, lp2c)')
    inputParser.add_argument('-pp', '--ppProcess', dest='ppProcess', action='store', default="bspl", help='image post processing: type (none, bspl)')
    inputParser.add_argument('-s', '--scale', dest='scale', action='store', default=1.0, type=float, help='output image scaling factor: x')
    inputParser.add_argument('-m', '--magic', dest='magic', action='store', default=3.49095094408, type=float, help='"magic" number for the lp2c function to get the correct ratio (has to be more then 1.0)')
    # URL of the live camera frame fetched via wget.
    inputParser.add_argument('-l', '--live', dest='live', action='store', default="http://192.168.43.1:8080/shot.jpg", help='addres, port and port to photo')
    inputArgs = inputParser.parse_args()
def pix2x(pixel, diff, offset):  # diff - diference in image size, offset - center offset
    """Map an image column index to normalized x in [-1, 1], relative to the
    configured annulus center (inputArgs.center[0])."""
    #return ( (1.0 * 2 * pixel) / (1.0 * 2 * inputArgs.center[0]) ) - 1 # x = 2 * i / width -1 : width = 2 * center
    return ( (1.0 * 2 * (pixel - diff)) / (1.0 * 2 * (inputArgs.center[0] - offset - diff) ) ) - 1

def pix2y(pixel, diff, offset):
    """Map an image row index to normalized y in [-1, 1], relative to the
    configured annulus center (inputArgs.center[1])."""
    #return ( (1.0 * 2 * pixel) / (1.0 * 2 * inputArgs.center[1]) ) - 1 # y = 2 * j / height -1 : height = 2 * center
    return ( (1.0 * 2 * (pixel - diff)) / (1.0 * 2 * (inputArgs.center[1] - offset - diff) ) ) - 1

def x2pix(x, diff, offset):
    """Inverse of pix2x: normalized x back to a (float) column coordinate."""
    #return (int)(math.ceil((x + 1) * (1.0 * 2 * inputArgs.center[0]) / 2 )) # i = (x + 1) * width / 2 : width = 2 * center
    #return (int)(math.ceil((x + 1) * (1.0 * 2 * (inputArgs.center[0] - offset - diff)) / 2 ) + diff)
    return ((x + 1) * (1.0 * 2 * (inputArgs.center[0] - offset - diff)) / 2 + diff)

def y2pix(y, diff, offset):
    """Inverse of pix2y: normalized y back to a (float) row coordinate."""
    #return (int)(math.ceil((y + 1) * (1.0 * 2 * inputArgs.center[1]) / 2 )) # j = (x + 1) * height / 2 : height = 2 * center
    #return (int)(math.ceil((y + 1) * (1.0 * 2 * (inputArgs.center[1] - offset - diff)) / 2 ) + diff)
    return ((y + 1) * (1.0 * 2 * (inputArgs.center[1] - offset - diff)) / 2 + diff)
def getR(x, y):
    """Polar radius of the point (x, y): r = sqrt(x^2 + y^2)."""
    return math.sqrt(x * x + y * y)

def getPHI(x, y):
    """Polar angle of the point (x, y), via atan2 (range (-pi, pi])."""
    return math.atan2(y, x)

def getX(r, phi):
    """Cartesian x from polar coordinates (r, phi)."""
    return r * math.cos(phi)

def getY(r, phi):
    """Cartesian y from polar coordinates (r, phi)."""
    return r * math.sin(phi)
def processInput():
    """Main driver (Python 2): build the radial-transform template once from
    a first live frame, then loop forever fetching and transforming frames."""
    global screen
    # calculate image transformation template
    if inputArgs.pProcess == "lp2c":
        # log-polar to cartasian (lp2c)
        print "Calculating log-polar to cartasian image translation template ..."
        os.system("wget -q %s -O /dev/shm/photo.jpg" % inputArgs.live)
        (img_tpl, cv_img_roi) = translateImage_logPolar2cartasian("/dev/shm/photo.jpg") # use the first picture for calculations
    else:
        # default, polar ro cartasin (p2c)
        print "Calculating polar to cartasian image translation template ..."
        os.system("wget -q %s -O /dev/shm/photo.jpg" % inputArgs.live)
        (img_tpl, cv_img_roi) = translateImage_polar2cartasian("/dev/shm/photo.jpg") # use the first picture for calculations
    img_beta_f = [] # for bspline, if used
    if inputArgs.ppProcess == "bspl":
        # calculate b-spline beta function
        print "Calculating B-spline beta function template ..."
        #bSpl = bSpline.BSpline()
        #img_beta_f = bSpl.cubic_getBeta(cv_img_roi, img_tpl)
        img_beta_f = bSpline.cubic_getBeta(cv_img_roi, img_tpl)
    else:
        # default, none
        pass
    # create window for displaying image, sized from the template
    pygame.init()
    w = img_tpl.shape[1]
    h = img_tpl.shape[0]
    size = (w, h)
    screen = pygame.display.set_mode(size)
    #for img in range(0,len(inputArgs.image),1) :
    # NOTE(review): fetches and displays frames forever; no exit condition.
    while (True):
        os.system("wget -q %s -O /dev/shm/photo.jpg" % inputArgs.live)
        transformImage(0, img_tpl, img_beta_f)
def transformImage(img, img_tpl, img_beta_f = None) : # img - index of inputArgs.image[]
    """Map /dev/shm/photo.jpg through the precomputed (row, col) -> source
    pixel template and blit the result into the pygame window.

    img_beta_f is the optional precomputed B-spline beta table; when absent,
    a plain nearest-pixel copy is used.
    """
    global screen
    # load image
    cv_img_input = cv.LoadImage("/dev/shm/photo.jpg")
    # select region of interest on image (for faster computation)
    cv.SetImageROI(cv_img_input,((inputArgs.center[0] - inputArgs.outsideR), (inputArgs.center[1] - inputArgs.outsideR),(inputArgs.outsideR*2),(inputArgs.outsideR*2)))
    # let's work with a smaller image
    cv_img_roi = cv.CreateImage(((inputArgs.outsideR*2), (inputArgs.outsideR*2)),cv.IPL_DEPTH_8U,3)
    cv.Copy(cv_img_input, cv_img_roi)
    # calculate the size of the output image
    out_y = img_tpl.shape[0]
    out_x = img_tpl.shape[1]
    # create the output image
    cv_img_out = cv.CreateImage((out_x, out_y), cv.IPL_DEPTH_8U,3)
    if inputArgs.ppProcess == "bspl" :
        # transform using b-splines (bspl)
        #bSpl = bSpline.BSpline()
        #cv_img_out = bSpl.cubic_setBeta(cv_img_roi, img_tpl, img_beta_f)
        cv_img_out = bSpline.cubic_setBeta(cv_img_roi, img_tpl, img_beta_f)
    else :
        # none, pixel for pixel copy is just fine (none)
        # NOTE(review): reads from cv_img_input (ROI view) while the template
        # was built from the ROI copy — confirm coordinates line up.
        for x in range(0,out_x) :
            for y in range(0,out_y) :
                cv_img_out[y,x] = cv_img_input[(int)(img_tpl[y][x][0]), (int)(img_tpl[y][x][1])]
        pass
    # save image as a workaround for conversion
    cv.SaveImage("/dev/shm/out.jpg", cv_img_out)
    # display image - load as a workaround for conversion
    img = pygame.image.load("/dev/shm/out.jpg")
    screen.blit(img,(0,0))
    pygame.display.flip()
    pass
def translateImage_logPolar2cartasian(img) :
    """Build a (row, col) -> source-pixel lookup template that unwraps the
    annulus around inputArgs.center from log-polar to Cartesian coordinates.

    Returns (img_tpl, cv_img_roi): the numpy template of source (i, j)
    coordinates and the cropped working image.
    """
    # load image
    cv_img_input = cv.LoadImage(img, cv.CV_LOAD_IMAGE_COLOR)
    # select region of interest on image (for faster computation)
    cv.SetImageROI(cv_img_input,((inputArgs.center[0] - inputArgs.outsideR), (inputArgs.center[1] - inputArgs.outsideR), (inputArgs.outsideR*2), (inputArgs.outsideR*2)))
    # let's work with a smaller image
    cv_img_roi = cv.CreateImage(((inputArgs.outsideR*2), (inputArgs.outsideR*2)), cv.IPL_DEPTH_8U, 3)
    cv.Copy(cv_img_input, cv_img_roi)
    cv_img_width = cv.GetSize(cv_img_input)[0]
    cv_img_height = cv.GetSize(cv_img_input)[1]
    # !!! uncomment if you want to see the whole picture
    #inputArgs.insideR = 1
    cv_img_width_half = cv_img_width / 2
    cv_img_height_half = cv_img_height / 2
    # calibration factors to fix the ratio
    #img_cal_f_p = [-6.2062e-07, 2.1236e-03, 2.3781]
    #magic = img_cal_f_p[0]*(inputArgs.outsideR**2) + img_cal_f_p[1]*inputArgs.outsideR + img_cal_f_p[2]
    magic = inputArgs.magic
    # distance from the transform's focus to the image's farthest corner - here we cheat and reduce it to r (by multiplying one side with 0)
    # this is used to calculate the iteration step across the transform's dimension
    img_dist = (((cv_img_width / 2)**2 + 0*(cv_img_height / 2)**2)**0.5) * magic * inputArgs.scale
    img_len = int(math.ceil(img_dist))
    # recalculated inside r length
    ri = math.ceil(math.log(inputArgs.insideR) / (math.log(img_dist)/img_len)) # math.ceil(math.log(inputArgs.insideR) / p_scale
    # recalculated outside r length
    ro = math.ceil(math.log(inputArgs.outsideR) / (math.log(img_dist)/img_len)) # math.ceil(math.log(inputArgs.outsideR) / p_scale
    # recalculated length of usable image - hight
    ri_ro = int(ro - ri)
    img_width = int(cv_img_height * inputArgs.scale)
    # scale factor determines the size of each "step" along the transformation
    p_scale = math.log(img_dist) / img_len # p
    fi_scale = (2.0 * math.pi) / img_width # fi
    # create output image (NOTE(review): `transformed` is never used below)
    transformed = cv.CreateImage((img_width, ri_ro),cv_img_input.depth,cv_img_input.nChannels)
    # create translation template
    img_tpl = numpy.zeros( (ri_ro, img_width, 2) )
    # transformation: for each (log-radius, angle) cell record the source pixel
    for p in range(0, img_len):
        p_exp = math.exp((p+ri) * p_scale)
        # only radii inside the requested annulus contribute
        if p_exp >= inputArgs.insideR and p_exp <= inputArgs.outsideR:
            for t in range(0, img_width):
                t_rad = t * fi_scale
                i = (cv_img_width_half + p_exp * math.sin(t_rad + inputArgs.offset))
                j = (cv_img_height_half + p_exp * math.cos(t_rad + inputArgs.offset))
                if 0 <= i < cv_img_width and 0 <= j < cv_img_height:
                    img_tpl[p, img_width-1-t][0] = i
                    img_tpl[p, img_width-1-t][1] = j
    return (img_tpl, cv_img_roi)
def translateImage_polar2cartasian(img) :
    """Build a (row, col) -> source-pixel lookup template that unwraps the
    annulus around inputArgs.center from plain polar to Cartesian coordinates.

    Temporarily rewrites inputArgs.center to ROI coordinates while computing
    (restored before returning).  Returns (img_tpl, cv_img_roi).
    """
    # load image
    cv_img_input = cv.LoadImage(img, cv.CV_LOAD_IMAGE_COLOR)
    # calculate the size of the output image
    out_y = (int)(inputArgs.outsideR - inputArgs.insideR)
    #out_y = (int)(((inputArgs.outsideR - inputArgs.insideR) ** 2 + (inputArgs.outsideR - inputArgs.insideR) ** 2) ** 0.5)
    out_x = (int)(math.pi * (pow(inputArgs.outsideR,2) - pow(inputArgs.insideR,2))) / out_y #area of annulus / hight of the image == width of the image
    #out_x = (int)(2*math.pi*inputArgs.outsideR) # circumference of the outer circle
    #out_x = (int)(2*math.pi*inputArgs.insideR) # circumference of the inner circle
    #out_x = (int)(1.75*math.pi*inputArgs.insideR) # almost full circumference of the inner circle
    out_x = (int)(out_x * inputArgs.scale)
    out_y = (int)(out_y * inputArgs.scale)
    # create translation template
    img_tpl = numpy.zeros( (out_y, out_x,2) )
    # select region of interest on image (for faster computation)
    cv.SetImageROI(cv_img_input,((inputArgs.center[0] - inputArgs.outsideR), (inputArgs.center[1] - inputArgs.outsideR),(inputArgs.outsideR*2),(inputArgs.outsideR*2)))
    # let's work with a smaller image; remember the real center so it can be
    # restored at the end
    center_x = inputArgs.center[0]
    center_y = inputArgs.center[1]
    inputArgs.center[0] = inputArgs.outsideR
    cv_img_roi = cv.CreateImage(((inputArgs.outsideR*2), (inputArgs.outsideR*2)),cv.IPL_DEPTH_8U,3)
    cv.Copy(cv_img_input, cv_img_roi)
    # we are not where we were in a smaller image
    inputArgs.center[1] = inputArgs.outsideR
    # calculate offsets for non-square images and off-center foci
    cv_img_width = cv.GetSize(cv_img_roi)[0]
    cv_img_height = cv.GetSize(cv_img_roi)[1]
    cv_img_width_diff = 0
    cv_img_height_diff = 0
    if cv_img_width != cv_img_height :
        if cv_img_width > cv_img_height :
            cv_img_width_diff = (cv_img_width - cv_img_height) / 2
            # NOTE(review): the two statements below compute x-1 but discard
            # the result — presumably `-= 1` was intended; confirm.
            if (cv_img_width_diff % 2) != 0 :
                cv_img_width_diff-1
        else :
            cv_img_height_diff = (cv_img_height - cv_img_width) / 2
            if (cv_img_height_diff % 2) != 0 :
                cv_img_height_diff-1
    cv_img_center_width_diff = 0
    cv_img_center_height_diff = 0
    cv_img_center_width_diff = inputArgs.center[0] - (cv_img_width / 2)
    cv_img_center_height_diff = inputArgs.center[1] - (cv_img_height / 2)
    # calculate radius range and step sizes
    r_min = getR(pix2x(inputArgs.center[0] - cv_img_center_width_diff + inputArgs.insideR, cv_img_width_diff, cv_img_center_width_diff), pix2y(inputArgs.center[1] - cv_img_center_height_diff, cv_img_height_diff, cv_img_center_height_diff))
    r_max = getR(pix2x(inputArgs.center[0] - cv_img_center_width_diff + inputArgs.outsideR, cv_img_width_diff, cv_img_center_width_diff), pix2y(inputArgs.center[1] - cv_img_center_height_diff, cv_img_height_diff, cv_img_center_height_diff))
    r_dif_count = (int)((inputArgs.outsideR - inputArgs.insideR) * inputArgs.scale)
    r_dif = ((r_max - r_min) / r_dif_count)
    phi_dif = (2 * math.pi) / out_x # 2 * pi / width
    # for each (radius, angle) output cell record the source pixel coordinates
    for p in range(0,out_x,1) :
        phi = phi_dif * p + inputArgs.offset
        for r in range(0, r_dif_count, 1) :
            radius = r_min + (r * r_dif)
            img_tpl[r,out_x-1-p][0] = y2pix(getY(radius, phi),cv_img_height_diff,cv_img_center_height_diff) + cv_img_center_height_diff
            img_tpl[r,out_x-1-p][1] = x2pix(getX(radius, phi),cv_img_width_diff, cv_img_center_width_diff) + cv_img_center_width_diff
    # lets go back where we were
    inputArgs.center[0] = center_x
    inputArgs.center[1] = center_y
    return (img_tpl, cv_img_roi)
def progressbar(progress, prefix = "", postfix = "", size = 60) :
    """Render an in-place textual progress bar on stdout.

    progress is a fraction in [0, 1]; size is the bar width in characters.
    Uses '\r' so successive calls overwrite the same terminal line.
    """
    filled = int(size * progress)
    bar = "#" * filled + "." * (size - filled)
    sys.stdout.write("%s [%s] %d%% %s\r" % (prefix, bar, int(progress * 100), postfix))
    sys.stdout.flush()
    #time.sleep(1.0) # long computation
# Script entry point: parse the CLI arguments, then run the live transform loop.
if __name__ == "__main__": # this is not a module
    parseInput() # what do we have to do
    processInput() # doing what we have to do
    print "" # for estetic output
|
from .team import (
Team, Profile, PublicInfo, MembershipInfo, Group, UserRole
)
from .board import (
BoardSettings, Board, ItemStatus, Column, Sprint, ColumnTemplate
)
from .items import (
Item, ItemStatus, Assignee, Vote, Comment
)
|
#!/usr/bin/env python
import cv2, time
import numpy as np
# Lane-line detector: for each video frame, threshold a horizontal scan band
# in HSV space and slide a small window inward from each edge until it is
# mostly lit, marking the left/right lane positions.
cap = cv2.VideoCapture('track2.avi')

# Tunables.  NOTE(review): several names embed a value that differs from the
# actual constant (e.g. threshold_60 = 150); names kept for consistency.
threshold_60 = 150          # V-channel brightness threshold
threshold_100 = 100         # currently unused
width_640 = 640             # frame width in pixels
scan_width_200, scan_height_20 = 200, 20   # search band dimensions
lmid_200, rmid_440 = scan_width_200, width_640 - scan_width_200
area_width_20, area_height_10 = 20, 10     # sliding detection window size
vertical_430 = 430          # top row of the scan band
row_begin_5 = (scan_height_20 - area_height_10) // 2
row_end_15 = row_begin_5 + area_height_10
pixel_threshold_160 = 0.8 * area_width_20 * area_height_10  # min lit pixels

while True:
    ret, frame = cap.read()
    if not ret:                      # end of video
        break
    if cv2.waitKey(1) & 0xFF == 27:  # ESC quits
        break

    roi = frame[vertical_430:vertical_430 + scan_height_20, :]
    frame = cv2.rectangle(frame, (0, vertical_430),
                          (width_640 - 1, vertical_430 + scan_height_20),
                          (255, 0, 0), 3)

    # Binarize the band: bright (high-V) pixels become white.
    hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    lbound = np.array([0, 0, threshold_60], dtype=np.uint8)
    ubound = np.array([131, 255, 255], dtype=np.uint8)
    mask = cv2.inRange(hsv, lbound, ubound)  # renamed from `bin` (shadowed the builtin)
    view = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)

    # Slide the detection window inward from each side until mostly lit.
    left, right = -1, -1
    for l in range(area_width_20, lmid_200):
        area = mask[row_begin_5:row_end_15, l - area_width_20:l]
        if cv2.countNonZero(area) > pixel_threshold_160:
            left = l
            break
    for r in range(width_640 - area_width_20, rmid_440, -1):
        area = mask[row_begin_5:row_end_15, r:r + area_width_20]
        if cv2.countNonZero(area) > pixel_threshold_160:
            right = r
            break

    # Draw the detected windows; cv2.rectangle mutates `view` in place, so
    # the previously-unused lsquare/rsquare bindings were dropped.
    if left != -1:
        cv2.rectangle(view,
                      (left - area_width_20, row_begin_5),
                      (left, row_end_15),
                      (0, 255, 0), 3)
    else:
        print("Lost left line")
    if right != -1:
        cv2.rectangle(view,
                      (right, row_begin_5),
                      (right + area_width_20, row_end_15),
                      (0, 255, 0), 3)
    else:
        print("Lost right line")

    cv2.imshow("origin", frame)
    cv2.imshow("view", view)

    # Debug view: the same threshold applied to the whole frame.
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lbound = np.array([0, 0, threshold_60], dtype=np.uint8)
    ubound = np.array([131, 255, 255], dtype=np.uint8)
    hsv = cv2.inRange(hsv, lbound, ubound)
    cv2.imshow("hsv", hsv)
    time.sleep(0.1)

cap.release()
cv2.destroyAllWindows()
|
"""
Codes are modifeid from PyTorch and Tensorflow Versions of VGG:
https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py, and
https://github.com/keras-team/keras-applications/blob/master/keras_applications/vgg16.py
"""
import tensorflow as tf
import numpy as np
import pdb
from tensorflow.keras.applications.vgg16 import VGG16 as vgg16
from tensorflow.keras.applications.vgg19 import VGG19 as vgg19
__all__ = ['VGG11', 'VGG13', 'VGG16','VGG19']
def VGG(feature, num_cls):
    """Classifier head: flatten *feature*, apply two 4096-unit ReLU dense
    layers, and return *num_cls* raw logits (no softmax).

    NOTE(review): all three Dense layers are given the same name
    (scope.name) — depending on the Keras/TF version this may be rejected
    or silently uniquified; confirm intended checkpoint variable names.
    """
    with tf.variable_scope('fully_connected') as scope:
        dim =np.prod(feature.shape[1:])
        x = tf.reshape(feature, [-1, dim])
        x = tf.keras.layers.Dense(units=4096, activation='relu', name=scope.name)(x)
        x = tf.keras.layers.Dense(units=4096, activation='relu', name=scope.name)(x)
        x = tf.keras.layers.Dense(units=num_cls, name=scope.name)(x)
    return x
def make_layers(x, cfg):
    """Stack the VGG feature extractor described by *cfg* onto tensor *x*.

    Each integer entry adds a 3x3 same-padded ReLU Conv2D with that many
    filters; the sentinel 'M' adds a 2x2 stride-2 max-pool.
    """
    for spec in cfg:
        if spec == 'M':
            x = tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='valid')(x)
            continue
        x = tf.keras.layers.Conv2D(
            filters=spec,
            kernel_size=[3, 3],
            padding='SAME',
            activation=tf.nn.relu
        )(x)
    return x
# Feature-extractor configurations (torchvision convention): integers are
# Conv2D output channels, 'M' marks a 2x2 max-pool.
# 'A' -> VGG11, 'B' -> VGG13, 'D' -> VGG16, 'E' -> VGG19.
cfg = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M',
          512, 512, 512, 512, 'M'],
}
def VGG11(x_images, num_cls):
    """VGG-11 (config 'A'): 8 conv layers + classifier head; returns logits."""
    feature = make_layers(x_images, cfg['A'])
    return VGG(feature, num_cls)
def VGG13(x_images, num_cls):
    """VGG-13 (config 'B'): 10 conv layers + classifier head; returns logits."""
    feature = make_layers(x_images, cfg['B'])
    return VGG(feature, num_cls)
def VGG16(x_images, num_cls):
    """VGG-16 (config 'D'): 13 conv layers + classifier head; returns logits."""
    feature = make_layers(x_images, cfg['D'])
    return VGG(feature, num_cls)
def VGG19(x_images, num_cls):
    """VGG-19 (config 'E'): 16 conv layers + classifier head; returns logits."""
    feature = make_layers(x_images, cfg['E'])
    return VGG(feature, num_cls)
|
import os
from django.db import models
from django.db.models import Max
from django.core.exceptions import ValidationError
from django.utils import timezone
from django.conf import settings
from config.storage import PrivateMediaStorage
from tag.models import Tag
def storage():
    """Pick the upload storage backend.

    Production (DEBUG off) uses the private S3-backed storage; in DEBUG
    return None so Django falls back to the default storage.
    """
    return None if settings.DEBUG else PrivateMediaStorage()
def validate_empty(value):
    """Field validator: reject values whose string form is empty."""
    if len(str(value)) == 0:
        raise ValidationError(
            ('็ฉบใใญในใใฏ่จฑๅฏใใใฆใใพใใ: %(value)s'),
            params={'value': value},
        )
def tweet_file_path(instance, filename):
    """Upload-path builder: microsecond timestamp + original extension.

    *instance* is required by Django's upload_to signature but unused.
    """
    stamp = timezone.now().strftime('%Y%m%d%H%M%S%f')
    extension = os.path.splitext(filename)[1]
    return f"identity/{stamp}{extension}"
class Tweet(models.Model):
class Status(models.TextChoices):
PUBLISHED = 0, 'ๅ
ฌ้'
DRAFT = 1, 'ไธๆธใ'
PRIVATE = 2, '้ๅ
ฌ้'
user = models.ForeignKey("main.User", on_delete=models.CASCADE)
text = models.TextField(validators=[validate_empty])
image = models.ImageField(upload_to=tweet_file_path,
storage=storage(),
null=True,
blank=True)
status = models.IntegerField(choices=Status.choices, default=Status.DRAFT)
tags = models.ManyToManyField("tag.Tag",
through="TweetTagRelation",
through_fields=("tweet", "tag"),
related_name="tweets",
blank=True)
created_at = models.DateTimeField(auto_now_add=True)
@classmethod
def get_from_tag(cls, tag):
return tag.tweet_set.all()
    def status_display(self):
        """Human-readable label for this tweet's `status` value."""
        return self.get_status_display()
    def get_all_tags(self):
        """QuerySet of every Tag attached to this tweet."""
        return self.tags.all()
    def add_tag(self, tag):
        """Attach *tag* to this tweet (no-op if already attached)."""
        self.tags.add(tag)
    def remove_tag(self, tag):
        """Detach *tag* from this tweet (no-op if not attached)."""
        self.tags.remove(tag)
    def get_tag_count(self):
        """Number of distinct tags attached to this tweet.

        NOTE(review): given the unique (tweet, tag) constraint on the through
        model this should equal self.tags.count() — confirm before simplifying.
        """
        return Tag.objects.filter(
            tweettagrelation__tweet=self).distinct().count()
def get_tag_added_at(self):
latest_tag = Tag.objects.filter(
tweettagrelation__tweet=self).distinct().annotate(
tweettagrelation__created_at=Max(
'tweettagrelation__created_at')).order_by(
"-tweettagrelation__created_at").first()
if latest_tag is None:
return None
return latest_tag.created_at
class TweetTagRelation(models.Model):
    """Through model for Tweet<->Tag, recording when each tag was attached."""
    tweet = models.ForeignKey("Tweet", on_delete=models.CASCADE)
    tag = models.ForeignKey("tag.Tag", on_delete=models.CASCADE)
    # Indexed so "latest tag added" lookups stay cheap.
    created_at = models.DateTimeField(auto_now_add=True, db_index=True)
    class Meta:
        # A tag can be attached to a given tweet at most once.
        constraints = [
            models.UniqueConstraint(fields=["tweet", "tag"],
                                    name="uq_tweet_tag"),
        ]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class ItemUnitInfo(object):
    """Plain value object describing one sellable item unit.

    Fields (all default to None): amount, price, spec, title, unit.
    Serialization follows the Alipay SDK convention: falsy fields are
    omitted from to_alipay_dict(), and nested values exposing their own
    to_alipay_dict() are serialized recursively.
    """

    #: field names, in serialization order
    _FIELD_ORDER = None  # intentionally not defined as a class attribute

    def __init__(self):
        self._amount = None
        self._price = None
        self._spec = None
        self._title = None
        self._unit = None

    @property
    def amount(self):
        return self._amount

    @amount.setter
    def amount(self, value):
        self._amount = value

    @property
    def price(self):
        return self._price

    @price.setter
    def price(self, value):
        self._price = value

    @property
    def spec(self):
        return self._spec

    @spec.setter
    def spec(self, value):
        self._spec = value

    @property
    def title(self):
        return self._title

    @title.setter
    def title(self, value):
        self._title = value

    @property
    def unit(self):
        return self._unit

    @unit.setter
    def unit(self, value):
        self._unit = value

    def to_alipay_dict(self):
        """Serialize truthy fields into a plain dict (falsy ones skipped)."""
        params = dict()
        for field in ('amount', 'price', 'spec', 'title', 'unit'):
            value = getattr(self, field)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[field] = value.to_alipay_dict()
            else:
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an ItemUnitInfo from a dict; returns None for falsy input."""
        if not d:
            return None
        o = ItemUnitInfo()
        for field in ('amount', 'price', 'spec', 'title', 'unit'):
            if field in d:
                setattr(o, field, d[field])
        return o
|
from sys import stdin, stdout
from fractions import Fraction
# searches linearly for thumb position in 0..H-2 that satisfies
# reasonable (but wrong) constraint
# Read terminal width W, height H, scroll offset L, and N raw input lines;
# greedily word-wrap the text to width W, then draw the visible window with
# a one-column scrollbar on the right (^ top, v bottom, X thumb).
W, H, L, N = map(int, stdin.readline().split())
text = ' '.join([stdin.readline().strip() for _ in range(N)])
words = text.split()
i = 0
typesettext = list()
line = ""
while i < len(words):
    next_word = words[i]
    if len(line) == 0 and len(next_word) >= W:
        # Over-long word: truncate to the line width.
        # NOTE(review): the remainder of the word is discarded rather than
        # wrapped to the next line — confirm against the spec.
        line += next_word[:W]
        i += 1
        continue
    if len(line) + len(next_word) + 1 <= W:
        line += (" " if len(line) > 0 else "") + next_word
        i += 1
        continue
    else:
        typesettext.append(line)
        line = ""
if (line):
    typesettext.append(line)
# Linear search for a thumb position consistent with the scroll fraction
# (the header comment admits this constraint is not quite right).
for widgetpos in range(H-2):
    if abs( Fraction(widgetpos, H - 2) - Fraction(L, len(typesettext) - H )) <= Fraction(1, H-2):
        break
else:
    assert False
print ('+' + '-' * W + '+-+')
for i in range(L , L + H):
    stdout.write('|')
    stdout.write (typesettext[i])
    stdout.write(' ' * (W - len(typesettext[i])) + '|')
    if i == L:
        stdout.write('^')
    elif i == L + H - 1:
        stdout.write('v')
    elif i == L + widgetpos + 1:
        stdout.write('X')
    else:
        stdout.write(' ')
    stdout.write('|\n')
print ('+' + '-' * W + '+-+')
|
from setuptools import setup, find_packages
from os import path
VERSION = '1.1'

# Use the README as the PyPI long description.
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'readme.md'), encoding='utf-8') as f:
    long_description = f.read()

# Setting up
setup(
    name='kitsupy',
    version=VERSION,
    author='MetaStag',
    author_email='thegreek132@gmail.com',
    url='https://github.com/MetaStag/Pykitsu',
    description='A simple api wrapper for the kitsu api written in python',
    long_description=long_description,
    long_description_content_type='text/markdown',
    packages=find_packages(),
    install_requires=['requests'],
    keywords=['python', 'api-wrapper', 'kitsu', 'anime'],
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Intended Audience :: Developers',
    ]
)
|
# Read two ints and a list of ints; print the three list values closest to
# bp, skipping the single closest entry.
ap,bp=map(int,input().split())
c=list(map(int,input().split()))
# Pair each value with its distance to bp so sorting orders by closeness.
d=[[abs(i-bp),i] for i in c]
d.sort()
d=d[1:]  # drop the closest entry — presumably bp itself; TODO confirm
e=[i[1] for i in d[:3]]
print(*e)
|
import asyncio
import aiohttp
import sys
import json
import time
import re
# Static flooding topology: each server propagates location updates to the
# neighbours it is listed with here.
routes = {
    "Goloman": ["Hands", "Holiday", "Wilkes"],
    "Hands": ["Goloman", "Wilkes"],
    "Holiday": ["Goloman", "Wilkes", "Welsh"],
    "Wilkes": ["Goloman", "Hands", "Holiday"],
    "Welsh": ["Holiday"]
}
# Localhost TCP port for each server instance.
ports = {
    "Goloman": 12759,
    "Hands": 12760,
    "Holiday": 12761,
    "Wilkes": 12762,
    "Welsh": 12763
}
endpoint = 'https://maps.googleapis.com/maps/api/place/nearbysearch/json'
api_key = ''  # Google Places API key — must be set before WHATSAT queries work
# client id -> most recent "AT ..." location message seen for that client
locations = {}
async def whats_at(loc, radius, results_limit):
    """Query Google Places near *loc* and return a pretty-printed JSON string.

    *loc* uses the ISO-6709-ish '+lat-lon' form; it is rewritten to the
    comma-separated 'lat,lon' form the Places API expects.  *radius* is in
    kilometres; the result list is truncated to *results_limit* entries.
    """
    # '+34.068930-118.445127' -> '34.068930,-118.445127'
    location = re.sub("([0-9])([+-])", r"\1,\2", loc)
    location = re.sub("[+]", "", location)
    query = {'key': api_key, 'location': location, 'radius': int(radius) * 1000}
    async with aiohttp.ClientSession() as session:
        async with session.get(endpoint, params=query) as http_resp:
            payload = await http_resp.json()
    if len(payload['results']) > results_limit:
        del payload['results'][results_limit:]
    return json.dumps(payload, indent=3)
async def flood(location):
    """Propagate an AT *location* message to this server's neighbours.

    BUG FIX: the loop previously iterated over every key of `routes`
    (including this server itself), so the neighbour lists — the whole
    point of the `routes` graph — were never used.  Flood only to
    routes[server_name].  Connection failures are logged and skipped.
    """
    print('Flooding')
    for route in routes[server_name]:
        try:
            reader, writer = await asyncio.open_connection(
                '127.0.0.1', ports[route])
            log.write("Connected to " + route + "\n")
            writer.write(location.encode())
            await writer.drain()
            writer.close()
            log.write("Closed connection to " + route + "\n")
        except ConnectionError:
            log.write("Error: Connection error to " + route + "\n")
    log.flush()
def validate_query(query):
    """Return True iff *query* is a well-formed IAMAT command.

    query is the whitespace-split message: [cmd, client_id, location, time].
    The location must be of the '+lat-lon' form (two signed floats run
    together); the timestamp must parse as a float.

    BUG FIXES vs the previous version:
    - a coordinate of 0.0 was rejected because the parsed floats were used
      as truth values; parse success is now what is checked, and
    - the timestamp was never validated, so a bad POSIX time crashed the
      caller later at float(query[3]).
    """
    try:
        if query[0] != "IAMAT":
            return False
        # '+34.068930-118.445127' -> ['+34.068930', '-118.445127']
        loc = re.sub("([0-9])([+-])", r"\1,\2", query[2]).split(",")
        if len(loc) != 2:
            return False
        float(loc[0])
        float(loc[1])
        float(query[3])
        return True
    except ValueError:
        return False
async def handle_echo(reader, writer):
    """Serve one TCP connection: server-to-server AT floods, and client
    IAMAT/WHATSAT queries (responding '? <message>' to anything malformed).
    """
    try:
        data = await reader.read()
        rx_time = time.time()
        message = data.decode()
        query = message.split()
        if query[0] == "AT":
            # Server-to-server flood: record and re-flood only if it is news
            # (not our own message, and newer than what we already hold).
            if query[2] != server_name and (query[4] not in locations or query[5] > locations[query[4]].split()[5]):
                locations[query[4]] = message
                await flood(message)
        else:
            log.write("Query: " + message + "\n")
            if len(query) != 4 or not (query[0] == "IAMAT" or query[0] == "WHATSAT"):
                response = "? " + message
            elif query[0] == "IAMAT":
                if validate_query(query):
                    # Report the skew between receipt time and the client's
                    # claimed send time, then record and propagate.
                    time_diff = rx_time - float(query[3])
                    time_str = str(time_diff)
                    if time_diff > 0:
                        time_str = "+" + time_str
                    response = "AT " + server_name + " " + time_str + " " + " ".join(query)
                    locations[query[1]] = response
                    await flood(response)
                else:
                    response = "? " + message
            elif query[0] == "WHATSAT":
                try:
                    # Radius capped at 50 km, result count at 20 per the spec.
                    if query[1] not in locations or int(query[2]) > 50 or int(query[3]) > 20:
                        response = "? " + message
                    else:
                        json_response = await whats_at(locations[query[1]].split()[5], query[2], int(query[3]))
                        response = locations[query[1]] + "\n" + re.sub('\n+', '\n', json_response.strip()) + "\n\n"
                except ValueError:
                    response = "? " + message
            log.write("Response: " + response + "\n")
            log.flush()
            writer.write(response.encode())
            await writer.drain()
            writer.close()
    except ConnectionError:
        log.write("Error: Connection from client dropped\n")
        log.flush()
        writer.close()
async def main():
    """Validate the server-name argument, open the log file, and serve forever.

    Publishes server_name, log, and server as module globals for the
    connection handler and flood() to use.
    """
    if len(sys.argv) != 2:
        print("Incorrect number of arguments: Please only enter the server name")
        sys.exit(1)
    elif sys.argv[1] not in ports:
        print("Invalid server name: Please choose Goloman, Hands, Holiday, Welsh, or Wilkes")
        sys.exit(1)
    else:
        global server_name
        server_name = sys.argv[1]
        global log
        # Append so restarts keep earlier history.
        log = open(server_name + "_log", "a+")
        global server
        server = await asyncio.start_server(
            handle_echo, '127.0.0.1', ports[server_name])
        addr = server.sockets[0].getsockname()
        print(f'Serving on {addr}')
        async with server:
            await server.serve_forever()
if __name__ == "__main__":
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        # Ctrl-C: flush/close the log and exit cleanly.
        log.close()
        exit(0)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import sqlite3
from flask import render_template, flash, redirect, session, url_for, request, g
#from flask.ext.login import login_user, logout_user, current_user, login_required
from app import app, DATABASE, STORYDIR
def get_db():
    """Return the request-scoped SQLite connection, opening it on first use
    and caching it on flask.g."""
    if getattr(g, '_database', None) is None:
        g._database = sqlite3.connect(DATABASE)
    return g._database
@app.teardown_appcontext
def close_connection(exception):
    """Close the per-request SQLite connection, if one was opened.

    BUG FIX: the previous body referenced a bare name `db` that was never
    defined in this scope, raising NameError on every app-context teardown;
    fetch the cached connection from flask.g instead.
    """
    db = getattr(g, '_database', None)
    if db is not None:
        db.close()
@app.route('/hello', methods=['GET'])
@app.route('/', methods=['GET'])
def gHome():
    """Landing page (also served at /hello)."""
    return render_template('index.html')
@app.route('/ours', methods=['GET'])
def gOurs():
    """Story-submission form page."""
    return render_template('ours.html')
@app.route('/ours', methods=['POST'])
def pOurs():
    """Persist a submitted story: a metadata row in SQLite plus the body
    written to STORYDIR/<sid>, then redirect to the reading page.

    BUG FIX: the story id used for the on-disk filename was previously
    precomputed as max(sid)+1 *before* the insert — racy, and wrong after
    row deletions, so the file name could diverge from the autoincrement
    sid actually assigned.  Use cursor.lastrowid from the insert instead.
    """
    title=request.form['mce_0']
    story=request.form['mce_1']
    db = get_db()
    cur = db.cursor()
    cur.execute("insert into story values ( null, ?, '', ?, '', datetime('now'), '', ?)", (title,'', 0))
    db.commit()
    # The rowid SQLite actually assigned to the new story.
    id = cur.lastrowid
    f = open(STORYDIR + str(id), 'w')
    f.write(story)
    f.close()
    return redirect('/theirs')
@app.route('/theirs', methods=['GET'])
def gTheirs():
    """Render the newest story: title from SQLite, body from STORYDIR/<sid>."""
    story = {}
    db = get_db()
    cur = db.cursor()
    cur.execute("select max(sid) from story")
    id = cur.fetchone()[0]
    # No stories yet: render the page without a story context.
    if not id: return render_template('theirs.html')
    f = open(STORYDIR + str(id), 'r')
    story['content'] = f.read()
    f.close()
    cur.execute("select title from story where sid=?", (id,))
    title = cur.fetchone()[0]
    story['title'] = title
    return render_template('theirs.html', story = story)
@app.route('/contact', methods=['GET'])
def gContact():
    """Static contact page."""
    return render_template('contact.html')
@app.route('/user', methods=['GET'])
def gUser():
    """Static user page."""
    return render_template('user.html')
@app.route('/comment', methods=['GET'])
def gComment():
    """Comment-submission form page."""
    return render_template('comment.html')
@app.route('/comment', methods=['POST'])
def pComment():
    """Store a submitted comment, then flash all comments and go home."""
    name = request.form['name']      # NOTE(review): collected but unused below
    email = request.form['email']    # NOTE(review): collected but unused below
    message = request.form['message']
    db = get_db()
    cur = db.cursor()
    # NOTE(review): the trailing 1, 1 look like hard-coded user/story ids —
    # confirm against the comment table schema.
    cur.execute("insert into comment values ( null, ?, ?, datetime('now'), ?)", (message, 1, 1))
    db.commit()
    cur.execute("select * from comment")
    result = ''
    for row in cur.fetchall():
        result = result + '\n' + row[1]
    flash(result)
    return redirect('/')
from common import *
def draw(**kwargs):
    """Compose a 1536x1536 pattern: horizontal/vertical stripe fields gated
    by a checkerboard (helpers come from `common`; **kwargs is unused)."""
    sq=np.array((256,)*2)  # checkerboard tile size
    imgsz=sq*6             # overall image size: 6x6 tiles
    k=8 # try 128 — stripe half-period in pixels
    # h/v: horizontal and vertical square-wave stripe fields (period 2k).
    h,v=map(lambda im: im//k%2, meshgrid_euclidean(imgsz))
    c=checkerboard(imgsz,sq)  # presumably a 0/1 tile selector — TODO confirm helper
    # Checker tiles alternate between h-stripes and inverted v-stripes.
    im=c*h+(1-c)*(1-v)
    return im
if __name__ == '__main__':
    # Render, display, and save the pattern (imshow/imsave come from `common`).
    im=draw()
    imshow(im)
    imsave(im,'p20.png')
|
# ๅไธ่กๅ
่พๅ
ฅ
line = input('่พๅ
ฅ3ไธชๆฐ๏ผ็ฉบๆ ผๅ้๏ผ:')
l = line.split(' ')
# Convert each space-separated token to int, then print them sorted ascending.
for i, item in enumerate(l):
    l[i] = int(item)
l.sort()
print(l)
|
from .identifier import Keyword
# Public API: one wrapper class per supported SQL keyword.
__all__ = [
    'As',
    'Create',
    'Delete',
    'Drop',
    'Exists',
    'From',
    'If',
    'Insert',
    'Into',
    'Join',
    'Key',
    'Not',
    'On',
    'Primary',
    'Select',
    'SetKw',
    'Table',
    'Update',
    'Values',
    'Where',
]
class As(Keyword):
    """SQL keyword ``as`` (flag=True; flag semantics defined by Keyword in .identifier)."""
    def __init__(self) -> None:
        super().__init__('as', True)
class Create(Keyword):
    """SQL keyword ``create`` (flag=True; flag semantics defined by Keyword)."""
    def __init__(self) -> None:
        super().__init__('create', True)
class Drop(Keyword):
    """SQL keyword ``drop`` (flag=True; flag semantics defined by Keyword)."""
    def __init__(self) -> None:
        super().__init__('drop', True)
class Delete(Keyword):
    """SQL keyword ``delete`` (flag=True; flag semantics defined by Keyword)."""
    def __init__(self) -> None:
        super().__init__('delete', True)
class Select(Keyword):
    """SQL keyword ``select`` (flag=True; flag semantics defined by Keyword)."""
    def __init__(self) -> None:
        super().__init__('select', True)
class From(Keyword):
    """SQL keyword ``from`` (flag=False; flag semantics defined by Keyword)."""
    def __init__(self) -> None:
        super().__init__('from', False)
class Join(Keyword):
    """SQL keyword ``join`` (flag=True; flag semantics defined by Keyword)."""
    def __init__(self) -> None:
        super().__init__('join', True)
class On(Keyword):
    """SQL keyword ``on`` (flag=True; flag semantics defined by Keyword)."""
    def __init__(self) -> None:
        super().__init__('on', True)
class Table(Keyword):
    """SQL keyword ``table`` (flag=False; flag semantics defined by Keyword)."""
    def __init__(self) -> None:
        super().__init__('table', False)
class If(Keyword):
    """SQL keyword ``if`` (flag=False; flag semantics defined by Keyword)."""
    def __init__(self) -> None:
        super().__init__('if', False)
class Not(Keyword):
    """SQL keyword ``not`` (flag=False; flag semantics defined by Keyword)."""
    def __init__(self) -> None:
        super().__init__('not', False)
class Exists(Keyword):
    """SQL keyword ``exists`` (flag=False; flag semantics defined by Keyword)."""
    def __init__(self) -> None:
        super().__init__('exists', False)
class Primary(Keyword):
    """SQL keyword ``primary`` (flag=True; flag semantics defined by Keyword)."""
    def __init__(self) -> None:
        super().__init__('primary', True)
class Key(Keyword):
    """SQL keyword ``key`` (flag=False; flag semantics defined by Keyword)."""
    def __init__(self) -> None:
        super().__init__('key', False)
class Insert(Keyword):
    """SQL keyword ``insert`` (flag=True; flag semantics defined by Keyword)."""
    def __init__(self) -> None:
        super().__init__('insert', True)
class Into(Keyword):
    """SQL keyword ``into`` (flag=False; flag semantics defined by Keyword)."""
    def __init__(self) -> None:
        super().__init__('into', False)
class Update(Keyword):
    """SQL keyword ``update`` (flag=True; flag semantics defined by Keyword)."""
    def __init__(self) -> None:
        super().__init__('update', True)
class SetKw(Keyword):
    """SQL keyword ``set`` (flag=True); named SetKw to avoid clashing with builtin set."""
    def __init__(self) -> None:
        super().__init__('set', True)
class Values(Keyword):
    """SQL keyword ``values`` (flag=False; flag semantics defined by Keyword)."""
    def __init__(self) -> None:
        super().__init__('values', False)
class Where(Keyword):
    """SQL keyword ``where`` (flag=True; flag semantics defined by Keyword)."""
    def __init__(self) -> None:
        super().__init__('where', True)
|
from N4_using_dataset_to_read_the_tfrecord import *
from N0_set_config import *
from tqdm import tqdm
# from N5_1_AlexNet_with_batchnorm import *
from N5_2_slim_alexnet import *
# from N5_3_slim_vgg16 import *
def main():
    """Train the network on TFRecord data, log train/val metrics to
    TensorBoard, and keep the 3 checkpoints with the best validation
    accuracy seen so far.

    BUG FIX: the Adam optimizer was built with the constant
    `learning_rate`, so the exponential-decay schedule (`lr`) was computed,
    fetched, and printed every 50 steps but never actually applied to
    training.  The optimizer now minimizes with `lr`.
    """
    train_file_name = 'E:/111project/tfrecord/train.tfrecords'
    validation_file_name = 'E:/111project/tfrecord/validation.tfrecords'
    train_data = create_dataset(train_file_name, batch_size=batch_size,
                                resize_height=resize_height, resize_width=resize_width, num_class=num_class)
    validation_data = create_dataset(validation_file_name, batch_size=batch_size,
                                     resize_height=resize_height, resize_width=resize_width, num_class=num_class)
    train_data = train_data.repeat()
    validation_data = validation_data.repeat()
    train_iterator = train_data.make_one_shot_iterator()
    val_iterator = validation_data.make_one_shot_iterator()
    train_images, train_labels = train_iterator.get_next()
    val_images, val_labels = val_iterator.get_next()
    # Graph inputs (x, y, keep_prob, is_training) are module-level globals.
    fc3 = inference(x, num_class, keep_prob, is_training)
    with tf.name_scope('learning_rate'):
        # Step counter fed from the Python loop below.
        global_ = tf.placeholder(tf.int32)
        lr = tf.train.exponential_decay(learning_rate, global_,
                                        decay_step, decay_rate, staircase=True)
    with tf.name_scope('loss'):
        loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=fc3, labels=y))
    with tf.name_scope('optimizer'):
        # UPDATE_OPS dependency keeps batch-norm statistics updating.
        with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
            train_op = tf.train.AdamOptimizer(lr).minimize(loss_op)
    with tf.name_scope("accuracy"):
        correct_pred = tf.equal(tf.argmax(fc3, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    tf.summary.scalar('loss', loss_op)
    tf.summary.scalar('accuracy', accuracy)
    merged_summary = tf.summary.merge_all()
    saver = tf.train.Saver(max_to_keep=3)
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        train_writer = tf.summary.FileWriter(train_tensorboard_path, sess.graph)
        val_writer = tf.summary.FileWriter(val_tensorboard_path)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        max_acc = 0
        for i in tqdm(range(iteration)):
            print('\n')
            print('\niteration: {}'.format(i + 1))
            try:
                train_batch_images, train_batch_labels = sess.run([train_images, train_labels])
                train_loss, lr1, _, train_acc = sess.run([loss_op, lr, train_op, accuracy],
                                                         feed_dict={x: train_batch_images,
                                                                    y: train_batch_labels,
                                                                    keep_prob: drop_rate,
                                                                    is_training: True,
                                                                    global_: i
                                                                    })
                val_batch_images, val_batch_labels = sess.run([val_images, val_labels])
                val_loss, val_acc = sess.run([loss_op, accuracy], feed_dict={x: val_batch_images,
                                                                             y: val_batch_labels,
                                                                             keep_prob: 1.0,
                                                                             is_training: False,
                                                                             global_: i
                                                                             })
                # Every 50 iterations: print metrics and write summaries.
                if i % 50 == 0:
                    print('lr is : {}'.format(lr1))
                    print("train loss: %.6f, train acc:%.6f" % (train_loss, train_acc))
                    s = sess.run(merged_summary, feed_dict={x: train_batch_images,
                                                            y: train_batch_labels,
                                                            keep_prob: drop_rate,
                                                            is_training: True,
                                                            global_: i
                                                            })
                    train_writer.add_summary(summary=s, global_step=i)
                    print("val loss: %.6f, val acc: %.6f" % (val_loss, val_acc))
                    print('\n')
                    t = sess.run(merged_summary, feed_dict={x: val_batch_images,
                                                            y: val_batch_labels,
                                                            keep_prob: 1.0,
                                                            is_training: False,
                                                            global_: i
                                                            })
                    val_writer.add_summary(summary=t, global_step=i)
                # Checkpoint whenever validation accuracy ties or beats the best.
                if val_acc >= max_acc:
                    max_acc = val_acc
                    saver.save(sess, model_save_path + '-' + 'val_acc-%.4f' % max_acc, global_step=i)
            except tf.errors.OutOfRangeError:
                break
        print('\n********it is the end********\n')
        coord.request_stop()
        coord.join(threads)
if __name__ == '__main__':
    # Graph inputs are deliberately module-level globals so main() (and the
    # imported inference()) can reach them.
    x = tf.placeholder(tf.float32, shape=[None, resize_height, resize_width, 3], name='x')
    y = tf.placeholder(tf.int32, shape=[None, num_class], name='y')
    keep_prob = tf.placeholder(tf.float32)
    is_training = tf.placeholder(tf.bool)
    train_tensorboard_path = 'log/slim_alexnet/train/'
    val_tensorboard_path = 'log/slim_alexnet/val/'
    model_save_path = 'model/slim_alexnet/alexnet.ckpt'
    main()
|
"""
Find a contiguous sub-array of an array of size 'n' whose elements sum to X.
Each element takes part in at most 2 operations:
(a) the element is added to curr_sum;
(b) the element is subtracted from curr_sum.
So the upper bound on the number of operations is 2n, which is O(n).
"""
def find_sub_array(a, sum):
    """Sliding-window search for a contiguous sub-array of *a* summing to *sum*.

    Works for non-negative numbers.  Returns (i, j) where a[j..i] inclusive
    is the matching window; if no window matches, i == len(a) (and j is then
    meaningless).  NOTE: the parameter is named `sum` (shadowing the builtin
    inside this function) to preserve the existing call signature.

    FIX: removed the leftover debug print that fired on every window-shrink
    step, spamming stdout in what is otherwise a library function.
    """
    curr_sum = 0
    i, j = 0, 0
    while i < len(a):
        curr_sum = curr_sum + a[i]
        if curr_sum == sum:
            return (i, j)
        elif curr_sum > sum:
            # Shrink the window from the left until it no longer overshoots.
            while curr_sum > sum and j < i:
                curr_sum = curr_sum - a[j]
                j = j + 1
        if curr_sum == sum:
            return (i, j)
        else:
            i = i + 1
    return (i, j)
"""
def find_all_sub_arrays(a,sum):
curr_sum = 0
i,j= 0,0
while i < len(a):
curr_sum = curr_sum + a[i]
#print(curr_sum)
if curr_sum == sum:
print (j,i)
curr_sum = curr_sum - a[j]
j=j+1
i=i+1
elif curr_sum > sum:
while(curr_sum > sum and j<i):
print("{} - {} - {}".format(i,j,curr_sum))
curr_sum = curr_sum - a[j]
j=j+1
if(curr_sum == sum):
return i,j
else:
i=i+1
return (i,j)
"""
if __name__ == "__main__":
a = [1,5,10,100,1000]
sum = 115
i,j = find_sub_array(a,sum)
print("Found {} - {} ".format(j,i))
|
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 4 15:20:14 2021
@author: Gerry
"""
# Accumulate user-typed lines into one string until a blank line is entered.
# NOTE(review): the prompt strings appear mojibake-damaged ("fรธrste" was
# presumably Norwegian text before a re-encoding) -- confirm source encoding
# before shipping; they are left byte-identical here.
teksten = ""
tekstlinje = input("Skriv inn fรธrste linje: ")
while tekstlinje != "":
    teksten = teksten + tekstlinje + "\n"
    # "\n" is a newline (i.e. Enter), so every line typed in
    # ends up on its own line in the accumulated text.
    tekstlinje = input("Skriv inn neste linje: ")
print("Den endelige teksten ble: ")
print(teksten)
|
'''class Solution:
def maxProfit(self, prices):
ans = 0
for i in range(1, len(prices)):
if prices[i] - prices[i - 1] >0:
ans = ans + (prices[i] - prices[i - 1])
return ans
obj = Solution()
print(obj.maxProfit([7,1,5,3,6,4]))
'''
# Maximum profit from daily prices: bank every positive day-over-day gain.
p = [7, 5, 3, 6, 4]
ans = sum(b - a for a, b in zip(p, p[1:]) if b > a)
print(ans)
import requests
import urllib
import urllib.parse
def executeQuery(query, token):
    """POST `query` to the Query/execute endpoint using a bearer token."""
    headers = {'Authorization': 'Bearer ' + token}
    return requests.post(_url('Query/execute'), json=query, headers=headers)
def getAuthToken(apikey, username, password):
    """POST credentials to Users/getAuthToken; returns the raw response."""
    payload = {
        'ApiKey': apikey,
        'UserName': username,
        'PassWord': password,
    }
    return requests.post(_url('Users/getAuthToken'), json=payload)
def getCreatives(token, medio, codigo):
    """GET a creative file for media `medio` and code `codigo`.

    The original passed `medio` through urllib.parse.urlencode, which
    expects a dict/sequence of key-value pairs and would emit "k=v" text
    inside a single query value; quote() percent-encodes the value itself.
    """
    return requests.get(_url('creatives/getFile?access_token=' + token + '&med=' + urllib.parse.quote(medio, safe='') + '&cod=' + codigo))
def getQuery(query, tokenString):
    """POST `query` to Query/getQuery, percent-encoding the bearer token."""
    auth = 'Bearer ' + urllib.parse.quote(tokenString, safe='')
    return requests.post(_url('Query/getQuery'), json=query, headers={'Authorization': auth})
def _url(path):
return 'https://infoio.infoadex.es/infoioapi/v1.2/' + path
|
from __future__ import print_function
from dronekit import connect, Vehicle
from my_vehicle import MyVehicle #Our custom vehicle class
import time
def raw_imu_callback(self, attr_name, value):
    """Attribute-listener callback: print each RAW_IMU update.

    Signature follows dronekit's observer convention:
    attr_name == 'raw_imu', value == vehicle.raw_imu.
    """
    print(value)
connection_string = '/dev/ttyACM0'#args.connect
# Connect to the Vehicle (blocks until all vehicle parameters are ready).
print('Connecting to vehicle on: %s' % connection_string)
vehicle = connect(connection_string, wait_ready=True, vehicle_class=MyVehicle)
# Add observer for the custom attribute (currently disabled; the loop
# below polls the attribute directly instead).
#vehicle.add_attribute_listener('raw_imu', raw_imu_callback)
print('Display RAW_IMU messages for 1 seconds and then exit.')
#time.sleep(1)
# NOTE(review): the message above says 1 second, but this loop samples the
# attribute 10 times at 1 s intervals (~10 s total) -- confirm intent.
for i in range (0, 10):
    print (vehicle.raw_imu)
    time.sleep(1)
#The message listener can be unset using ``vehicle.remove_message_listener``
#Close vehicle object before exiting script
print("Close vehicle object")
vehicle.close()
|
import requests
# Login
# NOTE(review): credentials are hard-coded in source -- move them to
# environment variables or a local config before sharing this script.
req = {
    'email': 'ppark9553@naver.com',
    'password': '123123'
}
res = requests.post('http://127.0.0.1:8000/api/token/', data=req)
result = res.json()
# Auth page access
# JWT pair returned by the token endpoint.
refresh_token = result['refresh']
access_token = result['access']
header = {
    'Authorization': f'Bearer {access_token}'
}
# NOTE(review): this user lookup is sent WITHOUT the auth header -- confirm
# the endpoint is intentionally public.
res = requests.get('http://127.0.0.1:8000/api/user/?username=ppark9553&email=ppark9553@gmail.com')
result = res.json()['results'][0]
user_id = result['id']
print(user_id)
# data = {
# 'user': user_id,
# 'name': 'ledger_1'
# }
#
# res = requests.post('http://127.0.0.1:8000/api/ledger/', data=data, headers=header)
# print(res.json())
# Authenticated fetch of a single user record.
res = requests.get('http://127.0.0.1:8000/api/user/2/', headers=header)
result = res.json()
print(result)
|
from utils_convert_coord import coord_regular_to_decimal, coord_decimal_to_regular
import cv2
def debug_decimal_coord(img, coord_decimal, prob = None, class_id = None):
    """Draw a detection given in normalized (decimal) coords onto `img`.

    Converts to pixel-space [x, y, w, h] and delegates drawing/display to
    debug_regular_coord.  The original also made an unused copy of `img`
    (`img_cp`) before delegating; that dead work is removed.
    """
    # Only height/width are needed; works for 2- or 3-channel images.
    img_ht, img_wid = img.shape[:2]
    coord_regular = coord_decimal_to_regular(coord_decimal, img_wid, img_ht)
    debug_regular_coord(img, coord_regular, prob, class_id)
def debug_regular_coord(img, coord_regular, prob = None, class_id = None):
    """Draw a pixel-space [x, y, w, h] box (plus optional label) and show it.

    coord_regular: [x_topleft, y_topleft, w_box, h_box] in pixels.
    prob/class_id: when both are given, a grey strip with "<id> : <prob>"
    is drawn above the box.  Display goes to the 'debug_detection' window.
    """
    img_cp = img.copy()
    [x_topleft, y_topleft, w_box, h_box] = coord_regular
    cv2.rectangle(img_cp,
                  (x_topleft, y_topleft),
                  (x_topleft + w_box, y_topleft + h_box),
                  (0, 255, 0), 2)
    if prob is not None and class_id is not None:
        assert isinstance(prob, float)
        # `long` only exists on Python 2; the original's bare `(int, long)`
        # raises NameError under Python 3.  Accept both interpreters.
        try:
            integer_types = (int, long)
        except NameError:
            integer_types = (int,)
        assert isinstance(class_id, integer_types)
        # Filled grey strip as a label background.
        cv2.rectangle(img_cp,
                      (x_topleft, y_topleft - 20),
                      (x_topleft + w_box, y_topleft),
                      (125, 125, 125), -1)
        cv2.putText(img_cp,
                    str(class_id) + ' : %.2f' % prob,
                    (x_topleft + 5, y_topleft - 7),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
    cv2.imshow('debug_detection', img_cp)
    cv2.waitKey(1)
def debug_3_locations( img, gt_location, yolo_location, rolo_location):
    """Overlay ground-truth (red), YOLO (blue) and ROLO (green) boxes on `img`.

    gt_location is [x, y, w, h] with a top-left origin; the YOLO/ROLO
    locations are centre-based [cx, cy, w, h].  Shows the composite in the
    '3 locations' window and returns the annotated copy.
    """
    canvas = img.copy()
    overlays = [
        (gt_location, (0, 0, 255)),    # red: ground truth (top-left coords)
        (yolo_location, (255, 0, 0)),  # blue: YOLO (centre coords)
        (rolo_location, (0, 255, 0)),  # green: ROLO (centre coords)
    ]
    for idx, (location, color) in enumerate(overlays):
        x = int(location[0])
        y = int(location[1])
        w = int(location[2])
        h = int(location[3])
        if idx == 0:
            cv2.rectangle(canvas, (x, y), (x + w, y + h), color, 2)
        else:
            cv2.rectangle(canvas, (x - w // 2, y - h // 2), (x + w // 2, y + h // 2), color, 2)
    cv2.imshow('3 locations', canvas)
    cv2.waitKey(100)
    return canvas
|
#
# Copyright (c) 2013 crcache contributors
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
"""Configuration of cr_cache."""
import os.path
import yaml
from cr_cache import cache
from cr_cache.source import find_source_type
from cr_cache.store import local as local_store
def default_path():
    """Return the list of directories searched for configuration data.

    Defaults to ~/.config/crcache followed by $(pwd)/.crcache.
    """
    return [
        os.path.expanduser(os.path.join('~', '.config', 'crcache')),
        os.path.join(os.getcwd(), '.crcache'),
    ]
def sources(roots):
    """Return the set of known source names under `roots`."""
    # Iterating the mapping yields its keys, which are the source names.
    return set(source_dirs(roots))
def source_dirs(roots):
    """Return a map of source name -> config dir.

    Earlier roots win: a name already seen is not overridden by a later
    root.  Roots without a readable 'sources' directory are skipped.
    """
    mapping = {}
    for root in roots:
        sources_dir = os.path.join(root, 'sources')
        try:
            entries = os.listdir(sources_dir)
        except OSError:
            continue
        for entry in entries:
            mapping.setdefault(entry, os.path.join(sources_dir, entry))
    return mapping
class Config(object):
    """Represents a full configuration of crcache.

    This provides a cache of sources, allowing a recursive definition for
    source loading.
    """
    def __init__(self):
        # Map of source name -> config dir, discovered once at construction.
        self._source_dirs = source_dirs(default_path())
        # Cache of already-built Cache objects, keyed by source name.
        self._sources = {}
        self._store = local_store.Store()
    def get_source(self, name):
        """Get a cr_cache.cache.Cache configured for the source called name.

        Results are cached in self._sources, and returned from there if present.
        """
        if name in self._sources:
            return self._sources[name]
        path = self._source_dirs.get(name)
        # 'local' is special-cased: it works with an empty config even when
        # no on-disk definition exists.
        if name == 'local' and path is None:
            config = {}
            source_type = find_source_type('local')
        else:
            # NOTE(review): if `name` is unknown (and not 'local'), `path`
            # is None here and os.path.join raises TypeError -- confirm
            # whether a friendlier "unknown source" error is wanted.
            path = os.path.join(path, 'source.conf')
            with open(path, 'rt') as f:
                config = yaml.safe_load(f)
            source_type = find_source_type(config['type'])
        # Sources may recursively resolve other sources via self.get_source.
        source = source_type(config, self.get_source)
        kwargs = {}
        if 'reserve' in config:
            reserve = int(config['reserve'])
            kwargs['reserve'] = reserve
        result = cache.Cache(name, self._store, source, **kwargs)
        self._sources[name] = result
        return result
|
from enemy import Bounty_Hunter
from player import Player
from enemy import Sith
from enemy import Stomtrooper
class Game:
    """Simple turn-based fight loop: the player battles each enemy in order.

    NOTE(review): the original file did not parse (a stray ``continue``, a
    ``===`` operator, and two half-written methods); the gaps are filled in
    following the original inline comments.  The attribute/method names used
    on Player and the enemy classes (hit_points, experience, dodge, attack,
    rest) are assumed from their use here -- confirm against player.py and
    enemy.py.
    """

    def setup(self):
        """Create the player and the enemy queue, and draw the first enemy."""
        self.player = Player()
        self.enemies = [
            Stomtrooper(),
            Bounty_Hunter(),
            Sith()
        ]
        self.current_enemy = self.get_next_enemy()

    def get_next_enemy(self):
        """Pop and return the next enemy, or None when all are defeated."""
        try:
            return self.enemies.pop(0)
        except IndexError:
            return None

    def ask_to_dodge(self):
        """Ask the player to dodge an incoming attack; apply damage on failure."""
        dodge_answer = input("Do you want to try and dodge? [Y]es/[N]o").lower()
        if dodge_answer == 'y':
            if self.player.dodge():
                print("You dodged the attack!")
            else:
                print("You couldn't dodge in time!")
                self.player.hit_points -= 1
        elif dodge_answer == 'n':
            self.player.hit_points -= 1
        else:
            print("Please press Y or N to answer.")
            return self.ask_to_dodge()

    def enemy_turn(self):
        """Let the current enemy act: maybe attack, offering the player a dodge."""
        # check to see if the monster attacks
        if self.current_enemy.attack():
            # if so, tell the player
            attack_message = "You are being attacked by {}".format(self.current_enemy)
            print(attack_message)
            # check if the player wants to dodge
            self.ask_to_dodge()
        # if the monster isn't attacking, tell that to the player too.
        else:
            print("The enemy Imperial isn't attacking you. Phew!")

    def player_turn(self):
        """Let the player attack, rest, or quit the game."""
        player_choice = input("Do you want to [a]ttack, [r]est, or [q]uit the game?").lower()
        if player_choice == 'a':
            # see if the attack is successful
            if self.player.attack():
                if self.current_enemy.dodge():
                    print("The enemy dodged your attack!")
                else:
                    self.current_enemy.hit_points -= 1
            else:
                print("Your attack missed!")
        elif player_choice == 'r':
            self.player.rest()
        elif player_choice == 'q':
            raise SystemExit
        else:
            print("Please press A, R, or Q to answer.")
            self.player_turn()

    def cleanup(self):
        """Retire a defeated enemy: award experience and draw the next one."""
        if self.current_enemy is not None and self.current_enemy.hit_points <= 0:
            # presumably enemies expose an `experience` reward -- TODO confirm;
            # falls back to 1 point when the attribute is absent.
            self.player.experience += getattr(self.current_enemy, 'experience', 1)
            print("You defeated {}!".format(self.current_enemy))
            self.current_enemy = self.get_next_enemy()

    def __init__(self):
        self.setup()
        while self.player.hit_points and self.current_enemy:
            print(self.player)
            self.enemy_turn()
            self.player_turn()
            self.cleanup()
        if self.player.hit_points:
            print("You win!")
        elif self.current_enemy:
            print("You lose!")
import math
import bpy
import yerface_blender.DriverUtilities as dru
class YerFaceSceneUpdater:
    """Applies YerFace performance-capture packets to Blender scene targets.

    Targets (a translation object/bone, a rotation object/bone, a face
    armature of per-tracker bones, and a phonemes object with custom
    properties) are resolved from scene properties at construction.
    Packets flagged as "basis" capture calibration offsets; subsequent
    packets are applied relative to those offsets, either as live updates
    or as accumulated keyframes.
    """
    def __init__(self, context, myReader, fps):
        """Resolve all targets, scales, and axis maps from scene properties."""
        self.props = context.scene.yerFaceBlenderProperties
        self.keyframeHelper = dru.KeyframeHelper()
        self.translationTarget = context.scene.objects.get(self.props.translationTargetObject)
        if self.translationTarget is not None:
            # If the target is an armature and a bone name is set, drive
            # the pose bone instead of the object itself.
            if self.translationTarget.type == "ARMATURE" and len(self.props.translationTargetBone) > 0:
                bone = self.translationTarget.pose.bones.get(self.props.translationTargetBone)
                if bone is not None:
                    self.translationTarget = bone
        self.translationScale = self.props.translationScale
        self.translationAxisMap = {
            'x': self.interpretAxisMapProp(self.props.translationAxisMapX),
            'y': self.interpretAxisMapProp(self.props.translationAxisMapY),
            'z': self.interpretAxisMapProp(self.props.translationAxisMapZ)
        }
        self.rotationTarget = context.scene.objects.get(self.props.rotationTargetObject)
        if self.rotationTarget is not None:
            if self.rotationTarget.type == "ARMATURE" and len(self.props.rotationTargetBone) > 0:
                bone = self.rotationTarget.pose.bones.get(self.props.rotationTargetBone)
                if bone is not None:
                    self.rotationTarget = bone
        self.rotationScale = self.props.rotationScale
        self.rotationAxisMap = {
            'x': self.interpretAxisMapProp(self.props.rotationAxisMapX),
            'y': self.interpretAxisMapProp(self.props.rotationAxisMapY),
            'z': self.interpretAxisMapProp(self.props.rotationAxisMapZ)
        }
        self.faceArmature = None
        self.faceArmatureBones = None
        if len(self.props.faceArmatureObject) > 0:
            self.faceArmature = context.scene.objects[self.props.faceArmatureObject]
            self.faceArmatureBones = self.faceArmature.pose.bones
        self.faceBoneTranslationScale = self.props.faceBoneTranslationScale
        self.faceBoneAxisMap = {
            'x': self.interpretAxisMapProp(self.props.faceBoneAxisMapX),
            'y': self.interpretAxisMapProp(self.props.faceBoneAxisMapY),
            'z': self.interpretAxisMapProp(self.props.faceBoneAxisMapZ)
        }
        self.phonemesTarget = None
        if len(self.props.phonemesTargetObject) > 0:
            self.phonemesTarget = context.scene.objects.get(self.props.phonemesTargetObject)
        self.phonemesScale = self.props.phonemesScale
        # Calibration offsets, filled in when a basis packet arrives.
        self.locationOffsetX = 0.0
        self.locationOffsetY = 0.0
        self.locationOffsetZ = 0.0
        self.translationScaleX = self.props.translationScaleX
        self.translationScaleY = self.props.translationScaleY
        self.translationScaleZ = self.props.translationScaleZ
        self.rotationOffsetX = 0.0
        self.rotationOffsetY = 0.0
        self.rotationOffsetZ = 0.0
        self.rotationScaleX = self.props.rotationScaleX
        self.rotationScaleY = self.props.rotationScaleY
        self.rotationScaleZ = self.props.rotationScaleZ
        # Per-tracker calibration offsets and once-only warning flags.
        self.trackerOffsets = {}
        self.trackerWarnedAlready = {}
        self.phonemesWarnedAlready = {}
        self.reader = myReader
        self.fps = fps
    def flushFrame(self, flushFrameNumber = -1, discardFrameData = False):
        """Write (or discard) the keyframe data accumulated for one frame."""
        self.keyframeHelper.flushFrame(flushFrameNumber, discardFrameData, self.props.samplingMode)
        if self.props.tickCallback != "":
            # Mirror the flush to the user-registered tick callback.
            tickProps = {
                'userData': self.props.tickUserData,
                'resetState': False,
                'perfcapPacket': {},
                'insertKeyframes': True,
                'currentFrameNumber': flushFrameNumber,
                'flushLastFrame': True,
                'discardLastFrameData': discardFrameData,
                'samplingMode': self.props.samplingMode,
                'framesPerSecond': self.fps
            }
            bpy.app.driver_namespace[self.props.tickCallback](properties=tickProps)
    def runUpdate(self, insertKeyframes = False, currentFrameNumber = -1):
        """Consume pending packets and apply them to the scene targets.

        insertKeyframes: accumulate keyframe data via the KeyframeHelper
        instead of updating targets live.
        """
        packets = self.reader.returnNextPackets()
        tickProps = {}
        if self.props.tickCallback != "":
            tickProps = {
                'userData': self.props.tickUserData,
                'resetState': False,
                'perfcapPacket': {},
                'insertKeyframes': insertKeyframes,
                'currentFrameNumber': currentFrameNumber,
                'flushLastFrame': False,
                'discardLastFrameData': False,
                'samplingMode': None,
                'framesPerSecond': self.fps
            }
        # NOTE(review): unlike every other callback invocation, this one is
        # not guarded by tickCallback != "" -- confirm an empty callback
        # name cannot reach this branch.
        if len(packets) < 1:
            bpy.app.driver_namespace[self.props.tickCallback](properties=tickProps)
        for packet in packets:
            if 'events' in packet:
                del packet["events"]
            if self.props.tickCallback != "":
                tickProps['perfcapPacket'] = packet
                bpy.app.driver_namespace[self.props.tickCallback](properties=tickProps)
            # Basis packets define the calibration pose: only capture offsets.
            if packet['meta']['basis']:
                if 'pose' in packet:
                    translation = self.TranslationTargetCoordinateMapper(packet['pose']['translation'])
                    self.locationOffsetX = translation['x']
                    self.locationOffsetY = translation['y']
                    self.locationOffsetZ = translation['z']
                    rotation = self.RotationTargetRotationMapper(packet['pose']['rotation'])
                    self.rotationOffsetX = rotation['x']
                    self.rotationOffsetY = rotation['y']
                    self.rotationOffsetZ = rotation['z']
                if 'trackers' in packet:
                    for name, tracker in packet['trackers'].items():
                        translation = self.FaceBoneCoordinateMapper(tracker['position'])
                        self.trackerOffsets[name] = {}
                        self.trackerOffsets[name]['x'] = translation['x']
                        self.trackerOffsets[name]['y'] = translation['y']
                        self.trackerOffsets[name]['z'] = translation['z']
            # Apply head pose relative to the calibration offsets.
            if 'pose' in packet:
                if self.translationTarget is not None:
                    translation = self.TranslationTargetCoordinateMapper(packet['pose']['translation'])
                    newValues = {
                        "x": self.translationScaleX * (translation['x'] - self.locationOffsetX),
                        "y": self.translationScaleY * (translation['y'] - self.locationOffsetY),
                        "z": self.translationScaleZ * (translation['z'] - self.locationOffsetZ)
                    }
                    if insertKeyframes:
                        self.keyframeHelper.accumulateFrameData(localKey="translationTarget", target=self.translationTarget, dataPath="location", newValues=newValues, anticipation=self.props.translationAnticipationFrames)
                    else:
                        dru.handleUpdateTargetAll(target=self.translationTarget, dataPath="location", newValues=newValues)
                if self.rotationTarget is not None:
                    rotation = self.RotationTargetRotationMapper(packet['pose']['rotation'])
                    self.rotationTarget.rotation_mode = 'XYZ'
                    newValues = {
                        "x": math.radians(self.rotationScaleX * (rotation['x'] - self.rotationOffsetX)),
                        "y": math.radians(self.rotationScaleY * (rotation['y'] - self.rotationOffsetY)),
                        "z": math.radians(self.rotationScaleZ * (rotation['z'] - self.rotationOffsetZ))
                    }
                    if insertKeyframes:
                        self.keyframeHelper.accumulateFrameData(localKey="rotationTarget", target=self.rotationTarget, dataPath="rotation_euler", newValues=newValues, anticipation=self.props.rotationAnticipationFrames)
                    else:
                        dru.handleUpdateTargetAll(target=self.rotationTarget, dataPath="rotation_euler", newValues=newValues)
            # Apply per-tracker positions to the matching armature bones.
            if 'trackers' in packet and self.faceArmatureBones is not None:
                for name, tracker in packet['trackers'].items():
                    if name not in self.trackerOffsets:
                        self.trackerOffsets[name] = {'x': 0.0, 'y': 0.0, 'z': 0.0}
                    if name not in self.faceArmatureBones:
                        if name not in self.trackerWarnedAlready:
                            print("Could not operate on bone " + name + " because it does not exist within armature!")
                            self.trackerWarnedAlready[name] = True
                    else:
                        bone = self.faceArmatureBones[name]
                        translation = self.FaceBoneCoordinateMapper(tracker['position'])
                        newValues = {
                            "x": translation['x'] - self.trackerOffsets[name]['x'],
                            "y": translation['y'] - self.trackerOffsets[name]['y'],
                            "z": translation['z'] - self.trackerOffsets[name]['z']
                        }
                        if insertKeyframes:
                            self.keyframeHelper.accumulateFrameData(localKey="armatureBone-" + name, target=bone, dataPath="location", newValues=newValues, anticipation=self.props.faceAnticipationFrames)
                        else:
                            dru.handleUpdateTargetAll(target=bone, dataPath="location", newValues=newValues)
            # Apply scaled phoneme weights to custom object properties.
            if 'phonemes' in packet and self.phonemesTarget is not None:
                for p, val in packet['phonemes'].items():
                    name = "Phoneme." + p
                    if name not in self.phonemesTarget:
                        if name not in self.phonemesWarnedAlready:
                            print("Could not operate on phoneme property " + name + " because it does not exist as an object property!")
                            self.phonemesWarnedAlready[name] = True
                    else:
                        newValues = {
                            "phoneme": val * self.phonemesScale
                        }
                        self.phonemesTarget[name] = newValues["phoneme"]
                        if insertKeyframes:
                            self.keyframeHelper.accumulateFrameData(localKey="phoneme-" + name, target=self.phonemesTarget, dataPath="[\"" + name + "\"]", newValues=newValues, anticipation=self.props.phonemesAnticipationFrames)
                        else:
                            dru.handleUpdateTargetAll(target=self.phonemesTarget, dataPath="[\"" + name + "\"]", newValues=newValues)
                            ### FIXME: Not sure of the best way to mark the object dirty after updating custom properties. This works, but it's a hack.
                            if not insertKeyframes:
                                self.phonemesTarget.location.x += 0.0
    def TranslationTargetCoordinateMapper(self, inputs):
        """Remap a packet translation dict through the translation axis map."""
        outputs = {}
        # NOTE(review): this mutates the caller's dict by adding a '_' key
        # (the sink for unmapped axes).
        inputs['_'] = 0.0
        for i in ['x', 'y', 'z']:
            outputs[i] = inputs[self.translationAxisMap[i]['axis']] * self.translationAxisMap[i]['invert'] * self.translationScale
        return outputs
    def RotationTargetRotationMapper(self, inputs):
        """Remap a packet rotation dict through the rotation axis map."""
        outputs = {}
        inputs['_'] = 0.0
        for i in ['x', 'y', 'z']:
            outputs[i] = inputs[self.rotationAxisMap[i]['axis']] * self.rotationAxisMap[i]['invert'] * self.rotationScale
        return outputs
    def FaceBoneCoordinateMapper(self, inputs):
        """Remap a tracker position dict through the face-bone axis map."""
        outputs = {}
        inputs['_'] = 0.0
        for i in ['x', 'y', 'z']:
            outputs[i] = inputs[self.faceBoneAxisMap[i]['axis']] * self.faceBoneAxisMap[i]['invert'] * self.faceBoneTranslationScale
        return outputs
    def interpretAxisMapProp(self, prop):
        """Parse an axis-map property like 'n.x' into {'invert', 'axis'}.

        A leading 'n' (negative) before the dot flips the sign; the part
        after the dot names the source axis.
        """
        parts = prop.split('.')
        invert = 1.0
        if parts[0] == 'n':
            invert = -1.0
        # NOTE(review): a prop without a '.' makes parts[1] raise
        # IndexError -- confirm the property UI guarantees the format.
        return {'invert': invert, 'axis': parts[1]}
|
import torch
from torch import nn
from torch import optim
from torch.utils import data
import torch.nn.functional as F
import numpy as np
import pandas as pd
import os
class Dataset(data.Dataset):
    """Pairs rows of an input CSV with rows of an output CSV by index."""

    def __init__(self, input_file, output_file):
        # Load both CSVs eagerly as numpy arrays (pandas consumes the
        # header row).
        self.input = pd.read_csv(input_file).values
        self.output = pd.read_csv(output_file).values

    def __getitem__(self, item):
        # One (features, labels) pair per row index.
        return self.input[item], self.output[item]

    def __len__(self):
        # Number of rows in the input table.
        return len(self.input)
class net_Task8(torch.nn.Module): # accuracy achieves 0.70 within 1 epoch
    """Stacked (bi-)GRU model: 20 digit tokens in -> 20x10 class logits out."""
    def __init__(self):
        super(net_Task8, self).__init__()
        # Embedding for digit tokens 0-9.
        self.emb = nn.Embedding(10, 8) #0-9
        self.gru1 = nn.GRU(8, 16, batch_first=True, bidirectional=True)
        self.gru2 = nn.GRU(32, 20, batch_first=True, bidirectional=True)
        self.gru3 = nn.GRU(40, 20, batch_first=True)
        # 20 timesteps x 20 features flattened (400), projected to
        # 200 = 20 positions x 10 classes.
        self.dense = nn.Linear(400, 200)
    def forward(self,x):
        x = self.emb(x) # (batch, 20, 8)
        x, _ = self.gru1(x) # (batch, 20, 32) -- bidirectional doubles 16
        x = F.relu(x)
        x, _ = self.gru2(x) # (batch, 20, 40)
        x = F.relu(x)
        x, _ = self.gru3(x) # (batch, 20, 20)
        x = F.relu(x)
        x = x.reshape(x.shape[0], -1) # (batch, 400)
        x = self.dense(x) # (batch, 200)
        x = x.reshape(x.shape[0], 20, 10) # (batch, 20, 10) logits per position
        return x
def accuracy(predict, output):
    """Element-wise accuracy of argmax(predict) against integer labels.

    predict: float tensor of logits, shape (..., num_classes).  It may be
    part of an autograd graph; it is detached before the numpy conversion
    (the original called .numpy() directly, which raises RuntimeError on
    tensors that require grad -- exactly the case in the training loop).
    output: integer label tensor matching predict's leading shape.
    Returns a numpy float in [0, 1].
    """
    predict = F.softmax(predict, dim=-1)  # monotonic: argmax unchanged
    predict = torch.max(predict, dim=-1)[1]  # predicted class indices
    pre_num = predict.detach().cpu().numpy()
    out_num = output.detach().cpu().numpy()
    acc = np.mean(pre_num == out_num)
    return acc
if __name__=='__main__':
    # Train for the configured number of epochs on the task-8 CSVs in cwd,
    # then evaluate on the reversed-sequence test set.
    cwd = os.getcwd()
    input_file = cwd + '/task8_train_input.csv'
    output_file = cwd + '/task8_train_output.csv'
    params = {'lr': 0.02,
              'epoches': 1,
              'batch_size': 32}
    dataset = Dataset(input_file, output_file)
    dataloader = data.DataLoader(dataset, batch_size=params['batch_size'], shuffle=True)
    net = net_Task8()
    optimizer = optim.Adam(net.parameters(), lr=params['lr'])
    loss_fn = nn.CrossEntropyLoss()
    for e in range(params['epoches']):
        result = []
        for step, batch_data in enumerate(dataloader):
            net.zero_grad()
            input = batch_data[0]
            output = batch_data[1]
            predict = net(input)
            # Flatten positions so CrossEntropyLoss sees (N*20, 10) vs (N*20,).
            predict = predict.reshape(-1, 10)
            output = output.reshape(-1)
            loss = loss_fn(predict, output)
            loss.backward()
            optimizer.step() # apply gradients
            # NOTE(review): predict still carries grad here -- accuracy()
            # must detach before calling .numpy().
            temp_acc = accuracy(predict, output)
            result.append(temp_acc)
            print('Epoch [ %d] step: %d Accuracy : %s'%(e, step, temp_acc))
        print('Final 100 step mean accuracy:', np.mean(result[-100:]))
    # eval
    test_input_file = cwd + '/task8_test_input.csv'
    test_dataset = pd.read_csv(test_input_file).values
    test_dataset = torch.from_numpy(test_dataset)
    test_predict = net(test_dataset)
    # Build the expected outputs: each row's nonzero prefix, reversed.
    labels = []
    for i in test_dataset:
        label = np.zeros(20)
        # NOTE(review): len(np.nonzero(i)) is the length of the returned
        # TUPLE (always 1 for a 1-D row) -- likely meant
        # len(np.nonzero(i)[0]); confirm against the task definition.
        length = len(np.nonzero(i))
        label[:length] = list(reversed(i[:length]))
        labels.append(label)
    labels = torch.tensor(labels)
    acc = accuracy(test_predict, labels)
    print(acc)
|
from django.urls import path
from user import views
# URL namespace for reversing, e.g. reverse('user:login_page').
app_name = 'user'
# Registration, auth, and verification endpoints; each route maps 1:1 onto
# a view of the same name in user/views.py.
urlpatterns = [
    path('register_page/', views.register_page, name='register_page'),
    path('check_username/', views.check_username, name='check_username'),
    path('check_captcha/', views.check_captcha, name='check_captcha'),
    path('register_logic/', views.register_logic, name='register_logic'),
    path('login_page/', views.login_page, name='login_page'),
    path('login_logic/', views.login_logic, name='login_logic'),
    path('register_ok/', views.register_ok, name='register_ok'),
    path('sign_out/', views.sign_out, name='sign_out'),
    path('send_email/', views.send_email, name='send_email'),
    path('check_code/', views.check_code, name='check_code'),
]
|
class Solution:
    def longestCommonPrefix(self, strs: List[str]) -> str:
        """Return the longest common prefix of all strings in `strs`.

        Seeds the candidate with the shortest string (the answer can
        never be longer) and trims it until every string starts with it.
        Unlike the original, the input list is NOT mutated -- the
        original sorted `strs` in place, a surprising side effect for
        callers.
        """
        if not strs:
            return ""
        # Shortest string bounds the prefix; min() avoids the in-place sort.
        prefix = min(strs, key=len)
        for s in strs:
            while not s.startswith(prefix):
                prefix = prefix[:-1]
                if not prefix:
                    return ""
        return prefix
from PyQt5.QtWidgets import QPushButton, QWidget
from PyQt5.QtGui import QFont
class Dashboard(QWidget):
    """Main dashboard window exposing the record/profile action buttons."""

    def __init__(self, parent = None):
        super(Dashboard, self).__init__(parent)
        self.initDashboardUI()

    def initDashboardUI(self):
        """Create, style, size, and position the five dashboard buttons."""
        self.setGeometry(525, 225, 1080, 720)
        customFont = QFont("SansSerif", 16)
        # (attribute name, label, x, y) for each action button.
        button_specs = [
            ('makeRecordEntryButton', 'Enter a new record', 230, 200),
            ('viewProfileButton', 'View Profile', 600, 200),
            ('editPrevRecordButton', 'Edit a record', 230, 260),
            ('viewPatientRecordButton', 'View Patient Records', 600, 260),
            ('registerPatientButton', 'Register Patient', 230, 320),
        ]
        for attr, label, x, y in button_specs:
            button = QPushButton(label, self)
            button.setFont(customFont)
            button.resize(250, 50)
            button.move(x, y)
            setattr(self, attr, button)
# How to merge dictionaries in Python 3.5+ (PEP 448 unpacking).
# Keys in the right-hand dict win on collision.
diccionario_x = {'nombre': 'Javier', 'Edad': 34, 'Ciudad': 'Santiago'}
diccionario_y = {'nombre': 'Claudia', 'Edad': 29}
merge_diccionarios = {**diccionario_x, **diccionario_y}
print(merge_diccionarios)
|
#!/usr/bin/python
#lmx2322 freq set
# CK PA5
# DI PA7
# LOAD PA4
# EN PA3
from __future__ import print_function
from uakeh import Uakeh
import sys
import os
import struct
import time
gp_config_str = ["gp setcfg a 4 out pp 10",
"gp setcfg a 5 out pp 10",
"gp setcfg a 7 out pp 10",
"gp setcfg a 3 out pp 2"]
gpio_write_str = "gp wr {:s} {:d}"
usage_str = "Usage: {:s} <serial_dev>"
def init_uakeh(uakeh):
    """Configure all four GPIO pins on the Uakeh board as outputs."""
    for s in gp_config_str:
        uakeh.write_waitok(s)
def set_gpio(uakeh, g, val):
    """Drive GPIO `g` (e.g. "a 5") to logic level `val` (0 or 1)."""
    cmd = gpio_write_str.format(g, val)
    uakeh.write_waitok(cmd)
def write(uakeh, word):
    """Clock an 18-bit word into the LMX2322, MSB first.

    word: list of 18 bits (0/1).  Pulses the LOAD line (PA4) around the
    transfer and shifts each bit in with push_bit; the 10 ms sleeps pace
    the bit-banged interface.
    """
    assert(len(word) == 18)
    set_gpio(uakeh, "a 5", 0) # ck low
    set_gpio(uakeh, "a 4", 1) # CS
    time.sleep(0.01)
    set_gpio(uakeh, "a 4", 0) # CS
    time.sleep(0.01)
    for bit in word:
        push_bit(uakeh, bit)
    set_gpio(uakeh, "a 4", 1) # CS
    time.sleep(0.01)
def push_bit(uakeh, bit):
    """Shift one bit out: data is set while CK is low, latched on the rising edge."""
    set_gpio(uakeh, "a 5", 0) # ck low
    time.sleep(0.01)
    set_gpio(uakeh, "a 7", bit) # data
    time.sleep(0.01)
    set_gpio(uakeh, "a 5", 1) # ck high
    time.sleep(0.01)
def enable(uakeh):
    """Raise the chip-enable line (PA3)."""
    set_gpio(uakeh, "a 3", 1) # enable
def calc_a_b(N):
    """Split counter value N into (A, B) for the dual-modulus prescaler P=32.

    N = B * P + A.  Uses floor division so the result is correct (and an
    int) on both Python 2 and Python 3: the original's `N / P` is float
    division under Python 3, which silently produced A == 0.0 and a
    fractional B, corrupting the bit pattern built in set_N.
    """
    P = 32
    B, A = divmod(N, P)
    return (A, B)
def set_N(uakeh, n):
    """Program the N counter: 10 B bits, 5 A bits (both MSB first), then
    cnt_rst=0, pwdn=0, and register-select=0 (N register)."""
    a, b = calc_a_b(n)
    word = [(b >> i) & 1 for i in reversed(range(10))]
    word += [(a >> i) & 1 for i in reversed(range(5))]
    word += [0, 0, 0]  # cnt_rst, pwdn, select N register
    write(uakeh, word)
def set_R(uakeh, r):
    """Program the R divider: control bits, 10 R bits MSB first, then
    register-select=1 (R register)."""
    word = [0, 0, 0, 0, 0, 0, 0]  # X X X tst rs pol cptri
    word += [(r >> i) & 1 for i in reversed(range(10))]
    word.append(1)  # select R register
    write(uakeh, word)
if len(sys.argv) != 2:
    print (usage_str.format(sys.argv[0]))
    exit(-1)
try:
    uakeh = Uakeh(sys.argv[1])
# NOTE(review): this bare except also swallows KeyboardInterrupt --
# consider catching the serial library's error type instead.
except:
    print("cannot open serial dev")
    exit(-2)
# Program the synthesizer: configure GPIOs, load the R divider and N
# counter, then raise chip-enable.
init_uakeh(uakeh)
set_R(uakeh, 317)
set_N(uakeh, 31800)
enable(uakeh)
|
import json
import argparse
import random
import sys
import requests
# NOTE(review): third-party API key checked into source -- rotate it and
# load it from the environment instead of hard-coding it here.
BIGHUGELABS_API_KEY = 'f79909b74265ba8593daf87741f3c874'
# Filler vocabulary pools (buzzWords is currently unused in this module).
buzzWords = ['alignment','bot', 'collusion', 'derivative', 'engagement', 'focus', 'gathering' ,'housing','liability','management','nomenclature','operation','procedure','reduction','strategic','technology','undertaking','vision','widget','yardbird']
forbiddenWords = ['who','what','when','where','why','were','am','and','there','their']
class AcronymLetter:
    """One acronym letter together with its ranked candidate words."""

    def __init__(self, letter, word_list):
        self.letter = letter.upper()
        self.words = word_list

    def __str__(self):
        # Empty word list renders as an empty string, matching the
        # original accumulator behaviour.
        if not self.words:
            return ''
        rendered = [str(w) for w in self.words]
        return self.letter + " - " + ", ".join(rendered)
class Word:
    """A candidate word paired with its thesaurus-derived priority."""

    def __init__(self, word, priority):
        self.word = word
        self.priority = priority

    def __str__(self):
        return "{} : {}".format(self.word, self.priority)
def acronym_finder(inputAcronym, inputGeneralKeywords, numOutputs=5, minWordLength=2):
    """Generate `numOutputs` candidate expansions for `inputAcronym`.

    For each keyword, synonyms come from the BigHugeLabs thesaurus and are
    ranked by how often they recur; random Wordnik nouns pad out each
    letter's candidate list.  Fixes vs. the original:
      * `thesaurusJson` could be referenced while unbound when the first
        thesaurus request was not a 200 -- it is now guarded.
      * `sorted(wordList, ...)` discarded its result; the list is now
        sorted in place, highest priority first (matching the
        insert-at-front promotion used while ranking).
      * Letter matching was case-sensitive against the upper-cased
        AcronymLetter, so lowercase acronyms never matched.
      * Each acronym letter now consumes exactly one word per output.
    """
    acronym = []  # AcronymLetter objects, one batch per keyword
    inputError = False
    if minWordLength < 2:
        print('You dun goofed. Minimum word length must be greater than 1')
        inputError = True
    if numOutputs < 1:
        print('WTF! How does it make sense to print any less than 1 output?')
        inputError = True
    if inputError:
        sys.exit()
    # Generate possible word names from the synonym API
    for keyword in inputGeneralKeywords:
        thesaurusList_url = "http://words.bighugelabs.com/api/2/" + BIGHUGELABS_API_KEY + "/" + keyword + "/json"
        thesaurusResponse = requests.get(thesaurusList_url)
        thesaurusJson = None
        if thesaurusResponse.status_code == 200:
            thesaurusJson = json.loads(thesaurusResponse.text)
        # A 404 is normal for some words: no thesaurus entry.
        elif thesaurusResponse.status_code == 404:
            continue
        else:
            print("Shit: " + str(thesaurusResponse.status_code))
        distinctLetters = list(set(inputAcronym))
        # Rank possible synonym words for each letter in the acronym
        for letter in distinctLetters:
            firstLetter = letter.lower()
            wordList = []
            if thesaurusJson is not None:
                for wordType in thesaurusJson.keys():
                    for meaningType in thesaurusJson[wordType].keys():
                        for word in thesaurusJson[wordType][meaningType]:
                            if word[0] == firstLetter and word.count(' ') == 0 and len(word) >= minWordLength:
                                for w in wordList:
                                    if w.word == word:
                                        # Seen again: bump priority and move to front.
                                        priority = w.priority + 1
                                        wordList.remove(w)
                                        wordList.insert(0, Word(word, priority))
                                        break
                                else:
                                    wordList.append(Word(word, 1))
            # Pad with random nouns from Wordnik (priority 0).
            randomWords_url = "http://api.wordnik.com:80/v4/words.json/search/" + firstLetter + "?caseSensitive=false&includePartOfSpeech=noun&minCorpusCount=5&maxCorpusCount=-1&minDictionaryCount=1&maxDictionaryCount=-1&minLength=" + str(minWordLength) + "&maxLength=-1&skip=0&limit=" + str(4 * minWordLength * minWordLength * minWordLength) + "&api_key=a2a73e7b926c924fad7001ca3111acd55af2ffabf50eb4ae5"
            randomWordsResponse = requests.get(randomWords_url)
            if randomWordsResponse.status_code == 200:
                randomWordsJson = json.loads(randomWordsResponse.text)
                for entry in randomWordsJson["searchResults"]:
                    word = entry["word"]
                    if word[0] == firstLetter and len(word) >= minWordLength and word.count(' ') == 0:
                        wordList.append(Word(word, 0))
            # Highest priority first (was: `sorted(...)` with the result discarded).
            wordList.sort(key=lambda w: w.priority, reverse=True)
            acronym.append(AcronymLetter(firstLetter, wordList))
    # Generate possible acronym results
    winners = []
    for _ in range(0, numOutputs):
        winner = ''
        for c in inputAcronym:
            for letter in acronym:
                # AcronymLetter upper-cases its letter; compare accordingly.
                if letter.letter == c.upper():
                    try:
                        word = letter.words.pop(0)
                        winner = word.word if not winner else winner + ' ' + word.word
                    except IndexError:
                        print("Can't get all {} words".format(len(acronym)))
                    break  # consume one word per acronym letter
        winners.append(winner)
    return winners
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='De-Generate Acronym')
parser.add_argument('acronym', metavar='ACR',help='the acronym')
parser.add_argument('--numOutputs', metavar='NOU',type=int, help='number of outputs', default=1)
parser.add_argument('--minLength', metavar='MIN',type=int, help='minimum length of words used', default=2)
parser.add_argument('keywords', metavar='KEY', nargs='+',help='some keywords')
args = parser.parse_args()
winner_list = acronym_finder(
inputAcronym=args.acronym,
numOutputs=args.numOutputs,
inputGeneralKeywords=args.keywords,
minWordLength=args.minLength)
print('\n'.join(winner_list))
# Test call
# print(acronym_finder('hello', 5, 'world'))
|
# Decide whether "hello" appears as a subsequence of the input string.
s = input()
letters = "hello"
matched = 0
for ch in s:
    if matched < len(letters) and ch == letters[matched]:
        matched += 1
print("YES" if matched == len(letters) else "NO")
|
from django.urls import reverse
from restaurants.models import Restaurant
def test_list(client, db, make_restaurant):
    """GET on the list endpoint returns the seeded restaurant as JSON.

    NOTE(review): the expected payload is coupled to make_restaurant's
    defaults -- presumably the fixture seeds the name "Fifolino"; confirm.
    """
    make_restaurant()
    response = client.get(reverse("restaurant-list"))
    assert response.status_code == 200
    assert response.json() == [{"name": "Fifolino"}]
def test_post(client, db):
    """POST creates a restaurant and echoes the submitted data back."""
    data = {"name": "Fifocoin"}
    response = client.post(reverse("restaurant-list"), data=data)
    assert response.status_code == 201
    assert response.json() == data
def test_put(client, db, make_restaurant):
    """PUT on the detail endpoint renames an existing restaurant."""
    restaurant = make_restaurant()
    data = {"name": "Fifocoins"}
    # Django's test client needs an explicit content type for PUT bodies.
    response = client.put(
        reverse("restaurant-detail", kwargs={"pk": restaurant.id}),
        data=data,
        content_type="application/json",
    )
    assert response.status_code == 200
    assert response.json() == data
def test_delete(client, db, make_restaurant):
    """DELETE on the detail endpoint removes the restaurant (204, no body)."""
    restaurant = make_restaurant()
    response = client.delete(
        reverse("restaurant-detail", kwargs={"pk": restaurant.id}),
    )
    assert response.status_code == 204
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 18 11:04:19 2020
@author: chandan
"""
import time
import random
import pandas as pd
from Optimizer.solution import Solution
from Optimizer.coordinator import CoOrdinator
from Optimizer.Genetic_alg import GASelectionEnum
from Optimizer.data_fjs import Data_Flexible_Job_Shop
from Optimizer.data_normal_job_shop import Data_Normal_Job_Shop
from Rescheduling.utility import get_machine_data
# Shared flags forwarded to every search call below.
benchmark = True      # collect benchmark statistics during the search
verbose = True        # print progress information
progress_bar = False  # console progress bar disabled
def perform_Tabu_Search(co_ordinator_agent, initial_population=""):
    """
    Run a parallel Tabu search and return its best solution.

    Parameters
    ----------
    co_ordinator_agent : CoOrdinator
        Co-ordinator that owns the search agents.
    initial_population : str, optional
        Name of the algorithm ("tabu_search" or "genetic") whose solutions
        seed the search; an empty string starts from scratch.

    Returns
    -------
    Best solution found by the Tabu search.
    """
    runtime = 15  # wall-clock budget in seconds
    num_processes = 4
    num_solutions_per_process = 1
    tabu_list_size = 10
    neighborhood_size = 50
    neighborhood_wait = 0.12
    probability_change_machine = 0.25
    reset_threshold = 50
    population = None
    # BUG FIX: `is not ""` compared identity, not equality (and raises a
    # SyntaxWarning on Python >= 3.8); use `!=` for string comparison.
    if initial_population != "":
        population = get_initial_population(co_ordinator_agent, initial_population)
    return co_ordinator_agent.tabu_search_time(runtime=runtime,
                                               initial_solutions=population,
                                               num_processes=num_processes,
                                               num_solutions_per_process=num_solutions_per_process,
                                               tabu_list_size=tabu_list_size,
                                               neighborhood_size=neighborhood_size,
                                               neighborhood_wait=neighborhood_wait,
                                               probability_change_machine=probability_change_machine,
                                               reset_threshold=reset_threshold,
                                               benchmark=benchmark,
                                               verbose=verbose,
                                               progress_bar=progress_bar)
def perform_Genetic_Search(co_ordinator_agent, initial_population=""):
    """
    Run the genetic algorithm and return its best solution.

    Parameters
    ----------
    co_ordinator_agent : CoOrdinator
        Co-ordinator that owns the search agents.
    initial_population : str, optional
        Name of the algorithm whose solutions seed the GA; an empty string
        starts from a random population.

    Returns
    -------
    Best solution found by the genetic algorithm.
    """
    runtime = 15  # wall-clock budget in seconds
    mutation_probability = 0.25
    population_size = 100
    selection_method = GASelectionEnum.FITNESS_PROPORTIONATE
    selection_size = 8
    population = None
    # BUG FIX: `is not ""` compared identity, not equality (SyntaxWarning on
    # Python >= 3.8); use `!=` for string comparison.
    if initial_population != "":
        population = get_initial_population(co_ordinator_agent, initial_population)
    # BUG FIX: the docstring promised the best solution but nothing was
    # returned; propagate the co-ordinator's result to the caller.
    return co_ordinator_agent.genetic_algorithm_time(runtime=runtime,
                                                     population=population,
                                                     population_size=population_size,
                                                     selection_method_enum=selection_method,
                                                     selection_size=selection_size,
                                                     mutation_probability=mutation_probability,
                                                     benchmark=benchmark,
                                                     verbose=verbose,
                                                     progress_bar=progress_bar)
def get_initial_population(co_ordinator_agent, agent_type):
    """
    Build an initial population of Solution objects from a previously-run
    algorithm.

    Parameters
    ----------
    co_ordinator_agent : CoOrdinator
        Co-ordinator holding the previously-run agents.
    agent_type : str
        "tabu_search" or "genetic"; empty/None returns None.

    Returns
    -------
    list of Solution, or None when no agent type was given.
    """
    # BUG FIX: `is ""` compared identity for the string case (SyntaxWarning
    # on Python >= 3.8); use falsiness, which also covers None.
    if not agent_type:
        return None
    population = []
    if agent_type == "tabu_search":
        for ts_agent in co_ordinator_agent.ts_agent_list:
            if ts_agent is not None:
                population += ts_agent.all_solutions
        population = random.sample(population, 4)
    elif agent_type == "genetic":
        if co_ordinator_agent.ga_agent is not None:
            population = co_ordinator_agent.ga_agent.memory  # Pareto memory (DataFrame) from GA
            initial_solution = list()
            initial_solution.append(co_ordinator_agent.ga_agent.solution_df)  # best solution from GA
            for i in range(3):
                solution = population.sample()
                initial_solution.append(solution)
                # BUG FIX: pandas DataFrames have no `remove` method; drop
                # the sampled row by index so it cannot be drawn twice.
                population = population.drop(solution.index)
            population = pd.concat(initial_solution)
            result_population = list()
            # Convert the solution DataFrame rows into Solution objects.
            for index, row in population.iterrows():
                sol_obj = Solution(row.data, row.operation_2d_array, dict_to_obj=True,
                                   makespan=row.makespan, stock_cost=row.stock_cost,
                                   machine_makespans=row.machine_makespans)
                result_population.append(sol_obj)
            population = result_population
    return population
def generate_output(co_ordinator_agent, job_mapping, schedule_type='initial',
                    preschedule_idle=False, schedule_alg="hybrid"):
    """
    Export the solution as an Excel file plus algorithm-performance plots.

    Parameters
    ----------
    co_ordinator_agent : CoOrdinator
        Co-ordinator whose benchmark results are written out.

    Returns
    -------
    Whatever the co-ordinator's benchmark writer returns.
    """
    destination = './example_output'
    return co_ordinator_agent.output_benchmark_results(
        destination, job_mapping,
        schedule_type=schedule_type,
        preschedule_idle=preschedule_idle,
        schedule_alg=schedule_alg)
'''
Practical problems and rescheduling experiments
'''
# =============================================================================
#
# def main(machine_df, job_operation_df, objective_params, reschedule=False,
# preschedule_idle=False, schedule_alg="hybrid"):
# while True:
# try:
# data_agent = Data_Flexible_Job_Shop('data/seq_dep_matrix_2.xlsx',
# machine_df,
# job_operation_df)
# break
# except Exception as e:
# print(str(e))
# print('Waiting for machine to get fixed')
# time.sleep(60)
# machine_df = get_machine_data()
# machine_df['machine_id'] = machine_df['machine_id'].replace({'M':''}, regex=True)
# machine_df['machine_id'] = machine_df['machine_id'].astype(int)
#
#
# data_agent = Data_Flexible_Job_Shop('data/seq_dep_matrix_2.xlsx',
# machine_df,
# job_operation_df)
# job_mapping = data_agent.job_mapping.drop_duplicates(subset=['prod_name'], keep='last')
# co_ordinator_agent = CoOrdinator(data_agent, objective_params, reschedule, preschedule_idle)
# perform_Genetic_Search(co_ordinator_agent, initial_population="")
# best_solution = perform_Tabu_Search(co_ordinator_agent, initial_population="genetic")
# schedule_type = 'initial'
# if reschedule:
# schedule_type = 'reschedule'
# schedule = generate_output(co_ordinator_agent, job_mapping,
# schedule_type, preschedule_idle,
# schedule_alg)
# return schedule, best_solution
#
# =============================================================================
'''
Benchmark Problems
'''
if __name__ == "__main__":
    # Relative weight of each objective in the combined fitness score.
    objective_params = {'makespan': 0.6, 'stock_cost': 0.2, 'tardiness_cost': 0.2, 'stability': 0}
    data_agent = Data_Normal_Job_Shop('a.txt')
    co_ordinator_agent = CoOrdinator(data_agent, objective_params)
    # Run the GA first, then seed the Tabu search with the GA's solutions.
    perform_Genetic_Search(co_ordinator_agent, initial_population="")
    best_solution = perform_Tabu_Search(co_ordinator_agent, initial_population="genetic")
    print(best_solution.makespan)
    print(best_solution.stock_cost)
    print(best_solution.tardiness_cost)
import aws_encryption_sdk
from aws_encryption_sdk.key_providers.raw import RawMasterKey
from aws_encryption_sdk.internal.crypto.wrapping_keys import WrappingKey
from aws_encryption_sdk.key_providers.raw import RawMasterKeyProvider
from aws_encryption_sdk.identifiers import CommitmentPolicy, EncryptionKeyType, WrappingAlgorithm
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.serialization import load_pem_public_key
from cryptography.hazmat.primitives.asymmetric import rsa
"""class OwnPublicKeyProvider(RawMasterKeyProvider):
def __init__(self, **kwargs):
self._keys = {}
def _get_raw_key(self, key_id):
public_key_pem = None
with open(key_id, "rb") as key_file:
public_key_pem = key_file.read()
#self._public_keys[key_id] = public_key
return WrappingKey(
wrapping_key=public_key_pem,
wrapping_key_type=EncryptionKeyType.PUBLIC,
# The wrapping algorithm tells the raw RSA master key
# how to use your wrapping key to encrypt data keys.
#
# We recommend using RSA_OAEP_SHA256_MGF1.
# You should not use RSA_PKCS1 unless you require it for backwards compatibility.
wrapping_algorithm=WrappingAlgorithm.RSA_OAEP_SHA256_MGF1
)
"""
class OwnPublicKeyProvider(RawMasterKeyProvider):
    """Master key provider that wraps data keys with a user-supplied RSA public key.

    The key id passed to ``add_master_key`` is interpreted as the filesystem
    path of a PEM-encoded RSA *public* key.
    """

    provider_id = "own-public"

    def __init__(self, **kwargs):  # pylint: disable=unused-argument
        """Initialize empty map of keys."""
        self._static_keys = {}

    def _get_raw_key(self, key_id):
        """Load the PEM public key stored at path *key_id*.

        :param str key_id: Path of the PEM-encoded RSA public key file
        :returns: Wrapping key built from the loaded public key
        :rtype: :class:`aws_encryption_sdk.internal.crypto.WrappingKey`
        """
        with open(key_id, "rb") as key_file:
            public_key_pem = key_file.read()
        # BUG FIX: RawMasterKeyProvider expects _get_raw_key to return a
        # WrappingKey (the provider itself builds the RawMasterKey in
        # _new_master_key); the previous code returned a fully built
        # RawMasterKey and left the correct `return WrappingKey(...)`
        # unreachable. The debug print of the key was removed as well.
        return WrappingKey(
            wrapping_key=public_key_pem,
            wrapping_key_type=EncryptionKeyType.PUBLIC,
            # The wrapping algorithm tells the raw RSA master key how to use
            # the wrapping key to encrypt data keys. RSA_OAEP_SHA256_MGF1 is
            # recommended; RSA_PKCS1 exists only for backwards compatibility.
            wrapping_algorithm=WrappingAlgorithm.RSA_OAEP_SHA256_MGF1,
        )
class OwnPrivateKeyProvider(RawMasterKeyProvider):
    """Master key provider that unwraps data keys with a user-supplied RSA private key.

    The key id passed to ``add_master_key`` is interpreted as the filesystem
    path of a PEM-encoded RSA *private* key. The provider id deliberately
    matches the encrypting provider's ("own-public") so encrypted data keys
    produced by :class:`OwnPublicKeyProvider` can be decrypted.
    """

    provider_id = "own-public"

    def __init__(self, **kwargs):  # pylint: disable=unused-argument
        """Initialize empty map of keys."""
        self._static_keys = {}

    def _get_raw_key(self, key_id):
        """Load the PEM private key stored at path *key_id*.

        :param str key_id: Path of the PEM-encoded RSA private key file
        :returns: Wrapping key that contains the loaded private key
        :rtype: :class:`aws_encryption_sdk.internal.crypto.WrappingKey`
        """
        with open(key_id, "rb") as key_file:
            private_key_pem = key_file.read()
        # SECURITY FIX: removed `print(private_key_pem)` — it leaked the
        # private key material to stdout/logs.
        # BUG FIX: _get_raw_key must return a WrappingKey, not a RawMasterKey;
        # the previous code left the correct return unreachable.
        return WrappingKey(
            wrapping_key=private_key_pem,
            wrapping_key_type=EncryptionKeyType.PRIVATE,
            # The wrapping algorithm MUST match the encrypt master key.
            wrapping_algorithm=WrappingAlgorithm.RSA_OAEP_SHA256_MGF1,
        )
|
# Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
import gzip
import os
import struct
import numpy as np
import PIL.Image
from downloader import DataDownloader
class MnistDownloader(DataDownloader):
    """
    Downloads, uncompresses and converts the MNIST dataset into one image
    file per digit plus label index files.

    NOTE: written for Python 2 (print statements, xrange, byte-string
    iteration in __readLabels).

    See details about the MNIST dataset here:
    http://yann.lecun.com/exdb/mnist/
    """

    def urlList(self):
        # The four official MNIST archives (train/test images and labels).
        return [
            'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz',
            'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz',
            'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz',
            'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz',
        ]

    def uncompressData(self):
        """Gunzip each downloaded archive into self.outdir (skips existing output)."""
        for zipped, unzipped in [
            ('train-images-idx3-ubyte.gz', 'train-images.bin'),
            ('train-labels-idx1-ubyte.gz', 'train-labels.bin'),
            ('t10k-images-idx3-ubyte.gz', 'test-images.bin'),
            ('t10k-labels-idx1-ubyte.gz', 'test-labels.bin'),
        ]:
            zipped_path = os.path.join(self.outdir, zipped)
            assert os.path.exists(zipped_path), 'Expected "%s" to exist' % zipped
            unzipped_path = os.path.join(self.outdir, unzipped)
            # Only uncompress when the output is not already present.
            if not os.path.exists(unzipped_path):
                print "Uncompressing file=%s ..." % zipped
                with gzip.open(zipped_path) as infile, open(unzipped_path, 'wb') as outfile:
                    outfile.write(infile.read())

    def processData(self):
        """Convert both splits from IDX binary format to image folders."""
        self.__extract_images('train-images.bin', 'train-labels.bin', 'train')
        self.__extract_images('test-images.bin', 'test-labels.bin', 'test')

    def __extract_images(self, images_file, labels_file, phase):
        """
        Extract information from binary files and store them as images.

        Writes one image per sample into <outdir>/<phase>/<label>/, plus
        labels.txt (the ten class names) and <phase>.txt (filename label
        pairs, one per sample).
        """
        labels = self.__readLabels(os.path.join(self.outdir, labels_file))
        images = self.__readImages(os.path.join(self.outdir, images_file))
        assert len(labels) == len(images), '%d != %d' % (len(labels), len(images))
        output_dir = os.path.join(self.outdir, phase)
        self.mkdir(output_dir, clean=True)
        # labels.txt lists the ten digit classes, one per line.
        with open(os.path.join(output_dir, 'labels.txt'), 'w') as outfile:
            for label in xrange(10):
                outfile.write('%s\n' % label)
        with open(os.path.join(output_dir, '%s.txt' % phase), 'w') as outfile:
            for index, image in enumerate(images):
                # Images are grouped into one sub-directory per digit.
                dirname = os.path.join(output_dir, labels[index])
                self.mkdir(dirname)
                filename = os.path.join(dirname, '%05d.%s' % (index, self.file_extension))
                image.save(filename)
                outfile.write('%s %s\n' % (filename, labels[index]))

    def __readLabels(self, filename):
        """
        Return the labels stored in an IDX1 label file.

        NOTE(review): despite the original docstring ("a list of ints"),
        labels are returned as *strings* so they can be used directly as
        directory names.
        """
        print 'Reading labels from %s ...' % filename
        labels = []
        with open(filename, 'rb') as infile:
            infile.read(4)  # ignore magic number
            count = struct.unpack('>i', infile.read(4))[0]
            data = infile.read(count)  # one unsigned byte per label
            for byte in data:
                label = struct.unpack('>B', byte)[0]
                labels.append(str(label))
        return labels

    def __readImages(self, filename):
        """
        Returns a list of PIL.Image objects read from an IDX3 image file.
        """
        print 'Reading images from %s ...' % filename
        images = []
        with open(filename, 'rb') as infile:
            infile.read(4)  # ignore magic number
            count = struct.unpack('>i', infile.read(4))[0]
            rows = struct.unpack('>i', infile.read(4))[0]
            columns = struct.unpack('>i', infile.read(4))[0]
            for i in xrange(count):
                data = infile.read(rows * columns)
                # NOTE(review): np.fromstring is deprecated in newer NumPy;
                # np.frombuffer is the modern equivalent.
                image = np.fromstring(data, dtype=np.uint8)
                image = image.reshape((rows, columns))
                image = 255 - image  # now black digit on white background
                images.append(PIL.Image.fromarray(image))
        return images
|
import tkinter as tk
from PIL import Image,ImageTk
import os
from tkinter import filedialog
# Main application window: fixed 700x600, custom icon, not resizable.
win = tk.Tk()
win.title("Image to pdf converter")
win.geometry("700x600")
win.iconphoto(False, tk.PhotoImage(file = 'pdf.png'))
win.resizable(0,0)

# Decorative image shown at the top of the window.
can = tk.Canvas(win,bg = "yellow", width = 250,height = 240)
can.grid(row=0,column=0, sticky = tk.N, padx=225,pady = 30)
im = ImageTk.PhotoImage(Image.open('krishna.jpg'))
can.create_image(127,121,image = im)

# Welcome banner image below it.
canv = tk.Canvas(win,bg='yellow', width = 600,height =120)
canv.grid(row=1,column=0,padx=25)
img = ImageTk.PhotoImage(Image.open('welc.png'))
canv.create_image(302,62,image =img)
#functions
def disable(btn):
    """Grey out *btn* so it can no longer be clicked."""
    btn["state"] = "disabled"
def enable(btn):
    """Re-activate *btn* so it accepts clicks again."""
    btn["state"] = "active"
# Selected image paths, shared between the upload and save callbacks.
files = {}

def upload():
    """Ask the user for one or more image files and remember the selection."""
    global files
    files['filename'] = filedialog.askopenfilenames(
        filetypes=[('JPG', '*.jpg'), ('PNG', '*.png'), ('JPEG', '*.jpeg')],
        initialdir=os.getcwd(), title='Select File/Files')
    # Allow a download only once at least one image was chosen.
    if len(files['filename']) != 0:
        enable(down_button)
def save():
    """Ask for a target path and write the selected images into one PDF.

    The first image becomes the first page; the rest are appended.
    """
    images = [Image.open(path).convert('RGB') for path in files['filename']]
    file_name = filedialog.asksaveasfilename(filetypes=[('PDF', '*.pdf')],
                                             initialdir=os.getcwd(), title='Save File')
    # BUG FIX: asksaveasfilename returns '' when the dialog is cancelled;
    # previously this still wrote a file literally named ".pdf".
    if not file_name:
        return
    images[0].save(f'{file_name}.pdf', save_all=True, append_images=images[1:])
    disable(down_button)
# Upload button: lets the user pick the images to convert.
button = tk.Button(win,text="Upload Images",width = 20, height = 1,font = ('arial',14,'bold'),bg='white',fg='blue',command=upload)
button.grid(row=2,column=0,padx=200,pady=20)
# Download button: starts disabled until at least one image is uploaded.
down_button = tk.Button(win,text="Download Pdf",width = 20,height =1,font = ('arial',14,'bold'),bg= 'white',fg='green',command=save)
down_button.grid(row=3,column=0)
disable(down_button)
win.mainloop()
def getSeq(x):
    """Return the digit-square sequence starting at x, ending at the first
    single-digit value, e.g. getSeq(16) -> [16, 37, 58, 89, 145, 42, 20, 4].
    """
    n = [x]
    tmp = x
    while tmp > 9:
        # FIX: removed leftover debug `print tmp` (which was also the only
        # Python-2-only statement in this function).
        tmp2 = 0
        # Replace tmp by the sum of the squares of its decimal digits.
        for i in str(tmp):
            tmp2 += int(i)**2
        tmp = tmp2
        n.append(tmp)
    return n
maks = 10 ** 7  # upper search limit (also the abort threshold for runaway chains)
dic = {}        # cache for juletall: start value -> terminating single-digit value
def juletall(x):
    """Return True when x's digit-square chain reaches 1 (a "happy" number).

    Terminal single-digit values are memoised in the module-level `dic`
    keyed by start value, so `dic[...] == 1` decides happiness.
    """
    if x in dic:
        return dic[x] == 1
    tmp = x
    while tmp > 9:
        # Sum of the squares of tmp's decimal digits.
        tmp2 = 0
        for i in str(tmp):
            tmp2 += int(i)**2
        # Reuse a previously cached terminal value for the intermediate.
        # NOTE(review): this early return does not cache a result for x.
        if tmp2 in dic:
            return dic[tmp2] == 1
        tmp = tmp2
        # Safety valve against runaway chains (digit-square sums shrink
        # large numbers, so this is defensive only).
        if tmp > maks: return False
    dic[x] = tmp
    return tmp == 1
# Memoised results for juletall2 (value -> happy?); pre-seeded with known cases.
dic2 = {maks:False, 1:True, 13:True, 10:True}
def juletall2(x):
    """Recursive happy-number test that memoises every member of a chain.

    Relies on two module-level globals: `dic2` (value -> happy?) and
    `curr_seq`, the list of values seen so far in the current chain. Once
    the chain's fate is known, every recorded member is cached with it.
    """
    # Values above the limit are treated as unhappy (defensive cut-off).
    if x > maks:
        for n in curr_seq:
            dic2[n] = False
        return False
    if x in dic2:
        return dic2[x]
    if x == 1:
        for n in curr_seq:
            dic2[n] = True
        return True
    # Any other single-digit value cycles without ever reaching 1.
    if x < 10:
        for n in curr_seq:
            dic2[n] = False
        return False
    # Sum of squared decimal digits, then recurse on it.
    x2 = 0
    for i in str(x):
        x2 += int(i) ** 2
    curr_seq.append(x2)
    return juletall2(x2)
# Sum every "happy" number below maks. `curr_seq` records the current chain
# so juletall2 can memoise all its members at once. (Python 2 script.)
s = 0
for i in xrange(maks):
    global curr_seq  # NOTE(review): `global` at module level is a no-op
    curr_seq = [i]
    # Fast path: result already memoised.
    if i in dic2 and dic2[i]:
        s += i
        continue
    if juletall2(i):
        s+=i
print s
|
from django.contrib import admin
from .models import ProfileEvaluation,Registration
# Register both models so they can be managed from the Django admin site.
# NOTE(review): admin credentials were previously recorded in comments here;
# they have been removed — never commit credentials to source control.
admin.site.register(Registration)
admin.site.register(ProfileEvaluation)
import sys
from typing import NamedTuple, Tuple, List, Dict, Set
class Food(NamedTuple):
    """One food: its ingredients and its declared allergens."""
    ingredients: Tuple[str, ...]
    allergens: Tuple[str, ...]


def parse_food(line: str) -> Food:
    """Return a valid Food from the given line.

    The line looks like: "mxmxvkd kfcds sqjhc nhms (contains dairy, fish)".
    """
    left, right = line.strip().split(" (contains ")
    # FIX: the previous `[x for x in ...split(...)]` comprehensions were
    # redundant copies of the split results; build the tuples directly.
    ingredients = tuple(left.split(" "))
    # Slice off the closing parenthesis before splitting the allergen list.
    allergens = tuple(right[:-1].split(", "))
    return Food(ingredients, allergens)
def collect_foods_allergen(foods: Tuple[Food, ...], allergen: str) -> Tuple[Food, ...]:
    """Return a tuple of all foods whose allergen list includes *allergen*."""
    matching = [food for food in foods if allergen in food.allergens]
    return tuple(matching)
def collect_foods_ingredient(
    foods: Tuple[Food, ...], ingredient: str
) -> Tuple[Food, ...]:
    """Return a tuple of all foods whose ingredient list includes *ingredient*."""
    return tuple(filter(lambda food: ingredient in food.ingredients, foods))
def collect_unknown_ingredients(
    decoded_allergens: Dict[str, str], foods: Tuple[Food, ...], allergen: str
):
    """For each food containing *allergen*, collect its not-yet-decoded ingredients.

    Foods whose ingredients are all decoded already (or which lack the
    allergen) contribute nothing.
    """
    known = decoded_allergens.values()
    unique_ingredients: List[List[str]] = []
    for food in foods:
        if allergen not in food.allergens:
            continue
        candidates = [ing for ing in food.ingredients if ing not in known]
        # No empty lists — they would wipe out the later set intersection.
        if candidates:
            unique_ingredients.append(candidates)
    return unique_ingredients
def decode_allergens(foods: Tuple[Food, ...]) -> Dict[str, str]:
    """Map each allergen to the ingredient that carries it.

    Repeatedly look for an allergen whose candidate (still-unknown)
    ingredient lists across all foods intersect in exactly one ingredient;
    that ingredient must be the carrier. Iterate until every allergen is
    decoded.
    """
    allergens: Set[str] = {allergen for food in foods for allergen in food.allergens}
    decoded: Dict[str, str] = {}
    while len(decoded) < len(allergens):
        for allergen in allergens:
            candidate_lists = collect_unknown_ingredients(decoded, foods, allergen)
            # set.intersection needs concrete sets, so build one per list.
            common = set.intersection(*(set(lst) for lst in candidate_lists))
            if len(common) == 1:
                decoded[allergen] = common.pop()
    return decoded
if __name__ == "__main__":
    filename = sys.argv[1]
    # BUG FIX: close the input file deterministically instead of relying on
    # garbage collection of the open handle.
    with open(filename) as infile:
        contents = infile.readlines()
    foods: Tuple[Food, ...] = tuple(parse_food(line) for line in contents)
    decoded_allergens: Dict[str, str] = decode_allergens(foods)
    # Part 1: count ingredient occurrences that cannot contain any allergen.
    answer: int = sum(
        1
        for food in foods
        for ingredient in food.ingredients
        if ingredient not in decoded_allergens.values()
    )
    # Part 2: carrier ingredients, joined in alphabetical order of allergen.
    dangerous_ingredients: str = ",".join(
        v for k, v in sorted(decoded_allergens.items())
    )
    print(f"Part 1: answer= {answer}")
    print(f"Part 2: dangerous_ingredients= {dangerous_ingredients}")
import pytest
from darklyrics import get_lyrics, LyricsNotFound, get_albums, get_songs, get_all_lyrics
def test_not_found():
    """Unknown songs and artists must raise LyricsNotFound."""
    with pytest.raises(LyricsNotFound):
        get_lyrics('fakesong', 'fakeartist')
    # BUG FIX: this call used to sit inside the same `raises` block as the
    # one above, so it was never reached once get_lyrics raised; it needs
    # its own context manager to actually be asserted.
    with pytest.raises(LyricsNotFound):
        get_albums('fakeartist')
def test_song_only():
    """Lyrics can be fetched by song title alone."""
    text = get_lyrics('your familiar face')
    assert 'but love is meaningless' in text.lower()
def test_get_songs():
    """The song list for a known artist contains an expected title."""
    song_list = get_songs("katatonia")
    assert 'Criminals' in song_list
def test_get_songs_fail():
    """Requesting songs for an unknown artist raises LyricsNotFound."""
    with pytest.raises(LyricsNotFound):
        get_songs("fakeartist")
def test_get_all_songs():
    """The combined lyrics for an artist include a known line."""
    full_text = get_all_lyrics('in vain')
    assert 'Before it was torn by hands of man' in full_text
def test_integration():
    """Fetching by artist and song returns the expected lyric line."""
    text = get_lyrics('steelheart', 'she\'s gone')
    assert 'i\'m to blame,' in text.lower()
def test_get_albums():
    """The album list for a known artist contains an expected record."""
    album_list = get_albums('shadows fall')
    assert 'Retribution' in album_list
def test_get_albums_fail():
    """Requesting albums for an unknown artist raises LyricsNotFound."""
    with pytest.raises(LyricsNotFound):
        get_albums("fakeartist")
|
import os
import cv2
from scipy.io import savemat
n_authentic_trained = 7000  # images already consumed for training (authentic)
n_tampered_trained = 4000   # images already consumed for training (tampered)
# Maps copied test-image file name -> label (0 = authentic, 1 = tampered).
# FIX: renamed from `dict`, which shadowed the builtin type.
labels = {}
ntest = 500  # number of test images to take from each class

# Authentic images: skip the training portion, copy the next `ntest` files.
au_folder = 'Casia2/au/images'
test_data_folder = 'test_data'
count = 0
for filename in os.listdir(au_folder):
    count += 1
    if count <= n_authentic_trained: continue
    # FIX: once past the window nothing else can match, so break instead of
    # continuing to scan the remainder of the folder.
    if count > n_authentic_trained + ntest: break
    print(count)
    path = au_folder + '/' + filename
    img = cv2.imread(path)
    cv2.imwrite(test_data_folder + '/' + filename, img)
    labels[filename] = 0

# Tampered images: same sliding window over the tampered folder.
tp_folder = 'Casia2/tp/images'
count = 0
for filename in os.listdir(tp_folder):
    count += 1
    if count <= n_tampered_trained: continue
    if count > n_tampered_trained + ntest: break
    print(count)
    path = tp_folder + '/' + filename
    img = cv2.imread(path)
    cv2.imwrite(test_data_folder + '/' + filename, img)
    labels[filename] = 1

# NOTE(review): savemat always writes MATLAB .mat data, so this produces a
# .mat file confusingly named .csv — confirm downstream readers expect this
# exact name before renaming it.
savemat('filename_labels.csv', labels)
|
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import classification_report, confusion_matrix
from question_b import df, df_1, df_2, df_3, df_4, df_5
# X is the dataset that contains the required features (all columns except stroke, that will need to be predicted)
X = df_5.drop('stroke', axis=1)
# y is a vector that contains only the stroke data (that we want to predict essentially)
y = df_5['stroke']
# split training-testing
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
forest_cls = RandomForestClassifier(max_features='sqrt', n_estimators=161)
# gia veltiwsh tha prepei grid search + standard h minmax scaler(den beltiwnei kati telika) !
'''
#setting max_depth to any number(default is none) worsens alot f1 score
parameters = {
'n_estimators': list(range(1,200,20)),
'max_features': ['auto', 'sqrt', 'log2'],
}
forest_cls = GridSearchCV(forest_cls, parameters, cv= 5)
'''
# fit the train dataset (fitting is equal to training)
forest_cls.fit(X_train, y_train)
# make predictions with a the predict method call
y_pred = forest_cls.predict(X_test)
# evaluate the accuracy of the classification
# parameters are y_true (y_test as they are from the orignial dataset) and what we predicted (y_pred)
matrix = confusion_matrix(y_test,y_pred)
print(matrix)
# get a report showing the main classification metrics
# parameters are y_true (y_test as they are from the orignial dataset) and what we predicted (y_pred)
print(classification_report(y_test, y_pred))
# for the grid searchCV
# print(forest_cls.best_params_)
|
#!/usr/bin/env python
# -*- encoding=utf-8 -*-
########################################################
# DataSave Class to save daily curves and 5-min curves #
########################################################
import os
import time
import threading
# import json
import sqlite3
import arrow
# import datetime
# import fileutil
# import struct
# from pytz import utc
# from apscheduler.schedulers.background import BackgroundScheduler
# Location of this script (not otherwise used below).
abspath = os.path.abspath(__file__)
dirname = os.path.dirname(abspath)
# datetime.datetime(2016, 1, 1, tzinfo=datetime.timezone.utc).timestamp()
TS_2016_UTC = 1451606400.0
# Export folders produced by the stock-data tool (Windows paths).
DAILY_PATH = "C:\\Users\\Shaohua\\Documents\\export\\daily-qfq-20130304-20170814"
FIVE_MIN_PATH = "C:\\Users\\Shaohua\\Documents\\export\\5-min-20170502-20170814"
class DataSave(threading.Thread):
    """
    @brief DataSave Class to save daily curves and 5-min curves

    Reads exported CSV-like text files of daily / 5-minute stock curves and
    inserts their rows into the local SQLite database `stock.db`.
    """

    def __init__(self):
        super(DataSave, self).__init__()
        # NOTE(review): the name `_stop` shadows a private member of
        # threading.Thread on some Python versions — consider renaming.
        self._stop = threading.Event()
        self._stop.clear()
        self.count = 0    # files processed so far (progress display only)
        self.conn = None  # sqlite3 connection, opened in savedata()
        self.c = None     # cursor on self.conn

    def run(self):
        # Import the daily curves; the 5-minute import is kept disabled.
        # self.create_5m_table()
        self.savedata('day')
        # self.savedata('5m')
        # Idle until stop() is requested.
        while not self._stop.is_set():
            time.sleep(1)
        self.stop()

    def stop(self):
        # Signal run() to leave its idle loop.
        self._stop.set()

    def savedata(self, stype):
        # Import every export file of the given type ('day' or '5m').
        self.conn = sqlite3.connect('stock.db')
        self.c = self.conn.cursor()
        if stype == '5m':
            five_min_files = os.listdir(FIVE_MIN_PATH)
        elif stype == 'day':
            five_min_files = os.listdir(DAILY_PATH)
        total_count = len(five_min_files)
        for five_min_file in five_min_files:
            self.count += 1
            # Progress: "<current>/<total> <file>"
            print("%d/%d %s" % (self.count, total_count, five_min_file))
            if stype == '5m':
                filePath = FIVE_MIN_PATH + "\\" + five_min_file
                self.savedata_file(filePath, '5m')
            elif stype == 'day':
                filePath = DAILY_PATH + "\\" + five_min_file
                self.savedata_file(filePath, 'day')
        self.conn.close()

    def savedata_file(self, filePath, stype):
        # Parse one export file and insert its rows into the database.
        # Expected comma-separated layout per line:
        #   '5m' : date, time, open, high, low, close, volume, amount
        #   'day': date, open, high, low, close, volume, amount
        stockCode = filePath[:len(filePath) - 4]
        # NOTE(review): stockCode still contains the directory prefix at this
        # point, so this equality can never hold — the remap looks dead.
        if stockCode == "SH#000001":
            stockCode = "SH#660000"
        stockCode = stockCode.split("#")[1]
        stockCodeInt = int(stockCode)
        try:
            jfile = open(filePath, "r")
            lines = jfile.readlines()
        except IOError as err:
            print('read file: ' + str(err))
        else:
            # conn = sqlite3.connect('stock.db')
            # c = conn.cursor()
            for line in lines:
                t1 = line.strip().split(",")
                # print(t1)
                if stype == '5m':
                    # NOTE(review): eight fields are consumed below (t1[7]),
                    # so this guard should arguably be `< 8`.
                    if len(t1) < 7:
                        continue
                    t1[0] = t1[0].replace('/', '-')      # 2017/05/02 -> 2017-05-02
                    t1[1] = t1[1][:2] + ":" + t1[1][2:]  # 0935 -> 09:35
                    t2 = t1[0] + "T" + t1[1] + "+0800"   # ISO timestamp, UTC+8
                    # print(t2)
                    ts = arrow.get(t2)
                    # NOTE(review): on arrow >= 1.0 `timestamp` is a method,
                    # not a property — confirm the installed arrow version.
                    stockDate = ts.timestamp
                    stockOpen = float(t1[2])
                    stockHigh = float(t1[3])
                    stockLow = float(t1[4])
                    stockClose = float(t1[5])
                    stockVol = float(t1[6])
                    stockAmount = float(t1[7])
                elif stype == 'day':
                    # NOTE(review): seven fields are consumed below (t1[6]),
                    # so this guard should arguably be `< 7`.
                    if len(t1) < 6:
                        continue
                    t1[0] = t1[0].replace('/', '-')
                    t2 = t1[0] + "T" + "00:00:00+0800"
                    # print(t2)
                    ts = arrow.get(t2)
                    stockDate = ts.timestamp
                    stockOpen = float(t1[1])
                    stockHigh = float(t1[2])
                    stockLow = float(t1[3])
                    stockClose = float(t1[4])
                    stockVol = float(t1[5])
                    stockAmount = float(t1[6])
                # Simple OHLC average used as the stored "avg" column.
                stockAvg = (stockOpen + stockHigh +
                            stockLow + stockClose) / 4.0
                # NOTE(review): SQL is assembled via string interpolation;
                # fine for local trusted exports, but parameterised queries
                # (`?` placeholders) would be safer and faster.
                if stype == '5m':
                    sql1 = 'INSERT INTO "main"."daydata5m" ("stockCode", "stockDate", "stockOpen", "stockHigh", "stockLow", "stockClose", "stockVol", "stockAmount", "stockAvg")'
                elif stype == 'day':
                    sql1 = 'INSERT INTO "main"."daydata" ("stockCode", "stockDate", "stockOpen", "stockHigh", "stockLow", "stockClose", "stockVol", "stockAmount", "stockAvg")'
                sql2 = "VALUES ('%d', '%s', '%.2f', '%.2f', '%.2f', '%.2f', '%.2f', '%.2f', '%.2f');" % (
                    stockCodeInt, stockDate, stockOpen, stockHigh, stockLow, stockClose, stockVol, stockAmount, stockAvg)
                self.c.execute(sql1 + " " + sql2)
                # break
            # One commit per file keeps the transaction reasonably sized.
            self.conn.commit()
            # conn.close()
        finally:
            if 'jfile' in locals():
                jfile.close()

    def create_5m_table(self):
        # (Re)create the 5-minute table plus its two lookup indices.
        conn = sqlite3.connect('stock.db')
        print("Opened database successfully")
        c = conn.cursor()
        # stockCode,stockDate,stockOpen,stockHigh,stockLow,stockClose,stockVol,stockAmount
        c.execute("PRAGMA foreign_keys = false;")
        c.execute('''DROP TABLE IF EXISTS "daydata5m";''')
        c.execute('''
            CREATE TABLE "daydata5m" (
            "ID" integer NOT NULL,
            "stockCode" integer NOT NULL,
            "stockDate" integer NOT NULL,
            "stockOpen" real,
            "stockHigh" real,
            "stockLow" real,
            "stockClose" real,
            "stockVol" real,
            "stockAmount" real,
            "stockAvg" real,
            PRIMARY KEY ("ID")
            );''')
        c.execute('''
            CREATE INDEX "_stockCode"
            ON "daydata5m" ("stockCode" ASC);
            ''')
        c.execute('''
            CREATE INDEX "_stockDate"
            ON "daydata5m" ("stockDate" ASC);
            ''')
        c.execute("PRAGMA foreign_keys = true;")
        conn.commit()
        print("Table created successfully")
        conn.close()
if __name__ == '__main__':
    # Run the importer as a background thread and wait for it to finish.
    da = DataSave()
    da.start()
    da.join()
# import sqlite3
# conn = sqlite3.connect('test.db')
# c = conn.cursor()
# print "Opened database successfully";
# cursor = c.execute("SELECT id, name, address, salary from COMPANY")
# for row in cursor:
# print "ID = ", row[0]
# print "NAME = ", row[1]
# print "ADDRESS = ", row[2]
# print "SALARY = ", row[3], "\n"
# print "Operation done successfully";
# conn.close()
|
from typing import (
Any,
Sequence,
Tuple,
Type,
)
from eth_utils.toolz import accumulate
from p2p.abc import (
CommandAPI,
ProtocolAPI,
TransportAPI,
)
from p2p.constants import P2P_PROTOCOL_COMMAND_LENGTH
from p2p.typing import Capability
from p2p._utils import get_logger
class BaseProtocol(ProtocolAPI):
    def __init__(self,
                 transport: TransportAPI,
                 command_id_offset: int,
                 snappy_support: bool) -> None:
        """Build the command-id lookup tables for this protocol instance."""
        self.logger = get_logger('p2p.protocol.Protocol')
        self.transport = transport
        self.command_id_offset = command_id_offset
        self.snappy_support = snappy_support
        # Forward map: command class -> wire command id
        # (protocol offset + per-protocol command id).
        self.command_id_by_type = {}
        for command_type in self.commands:
            self.command_id_by_type[command_type] = (
                command_id_offset + command_type.protocol_command_id
            )
        # Reverse map: wire command id -> command class.
        self.command_type_by_id = {
            wire_id: cmd_type for cmd_type, wire_id in self.command_id_by_type.items()
        }

    def __repr__(self) -> str:
        return "(%s, %d)" % (self.name, self.version)

    @classmethod
    def supports_command(cls, command_type: Type[CommandAPI[Any]]) -> bool:
        """Whether this protocol declares *command_type* in its command list."""
        return command_type in cls.commands

    @classmethod
    def as_capability(cls) -> Capability:
        """Return the (name, version) pair advertised during the handshake."""
        return (cls.name, cls.version)

    def get_command_type_for_command_id(self, command_id: int) -> Type[CommandAPI[Any]]:
        """Look up the command class registered for *command_id*."""
        return self.command_type_by_id[command_id]

    def send(self, command: CommandAPI[Any]) -> None:
        """Encode *command* for the wire and hand it to the transport."""
        wire_id = self.command_id_by_type[type(command)]
        payload = command.encode(wire_id, self.snappy_support)
        self.transport.send(payload)
def get_cmd_offsets(protocol_types: Sequence[Type[ProtocolAPI]]) -> Tuple[int, ...]:
    """
    Compute the `command_id_offset` for each protocol.

    The first offset is always P2P_PROTOCOL_COMMAND_LENGTH because every
    protocol sits after the base `p2p` protocol; each later protocol starts
    at the running total of the command lengths that precede it.
    """
    def add_length(prev_offset: int, protocol_class: Type[ProtocolAPI]) -> int:
        return prev_offset + protocol_class.command_length

    running_totals = accumulate(add_length, protocol_types, P2P_PROTOCOL_COMMAND_LENGTH)
    # The final total would be the offset of a protocol *after* the last
    # one, so it is dropped.
    return tuple(running_totals)[:-1]
|
#!/usr/bin/env python3
import sys
import argparse
import h5py
import math
import scipy
from scipy import ndimage
from scipy import integrate
import numpy as np
import pandas as pd
import datetime
import time
import matplotlib
import matplotlib.pyplot as plt
import os
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection
matplotlib.rcParams['figure.dpi'] = 150
from mpi4py import MPI
import hdf5plugin
# plot a 3D cube and grid points specified by x, y, z arrays
def plot_cube(cube_definition, x, y, z, view):
    """Plot a 3D box (the SW4-ESSI domain) plus grid points given by x, y, z.

    Parameters
    ----------
    cube_definition : four corner points [origin, +x corner, +y corner, +z corner],
        e.g. [(1500, 500, 0), (1700, 500, 0), (1500, 650, 0), (1500, 500, 200)].
    x, y, z : grid point coordinates, relative to the cube's origin corner.
    view : 'XZ' or 'XY' selects a camera preset; anything else keeps the
        default 3D view.

    The figure is saved as 'input_coords<view>.png'.
    """
    cube_definition_array = [
        np.array(list(item))
        for item in cube_definition
    ]
    points = []
    points += cube_definition_array
    # The three edge vectors spanning the box from its origin corner.
    vectors = [
        cube_definition_array[1] - cube_definition_array[0],
        cube_definition_array[2] - cube_definition_array[0],
        cube_definition_array[3] - cube_definition_array[0]
    ]
    # The remaining four corners are sums of the edge vectors.
    points += [cube_definition_array[0] + vectors[0] + vectors[1]]
    points += [cube_definition_array[0] + vectors[0] + vectors[2]]
    points += [cube_definition_array[0] + vectors[1] + vectors[2]]
    points += [cube_definition_array[0] + vectors[0] + vectors[1] + vectors[2]]
    points = np.array(points)
    # Six faces of the box, each as an ordered list of four corners.
    edges = [
        [points[0], points[3], points[5], points[1]],
        [points[1], points[5], points[7], points[4]],
        [points[4], points[2], points[6], points[7]],
        [points[2], points[6], points[3], points[0]],
        [points[0], points[2], points[4], points[1]],
        [points[3], points[6], points[7], points[5]]
    ]
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    faces = Poly3DCollection(edges, linewidths=1, edgecolors='xkcd:grey')
    faces.set_facecolor((0,0,1,0.05))
    ax.add_collection3d(faces)
    # Plot the points themselves to force the scaling of the axes
    ax.scatter(points[:,0], points[:,1], points[:,2], s=0)
    #[(1500, 500, 0), (1700, 500, 0), (1500, 650, 0), (1500, 500, 200)]
    x_min = cube_definition[0][0]
    x_max = cube_definition[1][0]
    y_min = cube_definition[0][1]
    y_max = cube_definition[2][1]
    z_min = cube_definition[0][2]
    z_max = cube_definition[3][2]
    x_len = x_max - x_min
    y_len = y_max - y_min
    z_len = z_max - z_min
    # Start from tight plot limits, then pad below to equalise x/y extents.
    x_plot_min = x_min
    x_plot_max = x_max
    y_plot_min = y_min
    y_plot_max = y_max
    z_plot_min = z_min
    z_plot_max = z_max
    #print('plot min/max %.2f %.2f %.2f %.2f %.2f %.2f' %( x_plot_min, x_plot_max, y_plot_min, y_plot_max, z_plot_min, z_plot_max))
    x_y_len_diff = abs(x_len-y_len)
    if x_len < y_len:
        x_plot_min = x_min - x_y_len_diff/2
        x_plot_max = x_max + x_y_len_diff/2
    elif x_len > y_len:
        y_plot_min = y_min - x_y_len_diff/2
        y_plot_max = y_max + x_y_len_diff/2
    else:
        # Equal extents: widen both axes by 5% so the box is not flush
        # against the plot border.
        tmp0 = 0.95
        tmp1 = 1+(1-tmp0)
        x_plot_min *= tmp0
        x_plot_max *= tmp1
        y_plot_min *= tmp0
        y_plot_max *= tmp1
    #print('plot min/max', x_plot_min, x_plot_max, y_plot_min, y_plot_max, z_plot_min, z_plot_max)
    # The plot x-axis is labelled with SW4's Y and vice versa (axes swapped).
    ax.set_xlabel('Y(SW4)')
    ax.set_ylabel('X(SW4)')
    ax.set_zlabel('Z(SW4)')
    ax.set_xlim(x_plot_min, x_plot_max)
    ax.set_ylim(y_plot_min, y_plot_max)
    # Z limits are reversed so depth increases downwards.
    ax.set_zlim(z_plot_max, 0)
    lblsize = 5
    ax.zaxis.set_tick_params(labelsize=lblsize)
    ax.yaxis.set_tick_params(labelsize=lblsize)
    ax.xaxis.set_tick_params(labelsize=lblsize)
    ax.dist = 12
    #ax.set_aspect('equal')
    ax.text(cube_definition[2][0], cube_definition[2][1], cube_definition[2][2]-z_len*.05, 'SW4-ESSI domain', fontsize=7)
    xcolor = 'xkcd:azure'
    ycolor = 'xkcd:green'
    zcolor = 'xkcd:goldenrod'
    xyzmarker = 'x'
    xyzalpha = 0.1
    markersize=2
    # Shift the grid points to absolute coordinates (note the x/y swap).
    xs = x + cube_definition[0][1]
    ys = y + cube_definition[0][0]
    zs = z + cube_definition[0][2]
    #print(xs)
    #print(ys)
    #print(zs)
    ax.scatter(ys, xs, zs, c='r', marker='.')
    # Project the point cloud onto the three bounding planes of the box.
    ax.plot(ys, zs, linestyle = 'None', marker=xyzmarker, markersize=markersize, color=ycolor, zdir='y', zs=y_plot_max, alpha=xyzalpha)
    ax.plot(xs, zs, linestyle = 'None', marker=xyzmarker, markersize=markersize, color=xcolor, zdir='x', zs=x_plot_min, alpha=xyzalpha)
    ax.plot(ys, xs, linestyle = 'None', marker=xyzmarker, markersize=markersize, color=zcolor, zdir='z', zs=z_plot_max, alpha=xyzalpha)
    if view == 'XZ':
        ax.view_init(azim=0, elev=0) # XZ
    elif view == 'XY':
        ax.view_init(azim=0, elev=90) # XY
    #ax.view_init(azim=0, elev=-90) # XZ
    fname = 'input_coords' + view + '.png'
    plt.savefig(fname)
# Plot user specified grid points along with the ESSI domain, and its relative location in the SW4 domain
# Plot user specified grid points along with the ESSI domain, and its relative location in the SW4 domain
def plot_coords(essi_x0, essi_y0, essi_z0, essi_h, essi_nx, essi_ny, essi_nz, user_essi_x, user_essi_y, user_essi_z):
    """Render the user grid points inside the ESSI domain box in three views."""
    # Domain extent along each axis: (node count - 1) * grid spacing.
    x_hi = essi_x0 + (essi_nx - 1) * essi_h
    y_hi = essi_y0 + (essi_ny - 1) * essi_h
    z_hi = essi_z0 + (essi_nz - 1) * essi_h
    # Corner points are given in (y, x, z) order to match plot_cube's axes:
    # origin, then the corner adjacent along each axis.
    cube_definition = [
        (essi_y0, essi_x0, essi_z0),
        (y_hi, essi_x0, essi_z0),
        (essi_y0, x_hi, essi_z0),
        (essi_y0, essi_x0, z_hi),
    ]
    # print(cube_definition)
    for view in ('XYZ', 'XZ', 'XY'):
        plot_cube(cube_definition, user_essi_x, user_essi_y, user_essi_z, view)
def read_coord_drm(drm_filename, verbose):
    """Read node coordinates and boundary flags from a DRM HDF5 file.

    The 'Coordinates' dataset stores a flat sequence (x0, y0, z0, x1, ...).

    Returns (drm_x, drm_y, drm_z, n_coord, internal) where the coordinate
    arrays each hold n_coord float64 values and internal is the raw
    'Is Boundary Node' dataset contents.
    """
    if verbose:
        print('Reading coordinates from input file [%s]' % drm_filename)
    # Get the coordinates from DRM file; the context manager guarantees the
    # file is closed even if a dataset is missing (the original leaked the
    # handle on error).
    with h5py.File(drm_filename, 'r') as drm_file:
        coordinates = drm_file['Coordinates'][:]
        internal = drm_file['Is Boundary Node'][:]
    n_coord = int(coordinates.shape[0] / 3)
    # Vectorized de-interleave instead of a Python element-by-element loop.
    xyz = np.asarray(coordinates[:n_coord * 3], dtype='float64').reshape(n_coord, 3)
    drm_x = xyz[:, 0].copy()
    drm_y = xyz[:, 1].copy()
    drm_z = xyz[:, 2].copy()
    return drm_x, drm_y, drm_z, n_coord, internal
# changed ref coord as just offsets
# changed ref coord as just offsets
def convert_to_essi_coord(coord_sys, from_x, from_y, from_z, ref_essi_xyz, essi_nx, essi_ny, essi_nz):
    """Map user coordinates into the ESSI frame.

    Each entry of coord_sys names the ESSI axis ('x', 'y', 'z', possibly
    negated as '-x' etc.) that the corresponding input axis maps onto; the
    reference point ref_essi_xyz is applied as a pure offset.  The
    essi_nx/ny/nz parameters are kept for interface compatibility but are
    not used.  Returns (user_essi_x, user_essi_y, user_essi_z).
    """
    inputs = (from_x, from_y, from_z)
    mapped = {}
    for axis_spec, value in zip(coord_sys, inputs):
        sign = -1 if axis_spec.startswith('-') else 1
        target = axis_spec[-1]           # 'x', 'y' or 'z'
        offset = ref_essi_xyz['xyz'.index(target)]
        mapped[target] = sign * value + offset
    return mapped['x'], mapped['y'], mapped['z']
def get_coords_range(x, x_min_val, x_max_val, add_ghost):
    """Return (lo, hi) spanning x, padded by add_ghost ghost cells on each
    side and clamped to [x_min_val, x_max_val]."""
    lo = max(min(x) - add_ghost, x_min_val)
    hi = min(max(x) + add_ghost, x_max_val)
    return lo, hi
def get_essi_meta(ssi_fname, verbose):
    """Read grid and time metadata from an ESSI HDF5 file.

    Returns (x0, y0, z0, h, nx, ny, nz, nt, dt, timeseq): grid origin,
    spacing, node counts per axis, number of timesteps, timestep size,
    and a time-axis array.
    """
    # Get parameter values from HDF5 data
    essiout = h5py.File(ssi_fname, 'r')
    h = essiout['ESSI xyz grid spacing'][0]
    x0 = essiout['ESSI xyz origin'][0]
    y0 = essiout['ESSI xyz origin'][1]
    z0 = essiout['ESSI xyz origin'][2]
    t0 = essiout['time start'][0]
    dt = essiout['timestep'][0]
    # Velocity datasets are laid out as (t, x, y, z).
    nt = essiout['vel_0 ijk layout'].shape[0]
    nx = essiout['vel_0 ijk layout'].shape[1]
    ny = essiout['vel_0 ijk layout'].shape[2]
    nz = essiout['vel_0 ijk layout'].shape[3]
    t1 = dt*(nt-1)
    # NOTE(review): nt+1 samples over [t0, t1] gives spacing slightly smaller
    # than dt (and t1 ignores t0) — looks off by one; confirm against any
    # consumer of timeseq before changing.
    timeseq = np.linspace(t0, t1, nt+1)
    essiout.close()
    return x0, y0, z0, h, nx, ny, nz, nt, dt, timeseq
def get_essi_data_btw_step(ssi_fname, start, end, verbose):
    """Read the three velocity components for timesteps [start, end) from an
    ESSI file; end is clamped to the number of stored steps.  Returns None
    (after printing a message) when start is negative."""
    t_begin = float(time.perf_counter())
    fid = h5py.File(ssi_fname, 'r')
    total_steps = fid['vel_0 ijk layout'].shape[0]
    if start < 0:
        print('start cannot be negative!', start)
        return
    end = min(end, total_steps)
    vel_0_all = fid['vel_0 ijk layout'][start:end, :, :, :]
    vel_1_all = fid['vel_1 ijk layout'][start:end, :, :, :]
    vel_2_all = fid['vel_2 ijk layout'][start:end, :, :, :]
    fid.close()
    t_end = float(time.perf_counter())
    if verbose:
        print('Read from ESSI file took %.2f seconds.' % (t_end - t_begin))
    return vel_0_all, vel_1_all, vel_2_all
def get_essi_data_range(ssi_fname, xstart, xend, ystart, yend, zstart, zend, verbose):
    """Read all timesteps of the three velocity components over the given
    (x, y, z) index box from an ESSI file."""
    t_begin = float(time.perf_counter())
    with h5py.File(ssi_fname, 'r') as fid:
        box = np.s_[:, xstart:xend, ystart:yend, zstart:zend]
        vel_0_all = fid['vel_0 ijk layout'][box]
        vel_1_all = fid['vel_1 ijk layout'][box]
        vel_2_all = fid['vel_2 ijk layout'][box]
    if verbose:
        print('Read from ESSI file took %.2f seconds.' % (float(time.perf_counter()) - t_begin))
    return vel_0_all, vel_1_all, vel_2_all
def read_input_coord_txt(fname, verbose):
    """Parse a text file describing a coordinate system, reference point,
    unit and a list of coordinates.

    The file uses '#'-marked section headers ('Coordinate system',
    'Reference coordinate', 'Unit', 'Coordinates'); each header's data
    follows on the next line(s), comma-separated.

    Returns (coord_sys, ref_coord, unit, x, y, z, n_coord).

    Fix: the file is now opened with a context manager so the handle is
    closed even if a malformed line raises during parsing.
    """
    with open(fname, 'r') as f:
        lines = f.readlines()
    max_len = len(lines)
    # Over-allocate to one entry per file line; trimmed to n_coord at the end.
    x = np.zeros(max_len)
    y = np.zeros(max_len)
    z = np.zeros(max_len)
    coord_sys = np.zeros(3)
    ref_coord = np.zeros(3)
    unit = 'n/a'
    n_coord = 0
    i = 0
    # For number of nodes
    while i < max_len:
        line = lines[i]
        if 'Coordinate system' in line:
            i += 1
            coord_sys = lines[i].split(',')
            for j in range(0, 3):
                # Normalize e.g. ' -z\n' -> '-z'.
                coord_sys[j] = coord_sys[j].rstrip()
                coord_sys[j] = coord_sys[j].replace(' ', '')
            if verbose:
                print('Coordinate system: (%s, %s, %s)' % (coord_sys[0], coord_sys[1], coord_sys[2]))
        elif 'Reference coordinate' in line:
            i += 1
            tmp = lines[i].split(',')
            ref_coord[0] = float(tmp[0])
            ref_coord[1] = float(tmp[1])
            ref_coord[2] = float(tmp[2])
            if verbose:
                print('Reference Coordinate: (%d, %d, %d)' % (ref_coord[0], ref_coord[1], ref_coord[2]))
        elif 'Unit' in line:
            i += 1
            unit = lines[i].rstrip()
            if verbose:
                print('Unit: (%s)' % unit)
        elif 'Coordinates' in line:
            #print('Coordinate:')
            # Read coordinate triples until the next '#' header or EOF.
            while(i < max_len - 1):
                i += 1
                if '#' in lines[i]:
                    break
                tmp = lines[i].split(',')
                x[n_coord] = float(tmp[0])
                y[n_coord] = float(tmp[1])
                z[n_coord] = float(tmp[2])
                #print('(%d, %d, %d)' % (x[n_coord], y[n_coord], z[n_coord]))
                n_coord += 1
        i += 1
    if verbose:
        print('Read %d coordinates' % n_coord)
        print('First (%d, %d, %d), Last (%d, %d, %d)' % (x[0], y[0], z[0], x[n_coord-1], y[n_coord-1], z[n_coord-1]))
    # Trim the over-allocated arrays down to the actual count.
    x = np.resize(x, n_coord)
    y = np.resize(y, n_coord)
    z = np.resize(z, n_coord)
    return coord_sys, ref_coord, unit, x, y, z, n_coord
def write_to_hdf5(h5_fname, gname, dname, data):
    """Write `data` into dataset gname/dname of an existing HDF5 file,
    creating the group and/or dataset (dtype 'f4') on first use.
    gname == '/' targets the file root."""
    with h5py.File(h5_fname, 'r+') as h5file:
        if gname == '/':
            parent = h5file
        elif gname in h5file.keys():
            parent = h5file[gname]
        else:
            parent = h5file.create_group(gname)
        if dname in parent.keys():
            dset = parent[dname]
        else:
            dset = parent.create_dataset(dname, data.shape, dtype='f4')
        dset[:] = data[:]
def write_to_hdf5_range(h5_fname, gname, dname, data, mystart, myend):
    """Write `data` into rows [mystart, myend) of an existing 2-D dataset
    gname/dname; gname == '/' targets the file root."""
    with h5py.File(h5_fname, 'r+') as h5file:
        target = h5file[dname] if gname == '/' else h5file[gname][dname]
        #print('write_to_hdf5_range, data shape:', data.shape, 'dset shape:', target.shape)
        #print('mystart=%d, myend=%d' %(mystart, myend))
        target[mystart:myend, :] = data[:]
def write_to_hdf5_range_1d(h5_fname, gname, dname, data, mystart, myend):
    """Write `data` into elements [mystart, myend) of an existing 1-D dataset
    gname/dname; gname == '/' targets the file root."""
    with h5py.File(h5_fname, 'r+') as h5file:
        target = h5file[dname] if gname == '/' else h5file[gname][dname]
        #print('mystart=%d, myend=%d' %(mystart, myend))
        target[mystart:myend] = data[:]
def write_to_hdf5_range_2d(h5_fname, gname, dname, data, mystart, myend):
    """Write the 2-D block `data` into rows [mystart, myend) of an existing
    dataset gname/dname; gname == '/' targets the file root."""
    with h5py.File(h5_fname, 'r+') as h5file:
        target = h5file[dname] if gname == '/' else h5file[gname][dname]
        #print('mystart=%d, myend=%d' %(mystart, myend))
        target[mystart:myend, :] = data[:, :]
def create_hdf5_opensees(h5_fname, ncoord, nstep, dt, gen_vel, gen_acc, gen_dis, extra_dname):
    """Create a fresh OpenSees-format DRM HDF5 file with empty motion
    datasets for ncoord nodes and nstep timesteps of size dt.  The
    velocity/acceleration/displacement datasets are created only when the
    corresponding gen_* flag is set; extra_dname names an extra per-node
    integer dataset (e.g. 'internal' or 'nodeTag')."""
    with h5py.File(h5_fname, 'w') as h5file:
        data_grp = h5file.create_group('DRM_Data')
        # Node i's data occupies rows 3*i .. 3*i+2 of the motion datasets.
        data_location = np.arange(0, 3 * ncoord, 3, dtype='i4')
        for enabled, name in ((gen_vel, 'velocity'),
                              (gen_acc, 'acceleration'),
                              (gen_dis, 'displacement')):
            if enabled:
                data_grp.create_dataset(name, (ncoord * 3, nstep), dtype='f4')
        data_grp.create_dataset('data_location', data=data_location, dtype='i4')
        data_grp.create_dataset(extra_dname, (ncoord,), dtype='i4')
        data_grp.create_dataset('xyz', (ncoord, 3), dtype='f4')
        meta_grp = h5file.create_group('DRM_Metadata')
        meta_grp.create_dataset('dt', data=dt, dtype='f8')
        meta_grp.create_dataset('tend', data=nstep * dt, dtype='f8')
        meta_grp.create_dataset('tstart', data=0.0, dtype='f8')
def create_hdf5_essi(h5_fname, ncoord, nstep, dt, gen_vel, gen_acc, gen_dis, extra_dname):
    """Add empty motion datasets and a time axis to an existing ESSI
    template file (opened read/write).

    Velocity/Accelerations/Displacements datasets are created only when the
    corresponding gen_* flag is set.  extra_dname is accepted for interface
    symmetry with create_hdf5_opensees but is not used here.
    """
    h5file = h5py.File(h5_fname, 'r+')
    if gen_vel:
        dset = h5file.create_dataset('Velocity', (ncoord*3, nstep), dtype='f4')
    if gen_acc:
        dset = h5file.create_dataset('Accelerations', (ncoord*3, nstep), dtype='f4')
    if gen_dis:
        dset = h5file.create_dataset('Displacements', (ncoord*3, nstep), dtype='f4')
    timeseq = np.linspace(0, nstep*dt, nstep+1)
    # BUG FIX: the time axis was stored with dtype 'i4', truncating
    # fractional timestamps (e.g. dt=0.01 collapsed the axis to zeros);
    # store it as float64 instead.
    h5file.create_dataset('Time', data=timeseq, dtype='f8')
    h5file.close()
def coord_to_chunkid(x, y, z, chk_x, chk_y, chk_z, nchk_x, nchk_y, nchk_z):
    """Return the linearized id of the chunk containing grid point (x, y, z),
    given chunk sizes chk_* and chunk-grid extents nchk_*."""
    ix = np.floor(x / chk_x)
    iy = np.floor(y / chk_y)
    iz = np.floor(z / chk_z)
    #print('coord_to_chunkid:', x, y, z, chk_x, chk_y, chk_z, nchk_x, nchk_y, nchk_z)
    return int((ix * nchk_y + iy) * nchk_z + iz)
def chunkid_to_start(cid, chk_x, chk_y, chk_z, nchk_x, nchk_y, nchk_z):
    """Invert coord_to_chunkid: return the (x, y, z) grid start offsets of
    chunk cid."""
    plane = nchk_z * nchk_y
    ix = math.floor(cid / plane)
    remainder = cid - ix * plane
    iy = math.floor(remainder / nchk_z)
    iz = remainder - iy * nchk_z
    return int(ix * chk_x), int(iy * chk_y), int(iz * chk_z)
def get_chunk_size(ssi_fname):
    """Return the (t, x, y, z) HDF5 chunk dimensions of the velocity dataset,
    falling back to the full dataset shape when it is not chunked."""
    with h5py.File(ssi_fname, 'r') as fid:
        dset = fid['vel_0 ijk layout']
        dims = dset.chunks or dset.shape
    #print('Chunk size:', dims)
    return int(dims[0]), int(dims[1]), int(dims[2]), int(dims[3])
def get_nchunk_from_coords(x, y, z, chk_x, chk_y, chk_z, nchk_x, nchk_y, nchk_z):
    """Count the distinct chunks covering the given points.

    Returns (count, chk_ids) where chk_ids maps each chunk id to the order
    in which it was first seen.
    """
    if len(x) != len(y) or len(y) != len(z):
        print('Not equal sizes of the x,y,z coordinates array')
    chk_ids = {}
    for idx in range(len(x)):
        cid = coord_to_chunkid(x[idx], y[idx], z[idx], chk_x, chk_y, chk_z, nchk_x, nchk_y, nchk_z)
        # First-seen ordinal doubles as the round-robin distribution key.
        chk_ids.setdefault(cid, len(chk_ids))
    return len(chk_ids), chk_ids
def coord_to_str_3d(x, y, z):
    """Encode a grid coordinate as the dictionary key 'x,y,z'."""
    return '%s,%s,%s' % (x, y, z)
def str_to_coord_3d(s):
    """Decode an 'x,y,z' key back into an integer 3-tuple."""
    parts = s.split(',')
    return int(parts[0]), int(parts[1]), int(parts[2])
def allocate_neighbor_coords_8(data_dict, x, y, z, n):
    """Ensure the 8 grid nodes of the cell containing (x, y, z) each have a
    zeroed time-series buffer of length n in data_dict.

    Returns the number of entries newly added (already-present keys are
    left untouched).
    """
    nadd = 0
    for dx in (0, 1):
        for dy in (0, 1):
            for dz in (0, 1):
                key = coord_to_str_3d(int(x + dx), int(y + dy), int(z + dz))
                if key not in data_dict:
                    data_dict[key] = np.zeros(n)
                    nadd += 1
    return nadd
def read_hdf5_by_chunk(ssi_fname, data_dict, comp, cids_dict, chk_x, chk_y, chk_z, nchk_x, nchk_y, nchk_z, chk_t, mpi_rank, verbose):
    """Fill the per-node time series in data_dict for one velocity component.

    data_dict maps 'x,y,z' grid-index keys to pre-allocated arrays of length
    nt; comp selects the dataset 'vel_<comp> ijk layout'; cids_dict holds the
    chunk ids this rank must read.  Each chunk is read in chk_t-sized slabs
    along the time axis to bound memory, then scattered into every matching
    dict entry.
    """
    fid = h5py.File(ssi_fname, 'r')
    dset_name = 'vel_' + str(int(comp)) + ' ijk layout'
    for cids_iter in cids_dict:
        # Read chunk
        # Number of time-direction slabs needed to cover all timesteps.
        nread = math.ceil(fid[dset_name].shape[0] / chk_t)
        for start_t in range(0, nread):
            start_x, start_y, start_z = chunkid_to_start(cids_iter, chk_x, chk_y, chk_z, nchk_x, nchk_y, nchk_z)
            #print('Read chunk cid =', cids_iter, start_x, chk_x, start_y, chk_y, start_z, chk_z)
            starttime = time.time()
            chk_data = fid[dset_name][int(chk_t*start_t):int(chk_t*(start_t+1)), int(start_x):int(start_x+chk_x), int(start_y):int(start_y+chk_y), int(start_z):int(start_z+chk_z)]
            endtime = time.time()
            if verbose:
                print('Rank', mpi_rank, 'read', dset_name, 'chunk', start_t+1, '/', nread, 'time:', endtime-starttime)
            #sys.stdout.flush()
            starttime = time.time()
            # Scatter this slab's samples into every requested node that
            # falls inside the chunk; the modulo gives the node's offset
            # within the chunk.
            for coord_str in data_dict:
                x, y, z = str_to_coord_3d(coord_str)
                cid = coord_to_chunkid(x, y, z, chk_x, chk_y, chk_z, nchk_x, nchk_y, nchk_z)
                if cid == cids_iter:
                    # assign values from chunk to data_dict[coord_str][0:3]
                    # print('==assign values for', x, y, z, '->', x%chk_x, y%chk_y, z%chk_z, 'cid', cid, 'is in ', cids_iter, 'timestep', chk_t*start_t)
                    # print('shape is:', data_dict[coord_str].shape, chk_data.shape)
                    data_dict[coord_str][chk_t*start_t:chk_t*(start_t+1)] = chk_data[:,x%chk_x,y%chk_y,z%chk_z]
            endtime = time.time()
            #print('assign value time', endtime-starttime)
    fid.close()
def linear_interp(data_dict, x, y, z):
    """Trilinear interpolation of the time series stored in data_dict at the
    fractional grid location (x, y, z), using the 8 surrounding nodes."""
    xd = x - int(x)
    yd = y - int(y)
    zd = z - int(z)
    if xd > 1 or xd < 0 or yd > 1 or yd < 0 or zd > 1 or zd < 0:
        print('Error with linear interpolation input:', x, y, z)

    def corner(dx, dy, dz):
        # Fetch the time series of one corner of the enclosing cell.
        return data_dict[coord_to_str_3d(int(x + dx), int(y + dy), int(z + dz))]

    # Reduce along x, then y, then z.
    c00 = corner(0, 0, 0) * (1 - xd) + corner(1, 0, 0) * xd
    c10 = corner(0, 1, 0) * (1 - xd) + corner(1, 1, 0) * xd
    c01 = corner(0, 0, 1) * (1 - xd) + corner(1, 0, 1) * xd
    c11 = corner(0, 1, 1) * (1 - xd) + corner(1, 1, 1) * xd
    c0 = c00 * (1 - yd) + c10 * yd
    c1 = c01 * (1 - yd) + c11 * yd
    return c0 * (1 - zd) + c1 * zd
def generate_acc_dis_time(ssi_fname, coord_sys, ref_coord, user_x, user_y, user_z, n_coord, start_ts, end_ts, gen_vel, gen_acc, gen_dis, verbose, plot_only, output_fname, mpi_rank, mpi_size, extra_data, extra_dname, output_format):
    """Read ESSI velocities for the given nodes and write velocity,
    acceleration and displacement histories to output_fname.

    Work is distributed over MPI ranks by HDF5 chunk.  Acceleration is the
    time derivative of velocity (np.gradient) and displacement its
    cumulative trapezoid integral.  output_format selects the 'OpenSees' or
    'ESSI' file layout; writes are serialized across ranks with baton
    passing (send/recv).

    Fixes in this revision:
    - non-interpolated path computed the Y acceleration from the X velocity
      (copy/paste); it now differentiates read_coords_vel_1;
    - ESSI 'Coordinates' writes used start offset my_offset against end
      (my_offset+n)*3; the start is now my_offset*3 so multi-rank writes
      have consistent slice lengths.
    """
    # Read ESSI metadata
    essi_x0, essi_y0, essi_z0, essi_h, essi_nx, essi_ny, essi_nz, essi_nt, essi_dt, essi_timeseq = get_essi_meta(ssi_fname, verbose)
    essi_x_len_max = (essi_nx-1) * essi_h
    essi_y_len_max = (essi_ny-1) * essi_h
    essi_z_len_max = (essi_nz-1) * essi_h
    if end_ts == 0:
        end_ts = int(essi_nt)
    if verbose and mpi_rank == 0:
        print('\nESSI origin x0, y0, z0, h: ', essi_x0, essi_y0, essi_z0, essi_h)
        print('ESSI origin nx, ny, nz, nt, dt: ', essi_nx, essi_ny, essi_nz, essi_nt, essi_dt)
        print('ESSI max len x, y, z: ', essi_x_len_max, essi_y_len_max, essi_z_len_max)
        print('Reference coordinate:', ref_coord)
        print(' ')
        print('Generate output file with timesteps between', start_ts, 'and', end_ts, 'in', output_format, 'format')
    # Convert user coordinate to sw4 coordinate, relative to ESSI domain (subset of SW4 domain)
    user_essi_x, user_essi_y, user_essi_z = convert_to_essi_coord(coord_sys, user_x, user_y, user_z, ref_coord, essi_x_len_max, essi_y_len_max, essi_z_len_max)
    # debug print
    nprint = 0
    for i in range(0, nprint):
        if i == 0:
            print('converted essi coordinate:')
        print('(%d, %d, %d)' % (user_essi_x[i], user_essi_y[i], user_essi_z[i]))
    if mpi_rank == 0:
        plot_coords(essi_x0, essi_y0, essi_z0, essi_h, essi_nx, essi_ny, essi_nz, user_essi_x, user_essi_y, user_essi_z)
    if plot_only:
        if mpi_rank == 0:
            print('Only generate the plots of input nodes')
        exit(0)
    # Convert to array location (spacing is 1), floating-point
    coord_x = user_essi_x / essi_h
    coord_y = user_essi_y / essi_h
    coord_z = user_essi_z / essi_h
    # Check if we actually need interpolation: any node off the grid lattice
    # forces trilinear interpolation with one layer of ghost cells.
    ghost_cell = 0
    do_interp = False
    for nid in range(0, n_coord):
        if user_essi_x[nid] % essi_h != 0 or user_essi_y[nid] % essi_h != 0 or user_essi_z[nid] % essi_h != 0:
            do_interp = True
            ghost_cell = 1
            if verbose and mpi_rank == 0:
                print('Use spline interpolation.')
            break
    # print('Force to not interpolate')
    # do_interp = False
    #for i in range(0, len(user_essi_x)):
    #    print('(%.2f, %.2f, %.2f)' % (coord_x[i], coord_y[i], coord_z[i]))
    chk_t, chk_x, chk_y, chk_z = get_chunk_size(ssi_fname)
    if chk_t <= 0 or chk_x <= 0 or chk_y <= 0 or chk_z <= 0:
        print('Error getting chunk size from essi file', chk_t, chk_x, chk_y, chk_z)
    nchk_x = int(np.ceil(essi_nx/chk_x))
    nchk_y = int(np.ceil(essi_ny/chk_y))
    nchk_z = int(np.ceil(essi_nz/chk_z))
    if nchk_x <= 0 or nchk_y <= 0 or nchk_z <= 0:
        print('Error getting number of chunks', nchk_x, nchk_y, nchk_z)
    ntry = 0
    ntry_max = 1
    nchk = 0
    # Try to reduce the chunk size if the number of chunks is less than half the number of ranks
    while nchk < 0.5*mpi_size:
        if ntry > 0:
            # Halve one chunk dimension per retry, cycling x -> y -> z.
            if ntry % 3 == 1 and chk_x % 2 == 0:
                chk_x /= 2
            elif ntry % 3 == 2 and chk_y % 2 == 0:
                chk_y /= 2
            elif ntry % 3 == 0 and chk_z % 2 == 0:
                chk_z /= 2
        # Find how many chunks contain the requested coordinates.
        nchk, cids_dict = get_nchunk_from_coords(coord_x, coord_y, coord_z, chk_x, chk_y, chk_z, nchk_x, nchk_y, nchk_z)
        if ntry == 0 and mpi_rank == 0 and nchk != mpi_size:
            print('Recommend using', nchk, 'MPI ranks', 'instead of currently used', mpi_size)
        # Don't try too many times
        ntry += 1
        if ntry > ntry_max:
            break
    if verbose and mpi_rank == 0:
        print(nchk, 'total chunks to read/distribute', 'using chunk size (', chk_x, chk_y, chk_z, ')')
    # Get the coordinates assigned to this rank
    read_coords_vel_0 = {}
    read_coords_vel_1 = {}
    read_coords_vel_2 = {}
    coords_str_dict = {}
    is_boundary = np.zeros(n_coord, dtype='i4')
    my_ncoord = np.zeros(1, dtype='int')
    my_user_coordinates = np.zeros((n_coord,3), dtype='f4')
    my_converted_coordinates = np.zeros((n_coord,3), dtype='f4')
    for i in range(0, n_coord):
        cid = coord_to_chunkid(coord_x[i], coord_y[i], coord_z[i], chk_x, chk_y, chk_z, nchk_x, nchk_y, nchk_z)
        if cid < 0:
            print('Error with coord_to_chunkid', coord_x[i], coord_y[i], coord_z[i], cid)
        # Debug
        if mpi_rank == 0:
            tmp0, tmp1, tmp2 = chunkid_to_start(cid, chk_x, chk_y, chk_z, nchk_x, nchk_y, nchk_z)
            #print('cid', cid, coord_x[i], coord_y[i], coord_z[i], 'reverse:', tmp0, tmp1, tmp2)
        # cids_dict stores the actual unique ids of chunks that contain input
        # coordinates; chunks (and their nodes) go round-robin over ranks.
        if cids_dict[cid] % mpi_size == mpi_rank:
            #if verbose:
            #    print(i, coord_x[i], coord_y[i], coord_z[i], 'goes to chunk', cid, 'and rank', mpi_rank)
            my_user_coordinates[my_ncoord[0], 0] = user_x[i]
            my_user_coordinates[my_ncoord[0], 1] = user_y[i]
            my_user_coordinates[my_ncoord[0], 2] = user_z[i]
            my_converted_coordinates[my_ncoord[0], 0] = coord_x[i]
            my_converted_coordinates[my_ncoord[0], 1] = coord_y[i]
            my_converted_coordinates[my_ncoord[0], 2] = coord_z[i]
            coord_str = coord_to_str_3d(int(coord_x[i]), int(coord_y[i]), int(coord_z[i]))
            coords_str_dict[coord_str] = 1
            if do_interp:
                # Linear interpolation requires 8 neighbours' data
                nadded = allocate_neighbor_coords_8(read_coords_vel_0, coord_x[i], coord_y[i], coord_z[i], essi_nt)
                nadded = allocate_neighbor_coords_8(read_coords_vel_1, coord_x[i], coord_y[i], coord_z[i], essi_nt)
                nadded = allocate_neighbor_coords_8(read_coords_vel_2, coord_x[i], coord_y[i], coord_z[i], essi_nt)
                #print(int(coord_x[i]), int(coord_y[i]), int(coord_z[i]), 'added', nadded, 'nodes /', len(read_coords_vel_0))
            else:
                if coord_str not in read_coords_vel_0:
                    read_coords_vel_0[coord_str] = np.zeros(essi_nt)
                    read_coords_vel_1[coord_str] = np.zeros(essi_nt)
                    read_coords_vel_2[coord_str] = np.zeros(essi_nt)
            is_boundary[my_ncoord[0]] = extra_data[i]
            my_ncoord[0] += 1
        #end if assigned to my rank
    #end for i in all coordinates
    # Allocated more than needed previously, adjust
    my_user_coordinates.resize(my_ncoord[0], 3)
    is_boundary.resize(my_ncoord[0])
    comm = MPI.COMM_WORLD
    all_ncoord = np.empty(mpi_size, dtype='int')
    comm.Allgather([my_ncoord, MPI.INT], [all_ncoord, MPI.INT])
    my_nchk = len(cids_dict)
    if verbose:
        print('Rank', mpi_rank, ': assigned', my_ncoord, 'nodes, need to read', len(read_coords_vel_0), 'nodes, in', my_nchk, 'chunk')
    if my_ncoord[0] > 0:
        # Read data by chunk and assign to read_coords_vel_012
        read_hdf5_by_chunk(ssi_fname, read_coords_vel_0, 0, cids_dict, chk_x, chk_y, chk_z, nchk_x, nchk_y, nchk_z, chk_t, mpi_rank, verbose)
        read_hdf5_by_chunk(ssi_fname, read_coords_vel_1, 1, cids_dict, chk_x, chk_y, chk_z, nchk_x, nchk_y, nchk_z, chk_t, mpi_rank, verbose)
        read_hdf5_by_chunk(ssi_fname, read_coords_vel_2, 2, cids_dict, chk_x, chk_y, chk_z, nchk_x, nchk_y, nchk_z, chk_t, mpi_rank, verbose)
        if verbose:
            print('Coordinate offset:', ref_coord)
            #print('Rank %d, %d %d, %d %d, %d %d' %(mpi_rank, my_x_start, my_x_end, my_y_start, my_y_end, my_z_start, my_z_end))
    # Calculate the offset from the global array
    my_offset = 0
    for i in range(0, mpi_rank):
        my_offset += all_ncoord[i]
    if verbose:
        print('Rank %d offset %d ' % (mpi_rank, my_offset))
    output_acc_all = np.zeros((my_ncoord[0]*3, essi_nt), dtype='f4')
    output_dis_all = np.zeros((my_ncoord[0]*3, essi_nt), dtype='f4')
    output_vel_all = np.zeros((my_ncoord[0]*3, essi_nt), dtype='f4')
    # Negate velocity components whose user axis is reversed ('-x' etc.).
    for dim_iter in range(0, 3):
        if coord_sys[dim_iter] == '-x':
            for vel_iter in read_coords_vel_0:
                read_coords_vel_0[vel_iter][:] *= -1
        elif coord_sys[dim_iter] == '-y':
            for vel_iter in read_coords_vel_1:
                read_coords_vel_1[vel_iter][:] *= -1
        elif coord_sys[dim_iter] == '-z':
            for vel_iter in read_coords_vel_2:
                read_coords_vel_2[vel_iter][:] *= -1
    # end for dim_iter in range(0, 3)
    # Iterate over all coordinates, all the vel data (vel_0 to 2) in read_coords_vel_012 dict for this rank
    if do_interp:
        read_coords_acc_0 = {}
        read_coords_acc_1 = {}
        read_coords_acc_2 = {}
        read_coords_dis_0 = {}
        read_coords_dis_1 = {}
        read_coords_dis_2 = {}
        # Convert all data (including 8 neighbours) to acc and dis
        for vel_iter in read_coords_vel_0:
            if gen_acc:
                read_coords_acc_0[vel_iter] = np.gradient(read_coords_vel_0[vel_iter][:], essi_dt, axis=0)
                read_coords_acc_1[vel_iter] = np.gradient(read_coords_vel_1[vel_iter][:], essi_dt, axis=0)
                read_coords_acc_2[vel_iter] = np.gradient(read_coords_vel_2[vel_iter][:], essi_dt, axis=0)
            if gen_dis:
                read_coords_dis_0[vel_iter] = scipy.integrate.cumtrapz(y=read_coords_vel_0[vel_iter][:], dx=essi_dt, initial=0, axis=0)
                read_coords_dis_1[vel_iter] = scipy.integrate.cumtrapz(y=read_coords_vel_1[vel_iter][:], dx=essi_dt, initial=0, axis=0)
                read_coords_dis_2[vel_iter] = scipy.integrate.cumtrapz(y=read_coords_vel_2[vel_iter][:], dx=essi_dt, initial=0, axis=0)
        # Iterate over all actual coordinates (no neighbour)
        iter_count = 0
        for coords_str in coords_str_dict:
            x = my_converted_coordinates[iter_count, 0]
            y = my_converted_coordinates[iter_count, 1]
            z = my_converted_coordinates[iter_count, 2]
            if gen_acc:
                output_acc_all[iter_count*3+0, :] = linear_interp(read_coords_acc_0, x, y, z)
                output_acc_all[iter_count*3+1, :] = linear_interp(read_coords_acc_1, x, y, z)
                output_acc_all[iter_count*3+2, :] = linear_interp(read_coords_acc_2, x, y, z)
            if gen_dis:
                output_dis_all[iter_count*3+0, :] = linear_interp(read_coords_dis_0, x, y, z)
                output_dis_all[iter_count*3+1, :] = linear_interp(read_coords_dis_1, x, y, z)
                output_dis_all[iter_count*3+2, :] = linear_interp(read_coords_dis_2, x, y, z)
            if gen_vel:
                output_vel_all[iter_count*3+0, :] = linear_interp(read_coords_vel_0, x, y, z)
                output_vel_all[iter_count*3+1, :] = linear_interp(read_coords_vel_1, x, y, z)
                output_vel_all[iter_count*3+2, :] = linear_interp(read_coords_vel_2, x, y, z)
            iter_count += 1
    # end if with interpolation
    else:
        # no interpolation needed, just go through all coordinates' data and convert to acc and dis
        iter_count = 0
        print('Rank', mpi_rank, 'size of read_coords_vel_0:', len(read_coords_vel_0))
        for vel_iter in read_coords_vel_0:
            if gen_acc:
                # BUG FIX: the Y component previously differentiated
                # read_coords_vel_0 again (copy/paste), duplicating the X
                # acceleration; use the vel_1 series as in the dis/vel paths.
                output_acc_all[iter_count*3+0, :] = np.gradient(read_coords_vel_0[vel_iter][:], essi_dt, axis=0)
                output_acc_all[iter_count*3+1, :] = np.gradient(read_coords_vel_1[vel_iter][:], essi_dt, axis=0)
                output_acc_all[iter_count*3+2, :] = np.gradient(read_coords_vel_2[vel_iter][:], essi_dt, axis=0)
            if gen_dis:
                output_dis_all[iter_count*3+0, :] = scipy.integrate.cumtrapz(y=read_coords_vel_0[vel_iter][:], dx=essi_dt, initial=0, axis=0)
                output_dis_all[iter_count*3+1, :] = scipy.integrate.cumtrapz(y=read_coords_vel_1[vel_iter][:], dx=essi_dt, initial=0, axis=0)
                output_dis_all[iter_count*3+2, :] = scipy.integrate.cumtrapz(y=read_coords_vel_2[vel_iter][:], dx=essi_dt, initial=0, axis=0)
            if gen_vel:
                output_vel_all[iter_count*3+0, :] = read_coords_vel_0[vel_iter][:]
                output_vel_all[iter_count*3+1, :] = read_coords_vel_1[vel_iter][:]
                output_vel_all[iter_count*3+2, :] = read_coords_vel_2[vel_iter][:]
            iter_count += 1
        #end for
        print('Written', iter_count, 'coordinates')
    # end else no interpolation
    # Write coordinates and boundary nodes (file created previously), in serial with baton passing
    comm.Barrier()
    if output_format == "OpenSees":
        if mpi_rank == 0:
            create_hdf5_opensees(output_fname, n_coord, end_ts-start_ts, essi_dt, gen_vel, gen_acc, gen_dis, extra_dname)
            if my_ncoord[0] > 0:
                write_to_hdf5_range_2d(output_fname, 'DRM_Data', 'xyz', my_user_coordinates, my_offset, (my_offset+my_ncoord[0]))
                write_to_hdf5_range_1d(output_fname, 'DRM_Data', extra_dname, is_boundary, my_offset, my_offset+my_ncoord[0])
                if gen_acc:
                    write_to_hdf5_range(output_fname, 'DRM_Data', 'acceleration', output_acc_all[:,start_ts:end_ts], my_offset*3, (my_offset+my_ncoord[0])*3)
                if gen_dis:
                    write_to_hdf5_range(output_fname, 'DRM_Data', 'displacement', output_dis_all[:,start_ts:end_ts], my_offset*3, (my_offset+my_ncoord[0])*3)
                if gen_vel:
                    write_to_hdf5_range(output_fname, 'DRM_Data', 'velocity', output_vel_all[:,start_ts:end_ts], my_offset*3, (my_offset+my_ncoord[0])*3)
            # Pass the baton to the next rank so writes stay serialized.
            if mpi_size > 1:
                comm.send(my_ncoord, dest=1, tag=11)
        else:
            data = comm.recv(source=mpi_rank-1, tag=11)
            if my_ncoord[0] > 0:
                if verbose:
                    print('Rank', mpi_rank, 'start to write data')
                write_to_hdf5_range_2d(output_fname, 'DRM_Data', 'xyz', my_user_coordinates, my_offset, (my_offset+my_ncoord[0]))
                write_to_hdf5_range_1d(output_fname, 'DRM_Data', extra_dname, is_boundary, my_offset, my_offset+my_ncoord[0])
                if gen_acc:
                    write_to_hdf5_range(output_fname, 'DRM_Data', 'acceleration', output_acc_all[:,start_ts:end_ts], my_offset*3, (my_offset+my_ncoord[0])*3)
                if gen_dis:
                    write_to_hdf5_range(output_fname, 'DRM_Data', 'displacement', output_dis_all[:,start_ts:end_ts], my_offset*3, (my_offset+my_ncoord[0])*3)
                if gen_vel:
                    write_to_hdf5_range(output_fname, 'DRM_Data', 'velocity', output_vel_all[:,start_ts:end_ts], my_offset*3, (my_offset+my_ncoord[0])*3)
            if mpi_rank != mpi_size-1:
                comm.send(my_ncoord, dest=mpi_rank+1, tag=11)
    elif output_format == "ESSI":
        if mpi_rank == 0:
            create_hdf5_essi(output_fname, n_coord, end_ts-start_ts, essi_dt, gen_vel, gen_acc, gen_dis, extra_dname)
            # Write to the template file
            if my_ncoord[0] > 0:
                # BUG FIX: the start offset of the flattened Coordinates
                # slice must also be scaled by 3 to match the end offset.
                write_to_hdf5_range_1d(output_fname, '/', 'Coordinates', my_user_coordinates.reshape(my_ncoord[0]*3), my_offset*3, (my_offset+my_ncoord[0])*3)
                write_to_hdf5_range_1d(output_fname, '/', extra_dname, is_boundary, my_offset, my_offset+my_ncoord[0])
                if gen_acc:
                    write_to_hdf5_range(output_fname, '/', 'Accelerations', output_acc_all[:,start_ts:end_ts], my_offset*3, (my_offset+my_ncoord[0])*3)
                if gen_dis:
                    write_to_hdf5_range(output_fname, '/', 'Displacements', output_dis_all[:,start_ts:end_ts], my_offset*3, (my_offset+my_ncoord[0])*3)
                if gen_vel:
                    write_to_hdf5_range(output_fname, '/', 'Velocity', output_vel_all[:,start_ts:end_ts], my_offset*3, (my_offset+my_ncoord[0])*3)
            if mpi_size > 1:
                comm.send(my_ncoord, dest=1, tag=111)
        else:
            data = comm.recv(source=mpi_rank-1, tag=111)
            if my_ncoord[0] > 0:
                # BUG FIX: start offset scaled by 3 (see rank-0 branch).
                write_to_hdf5_range_1d(output_fname, '/', 'Coordinates', my_user_coordinates.reshape(my_ncoord[0]*3), my_offset*3, (my_offset+my_ncoord[0])*3)
                write_to_hdf5_range_1d(output_fname, '/', extra_dname, is_boundary, my_offset, my_offset+my_ncoord[0])
                if gen_acc:
                    write_to_hdf5_range(output_fname, '/', 'Accelerations', output_acc_all[:,start_ts:end_ts], my_offset*3, (my_offset+my_ncoord[0])*3)
                if gen_dis:
                    write_to_hdf5_range(output_fname, '/', 'Displacements', output_dis_all[:,start_ts:end_ts], my_offset*3, (my_offset+my_ncoord[0])*3)
                if gen_vel:
                    write_to_hdf5_range(output_fname, '/', 'Velocity', output_vel_all[:,start_ts:end_ts], my_offset*3, (my_offset+my_ncoord[0])*3)
            if mpi_rank != mpi_size-1:
                comm.send(my_ncoord, dest=mpi_rank+1, tag=111)
    else:
        if mpi_rank == 0:
            print('Invalid output format', output_format)
    comm.Barrier()
    if mpi_rank == 0:
        print('Rank', mpi_rank, 'Finished writing data')
    return
def convert_drm(drm_fname, ssi_fname, ref_coord, start_ts, end_ts, plot_only, mpi_rank, mpi_size, verbose):
    """Convert ESSI motions to an OpenSees DRM (.h5drm) file for the nodes
    listed in a DRM coordinate file."""
    if mpi_rank == 0:
        print('Start time:', datetime.datetime.now().time())
        print('Input DRM [%s]' %drm_fname)
        print('Input ESSI [%s]' %ssi_fname)
    output_fname = drm_fname + '.h5drm'
    # DRM files are expressed in (y, x, -z) relative to the SW4 frame.
    coord_sys = ['y', 'x', '-z']
    user_x, user_y, user_z, n_coord, extra_data = read_coord_drm(drm_fname, verbose)
    if verbose and mpi_rank == 0:
        print('Done read %d coordinates, first is (%d, %d, %d), last is (%d, %d, %d)' % (n_coord, user_x[0], user_y[0], user_z[0], user_x[-1], user_y[-1], user_z[-1]))
        print('x, y, z (min/max): (%.0f, %.0f), (%.0f, %.0f), (%.0f, %.0f)' % (np.min(user_x), np.max(user_x), np.min(user_y), np.max(user_y), np.min(user_z), np.max(user_z)) )
    # Generate velocity, acceleration and displacement in OpenSees layout.
    generate_acc_dis_time(ssi_fname, coord_sys, ref_coord, user_x, user_y, user_z,
                          n_coord, start_ts, end_ts, True, True, True, verbose,
                          plot_only, output_fname, mpi_rank, mpi_size,
                          extra_data, 'internal', 'OpenSees')
    return
def convert_csv(csv_fname, ssi_fname, plotonly, mpi_rank, mpi_size, verbose):
    """Convert ESSI motions to an OpenSees .h5drm file for nodes listed in a CSV.

    The CSV must provide run-level columns essiXstart/essiYstart/essiZstart
    (ESSI origin in the SW4 frame) and startTime/endTime (timestep window),
    plus one row per node with columns nodeTag, x, y, z.

    Fixes: ref_coord was used without being defined (NameError), and the
    final call passed the undefined names n_coord and plot_only instead of
    the node count and this function's plotonly argument.
    """
    if mpi_rank == 0:
        print('Start time:', datetime.datetime.now().time())
        print('Input CSV [%s]' %csv_fname)
        print('Input ESSI [%s]' %ssi_fname)
    df = pd.read_csv(csv_fname)
    # reference point, which is the ESSI/OPS origin in the SW4 coordinate system
    ref_coord = np.zeros(3)
    ref_coord[0] = df['essiXstart'][0]
    ref_coord[1] = df['essiYstart'][0]
    ref_coord[2] = df['essiZstart'][0]
    # start time and end time for truncation
    start_ts = int(df['startTime'][0])
    end_ts = int(df['endTime'][0])
    coord_sys = ['y', 'x', '-z']
    gen_vel = True
    gen_dis = True
    gen_acc = True
    extra_dname = 'nodeTag'
    # output motion for selected OpenSees nodes
    output_fname = csv_fname + '.h5drm'
    node_tags = df['nodeTag'][:].tolist()
    n_nodes = len(node_tags)
    node_x = np.zeros(n_nodes)
    node_y = np.zeros(n_nodes)
    node_z = np.zeros(n_nodes)
    for i in range(0, n_nodes):
        node_x[i] = df.loc[i, 'x']
        node_y[i] = df.loc[i, 'y']
        node_z[i] = df.loc[i, 'z']
    if mpi_rank == 0:
        print('Found motions for %i nodes...' % (n_nodes))
    output_format = 'OpenSees'
    generate_acc_dis_time(ssi_fname, coord_sys, ref_coord, node_x, node_y, node_z, n_nodes, start_ts, end_ts, gen_vel, gen_acc, gen_dis, verbose, plotonly, output_fname, mpi_rank, mpi_size, node_tags, extra_dname, output_format)
    return
def dframeToDict(dFrame):
    """Collapse a two-column DataFrame into {first-column: second-column}."""
    mapping = {}
    for _, row in dFrame.iterrows():
        values = row.to_list()
        mapping[values[0]] = values[1]
    return mapping
def convert_template(csv_fname, template_fname, ssi_fname, start_ts, end_ts, plot_only, mpi_rank, mpi_size, verbose):
    """Generate ESSI-format motions for the nodes listed in a template file.

    csv_fname: CSV with the SW4<->ESSI index/coordinate ranges.
    template_fname: HDF5 template with 'Coordinates' and 'DRM Nodes'
        datasets; it also serves as the output file.
    ssi_fname: SW4 ESSI output file with the recorded motions.
    start_ts/end_ts: timestep range used for truncation.
    plot_only: only plot the input nodes instead of writing output.
    mpi_rank/mpi_size: MPI layout; rank 0 does the console reporting.
    verbose: enable extra progress output.
    """
    if mpi_rank == 0:
        print('Start time:', datetime.datetime.now().time())
        print('Input CSV [%s]' % csv_fname)
        print('Input ESSI [%s]' % ssi_fname)
    sw4ToESSI_params = dframeToDict(pd.read_csv(csv_fname))
    # Read (and thereby validate the presence of) all expected CSV keys,
    # even though only the ESSI origin components are used below.
    sw4_i_start = sw4ToESSI_params["sw4_i_start"]
    sw4_i_end = sw4ToESSI_params["sw4_i_end"]
    sw4_j_start = sw4ToESSI_params["sw4_j_start"]
    sw4_j_end = sw4ToESSI_params["sw4_j_end"]
    sw4_k_start = sw4ToESSI_params["sw4_k_start"]
    sw4_k_end = sw4ToESSI_params["sw4_k_end"]
    essi_x_start = sw4ToESSI_params["essi_x_start"]
    essi_x_end = sw4ToESSI_params['essi_x_end']
    essi_y_start = sw4ToESSI_params["essi_y_start"]
    essi_y_end = sw4ToESSI_params["essi_y_end"]
    essi_z_start = sw4ToESSI_params["essi_z_start"]
    essi_z_end = sw4ToESSI_params["essi_z_end"]
    # Reference point: the ESSI/OPS origin in the SW4 coordinate system.
    # FIX: use a local array instead of assigning into the module-level
    # ref_coord (which only existed when running as __main__).
    ref_coord = np.zeros(3)
    ref_coord[0] = essi_x_start
    ref_coord[1] = essi_y_start
    ref_coord[2] = essi_z_end
    coord_sys = ['y', 'x', '-z']
    gen_vel = True
    gen_dis = True
    gen_acc = True
    extra_dname = 'Is Boundary Node'
    # The template file doubles as the output file.
    output_fname = template_fname
    # Read the node coordinates; the with-block guarantees the HDF5 file is
    # closed (and thus unlocked for the writer) even if reading raises.
    with h5py.File(template_fname, 'r') as template_file:
        coordinates = template_file['Coordinates'][:]
        node_tags = template_file['DRM Nodes'][:].tolist()
    n_coord = len(node_tags)
    user_x = np.zeros(n_coord)
    user_y = np.zeros(n_coord)
    user_z = np.zeros(n_coord)
    # 'Coordinates' is a flat [x0, y0, z0, x1, y1, z1, ...] array.
    for i in range(0, n_coord):
        user_x[i] = coordinates[i*3]
        user_y[i] = coordinates[i*3+1]
        user_z[i] = coordinates[i*3+2]
    if verbose and mpi_rank == 0:
        print('Done read %d coordinates, first is (%d, %d, %d), last is (%d, %d, %d)' % (n_coord, user_x[0], user_y[0], user_z[0], user_x[-1], user_y[-1], user_z[-1]))
        print('x, y, z (min/max): (%.0f, %.0f), (%.0f, %.0f), (%.0f, %.0f)' % (np.min(user_x), np.max(user_x), np.min(user_y), np.max(user_y), np.min(user_z), np.max(user_z)) )
        print('Start/end timestep', start_ts, end_ts)
    output_format = 'ESSI'
    generate_acc_dis_time(ssi_fname, coord_sys, ref_coord, user_x, user_y, user_z, n_coord, start_ts, end_ts, gen_vel, gen_acc, gen_dis, verbose, plot_only, output_fname, mpi_rank, mpi_size, node_tags, extra_dname, output_format)
    return
if __name__ == "__main__":
    # Avoid HDF5 file-lock failures on parallel filesystems and make output
    # appear immediately when running under MPI.
    os.environ['HDF5_USE_FILE_LOCKING'] = 'FALSE'
    os.environ['PYTHONUNBUFFERED'] = 'TRUE'
    # Defaults; overridden by the command-line options parsed below.
    verbose=False
    plotonly=False
    use_drm=False
    use_csv=False
    use_template=False
    ssi_fname=''
    drm_fname=''
    csv_fname=''
    template_fname=''
    # Reference coordinate offset (see --reference).
    ref_coord=np.zeros(3)
    start_ts=int(0)
    end_ts=int(0)
    parser=argparse.ArgumentParser()
    parser.add_argument("-c", "--csv", help="full path to the CSV setting file", default="")
    parser.add_argument("-d", "--drm", help="full path to the DRM file with node coordinates", default="")
    parser.add_argument("-e", "--essi", help="full path to the SW4 ESSI output file", default="")
    parser.add_argument("-t", "--template", help="full path to the ESSI template file with node coordinates", default="")
    parser.add_argument("-p", "--plotonly", help="only generate plots of the input nodes", action="store_true")
    parser.add_argument("-r", "--reference", help="reference node coordinate offset, default 0 0 0", nargs='+', type=float)
    parser.add_argument("-s", "--steprange", help="timestep range, default 0 total_steps", nargs='+', type=int)
    parser.add_argument("-v", "--verbose", help="increase output verbosity", action="store_true")
    args = parser.parse_args()
    if args.verbose:
        verbose=True
    if args.plotonly:
        plotonly=True
    if args.drm:
        drm_fname=args.drm
        use_drm=True
    if args.csv:
        csv_fname=args.csv
        use_csv=True
    if args.template:
        template_fname=args.template
        use_template=True
    if args.essi:
        ssi_fname=args.essi
    if args.reference:
        ref_coord[0]=args.reference[0]
        ref_coord[1]=args.reference[1]
        ref_coord[2]=args.reference[2]
    if args.steprange:
        start_ts=int(args.steprange[0])
        end_ts=int(args.steprange[1])
    comm = MPI.COMM_WORLD
    mpi_size = comm.Get_size()
    mpi_rank = comm.Get_rank()
    # Input validation: need a node-coordinate source and the ESSI motion file.
    if drm_fname == '' and csv_fname == '' and template_fname == '':
        print('Error, no node coordinate input file is provided, exit...')
        exit(0)
    if ssi_fname == '':
        print('Error, no SW4 ESSI output file is provided, exit...')
        exit(0)
    # Dispatch on the coordinate source: DRM file, CSV alone, or
    # CSV + ESSI template.
    if use_drm:
        convert_drm(drm_fname, ssi_fname, ref_coord, start_ts, end_ts, plotonly, mpi_rank, mpi_size, verbose)
    elif use_csv and not use_template:
        convert_csv(csv_fname, ssi_fname, plotonly, mpi_rank, mpi_size, verbose)
    elif use_csv and use_template:
        convert_template(csv_fname, template_fname, ssi_fname, start_ts, end_ts, plotonly, mpi_rank, mpi_size, verbose)
    if mpi_rank == 0:
        print('End time:', datetime.datetime.now().time())
# Generated by Django 3.0.8 on 2020-07-15 12:27
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.0.8: alter Message.author and Message.date.

    dependencies = [
        ('service_chat', '0002_auto_20200715_1205'),
    ]

    operations = [
        # On author deletion the FK value is set to the literal 'none' via
        # models.SET. NOTE(review): default is the *string* 'None', not
        # NULL — confirm this is intended.
        migrations.AlterField(
            model_name='message',
            name='author',
            field=models.ForeignKey(default='None', on_delete=models.SET('none'), to='service_chat.Author'),
        ),
        # Timestamp is now populated automatically at row creation.
        migrations.AlterField(
            model_name='message',
            name='date',
            field=models.DateTimeField(auto_now_add=True),
        ),
    ]
|
# Generated by Django 3.0.5 on 2021-08-04 22:13
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.0.5: Proveedor options + new adress field.

    dependencies = [
        ('erp', '0033_auto_20210804_1728'),
    ]

    operations = [
        # NOTE(review): verbose_name_plural 'Clientes' for Proveedor looks
        # like a copy-paste typo in the model — confirm against erp.models.
        migrations.AlterModelOptions(
            name='proveedor',
            options={'ordering': ['id'], 'verbose_name': 'Proveedor', 'verbose_name_plural': 'Clientes'},
        ),
        # preserve_default=False: 34 is only a one-off backfill value for
        # existing rows, not a lasting model default.
        migrations.AddField(
            model_name='proveedor',
            name='adress',
            field=models.CharField(default=34, max_length=150, verbose_name='Direcciรณn'),
            preserve_default=False,
        ),
    ]
|
"""
Constants for application
"""
USER_NAME = "ะะผั"
USER_NAME_HELP_TEXT = "ะะฐัะต ะธะผั. ะะฐะฟะพะปะฝััั ะฝะต ะพะฑัะทะฐัะตะปัะฝะพ."
LAST_NAME = "ะคะฐะผะธะปะธั"
LAST_NAME_HELP_TEXT = "ะะฐัะฐ ัะฐะผะธะปะธั. ะะฐะฟะพะปะฝััั ะฝะต ะพะฑัะทะฐัะตะปัะฝะพ."
PASSWD1 = "ะะฐัะพะปั"
PASSWD1_HELP_TEXT = "ะะฒะตะดะธัะต ะฟะฐัะพะปั."
PASSWD2 = "ะะฒะตะดะธัะต ะฟะฐัะพะปั ะตัะต ัะฐะท"
PASSWD2_HELP_TEXT = "ะะฒะตะดะธัะต ะฟะฐัะพะปั ะตัะต ัะฐะท."
VALIDATION_ERROR = "ะะฐัะพะปะธ ะฝะต ัะพะฒะฟะฐะดะฐัั"
EMAIL_LABEL = "ะญะปะตะบััะพะฝะฝะฐั ะฟะพััะฐ"
EMAIL_HELP_TEXT = "ะกััะตััะฒัััะธะน ะฐะดัะตั ัะปะตะบััะพะฝะฝะพะน ะฟะพััั. ะัะต ะฟะพััะพะฒัะต ัะพะพะฑัะตะฝะธั ั ัะฐะนัะฐ ะฑัะดัั ะพัััะปะฐัััั ะฝะฐ ััะพั ะฐะดัะตั."
LOGIN_ERROR = "ะะพะปัะทะพะฒะฐัะตะปั ะฝะต ัััะตััะฒัะตั"
REGISTER_ERROR = "ะัะธะฑะบะฐ ัะพะทะดะฐะฝะธั ะฟะพะปัะทะพะฒะฐัะตะปั"
|
# -*- coding: utf-8 -*-
import datetime
import os

import pyowm

from app import config

# City whose forecast is summarized by this module.
CITY = "Ann Arbor"
def get_weather_description():
    """Fetch today's 3-hour forecast for CITY and return a one-line summary."""
    # API key: environment variable wins, config file is the fallback.
    api_key = os.environ.get("WEATHERMAP_KEY") or config['weather']['api_key']
    owm_client = pyowm.OWM(api_key)
    forecast = owm_client.three_hours_forecast(CITY)
    # Keep only the readings whose (local) calendar day matches today.
    today = datetime.datetime.today().day
    today_weather = [
        w for w in forecast.get_forecast().get_weathers()
        if datetime.datetime.fromtimestamp(w.get_reference_time()).day == today
    ]
    return weather_description(forecast, today_weather)
def weather_description(forecast, today_weather):
    """Build a one-sentence description of today's weather in CITY.

    forecast: pyowm Forecaster, queried per condition via when_*().
    today_weather: today's weather readings, used for the high temperature.
    """
    stats = {
        "clouds": forecast.when_clouds(),
        "rain": forecast.when_rain(),
        "sun": forecast.when_sun(),
        "fog": forecast.when_fog(),
        "snow": forecast.when_snow()
    }
    # Discard weather readings not from today.
    # FIX: the loop variable used to be named today_weather, silently
    # clobbering the parameter before get_high() was called on it.
    today = datetime.datetime.today().day
    for key in stats:
        today_readings = []
        for w in stats[key]:
            date = datetime.datetime.fromtimestamp(w.get_reference_time())
            if date.day == today:
                today_readings.append(w)
        stats[key] = today_readings
    # Rank condition types by how often they occur today.
    # FIX: sorted() replaces the Python-2-only stats.keys().sort() pattern,
    # which raises AttributeError on Python 3.
    weather_types = sorted(stats, key=lambda t: len(stats[t]), reverse=True)
    high_temp = get_high(today_weather)
    t1_when = [x.get_reference_time() for x in stats[weather_types[0]]]
    t2_when = [x.get_reference_time() for x in stats[weather_types[1]]]
    t1_hours = [datetime.datetime.fromtimestamp(x).hour for x in t1_when]
    t2_hours = [datetime.datetime.fromtimestamp(x).hour for x in t2_when]
    weather_adjectives = {
        "clouds": "cloudy",
        "rain": "rainy",
        "sun": "sunny",
        "fog": "foggy",
        "snow": "snowing"
    }
    msg = ""
    if len(t1_hours) != 0:
        if len(t2_hours) == 0:
            # Single condition type today; presumably an intentional lead-in.
            msg = "and "
        # FIX: append instead of assign — the "and " prefix above was
        # immediately overwritten by the original assignment.
        msg += weather_adjectives[weather_types[0]]
        msg += " " + when_occuring(t1_when)
        if len(t2_hours) != 0:
            msg += ", and " + weather_adjectives[weather_types[1]]
            msg += " " + when_occuring(t2_when)
    else:
        return "The day will be great."
    return "Today in {0}, it'll be {1}°, {2}.".format(CITY, high_temp, msg)
def get_datetime_at_hour(hour):
    """Return a naive datetime for today's date at the given whole hour."""
    return datetime.datetime.combine(datetime.date.today(),
                                     datetime.time(hour))
def when_occuring(times):
    """Describe when (morning/afternoon/evening) the given readings occur.

    times: POSIX timestamps.
    Returns a phrase such as "in the morning", "in the morning and evening",
    "all day" (all three periods), or "" for an empty input.
    """
    morning = 0
    afternoon = 0
    evening = 0
    for time in times:
        hour = datetime.datetime.fromtimestamp(time).hour
        # Buckets: hours 0-11 morning, 12-16 afternoon, 17-23 evening.
        # FIX: removed a leftover `print hour` debug statement (which was
        # also Python-2-only syntax).
        if hour <= 11:
            morning += 1
        elif hour <= 16:
            afternoon += 1
        else:
            evening += 1
    number_nonzero = len([x for x in [morning, afternoon, evening] if x != 0])
    if number_nonzero == 0:
        return ""
    elif number_nonzero == 1:
        if morning:
            return "in the morning"
        if afternoon:
            return "in the afternoon"
        if evening:
            return "in the evening"
    elif number_nonzero == 2:
        if morning and afternoon:
            return "in the morning and afternoon"
        if morning and evening:
            return "in the morning and evening"
        if afternoon and evening:
            return "in the afternoon and evening"
    else:
        return "all day"
def get_high(today_weather):
    """Return today's high in whole degrees F, or "?" with no readings."""
    if not today_weather:
        return "?"
    # Readings report Kelvin; convert the maximum to Fahrenheit.
    kelvin = max(w.get_temperature()['temp_max'] for w in today_weather)
    return int((kelvin - 273.15) * 1.8 + 32)
|
import sys

# Summarize per-sample pipeline reports into a single tab-separated row.
# Usage: script.py <sample path>. Reads three report files named after the
# sample and writes <sample>_summary.txt next to the current directory.
ref=[sys.argv[1]]
rfh=open(sys.argv[1].split('/')[-1]+'_summary.txt','w')
rfh.write('Sample\tSequenced_Read-pair\tMapped_Read-pair\tNonclonal_Read-pair\tIntra_less\tIntra_more\tInter\tCpG_met\tCHG_met\tCHH_met\n')
for fn in ref:
    fn=fn.split()[-1].split('/')[-1]
    # Bismark alignment report, aligned-fragment count, and contact stats.
    dfh1=open(fn+'_trimmed_bismark_SE_report.txt','r')
    dfh2=open(fn+'_trimmed.fastq_all_merged_3split.bam_multi_split_aligned.txt_Aligned_Fragment_count.txt','r')
    dfh3=open(fn+'_trimmed.fastq_all_merged_3split.bam_multi_split_aligned.txt_man_dedupped.txt_2_contacts.stats.txt','r')
    # data[0]=sequenced, data[1]=mapped, data[7..9]=CpG/CHG/CHH methylation
    # (matched by the report strings below); data[2..6] come from the other
    # two files. NOTE(review): the header lists 9 stat columns but data has
    # 10 slots — verify column alignment of the written row.
    data=[0,0,0,0,0,0,0,0,0,0]
    for i in dfh1:
        line=i.split()
        if 'Sequences analysed in total:' in i:
            data[0]=line[-1]
        if 'Number of alignments with a unique best hit from the different alignments:' in i:
            data[1]=line[-1]
        if 'C methylated in CpG context:' in i:
            data[-3]=line[-1]
        if 'C methylated in CHG context:' in i:
            data[-2]=line[-1]
        if 'C methylated in CHH context:' in i:
            data[-1]=line[-1]
    # Only the first line of each of the remaining reports is consumed.
    line=dfh2.readline().split()
    data[2]=line[-1]
    line=dfh3.readline().split()
    data[3]=line[-7]
    data[4]=line[-6]+' '+line[-5]
    data[5]=line[-4]+' '+line[-3]
    data[6]=line[-2]+' '+line[-1]
    rfh.write(fn+'\t'+'\t'.join(map(lambda x:str(x),data))+'\n')
|
from django.conf.urls import patterns, url
from django.views.generic import RedirectView
from django.conf.urls import patterns, include, url
from rest_framework import routers
from messages.views import InboxListView, SentListView, ComposeMessageObjView
from messages.views import DeletedListView, DeleteMessageObjView, UnDeleteMessageObjView
from messages.views import ReplyMessageObjView, ConnectionView, MessageView
# urlpatterns = patterns('',
# url(r'^$', RedirectView.as_view(url='inbox/'), name='messages_redirect'),
# url(r'^compose/$', compose, name='messages_compose'),
# url(r'^compose/(?P<recipient>[\w.@+-]+)/$', compose, name='messages_compose_to'),
# url(r'^reply/(?P<message_id>[\d]+)/$', reply, name='messages_reply'),
# )
#COMPLETED
# url(r'^inbox/$', inbox, name='messages_inbox'),
# url(r'^outbox/$', outbox, name='messages_outbox'),
# url(r'^delete/(?P<message_id>[\d]+)/$', delete, name='messages_delete'),
# url(r'^undelete/(?P<message_id>[\d]+)/$', undelete, name='messages_undelete'),
# url(r'^trash/$', trash, name='messages_trash'),
# REST endpoints for the messaging app (DRF class-based views).
# NOTE(review): django.conf.urls.patterns() was removed in Django 1.10 —
# this module targets an older Django release.
router = routers.SimpleRouter(trailing_slash=False)
urlpatterns = patterns('',
    # Mailbox listings.
    url(r'^inbox$', InboxListView.as_view()),
    url(r'^sent$', SentListView.as_view()),
    url(r'^trash$', DeletedListView.as_view()),
    # Composing and replying.
    url(r'^compose$', ComposeMessageObjView.as_view()),
    url(r'^connection/(?P<pk>[0-9]+)$', ConnectionView.as_view()),
    url(r'^reply/(?P<message_id>[\d]+)$', ReplyMessageObjView.as_view()),
    # Soft-delete / restore and single-message detail.
    url(r'^delete/(?P<pk>[0-9]+)$', DeleteMessageObjView.as_view()),
    url(r'^undelete/(?P<pk>[0-9]+)$', UnDeleteMessageObjView.as_view()),
    url(r'^message/(?P<pk>[0-9]+)$', MessageView.as_view()),
)
|
# Austin Zebrowski
# E.Cloudmesh.Common.3
# Develop a program that demonstrates the use of FlatDict
from cloudmesh.common.FlatDict import FlatDict
# Sample nested dictionary to flatten.
mydictionary = {
    "name": "Katelyn"
    , "type": "Student"
    , "biometrics": {
        "height": 5.5,
        "weight": 120,
        "eye": "brown"
    }
}
# FlatDict exposes nested keys as attributes, joining levels with "__"
# (e.g. biometrics.weight -> biometrics__weight).
mydictionary = FlatDict(mydictionary)
print(mydictionary.name, "weighs", mydictionary.biometrics__weight, "pounds")
from django.contrib import admin

from authentication.models import Account

# Expose Account in the Django admin using the default ModelAdmin.
admin.site.register(Account)
|
import os

import numpy as np
import scipy.io
from PIL import Image
# Calculate the intersection over union of two boxes.
# Box format: [x_left, y_top, width, height], y axis pointing up, so a box
# spans [y_top - height, y_top] vertically (matches map_anchors' grids).
def get_iou(a, b):
    """Return the intersection-over-union (IoU) of boxes a and b.

    FIX: the previous version intersected with min/max swapped, computing
    the outer bounding extent instead of the overlap, which over-counts
    for partial overlaps and goes negative for disjoint boxes.
    """
    # Overlap extents; negative values mean no overlap on that axis.
    ix = min(a[0] + a[2], b[0] + b[2]) - max(a[0], b[0])
    iy = min(a[1], b[1]) - max(a[1] - a[3], b[1] - b[3])
    intersection = max(0, ix) * max(0, iy)
    # Union = sum of areas minus the double-counted intersection.
    union = a[2] * a[3] + b[2] * b[3] - intersection
    if union <= 0:
        # Both boxes degenerate (zero area): define IoU as 0.
        return 0.0
    return intersection / union
# Currently unimplemented.
# Designed to parse an annotation file and return a list of bounding boxes
# in the format [x_left, y_top, width, height].
def get_bounding_boxes(annotation):
    """Parse `annotation` into [x_left, y_top, width, height] boxes.

    Placeholder: always returns an empty list until parsing is implemented.
    """
    return []
# Currently unimplemented.
# Loads the dataset in the form of: images, [scores, scores_and_coordinates].
# Returns randomly generated data, for the purpose of testing the training loop.
def dataloader(path_to_dataset=''):
    """Placeholder loader: random images plus [scores, scores+coords] targets."""
    images = np.float32(np.random.random((64, 128, 128, 3)))
    scores = np.float32(np.random.random((64, 320, 1)))
    scores_and_coords = np.float32(np.random.random((64, 320, 9)))
    return images, [scores, scores_and_coords]
# For the purpose of simplicity, currently we have 1 anchor box (aspect
# ratio 1:1) for each point in the 8*8 and 16*16 feature maps.
# Maps the anchors to the original image (128*128*3).
# Called by dataloader to provide target scores/coordinates per anchor.
def _anchor_targets(boxes, step, threshold=0.5):
    """Score/regression targets for square step-pixel anchors on a 128px image.

    boxes: ground-truth [x_left, y_top, width, height] boxes (y axis up).
    Returns (scores, locs): scores[i] is 1 when the anchor's best IoU exceeds
    `threshold`; locs[i] is [1, box_cx, box_cy, box_w, box_h, anchor_cx,
    anchor_cy, anchor_w, anchor_h] (all /128) for positives, else 9 zeros.
    """
    half = step // 2
    scores = []
    locs = []
    for y in range(128, 0, -step):
        for x in range(0, 128, step):
            # Find the ground-truth box with the highest overlap.
            best_iou = 0
            best_box = []
            for b in boxes:
                overlap = get_iou(b, [x, y, step, step])
                if best_iou < overlap:
                    best_iou = overlap
                    best_box = b
            if best_iou > threshold:
                scores.append(1)
                locs.append([1,
                             (best_box[0] + best_box[2] / 2) / 128,
                             (best_box[1] - best_box[3] / 2) / 128,
                             best_box[2] / 128,
                             best_box[3] / 128,
                             (x + half) / 128,
                             (y - half) / 128,
                             step / 128,
                             step / 128])
            else:
                scores.append(0)
                locs.append([0] * 9)
    return scores, locs


def map_anchors(path_to_annotations):
    """Build per-image anchor targets for every annotation file in a folder.

    Returns (scores, locs) numpy arrays with the 16x16-map (8px anchor)
    targets concatenated before the 8x8-map (16px anchor) targets, matching
    the layout expected by the training loop.
    """
    annotations = os.listdir(path_to_annotations)
    batch_score_8 = []
    batch_loc_8 = []
    batch_score_16 = []
    batch_loc_16 = []
    for annotation in annotations:
        # boxes: [x_left, y_top, width, height].
        # FIX: removed a leftover `boxes = []` that discarded the parsed
        # boxes immediately after get_bounding_boxes() returned them.
        boxes = get_bounding_boxes(annotation)
        scores_16, loc_16 = _anchor_targets(boxes, 8)
        scores_8, loc_8 = _anchor_targets(boxes, 16)
        batch_loc_8.append(loc_8)
        batch_score_8.append(scores_8)
        batch_score_16.append(scores_16)
        batch_loc_16.append(loc_16)
    return (np.concatenate((np.array(batch_score_16), np.array(batch_score_8)), axis=1),
            np.concatenate((np.array(batch_loc_16), np.array(batch_loc_8)), axis=1))
|
from ..models import *
from rest_framework import serializers
class SponsorSerializers(serializers.ModelSerializer):
    """Serialize Sponsor objects exposing name, description, url, image."""
    class Meta:
        model = Sponsor
        fields = ('name', 'description', 'url', 'image')
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: add a nullable one-to-one link from Xqjugixefj to
    # ysgxuyu.Bmovnbnmed. related_name='+' disables the reverse accessor.

    dependencies = [
        ('ysgxuyu', '0005_auto_20150218_1622'),
        ('wyxbcga', '0004_remove_xqjugixefj_xvbpvsvwrw'),
    ]

    operations = [
        # NOTE(review): no on_delete argument — valid only on Django < 2.0,
        # where it defaulted to CASCADE.
        migrations.AddField(
            model_name='xqjugixefj',
            name='bbwlhkxvqd',
            field=models.OneToOneField(null=True, related_name='+', to='ysgxuyu.Bmovnbnmed'),
        ),
    ]
|
"""
This challenge asks you to redefine the mysum function we defined in chapter 1, such that it can take any number of arguments. The arguments must all be of the same type and know how to respond to the + operator. (Thus, the function should work with numbers, strings, lists, and tuples, but not with sets and dictionaries.)
The result should be a new, longer sequence of the type provided by the parameters. Thus, the result of mysum('abc', 'def') will be the string abcdef , and the result of mysum([1,2,3], [4,5,6]) will be the six-element list [1,2,3,4,5,6] . Of course, it should also still return the integer 6 if we invoke mysum(1,2,3)
"""
import unittest
def mysum(*args):
    """Sum any number of same-type values that support the + operator.

    Works with numbers, strings, lists, and tuples; with no arguments it
    returns the empty tuple. The result is a new object of the arguments'
    type (e.g. mysum('abc', 'def') == 'abcdef').
    """
    if not args:
        return args
    result = args[0]
    for item in args[1:]:
        # FIX: use `result = result + item` rather than `result += item` —
        # += on a list calls __iadd__ and mutated the caller's first list
        # argument in place.
        result = result + item
    return result
class TeststringListTranspose(unittest.TestCase):
    """Unit tests for mysum: empty call, strings, lists, and integers."""

    def test_blank(self):
        # No arguments: mysum returns the empty args tuple unchanged.
        self.assertEqual(mysum(), ())
    def test_valid01(self):
        self.assertEqual(mysum('abc', 'def'), 'abcdef')
    def test_valid02(self):
        self.assertEqual(mysum([1,2,3], [4,5,6]), [1,2,3,4,5,6])
    def test_valid03(self):
        self.assertEqual(mysum(1,2,3), 6)
    def test_valid04(self):
        # A single argument is returned as-is.
        self.assertEqual(mysum(1), 1)
    def test_valid05(self):
        self.assertEqual(mysum([1], ), [1])
    # Possible future cases, currently disabled:
    # def test_invalid(self):
    #     self.assertEqual(mysum(1,2,'d',3), ValueError)
    # def test_positive(self):
    #     self.assertEqual(mysum(1,2,4,3), 10)
    # def test_negative(self):
    #     self.assertEqual(mysum(1,2,-4,3), 2)
    # def test_list(self):
    #     self.assertEqual(mysum(*[1,2,4,3]), 10)

if __name__ == '__main__':
    unittest.main()
|
"""
NBComet: Jupyter Notebook extension to track full notebook history
"""
import os
import nbformat
# TODO see if we can use nbdime to do diff, or continue using our own code
def get_nb_diff(action_data, dest_fname, compare_outputs = False):
    """
    find diff between two notebooks

    action_data: (dict) new notebook data to compare
    dest_fname: (str) name of file to compare to
    compare_outputs: (bool) compare cell outputs, or just the sources

    Returns (diff, cell_order): diff maps a cell key (comet id when ids are
    valid, else a positional index) to the changed cell; cell_order lists
    the new notebook's cell keys in order.
    """
    # don't even compare if the old version of the notebook does not exist:
    # nothing to diff against, but still report the new cell order
    if not os.path.isfile(dest_fname):
        diff = {}
        cell_order = []
        nb_b = action_data['model']['cells']
        if valid_ids([], nb_b):
            cell_order = [c['metadata']['comet_cell_id'] for c in nb_b]
        else:
            cell_order = list(range(len(nb_b)))
        return diff, cell_order
    nb_a = nbformat.read(dest_fname, nbformat.NO_CONVERT)['cells']
    nb_b = action_data['model']['cells']
    diff = {}
    cell_order = []
    # either use a diff method based on cell ids
    if valid_ids(nb_a, nb_b):
        nb_a_cell_ids = [c['metadata']['comet_cell_id'] for c in nb_a]
        nb_b_cell_ids = [c['metadata']['comet_cell_id'] for c in nb_b]
        cell_order = nb_b_cell_ids
        for i in nb_b_cell_ids:
            # if it is a cell id seen in prior nb, check if contents changed
            if i in nb_a_cell_ids:
                # get the old and new cell contents
                cell_a = nb_a[nb_a_cell_ids.index(i)]
                cell_b = nb_b[nb_b_cell_ids.index(i)]
                if cells_different(cell_a, cell_b, compare_outputs):
                    diff[i] = cell_b
            # the cell is entirely new, so it is part of the diff
            else:
                diff[i] = nb_b[nb_b_cell_ids.index(i)]
    # or if no cell ids, rely on more targeted method based on type of action
    else:
        action = action_data['name']
        selected_index = action_data['index']
        selected_indices = action_data['indices']
        cell_order = list(range(len(nb_b)))
        # only compare the cells this action could plausibly have touched
        check_indices = indices_to_check(action, selected_index,
                                         selected_indices, nb_a, nb_b)
        for i in check_indices:
            # don't compare cells that don't exist in the current notebook
            if i >= len(nb_b):
                continue
            # if its a new cell at the end of the nb, it is part of the diff
            elif i >= len(nb_a):
                diff[i] = nb_b[i]
            else:
                cell_a = nb_a[i]
                cell_b = nb_b[i]
                if cells_different(cell_a, cell_b, compare_outputs):
                    diff[i] = cell_b
    return diff, cell_order
def _cells_have_unique_ids(cells):
    """True when every cell carries a comet_cell_id and no id repeats."""
    seen = set()
    for c in cells:
        if "comet_cell_id" not in c["metadata"]:
            return False
        cell_id = c["metadata"]["comet_cell_id"]
        if cell_id in seen:
            return False
        seen.add(cell_id)
    return True


def valid_ids(nb_a, nb_b):
    """
    Ensure each notebook we are comparing has a full set of unique cell ids

    nb_a: (list of dicts) one notebook's cells
    nb_b: (list of dicts) the other notebook's cells

    Refactored: the two identical per-notebook loops were collapsed into
    _cells_have_unique_ids, which also uses a set for O(1) membership.
    """
    return _cells_have_unique_ids(nb_a) and _cells_have_unique_ids(nb_b)
def cells_different(cell_a, cell_b, compare_outputs):
    """Return True when two cells differ in type, source, or (optionally,
    for code cells) in their outputs."""
    if cell_a["cell_type"] != cell_b["cell_type"]:
        return True
    if cell_a["source"] != cell_b["source"]:
        return True
    # Outputs are only inspected for code cells when requested.
    if not compare_outputs or cell_b["cell_type"] != "code":
        return False
    outs_a = cell_a['outputs']
    outs_b = cell_b['outputs']
    # A differing output count already means the cell changed.
    if len(outs_a) != len(outs_b):
        return True
    # Compare the outputs pairwise.
    for out_a, out_b in zip(outs_a, outs_b):
        if out_b['output_type'] != out_a['output_type']:
            return True
        kind = out_a['output_type']
        # Each output type keeps its payload under a different key.
        if kind in ["display_data", "execute_result"] and out_a['data'] != out_b['data']:
            return True
        if kind == "stream" and out_a['text'] != out_b['text']:
            return True
        if kind == "error" and out_a['evalue'] != out_b['evalue']:
            return True
    return False
def indices_to_check(action, selected_index, selected_indices, nb_a, nb_b):
    """
    Identify which notebook cells may have changed based on the type of action

    action: (str) action name
    selected_index: (int) single selected cell
    selected_indices: (list of ints) all selected cells
    nb_a: (list) previous notebook's cells
    nb_b: (list) current notebook's cells

    Always returns an iterable of indices (list or range); previously the
    merge-cells branch returned a bare int and undo-cell-deletion could
    fall through returning None.
    """
    len_a = len(nb_a)
    len_b = len(nb_b)
    # actions that apply to all selected cells
    if action in ['run-cell', 'clear-cell-output', 'change-cell-to-markdown',
                  'change-cell-to-code', 'change-cell-to-raw',
                  'toggle-cell-output-collapsed', 'toggle-cell-output-scrolled']:
        return [x for x in selected_indices]
    # actions that apply to all selected cells, and the next one
    elif action in ['run-cell-and-insert-below', 'run-cell-and-select-next']:
        ind = [x for x in selected_indices]
        ind.append(selected_indices[-1] + 1)
        return ind
    # actions that apply to the cell before or after first or last selected cell
    elif action in ['insert-cell-above']:
        return [selected_indices[0]]
    elif action in ['insert-cell-below']:
        return [selected_indices[-1] + 1]
    # actions that may insert multiple cells
    elif action in ['paste-cell-above']:
        start = selected_indices[0]  # first cell in selection
        num_inserted = len_b - len_a
        return [x for x in range(start, start + num_inserted)]
    elif action in ['paste-cell-below']:
        start = selected_indices[-1] + 1  # first cell after last selected
        num_inserted = len_b - len_a
        return [x for x in range(start, start + num_inserted)]
    elif action in ['paste-cell-replace']:
        start = selected_indices[0]  # first cell in selection
        num_inserted = len_b - len_a + len(selected_indices)
        return [x for x in range(start, start + num_inserted)]
    # actions to move groups of cells up and down
    elif action in ['move-cell-down']:
        if selected_indices[-1] < len_b - 1:
            ind = [x for x in selected_indices]
            ind.append(selected_indices[-1] + 1)
            return ind
        else:
            return []
    elif action in ['move-cell-up']:
        if selected_index == 0:
            return []
        else:
            ind = [x for x in selected_indices]
            ind.append(selected_indices[0] - 1)
            return ind
    # split, merge, and selection
    elif action in ['merge-cell-with-next-cell', 'unselect-cell']:
        return [selected_index]
    elif action in ['merge-cell-with-previous-cell']:
        return [max([0, selected_index - 1])]
    elif action in ['merge-selected-cells', 'merge-cells']:
        # FIX: was `return min(selected_indices)` — a bare int, while every
        # other branch (and the caller's for-loop) expects an iterable.
        return [min(selected_indices)]
    elif action in ['split-cell-at-cursor']:
        return [selected_indices[0], selected_index + 1]
    # actions applied to all cells in the notebook, or could affect all cells
    elif action in ['run-all-cells', 'restart-kernel-and-clear-output',
                    'confirm-restart-kernel-and-run-all-cells']:
        return [x for x in range(len_b)]
    # actions applied to all cells above or below the selected one
    elif action in ['run-all-cells-above']:
        return [x for x in range(selected_index)]
    elif action in ['run-all-cells-below']:
        return [x for x in range(selected_index, len_b)]
    # special case for undo deletion which could put a new cell anywhere
    elif action in ['undo-cell-deletion']:
        num_inserted = len_b - len_a
        if num_inserted > 0:
            first_diff = 0
            for i in range(len_b):
                # a new cell at the end of the nb
                if i >= len(nb_a):
                    first_diff = i
                    return range(first_diff, first_diff + num_inserted)
                elif nb_a[i]["source"] != nb_b[i]["source"]:
                    first_diff = i
                    return range(first_diff, first_diff + num_inserted)
        # FIX: previously fell through and returned None when nothing was
        # inserted; callers iterate the result, so return an empty list.
        return []
    # do nothing for remaining actions such as delete-cell, cut-cell
    else:
        return []
|
from Analyzer import *

# Open the input and hand it to Analyzer's parse().
# NOTE(review): the handle is never closed and `file` shadows a builtin
# name — consider a with-block.
file = open('file.txt', 'r')
parse(file)
#TODO:
# add code to iterate something
# https://github.com/LambdaSchool/DS-Unit-4-Sprint-1-NLP/tree/master/module4-topic-modeling
# https://learn.lambdaschool.com/ds/module/recbYIWnPYs2J4AWC/
#
# Topic Modeling
#
# At the end of this module, you should be able to:
# + describe the latent dirichlet allocation process
# + implement a topic model using the gensim library
# + interpret document topic distributions and summarize findings
# Part 1: Describe how an LDA Model works
# Part 2: Estimate a LDA Model with Gensim
# Part 3: Interpret LDA results
# Part 4: Select the appropriate number of topics
import os

# Directory of novel text files intended as the topic-modeling corpus.
NOVELS_DIRPATH = os.path.join(os.path.dirname(__file__), "..", "data", "novels")

if __name__ == "__main__":
    # Placeholder entry point; the LDA pipeline is not implemented yet.
    print("TOPIC MODELING, YO")
|
# coding: utf-8
# Interactive pyrax (Rackspace Cloud) session dump, Python 2 syntax.
# Most bare expressions below are no-op leftovers from the IPython session
# this file was saved from; they are kept verbatim.
#import ipdb
import pyrax
#ipdb.set_trace()
# Authenticate from a stored credentials file, then connect to Sydney.
pyrax.set_credential_file("/home/admin/pyrax_credentials")
cs_syd = pyrax.connect_to_cloudservers(region="SYD")
sydservers = cs_syd.servers.list()
sydimages = cs_syd.images.list()
#
# Poke at the first image's attributes.
print sydimages[0]
sydimages[0]
type(sydimages[0])
sydimages[0].id
sydimages[0].name
sydimages[0].human_id
cs_syd.list_snapshots()
cs = cs_syd
flvs = cs.list_flavors()
#for flv in flvs:
# print "Name:", flv.name
# print " ID:", flv.id
# print " RAM:", flv.ram
# print " Disk:", flv.disk
# print " VCPUs:", flv.vcpus
#server = cs.servers.create("pony46", u'5140b7e1-77a7-4ffb-ad9d-76bb834bd6f9', 2)
# Inspect the first existing server.
server=sydservers[0]
server.id
server.status
server.networks
#server.adminPass
#get_ipython().magic(u'save')
#get_ipython().magic(u'save booma6712')
#get_ipython().magic(u'save booma6713 1-24')
server.status
server.status
server.status
server.progress
server.human_id
server.hostId
server.id
server.image
server.get_vnc_console.__doc__
server.get_vnc_console('novnc')
server.user_id
#server.adminPass
#server.get_password
#server.get_password()
#server.delete
server.delete.__doc__
#server.delete()
cs.servers.list()
cs.list_base_images()
for i in cs.list_base_images():
    print i.name, i.id
#server = cs.servers.create("pony48", u'6110edfe-8589-4bb1-aa27-385f12242627', 2)
server.name
server.id
server.status
server.image.values
server.image.values()
#server = cs.servers.create("pony50", u'052ce2a1-6038-4834-8228-e377211cf059', 2)
server.status
server.id
cs_syd.servers
cs_syd.servers.list()
for i in cs_syd.servers.list():
    print i.name, i.id, i.image
for i in cs_syd.servers.list():
    print i
klist = cs_syd.servers.list()
klist
klist[0].id
klist[0].image
# Rebuild klist as a name -> server mapping.
klist = {}
for i in cs_syd.servers.list():
    klist[i.name] = i
print klist
#klist['pony48']
#klist['pony48'].id
#klist['pony48'].delete()
#klist['pony50'].delete()
#get_ipython().magic(u'save ipython-pyrax1-dump 1-76')
import numpy as np
import pandas as pd
from sklearn.cluster import MeanShift
from collections import Counter
def out(filename, s):
    """Write string s to filename, replacing any existing content."""
    with open(filename, 'w') as handle:
        handle.write(s)
# Load check-in coordinates; column names in the CSV carry stray whitespace.
raw_data = pd.read_csv('checkins.csv')
raw_data.columns = [c.strip() for c in raw_data.columns]
data = raw_data[['latitude', 'longitude']]
# Whitespace-only cells become NaN, then incomplete rows are dropped.
# NOTE(review): replace() with inplace=True on a column slice may hit
# pandas' SettingWithCopy behavior — confirm it modifies `data` as intended.
data.replace(r'\s+', np.nan, inplace=True, regex=True)
data = data.dropna()
# Cluster only the first 100k check-ins to keep MeanShift tractable.
subset = data[0:100000]
#print subset.shape
ms = MeanShift(bandwidth=0.1, bin_seeding=True)
ms.fit(subset)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
# Keep only clusters with more than min_cluster_size members.
cnt = Counter(labels)
min_cluster_size = 15
real_cluster_labels = [c for c, n in cnt.items() if n > min_cluster_size]
real_cluster_centers = cluster_centers[real_cluster_labels]
print len(real_cluster_centers)
# Target office coordinates (latitude, longitude).
offices = np.array([[33.751277, -118.188740],
                    [25.867736, -80.324116],
                    [51.503016, -0.075479],
                    [52.378894, 4.885084],
                    [39.366487, 117.036146],
                    [-33.868457, 151.205134]])
# def min_dist(a):
#     dist = np.array([np.linalg.norm(a - b) for b in offices])
#     return dist.min()
# For each cluster center, the distance to its nearest office; report the
# cluster closest to any office and save its coordinates.
cluster_min_distances = [min(map(lambda a: np.linalg.norm(a-c), offices)) for c in real_cluster_centers]
print len(cluster_min_distances)
min_dist = min(cluster_min_distances)
nearest_center = real_cluster_centers[cluster_min_distances.index(min(cluster_min_distances))]
print "Smallest distance %f" % min_dist
print nearest_center
out('result.txt', str(nearest_center[0]) + ' ' + str(nearest_center[1]))
import json
import csv
import sys
import re
import text_to_vector
import pandas as pd
import nltk
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from functools import reduce
from nltk.corpus import stopwords
nltk.download('stopwords')
import string
# Convert a JSON dump of per-handle tweets into a CSV feature matrix:
# Doc2Vec embedding components, VADER sentiment scores, and favorite count.
if len(sys.argv) < 3:
    print("USAGE <input-tweets.json> <out.json>")
    sys.exit(0)
TWEET_FILE_NAME = sys.argv[1]
OUTPUT_FILE_NAME = sys.argv[2]
STOP_WORDS = set(stopwords.words('english'))
SENTIMENT_ANALYZER = SentimentIntensityAnalyzer()
print("LOADING DOC2VEC MODEL, this might take a minute...")
text_to_vector.load_doc_model();
full_dataset = []
print("LOADING DATA, please wait for about 30 seconds.")
with open(TWEET_FILE_NAME) as f:
    full_dataset = json.loads(f.read())
'''
ideologies = {}
with open(CONGRESS_FILE_NAME) as f:
    congress = json.loads(f.read())
    for politician_wrapper in congress:
        politician_name = list(politician_wrapper.keys())[0]
        data = politician_wrapper[politician_name]
        ideologies[data["twitter_handler"]] = data["ideology"]
'''
# OLD DATA STRUCTURE
# full_dataset[TWITTER HANDLE][0:250][obj with keys ['created_at', 'favorite_count', 'hashtags', 'id', 'id_str', 'lang', 'retweet_count', 'source', 'text', 'truncated', 'urls', 'user', 'user_mentions']]
output = []
# Tweets missing any of these keys are skipped.
features = ["id", "favorite_count", "text", "hashtags"]
# NEW DATA STRUCTURE
# [0:537] list of { twitter_handle, tweets: [{ features } x 250], ideology rating: }
total_progress = len(list(full_dataset.keys()))
progress = 0
for twitter_handle in full_dataset.keys():
    progress += 1
    print("PROGRESS: ", progress, total_progress)
    for tweet in full_dataset[twitter_handle]:
        if not all([f in tweet for f in features]):
            continue
        d = {}
        # Clean up the tweets: strip trailing ellipsis tags, URLs,
        # hashtags, and punctuation before tokenizing.
        s = str(tweet["text"].replace("\u00a0\u2026", "")) # weird tags at the end of tweets, not adding information.
        url_pattern = '(https?:\/\/(?:www\.|(?!www))[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|www\.[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|https?:\/\/(?:www\.|(?!www))[a-zA-Z0-9]+\.[^\s]{2,}|www\.[a-zA-Z0-9]+\.[^\s]{2,}|pic\.[a-zA-Z0-9]+\.[^\s]{2,})'
        s = re.sub(url_pattern, '', s)
        s = re.sub("#[a-zA-Z]+", '', s) # remove hash tags
        s = s.translate(str.maketrans('', '', string.punctuation))
        # Lowercased tokens minus stop words, punctuation, and numbers
        # (used only for the sentiment features below).
        words = [word.lower() for word in s.split(" ") if word not in STOP_WORDS and \
                word not in string.punctuation and \
                not word.isdigit()]
        d["handle"] = twitter_handle
        # Add Doc2Vec features as columns x0..x(len-1).
        ls = text_to_vector.infer(s.split(" ")).tolist()
        for i in range(len(ls)):
            d["x" + str(i)] = ls[i]
        ix = len(ls)
        # Analyze sentiment; appended after the embedding columns.
        sentiment = SENTIMENT_ANALYZER.polarity_scores(" ".join(words))
        d["x" + str(ix)] = sentiment["pos"]
        d["x" + str(ix + 1)] = sentiment["neg"]
        d["x" + str(ix + 2)] = sentiment["neu"]
        d["x" + str(ix + 3)] = tweet["favorite_count"]
        #d["ideology"] = ideologies[twitter_handle]
        output.append(d)
df = pd.DataFrame(output)
df.to_csv(OUTPUT_FILE_NAME)
|
import turtle
import hd

# Number of fibonacci terms to generate/draw.
steps = 50
# Fibonacci sequence; filled in place by genfib() before drawing.
numbers = list()
turtle.speed(100)
turtle.ht()  # hide the turtle cursor
# Disable automatic screen refresh; the drawing only appears once
# turtle.update() is called at the end.
turtle.tracer(0,0)
def genfib():
    """Fill the global `numbers` list with steps + 2 fibonacci terms.

    Seeds the sequence with 1, 1 and then appends the sum of the last
    two entries `steps` times.
    """
    global numbers
    count = 0
    numbers.append(1)
    numbers.append(1)
    while count < steps:
        count += 1
        # len()/negative indexing replaces the original's explicit
        # numbers.__len__() call and manual last/second-to-last indices.
        numbers.append(numbers[-1] + numbers[-2])
def genspiral():
    """Trace a smooth spiral: one 9-degree arc per fibonacci term."""
    global steps
    global numbers
    turtle.left(90)
    for index in range(steps):
        turtle.circle(numbers[index], 9)
def genspiralwsquare():
    """Draw a square plus a quarter-circle arc for each fibonacci term."""
    global steps
    global numbers
    turtle.left(90)
    for index in range(steps):
        side = numbers[index]
        for _ in range(4):  # the four sides of the square
            turtle.forward(side)
            turtle.left(90)
        turtle.circle(side, 90)
genfib()
genspiralwsquare()
# BUG FIX: the original wrote `turtle.update` (a bare attribute reference,
# a no-op). With tracer(0,0) above, nothing is drawn until update() runs.
turtle.update()
|
# How to deal with nested dict??
#
from collections import defaultdict
class Node(object):
    """Trie node: a value plus a mapping child-value -> child Node."""

    def __init__(self, val, parent, child=None):
        self.val = val
        self.parent = parent
        # Fresh dict per node (a shared mutable default would be a bug).
        self.child = dict() if not child else child

    def update_node(self, val):
        """Return the child node holding `val`, creating it if absent."""
        if val not in self.child:
            # The original followed the membership test with a redundant
            # self.child.get(val, new_node); the plain assignment suffices.
            self.child[val] = Node(val, self)
        return self.child[val]
class Trie(object):
    """Prefix tree over Node objects; only traversal is implemented."""

    def __init__(self, root):
        self.root = root

    def search(self):
        pass

    def insert(self):
        pass

    def draw(self):
        """Iterative DFS from the root.

        Returns the `visited` map (node value -> True) so callers can
        inspect the traversal; the original discarded it.
        """
        stack = [self.root]
        visited = {}
        while stack:
            node = stack.pop()
            visited[node.val] = True
            # BUG FIX: the original pushed the child *keys* (plain
            # characters) onto the stack instead of the child nodes,
            # so the walk never descended past the first level.
            stack.extend(node.child.values())
        return visited
class Solution(object):
    """Longest common prefix of a list of strings, computed with a trie.

    e.g. apple / application / apples share the prefix "appl".
    """

    def longestCommonPrefix(self, strs):
        """
        :type strs: List[str]
        :rtype: str
        """
        # Build the trie; a "NIL" marker terminates each word so a word
        # that is a prefix of another still stops the common-prefix walk.
        trie = Trie(Node(None, None))
        for s in strs:
            node = trie.root
            for c in s:
                node = node.update_node(c)
            node.update_node("NIL")
        # Follow the unique chain from the root; the common prefix ends at
        # the first branching point or word boundary.
        node = trie.root
        prefix = ""
        while len(node.child) == 1 and "NIL" not in node.child:
            next_val = list(node.child.keys())[0]
            if next_val is None:  # `is None`, not `== None`
                break
            prefix += next_val
            node = node.child[next_val]
        # The leftover debug print(prefix) was removed; the misleading
        # local name `suffix` was renamed to `prefix`.
        return prefix
# Quick manual check of the trie-based longest-common-prefix solution.
solver = Solution()
test_case = [
    "abcggg",
    "abcfe",
    "abcde"
]
solver.longestCommonPrefix(test_case)
|
#!/usr/bin/env python
# encoding=utf-8
# maintainer: rgaudin
import urllib
import thread
import logging
from django.http import HttpResponse, Http404
from django.shortcuts import redirect
from nosms.models import Message
from nosms.utils import process_incoming_message
logger = logging.getLogger(__name__)
def handler_get(request):
    """Bridge GET /?from=...&text=... onto the positional `handler` view."""
    valid = (request.method == 'GET'
             and 'from' in request.GET
             and 'text' in request.GET)
    if not valid:
        raise Http404(u"Oh nooozzee")
    return redirect('handler', request.GET.get('from'),
                    request.GET.get('text'))
def handler(request, identity, text):
    """Store an incoming SMS as a Message and hand it to the processor.

    Python 2 view (urllib.unquote). Returns HTTP 202 with a plain-text
    acknowledgement echoing sender and decoded text.
    """
    def _str(uni):
        # Best-effort coercion to a byte string; non-ASCII unicode input
        # falls back to UTF-8 encoding (Python 2 semantics).
        try:
            return str(uni)
        except:
            return uni.encode('utf-8')
    def _plus(str):
        # NOTE(review): shadows the builtin `str`. Restores literal plus
        # signs that arrived percent-encoded as %2B.
        return str.replace('%2B', '+')
    # In the raw URL segment '+' means space; decode the rest afterwards.
    text = urllib.unquote(_str(_plus(text.replace('+', ' '))))
    message = Message(identity=identity, text=text, \
        status=Message.STATUS_CREATED, \
        direction=Message.DIRECTION_INCOMING)
    message.save()
    # Hand off to the application-level processing pipeline.
    process_incoming_message(message)
    return HttpResponse(u"Thanks %s, the following message " \
                        "have been processed:\n%s" % (identity, text), \
                        mimetype='text/plain', status=202)
|
import tensorflow
import tensorflow.keras as k
import os
import pandas as pd
import numpy as np
import subprocess
from sklearn.linear_model import LinearRegression
from termcolor import colored
import config
import sys
import scipy
from sklearn.model_selection import train_test_split
# Enables ANSI escape-sequence handling in the Windows console so that
# termcolor output renders (harmless no-op elsewhere).
os.system('color')
#def get_data(keyword, samples, var_name, level, n_input, model_path_folder=None, normalize=True, scaler="m", rs=None):
def get_data(keyword, samples, var_name, level, n_input, model_path_folder=None, normalize=True, scaler="m", point="sobol", rs=None):
    """Load one case-study CSV, optionally normalize the target, and split train/test.

    :param keyword: "parab" | "shock" | "airf" -- selects which CSV to read.
    :param samples: number of training rows, or "all"/"All" for len(dataset)-1.
    :param var_name: name of the target column.
    :param level: discretization level encoded in the file name.
    :param n_input: the first n_input columns are used as features.
    :param model_path_folder: if set, write target statistics to InfoData.txt there.
    :param normalize: scale the target column in place.
    :param scaler: "m" = min-max, "s" = standardization (in that case min_val /
        max_val actually hold the target's mean / std).
    :param point: "sobol" or "random" sampling variant ("random" only for "parab").
    :param rs: random seed; when given a shuffled sklearn split is used,
        otherwise the first `samples` rows are train and the rest test.
    :return: (X, y, X_test, y_test, min_val, max_val)
    """
    if point != "sobol" and point != "random":
        raise ValueError("check point argument")
    # Pick the CSV for the requested case study / level.
    if keyword == "parab":
        dataset = pd.read_csv("./CaseStudies/Parabolic/Data/solution_"+point+"_deltaT_" + str(level) + ".csv", header=0, sep=",")
    elif keyword == "shock":
        dataset = pd.read_csv("./CaseStudies/ShockTube/Data/shock_tube_" + str(level) + ".csv", header=0, sep=",")
    elif keyword == "airf":
        dataset = pd.read_csv("./CaseStudies/Airfoil/Data/airfoil_data_"+str(level)+".csv", header=0, sep=",")
    else:
        raise ValueError("Chose one option between parab, shock and airf")
    if point == "random" and keyword!="parab":
        raise ValueError("Random Point available only for Projectile Motion")
    #print(dataset.head())
    if samples == "all" or samples == "All":
        samples = len(dataset)-1
    # For scaler "s" the names are misleading: min_val is the mean and
    # max_val the standard deviation of the target column.
    if scaler == "m":
        min_val = min(dataset[var_name])
        max_val = max(dataset[var_name])
    elif scaler == "s":
        min_val = dataset[var_name].mean()
        max_val = dataset[var_name].std()
    else:
        raise ValueError("Select one scaler between MinMax (m) and Standard (s)")
    # change here, don't like it
    if normalize:
        if scaler == "m":
            dataset[var_name] = (dataset[var_name] - min_val)/(max_val - min_val)
        elif scaler == "s":
            dataset[var_name] = (dataset[var_name] - min_val)/max_val
    else:
        # Identity transform so callers can always invert with the returned pair.
        min_val = 0
        max_val = 1
    #print("Mean: ",dataset[var_name].mean())
    #print("Deviation: ",dataset[var_name].std())
    print(dataset.head())
    loc_var_name = dataset.columns.get_loc(var_name)
    if rs is not None:
        X, X_test, y, y_test = train_test_split(dataset.iloc[:, :n_input].values,dataset.iloc[:, loc_var_name].values,train_size=samples,shuffle=True, random_state=rs)
    else:
        # Deterministic split: first `samples` rows train, remainder test.
        X = dataset.iloc[:samples, :n_input].values
        y = dataset.iloc[:samples, loc_var_name].values
        X_test = dataset.iloc[samples:, :n_input].values
        y_test = dataset.iloc[samples:, loc_var_name].values
    print(X)
    if model_path_folder is not None:
        # Persist mean/std of normalized and de-normalized targets for reports.
        # NOTE(review): y*(max_val-min_val)+min_val inverts min-max scaling
        # only; for scaler "s" the inverse would be y*max_val+min_val -- confirm.
        with open(model_path_folder + '/InfoData.txt', 'w') as file:
            file.write("dev_norm_train,dev_norm_test,dev_train,dev_test,mean_norm_train,mean_norm_test,mean_train,mean_test,\n")
            file.write(str(np.std(y)) + "," +
                       str(np.std(y_test)) + "," +
                       str(np.std(y*(max_val-min_val)+min_val)) + "," +
                       str(np.std(y_test*(max_val-min_val)+min_val)) + "," +
                       str(np.mean(y)) + "," +
                       str(np.mean(y_test)) + "," +
                       str(np.mean(y * (max_val - min_val) + min_val)) + "," +
                       str(np.mean(y_test * (max_val - min_val) + min_val))
                       )
    return X, y, X_test, y_test, min_val, max_val
def get_data_diff(keyword, samples, var_name, level_c, level_f, n_input, model_path_folder=None, normalize=True, scaler ="m", point="sobol", rs=None):
    """Like get_data, but the target is the fine-minus-coarse level difference.

    Reads the coarse (level_c) and fine (level_f) CSVs of the chosen case
    study, builds the column "diff_<var_name>" = fine - coarse, optionally
    normalizes it, and returns the same tuple as get_data:
    (X, y, X_test, y_test, min_val, max_val).
    """
    if point != "sobol" and point !="random":
        raise ValueError("check point argument")
    if keyword == "parab_diff":
        CaseStudy = "Parabolic"
        base = "solution_"+point+"_deltaT_"
    elif keyword == "shock_diff":
        CaseStudy = "ShockTube"
        base = "shock_tube_"
    elif keyword == "airf_diff":
        CaseStudy = "Airfoil"
        base = "airfoil_data_"
    else:
        raise ValueError("Chose one option between parab and shock")
    if point == "random" and keyword != "parab_diff":
        raise ValueError("Random Point available only for Projectile Motion")
    # Coarse (dt0) and fine (dt1) solutions; rows are assumed aligned by
    # index -- TODO confirm the two files list the same sample points.
    dataset_dt0 = pd.read_csv("./CaseStudies/" + str(CaseStudy) + "/Data/" + base + str(level_c) + ".csv", header=0, sep=",")
    dataset_dt1 = pd.read_csv("./CaseStudies/" + str(CaseStudy) + "/Data/" + base + str(level_f) + ".csv", header=0, sep=",")
    # NOTE: `dataset` aliases dataset_dt1, so the fine-level frame is
    # modified in place below.
    dataset = dataset_dt1
    new_var_name = "diff_" + var_name
    dataset[new_var_name] = (dataset_dt1[var_name] - dataset_dt0[var_name])
    dataset = dataset.drop(var_name, axis=1)
    print(dataset.head())
    # As in get_data: for scaler "s" min_val/max_val hold mean/std.
    if scaler == "m":
        min_val = min(dataset[new_var_name])
        max_val = max(dataset[new_var_name])
    elif scaler == "s":
        min_val = dataset[new_var_name].mean()
        max_val = dataset[new_var_name].std()
    else:
        raise ValueError("Select one scaler between MinMax (m) and Standard (s)")
    if normalize:
        if scaler == "m":
            dataset[new_var_name] = (dataset[new_var_name] - min_val) / (max_val - min_val)
        elif scaler == "s":
            dataset[new_var_name] = (dataset[new_var_name] - min_val) / max_val
    else:
        # Identity transform so callers can always invert with the returned pair.
        min_val = 0
        max_val = 1
    #print("Mean: ",dataset[new_var_name].mean())
    #print("Deviation: ",dataset[new_var_name].std())
    # print("min difference:", min_val)
    # print("max difference:", max_val)
    # print("Mean:", dataset[new_var_name].mean())
    # print("Dev:", dataset[new_var_name].std())
    if samples == "all" or samples == "All":
        samples = len(dataset[new_var_name])-1
    loc_var_name = dataset.columns.get_loc(new_var_name)
    if rs is not None:
        X, X_test, y, y_test = train_test_split(dataset.iloc[:, :n_input].values, dataset.iloc[:, loc_var_name].values, train_size=samples, shuffle=True, random_state=rs)
    else:
        # Deterministic split: first `samples` rows train, remainder test.
        X = dataset.iloc[:samples, :n_input].values
        y = dataset.iloc[:samples, loc_var_name].values
        X_test = dataset.iloc[samples:, :n_input].values
        y_test = dataset.iloc[samples:, loc_var_name].values
    #print(X.shape)
    #print(y.shape)
    if model_path_folder is not None:
        # Persist mean/std of normalized and de-normalized targets for reports.
        # NOTE(review): same min-max-only inversion caveat as in get_data.
        with open(model_path_folder + '/InfoData.txt', 'w') as file:
            file.write("dev_norm_train,dev_norm_test,dev_train,dev_test,mean_norm_train,mean_norm_test,mean_train,mean_test,\n")
            file.write(str(np.std(y)) + "," +
                       str(np.std(y_test)) + "," +
                       str(np.std(y*(max_val-min_val)+min_val)) + "," +
                       str(np.std(y_test*(max_val-min_val)+min_val)) + "," +
                       str(np.mean(y)) + "," +
                       str(np.mean(y_test)) + "," +
                       str(np.mean(y * (max_val - min_val) + min_val)) + "," +
                       str(np.mean(y_test * (max_val - min_val) + min_val))
                       )
    return X, y, X_test, y_test, min_val, max_val
'''
elif keyword == "airf_diff":
CaseStudy = "Airfoil"
var_name_1 = var_name
dataset_1 = pd.read_csv("./CaseStudies/" + str(CaseStudy) + "/Data/airfoil_level_" + str(level_c) + ".csv", header=0, sep=",", index_col=0)
dataset_2 = pd.read_csv("./CaseStudies/" + str(CaseStudy) + "/Data/airfoil_level_" + str(level_f) + ".csv", header=0, sep=",", index_col=0)
dataset_finest = pd.read_csv("./CaseStudies/" + str(CaseStudy) + "/Data/airfoil_level_4.csv", header=0, sep=",", index_col=0)
mean_value = np.mean((dataset_finest[var_name]).values ** 2)
loc_var_name = dataset_1.columns.get_loc(var_name)
filtered_1 = dataset_1.loc[dataset_2.index]
filtered_1 = filtered_1.dropna()
filtered_2 = dataset_2.loc[filtered_1.index]
filtered_2 = filtered_2.dropna()
vec_diff = filtered_2.iloc[:, loc_var_name] - filtered_1.iloc[:, loc_var_name]
relative_var_diff_f = np.var(vec_diff) / np.var(filtered_2.iloc[:, loc_var_name])
relative_var_diff_c = np.var(vec_diff) / np.var(filtered_1.iloc[:, loc_var_name])
relative_var_diff_finest = np.var(vec_diff) / np.var(dataset_finest.iloc[:, loc_var_name])
realtive_var_mean = np.var(vec_diff) / mean_value
dataset = dataset_2.loc[vec_diff.index]
dataset = dataset.iloc[:, :6]
var_name = "diff"
dataset[var_name] = vec_diff.values
min_val = min(dataset[var_name])
max_val = max(dataset[var_name])
relative_mean_diff = (np.mean((dataset["diff"]).values ** 2) / mean_value) ** (1 / 2)
print("mean difference:", relative_mean_diff)
print("min difference:", min_val)
print("max difference:", max_val)
print("variances:", relative_var_diff_c, relative_var_diff_f, relative_var_diff_finest, realtive_var_mean)
dataset[var_name] = (dataset[var_name] - min_val) / (max_val - min_val)
# with open("Files/AirfoilData/Info_200_new.txt", "a") as file:
# file.write("\n" + str(dataset[var_name].var()))
# with open('./Models/GPModels_SL/MinMax_' + str(samples) + '.txt', 'w') as file:
# file.write(str(min_val) + "," + str(max_val))
X = dataset.iloc[:samples, :6].values
y = dataset.iloc[:samples, dataset.columns.get_loc(var_name)].values
X_test = dataset.iloc[samples:, :6].values
y_test = dataset.iloc[samples:, dataset.columns.get_loc(var_name)].values
with open('./Files/AirfoilData/Info_' + str(samples) + var_name_1 + '.txt', 'w') as file:
file.write("Train_Sample,rel_var_finest,mean_relative\n")
file.write(str(samples)
+ "," + str(relative_var_diff_finest)
+ "," + str(relative_mean_diff)
)
'''
def load_data(folder_name):
    """Reconstruct a saved Keras model (architecture + weights) from a folder.

    Reads Information.csv for optimizer/loss/learning-rate, model.json for
    the architecture and model.h5 for the weights, then compiles and
    returns the model.
    """
    base = folder_name + os.sep
    info = pd.read_csv(base + "Information.csv", sep=",", header=0)
    optimizer = info.optimizer[0]
    loss = info.loss_function[0]
    learning_rate = info.learning_rate[0]
    # Only "adam" is mapped to a concrete optimizer object; any other value
    # is passed through to compile() as-is.
    if optimizer == "adam":
        optimizer = tensorflow.train.AdamOptimizer(learning_rate=learning_rate)
    with open(base + "model.json") as json_file:
        model = k.models.model_from_json(json_file.read())
    model.load_weights(base + "model.h5")
    model.compile(optimizer=optimizer, loss=loss)
    return model
def save_model(best_model, information, mean_prediction_error, std_prediction_error, name="Network"):
    """Persist a model under Models/<name>[_k], picking the first free folder.

    Writes model.json (architecture), model.h5 (weights), Information.csv
    (the `information` dict as one header row + one value row) and
    Scores.csv (the two error figures).
    """
    # The original reused the loop counter `i` both as the folder suffix and
    # as the CSV-writing loop index, which only worked because of the
    # surrounding break; a dedicated counter removes that fragility.
    attempt = 0
    folder_name = name
    while True:
        if os.path.exists("Models/" + folder_name):
            # Folder taken: try name_0, name_1, ... (same sequence as before).
            folder_name = name + "_" + str(attempt)
            attempt += 1
            continue
        os.makedirs("Models/" + folder_name)
        base = "Models" + os.sep + folder_name + os.sep
        # Save architecture.
        with open(base + "model.json", "w") as json_file:
            json_file.write(best_model.to_json())
        # Save weights.
        best_model.save_weights(base + "model.h5")
        # Save run information: comma-joined header and value rows
        # (replaces the original manual write-first-then-loop pattern).
        with open(base + "Information.csv", "w") as w:
            w.write(",".join(information.keys()))
            w.write("\n")
            w.write(",".join(str(v) for v in information.values()))
        with open(base + "Scores.csv", "w") as w:
            w.write("mean_prediction_error" + ":" + str(mean_prediction_error) + "\n")
            w.write("std_prediction_error" + ":" + str(std_prediction_error) + "\n")
        break
def compute_mean_prediction_error(data, predicted_data, order):
    """Relative mean prediction error in the L-`order` sense (fraction, not %)."""
    reference = np.mean(np.abs(data) ** order)
    deviation = np.mean(np.abs(data - predicted_data) ** order)
    return (deviation / reference) ** (1.0 / order)
def compute_prediction_error_variance(data, predicted_data, order):
    """Standard deviation of the per-sample L-`order` errors, relative to the data's mean magnitude."""
    reference = np.mean(np.abs(data) ** order)
    spread = np.std(np.abs(data - predicted_data) ** order)
    return spread / reference
def compute_p_relative_norm(data, predicted_data, order):
    """Relative error ||data - predicted||_order / ||data||_order."""
    return np.linalg.norm(data - predicted_data, order) / np.linalg.norm(data, order)
def set_model_folder_name(keyword, variable_name, level_c, level_f, level_single, samples):
    """Compose the model folder name: <var>_<levels>_<samples>_<keyword>.

    "diff" keywords encode both levels (e.g. "01"); other keywords encode
    only the single level.
    """
    if "diff" in keyword:
        level_tag = str(level_c) + str(level_f)
    else:
        level_tag = str(level_single)
    return "_".join([variable_name, level_tag, str(samples), keyword])
def compute_time(keyword, level_c, level_f, level_single, samples):
    """Total simulation time for `samples` runs, from the per-level timing table.

    For "_diff" keywords the coarse and fine level times are summed per
    sample; otherwise the single level's time is used.
    """
    # Checked in this order on purpose (substring match mirrors the
    # original if/elif chain).
    timing_tables = (
        ("parab", "CaseStudies/Parabolic/Data/ComputationalTime.txt"),
        ("airf", "CaseStudies/Airfoil/Data/time_level.txt"),
        ("shock", "CaseStudies/ShockTube/Data/ComputationalTime.txt"),
        ("burg", "CaseStudies/Burger/Data/ComputationalTime.txt"),
    )
    for tag, path in timing_tables:
        if tag in keyword:
            table_time = pd.read_csv(path, header=0, sep=",")
            break
    else:
        raise ValueError()
    per_level = table_time["comp_time"].values
    if "_diff" in keyword:
        return (per_level[level_c] + per_level[level_f]) * samples
    return per_level[level_single] * samples
def call_GaussianProcess(key_word, var_name, sample_coarsest, lev_coarsest, lev_c, lev_f, string_norm, scaler, point, cluster="true"):
    """Launch GaussianProcess.py with the given positional arguments.

    On Linux the command runs through os.system (prefixed with `bsub` when
    cluster == "true"); on Windows it runs through the PYTHON36 interpreter
    via subprocess.
    """
    # Positional order matters: the receiving script parses by index.
    arguments = [str(v) for v in (key_word, var_name, sample_coarsest,
                                  lev_coarsest, lev_c, lev_f,
                                  string_norm, scaler, point)]
    if sys.platform == "linux" or sys.platform == "linux2":
        if cluster == "true":
            command = "bsub python3 GaussianProcess.py "
        else:
            command = "python3 GaussianProcess.py "
        for argument in arguments:
            command = command + " " + argument
        os.system(command)
    elif sys.platform == "win32":
        interpreter = os.environ['PYTHON36']
        worker = subprocess.Popen([interpreter, "GaussianProcess.py"] + arguments)
        worker.wait()
def call_NeuralNetwork_cluster(key_word, n_sample, loss_func, folder_path, var_name, lev_c, lev_f, lev_coarsest, string_norm, validation_size, selection_method, scaler, setup, point, cluster="true"):
    """Launch NetworkSingleConf_tesr.py for one hyperparameter setup.

    On Linux the command runs through os.system (prefixed with `bsub` when
    cluster == "true"); on Windows it runs through the PYTHON36 interpreter
    via subprocess. The assembled Linux command is echoed before launch.
    """
    # Positional order matters: keyword/sample/loss first, then every value
    # of `setup`, then the remaining configuration arguments.
    leading = [key_word, n_sample, loss_func]
    trailing = [folder_path, var_name, lev_c, lev_f, lev_coarsest,
                string_norm, validation_size, selection_method, scaler, point]
    arguments = ([str(v) for v in leading]
                 + [str(v) for v in setup]
                 + [str(v) for v in trailing])
    if sys.platform == "linux" or sys.platform == "linux2":
        if cluster == "true":
            command = "bsub python3 NetworkSingleConf_tesr.py "
        else:
            command = "python3 NetworkSingleConf_tesr.py "
        for argument in arguments:
            command = command + " " + argument
        print(command)
        os.system(command)
    elif sys.platform == "win32":
        interpreter = os.environ['PYTHON36']
        worker = subprocess.Popen([interpreter, "NetworkSingleConf_tesr.py"] + arguments)
        worker.wait()
def linear_regression(keyword, variable_name, sample, level_c, level_f, level_single, n_input, norm, scaler, point):
    """Fit an OLS baseline on the requested dataset and print/return relative errors (%).

    Returns (mean_error, stdv_error, fitted_regressor).
    """
    if "diff" in keyword:
        X, y, X_test, y_test, min_val, max_val = get_data_diff(keyword, sample, variable_name, level_c, level_f, n_input, normalize=norm, scaler=scaler, point=point)
    else:
        X, y, X_test, y_test, min_val, max_val = get_data(keyword, sample, variable_name, level_single, n_input, normalize=norm, scaler=scaler, point=point)
    reg = LinearRegression().fit(X, y)
    # De-normalize both the truth and the prediction before scoring.
    span = max_val - min_val
    y_true = y_test * span + min_val
    y_hat = reg.predict(X_test) * span + min_val
    mean_error = compute_mean_prediction_error(y_true, y_hat, 2) * 100
    stdv_error = compute_prediction_error_variance(y_true, y_hat, 2) * 100
    print(colored("\nEvaluate linearity data:", "green", attrs=['bold']))
    print(str(mean_error) + "%")
    print(str(stdv_error) + "%")
    return mean_error, stdv_error, reg
def get_network_conf(keyword, variable_name, level_single, level_diff_c, level_diff_f):
    """Select the hyperparameter grid from `config` for a case study.

    The chain is kept explicit on purpose: each branch touches exactly one
    `config.parameter_grid_*` attribute, so a missing attribute only fails
    for the combination that needs it.

    :param keyword: case-study key ("parab", "shock", "airf" or their "_diff" variants).
    :param variable_name: for airfoil cases, "Lift" or "Drag".
    :param level_single: unused here; kept for a uniform call signature.
    :param level_diff_c: coarse level (airfoil Drag difference grids only).
    :param level_diff_f: fine level (airfoil Drag difference grids only).
    :raises ValueError: for unknown keywords or airfoil variable names.
    """
    if keyword == "parab":
        param_grid = config.parameter_grid_parab
    elif keyword == "parab_diff":
        param_grid = config.parameter_grid_parab_diff
    elif keyword == "shock":
        param_grid = config.parameter_grid_shock
    elif keyword == "shock_diff":
        param_grid = config.parameter_grid_shock_diff
    elif keyword == "airf":
        if variable_name == "Lift":
            param_grid = config.parameter_grid_airf
        elif variable_name == "Drag":
            param_grid = config.parameter_grid_airf_drag
        else:
            raise ValueError()
    elif keyword == "airf_diff":
        # NOTE(review): if variable_name is neither "Lift" nor "Drag" here,
        # param_grid is never assigned and the return raises
        # UnboundLocalError instead of ValueError -- confirm intended.
        if variable_name == "Lift":
            param_grid = config.parameter_grid_airf_diff
        elif variable_name == "Drag":
            # One dedicated grid per (coarse, fine) level pair.
            if level_diff_c == 0 and level_diff_f == 1:
                param_grid = config.parameter_grid_airf_diff_drag_01
            elif level_diff_c == 1 and level_diff_f == 2:
                param_grid = config.parameter_grid_airf_diff_drag_12
            elif level_diff_c == 2 and level_diff_f == 3:
                param_grid = config.parameter_grid_airf_diff_drag_23
            elif level_diff_c == 3 and level_diff_f == 4:
                param_grid = config.parameter_grid_airf_diff_drag_34
            elif level_diff_c == 0 and level_diff_f == 2:
                param_grid = config.parameter_grid_airf_diff_drag_02
            elif level_diff_c == 2 and level_diff_f == 4:
                param_grid = config.parameter_grid_airf_diff_drag_24
            elif level_diff_c == 0 and level_diff_f == 4:
                param_grid = config.parameter_grid_airf_diff_drag_04
            elif level_diff_c == 0 and level_diff_f == 3:
                param_grid = config.parameter_grid_airf_diff_drag_03
            else:
                # Fallback for unlisted level pairs: reuse the 0-1 grid.
                param_grid = config.parameter_grid_airf_diff_drag_01
    else:
        raise ValueError()
    return param_grid
def compute_wasserstein_distance(y, y_pred):
    """Earth mover's (1-Wasserstein) distance between the two empirical distributions."""
    return scipy.stats.wasserstein_distance(y, y_pred)
def scale_inverse_data(data, scaler, min_val, max_val):
    """Undo target normalization.

    "m": inverse min-max scaling. "s": inverse standardization, where
    min_val holds the mean and max_val the std. Any other scaler value
    passes the data through unchanged.
    """
    if scaler == "m":
        return data * (max_val - min_val) + min_val
    if scaler == "s":
        return data * (max_val) + min_val
    return data
def compute_mean_depth(levels):
    """Average drop between consecutive entries of `levels`.

    Raises ZeroDivisionError for fewer than two entries (as before).
    """
    drops = [levels[i] - levels[i + 1] for i in range(len(levels) - 1)]
    return sum(drops) / len(drops)
def ensemble_model(y_models, y_true):
    """Fit a linear blend of per-model predictions onto the ground truth.

    Echoes the learned blend coefficients and returns the fitted regressor.
    """
    blender = LinearRegression()
    blender.fit(y_models, y_true)
    print(blender.coef_)
    return blender
|
from time import sleep

# Small interactive greeting script (Python 2: raw_input).
# BUG FIX: the greeting misspelled "anonymous" as "annonymous".
print('Hello anonymous user, let me get to know you.')
sleep(1.5)
name = raw_input('What is your name? ')
print('Hello ' + name + '!')
sleep(1)
print('My name is a secret')
sleep(1)
def askpermission(question):
    """Keep prompting until the user types 'yes' (True) or 'no' (False).

    Any other answer re-asks the same question (loop instead of the
    original recursion; the prompts are identical).
    """
    while True:
        answer = raw_input(question).lower()
        if answer == 'yes':
            return True
        if answer == 'no':
            return False
        print('Sorry I didn\'t quite understand that!')
# Age is optional: only ask when the user agrees.
if not askpermission('Would you mind telling me your age? '):
    print('Thats ok!')
else:
    print('Thank you!')
    age = raw_input('So, Whats your age? ')
    sleep(1)
    print('Once again, thank you! Your '+ age)
|
# Greedy coin change: read n denominations (one per line) and an amount k,
# then print the number of coins used.
# NOTE(review): assumes the denominations are listed in ascending order and
# that k is always reachable (e.g. a 1-unit coin exists); otherwise the
# index walks below 0 -- confirm against the problem statement.
n, k = map(int, input().split())
coins = [int(input()) for _ in range(n)]
total = 0
# Walk denominations from the last (largest) down, taking as many of each
# coin as still fit into the remaining amount k.
while True:
    n -= 1
    if k == 0:
        break
    if coins[n] <= k:
        total += k//coins[n]  # how many of this coin fit
        k -= (k//coins[n])*coins[n]  # remaining amount after using them
print(total)
#Aaina Vannan
#12/4/18
#holds html code
<html>
<head>
{% if title %} #when variable is available
<title>{{ title }} - Microblog</title>
{% else %} #when variable is not available
<title>Welcome to Microblog!</title>
{% endif %}
</head>
<body>
<div>Microblog:
<a href = "/index">Home</a></div>
<a href = "/login"> login </a>
{% block content %}{% endblock %}
</body>
</html>
#stuff in double brackets is an HTML template placeholder that passes in user values
|
import pytest
from easyci.hooks.hooks_manager import get_hook, HookNotFoundError
def test_get_hook():
    """The known 'pre-commit' hook resolves; unknown names raise."""
    hook = get_hook('pre-commit')
    assert hook
    with pytest.raises(HookNotFoundError):
        get_hook('doesnotexist')
|
'''
์๋ฃํ
type( a ) # type( ๋ณ์๋ช
) : ์๋ฃํ
isinstance( 42, int ) # isinstance( ๊ฐ, ์๋ฃํ ) : ์๋ฃํ ๊ฒ์ฌ
ํด๋์ค
ํจ์๋ ๋ณ์๋ค์ ๋ชจ์ ๋์ ์งํฉ์ฒด
์ธ์คํด์ค
ํด๋์ค์ ์ํด ์์ฑ๋ ๊ฐ์ฒด
์ธ์คํด์ค ๊ฐ์ ์์ ์ ๊ฐ์ ๊ฐ์ง๊ณ ์๋ค.
ํด๋์ค ์ ์ธ
class Human( ):
'''์ฌ๋'''
์ธ์คํด์ค ์์ฑ
person1 = Human( )
person2 = Human( )
ํด๋์ค์ ์ธ์คํด์ค๋ฅผ ์ด์ฉํ๋ฉด ๋ฐ์ดํฐ์ ์ฝ๋๋ฅผ ์ฌ๋์ด ์ดํดํ๊ธฐ ์ฝ๊ฒ ํฌ์ฅํ ์ ์๋ค.
๋ชจ๋ธ๋ง(modeling)
ํด๋์ค๋ก ํ์ค์ ๊ฐ๋
์ ํํํ๋ ๊ฒ
๋ฉ์๋(Method)
๋ฉ์๋๋ ํจ์์ ๋น์ทํ๋ค.
ํด๋์ค์ ๋ฌถ์ฌ์ ํด๋์ค์ ์ธ์คํด์ค์ ๊ด๊ณ๋๋ ์ผ์ ํ๋ ํจ์
ํด๋์ค ๋ด๋ถ์ ํจ์๋ฅผ ํฌํจ์ํจ ์
class Human( ):
'''์ธ๊ฐ'''
def create( name, weight ): # ๋ค์ ๊ฐ์์์ ์์ธํ ์ค๋ช
person = Human()
person.name = name
person.weight = weight
return person
def eat( self ):
self.weight += 0.1
print("{}๊ฐ ๋จน์ด์ {}kg์ด ๋์์ต๋๋ค".format(self.name, self.weight))
def walk( self ):
self.weight -= 0.1
print("{}๊ฐ ๊ฑธ์ด์ {}kg์ด ๋์์ต๋๋ค".format(self.name, self.weight))
person = Human.create("์ฒ ์", 60.5)
person.eat()
self
๋ฉ์๋์ ์ฒซ๋ฒ์งธ ์ธ์
์ธ์คํด์ค์ ๋งค๊ฐ๋ณ์๋ฅผ ์ ๋ฌ ํ ๋๋ self ๋งค๊ฐ๋ณ์๋ ์๋ตํ๊ณ ์ ๋ฌ
์ด๊ธฐํ ํจ์
__init__ : ์ธ์คํด์ค๋ฅผ ๋ง๋ค ๋ ์คํ๋๋ ํจ์
๋ฌธ์์ดํ ํจ์
__str__ : ์ธ์คํด์ค ์์ฒด๋ฅผ ์ถ๋ ฅ ํ ๋์ ํ์์ ์ง์ ํด์ฃผ๋ ํจ์
class Human( ):
def __init__( self, name, weight ):
self.name = name
self.weight = weight
def __str__( self ):
return "{} ( ๋ชธ๋ฌด๊ฒ {}kg )".format( self.name, self.weight )
person = Human( "์ฌ๋", 60.5 ) # ์ด๊ธฐํ ํจ์ ์ฌ์ฉ
print( person ) # ๋ฌธ์์ดํ ํจ์ ์ฌ์ฉ
์์(Inheritance)
์์ํ๋ ํด๋์ค๋ฅผ ๋ถ๋ชจ ํด๋์ค
์์๋ฐ๋ ํด๋์ค๋ฅผ ์์ ํด๋์ค
์์ ํด๋์ค๊ฐ ๋ถ๋ชจ ํด๋์ค์ ๋ด์ฉ์ ๊ฐ์ ธ๋ค ์ธ ์ ์๋ ๊ฒ
class Animal( ):
def walk( self ):
print( "๊ฑท๋๋ค" )
def eat( self ):
print( "๋จน๋๋ค" )
class Human( Animal ):
def wave( self ):
print( "์์ ํ๋ ๋ค" )
class Dog( Animal ):
def wag( self ):
print( "๊ผฌ๋ฆฌ๋ฅผ ํ๋ ๋ค" )
class Animal( ):
def greet( self ):
print( "์ธ์ฌํ๋ค" )
class Human( Animal ):
def greet( self ):
print( "์์ ํ๋ ๋ค" )
class Dog( Animal ):
def greet( self ):
print( "๊ผฌ๋ฆฌ๋ฅผ ํ๋ ๋ค" )
super()
์์ํด๋์ค์์ ๋ถ๋ชจํด๋์ค์ ๋ด์ฉ์ ์ฌ์ฉํ๊ณ ์ถ์ ๊ฒฝ์ฐ
super().๋ถ๋ชจํด๋์ค๋ด์ฉ
class Animal( ):
def __init__( self, name ):
self.name = name
class Human( Animal ):
def __init__( self, name, hand ):
super().__init__( name ) # ๋ถ๋ชจํด๋์ค์ __init__ ๋ฉ์๋ ํธ์ถ
self.hand = hand
person = Human( "์ฌ๋", "์ค๋ฅธ์" )
์์ธ ์ ์
์ฌ์ฉ์๊ฐ ์ง์ ์์ธ์ฒ๋ฆฌ๋ฅผ ํ๋ฉด ์ฝ๋์ ์ง๊ด์ฑ์ ๋์ผ ์ ์๋ค.
ํ์ผ์ ํ๋ ๋ง๋ค์ด ์์ธ๋ฅผ ์ ์
Exception ํด๋์ค๋ฅผ ์์๋ฐ์ ๋ง๋ ๋ค
try:
sign_up( )
except BadUserName:
print( "์ด๋ฆ์ผ๋ก ์ฌ์ฉํ ์ ์๋ ์
๋ ฅ" )
except PasswordNotMatched:
print( "์
๋ ฅํ ํจ์ค์๋ ๋ถ์ผ์น")
'''
|
import io
import os
from google.cloud import vision_v1
from google.cloud.vision_v1 import types
import pandas as pd
# Point the Google Cloud client at the service-account credentials file.
os.environ['GOOGLE_APPLICATION_CREDENTIALS']=r'ENTER JSON FILE HERE.json'
# Client for the Vision image-annotation API.
client = vision_v1.ImageAnnotatorClient()
file_name = os.path.abspath('ENTER IMAGE NAME')
with io.open(file_name, 'rb') as image_file:
    content = image_file.read()
# Construct an image instance from the raw bytes.
image = vision_v1.types.Image(content=content)
"""
# or send the image url
image = vision.types.Image()
image.source.image_uri = 'https://edu.pngfacts.com/uploads/1/1/3/2/11320972/grade-10-english_orig.png'
"""
# Run OCR; returns a TextAnnotation per detected region.
response = client.text_detection(image=image)
texts = response.text_annotations
# BUG FIX / modernization: DataFrame.append was deprecated in pandas 1.4
# and removed in 2.0, and appending row-by-row is quadratic; build all
# rows first and construct the frame once.
rows = [{'locale': text.locale, 'description': text.description}
        for text in texts]
df = pd.DataFrame(rows, columns=['locale', 'description'])
## Output conversion here: entry 0 is the full detected text block.
print(df['description'][0])
'''
with io.open(file_name, 'rb') as image_file:
content = image_file.read()
image = types.Image(content=content)
#response = client.label_detection(image=image)
response = client.text_detection(image=image)
labels = response.label_annotations
for label in labels:
print(label)
'''
|
#! /usr/bin/env python
# utils/sorting.py
"""This module provides common sorting functionality for a variety of structures.
"""
import os, os.path
import files
def by_size(paths, reverse=False):
    """Sorts a list of files/folders by their sizes.

    Returns only the paths, ascending by size (descending when `reverse`).
    """
    # The original used a tuple-parameter lambda (`lambda (f, s): s`),
    # Python-2-only syntax removed in Python 3; indexing the pair behaves
    # identically and runs on either interpreter.
    sized = sorted([(path, files.getsize(path)) for path in paths],
                   key=lambda pair: pair[1], reverse=reverse)
    return [filename for filename, _size in sized]
def dict_by_value(adict, reverse=False):
    """Returns the entries of a dictionary as list of key-value tuples, sorted by value.

    Ties on the value are broken by the key, mirroring the standard
    sorting order of nltk.FreqDist.
    """
    # The original used dict.iteritems() and a tuple-parameter lambda, both
    # Python-2-only; .items() plus indexing is equivalent and portable.
    return sorted(adict.items(), key=lambda kv: (kv[1], kv[0]), reverse=reverse)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 10 22:43:07 2019
@author: larry1285
"""
import keras
import numpy as np
np.random.seed(1337)
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense,Activation,Convolution2D, MaxPooling2D, Flatten
from keras.optimizers import Adam
(X_train, y_train), (X_test,y_test) = mnist.load_data()
print(X_train.shape)
print("hi")
# Add the single grayscale channel: (n, 1, 28, 28), channels-first layout.
X_train = X_train.reshape(-1, 1,28, 28)
# BUG FIX: the original did `X_test = X_train.reshape(...)`, silently
# replacing the test set with training images, so the evaluation below
# measured training performance.
X_test = X_test.reshape(-1, 1,28, 28)
# One-hot encode the digit labels.
y_train=np_utils.to_categorical(y_train, num_classes=10)
y_test=np_utils.to_categorical(y_test, num_classes=10)
print(X_train.shape)
model = Sequential()
# Conv layer 1 -> output shape (32, 28, 28).
# NOTE(review): nb_filter/nb_row/nb_col/border_mode/nb_epoch are Keras 1.x
# argument names (renamed to filters/kernel_size/padding/epochs in Keras 2).
model.add(Convolution2D(
    nb_filter=32,
    nb_row=5,
    nb_col=5,
    border_mode='same',  #padding method
    input_shape=(1,  # channels
                 28,28),  #height & width
))
model.add(Activation('relu'))
# Pooling layer 1 (max pooling) -> output shape (32, 14, 14)
model.add(MaxPooling2D(
    pool_size=(2, 2),
    strides=(2, 2),
    border_mode='same',  # padding method
))
# Conv layer 2 -> output shape (64, 14, 14)
model.add(Convolution2D(64,5,5,border_mode='same'))
model.add(Activation('relu'))
# Pooling layer 2 (max pooling) -> output shape (64, 7, 7)
model.add(MaxPooling2D(pool_size=(2,2),border_mode='same'))
# Fully connected layer 1: input 64 * 7 * 7 = 3136, output 1024
model.add(Flatten())
model.add(Dense(1024))
model.add(Activation('relu'))
# Fully connected layer 2: 10 outputs, one per digit class
model.add(Dense(10))
model.add(Activation('softmax'))
# Another way to define your optimizer
adam=Adam(lr=1e-4)
# Add metrics so evaluate() also reports accuracy.
model.compile(optimizer=adam,
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(X_train, y_train, nb_epoch=1, batch_size=32)
loss, accuracy= model.evaluate(X_test,y_test)
print("test loss:",loss)
# BUG FIX: the second line labeled the accuracy value "test loss".
print("test accuracy:",accuracy)
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
--- Part Two ---
During the second Go / No Go poll, the Elf in charge of the Rocket Equation Double-Checker
stops the launch sequence. Apparently, you forgot to include additional fuel for the fuel
you just added.
Fuel itself requires fuel just like a module - take its mass, divide by three,
round down, and subtract 2. However, that fuel also requires fuel, and that fuel
requires fuel, and so on. Any mass that would require negative fuel should instead be
treated as if it requires zero fuel; the remaining mass, if any, is instead handled by
wishing really hard, which has no mass and is outside the scope of this calculation.
So, for each module mass, calculate its fuel and add it to the total. Then, treat the fuel
amount you just calculated as the input mass and repeat the process, continuing until
a fuel requirement is zero or negative. For example:
- A module of mass 14 requires 2 fuel. This fuel requires no further fuel (2
divided by 3 and rounded down is 0, which would call for a negative fuel),
so the total fuel required is still just 2.
- At first, a module of mass 1969 requires 654 fuel. Then, this fuel requires 216
more fuel (654 / 3 - 2). 216 then requires 70 more fuel, which requires 21 fuel,
which requires 5 fuel, which requires no further fuel. So, the total fuel
required for a module of mass 1969 is 654 + 216 + 70 + 21 + 5 = 966.
- The fuel required by a module of mass 100756 and its fuel is: 33583 + 11192 +
3728 + 1240 + 411 + 135 + 43 + 12 + 2 = 50346.
What is the sum of the fuel requirements for all of the modules on your spacecraft when
also taking into account the mass of the added fuel? (Calculate the fuel requirements
for each module separately, then add them all up at the end.)
"""
import dataclasses
import pathlib
import typic
from aoc.util.helpers import timer
from aoc.day1.part1 import Module, fuel_counter_upper, PositiveInt, INPUT1
DIR = pathlib.Path(__file__).parent
@typic.al
@dataclasses.dataclass
class Module(Module):
    # Extends part1's Module so that `fuel` also accounts for the mass of
    # the fuel itself (part-two rules).
    @typic.cached_property
    def fuel(self) -> PositiveInt:
        """Total fuel for this module, including fuel-for-fuel.

        Repeatedly applies mass // 3 - 2 to the previous fuel amount,
        clamping negative requirements to zero, and sums the series.
        """
        remainder = total = int(self.mass / 3) - 2
        while remainder:
            value = int(remainder / 3) - 2
            # Negative requirements count as zero, which also ends the loop.
            remainder = value if value > 0 else 0
            total += remainder
        return PositiveInt(total)
@timer
def get_total_fuel():
    """Sum the recursive fuel requirement of every module listed in INPUT1."""
    return fuel_counter_upper(*(Module(x) for x in INPUT1.read_text().splitlines()))
# Script entry point: print the puzzle answer.
if __name__ == "__main__":
    print("Day 1, Part 2:", f"Total Fuel: {get_total_fuel()}", sep="\n")
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
import argparse
import re
import itertools
from time import sleep
__author__ = 'n30'
class WracostArgs():
    """Parse and validate command-line arguments for the web race-condition
    and stress tester (Python 2 script). Invalid argument combinations
    abort the process via exit()."""

    def __init__(self): # Initial checks
        self.parser = argparse.ArgumentParser(description="Web Race Condition and Stress Tester")
        self.args = self.get_args()
        # Require at least one operating mode: fixed thread count, or
        # param/payload permutation mode.
        if not (self.args.threads or self.args.params or self.args.payloads):
            exit("[E] Please specify either --threads or --param/--payloads arguments")
        # Prevent usage of threads with params/payloads
        if (self.args.threads and (self.args.params or self.args.payloads)):
            """
            These args are incompatible because the threads specify how many concurrent requests are going to be made
            And the params/payloads also specify how many requests are going to be made depending on permutations
            """
            print "[-] Either specify the --threads or the --params/payloads arguments. Not both."
            exit()
        # Normalize the inline GET string ("?a=1&b=2") into a dict.
        if (self.args.getreq):
            self.args.getreq = self.parse_param_inline()
        else:
            self.args.getreq = {}
        # Prevent usage of params without payloads
        if (bool(self.args.payloads) ^ bool(self.args.params)): #XOR
            print "[-] Can't use --params without --payloads argument or viceversa"
            exit()
        elif (self.args.payloads and self.args.getreq):
            # Duplicated names: payload values win over inline GET params.
            print \
                "[W] the params with the same name specified in both payloads and getreq arguments will" \
                " be overwriten with the values in the \"payloads\" arguments."
        # Proxy parsing
        if (self.args.proxy):
            self.args.proxy = self.parse_proxy()
        # Verbosity checks
        if (self.args.v >= 2):
            print "[i] Consider piping output to a file. This displays sourcecode of the request... [Ctrl-C] to stop."
            sleep(2)
        # Parse headers and store as dictionary
        # NOTE(review): split(":") yields >2 fields when a header value
        # itself contains ':' (e.g. a URL) — confirm intended inputs.
        if (self.args.headers):
            self.args.headers = dict([header.split(":") for header in self.args.headers])

    def get_args(self):
        """Define the CLI options and return the parsed argparse namespace."""
        self.parser.add_argument("--auto", action="store_true", default=False,
            help="Launches the attack automatically without prompting the user.")
        self.parser.add_argument("-f", "--forceurl", action="store_true", default=False,
            help="Force payload to be sent within the url as in a GET request")
        self.parser.add_argument("-t", "--threads", type=int,
            help="Number of threads/connections to run. Can't be used with --params/payloads args.")
        self.parser.add_argument("-g", "--getreq", type=str, default=None,
            help="Params specified in a GET request format: ?a=1&b=2&c=3. NOTE: If used with the params/payload arguments "\
                 "the params that have the same name will be replaced with the values in the \"payloads\" arguments.")
        self.parser.add_argument("-p", "--params", type=str, nargs="+", default=None,
            help="Params to inject values into. Can't be used with --threads args.")
        self.parser.add_argument("-y", "--payloads", type=str, nargs="+", default=None,
            help="Values for the params - Example: -p foo bar -y 0:intofoo 0:intofoo2 1:intobar. This will make 2 requests"\
                 " making permutations with the parameters until all payloads are used for that parameter.")
        self.parser.add_argument("-H", "--headers", type=str, nargs="+", default=None,
            help="Custom headers to be added. --headers \"User-Agent:Mozilla/5.0\" \"X-Forwarded-For:127.0.0.1\"")
        self.parser.add_argument("--cfile",
            help="Load cookie from specified CFILE file. COOKIE FILE FORMAT: this=is;a=valid;for=mat;")
        self.parser.add_argument("-x", "--proxy", type=str,
            help="Proxy to use specified by: Protocol:http://IP:PORT. Example: https:http://user:pass@192.168.0.1."
                 "See the 'requests' library docs. on proxies for further info.")
        self.parser.add_argument("-v", action="count", default=0,
            help="Be verbose. -v shows headers and params sent. -vv like -v plus outputs the sourcecode from the request")
        self.parser.add_argument("url",
            help="Url to test.")
        self.parser.add_argument("method",
            help="Request method (http://www.w3.org/Protocols/HTTP/Methods.html).")
        return self.parser.parse_args()

    def get_params_dict(self):
        """Generate one params dict per request, cycling each param's
        payload list independently (see TODO examples below).

        Payloads are given as "INDEX:VALUE" where INDEX refers to the
        position of the target param in --params. Malformed payloads
        abort via exit().
        """
        # TODO: Make this work -> --params foo bar --payloads 0:a 0:aa 0:aaa 1:b 1:bb 2:c 2:cc 2:ccc
        # TODO: First result -> { foo : 'a', bar : 'b' }
        # TODO: Second -> { foo : 'aa', bar : 'bb' }
        # TODO: Third -> { foo : 'aaa', bar : 'b' }
        matchdict = {}        # param index -> list of payload values
        numparamrepeat = {}   # param index -> number of payloads supplied
        for payload in self.args.payloads:
            try:
                match = re.search(r'(^\d+):(.*)',payload)
                # AttributeError from match.group() means the payload did
                # not match the "INDEX:VALUE" shape.
                num_match = int(match.group(1))
                payl_match = match.group(2)
                if num_match in numparamrepeat:
                    numparamrepeat[num_match] = 1+numparamrepeat[num_match]
                else:
                    numparamrepeat[num_match] = 1
                if num_match not in matchdict:
                    matchdict[num_match] = []
                matchdict[num_match].append(payl_match)
            except AttributeError:
                print '[-] Wrong payload format:', payload
                exit()
        # Emit as many dicts as the longest payload list; shorter lists
        # wrap around via modulo.
        for i in range(max(numparamrepeat.values())):
            paramsdict = {}
            # Start from the inline GET params, then overwrite with payloads.
            paramsdict.update(self.args.getreq)
            for j, key in enumerate(self.args.params):
                if j < len(matchdict):
                    paramsdict[key] = matchdict[j][i%numparamrepeat[j]]
                else:
                    print "[W] WARNING: No value(s) given for \"%s\". Add --params %d:VALUE" % (key, j)
            yield paramsdict

    def parse_param_inline(self):
        """Parse the inline GET query string ("?a=1&b=2&c=3") into a dict.

        Returns an empty dict when --getreq was not supplied.
        """
        if (self.args.getreq):
            mydict = {}
            matches = re.findall(r'(?:\?|\&|)([^=]+)\=([^&]+)', self.args.getreq)
            for m in matches:
                mydict[m[0]] = m[1]
            return mydict
        else:
            return {}

    def parse_proxy(self):
        """Parse --proxy "scheme:url" into a requests-style proxies dict.

        Exits with an error message when the value does not match the
        expected "http:..."/"https:..." shape.
        """
        proxydict = {}
        try:
            match = re.search(r'((?:http|https)):(.*)', self.args.proxy, re.I)
            # AttributeError from match.group() signals a non-matching value.
            proxydict[match.group(1)] = match.group(2)
            return proxydict
        except AttributeError:
            print "[-] Wrong proxy format."
            exit()
if __name__ == "__main__":
    # Debug entry point: parse args and show the resulting proxy mapping.
    argsie = WracostArgs()
    print argsie.args.proxy
|
from utils.mesh_convert_helper import *
from utils.data_io import *
from utils.CoMA_dataloader import *
from utils.KNU_dataloader import *
# from utils.tf_helper import *
from utils.sort import *
from utils.dataloader_factory import * |
"""Utility functions for Markive."""
import datetime
import os
import yaml
def get_current_entry(markive_folder: str, date=None) -> str:
    """Get the entry filepath for a given date (defaults to now).

    Arguments:
        markive_folder: String path of where entries are stored.
        date: Optional datetime to resolve; any falsy value falls back
            to ``datetime.datetime.now()``.

    Returns:
        entry: The entry as a filepath, e.g.
            ``<markive_folder>/March-2021/Mar-07.md``.
    """
    date = date if date else datetime.datetime.now()
    # Entries are grouped into "<MonthName>-<Year>" sub-folders.
    month = os.path.join(markive_folder, date.strftime("%B-%Y"))
    # Individual entries are named "<Mon>-<day>.md".
    entry = os.path.join(month, date.strftime("%b-%d.md"))
    return entry
def read_config(markive_folder: str) -> dict:
    """Read the configuration YAML file from the Markive folder.

    Arguments:
        markive_folder: String path of the markive folder.

    Returns:
        config: Dictionary of configuration parameters; values from
            ``config.yml`` override the built-in defaults below.
    """
    # Defaults used when config.yml is absent or omits a key.
    config = {
        "pre_write": '',
        "post_write": '',
        "template": ("---\n"
                     "date: %Y-%m-%d\n"
                     "---\n"),
    }
    config_file = os.path.join(markive_folder, "config.yml")
    if os.path.exists(config_file):
        # Explicit encoding so parsing is identical across platforms.
        with open(config_file, 'r', encoding='utf-8') as file:
            content = yaml.safe_load(file)
        # safe_load returns None for an empty file; keep defaults then.
        if content:
            config.update(content)
    return config
|
from .. import db
class Users(db.Document):
    """MongoEngine document describing an application user."""
    full_name = db.StringField(required=True)
    phone_no = db.StringField(required=True)
    photo_url = db.StringField(required=True)
    # Fixed typo: the keyword was "request=True", which is not a
    # BooleanField option; "required=True" actually enforces the field.
    is_verify = db.BooleanField(required=True)
# -*- coding: utf-8 -*-
__author__ = 'chuter'
import os
from django.conf import settings
from core.jsonresponse import create_response
from core.exceptionutil import unicode_full_stack
from modules.member import util as member_util
from market_tools.prize.module_api import *
from models import *
def create_ballot(request):
    """Cast a ballot for a vote option on behalf of the current webapp user.

    On success the member (if logged in) is awarded the vote's configured
    prize. Returns a JSON response: 200 on success, 500 with the full
    stack trace in innerErrMsg on any failure.
    """
    option_id = int(request.GET['option_id'])
    member = request.member
    webapp_user = request.webapp_user
    try:
        option = VoteOption.objects.get(id=option_id)
        vote = option.vote
        # Record the ballot for this webapp user.
        VoteOption.vote_by_webapp_user(option_id, webapp_user)
        if member:
            # Award the vote's configured prize (points) for voting.
            prize_info = PrizeInfo.from_json(vote.award_prize_info)
            award(prize_info, member, u'投票获得积分')
        response = create_response(200)
    except Exception:
        # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # are no longer swallowed; the error is still reported as a 500.
        response = create_response(500)
        response.innerErrMsg = unicode_full_stack()
    return response.get_response()
def __serialize_options_to_jsonarray(vote_options):
    """Serialize vote options to a list of JSON-ready dicts.

    Each dict is the option's own to_json() payload plus a 'has_voted'
    flag, which defaults to False when the attribute was never set.
    Returns [] for None or an empty collection.
    """
    # Truthiness covers both the None and the empty-collection cases.
    if not vote_options:
        return []
    options_jsonarray = []
    for option in vote_options:
        option_json = option.to_json()
        # getattr default replaces the hasattr/ternary dance.
        option_json['has_voted'] = getattr(option, 'has_voted', False)
        options_jsonarray.append(option_json)
    return options_jsonarray
########################################################################
# get_vote_options: fetch the available options for a vote
########################################################################
def get_vote_options(request):
    """Return one JSON page of a vote's options, ordered by vote count.

    POST params: id (vote id), count (page size), item_index (current
    offset), search (optional name substring filter). Each returned
    option carries a has_voted flag for the current webapp user.
    Returns 500 with the stack trace on any failure (Python 2 module).
    """
    member = request.member
    webapp_user = request.webapp_user
    try:
        vote_id = int(request.POST['id'])
        count = int(request.POST['count'])
        next_display_index = int(request.POST['item_index'])
        search = request.POST.get('search', '').strip()
        vote_options = VoteOption.objects.filter(vote_id=vote_id)
        if len(search) > 0:
            # Optional substring filter on the option name.
            vote_options = vote_options.filter(name__contains=search)
        vote_options = vote_options.order_by('-vote_count')
        end_index = next_display_index + count
        response = create_response(200)
        if vote_options.count() > end_index:
            # More options remain beyond this page.
            response.data.is_has_more = True
            select_options = vote_options[0:end_index]
            response.data.item_index = end_index
        else:
            # This page exhausts the result set.
            response.data.is_has_more = False
            if vote_options.count() == 0:
                response.data.item_index = 0
                select_options = []
            else:
                select_options = vote_options[0:end_index]
                response.data.item_index = end_index
        # Mark the options this webapp user has already voted for.
        webapp_user_voted_options_for_vote = VoteOptionHasWebappUser.voted_options_by_webapp_user_for_vote(webapp_user, vote_id)
        for vote_option in select_options:
            for voted_option_for_vote in webapp_user_voted_options_for_vote:
                if vote_option.id == voted_option_for_vote.id:
                    vote_option.has_voted = True
                    break
        response.data.items = __serialize_options_to_jsonarray(select_options)
        response.data.is_webapp_user_vote = Vote.has_voted_by_webapp_user(vote_id, webapp_user)
        return response.get_response()
    except:
        response = create_response(500)
        response.errMsg = u'获取失败'
        response.innerErrMsg = unicode_full_stack()
        print response.innerErrMsg
        return response.get_response()
import re
def main():
    """
    Decide whether the input number could arise from digits built only
    of 6 and 7 ("lucky number" sum puzzle). Prints "Yes" or "No".

    Digit reasoning (from the original Japanese notes):
      last digit: 2, 3 or 4 (6+6, 6+7, 7+7)
      middle digits: 3, 4 or 5 = (2, 3, 4) + carry 1
      top digit: 1 (pure carry), or (6, 7, 8) = (6, 7) + carry (0/1)

       66 |  666 |  6666
     + 66 | + 66 | +  66
     -----+------+------
      132 |  732 |  6732
    """
    p = input()
    # Too short, or the last digit cannot be produced by a 6/7 sum.
    if len(p) == 1 or p[-1] not in list("234"):
        print("No")
        return
    if p[0] == "1":
        # Leading 1 comes purely from a carry out of the top column.
        m = re.match("^1[3-5]*[2-4]$", p)
    else:
        # When the sum grows no extra digit there is always a carry,
        # so the top digit cannot be 6; the upper part is built from
        # 6 or 7. (Fixes the original comment that was split across
        # lines, leaving a bare non-comment line — a SyntaxError.)
        m = re.match("^[6-7]*[7-8][3-5]*[2-4]$", p)
    if m:
        print("Yes")
    else:
        print("No")
if __name__ == '__main__':
    # Read one line from stdin and print Yes/No.
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.