blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b37b2e218d9b6497281ffcb42383e42614c8930c | f0a5ad7b8aa39f51f233391fead0da3eabecc4ee | /.history/toolbox/abreFile_20191127163354.py | 2da87cf946b4539934df6748b231c06528e4165f | [] | no_license | OseiasBeu/webScrapping | e0a524847e55b24dbbd3d57bbe7fa43b4e101f48 | 1e72c7551aea355a891043baecfcbab8a89e719a | refs/heads/master | 2022-10-25T18:12:50.858653 | 2020-06-18T01:29:24 | 2020-06-18T01:29:24 | 224,681,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | import pandas as pd
import os
def abreFile():
    """Locate the first '.xls' export under the downloads folder and rename
    it to the canonical ``extract.xlsx`` path.

    Fixes vs. the original:
    * the nested helper's ``extensao`` parameter was ignored (the '.xls'
      suffix was hard-coded), so the helper could not be reused;
    * ``old[0]`` raised a bare IndexError when no file matched — now a
      clear FileNotFoundError is raised instead.
    """
    oldAddres = 'C:/Users/beuo/Documents/Demandas/AtualizaMiddleIntegrationVtex/files/'
    newFile = 'C:/Users/beuo/Documents/Demandas/AtualizaMiddleIntegrationVtex/files/extract.xlsx'

    def encontraArquivosEmPastaRecursivamente(pasta, extensao):
        # Walk `pasta` recursively, collecting files ending with `extensao`.
        arquivosTxt = []
        caminhoAbsoluto = os.path.abspath(pasta)
        for pastaAtual, subPastas, arquivos in os.walk(caminhoAbsoluto):
            arquivosTxt.extend(
                os.path.join(pastaAtual, arquivo)
                for arquivo in arquivos
                if arquivo.endswith(extensao)  # was hard-coded '.xls'
            )
        return arquivosTxt

    old = encontraArquivosEmPastaRecursivamente(oldAddres, '.xls')
    if not old:
        raise FileNotFoundError("No '.xls' file found under " + oldAddres)
    print(old[0])
    os.rename(old[0], newFile)
# wb = pd.ExcelFile('./file/extract.xlsx')
# df = pd.read_excel(wb)
# print(df.head())
abreFile() | [
"oseiasbeu@outlook.com"
] | oseiasbeu@outlook.com |
3855a95c8084c4bb4923ae0522d68b3251d55a9c | bfb1db9b58064f63ed8040b50d5fe3b4664adc01 | /wechat_django/decorators.py | 689644e5a65f53d8e12040fa0cf847b3d445f9e8 | [
"MIT"
] | permissive | hvv1616/wechat-django | 74947d7ea126e507d649cb152af1a66d68593a8f | 5599f237bc1781a594102ce7ff491086f8cf69d2 | refs/heads/master | 2020-04-30T07:22:38.427671 | 2019-03-18T12:56:20 | 2019-03-18T12:56:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,249 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from functools import wraps
from six import text_type
__all__ = ("message_handler", )
def message_handler(names_or_func=None):
    """自定义回复业务需加装该装饰器
    被装饰的自定义业务接收一个``wechat_django.models.WeChatMessageInfo``对象
    并且返回一个``wechatpy.replies.BaseReply``对象

    :param names_or_func: 允许使用该message_handler的appname 不填所有均允许
    :type names_or_func: str or list or tuple or callable

    @message_handler
    def custom_business(message):
        user = message.user
        # ...
        return TextReply("hello", message=message.message)

    @message_handler(("app_a", "app_b"))
    def app_ab_only_business(message):
        # ...
    """
    def decorator(view_func):
        @wraps(view_func)
        def decorated_view(message):
            return view_func(message)
        # Truthy marker: the allowed appname list, or True for "all apps".
        decorated_view.message_handler = names or True
        return decorated_view

    if isinstance(names_or_func, text_type):
        # Single appname given as a string.
        names = [names_or_func]
    elif callable(names_or_func):
        # Bare @message_handler usage: decorate immediately, allow all apps.
        names = None
        return decorator(names_or_func)
    else:
        # Bug fix: a list/tuple of appnames (documented) or None previously
        # left `names` unbound, raising NameError at decoration time.
        names = names_or_func
    return decorator
| [
"13599838712@hotmail.com"
] | 13599838712@hotmail.com |
7d7bb90c1a2334efb7726e0953374e69e0d6d84e | d6e9aa31c05dd22857b27353d26a69ed5c472d56 | /books/apps/index/migrations/0012_auto_20200612_2121.py | 2bb7a3371bcff48ed37e486e3bec788abc034b64 | [] | no_license | Zhukowych/books | 4ac3490c3bdb369c3e524cdc4317b29781af9133 | 70c785bbb87eaa07958dd70e94caef87ddcea04d | refs/heads/main | 2023-02-15T01:27:45.886052 | 2021-01-06T18:55:47 | 2021-01-06T18:55:47 | 324,331,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,052 | py | # Generated by Django 3.0.6 on 2020-06-12 18:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (2020-06-12).

    Adds the ``can_change_public`` flag to ``Book`` and creates the ``Info``
    model linking a message to a ``Book`` and (optionally) a user.
    Do not edit field names here — e.g. the ``messange`` typo is part of the
    recorded schema history.
    """

    dependencies = [
        # Requires the configured user model and the previous app migration.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('index', '0011_comment'),
    ]

    operations = [
        migrations.AddField(
            model_name='book',
            name='can_change_public',
            field=models.BooleanField(default=True),
        ),
        migrations.CreateModel(
            name='Info',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('messange', models.TextField()),
                # Deleting the book cascades; deleting the user nulls the FK.
                ('book', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='index.Book')),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"mzmzhuk@gmail.com"
] | mzmzhuk@gmail.com |
d3c7a00bae095d7bb9b37789a0f2c1d9dc77ff3d | 788b4d987b2fa95defe6b6775cdefc296ff3bd94 | /lab-2/code/problem_1_c_1.py | 3209c8d59f60bf935907361e67a8bdfb5c67d148 | [] | no_license | pvarsh/applied_data_science | c6f34f4547397c0be6802913e84362acd41b923b | 6c3cc21e40ef992cdfaa302f0c6b7e1bdfff410e | refs/heads/master | 2016-09-05T20:47:05.994935 | 2014-12-26T16:32:24 | 2014-12-26T16:32:24 | 24,041,861 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,122 | py | #learn (A and B) or C with a nerual network with saving of the learned paramaters
import pybrain
from pybrain.datasets import *
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
import pickle
if __name__ == "__main__":
    # Train a small pybrain network to learn the boolean function
    # (A and B) or C; the samples below enumerate its full truth table.
    # NOTE: this file is Python 2 (see the `print` statement at the end).
    ds = SupervisedDataSet(3, 1)
    ds.addSample( (1,1,1) , (1,))
    ds.addSample( (1,1,0) , (1,))
    ds.addSample( (1,0,1) , (1,))
    ds.addSample( (0,1,1) , (1,))
    ds.addSample( (1,0,0) , (0,))
    ds.addSample( (0,1,0) , (0,))
    ds.addSample( (0,0,1) , (1,))
    ds.addSample( (0,0,0) , (0,))
    # 3 inputs, one hidden layer of 4 units, 1 output.
    net = buildNetwork(3, 4, 1, bias=True)
    # Disabled: reload previously learned parameters from disk.
    # try:
    #     f = open('_learned', 'r')
    #     net = pickle.load(f)
    #     f.close()
    # except:
    trainer = BackpropTrainer(net, learningrate = 0.01, momentum = 0.99)
    trainer.trainOnDataset(ds, 3000)  # 3000 epochs of backprop
    trainer.testOnData()
    # Disabled: persist the learned parameters to disk.
    # f = open('_learned', 'w')
    # pickle.dump(net, f)
    # f.close()
    abc = [(1,1,1),
           (1,1,0),
           (1,0,1),
           (0,1,1),
           (1,0,0),
           (0,1,0),
           (0,0,1),
           (0,0,0)]
    # Print the network's activation for every input triple.
    for triple in abc:
        print triple, net.activate(triple)
| [
"pvarsh@gmail.com"
] | pvarsh@gmail.com |
91e8a8aae3250955ee90e174bfdd9f30bae1d120 | 91e885847af288a03fbd92f08acebb39ae23ab38 | /Catching Error-Try:Except.py | 4ca1af59712299377b1973bcf4e80b021ba22eeb | [] | no_license | leah0328/Python-tutorial | 4ce63a1a21b4c595477bcfdc1c7928e9e164fe3b | c74a9a4982b8ca255904f680ab2f9989e2ea74fb | refs/heads/main | 2023-07-11T05:16:16.394513 | 2021-08-16T08:26:35 | 2021-08-16T08:26:35 | 345,490,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 735 | py | # Try / Except block : for times when something goes wrong
# eg. user insert an indesired input
# Demo 1: the division always raises ZeroDivisionError, so the matching
# except clause runs and `input` is never reached.
try:
    answer = 10 / 0
    number = int(input("Enter a number: "))
    print(number)
except ZeroDivisionError:
    print("Invalid Input")
except ValueError:
    print("Value Input")
# Demo 2: we can also store the caught exception as a variable, see ##
try:
    number = int(input("Enter a number: "))
    print(number)
except ZeroDivisionError as err:  ##
    # Bug fix: print the exception object itself. The original printed the
    # literal string "err", which contradicted the comment saying it would
    # show what went wrong.
    print(err)
except ValueError:
    print("Value Input")
# Best practice is to specify the return value according to the Error type
# otherwise it is too broad(people wouldnt know what went wrong)
# otherwise it is too broad(people wouldnt know what went wrong) | [
"noreply@github.com"
] | leah0328.noreply@github.com |
db8cdad93128a19ba84640c54d3a3bcf21458506 | dc798f062b15e6ad060a5cfb731db5f286e2088b | /lesson7/task4/tests.py | 0dc1cb67e0b5856510c204ea5431b442ee148c04 | [] | no_license | DmitryTsybulkin/stepik-python | dce78c4fe616fe3f5bd26e0dad9c80bc5c5c4ab2 | 0726346f43e21623a1200aa76b9c7e9ff5476844 | refs/heads/master | 2020-04-27T22:28:33.695541 | 2019-09-13T14:03:28 | 2019-09-13T14:03:28 | 174,738,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | from test_helper import check_samples
if __name__ == '__main__':
check_samples(samples=[["480\n1\n2","9\n2"],["475\n1\n55","9\n50"]]) | [
"tsydimasik@gmail.com"
] | tsydimasik@gmail.com |
c5c41296aa3ad72e552e536f5ad0f48f08536010 | 6a1a668310b6c2a58018093f4de3d2c12fedcdd6 | /scratchpad.py | fc0e00d26dc1f6a280a64a3ce52b489bacc4d562 | [] | no_license | jaydee829/Deep-Learning-with-Python | 25c26f4254e3dd872639b7fdc866ba3e9a22d46b | c9dfafd7ce23f30057cf493bef83b60569631daf | refs/heads/master | 2020-03-30T16:33:33.789180 | 2018-10-03T13:15:17 | 2018-10-03T13:15:17 | 151,414,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 803 | py | import tensorflow as tf
from tensorflow.python.client import device_lib
# List the devices (CPU/GPU) TensorFlow can see.
print(device_lib.list_local_devices())
# Smoke test: run a trivial graph.
hello=tf.constant('Hello, TensorFlow!')
sess=tf.Session()
print(sess.run(hello))
import os
# Hide all GPUs from TensorFlow (force CPU execution).
os.environ['CUDA_VISIBLE_DEVICES']='-1'
import tensorflow as tf
config = tf.ConfigProto()
# Grow GPU memory on demand instead of grabbing it all up front.
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
import tensorflow as tf
from keras import backend as K
num_cores = 4
# Toggle these two flags to choose the execution device.
CPU=True
GPU=False
if GPU:
    num_GPU = 1
    num_CPU = 1
if CPU:
    num_CPU = 1
    num_GPU = 0
# Build a session limited to `num_cores` threads and the chosen devices,
# then install it as the Keras backend session.
config = tf.ConfigProto(intra_op_parallelism_threads=num_cores,\
        inter_op_parallelism_threads=num_cores, allow_soft_placement=True,\
        device_count = {'CPU' : num_CPU, 'GPU' : num_GPU})
session = tf.Session(config=config)
K.set_session(session) | [
"jaydee829@gmail.com"
] | jaydee829@gmail.com |
a5a5c9f6bb355fd5a5453b35ac6518d45307fe70 | c703adfdffd50674f578a0300454b2a50c6d5f5d | /software/mechanical_computations/PID.py | 789bf1b2da788c5471a68ab533f81e2a0702f41c | [] | no_license | tarragoesteve/TFM | 7e8fe9e2fa4c11d7717ffa5657d3f29ece18fb4f | 4a6d0117ab8ea88172cce4674300e7b05b65e240 | refs/heads/master | 2023-01-31T01:39:50.537688 | 2019-09-30T20:48:34 | 2019-09-30T20:48:34 | 156,758,326 | 0 | 0 | null | 2023-01-07T03:24:10 | 2018-11-08T19:30:43 | Python | UTF-8 | Python | false | false | 1,067 | py | class PID:
accumulated_error = 0
previous_error = 0
previous_error_time = 0
first_error = True
kp = 0
ki = 0
kd = 0
def __init__(self,kp,ki,kd):
self.kp = kp
self.ki = ki
self.kd = kd
pass
def control_variable(self, error, time):
if self.first_error:
self.first_error = False
self.previous_error = error
self.previous_error_time = time
return self.kp * error
else:
if time - self.previous_error_time > 0:
self.accumulated_error = self.accumulated_error + \
error * (time - self.previous_error_time)
output = self.kp * error + self.ki * self.accumulated_error + self.kd * \
(error - self.previous_error) / (time - self.previous_error_time)
else:
output = self.kp * error + self.ki * self.accumulated_error
self.previous_error = error
self.previous_error_time = time
return output | [
"tarragoesteve@gmail.com"
] | tarragoesteve@gmail.com |
e04aa1d7d13ed57853a99a21818241b1f4347d18 | 4def96c476252c48c0122c6eb47dab3f155ceb91 | /telnet.py | 1c745dfde002de3d504cbc216a7b46dd814c6ba6 | [] | no_license | helioncneto/AdvancedProg | c1a52933b144dbaae67439999fbf85673fe3f190 | 3f57aadeb235b6323295ddd83884541687b462f7 | refs/heads/master | 2020-07-27T05:21:33.702901 | 2020-01-24T14:11:56 | 2020-01-24T14:11:56 | 208,884,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 931 | py | import telnetlib
from time import sleep
# Define the (test) username, password and target host.
usuario = 'teste'
senha = 'teste'
Host = 'localhost'
class Telnet:
    """Thin wrapper around ``telnetlib.Telnet`` that logs in on construction.

    NOTE(review): the login banner ``b'midiacom-taesa login:'`` is
    host-specific — presumably the target device's prompt; confirm before
    reusing against a different host.
    """

    def __init__(self, usuario, senha, host):
        self.tn = telnetlib.Telnet(host)
        #self.tn.set_debuglevel(100)
        # Send the username and password (establishing the session).
        # Both read_until calls use a 5-second timeout.
        self.tn.read_until(b'midiacom-taesa login:', 5)
        self.tn.write(usuario.encode('ascii') + b"\n")
        self.tn.read_until(b'Password:', 5)
        self.tn.write(senha.encode('ascii') + b"\n")
        sleep(2)

    def enviar(self, cmd):
        # Send a shell command over the session.
        self.tn.write(cmd.encode('ascii') + b"\n")

    def imprimir_tudo(self):
        # Exit the remote shell and print everything the session produced.
        self.tn.write(b"exit\n")
        print(self.tn.read_all().decode('utf-8'))
if __name__ == '__main__':
    # Demo: log in with the module-level test credentials and dump
    # the contents of /etc/passwd.
    t = Telnet(usuario, senha, Host)
    t.enviar('cat /etc/passwd')
    t.imprimir_tudo()
"helioncneto@gmail.com"
] | helioncneto@gmail.com |
68cdea4e70011e9f6aed99dc512556fe7e0e6826 | b81668a2cc43654cf6a3ed952d781310876838f9 | /venv/Lib/site-packages/thinc/backends/ops.py | 838ae8c0ffa67b6b448fc765e4e95f30422fb0bd | [] | no_license | gowthamr1999/docbot-1 | 6a8b873407f15035fb8b30b69ed66ded343bd1e4 | 3119958d68e95673b4c9187d58d8cad5c18a6b2c | refs/heads/master | 2023-04-07T02:16:55.574750 | 2021-04-16T02:52:38 | 2021-04-16T02:52:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36,936 | py | from typing import Optional, List, Tuple, Sequence, Union, cast, TypeVar
from typing import Iterator, overload
import numpy
import itertools
from ..types import Xp, Shape, DTypes, DTypesInt, DTypesFloat, List2d, ArrayXd
from ..types import Array2d, Array3d, Floats1d, Floats2d, Floats3d, Floats4d
from ..types import FloatsXd, Ints1d, Ints2d, Ints3d, Ints4d, IntsXd, _Floats
from ..types import DeviceTypes, Generator, Padded, Batchable, SizedGenerator
from ..util import get_array_module, is_xp_array, to_numpy
ArrayT = TypeVar("ArrayT", bound=ArrayXd)
FloatsT = TypeVar("FloatsT", bound=_Floats)
class Ops:
    """Backend-agnostic array operations.

    Subclasses (e.g. numpy- or cupy-backed implementations) override ``xp``
    and individual methods; this base class provides reference
    implementations in terms of the ``xp`` array module.
    """

    name: str = "base"  # backend identifier
    xp: Xp = numpy  # array module; numpy here, replaced in subclasses

    def __init__(
        self, device_type: DeviceTypes = "cpu", device_id: int = -1, **kwargs
    ) -> None:
        # Extra keyword arguments are accepted (and ignored) so subclasses
        # can share a constructor signature.
        self.device_type = device_type
        self.device_id = device_id

    def to_numpy(self, data):  # pragma: no cover
        """Return `data` as a numpy array; the base class cannot convert."""
        if isinstance(data, numpy.ndarray):
            return data
        else:
            raise ValueError("Cannot convert non-numpy from base Ops class")
    def minibatch(
        self,
        size: Union[int, Generator],
        sequence: Batchable,
        *,
        shuffle: bool = False,
        buffer: int = 1,
    ) -> SizedGenerator:
        """Iterate slices from a sequence, optionally shuffled. Slices
        may be either views or copies of the underlying data.

        The `size` argument may be either an integer, or a sequence of integers.
        If a sequence, a new size is drawn before every output.

        If shuffle is True, shuffled batches are produced by first generating
        an index array, shuffling it, and then using it to slice into the
        sequence.

        An internal queue of `buffer` items is accumulated before being each
        output. Buffering is useful for some devices, to allow the
        network to run asynchronously without blocking on every batch.
        """
        if not hasattr(sequence, "__len__"):
            err = f"Can't minibatch data. Expected sequence, got {type(sequence)}"
            raise ValueError(err)
        sizes = self._get_batch_sizes(
            len(sequence), itertools.repeat(size) if isinstance(size, int) else size
        )
        indices = numpy.arange(len(sequence))

        # This is a bit convoluted, but it's a time where convenience makes
        # trickery worthwhile: instead of being an actual generator, we
        # return our SizedGenerator object, which provides a __len__.
        def _iter_items():
            if shuffle:
                numpy.random.shuffle(indices)
            queue = []
            i = 0
            for size in sizes:
                queue.append(self._get_batch(sequence, indices[i : i + size]))
                if len(queue) >= buffer:
                    yield from queue
                    queue = []
                i += size
            # Flush any batches still waiting in the buffer.
            yield from queue

        return SizedGenerator(_iter_items, len(sizes))
    def multibatch(
        self,
        size: Union[int, Generator],
        sequence: Batchable,
        *others: Batchable,
        shuffle: bool = False,
        buffer: int = 1,
    ) -> SizedGenerator:
        """Minibatch one or more sequences of data, and yield
        lists with one batch per sequence. See ops.minibatch.
        """
        # You'd think we could just do this by calling into minibatch and zip...
        # But the shuffling makes it really hard.
        sequences = (sequence,) + tuple(others)
        if not all(hasattr(seq, "__len__") for seq in sequences):
            values = ", ".join([f"{type(seq)}" for seq in sequences])
            err = f"Can't multibatch data. Expected sequences, got {values}"
            raise ValueError(err)
        sizes = self._get_batch_sizes(
            len(sequence), itertools.repeat(size) if isinstance(size, int) else size
        )
        indices = numpy.arange(len(sequence))

        def _iter_items():
            if shuffle:
                numpy.random.shuffle(indices)
            queue = []
            i = 0
            for size in sizes:
                # One shared index batch, applied to every sequence so the
                # parallel sequences stay aligned after shuffling.
                idx_batch = indices[i : i + size]
                queue.append([])
                for sequence in sequences:
                    queue[-1].append(self._get_batch(sequence, idx_batch))
                if len(queue) >= buffer:
                    yield from queue
                    queue = []
                i += size
            # Flush any batches still waiting in the buffer.
            yield from queue

        return SizedGenerator(_iter_items, len(sizes))
    def _get_batch(self, sequence, indices):
        # Select `indices` from `sequence`, preserving its container type;
        # array inputs are converted to a contiguous backend array.
        if isinstance(sequence, list):
            subseq = [sequence[i] for i in indices]
        elif isinstance(sequence, tuple):
            subseq = tuple(sequence[i] for i in indices)  # type: ignore
        else:
            subseq = sequence[indices]  # type: ignore
        if is_xp_array(subseq):
            subseq = self.as_contig(
                cast(ArrayXd, self.xp.asarray(subseq))
            )  # type: ignore
        return subseq

    def _get_batch_sizes(self, length: int, sizes: Iterator[int]):
        # Draw batch sizes from the `sizes` iterator until they cover
        # `length` items; the final batch may overshoot.
        output = []
        i = 0
        while i < length:
            output.append(next(sizes))
            i += output[-1]
        return output
    def seq2col(self, seq: Floats2d, nW: int) -> Floats2d:
        """Given an (M, N) sequence of vectors, return an (M, N*(nW*2+1))
        sequence. The new sequence is constructed by concatenating nW preceding
        and succeeding vectors onto each column in the sequence, to extract a
        window of features.
        """
        # This is a test implementation that only supports nW=1
        assert nW == 1
        B = seq.shape[0]
        I = seq.shape[1]
        cols = self.alloc3f(B, (nW * 2 + 1), I)
        # Copy left contexts. The last words aren't the left-context for anything.
        cols[nW:, :nW] = self.reshape3f(seq[:-nW], -1, nW, I)
        cols[:, nW] = seq
        # Right contexts: the first words aren't the right-context of anything.
        cols[:-nW, nW + 1 :] = self.reshape3f(seq[nW:], -1, nW, I)
        return self.reshape2f(cols, B, I * (2 * nW + 1))

    def backprop_seq2col(self, dY: Floats2d, nW: int) -> Floats2d:
        """The reverse/backward operation of the `seq2col` function: calculate
        the gradient of the original `(M, N)` sequence, as a function of the
        gradient of the output `(M, N*(nW*2+1))` sequence.
        """
        # This is a test implementation that only supports nW=1
        assert nW == 1
        nF = nW * 2 + 1
        B = dY.shape[0]
        I = dY.shape[1] // nF
        # Having trouble getting the kernel to work...
        dX = self.alloc2f(B, I)
        dY3d = self.reshape3f(dY, B, nF, I)
        # Accumulate the left-context, centre, and right-context gradients.
        dX[:-nW] += self.reshape2f(dY3d[nW:, :nW], -1, I)
        dX += dY3d[:, nW]
        dX[nW:] += self.reshape2f(dY3d[:-nW, nW + 1 :], -1, I)
        return dX
    def gemm(
        self,
        x: Floats2d,
        y: Floats2d,
        out: Optional[Floats2d] = None,
        trans1: bool = False,
        trans2: bool = False,
    ) -> Floats2d:
        """Perform General Matrix Multiplication (GeMM) and optionally store
        the result in the specified output variable.

        trans1/trans2 transpose the first/second operand before multiplying.
        """
        if trans1:
            x = x.T
        if trans2:
            y = y.T
        if out is None:
            return self.xp.dot(x, y)
        else:
            # Write into the caller-provided buffer and return it.
            self.xp.dot(x, y, out=out)
            return out

    def affine(self, X: Floats2d, W: Floats2d, b: Floats1d) -> Floats2d:
        """Apply a weights layer and a bias to some inputs, i.e.
        Y = X @ W.T + b
        """
        Y = self.gemm(X, W, trans2=True)
        Y += b  # broadcast the bias over the batch dimension
        return Y
    def flatten(
        self,
        X: Sequence[ArrayT],
        dtype: Optional[DTypes] = None,
        pad: int = 0,
        ndim_if_empty: int = 2,
    ) -> ArrayT:
        """Flatten a list of arrays into one large array.

        When `pad` >= 1, insert that many zero-rows before each sequence and
        once more at the very end (the layout `unflatten` expects).
        """
        if X is None or len(X) == 0:
            return self.alloc((0,) * ndim_if_empty, dtype=dtype or "f")
        xp = get_array_module(X[0])
        X = [x for x in X if x.size != 0]
        if int(pad) >= 1:
            padded = []
            for x in X:
                padded.append(xp.zeros((pad,) + x.shape[1:], dtype=x.dtype))
                padded.append(x)
            padded.append(xp.zeros((pad,) + x.shape[1:], dtype=x.dtype))
            X = padded
        result = xp.concatenate(X)
        if dtype is not None:
            result = xp.asarray(result, dtype=dtype)
        return result

    def unflatten(self, X: Floats2d, lengths: Ints1d, pad: int = 0) -> List[Floats2d]:
        """The reverse/backward operation of the `flatten` function: unflatten
        a large array into a list of arrays according to the given lengths.
        """
        unflat = []
        pad = int(pad)
        for length in lengths:
            length = int(length)
            if pad >= 1 and length != 0:
                # Skip the padding inserted before this sequence.
                X = X[pad:]
            unflat.append(X[:length])
            X = X[length:]
        if pad >= 1:
            # Skip the trailing padding block.
            X = X[pad:]
        assert len(X) == 0
        assert len(unflat) == len(lengths)
        return unflat
    @overload
    def pad(self, seqs: List[Ints2d], round_to=1) -> Ints3d:
        ...

    @overload  # noqa: F811
    def pad(self, seqs: List[Floats2d], round_to=1) -> Floats3d:
        ...

    def pad(  # noqa: F811
        self, seqs: Union[List[Ints2d], List[Floats2d]], round_to=1
    ) -> Array3d:
        """Perform padding on a list of arrays so that they each have the same
        length, by taking the maximum dimension across each axis. This only
        works on non-empty sequences with the same `ndim` and `dtype`.
        """
        # TODO: This should be generalized to handle different ranks
        if not seqs:
            raise ValueError("Cannot pad empty sequence")
        if len(set(seq.ndim for seq in seqs)) != 1:
            raise ValueError("Cannot pad sequences with different ndims")
        if len(set(seq.dtype for seq in seqs)) != 1:
            raise ValueError("Cannot pad sequences with different dtypes")
        if len(set(seq.shape[1:] for seq in seqs)) != 1:
            raise ValueError("Cannot pad sequences that differ on other dimensions")
        # Find the maximum dimension along each axis. That's what we'll pad to.
        length = max(len(seq) for seq in seqs)
        # Round the length to nearest bucket -- helps on GPU, to make similar
        # array sizes.
        length = (length + (round_to - 1)) // round_to * round_to
        final_shape = (len(seqs), length) + seqs[0].shape[1:]
        output: Array3d = self.alloc(final_shape, dtype=seqs[0].dtype)
        for i, arr in enumerate(seqs):
            # It's difficult to convince this that the dtypes will match.
            output[i, : arr.shape[0]] = arr  # type: ignore
        return output

    def unpad(self, padded: Array3d, lengths: List[int]) -> List2d:
        """The reverse/backward operation of the `pad` function: transform an
        array back into a list of arrays, each with their original length.
        """
        output = []
        for i, length in enumerate(lengths):
            output.append(padded[i, :length])
        return cast(List2d, output)
    def list2padded(self, seqs: List[Floats2d]) -> Padded:
        """Pack a sequence of 2d arrays into a Padded datatype.

        Sequences are sorted longest-first; the stored `indices` record each
        sequence's original position so `padded2list` can restore the order.
        """
        if not seqs:
            return Padded(
                self.alloc3f(0, 0, 0), self.alloc1i(0), self.alloc1i(0), self.alloc1i(0)
            )
        elif len(seqs) == 1:
            # Fast path: a single sequence needs no sorting or length table.
            data = self.reshape3f(seqs[0], seqs[0].shape[0], 1, seqs[0].shape[1])
            size_at_t = self.asarray1i([1] * data.shape[0])
            lengths = self.asarray1i([data.shape[0]])
            indices = self.asarray1i([0])
            return Padded(data, size_at_t, lengths, indices)
        lengths_indices = [(len(seq), i) for i, seq in enumerate(seqs)]
        lengths_indices.sort(reverse=True)
        indices_ = [i for length, i in lengths_indices]
        lengths_ = [length for length, i in lengths_indices]
        nS = max([len(seq) for seq in seqs])
        # Reorder the sequences, by length. This looks the same in either
        # direction: you're swapping elements between their original and sorted
        # position.
        seqs = [seqs[x] for x in indices_]
        arr: Floats3d = self.pad(seqs)
        # Transpose to time-major (time, batch, width) layout.
        arr = self.as_contig(arr.transpose((1, 0, 2)))
        # Build a lookup table so we can find how big the batch is at point t.
        batch_size_at_t_ = self.alloc1i(nS)
        batch_size_at_t_ += 1
        i = len(lengths_)
        for t in range(nS):
            if t == lengths_[i - 1]:
                i -= 1
                if i == 0:
                    break
            batch_size_at_t_[t] = i
        return Padded(
            cast(Floats3d, arr),
            self.asarray1i(batch_size_at_t_),
            self.asarray1i(lengths_),
            self.asarray1i(indices_),
        )

    def padded2list(self, padded: Padded) -> List2d:
        """Unpack a Padded datatype to a list of 2-dimensional arrays."""
        data = padded.data
        indices = to_numpy(padded.indices)
        lengths = to_numpy(padded.lengths)
        unpadded: List[Optional[Floats2d]] = [None] * len(lengths)
        # Back from time-major to batch-major layout.
        data = self.as_contig(data.transpose((1, 0, 2)))
        for i in range(data.shape[0]):
            # Place each trimmed sequence back at its original position.
            unpadded[indices[i]] = data[i, : int(lengths[i])]
        return cast(List2d, unpadded)
    def get_dropout_mask(self, shape: Shape, drop: Optional[float]) -> FloatsXd:
        """Create a random mask for applying dropout, with a certain percent of
        the mask (defined by `drop`) will contain zeros. The neurons at those
        positions will be deactivated during training, resulting in a more
        robust network and less overfitting.
        """
        if drop is None or drop <= 0:
            # No dropout: keep everything.
            return self.xp.ones(shape, dtype="f")
        elif drop >= 1.0:
            # Drop everything: an all-zeros mask.
            return self.alloc(shape)
        coinflips = self.xp.random.uniform(0.0, 1.0, shape)
        # "Inverted dropout": scale kept units by 1/(1-drop) so the expected
        # activation is unchanged at inference time.
        mask = (coinflips >= drop) / (1.0 - drop)
        return cast(FloatsXd, self.asarray(mask, dtype="float32"))
    # Typed convenience wrappers around `alloc`: allocNf/allocNi build a
    # zeroed float/int array of the given rank; alloc_f/alloc_i take a shape.
    def alloc1f(self, d0: int, *, dtype: Optional[DTypesFloat] = "float32") -> Floats1d:
        return self.alloc((d0,), dtype=dtype)

    def alloc2f(
        self, d0: int, d1: int, *, dtype: Optional[DTypesFloat] = "float32"
    ) -> Floats2d:
        return self.alloc((d0, d1), dtype=dtype)

    def alloc3f(
        self, d0: int, d1: int, d2: int, *, dtype: Optional[DTypesFloat] = "float32"
    ) -> Floats3d:
        return self.alloc((d0, d1, d2), dtype=dtype)

    def alloc4f(
        self,
        d0: int,
        d1: int,
        d2: int,
        d3: int,
        *,
        dtype: Optional[DTypesFloat] = "float32",
    ) -> Floats4d:
        return self.alloc((d0, d1, d2, d3), dtype=dtype)

    def alloc_f(
        self, shape: Shape, *, dtype: Optional[DTypesFloat] = "float32"
    ) -> FloatsXd:
        return self.alloc(shape, dtype=dtype)

    def alloc1i(self, d0: int, *, dtype: Optional[DTypesInt] = "int32") -> Ints1d:
        return self.alloc((d0,), dtype=dtype)

    def alloc2i(
        self, d0: int, d1: int, *, dtype: Optional[DTypesInt] = "int32"
    ) -> Ints2d:
        return self.alloc((d0, d1), dtype=dtype)

    def alloc3i(
        self, d0: int, d1: int, d2: int, *, dtype: Optional[DTypesInt] = "int32"
    ) -> Ints3d:
        return self.alloc((d0, d1, d2), dtype=dtype)

    def alloc4i(
        self,
        d0: int,
        d1: int,
        d2: int,
        d3: int,
        *,
        dtype: Optional[DTypesInt] = "int32",
    ) -> Ints4d:
        return self.alloc((d0, d1, d2, d3), dtype=dtype)

    def alloc_i(self, shape: Shape, *, dtype: Optional[DTypesInt] = "int32") -> IntsXd:
        return self.alloc(shape, dtype=dtype)

    def alloc(self, shape: Shape, *, dtype: Optional[DTypes] = "float32") -> ArrayT:
        """Allocate an array of a certain shape."""
        if isinstance(shape, int):
            # Allow a bare int as shorthand for a 1d shape.
            shape = (shape,)
        return self.xp.zeros(shape, dtype=dtype)
    # Typed convenience wrappers around `reshape`, mirroring the alloc
    # helpers: reshapeNf/reshapeNi fix the rank, reshape_f/_i take a shape.
    def reshape1f(self, array: FloatsXd, d0: int) -> Floats1d:
        return cast(Floats1d, self.reshape(array, (d0,)))

    def reshape2f(self, array: FloatsXd, d0: int, d1: int) -> Floats2d:
        return cast(Floats2d, self.reshape(array, (d0, d1)))

    def reshape3f(self, array: FloatsXd, d0: int, d1: int, d2: int) -> Floats3d:
        return cast(Floats3d, self.reshape(array, (d0, d1, d2)))

    def reshape4f(
        self, array: FloatsXd, d0: int, d1: int, d2: int, d3: int
    ) -> Floats4d:
        return cast(Floats4d, self.reshape(array, (d0, d1, d2, d3)))

    def reshape_f(self, array: FloatsXd, shape: Shape) -> FloatsXd:
        return self.reshape(array, shape)

    def reshape1i(self, array: IntsXd, d0: int) -> Ints1d:
        return cast(Ints1d, self.reshape(array, (d0,)))

    def reshape2i(self, array: IntsXd, d0: int, d1: int) -> Ints2d:
        return cast(Ints2d, self.reshape(array, (d0, d1)))

    def reshape3i(self, array: IntsXd, d0: int, d1: int, d2: int) -> Ints3d:
        return cast(Ints3d, self.reshape(array, (d0, d1, d2)))

    def reshape4i(self, array: IntsXd, d0: int, d1: int, d2: int, d3: int) -> Ints4d:
        return cast(Ints4d, self.reshape(array, (d0, d1, d2, d3)))

    def reshape_i(self, array: IntsXd, shape: Shape) -> IntsXd:
        return self.reshape(array, shape)

    def reshape(self, array: ArrayT, shape: Shape) -> ArrayT:
        """Reshape an array."""
        if isinstance(shape, int):
            # Allow a bare int as shorthand for a 1d shape.
            shape = (shape,)
        return cast(ArrayT, array.reshape(shape))
    # Typed convenience wrappers around `asarray`, defaulting to
    # float32/int32 dtypes to match the alloc helpers.
    def asarray4f(
        self,
        data: Union[Floats4d, Sequence[int]],
        *,
        dtype: Optional[DTypes] = "float32",
    ) -> Floats4d:
        return cast(Floats4d, self.asarray(data, dtype=dtype))

    def asarray3f(
        self,
        data: Union[Floats3d, Sequence[int]],
        *,
        dtype: Optional[DTypes] = "float32",
    ) -> Floats3d:
        return cast(Floats3d, self.asarray(data, dtype=dtype))

    def asarray2f(
        self,
        data: Union[Floats2d, Sequence[int]],
        *,
        dtype: Optional[DTypes] = "float32",
    ) -> Floats2d:
        return cast(Floats2d, self.asarray(data, dtype=dtype))

    def asarray1f(
        self,
        data: Union[Floats1d, Sequence[int]],
        *,
        dtype: Optional[DTypes] = "float32",
    ) -> Floats1d:
        return cast(Floats1d, self.asarray(data, dtype=dtype))

    def asarray_f(
        self,
        data: Union[FloatsXd, Sequence[float]],
        *,
        dtype: Optional[DTypes] = "float32",
    ) -> FloatsXd:
        return cast(FloatsXd, self.asarray(data, dtype=dtype))

    def asarray1i(
        self, data: Union[Ints1d, Sequence[int]], *, dtype: Optional[DTypes] = "int32"
    ) -> Ints1d:
        return cast(Ints1d, self.asarray(data, dtype=dtype))

    def asarray2i(
        self, data: Union[Ints2d, Sequence[int]], *, dtype: Optional[DTypes] = "int32"
    ) -> Ints2d:
        return cast(Ints2d, self.asarray(data, dtype=dtype))

    def asarray3i(
        self, data: Union[Ints3d, Sequence[int]], *, dtype: Optional[DTypes] = "int32"
    ) -> Ints3d:
        return cast(Ints3d, self.asarray(data, dtype=dtype))

    def asarray4i(
        self, data: Union[Ints4d, Sequence[int]], *, dtype: Optional[DTypes] = "int32"
    ) -> Ints4d:
        return cast(Ints4d, self.asarray(data, dtype=dtype))

    def asarray_i(
        self, data: Union[IntsXd, Sequence[int]], *, dtype: Optional[DTypes] = "int32"
    ) -> IntsXd:
        return cast(IntsXd, self.asarray(data, dtype=dtype))

    def asarray(
        self,
        data: Union[ArrayXd, Sequence[ArrayXd], Sequence[float], Sequence[int]],
        *,
        dtype: Optional[DTypes] = None,
    ) -> ArrayXd:
        """Ensure a given array is of the correct type."""
        if isinstance(data, self.xp.ndarray):
            if dtype is not None:
                return self.xp.asarray(data, dtype=dtype)
            else:
                return self.xp.asarray(data)
        elif hasattr(data, "numpy"):
            # Handles PyTorch Tensor
            return data.numpy()  # type: ignore
        elif dtype is not None:
            return self.xp.array(data, dtype=dtype)
        else:
            return self.xp.array(data)
    def as_contig(self, data: ArrayT, dtype: Optional[DTypes] = None) -> ArrayT:
        """Allow the backend to make a contiguous copy of an array.
        Implementations of `Ops` do not have to make a copy or make it
        contiguous if that would not improve efficiency for the execution engine.
        """
        # Only pass dtype through when explicitly requested.
        kwargs = {"dtype": dtype} if dtype is not None else {}
        return self.xp.ascontiguousarray(data, **kwargs)
def sigmoid(self, X: FloatsT, *, inplace: bool = False) -> FloatsT:
if inplace:
self.xp.exp(-X, out=X)
X += 1.0
X **= -1.0
return X
else:
return 1.0 / (1.0 + self.xp.exp(-X))
def dsigmoid(self, Y: FloatsT, *, inplace: bool = False) -> FloatsT:
if inplace:
Y *= 1 - Y
return Y
else:
return Y * (1.0 - Y)
def dtanh(self, Y: FloatsT, *, inplace: bool = False) -> FloatsT:
if inplace:
Y **= 2
Y *= -1.0
Y += 1.0
return Y
else:
return 1 - Y ** 2
def softmax(self, x: FloatsT, *, inplace: bool = False, axis: int = -1) -> FloatsT:
maxes = self.xp.max(x, axis=axis, keepdims=True)
shifted = x - maxes
new_x = self.xp.exp(shifted)
new_x /= new_x.sum(axis=axis, keepdims=True)
return new_x
    def softmax_sequences(
        self, Xs: Floats2d, lengths: Ints1d, *, inplace: bool = False, axis: int = -1
    ) -> Floats2d:
        """Softmax over variable-length ragged rows: each sequence (given by
        `lengths`) is normalised as one unit.

        NOTE(review): relies on `self.reduce_sum` / `self.backprop_reduce_sum`,
        which are defined elsewhere in this class — verify their semantics
        there.
        """
        if Xs.ndim >= 3:
            err = f"Softmax currently only supports 2d. Got: {Xs.ndim}"
            raise NotImplementedError(err)
        # This loses almost no fidelity, and helps the numerical stability.
        Xs = self.xp.clip(Xs, -20.0, 20.0)
        new_x = self.xp.exp(Xs)
        # Sum per sequence, then broadcast the sums back over each row.
        summed = self.backprop_reduce_sum(self.reduce_sum(new_x, lengths), lengths)
        new_x /= summed
        return new_x
def backprop_softmax(self, Y: FloatsT, dY: FloatsT, *, axis: int = -1) -> FloatsT:
dX = Y * dY
dX -= Y * dX.sum(axis=axis, keepdims=True)
return dX
    def backprop_softmax_sequences(
        self, dY: Floats2d, Y: Floats2d, lengths: Ints1d
    ) -> Floats2d:
        """Gradient of `softmax_sequences`, summing per ragged sequence.

        NOTE(review): relies on `self.reduce_sum` / `self.backprop_reduce_sum`
        defined elsewhere in this class.
        """
        dX = Y * dY
        sum_dX = self.backprop_reduce_sum(self.reduce_sum(dX, lengths), lengths)
        dX -= Y * sum_dX
        return dX
    def recurrent_lstm(
        self,
        W: Floats2d,
        b: Floats1d,
        h_init: Floats1d,
        c_init: Floats1d,
        inputs: Floats3d,
        is_train: bool = True,
    ) -> Tuple[Floats3d, Tuple[Floats3d, Floats3d, Floats3d]]:
        """Run an LSTM over `inputs`, returning the outputs and the state
        (gates, cells, inputs) needed for the backward pass.

        NOTE(review): delegates to the module-level `recurrent_lstm_forward`,
        defined elsewhere in this file.
        """
        Y, (G, C, S) = recurrent_lstm_forward(W, b, h_init, c_init, inputs)
        return Y, (G, C, S)

    def backprop_recurrent_lstm(
        self,
        dY: Floats3d,
        fwd_state: Tuple[Floats3d, Floats3d, Floats3d],
        params: Tuple[Floats2d, Floats1d],
    ) -> Tuple[Floats3d, Tuple[Floats2d, Floats1d, Floats1d, Floats1d]]:
        """Backward pass of `recurrent_lstm`: returns dX plus the gradients
        of (W, b, initial hidden state, initial cell state).

        NOTE(review): delegates to the module-level `backprop_recurrent_lstm`
        helper, defined elsewhere in this file.
        """
        dCt = self.alloc2f(dY.shape[1], dY.shape[2])
        empty_row = self.alloc3f(1, dY.shape[1], dY.shape[2])
        # Offset dY by 1
        dY = self.xp.vstack((empty_row, dY))
        dW, db, dX, dY, dC0 = backprop_recurrent_lstm(dY, dCt, (fwd_state, params))
        return dX, (dW, db, dY[0].sum(axis=0), dC0.sum(axis=0))
def maxout(self, X: Floats3d) -> Tuple[Floats2d, Ints2d]:
which = X.argmax(axis=-1, keepdims=False)
return X.max(axis=-1), which
def backprop_maxout(self, dY: Floats2d, which: Ints2d, P: int) -> Floats3d:
dX = self.alloc3f(dY.shape[0], dY.shape[1], P)
for b in range(dY.shape[0]):
for o in range(dY.shape[1]):
dX[b, o, which[b, o]] = dY[b, o]
return dX
def relu(self, X: Floats2d, inplace: bool = False) -> Floats2d:
if not inplace:
return X * (X > 0)
else:
X *= X > 0
return X
def backprop_relu(
self, dY: Floats2d, Y: Floats2d, inplace: bool = False
) -> Floats2d:
if not inplace:
return dY * (Y > 0)
dY *= Y > 0
return dY
def mish(self, X: Floats2d, threshold: float = 20.0) -> Floats2d:
Y = self.alloc2f(*X.shape, dtype=X.dtype)
tmp = X * self.xp.tanh(self.xp.log(1.0 + self.xp.exp(X)))
for i in range(X.shape[0]):
for j in range(X.shape[1]):
if X[i, j] >= threshold:
Y[i, j] = X[i, j]
else:
Y[i, j] = tmp[i, j]
return Y
def backprop_mish(
self,
dY: Floats2d,
X: Floats2d,
threshold: float = 20.0,
out: Optional[Floats2d] = None,
) -> Floats2d:
xp = get_array_module(X)
indices = X < threshold
Xsub = X[indices]
dYsub = dY[indices]
omega = 4.0 * (Xsub + 1.0)
omega += 4.0 * xp.exp(2.0 * Xsub)
omega += xp.exp(Xsub) * ((4.0 * Xsub) + 6.0)
delta = 2.0 * xp.exp(Xsub)
delta += xp.exp(2.0 * Xsub)
delta += 2.0
dXsub = dYsub * ((xp.exp(Xsub) * omega) / (delta ** 2))
if out is None:
out = xp.zeros(dY.shape, dtype="f")
# Gradient when above threshold will ignore softplus.
out[:] = dY + dY * self.dtanh(X)
out[indices] = dXsub
return out
def update_averages(
self, ema: FloatsT, weights: FloatsT, t: int, max_decay: float = 0.9999
) -> None:
# Internals for optimizer
decay = (1.0 + t) / (10.0 + t)
if decay > max_decay:
decay = max_decay
ema -= (1 - decay) * (ema - weights)
    def adam(
        self,
        weights: Floats1d,
        gradient: Floats1d,
        mom1: Floats1d,
        mom2: Floats1d,
        beta1: float,
        beta2: float,
        eps: float,
        learn_rate: float,
        mod_rate: float = 1.0,
    ) -> Tuple[Floats1d, Floats1d, Floats1d, Floats1d]:
        """One Adam step. `weights`, `mom1` and `mom2` are updated *in place*
        and also returned (together with the unchanged `gradient`)."""
        # Internals for optimizer
        # First and second moment estimates, decayed then fed the new gradient.
        mom1 *= beta1
        mom2 *= beta2
        mom1 += gradient * (1.0 - beta1)
        mom2 += gradient * gradient * (1.0 - beta2)
        # Here we assume learn rate is calculated by the caller.
        # cdef weight_t a_t = learn_rate * sqrt(1-beta2**hp.t) / (1-beta1**hp.t);
        weights -= learn_rate * (mom1 / (mod_rate * self.xp.sqrt(mom2) + eps))
        return weights, gradient, mom1, mom2
def clip_gradient(self, gradient: FloatsT, threshold: float) -> FloatsT:
# Internals for optimizer
xp = get_array_module(gradient)
grad_norm = xp.linalg.norm(gradient)
if grad_norm >= threshold:
gradient *= threshold / grad_norm
return gradient
    def logloss(self, y_true: FloatsT, y_pred: FloatsT) -> float:
        # Currently not used
        """Negative binary cross-entropy with a small epsilon for stability.

        NOTE(review): the return annotation says `float`, but the expression is
        elementwise — the result is an array unless the inputs are scalars, and
        it is not summed or averaged. Verify the intended contract before use.
        """
        log_yp = self.xp.log(y_pred + 1e-8)
        loss = (y_true * log_yp) + (1 - y_true) * self.xp.log((1 - y_pred) + 1e-8)
        return -loss
def reduce_sum(self, X: Floats2d, lengths: Ints1d) -> Floats2d:
Y = self.alloc2f(lengths.shape[0], X.shape[1])
start = 0
for i, length in enumerate(lengths):
Y[i] = X[start : start + length].sum(axis=0)
start += length
return Y
def reduce_mean(self, X: Floats2d, lengths: Ints1d) -> Floats2d:
Y = self.alloc2f(lengths.shape[0], X.shape[1])
start = 0
for i, length in enumerate(lengths):
if length:
Y[i] = X[start : start + length].mean(axis=0)
start += length
return Y
def reduce_max(self, X: Floats2d, lengths: Ints1d) -> Tuple[Floats2d, Ints2d]:
Y = self.alloc2f(lengths.shape[0], X.shape[1])
which = self.alloc2i(lengths.shape[0], X.shape[1])
start = 0
for i, length in enumerate(lengths):
if length:
which[i] = X[start : start + length].argmax(axis=0)
Y[i] = X[start : start + length].max(axis=0)
start += length
return Y, which
def backprop_reduce_sum(self, d_sums: Floats2d, lengths: Ints1d) -> Floats2d:
dX = self.alloc2f(lengths.sum(), d_sums.shape[1])
start = 0
for i, length in enumerate(lengths):
dX[start : start + length] = d_sums[i]
start += length
return dX
def backprop_reduce_mean(self, d_means: Floats2d, lengths: Ints1d) -> Floats2d:
dX = self.alloc2f(lengths.sum(), d_means.shape[1])
start = 0
for i, length in enumerate(lengths):
dX[start : start + length] = d_means[i] / length
start += length
return dX
def backprop_reduce_max(
self, d_maxes: Floats2d, which: Ints2d, lengths: Ints1d
) -> Floats2d:
dX = self.alloc2f(lengths.sum(), d_maxes.shape[1])
start = 0
for i, length in enumerate(lengths):
dX[start : start + length, which[i]] = d_maxes[i]
start += length
return dX
def hash(self, ids: Ints1d, seed: int) -> Ints2d:
"""Hash a sequence of 64-bit keys into a table with 4 32-bit keys, using
murmurhash3.
"""
from .numpy_ops import NumpyOps
numpy_ops = NumpyOps()
return self.asarray2i(
numpy_ops.hash(numpy_ops.asarray(ids, dtype="uint64"), seed)
)
def ngrams(self, n: int, keys: Ints1d) -> Ints1d:
from .numpy_ops import NumpyOps
numpy_ops = NumpyOps()
return self.asarray1i(
numpy_ops.ngrams(n, numpy_ops.asarray(keys, dtype="uint64"))
)
def position_encode(
self, N: int, D: int, period: int = 10000, out: Optional[Floats2d] = None
) -> Floats2d:
# Currently internals only
from .numpy_ops import NumpyOps
numpy_ops = NumpyOps()
return self.asarray2f(numpy_ops.position_encode(N, D, period, out))
def scatter_add(
self, table: FloatsXd, indices: IntsXd, values: FloatsXd
) -> FloatsXd:
return self.xp.add.at(table, indices, values)
def insert_into(self, shape, Xs):
"""Maybe don't need this? Just a quicky to get Jax working."""
output = self.alloc(shape, dtype=Xs[0].dtype)
for i, x in enumerate(Xs):
output[i, : x.shape[0]] = x
return output
# This code is intentionally almost-duplicate with the Jax one. It's kind
# of hard to condition on jax vs not jax without messing up the jax JIT,
# and we'll want to have a more specialised implementation for non-Jax
# versions. But for now this has been tested and works, so we'll just leave
# it as a reference implementation.
"""
LSTM Notation (kind of involved, but made it a lot easier to write)
X: Inputs
Y: Outputs (aka hiddens)
C: Cells
G: Gates (Output of non-linearity, i.e. lstm_gates(X @ W.T)
A: Activations (X @ W.T, before non-linearity)
Imagine we have the input:
batch = [
["apple", "banana", "cantaloupe", "date", "elderberry"],
["aardvark", "bat", "capybara", "dingo", "elephant"]
]
The input variable X will have one vector per word, so X[0, 1] will be banana's
vector, X[0, 1, 0] will be a float, the first element of that vector.
We're computing an output variable Y of shape (nL, nB, nO), so that Y[0, 1] is
the output variable of banana.
A problem with variables for RNNs is keeping the timesteps straight. It's hard
to distinguish the current, previous, and next timesteps. To solve this problem,
we follow the convention that **we are at timestep 3**.
Additionally, the variables for Y and C are offset by one, as the 0th elements
have the initial hiddens and initial cells. So:
t=3
Xt3: The input vectors for 'dingo' and 'date', i.e. X[t]
Yt3: The output vectors for 'dingo' and 'date', i.e. Y[t+1] (Y is offset.)
Ct2: The cells calculated at 'c...', that are the input for 'd...'
Ct3: The cells calculated at 'd...', that are the input for 'e...'
At3: The activations at 'd...'
Gt3: The gates at 'd...'
"""
def recurrent_lstm_forward(W, b, h_init, c_init, X):
    """Run the reference LSTM over all timesteps of X (see notation above).

    Fix: parameter order is (W, b, h_init, c_init, X) to match the call in
    Ops.recurrent_lstm, which passes the initial *hidden* state before the
    initial *cell* state. The previous (c_init, h_init) order silently swapped
    the two initial states — the body reads h_init for Y[0]/nO and c_init for
    C[0].
    """
    xp = get_array_module(W)
    nL, nB, nI = X.shape
    nO = h_init.shape[0]
    # Preallocate these so we can pass them through for loop.
    Y = xp.zeros((nL + 1, nB, nO), dtype="f")
    G = xp.zeros((nL, nB, nO * 4), dtype="f")
    C = xp.zeros((nL + 1, nB, nO), dtype="f")
    # Set initial hidden and cell states. The Y and C will be shifted 1,
    # so that we can have fewer arrays.
    Y[0] = h_init
    C[0] = c_init
    state = ((W, b, X), (Y, C, G))
    for i in range(X.shape[0]):
        state = lstm_stepper_forward(i, state)
    (W, b, X), (Y, C, G) = state
    # Recall that Y and C are both offset by 1. Y[1] is the output for
    # X[1], while Y[0] was used as an input for Y[1]. We use
    # the S values to backprop the weights, so we need X the previous Ys.
    S = xp.concatenate((X, Y[:-1]), axis=-1)
    return Y[1:], (G, C, S)
def lstm_stepper_forward(t, state):
    """Advance the LSTM one timestep, writing Y[t + 1], C[t + 1] and G[t]."""
    (W, b, X), (Y, C, G) = state
    # Get the activations for this timestep.
    At3 = lstm_weights_forward(X[t], Y[t], W, b)
    # The offsets here are a bit unintuitive, because Y and C are 1-offset.
    Ct2 = C[t]
    Yt3, Ct3, Gt3 = lstm_gates_forward(At3, Ct2)
    Y[t + 1] = Yt3
    # Fix: store the new *cell* state. The previous `C[t + 1] = Yt3` fed the
    # hidden output back in as the cell state, breaking the recurrence (the
    # next step and the backward pass both read C[t + 1] as Ct3).
    C[t + 1] = Ct3
    G[t] = Gt3
    return (W, b, X), (Y, C, G)
def backprop_recurrent_lstm(dY, dCt, fwd_vars):
    """Walk the timesteps in reverse, accumulating dW/db and filling dX.

    `dY` arrives with the extra leading row added by the caller (1-offset,
    like Y and C in the forward pass).
    """
    xp = get_array_module(dY)
    (G, C, S), (W, b) = fwd_vars
    nL = S.shape[0]
    nB = dY.shape[1]
    # S concatenates inputs and previous hiddens, so nI is what remains after
    # removing the output width.
    nI = S.shape[2] - dY.shape[2]
    # Preallocate these so we can pass them through for loop.
    dX = xp.zeros((nL, nB, nI), dtype="f")
    dW = xp.zeros(W.shape, dtype="f")
    db = xp.zeros(b.shape, dtype="f")
    state = (
        (dW, db, dX),  # The gradi-outs (Write-only)
        (dY, dCt),  # The gradi-ins (Read and write)
        (G, C, S),  # Forward state (Read-only)
        (W, b),  # Params (Read-only)
    )
    for t in range(nL - 1, -1, -1):
        state = backprop_lstm_stepper(t, state)
    (dW, db, dX), (dY, dCt), (G, C, S), (W, b) = state
    return dW, db, dX, dY, dCt
def backprop_lstm_stepper(t, state):
    """Backprop one timestep; returns the state tuple with updated gradients."""
    (dW, db, dX), (dY, dCt3), (G, C, S), (W, b) = state
    # Recall, we're at step 3, Y and C are offset by 1. See above.
    dYt3 = dY[t + 1]
    Ct3 = C[t + 1]
    St3 = S[t]
    Gt3 = G[t]
    Ct2 = C[t]
    # NOTE(review): the argument order here (cell gradient first, then hidden
    # gradient) must agree with backprop_lstm_gates's parameter list — verify
    # against that function's signature.
    dAt3, dCt2 = backprop_lstm_gates(dCt3, dYt3, Gt3, Ct3, Ct2)
    dXt3, dYt2, dW3, db3 = backprop_lstm_weights(dAt3, (St3, W, b))
    dX[t] = dXt3
    dY[t] = dYt2
    # dW/db accumulate across timesteps; dCt2 becomes the next step's dCt3.
    return (dW + dW3, db + db3, dX), (dY, dCt2), (G, C, S), (W, b)
def lstm_weights_forward(Xt3, Yt2, W, b):
    """Affine part of one LSTM step: concatenate the input with the previous
    hidden state and apply the weights and bias."""
    xp = get_array_module(Yt2)
    St3 = xp.concatenate((Xt3, Yt2), axis=-1)
    return St3 @ W.T + b
def backprop_lstm_weights(dAt3, fwd_state):
    """Backprop through the affine step; splits dSt3 back into the gradient of
    the inputs (dXt3) and of the previous hiddens (dYt2)."""
    St3, W, b = fwd_state
    dW = dAt3.T @ St3
    db = dAt3.sum(axis=0)
    dSt3 = dAt3 @ W
    # W stacks the four gates, so the output width is a quarter of its rows.
    nO = W.shape[0] // 4
    nI = St3.shape[1] - nO
    return dSt3[:, :nI], dSt3[:, nI:], dW, db
def lstm_gates_forward(At3, Ct2):
    """Apply the LSTM non-linearities: split the activations into the four
    gates, update the cell state, and compute the new hidden state.

    Returns (Yt3, Ct3, Gt3): hidden output, new cells, and the concatenated
    gate outputs saved for the backward pass.
    """
    xp = get_array_module(At3)
    # hf, hi, ho, hc: Forget, input, output, cell gates.
    At3_hf, At3_hi, At3_ho, At3_hc = xp.split(At3, 4, axis=-1)
    # Number the steps here, to refer back for backward pass.
    # 1. Activations
    hf = sigmoid(At3_hf) # 1a
    hi = sigmoid(At3_hi) # 1b
    ho = sigmoid(At3_ho) # 1c
    hc = xp.tanh(At3_hc) # 1d
    Ct3 = hf * Ct2 # 2a
    Ct3 += hi * hc # 2b
    tanhCt3 = xp.tanh(Ct3) # 3a
    Yt3 = tanhCt3 * ho # 3b
    # We don't need the gradient for this, it's just for backprop calculation.
    Gt3 = xp.concatenate((hf, hi, ho, hc), axis=-1)
    return Yt3, Ct3, Gt3
def backprop_lstm_gates(
    dCt3: Array2d, dYt3: Array2d, Gt3: Array2d, Ct3: Array2d, Ct2: Array2d
) -> Tuple[Array3d, Array2d]:
    """Backprop through the LSTM gate non-linearities for one timestep.

    Fix: parameter order is (dCt3, dYt3, ...) to match the single call site in
    backprop_lstm_stepper, which passes the cell-state gradient first; the
    previous (dYt3, dCt3, ...) signature silently swapped the hidden- and
    cell-state gradients. Note that dCt3 is accumulated into *in place*.
    """
    # See above for notation. Step numbering refers to forward_lstm_gates
    xp = get_array_module(dYt3)
    hf, hi, ho, hc = xp.split(Gt3, 4, axis=-1)
    tanhCt3 = xp.tanh(Ct3)
    # 3b: Yt3 = tanhCt3 * ho
    d_ho = dYt3 * tanhCt3
    d_tanhCt3 = dYt3 * ho
    # 3a: tanhCt3 = tanh(Ct3)
    dCt3 += d_tanhCt3 * dtanh(tanhCt3)
    # 2b: Ct3 += hi * hc
    d_hi = dCt3 * hc
    d_hc = dCt3 * hi
    # 2a: Ct3 = hf * Ct2
    d_hf = dCt3 * Ct2
    dCt2 = dCt3 * hf
    d_At3_hc = d_hc * dtanh(hc) # 1d
    d_At3_ho = d_ho * dsigmoid(ho) # 1c
    d_At3_hi = d_hi * dsigmoid(hi) # 1b
    d_At3_hf = d_hf * dsigmoid(hf) # 1a
    dAt3 = xp.concatenate((d_At3_hf, d_At3_hi, d_At3_ho, d_At3_hc), axis=-1)
    return dAt3, dCt2
def sigmoid(X):
    """Elementwise logistic sigmoid: 1 / (1 + exp(-X))."""
    xp = get_array_module(X)
    return 1.0 / (1.0 + xp.exp(-X))
def dsigmoid(Y: ArrayT) -> ArrayT:
    """Sigmoid derivative, expressed in the sigmoid *output* Y."""
    return Y * (1.0 - Y)
def dtanh(Y: ArrayT) -> ArrayT:
    """Tanh derivative, expressed in the tanh *output* Y."""
    return 1 - Y ** 2
| [
"42891786+kiranm211@users.noreply.github.com"
] | 42891786+kiranm211@users.noreply.github.com |
e07d63a1fbeffe6c57894e08f9d8cb4e1e015a6f | 564d6a4d305a8ac6a7e01c761831fb2081c02d0f | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_04_01/operations/_default_security_rules_operations.py | 1845d2fdfbb4b739407b79adfc58bf53747e21ea | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | paultaiton/azure-sdk-for-python | 69af4d889bac8012b38f5b7e8108707be679b472 | d435a1a25fd6097454b7fdfbbdefd53e05029160 | refs/heads/master | 2023-01-30T16:15:10.647335 | 2020-11-14T01:09:50 | 2020-11-14T01:09:50 | 283,343,691 | 0 | 0 | MIT | 2020-07-28T22:43:43 | 2020-07-28T22:43:43 | null | UTF-8 | Python | false | false | 8,956 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DefaultSecurityRulesOperations(object):
    """DefaultSecurityRulesOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2019_04_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # NOTE: AutoRest-generated operations class (see the file header) — manual
    # edits may be lost when the client is regenerated.
    models = models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        resource_group_name, # type: str
        network_security_group_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["models.SecurityRuleListResult"]
        """Gets all default security rules in a network security group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security group.
        :type network_security_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either SecurityRuleListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_04_01.models.SecurityRuleListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.SecurityRuleListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request. `next_link` is None for the first page;
            # continuation pages arrive as a complete URL from the service.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Deserialize one page: returns (continuation link, iterator of rules).
            deserialized = self._deserialize('SecurityRuleListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            # Anything other than 200 is surfaced as an ARM-formatted error.
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/defaultSecurityRules'}  # type: ignore
    def get(
        self,
        resource_group_name, # type: str
        network_security_group_name, # type: str
        default_security_rule_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> "models.SecurityRule"
        """Get the specified default network security rule.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security group.
        :type network_security_group_name: str
        :param default_security_rule_name: The name of the default security rule.
        :type default_security_rule_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: SecurityRule, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_04_01.models.SecurityRule
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.SecurityRule"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'defaultSecurityRuleName': self._serialize.url("default_security_rule_name", default_security_rule_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Anything other than 200 is surfaced as an ARM-formatted error.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('SecurityRule', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/defaultSecurityRules/{defaultSecurityRuleName}'}  # type: ignore
| [
"noreply@github.com"
] | paultaiton.noreply@github.com |
7ce4f1de6d47f67a0b6736268752def4ef9f8bc6 | bb487c51cf7df31d449c6d07f5337ea5ff19136f | /src/prep/main.py | 66a0dc82409a6ab3792c054ac059f0cf75eb5981 | [
"BSD-3-Clause"
] | permissive | HubTou/prep | ab2432e255137bf0b25b4cf0cce45e7fab103905 | ae3aad6a817f978141f0d79f577b6dd46b2bd1b1 | refs/heads/main | 2023-08-01T23:46:09.649612 | 2021-09-26T16:05:21 | 2021-09-26T16:05:21 | 376,159,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,169 | py | #!/usr/bin/env python
""" prep - prepare text for statistical processing
License: 3-clause BSD (see https://opensource.org/licenses/BSD-3-Clause)
Author: Hubert Tournier
"""
import getopt
import logging
import os
import string
import sys
import unicodedata
# Version string used by the what(1) and ident(1) commands:
ID = "@(#) $Id: prep - prepare text for statistical processing v1.0.3 (September 26, 2021) by Hubert Tournier $"
# Default parameters. Can be overcome by environment variables, then command line options
parameters = {
"Ascii": False,
"Number": False,
"Hyphen": False,
"Ignore": "",
"Only": "",
"Ponctuate": False
}
PONCTUATION = "!(),.:;?"
################################################################################
def initialize_debugging(program_name):
    """Configure console logging; DEBUG/INFO output stays silenced until the
    --debug option re-enables it via logging.disable(NOTSET)."""
    log_format = program_name + ": %(levelname)s: %(message)s"
    logging.basicConfig(format=log_format, level=logging.DEBUG)
    logging.disable(logging.INFO)
################################################################################
def display_help():
    """Print usage and option help to standard error."""
    help_lines = (
        "usage: prep [-a|--ascii] [-d|--number] [-h|--hyphen]",
        " [-i|--ignore FILE] [-o|--only FILE] [-p|--ponctuate] [--debug]",
        " [--help|-?] [--version] [--] filename [...]",
        " ------------------ ------------------------------------------------",
        " -a|--ascii Try to convert Unicode letters to ASCII",
        " -d|--number Print the word number",
        " -h|--hyphen Don't break words on hyphens",
        " -i|--ignore FILE Take the next file as an ignore file",
        " -o|--only FILE Take the next file as an only file",
        " -p|--ponctuate Include punctuation marks",
        " --debug Enable debug mode",
        " --help|-? Print usage and this help message and exit",
        " --version Print version and exit",
        " -- Options processing terminator",
    )
    for help_line in help_lines:
        print(help_line, file=sys.stderr)
    print(file=sys.stderr)
################################################################################
def process_command_line():
    """Parse sys.argv options into the global `parameters` dict and return the
    remaining (non-option) arguments. Exits on syntax errors, --help and
    --version."""
    # pylint: disable=C0103
    global parameters
    # pylint: enable=C0103
    # option letters followed by : expect an argument
    # same for option strings followed by =
    character_options = "adhi:o:p?"
    string_options = [
        "ascii",
        "debug",
        "help",
        "hyphen",
        "ignore",
        "number",
        "only",
        "ponctuate",
        "version",
    ]
    try:
        options, remaining_arguments = getopt.getopt(
            sys.argv[1:], character_options, string_options
        )
    except getopt.GetoptError as error:
        logging.critical("Syntax error: %s", error)
        display_help()
        sys.exit(1)
    for option, argument in options:
        if option in ("-a", "--ascii"):
            parameters["Ascii"] = True
        elif option in ("-d", "--number"):
            parameters["Number"] = True
        elif option in ("-h", "--hyphen"):
            parameters["Hyphen"] = True
        elif option in ("-i", "--ignore"):
            if os.path.isfile(argument):
                parameters["Ignore"] = argument
            else:
                # Fix: the message said "-h|--ignore" although this branch
                # handles the -i|--ignore option.
                logging.critical("-i|--ignore argument is not a file")
                sys.exit(1)
        elif option in ("-o", "--only"):
            if os.path.isfile(argument):
                parameters["Only"] = argument
            else:
                logging.critical("-o|--only argument is not a file")
                sys.exit(1)
        elif option in ("-p", "--ponctuate"):
            parameters["Ponctuate"] = True
        elif option == "--debug":
            logging.disable(logging.NOTSET)
        elif option in ("--help", "-?"):
            display_help()
            sys.exit(0)
        elif option == "--version":
            # The ID string is split so that ident(1) doesn't match this line.
            print(ID.replace("@(" + "#)" + " $" + "Id" + ": ", "").replace(" $", ""))
            sys.exit(0)
    logging.debug("process_command_line(): parameters:")
    logging.debug(parameters)
    logging.debug("process_command_line(): remaining_arguments:")
    logging.debug(remaining_arguments)
    return remaining_arguments
################################################################################
def load_words_file(filename):
    """Load an ignore or only file and return its contents as a list of words.

    Returns an empty list when no filename was given.
    """
    if not filename:
        return []
    with open(filename, "r") as file:
        # Fix: return a list of whitespace-separated words instead of the raw
        # file text. Callers test membership with `word in ...`; on a plain
        # string that is a *substring* test, so e.g. an ignore file containing
        # "cannot" would also suppress "can", "not" and "anno".
        return file.read().split()
################################################################################
def print_word(word, ignore_words, only_words, word_number):
    """Print a word, optionally numbered, honouring the ignore/only filters."""
    if word in ignore_words:
        return
    if only_words and word not in only_words:
        return
    if parameters["Number"]:
        print("{: 6d} {}".format(word_number, word))
    else:
        print(word)
################################################################################
def is_unicode_letter(character):
    """Return True for non-ASCII Unicode letters; ASCII characters (letters
    included) always return False — they are handled separately."""
    if ord(character) <= 127:
        return False
    return unicodedata.category(character).startswith("L")
################################################################################
def to_ascii(character):
    """Map a non-ASCII letter to its ASCII base form (when the Ascii option is
    on); return anything else unchanged."""
    if not (parameters["Ascii"] and ord(character) > 127):
        return character
    # NFKD decomposition splits off combining marks, which the ASCII
    # encoding then drops.
    decomposed = unicodedata.normalize("NFKD", character)
    return decomposed.encode("ASCII", "ignore").decode("utf-8")
################################################################################
def process_file(file, ignore_words, only_words, word_number):
    """Tokenize `file` into words and print them via print_word().

    Returns the word number reached, so several files can share a running
    count. State machine: `word` accumulates the current token; `hyphen`
    remembers a '-' whose fate depends on the next character (line-ending
    hyphens join words, in-line hyphens split them).
    """
    word = ""
    hyphen = False
    for line in file.readlines():
        for character in line.strip():
            if not word:
                # Not inside a word yet: a letter starts one.
                if character in string.ascii_letters \
                or is_unicode_letter(character):
                    word = to_ascii(character.lower())
                elif parameters["Ponctuate"] and character in PONCTUATION:
                    print_word(character, [], [], 0)
            # hyphen inside a line:
            elif hyphen:
                # The pending '-' was mid-line, so it ends the current word.
                print_word(word, ignore_words, only_words, word_number)
                word_number += 1
                if character in string.ascii_letters \
                or is_unicode_letter(character):
                    word = to_ascii(character.lower())
                else:
                    word = ""
                    if parameters["Ponctuate"] and character in PONCTUATION:
                        print_word(character, [], [], 0)
                hyphen = False
            else:
                # Inside a word: letters (and apostrophes) extend it.
                if character in string.ascii_letters + "'" \
                or is_unicode_letter(character):
                    word += to_ascii(character.lower())
                elif character == "-":
                    if parameters["Hyphen"]:
                        word += "-"
                    else:
                        hyphen = True
                        # Let's see if next character is the end of line or not
                else:
                    # Any other character terminates the word.
                    if word[-1:] == "-":
                        word = word[:-1]
                    print_word(word, ignore_words, only_words, word_number)
                    word_number += 1
                    word = ""
                    if parameters["Ponctuate"] and character in PONCTUATION:
                        print_word(character, [], [], 0)
        # This is the end... of the line
        if word:
            if word[-1:] == "-":
                word = word[:-1]
            # hyphen at the end of line:
            elif hyphen:
                # Keep the word open: it continues on the next line.
                hyphen = False
            else:
                print_word(word, ignore_words, only_words, word_number)
                word_number += 1
                word = ""
    return word_number
################################################################################
def main():
    """The program's main entry point."""
    program_name = os.path.basename(sys.argv[0])
    initialize_debugging(program_name)
    arguments = process_command_line()
    ignore_words = load_words_file(parameters["Ignore"])
    only_words = load_words_file(parameters["Only"])
    word_number = 1
    if not arguments:
        # No file arguments: filter standard input instead.
        process_file(sys.stdin, ignore_words, only_words, word_number)
    else:
        for filename in arguments:
            # Anything that is not a regular file is silently skipped.
            if not os.path.isfile(filename):
                continue
            with open(filename, "r") as file:
                word_number = process_file(file, ignore_words, only_words, word_number)
    sys.exit(0)
if __name__ == "__main__":
    # Run the CLI only when executed as a script, not when imported.
    main()
| [
"noreply@github.com"
] | HubTou.noreply@github.com |
070868723de51b8ed49075381288046cc3a10261 | 28667eace86d572ad8f3a49873f1beab2e68d671 | /test_model.py | 6c28b441948433fd38b52a29f5f419477a1f5c84 | [] | no_license | akshatkg/Augmented-Reality-Sudoku-Solver | 21d27d70d8b2559a69894c913321f1dd37b81d31 | 57545517e293cb23ee4c3a4452832e409f479377 | refs/heads/main | 2023-08-07T07:48:21.201302 | 2021-10-07T14:05:46 | 2021-10-07T14:05:46 | 414,625,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 788 | py | from sudoku import Detector
# from model import Trainer
def testSolver():
    """Smoke-test for the sudoku Detector.

    NOTE(review): every assertion below is commented out, so this test
    currently only checks that Detector() can be constructed.
    """
    d = Detector()
    # with open('assets/sudokus/sudoku1.txt', 'r') as file:
    #     answer = [list(map(int, line.strip('\n'))) for line in file.readlines()]
    #     ## Add correction array
    #     result = d.run(path='assets/sudokus/sudoku1.jpg', corrections=[])
    #     for i in range(9):
    #         for j in range(9):
    #             assert result[i][j] == answer[i][j]
    # with open('assets/sudokus/sudoku2.txt', 'r') as file:
    #     answer = [list(map(int, line.strip('\n'))) for line in file.readlines()]
    #     d = Detector()
    #     # Add correction array
    #     result = d.run(path='assets/sudokus/sudoku2.jpg', corrections=[(7,5,9), (7,7,4)])
    #     for i in range(9):
    #         for j in range(9):
# assert result[i][j] == answer[i][j] | [
"noreply@github.com"
] | akshatkg.noreply@github.com |
cc5efee86d9bd9204bbc9ff243e80878e33ea5a6 | ae4be4a17468f89e06975a402cddd7dabf692ec9 | /ABC/137/C/source.py | 5da7bff89fd85a546813bb268e62e676e9596f88 | [] | no_license | naru380/AtCoder | 95ae61230d3182dc2a317a77f8e9300c68443199 | 296d071d6a91ea7e061ee3923b5c26b0c7536119 | refs/heads/master | 2020-09-20T02:12:29.405393 | 2020-05-31T09:58:08 | 2020-05-31T09:58:08 | 224,354,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,136 | py | import itertools
from collections import Counter
import math
N = int(input())  # number of strings to read from stdin
def generate_prime_numbers():
    """Sieve of Eratosthenes up to 150; return the first 27 primes (one per
    lowercase letter, with a spare)."""
    limit = 150
    is_prime = [True] * (limit + 1)
    is_prime[0] = False
    is_prime[1] = False
    for p in range(2, limit + 1):
        for multiple in range(p * 2, limit + 1, p):
            is_prime[multiple] = False
    primes = [n for n in range(limit + 1) if is_prime[n]]
    return primes[:27]
def combination(n, r):
    """n choose r, computed exactly with integer factorials."""
    return math.factorial(n) // (math.factorial(r) * math.factorial(n - r))
prime_numbers = generate_prime_numbers()
encoded_strings = []
# Encode each string as the product of one prime per letter. By unique
# factorization, two strings get the same product exactly when they contain
# the same multiset of letters.
for i in range(N):
    S = input()
    encoded_string = 1
    for c in S:
        char_to_int = ord(c) - ord('a')
        encoded_string *= prime_numbers[char_to_int]
    encoded_strings.append(encoded_string)
# print(encoded_strings)
ans = 0
# for comb in itertools.combinations(encoded_strings, 2):
#     if comb[0] == comb[1]:
#         ans += 1
# Count pairs of equal encodings: m duplicates contribute C(m, 2) pairs.
counter = Counter(encoded_strings)
for i in counter.values():
    if i > 1:
        ans += combination(i, 2)
print(ans)
| [
"naruhey1211guitar.m@gmail.com"
] | naruhey1211guitar.m@gmail.com |
1177c41442555fdd9d8a72d00ab4d9638cc68521 | d10c66290bac1006676c96ef6c3b56328c388f69 | /ball.py | 22258e595be302f8b1fc8b0ae199aaacca8aaca8 | [] | no_license | MrBizChicken/clock | 5e5495a439b5a4414bc3f34203a8c8f88fa2c80a | e90802e81d07ddbc20e6a3fa512f56e8a5d286f5 | refs/heads/main | 2023-06-03T09:50:29.379505 | 2021-06-27T23:17:57 | 2021-06-27T23:17:57 | 380,831,617 | 0 | 1 | null | 2021-06-27T23:17:57 | 2021-06-27T20:24:22 | Python | UTF-8 | Python | false | false | 928 | py | from constants import *
from main_block import *
import pygame, main_block, random
class Ball(main_block.Main_block):
    """A bouncing block: spawns at a random position inside the game area with
    a per-axis speed randomized around `speed`, and reflects off the edges."""
    def __init__(self, size, speed, color):
        spawn_x = random.randint(0, GAME_WIDTH - size)
        spawn_y = random.randint(0, GAME_HEIGHT - size)
        super().__init__(spawn_x, spawn_y, size, color)
        # Each axis gets an independent speed within +/-20% of the request.
        lo = int(speed * 0.8)
        hi = int(speed * 1.2)
        self.x_speed = random.randint(lo, hi)
        self.y_speed = random.randint(lo, hi)
    def update(self):
        """Move one step and flip the velocity at any boundary crossing."""
        self.rect.x += self.x_speed
        self.rect.y += self.y_speed
        if self.rect.x < 0 or self.rect.right > GAME_WIDTH:
            self.x_speed = -self.x_speed
        if self.rect.y < 0 or self.rect.bottom > GAME_HEIGHT:
            self.y_speed = -self.y_speed
| [
"ravenstudiosrob@yahoo.com"
] | ravenstudiosrob@yahoo.com |
2c947291686c8c971951c9f816300f5d96395751 | 331b1318d6034baaf659089e600a654537b9dba0 | /Ex1/sheet1/3_csv/csv.py | 9ef00e70ab065d60bf1acd2c9a23bf7e12303218 | [] | no_license | avtione-repo/BIG_DATA_ANALYTICS | 76805aeb06a7d344a9569f6272625069ccefe846 | adf80095ba6d6304aef22c0628d1cee4f92b38e9 | refs/heads/master | 2021-09-06T05:11:33.952850 | 2018-02-02T16:45:44 | 2018-02-02T16:45:44 | 110,289,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,192 | py | import sys
from itertools import groupby
csv = sys.argv[1]  # path to the CSV file (first command-line argument)
column = int(sys.argv[2])  # zero-based index of the column to analyze (second argument)
def computeMeanCSV():
    """Read the selected column of the CSV file and report its mean or mode."""
    values = []
    try:
        with open(csv) as handle:
            rows = iter(handle)
            next(handle)  # skip the header row
            for row in rows:
                values.append(row.split(",")[column])
        # Numeric columns get a mean, everything else a mode.
        if is_number(values[0]):
            mean(values)
        else:
            mode(values)
    except IndexError:
        print("Index is out of range")
def is_number(s):
    """Return True when *s* parses as a float, False otherwise.

    (from https://stackoverflow.com/questions/354038/how-do-i-check-if-a-string-is-a-number-float-in-python)
    """
    try:
        float(s)
    except ValueError:
        return False
    return True
def mean(data):
    """Print the arithmetic mean of a column of numeric strings."""
    total = sum(float(value) for value in data)
    average = total / len(data)
    print("Mean of column %d: %f" % (column, average))
def mode(data):
    """Print the most frequent value of a non-numeric column."""
    counts = {}
    # Counting over the sorted data keeps the original tie-breaking order.
    for value in sorted(data):
        counts[value] = counts.get(value, 0) + 1
    winner = max(counts, key=counts.get)
    print("Mode of column %d: %s (%d times in the data)" % (column, winner, counts[winner]))
computeMeanCSV()
| [
"alvinrindra@gmail.com"
] | alvinrindra@gmail.com |
fc5ec76b2c61c9a61954ea8f1b2e9852e60d3573 | f4c5186861d13ba9ddaef4dbcdde04cd46f42c7d | /polynomial.py | 3ed59c1638864fb786afe9f5fc7eb3dcc64617f7 | [] | no_license | KindOfFun/Polynoms | b23451e2e9a2d0036cbd10c61d01074fe64f4551 | 87e232bb7f12b806ccace8520128dabcfbdc4798 | refs/heads/master | 2020-11-25T01:52:07.265348 | 2019-12-16T17:23:35 | 2019-12-16T17:23:35 | 228,438,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,183 | py | # polynomial.py
from copy import deepcopy
class Polynomial:
    """A polynomial stored as a list of coefficients in ascending-power order.

    ``self.coefs[i]`` is the coefficient of x**i; trailing zeros are trimmed
    so ``coefs`` always has at least one entry.
    """

    def __init__(self, *coefficients):
        """Build a polynomial from coefficients, a list, a {power: coef} dict,
        another Polynomial, or a single int/float constant."""
        coefs = []
        if len(coefficients) > 1:
            # Several positional arguments: treat them directly as coefficients.
            for i in range(len(coefficients)):
                coefs.append(coefficients[i])
        else:
            if type(coefficients[0]) is list:
                coefs = deepcopy(coefficients[0])
            elif type(coefficients[0]) is dict:
                # Sparse form: keys are powers, values are coefficients.
                n = max(coefficients[0]) + 1
                coefs = [0] * n
                for pow in coefficients[0]:
                    coefs[pow] = coefficients[0][pow]
            elif type(coefficients[0]) is Polynomial:
                coefs = deepcopy(coefficients[0].coefs)
            elif type(coefficients[0]) is int:
                coefs.append(coefficients[0])
            elif type(coefficients[0]) is float:
                coefs.append(coefficients[0])
        # NOTE(review): an unsupported argument type leaves `coefs` empty and
        # the next line raises IndexError -- confirm this is acceptable.
        # Trim trailing zero coefficients, keeping at least one entry.
        while coefs[-1] == 0 and len(coefs) > 1:
            coefs = coefs[:-1]
        self.coefs = coefs

    def __repr__(self):
        """Debug form, e.g. ``Polynomial [1, 2, 3]``."""
        return 'Polynomial ' + str(self.coefs)

    def __str__(self):
        """Human-readable form such as ``2x^2 + 7x - 15``.

        Builds the string from the highest power down; the leading ``' + '`` /
        ``' - '`` produced by the loop is stripped (or folded into a leading
        minus sign) at the end.
        """
        ans = ''
        n = len(self.coefs) - 1
        # Terms of degree 2..n (degree-1 and the constant are handled below).
        for i in range(n - 1):
            if self.coefs[n - i] > 0 and self.coefs[n - i] != 1:
                ans += ' + '
                ans += str(self.coefs[n - i]) + 'x^' + str(n - i)
            elif self.coefs[n - i] < 0 and self.coefs[n - i] != -1:
                ans += ' - '
                ans += str(-self.coefs[n - i]) + 'x^' + str(n - i)
            elif self.coefs[n - i] == 1:
                ans += ' + ' + 'x^' + str(n - i)
            elif self.coefs[n - i] == -1:
                ans += ' - ' + 'x^' + str(n - i)
        # Degree-1 term, written without the '^1'.
        if len(self.coefs) > 1 and self.coefs[1] > 0 and self.coefs[1] != 1:
            ans += ' + ' + str(self.coefs[1]) + 'x'
        elif len(self.coefs) > 1 and self.coefs[1] < 0 and self.coefs[1] != -1:
            ans += ' - ' + str(-self.coefs[1]) + 'x'
        elif len(self.coefs) > 1 and self.coefs[1] == 1:
            ans += ' + ' + 'x'
        elif len(self.coefs) > 1 and self.coefs[1] == -1:
            ans += ' - ' + 'x'
        # Constant term plus cleanup of the leading sign separator.
        if ans == '' and self.coefs[0] == 0:
            return '0'
        elif ans == '':
            return(str(self.coefs[0]))
        elif self.coefs[0] > 0:
            first = ans[:3]
            if first == ' + ':
                ans = ans[3:]
                ans += ' + ' + str(self.coefs[0])
                return ans
            else:
                ans = '-' + ans[3:]
                ans += ' + ' + str(self.coefs[0])
                return ans
        elif self.coefs[0] < 0:
            first = ans[:3]
            if first == ' + ':
                ans = ans[3:]
                ans += ' - ' + str(-self.coefs[0])
                return ans
            else:
                ans = '-' + ans[3:]
                ans += ' - ' + str(-self.coefs[0])
                return ans
        else:
            first = ans[:3]
            if first == ' + ':
                ans = ans[3:]
                return ans
            else:
                ans = '-' + ans[3:]
                return ans

    def __eq__(self, other):
        """Polynomials are equal when their trimmed coefficient lists match;
        `other` may be anything the constructor accepts."""
        other = Polynomial(other)
        return self.coefs == other.coefs

    def __add__(self, other):
        """Coefficient-wise addition; `other` may be any constructor input."""
        other = Polynomial(other)
        if len(self.coefs) < len(other.coefs):
            ans = [0] * len(other.coefs)
            for i in range(len(self.coefs)):
                ans[i] = self.coefs[i] + other.coefs[i]
            for i in range(len(self.coefs), len(other.coefs)):
                ans[i] = other.coefs[i]
            return Polynomial(ans)
        else:
            ans = [0] * len(self.coefs)
            for i in range(len(other.coefs)):
                ans[i] = self.coefs[i] + other.coefs[i]
            for i in range(len(other.coefs), len(self.coefs)):
                ans[i] = self.coefs[i]
            return Polynomial(ans)
    __radd__ = __add__

    def __neg__(self):
        """Return the polynomial with every coefficient negated."""
        ans = deepcopy(self.coefs)
        for i in range(len(ans)):
            ans[i] *= -1
        return Polynomial(ans)

    def __sub__(self, other):
        return self + (-other)

    def __rsub__(self, other):
        return -(self) + other

    def __call__(self, x):
        """Evaluate the polynomial at *x* (Horner's scheme)."""
        if len(self.coefs) == 1:
            return self.coefs[0]
        else:
            b = 0
            n = len(self.coefs)
            for i in range(len(self.coefs)):
                b = self.coefs[n - 1 - i] + b * x
            return b

    def degree(self):
        """Degree of the polynomial (0 for constants)."""
        return len(self.coefs) - 1

    def der(self, d=1):
        """Return the *d*-th derivative (recursively; d=0 returns self)."""
        if d == 0:
            return self
        if d == 1:
            ans = []
            for i in range(1, len(self.coefs)):
                ans.append(i * self.coefs[i])
            return Polynomial(ans)
        else:
            cur = self.der(d - 1)
            return cur.der()

    def __mul__(self, other):
        """Polynomial product via the Cauchy (convolution) formula."""
        other = Polynomial(other)
        n = len(self.coefs) - 1
        m = len(other.coefs) - 1
        ans = [0] * (n + m + 1)
        for i in range(n + m + 1):
            k = 0
            while k <= i and k <= n:
                if i - k > m:
                    k += 1
                else:
                    ans[i] += self.coefs[k] * other.coefs[i - k]
                    k += 1
        return Polynomial(ans)
    __rmul__ = __mul__

    def __mod__(self, other):
        """Remainder of polynomial long division, computed recursively by
        subtracting one quotient term at a time."""
        other = Polynomial(other)
        n = self.degree()
        m = other.degree()
        if n < m:
            return self
        else:
            cur = {}
            cur[n - m] = self.coefs[-1] / other.coefs[-1]
            cur = Polynomial(cur)
            ans = self - cur * other
            return ans % other

    def __rmod__(self, other):
        other = Polynomial(other)
        return other % self

    def gcd(self, other):
        """Greatest common divisor via the Euclidean algorithm.

        NOTE(review): the result is not normalized to a monic polynomial.
        """
        other = Polynomial(other)
        n = self.degree()
        m = other.degree()
        if n > m:
            new = self % other
            if new == Polynomial(0):
                return other
            else:
                res = other.gcd(new)
                return res
        else:
            new = other % self
            if new == Polynomial(0):
                return self
            else:
                res = self.gcd(new)
                return res

    def __iter__(self):
        """Iterate over (power, coefficient) pairs in ascending power order.

        NOTE(review): the cursor is stored on the instance (`self.n`), so two
        simultaneous iterations over the same object interfere -- confirm.
        """
        self.n = (0, self.coefs[0])
        return self

    def __next__(self):
        if self.n[0] < self.degree():
            res = self.n
            self.n = (self.n[0] + 1, self.coefs[self.n[0] + 1])
            return res
        if self.n[0] == self.degree():
            res = self.n
            self.n = (self.n[0] + 1, 0)
            return res
        else:
            raise StopIteration
class RealPolynomial(Polynomial):
    """A polynomial with real coefficients that can locate one real root."""

    def find_root(self):
        """Find one real root by interval doubling followed by bisection.

        Returns an x whose function value is (approximately) within 1e-12
        of zero, or the converged bracket endpoint.
        """
        tolerance = 1e-12
        # Grow the symmetric bracket [-hi, hi] until the endpoint values
        # stop having a (significantly) positive product.
        hi = 2
        while self(hi) * self(-hi) > tolerance:
            hi *= 2
        lo = -hi
        # Standard bisection on [lo, hi].
        while hi - lo > tolerance:
            mid = (lo + hi) / 2
            if abs(self(mid)) < tolerance:
                return mid
            if self(mid) * self(hi) < -tolerance:
                lo = mid
            else:
                hi = mid
        return hi
class QuadraticPolynomial(Polynomial):
    """A polynomial of degree at most two that can list its real roots."""

    def solve(self):
        """Return the real roots in ascending order (empty list when none)."""
        degree = self.degree()
        if degree == 0:
            # A non-zero constant has no roots.
            return []
        if degree == 1:
            return [-self.coefs[0] / self.coefs[1]]
        if degree == 2:
            eps = 1e-12
            a, b, c = self.coefs[2], self.coefs[1], self.coefs[0]
            disc = b * b - 4 * a * c
            if disc < -eps:
                return []
            if -eps < disc < eps:
                # Double root.
                return [-b / (2 * a)]
            root_plus = (-b + disc**0.5) / (2 * a)
            root_minus = (-b - disc**0.5) / (2 * a)
            if root_plus - root_minus < -eps:
                root_plus, root_minus = root_minus, root_plus
            return [root_minus, root_plus]
if __name__ == '__main__':
    # Smoke test: 2x^2 + 7x - 15 has roots -5 and 1.5, so this prints True.
    poly = QuadraticPolynomial([-15, 7, 2])
    print(sorted(poly.solve()) == [-5.0, 1.5])
| [
"noreply@github.com"
] | KindOfFun.noreply@github.com |
5b3fba87ce10ec08239078cb864fd1f049fa8e19 | e888171a028d297dca5120fc748d5816d47b3be6 | /cnn_aenc_genome_aug_tr.py | a4cf151e3c38eeaca8fd6f04047793e1d00f7690 | [] | no_license | AmirUCR/CRISPER-CAS9 | f130a3a2c1df1f6f7e7082ed05b869d0421ffce0 | 4207b794662acfefa82077a88be5fcd3afd0ef41 | refs/heads/master | 2023-04-27T01:24:51.956009 | 2021-05-27T23:45:26 | 2021-05-27T23:45:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,455 | py | from collections import OrderedDict
import os
import sys
import warnings
import argparse
import logging
import h5py as h5
import numpy as np
import pandas as pd
import scipy.io
import six
from six.moves import range
import matplotlib.pyplot as plt
#from dna import *
from sklearn.metrics import roc_auc_score, confusion_matrix
from keras.preprocessing import sequence
from keras.optimizers import RMSprop,Adam, SGD
from keras.models import Sequential, Model
from keras.layers.core import Dropout, Activation, Flatten
from keras.regularizers import l1,l2,l1_l2
from keras.constraints import maxnorm
#from keras.layers.recurrent import LSTM, GRU
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.layers import Conv1D, MaxPooling1D, Dense, LSTM, Bidirectional, BatchNormalization, MaxPooling2D, AveragePooling1D, Input, Multiply, Add, UpSampling1D
from sklearn.metrics import mean_squared_error as mse
import scipy.stats as st
from keras.models import load_model
#from keras.utils import plot_model
#from keras.utils.layer_utils import print_layer_shapes
# fix random seed for reproducibility
from random import shuffle
np.random.seed(1369)
def PREPROCESS(lines):
    """One-hot encode 40-nt sequences from CSV lines (header + data rows).

    Per-row column layout: sequence in column 2, chromatin accessibility in
    column 5, activity score in column 6.

    Returns (SEQ, CA, Score): SEQ is (n, 40, 4) one-hot in A/C/G/T channel
    order; CA and Score are (n, 1) float arrays.
    """
    n_rows = len(lines) - 1  # first line is the header
    SEQ = np.zeros((n_rows, 40, 4), dtype=int)
    CA = np.zeros((n_rows, 1), dtype=float)
    Score = np.zeros((n_rows, 1), dtype=float)
    channel = {'A': 0, 'a': 0, 'C': 1, 'c': 1,
               'G': 2, 'g': 2, 'T': 3, 't': 3}
    for row in range(n_rows):
        fields = lines[row + 1].split(',')
        seq = fields[2]
        Score[row] = float(fields[6])
        CA[row] = float(fields[5])
        for pos in range(40):
            base = seq[pos]
            # Unknown characters are left as an all-zero column, as before.
            if base in channel:
                SEQ[row, pos, channel[base]] = 1
    return SEQ, CA, Score
def PREPROCESS_aug(lines):
    """One-hot encode the augmented-data CSV (header + data rows).

    Per-row column layout: sequence in column 0, activity score in column 2,
    chromatin accessibility in column 3.

    Returns (SEQ, CA, Score): SEQ is (n, 40, 4) one-hot in A/C/G/T channel
    order; CA and Score are (n, 1) float arrays.
    """
    n_rows = len(lines) - 1  # first line is the header
    SEQ = np.zeros((n_rows, 40, 4), dtype=int)
    CA = np.zeros((n_rows, 1), dtype=float)
    Score = np.zeros((n_rows, 1), dtype=float)
    channel = {'A': 0, 'a': 0, 'C': 1, 'c': 1,
               'G': 2, 'g': 2, 'T': 3, 't': 3}
    for row in range(n_rows):
        fields = lines[row + 1].split(',')
        seq = fields[0]
        Score[row] = float(fields[2])
        CA[row] = float(fields[3])
        for pos in range(40):
            base = seq[pos]
            # Unknown characters are left as an all-zero column, as before.
            if base in channel:
                SEQ[row, pos, channel[base]] = 1
    return SEQ, CA, Score
if __name__ == '__main__':
    # ---- Load and preprocess the augmented yeast dataset ----
    print ("Loading train data")
    FILE = open("aug_data_yeast.csv", "r")
    data = FILE.readlines()
    print(len(data))
    SEQ, CA, score = PREPROCESS_aug(data)
    # Flip the sign of the scores and z-score normalize them.
    score = np.dot(score,-1)
    score = st.zscore(score)
    FILE.close()
    print(SEQ.shape)
    print(score.shape)
    print(CA.shape)
    # NOTE(review): the negate + zscore step is applied a second time here,
    # which flips the scores back (zscore of zscored data is unchanged) --
    # confirm this repetition is intentional.
    score = np.dot(score,-1)
    score = st.zscore(score)
    # ---- 95% / 3% / 2% train / validation / test split ----
    trainsize = int(0.95* len(data))
    train_x = SEQ[0:trainsize,:]
    train_nu = CA[0:trainsize]
    train_y = score[0:trainsize]
    print(train_x.shape)
    print(train_y.shape)
    print(train_nu.shape)
    valsize = trainsize + int(0.03*len(data))
    val_x = SEQ[trainsize:valsize,:]
    val_y = score[trainsize:valsize]
    val_nu = CA[trainsize:valsize]
    print(val_x.shape)
    print(val_y.shape)
    print(val_nu.shape)
    test_x = SEQ[valsize:,:]
    test_y = score[valsize:]
    test_nu = CA[valsize:]
    print(test_x.shape)
    print(test_y.shape)
    print(test_nu.shape)
    # ---- Build the regressor on top of the pretrained autoencoder ----
    print('loading model')
    basemodel = load_model('auto_encode.h5')
    #basemodel.summary()
    # Drop the decoder layers, keeping only the encoder as a feature extractor.
    basemodel.layers.pop()
    basemodel.layers.pop()
    basemodel.layers.pop()
    basemodel.layers.pop()
    basemodel.layers.pop()
    #basemodel.summary()
    #print(basemodel.layers)
    # Fine-tune the whole encoder rather than freezing it.
    for layer in basemodel.layers:
        layer.trainable = True
    #model = basemodel.output
    # Regression head: two dropout-regularized dense blocks and a linear output.
    flatten = Flatten()(basemodel.layers[-1].output)
    dropout_1 = Dropout(0.5)(flatten)
    dense_1 = Dense(80, activation='relu', kernel_initializer='glorot_uniform')(dropout_1)
    dropout_2 = Dropout(0.5)(dense_1)
    dense_2 = Dense(units=40, activation="relu",kernel_initializer='glorot_uniform')(dropout_2)
    dropout_3 = Dropout(0.3)(dense_2)
    dense_3 = Dense(units=40, activation="relu",kernel_initializer='glorot_uniform')(dropout_3)
    out = Dense(units=1, activation="linear")(dense_3)
    model_seq = Model(inputs = basemodel.layers[0].output, output = out)
    model_seq.summary()
    #model for epigenetics feature
    #NU = Input(shape=(1,))
    #dense1_nu = Dense(units=40, activation="relu",kernel_initializer='glorot_uniform')(NU)
    #mult = Multiply()([dense_3, dense1_nu])
    # ---- Train with checkpointing and early stopping on validation loss ----
    adam = Adam(lr = 0.001)
    model_seq.compile(loss='mean_squared_error', optimizer=adam)
    checkpointer = ModelCheckpoint(filepath="cas9_seq.hdf5",verbose=1, monitor='val_loss',save_best_only=True)
    earlystopper = EarlyStopping(monitor='val_loss', patience=20, verbose=1)
    model_seq.fit(train_x, train_y, batch_size=64, epochs=150, shuffle=True, validation_data=( val_x, val_y), callbacks=[checkpointer,earlystopper])
    # ---- Evaluate: MSE and Spearman correlation on test and train sets ----
    pred_y = model_seq.predict(test_x)
    print('testset')
    print('mse ' + str(mse(test_y, pred_y)))
    print(st.spearmanr(test_y, pred_y))
    y_pred_tr = model_seq.predict(train_x)
    print(st.spearmanr(train_y, y_pred_tr))
    # Dump targets and predictions for offline analysis.
    np.savetxt("train.csv", train_y, delimiter= ",")
    np.savetxt("trainpred.csv", y_pred_tr, delimiter = ",")
    np.savetxt("test.csv" , test_y, delimiter = ",")
    np.savetxt("testpred.csv", pred_y, delimiter = ",")
| [
"dbais001@dipankar.cs.ucr.edu"
] | dbais001@dipankar.cs.ucr.edu |
c307d8c1ba605a31d9a9948f911f4ef045636762 | dd7c044b0bd48746cd0f26395979e7cfba9bf1c6 | /Python2.x/Python2.x-0-basic/015_module_1.py | b1368fd8c2d9ebfc0e4c7df5061bf20620f2abec | [
"MIT"
] | permissive | mrxuyong/Python-dev | bfacc702e88abe9f694e9a5b28384d08b46041d9 | ec8d1e59ebb648a4b007ac495dd74ebe792c1795 | refs/heads/master | 2023-07-26T23:15:13.427666 | 2023-07-09T15:15:02 | 2023-07-09T15:15:02 | 125,294,625 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | # -*- coding: UTF-8 -*-
# module
# 1. 使用 import 语句来引入模块;
import myPrint;
myPrint.myPrint1('This is 015_module, using python module...');
print '入参为空时,打印默认的-->>\n'
myPrint.myPrint1();
# 2. From…import 从模块中导入一个指定的部分到当前命名空间中;
from myPrint import myPrint2;
myPrint2('from...import');
# 3. from...import* 把一个模块的所有内容全都导入到当前的命名空间;#
from myPrint import *
myPrint3('from...import*');
| [
"xuyong@qianmi.com"
] | xuyong@qianmi.com |
9cbda54ef472acc40645ac27096c2c1040cf1c41 | db494575881f27537ec1a9661592062f58aa7bbb | /pandas_describe.py | 89158e9897ffbdf88c6fd6ce700fb607297735d6 | [] | no_license | MrCat9/Pandas_Note | 3518072dc05a2ca1865bf9d5b070f283a54fd4fb | 7d0abd9e0c0d54c0dec712224a37c25534a1e498 | refs/heads/master | 2022-10-01T09:01:44.937992 | 2022-09-16T08:37:17 | 2022-09-16T08:37:17 | 182,781,091 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 836 | py | # -*- coding: utf-8 -*-
# describe()方法
import pandas as pd
df = pd.DataFrame({'A': ['a', 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'b'], 'B': [1, 1, 2, 3, 1, 2, 2, 3, 3]})
df
'''
A B
0 a 1
1 a 1
2 a 2
3 a 3
4 b 1
5 b 2
6 b 2
7 b 3
8 b 3
'''
df.describe() # 可以使用 to_csv() 后在 Excel 中操作
'''
B
count 9.000000
mean 2.000000
std 0.866025
min 1.000000
25% 1.000000
50% 2.000000
75% 3.000000
max 3.000000
'''
grouped = df.groupby(['A', ])
grouped.describe() # 可以使用 to_csv() 后在 Excel 中操作
'''
B
count mean std min 25% 50% 75% max
A
a 4.0 1.75 0.957427 1.0 1.0 1.5 2.25 3.0
b 5.0 2.20 0.836660 1.0 2.0 2.0 3.00 3.0
'''
| [
"noreply@github.com"
] | MrCat9.noreply@github.com |
3b30c93eabcd27038c83049c2ee79ddeb97f9bac | f8e8e365c9cf58b61d72655bc2340baeaed5baff | /Leetcode/Python Solutions/Binary Search/FirstBadVersion.py | 65537ce9473dd531e132bb495f34504fa9fb26fb | [
"MIT"
] | permissive | Mostofa-Najmus-Sakib/Applied-Algorithm | 39a69f6b9ed113efe4a420d19cad79e0aa317637 | bc656fd655617407856e0ce45b68585fa81c5035 | refs/heads/master | 2023-08-31T19:54:34.242559 | 2021-11-05T03:43:35 | 2021-11-05T03:43:35 | 412,263,430 | 0 | 0 | MIT | 2021-09-30T23:45:29 | 2021-09-30T23:45:25 | null | UTF-8 | Python | false | false | 806 | py | """
LeetCode Problem 278. First Bad Version
Link: https://leetcode.com/problems/first-bad-version/
Written by: Mostofa Adib Shakib
Language: Python
Time Complexity: O(logn)
Space complexity: O(1)
"""
# The isBadVersion API is already defined for you.
# @param version, an integer
# @return a bool
# def isBadVersion(version):
class Solution:
    """LeetCode 278 - First Bad Version (binary search over versions 1..n)."""

    def firstBadVersion(self, n):
        """Return the first bad version, calling isBadVersion once per step.

        The original implementation called isBadVersion up to three times per
        iteration and returned None implicitly when no bad version was found;
        this version makes exactly one call per halving and always returns a
        version number (the problem guarantees at least one bad version).

        :type n: int
        :rtype: int
        """
        first = 1
        last = n
        # Invariant: the first bad version always lies within [first, last].
        while first < last:
            mid = (first + last) // 2
            if isBadVersion(mid):
                # mid may itself be the first bad version, so keep it in range.
                last = mid
            else:
                first = mid + 1
        return first
"adibshakib@gmail.com"
] | adibshakib@gmail.com |
981f4b1ca7364558cc1fa129366fb38449cd8a28 | 35bbb2ca1bc8624a724095890b4ee4e987782b45 | /app.py | 3926d315b39f0180ad0fdcccdd7ca18f27897f45 | [] | no_license | hdixon/badge-bot | eff07764339ae047f4df3be91e07dc3c809558c6 | 9def3592d30b017f39835c49207b4fed46ece722 | refs/heads/master | 2022-12-10T07:16:35.537351 | 2020-09-11T02:34:43 | 2020-09-11T02:34:43 | 292,378,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,585 | py | #!/usr/bin/env python3
import praw
import sqlite3
from praw.models import Message
# import datetime as dt
# from dateutil.parser import parse
# import time
import logging
from datesfunc import *
logging.basicConfig(level=logging.INFO, filename='app.log', filemode='a', format='%(asctime)s - %(message)s')
reddit = praw.Reddit("badge-bot", user_agent="badge-bot by u/huckingfoes")
# db = "badges.db"
db = 'test.db'
def sub_exists(sub):
    """Return True when a subreddit with the given exact name exists.

    Any API error (praw raises when the exact-name search finds nothing)
    counts as non-existent.  Bug fix: an empty result list previously fell
    through and was reported as existing.
    """
    try:
        results = reddit.subreddits.search_by_name(sub, exact=True)
        return len(results) > 0
    except:
        # Deliberately broad: any praw/network failure means "not found".
        return False
# String Helper
def cleanSubString(subreddit):
    """Normalize a subreddit name: strip any '/r/' or 'r/' prefix, validate,
    and return it lowercased.

    Returns the int -1 when the name is empty or contains spaces.
    NOTE(review): callers do not appear to check for this -1 sentinel before
    using the result (e.g. in SQL table names) -- confirm.
    """
    subredditString = str(subreddit) # just make sure subreddit is a string
    subredditString = subredditString.replace('/r/', '')
    subredditString = subredditString.replace('r/', '')
    subredditString = subredditString.strip()
    print(subredditString)
    if subredditString == '' or ' ' in subredditString:
        logging.info("Invalid subreddit string: %s" % subredditString)
        print("Invalid subreddit string")
        return -1
    elif not sub_exists(subredditString):
        # NOTE(review): a non-existent subreddit is only logged here; the
        # cleaned name is still returned below -- confirm that is intended.
        logging.info("Subreddit string cleaned: " + subredditString)
        logging.info("But subreddit does not exist.")
    # DEBUGGING: lets just check if we had to replace anything
    if(subredditString != subreddit):
        logging.info("Oh we had to remove a r/ from %s to %s" %(subreddit, subredditString))
    return subredditString.lower()
# Database Helpers
def create_table(subreddit):
    """Create the per-subreddit badge table; a no-op when it already exists."""
    name = cleanSubString(subreddit)
    connection = sqlite3.connect(db)
    cursor = connection.cursor()
    # Table names cannot be bound as SQL parameters; `name` has been cleaned above.
    cursor.execute('CREATE TABLE IF NOT EXISTS ' + name + ' (username TEXT NOT NULL, badgedate TEXT NOT NULL, dateadded TEXT NOT NULL);')
    connection.commit()
    connection.close()
    logging.info('Created table: ' + name)
    return True
def table_exists(tableName):
    """Return True/False for whether *tableName* exists, or -1 on bad input
    or database error."""
    if not isinstance(tableName, str):  # ensure input is a string
        return -1
    try:
        connection = sqlite3.connect(db)
        cursor = connection.cursor()
        cursor.execute(
            "SELECT count(name) FROM sqlite_master WHERE type='table' AND name=?;",
            (tableName,))
        found = cursor.fetchone()[0] == 1
        if found:
            logging.info("Table exists: " + tableName)
        else:
            logging.info("Table does not exist: " + tableName)
        connection.commit()
        connection.close()
        return found
    except Exception as e:
        logging.critical(e)
        logging.critical("Exception occurred", exc_info=True)
        return -1
def addSubreddit(subreddit):
    """Ensure a badge table exists for *subreddit*.

    Returns 0 when the table already existed, 1 when it was created.
    """
    name = cleanSubString(subreddit)
    logging.info("addSubreddit(): Trying to add subreddit:" + subreddit)
    if not table_exists(name):
        create_table(name)
        logging.info("adSubreddit(): Table doesn't exist... creating: " + name)
        return 1
    # Technically redundant thanks to SQL's IF NOT EXISTS, but cheap.
    logging.info("addSubreddit(): Table already exists.")
    return 0
def isInDatabase(username, subreddit):
    """Return True/False for whether *username* has a row in the subreddit's
    badge table, or -1 on error.

    Bug fix: the username is now bound as a query parameter; the previous
    string-built query broke on names containing quotes and was injectable.
    """
    if not isinstance(username, str):
        username = str(username)
    subreddit = cleanSubString(subreddit)
    try:
        conn = sqlite3.connect(db)
        cur = conn.cursor()
        cur.execute("SELECT * FROM " + subreddit + " WHERE username = ?", (username,))
        rows = cur.fetchall()
        conn.commit()
        conn.close()
        if len(rows) == 1:
            # user exists
            return True
        elif len(rows) == 0:
            return False
        else:
            # More than one row per user should never happen.
            logging.critical("Exception occurred", exc_info=True)
            logging.critical("isInDatabase(): Returning -1")
            return -1
    except Exception as e:
        logging.error("isInDatabase() encountered an error.")
        logging.critical(e)
        logging.critical("Exception occurred", exc_info=True)
        return -1
def updateFlairsFromDatabase(db):
    """Refresh flair badges for every subreddit table found in *db*."""
    logging.info("Looping through tables.")
    try:
        connection = sqlite3.connect(db)
        cursor = connection.cursor()
        tables = [row for row in cursor.execute(
            "SELECT name FROM sqlite_master WHERE type = 'table'")]
        # Close before the (potentially slow) per-table updates.
        connection.close()
        for table in tables:
            logging.info("Updating badges for " + str(table[0]))
            updateSubredditBadges(table[0])
    except sqlite3.Error as e:
        logging.critical('Db Not found', str(e))
        logging.critical("Exception occurred", exc_info=True)
def updateSubredditBadges(subreddit):
    """Recompute and apply the day-count flair for every user stored in one
    subreddit table.  Always returns 1."""
    subreddit = cleanSubString(subreddit)
    try:
        connection = sqlite3.connect(db)
        cursor = connection.cursor()
        cursor.execute('SELECT * from ' + subreddit)
        for username, badgedate, _dateadded in cursor:
            badge = str(getBadgeText(daysSince(badgedate)))
            updateFlair(username, badge, subreddit)
            logging.info("updateSubredditBadges: updated " + username + " with " + badge + " in subreddit " + subreddit)
        connection.close()
    except Exception as e:
        logging.critical("Exception occurred", exc_info=True)
        logging.critical(e)
    return 1
def getRedditorQuitDate(redditor, subreddit):
    """Return the stored badge date (YYYY-MM-DD string) for *redditor* in the
    subreddit's table, or None on any error.

    Bug fixes: the query now uses the cleaned subreddit name (tables are
    created under cleaned names, but the previous code cleaned into an unused
    variable and queried with the raw name), and the connection itself is
    closed rather than only the cursor.
    """
    subreddit = cleanSubString(subreddit)
    redditor = str(redditor)
    try:
        conn = sqlite3.connect(db)
        c = conn.cursor()
        c.execute("SELECT * FROM " + subreddit + " WHERE username =?", (redditor,))
        quitDate = c.fetchall()
        conn.close()
        return quitDate[0][1]
    except Exception as e:
        logging.critical("Exception occurred", exc_info=True)
        logging.critical(e)
def removeFromDatabase(username, subreddit):
    """Delete *username*'s badge row from the subreddit's table.

    Returns 1 on success, 0 when the user was not in the table, -1 on a
    database error.  Cleanup: removed the duplicated "not in database" log
    lines and the unreachable trailing return.
    """
    username = str(username)
    subreddit = cleanSubString(subreddit)
    if not isInDatabase(username, subreddit):
        logging.error("removefromDatabase(): Tried to remove user not in database.")
        logging.error("user: %s subreddit: %s" % (username, subreddit))
        return 0
    try:
        logging.info("Trying to remove, user is in db: " + username)
        conn = sqlite3.connect(db)
        c = conn.cursor()
        c.execute("DELETE FROM " + subreddit + " WHERE username = ?", (username,))
        conn.commit()
        conn.close()
        logging.info("removeFromDatabase(): Removed " + username + " from " + subreddit)
        return 1
    except Exception as e:
        logging.critical("Exception occurred", exc_info=True)
        logging.critical(e)
        return -1
def updateDate(username, startDate, subreddit):
    """Store (insert or update) a user's badge date and push the matching flair.

    Returns whatever updateFlair() returns.
    """
    days = daysSince(startDate)
    username = str(username)
    subreddit = cleanSubString(subreddit)
    if isInDatabase(username, subreddit):
        updateDatabase(username, startDate, subreddit)
    else:
        insertInDatabase(username, startDate, subreddit)
    return updateFlair(username, getBadgeText(days), subreddit)
def updateDatabase(username, startDate, subreddit):
    """Overwrite the stored badge date for an existing user row."""
    try:
        table = cleanSubString(subreddit)
        connection = sqlite3.connect(db)
        cursor = connection.cursor()
        cursor.execute('UPDATE ' + table + ' SET badgedate = ? WHERE username = ?',
                       (startDate, username))
        connection.commit()
        connection.close()
    except Exception as e:
        logging.critical(e)
        logging.info("updateDatabase() failed.")
        logging.error("Exception occurred", exc_info=True)
def insertInDatabase(username, startDate, subreddit):
    """Insert a new (username, badgedate, dateadded) row into the subreddit table.

    Bug fixes for two AttributeError typos: a datetime start date is now
    formatted with ``strftime`` (was ``dt.datetimestrptime``, which does not
    exist and also parses rather than formats), and today's date comes from
    ``dt.datetime.today()`` (was ``dt.datetimetoday``).
    """
    subreddit = cleanSubString(subreddit)
    assert(not isInDatabase(username, subreddit))
    if isinstance(startDate, datetime):
        # Normalize datetime objects to the YYYY-MM-DD strings stored in the db.
        startDate = startDate.strftime("%Y-%m-%d")
    try:
        conn = sqlite3.connect(db)
        c = conn.cursor()
        todayString = dt.datetime.today().strftime("%Y-%m-%d")
        query1 = 'INSERT INTO ' + subreddit + ' '
        query2 = '(username, badgedate, dateadded) VALUES (?, ?, ?);'
        sqlite_insert_with_param = query1 + query2
        data_tuple = (username, startDate, todayString)
        c.execute(sqlite_insert_with_param, data_tuple)
        conn.commit()
        conn.close()
    except Exception as e:
        logging.critical(e)
        raise
    return 0
def checkValidMessage(item):
    """Return True when *item* is a private Message whose subject names a
    subreddit with an existing badge table and whose body is 'remove',
    'reset', or a YYYY-MM-DD date string."""
    # ensure type of item is Message
    if not (isinstance(item, Message)):
        # NOTE(review): this inner subject check only runs for non-Message
        # items and both branches return False anyway -- confirm intent.
        if (item.subject == None or item.subject == ""):
            return False
        logging.warning("checkValidMessage passed item that is not message")
        return False
    redditor = item.author  # NOTE(review): unused below -- confirm.
    subject = cleanSubString(item.subject)
    body = str(item.body).strip().lower()
    acceptableCommands = ["remove", "reset"]
    logging.debug("Checking valid message")
    if body in acceptableCommands or isValidDateStr(body):
        # okay, seems to be within set of acceptable commands
        if not table_exists(subject):
            logging.info("Message invalid: table does not exist for " + subject)
            return False
        logging.debug("body in acceptableCommands or isValidDate")
        if body == 'remove':
            logging.debug("Date message okay!")
            return True
        elif body == 'reset':
            logging.debug("Reset message okay!")
            return True
        elif isValidDateStr(body):
            logging.debug("Remove message okay!")
            return True
        else:
            # Unreachable: the outer condition guarantees one of the above.
            logging.debug("checkValidMessage failed (other)")
            return False
        return True
    return False
def updateFlair(redditor, flairText, subreddit):
    """Set *redditor*'s flair text in *subreddit*; returns 0 when the flair
    API call fails (or when the quit date was unavailable and the fallback
    call also failed)."""
    subredditString = cleanSubString(subreddit)
    sub = reddit.subreddit(subredditString)
    username = str(redditor)
    try:
        quitDate = getRedditorQuitDate(redditor, subreddit)
        soberDays = daysSince(quitDate)
        # NOTE(review): cssClass is computed but never used below -- it was
        # presumably meant to be passed as css_class; confirm.
        cssClass = getBadgeClass(subreddit, soberDays)
    except:
        # If this fires, `quitDate` is unbound; the NameError in the next try
        # block is swallowed by its bare except and 0 is returned.
        logging.info("Couldn't get quit date.")
    try:
        if quitDate:
            # NOTE(review): quitDate is passed positionally where flair.set
            # expects the css_class argument -- confirm this is intended.
            return sub.flair.set(username, flairText, quitDate)
        return sub.flair.set(username, flairText, css_class='badge')
    except:
        return 0
    return 0
def removeFlair(redditor, subreddit):
    """Clear *redditor*'s flair in the given subreddit."""
    name = cleanSubString(subreddit)
    return reddit.subreddit(name).flair.delete(redditor)
def acceptModInvites():
    """Scan unread inbox items for moderator invites (and removals).

    Accepts any pending mod invite, replies with usage instructions and
    pre-filled message links, and creates the subreddit's badge table.
    Always returns False.
    """
    for message in reddit.inbox.unread(limit=None):
        # Reddit mod invites always begin with 'gadzooks!'.
        if message.body.startswith('gadzooks!'):
            logging.info("Looks like we have a mod invite!")
            subredditInstance = message.subreddit
            subredditString = cleanSubString(message.subreddit)
            logging.info("Attempting to accept moderator role for: " + subredditString)
            message.mark_read()
            try:
                subredditInstance.mod.accept_invite()
                logging.info("Accepted mod invite!")
                strReply = "Thanks for the invite! I can now provide badges to your subreddit %s so long as I have flair permissions at least. \n\n[Check my userpage here](https://www.reddit.com/user/badge-bot/comments/ik7v4y/badgebot_alpha_version_now_available/) for more info on configuring badge-bot on your subreddit. \n\nFor any issues, please contact u/huckingfoes." %(subredditString)
                # Build pre-filled compose links for the three supported commands.
                linkbase = "https://www.reddit.com/message/compose/?to=badge-bot&subject=[SUBREDDIT]&message=[MESSAGE]"
                linkbase = linkbase.replace("[SUBREDDIT]", subredditString)
                customSetLink = linkbase.replace("[MESSAGE]", "YYYY-MM-DD")
                customResetLink = linkbase.replace("[MESSAGE]", "reset")
                customRemoveLink = linkbase.replace("[MESSAGE]", "remove")
                reply2 = "\nHere are some custom links you can provide to your subreddit so that your community can use the bot more easily.\n"
                reply2 += "\n\n\n **[Click here to set your flair to a particular date.](%s)**\n\n **[Click here to rest flair to 0.](%s)**\n\n **[Click here to remove your flair.](%s)**" % (customSetLink, customResetLink, customRemoveLink)
                print(strReply + reply2)
                strReply += reply2
                message.reply(strReply)
                logging.info("\tReplied: " + strReply)
                # logging.debug("Checking all moderators.")
                # checkModPerms(subredditInstance)
                logging.info("\tCreating new table for subreddit: " + subredditString)
                addSubreddit(subredditString)
                logging.info("Subreddit added to db.")
            except:
                logging.error("Tried to accept invite, but invalid.")
        elif message.subject.startswith('/u/badge-bot has been removed as a moderator'):
            # NOTE(review): removal is only logged; the subreddit's table is
            # not dropped -- confirm that is intended.
            print("Removed as mod.")
            if "r/" in message.subject:
                sub = message.subject.split("r/")[1]
                logging.info("Removed as mod from " + sub)
            else:
                logging.warning("Can't figure out what sub we were removed from.")
            message.mark_read()
    return False
def checkModPerms(sub):
    """Return True when u/badge-bot moderates *sub* with flair permissions."""
    # for the next version
    for moderator in reddit.subreddit(sub).moderator():
        if 'badge-bot' not in str(moderator):
            continue
        if 'flair' in moderator.mod_permissions:
            print('I have flair permissions in ' + sub)
            return True
    return False
def getFlairTemplates(sub):
    """Return the css_class of every flair template defined in *sub*."""
    subreddit = reddit.subreddit(sub)
    return [template['css_class'] for template in subreddit.flair.templates]
def iterateMessageRequests():
    """Process the unread inbox: accept mod invites, then handle badge
    messages (subject = subreddit, body = 'reset' / 'remove' / YYYY-MM-DD).

    Bug fixes: ``item.error(...)`` (Message has no ``error`` method) and the
    undefined name ``log.error`` are both replaced with ``logging.error``.
    """
    unreadMessages = reddit.inbox.unread(limit=None)
    # unread_messages = []
    # get any mod invites out of the way
    acceptModInvites()
    today = dt.datetime.today().strftime("%Y-%m-%d")
    for item in unreadMessages:
        if "username mention" in item.subject or item.was_comment:
            # Comment mentions are not commands: just mark them read.
            try:
                logging.warning("Mentioned in comment. Marking read.")
                item.mark_read()
                # item.reply("If you'd like to add me to your subreddit, read more [here](https://www.reddit.com/user/badge-bot/comments/imzh45/badgebot_is_in_beta_and_accepting_invites/)")
                print("Marked comment mention read.")
            except:
                item.mark_read()
                logging.critical("Tried to mark item that is not message instance read. Failed.")
        elif isinstance(item, Message):
            subreddit = cleanSubString(item.subject)
            body = str(item.body.lower())
            author = item.author
            print("New message from %s\n subject: %s \n body: %s " %(author, subreddit, body))
            logging.info("New message from %s\n subject: %s \n body: %s " %(author, subreddit, body))
            if not table_exists(subreddit):
                # if subreddit is not in database, check if we're a mod
                try:
                    item.reply("Your subreddit is not in our database. Message your moderators or please invite u/badge-bot to moderate with flair permissions. If this is an error, contact u/huckingfoes by PM.")
                    item.mark_read()
                    logging.info("Replied to message.")
                except:
                    logging.info("Tried to reply to message, but failed.")
                    item.mark_read()
            elif table_exists(subreddit):
                if(checkValidMessage(item)):
                    if body == "reset":
                        # Reset the badge to today.
                        logging.info("New badge request... giving badge.")
                        updateDate(item.author, today, subreddit)
                        item.mark_read()
                        item.reply("Request honored. Your badge has been updated.")
                    elif isValidDateStr(body):
                        # NOTE(review): 2880 days is ~8 years; the code comment
                        # said 4 years while the reply says 8 -- confirm limit.
                        if int(daysSince(item.body)) > 2880:
                            logging.error("Replying to invalid date request")
                            item.reply("You may only update a flair with up to 8 years in the past. Try again with a more recent date or contact moderators manually to update your flair accordingly.")
                        else:
                            b = updateDate(author, body, subreddit)
                            if b == 0:
                                logging.error("Issue updating date. Probably permissions error.")
                                item.reply("There may be a permissions issue in your subreddit. Ensure u/badge-bot has flair permissions.")
                            item.reply("Update honored. Your badge has been updated.")
                            logging.info("Updated badge.")
                            item.mark_read()
                    elif body == 'remove':
                        logging.debug("Message is remove request.")
                        removeFromDatabase(author, subreddit)
                        logging.info("Removed " + str(author) + " from " + subreddit)
                        removeFlair(author, subreddit)
                        item.mark_read()
                        item.reply("You've been removed from the badge database: " + subreddit)
                        logging.info("Replied to remove request.")
                    else:
                        s = "Hello %s, your message is invalid: \n %s \n %s" % (item.author, item.subject, item.body)
                        logging.debug(s)
                        try:
                            logging.info("Trying to reply to invalid message...")
                            item.reply(s)
                            logging.info("Sent reply: " + s)
                        except:
                            logging.info("Couldn't reply to invalid message. Marking as read.")
                            item.mark_read()
                else:
                    s = "Hello %s, your message is invalid: \n %s \n %s" % (item.author, item.subject, item.body)
                    logging.error(s)
                    try:
                        item.reply(s)
                    except:
                        logging.error("Couldn't reply to message. Marking read.")
                        item.mark_read()
def getBadgeClass(sub, days):
    """Map a day count to the subreddit's milestone flair CSS class.

    A milestone class is only returned when the subreddit actually defines it
    as a flair template; otherwise the generic 'badge' class is used.
    Bug fix: the 90-day tier previously returned '180days'.
    """
    cssflairs = getFlairTemplates(sub)
    days = int(days)
    if days >= 30 and days < 60 and '30days' in cssflairs:
        return '30days'
    elif days >= 60 and days < 90 and '60days' in cssflairs:
        return '60days'
    elif days >= 90 and days < 180 and '90days' in cssflairs:
        return '90days'
    elif days >= 180 and days < 365 and '180days' in cssflairs:
        return '180days'
    elif days >= 365 and days < 730 and '1year' in cssflairs:
        return '1year'
    elif days >= 730 and '2years' in cssflairs:
        return '2years'
    else:
        return 'badge'
def getBadgeText(daysDiff):
    """Return a human-readable badge label for a day count.

    Values under a year render as "N days"; otherwise as whole years
    ("1 year" / "N years"), truncating partial years.
    """
    daysDiff = int(daysDiff)
    if daysDiff < 365:
        return str(daysDiff) + " days"
    numYears = daysDiff // 365  # truncate partial years
    if numYears == 1:
        return "1 year"
    return str(numYears) + " years"
# Main polling loop for the bot.
# NOTE(review): the `while False:` condition means this loop never runs —
# it looks deliberately disabled; confirm before re-enabling.
count = 0
while False:
    count += 1
    t = dt.datetime.today().strftime('%H:%M:%S')
    # Heartbeat roughly every 100 iterations (~50 minutes at 30 s/poll).
    if count % 100 == 1:
        logging.info(t + " Checking messages.")
        print(t + " Still working")
    iterateMessageRequests()
    # Around midnight (23:45-00:15) refresh all flairs from the database.
    if time_between(dt.time(23,45), dt.time(00,15)):
        updateFlairsFromDatabase(db)
    time.sleep(30)
| [
"henry@hdixon.xyz"
] | henry@hdixon.xyz |
678256f0e9251afdae873f233eb56b60123f7369 | b0c02d7ca86c1ef84af18a8c701702e8bb212b64 | /display-stuff/neopixels/ColorSynthesis/Neopixel Color Synthesis/colorsynthesis1.py | dc3e92458492203c89b292ad1f19f38abeac0e08 | [] | no_license | flashypepo/myMicropython-Examples | 24fa2f372e68742abe0f74913df000dfe64a9e55 | b2b63df865b5ad471b351ca5f279135025859f5d | refs/heads/master | 2021-09-24T18:52:18.083444 | 2018-10-13T11:59:19 | 2018-10-13T11:59:19 | 98,223,412 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,746 | py | # Micro/CircuitPython NeoPixel Color Synthesis Experiments pt. 1
import machine
import time
import math
import neopixel
NEOPIXEL_PIN = machine.Pin(15, machine.Pin.OUT)  # strip data line on GPIO 15
NEOPIXEL_COUNT = 8 * 4 #12  # total pixels driven (presumably four 8-LED segments — TODO confirm)
def seconds():
    """Current uptime in seconds, derived from MicroPython's millisecond tick counter."""
    millis = time.ticks_ms()
    return millis / 1000
# Setup NeoPixels
pixels = neopixel.NeoPixel(NEOPIXEL_PIN, NEOPIXEL_COUNT)
def blank():
    """Switch every pixel off and push the update to the strip."""
    pixels.fill((0,0,0))
    pixels.write()
blank()  # start from an all-off strip
''' Example 2:
amplitude = 128
frequency = 0.25 # Increase this to speed up, decrease to slow down the pulse.
phase = 0
offset = 128
try:
while True:
red = int(amplitude*math.sin(2*math.pi*frequency*seconds()+phase)+\
offset)
color = (red, 0, 0)
pixels.fill(color)
pixels.write()
print("r={}\tg={}\tb={}".format(*color))
time.sleep(0.1)
except:
blank()
print('done')
#'''
################################################################################
# Example 3:
# Refactor to a functional style. Create a sine wave function on the fly
# so it's easy to add more animations (just make more sine wave functions).
################################################################################
def sine_wave(amplitude, frequency, phase, offset):
    """Build a sine function of time: amplitude*sin(2*pi*frequency*t + phase) + offset."""
    def wave(t):
        angle = 2 * math.pi * frequency * t + phase
        return amplitude * math.sin(angle) + offset
    return wave
# Two waves half a period apart so red and green pulse in opposition.
red_wave = sine_wave(128, 0.25, 0, 128)
green_wave = sine_wave(128, 0.25, math.pi, 128)
try:
    while True:
        current = seconds()
        red = int(red_wave(current))
        green = int(green_wave(current))
        color = (red, green, 0)  # blue channel stays off
        pixels.fill(color)
        pixels.write()
        print("r={}\tg={}\tb={}".format(*color))
        time.sleep(0.1)  # ~10 updates per second
except:
    # NOTE(review): bare except — presumably meant to blank the strip on
    # Ctrl-C/interrupt, but it also hides real errors; confirm before narrowing.
    blank()
    print('done')
| [
"peter@pepo.nl"
] | peter@pepo.nl |
9be6f968816a6916013d3e34e681989b083e3c99 | 7316e50bbaa90c004b0e73a1ccdfb79627f4b840 | /sprint/views.py | 51842eeea1fbf075326b909476a6a0e32bcc1a6a | [] | no_license | akshaysinghal93/scrum_app | 2438bbf24d6e5b2e3c3f6c5b163eadb124a82b78 | 8f78386b519cf674676addb563abc93a9c1eb584 | refs/heads/master | 2021-01-13T03:15:23.210687 | 2015-08-04T08:12:56 | 2015-08-04T08:12:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,658 | py | from django.shortcuts import render
from django.shortcuts import render_to_response, redirect, render
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
from .forms import AddSprintForm
from .models import Sprint
# Create your views here.
@login_required(login_url='/app/login/')
def addNewSprint(request):
    """Create a new sprint from the submitted form, or show a blank form."""
    if request.method == 'POST':
        # Bound form: save and go back to the sprint list when valid,
        # otherwise fall through and re-render with validation errors.
        form = AddSprintForm(data=request.POST)
        if form.is_valid():
            form.save()
            return redirect('/app/sprint/view')
    else:
        form = AddSprintForm()
    context = {'form': form}
    return render_to_response('addUpdateSprint.html', context,
                              context_instance=RequestContext(request))
@login_required(login_url='/app/login/')
def updateSprint(request, sprint_id=None):
    """Edit an existing sprint: show it pre-filled on GET, persist on POST."""
    if request.method == 'POST':
        form = AddSprintForm(request.POST)
        if form.is_valid():
            cleaned = form.cleaned_data
            sprint = Sprint(
                sprint_id=cleaned.get('sprint_id'),
                sprint_name=cleaned.get('sprint_name'),
                sprint_from_date=cleaned.get('sprint_from_date'),
                sprint_to_date=cleaned.get('sprint_to_date'))
            sprint.save()
            return redirect('/app/sprint/view')
    else:
        # Pre-populate the form from the sprint being edited.
        sprint = Sprint.objects.get(sprint_id=sprint_id)
        form = AddSprintForm(instance=sprint)
    context = {'form': form}
    return render_to_response('addUpdateSprint.html', context,
                              context_instance=RequestContext(request))
@login_required(login_url='/app/login/')
def viewAllSprints(request):
    """List every sprint."""
    all_sprints = Sprint.objects.all()
    context = {'sprints': all_sprints}
    return render_to_response('viewSprints.html', context,
                              context_instance=RequestContext(request))
"kumar.badgujar91@gmail.com"
] | kumar.badgujar91@gmail.com |
28f2a27adf75d288767cd84fbe086f2362c8e97f | 1f6247ec2150cb4f03fc03c315ddff57af29fff9 | /math.py | e32d8771b9b0fbb138418fa14edab798fb106ca2 | [] | no_license | AmarjSingh03/ajsingh03 | f8ee529dc353e53ac3d850f92ce442ee61982a7e | c4c8e2485329f37eeeb789d197474d69bd2030f1 | refs/heads/main | 2023-06-14T11:06:35.315079 | 2021-07-08T09:53:29 | 2021-07-08T09:53:29 | 384,057,145 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | #Add implementaton
def add(x, y):
    """Return the sum of x and y."""
    total = x + y
    return total
#Subtract implementation
def subtract(x, y):
    """Return x minus y (previously an unimplemented stub returning None)."""
    return x - y
#Multiply implementation
def multiply(x, y):
    """Return the product of x and y."""
    product = x * y
    return product
#Divide implementation
def divide(x, y):
    """Return x divided by y using true division.

    Raises ZeroDivisionError when y == 0 (previously an unimplemented
    stub returning None).
    """
    return x / y
| [
"noreply@github.com"
] | AmarjSingh03.noreply@github.com |
78544477f1980b8197bbeb6369a8c22371a2db77 | a6203ce0f7f871ccd8fd341af6254795c938232b | /easy/power-of-two/solution.py | 08541ea6db52d29b1ee3005fc763fdc7559eb622 | [] | no_license | hsuanhauliu/leetcode-solutions | 542590de9b1dd4480bd582850363f71487dd37d0 | c14d8829c95f61ff6691816e8c0de76b9319f389 | refs/heads/master | 2021-03-31T00:31:18.489947 | 2019-10-21T03:51:10 | 2019-10-21T03:51:10 | 124,963,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | class Solution:
def isPowerOfTwo(self, n: int) -> bool:
if n <= 0:
return False
while n != 1:
# keep dividing
if n % 2:
return False
n //= 2
return True | [
"hsuanhal@usc.edu"
] | hsuanhal@usc.edu |
f0d68819afdb2e7a3916035a0b0a7475d91d143f | 2f396e10ecfc81369c984752158f54f4bab85e5a | /ksubsample_leukemia.py | 0618431c552f2216f046080dc801ccf668cd87c5 | [] | no_license | gsvaidya/Machine-Learning---4 | 9e9a876b0d085c53dca6954c0e1a2174482ee058 | d1dab821c9b021e07a2feddbfe68c82f2ceed032 | refs/heads/master | 2020-05-29T15:07:57.454721 | 2016-07-12T21:10:13 | 2016-07-12T21:10:13 | 63,192,517 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 607 | py | import numpy as np
from sklearn.datasets import load_svmlight_file
from sklearn import svm, feature_selection, cross_validation
from sklearn.pipeline import Pipeline
from sklearn.pipeline import make_pipeline
from sklearn.feature_selection import RFE,RFECV
from sklearn.svm import SVC,LinearSVC
from sklearn.utils import shuffle
# Load the leukemia test split (LIBSVM/svmlight format) into a sparse
# feature matrix and a label vector. Assumes "leu.t" sits in the current
# working directory — TODO confirm the expected path.
X_train, y_train = load_svmlight_file("leu.t")
#print X_train , y_train,'old'
#X_train , y_train = shuffle (X_train,y_train,random_state=0)
#print X_train , y_train,'new'
#index = np.arange(np.shape(X_train)[0])
#print index
#np.random.shuffle(index)
#print X_train[index, :]
"gandhar.vaidya@gmail.com"
] | gandhar.vaidya@gmail.com |
156c0f0b9512126396049cde426dcfcc4d6b7d2a | 899e01c967536b358d02b124fce5ad2bc3884c57 | /vidCreate.py | 1c089dd407e669eb24247eac96a0101a1bbb2f99 | [] | no_license | Apoorva87/motionDetector | 3a00e050333e9f727ecf0e6f47ac2823fb811c84 | 3c5deb1f836ffa321ab967be0fc83f69c2e8b05f | refs/heads/master | 2020-06-20T11:46:58.082687 | 2016-12-14T07:07:44 | 2016-12-14T07:07:44 | 74,868,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | import cv2
# Preload a still image; cv2.imread returns None when the path is missing —
# TODO confirm the hard-coded Windows path exists on the target machine.
img = cv2.imread('C:/Users/Andromeda/Pictures/Capture.png')
def imageOpen():
    """Show the preloaded image in a window and block until any key is pressed."""
    cv2.imshow("source",img)
    cv2.waitKey(0)
# Capture from the default camera (device 0) and record to output.avi
# as XVID at 20 fps, 640x480.
cap = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi',fourcc, 20.0, (640,480))
imageOpen()  # NOTE(review): blocks here until a key is pressed in the image window
while(cap.isOpened()):
    ret, frame = cap.read()
    if ret==True:
        #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Display the resulting frame
        #cv2.imshow('frame',gray)
        cv2.imshow('frame',frame)
        out.write(frame)
        # Stop recording when 'q' is pressed in the preview window.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break
# When everything done, release the capture
cap.release()
out.release()
cv2.destroyAllWindows()
"apoorvakarnik@gmail.com"
] | apoorvakarnik@gmail.com |
b57bbed7f77c0abd0597b8c8d156818afb202ffe | 040c888476f6e57e5883562491aa5c488063dc0f | /etherscan/tokens.py | 600b722a614a7c8728f6f332852c798de42b3d08 | [] | no_license | LizetteO/etherscan.transactions | dafe75264dd2495336ebab9d26d755ddda6359cf | 95dbf46818bc52688d75ca6486f7e388d0bc77c9 | refs/heads/master | 2023-03-17T08:25:52.902596 | 2018-08-26T20:04:36 | 2018-08-26T20:04:36 | 346,601,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | from .client import Client
class Tokens(Client):
    """Etherscan token endpoints scoped to a single token contract."""

    def __init__(self, contract_address, api_key='YourApiKeyToken'):
        Client.__init__(self, address='', api_key=api_key)
        self.url_dict[self.CONTRACT_ADDRESS] = contract_address

    def _fetch(self, module, action):
        # Shared request path: set the module/action query fields, rebuild
        # the URL and return the 'result' payload of the JSON response.
        self.url_dict[self.MODULE] = module
        self.url_dict[self.ACTION] = action
        self.build_url()
        req = self.connect()
        return req['result']

    def get_total_supply(self):
        """Return the total supply reported for the configured contract."""
        return self._fetch('stats', 'tokensupply')

    def get_token_balance(self, address):
        """Return the token balance that `address` holds for this contract."""
        self.url_dict[self.ADDRESS] = address
        return self._fetch('account', 'tokenbalance')
| [
"30991021+elyselam@users.noreply.github.com"
] | 30991021+elyselam@users.noreply.github.com |
fa2426367d7e331041c267f0caa9af5a01f702f0 | 620323fc090cebaf7aca456ff3f7fbbe1e210394 | /psutil_example/get_win_services.py | 26361ecb1fd9c3ae7b2481a9ed2b4502e0765fd2 | [
"CC-BY-4.0"
] | permissive | gil9red/SimplePyScripts | bd2733372728bf9b9f00570e90316fa12116516b | 773c2c9724edd8827a1dbd91694d780e03fcb05a | refs/heads/master | 2023-08-31T04:26:09.120173 | 2023-08-30T17:22:59 | 2023-08-30T17:22:59 | 22,650,442 | 157 | 46 | null | 2023-09-08T17:51:33 | 2014-08-05T16:19:52 | Python | UTF-8 | Python | false | false | 853 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "ipetrash"
# pip install psutil
import psutil
from psutil._pswindows import WindowsService
def get_win_services() -> list[WindowsService]:
    """Return a snapshot list of all installed Windows services."""
    return list(psutil.win_service_iter())
if __name__ == "__main__":
    # Demo: dump every Windows service with its main attributes.
    win_service_list = get_win_services()
    print(f"Win service list ({len(win_service_list)}):")
    for service in win_service_list:
        title = f"{service.name()!r} ({service.display_name()})"
        path = (
            f"Pid={service.pid()}, name={service.name()!r}, display_name={service.display_name()!r}, "
            f"status={service.status()!r}, start_type={service.start_type()!r}"
        )
        print("Title:", title)
        print("Path:", path)
        print("Status:", service.status())
        print("binpath:", service.binpath())
        print()
| [
"ilya.petrash@inbox.ru"
] | ilya.petrash@inbox.ru |
14f91eb8d09f341bd42a137e627ca4720d032da0 | 5b50ad29c545a7d54f776426aeaba5a892dab232 | /문제풀이/day_14/01_10869.py | 82aa19dd1cdd022216867b4b21c468ab65ddf75b | [] | no_license | reaqua07/sparta_algorithm | 22b99c99472fc82dde8b2b97eab33b799fa59f35 | 8ac9a6a4cf03769d7e618a1f23cb96eebc077e9b | refs/heads/master | 2023-06-06T03:58:53.809395 | 2021-06-21T13:23:37 | 2021-06-21T13:23:37 | 376,001,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | # 입력
# Read two integers A and B from a single space-separated input line.
A, B = input().split()
# Convert the strings to numbers once, up front (the original re-converted
# the already-int values with int() inside every print).
A = int(A)
B = int(B)
# Print sum, difference, product, integer quotient and remainder.
print(A + B)
print(A - B)
print(A * B)
print(A // B)
print(A % B)
"reaqua07@naver.com"
] | reaqua07@naver.com |
059549e8fcd2d14af261a2352d6af2c4372ca9a2 | b1dcb13d94b08e82d0f9f43f5b04659dc79efcd9 | /Graphic Analysis/stack_images.py | 49fd9aec63e8cacf7759da9fba8fd89b8ba21683 | [] | no_license | jaimeMontea/Impractical-Python-Projects | 51705649d5161d899dbabfd2aaef0232d2418728 | b65eaecc1627fa16f0d321913dc9e25a5c265853 | refs/heads/main | 2023-05-30T23:01:57.412832 | 2021-06-30T13:43:28 | 2021-06-30T13:43:28 | 380,044,379 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,070 | py | """Average pixels in a series of images to produce a single stacked image."""
import os
from PIL import Image
print("\nstart stacking images...")
# list images in directory
os.chdir('cropped')
images = os.listdir()
# NOTE(review): assumes 'cropped' contains only image files of identical
# size — confirm, since every frame's pixel lists are zipped together.
# loop through images and extract RGB channels as separate lists
red_data = []
green_data = []
blue_data = []
for image in images:
    with Image.open(image) as img:
        if image == images[0]: # get size of 1st cropped image
            img_size = img.size # width-height tuple to use later
        red_data.append(list(img.getdata(0)))
        green_data.append(list(img.getdata(1)))
        blue_data.append(list(img.getdata(2)))
# Per-pixel channel means across all frames (zip(*...) walks pixel positions).
ave_red = [round(sum(x) / len(red_data)) for x in zip(*red_data)]
ave_blue = [round(sum(x) / len(blue_data)) for x in zip(*blue_data)]
ave_green = [round(sum(x) / len(green_data)) for x in zip(*green_data)]
# Re-interleave the averaged channels into (r, g, b) pixel tuples.
merged_data = [(x) for x in zip(ave_red, ave_green, ave_blue)]
stacked = Image.new('RGB', (img_size))
stacked.putdata(merged_data)
stacked.show()
os.chdir('..')
stacked.save('jupiter_stacked.tif', 'TIFF')
| [
"noreply@github.com"
] | jaimeMontea.noreply@github.com |
681bac4ad9f9a98e8ffb840a576d7238c7d6fd3e | 21773a29e0f3a752a5c2445473cd7445b86047a3 | /myapp/migrations/0001_initial.py | eab1d9eb8f67ab9cd30b87515bf482348f2d3445 | [] | no_license | nick-su246/django | 62ca9d0d2b6b2a411caa6e631c6dd0b43d55af8b | 7124e7225699547d5a9d60ad69689fbc0066afe2 | refs/heads/master | 2022-06-21T10:38:12.398640 | 2020-05-11T10:39:47 | 2020-05-11T10:39:47 | 263,012,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | # Generated by Django 2.2.9 on 2020-05-11 10:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Book table."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('author', models.CharField(max_length=200)),
                ('pub_house', models.CharField(max_length=200)),
                ('pub_date', models.DateTimeField(verbose_name='date published')),
            ],
        ),
    ]
| [
"352339851@qq.com"
] | 352339851@qq.com |
c9dfb6ac3503bbaba36aefd18bac133eaf7e0d9d | 0f25182fd01ebb5db52bc4c25b7795ae6a81e568 | /frontend/views.py | c34273ed74a9f1d69c300b924eb55cc1931f024c | [] | no_license | nandhakumarn6/CourseManagementSystem | d20916b7d78d019380b0e3190d3fad5e784d6a03 | 1bb862929773c80c85d6e636eeb5519c296ae8f7 | refs/heads/main | 2023-06-03T12:19:30.753495 | 2021-06-20T10:44:45 | 2021-06-20T10:44:45 | 378,619,531 | 0 | 0 | null | 2023-09-10T05:02:13 | 2021-06-20T10:39:42 | Python | UTF-8 | Python | false | false | 683 | py | from django.shortcuts import render
def index(request):
    """Render the front-end shell page."""
    template_name = "react-header.html"
    return render(request, template_name)
from rest_framework.views import APIView
from rest_framework import authentication, permissions
from django.core import serializers
from django.http import JsonResponse
class get_user_info(APIView):
    """Return the authenticated user's basic profile fields as JSON."""
    # Session-authenticated, logged-in users only.
    authentication_classes = [authentication.SessionAuthentication]
    permission_classes = [permissions.IsAuthenticated]

    def get(self, request, format=None):
        user = request.user
        payload = {
            'first_name' : user.first_name,
            'last_name' : user.last_name,
            'email' : user.email,
        }
        return JsonResponse(payload)
| [
"65668433+nandhakumarn6@users.noreply.github.com"
] | 65668433+nandhakumarn6@users.noreply.github.com |
778149ba5b951b2a7bda792605d2755ceddecd45 | a53fd5f2133d80fa84919833d7703a46e5fc5006 | /utils/maskgen.py | c176758ddb544f5859b43e9dc79806f8c8744e50 | [
"MIT"
] | permissive | saitejamalyala/DeepWay | c47eaecd0d23f635bc1485e10b3d205c2849e9e3 | 1c6c0f80c0e80b7f2f0a283488bc078731f65dda | refs/heads/master | 2023-03-30T18:57:32.328745 | 2021-03-29T16:21:56 | 2021-03-29T16:21:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,172 | py | import os
import cv2
import numpy as np
import random
import scipy.stats as stats
from utils.geometry import *
##################### Borders generation #####################
def gen_p(start=40, limit=760):
    """Draw a value in [start, limit] with exponentially decaying probability.

    Samples (Exp(scale=2) + 1) * start and redraws until the value does not
    exceed `limit` (rejection sampling, written iteratively).
    """
    while True:
        sample = (np.random.exponential(2, 1) + 1) * start
        if sample <= limit:
            return sample[0]
def gen_borders(border=40,H=800,W=800):
    """
    Generate four random linear borders inside an H x W frame.

    With probability 0.5 the random points are placed on the top/bottom
    frame edges, otherwise on the left/right edges. Returns a list of
    (angle, start_point) pairs, one per border side.
    """
    border = (float)(border)
    # Pick one random point per frame side; the two branches differ only in
    # which coordinate of each side is randomized.
    if np.random.uniform()<0.5:
        border_points = (gen_p(border,W-border),H-gen_p(border,H-border),W-gen_p(border,W-border),gen_p(border,H-border))
        border_points = [(border_points[0],border),(border,border_points[1]),
                         (border_points[2],H-border),(W-border,border_points[3])]
    else:
        border_points = (gen_p(border,H-border),gen_p(border,W-border),H-gen_p(border,H-border),W-gen_p(border,W-border))
        border_points = [(border,border_points[0]),(border_points[1],H-border),
                         (W-border,border_points[2]),(border_points[3],border)]
    # Close the polygon so consecutive pairs describe all four sides.
    border_points.append(border_points[0])
    border_points = np.array(border_points)
    alpha = []
    # Orientation (atan2) of each side from its start to its end point.
    for i in range(4):
        dy = (border_points[i+1][1]-border_points[i][1])
        dx = (border_points[i+1][0]-border_points[i][0])
        alpha.append(np.arctan2(dy,dx))
    return [(a,p) for a,p in zip(alpha,border_points[:-1])]
def check_borders_area(borders, reference_area, fraction=0.65):
    """True when the polygon enclosed by the borders covers more than
    `fraction` of `reference_area`."""
    xs, ys = [], []
    for _, point in borders:
        xs.append(point[0])
        ys.append(point[1])
    ratio = PolyArea(xs, ys) / reference_area
    return ratio > fraction
def intersect_border(alpha, p, border):
    """Distance along direction `alpha` from `p` to the intersection with `border`.

    `border` is an (angle, point) pair as produced by gen_borders.
    """
    border_alpha, border_point = border
    return find_intersect(alpha, p, border_alpha, border_point, ret_xy=False)
##################### Synthetic points generation #####################
def gen_start_and_end(alpha,center,borders=40,H=800,W=800,angle_var=0.005,border_var=40):
    """
    Find line starting and ending points (intersections with borders).

    The row direction gets a small truncated-normal jitter (angle_var) and
    both endpoints are pulled inside the field by ~border_var/2 plus noise.
    Returns ((x1,y1),(x2,y2)) as integer pixel coordinates.
    """
    alpha = -alpha # convert alpha to get the right view in imshow mode
    alpha += stats.truncnorm(-2,2,loc=0, scale = angle_var).rvs(1)[0]
    # Signed distance from `center` to each border along the jittered direction.
    # NOTE(review): the `borders=40` default looks wrong — the loop below
    # iterates `borders`, so callers must pass the list from gen_borders.
    l = []
    for border in borders:
        l.append(intersect_border(alpha,center,border))
    l = np.array(l)
    # Closest border hit forward (positive) and backward (negative).
    lmax = np.min(l[l>0])
    lmin = np.max(l[l<0])
    l1 = lmax - border_var/2 + stats.truncnorm(-2,2, loc=0, scale=border_var/2).rvs(1)[0]
    l2 = lmin + border_var/2 + stats.truncnorm(-2,2, loc=0, scale=border_var/2).rvs(1)[0]
    x1 = int(round(l1*np.cos(alpha)+center[0]))
    y1 = int(round(l1*np.sin(alpha)+center[1]))
    x2 = int(round(l2*np.cos(alpha)+center[0]))
    y2 = int(round(l2*np.sin(alpha)+center[1]))
    return ((x1,y1),(x2,y2))
def find_intrarow_distance(nrows,alpha,borders,p=(400,400),lower_limit=12):
    """
    Compute intra-row distance Q randomly in a range depending on nrows.

    Measures the free space perpendicular to the row orientation and draws
    Q uniformly in [lower_limit, Qmax]; when even Qmax is below lower_limit
    Q is clamped and nrows is reduced to fit. Returns (Q, nrows).
    """
    alpha = alpha - np.pi/2 # perpendicular to the orientation
    (x1,y1),(x2,y2) = gen_start_and_end(alpha,p,borders=borders,angle_var=0,border_var=0)
    dist1 = compute_distance((x1,y1),p)
    dist2 = compute_distance((x2,y2),p)
    # Limiting free space is the closer of the two border hits.
    dist = min(dist1,dist2)
    Qmax = 2*(dist-5)/(nrows-0.3) # heuristic formula to have at least 5 pixels between the border and the last possible point
    if Qmax > lower_limit:
        Q = np.random.uniform(lower_limit,Qmax)
    else:
        Q = lower_limit
        nrows = 2*int(dist/Q) # too many rows with the current border, reduce them
    return Q,nrows
def find_centers(nrows, alpha, image_center=(400,400), Q=20):
    """Pivot points of all rows, centered on image_center and spaced by Q
    (with a random jitter of Q/5) along the perpendicular direction."""
    theta = -alpha  # flip the angle for imshow's y-down convention
    offsets = np.arange(nrows / 2 - 0.5, -nrows / 2, -1) * Q
    offsets = offsets + random_displ(Q / 5, shape=nrows)
    perp = theta - np.pi / 2
    xs = offsets * np.cos(perp) + image_center[0]
    ys = offsets * np.sin(perp) + image_center[1]
    return list(zip(xs, ys))
##################### Mask creation #####################
def get_row_line(p1, p2):
    """Integer x/y coordinates plus orientation angle for the segment p1 -> p2."""
    length, alpha = line_polar(p1, p2)
    xs, ys = line_polar_to_cart(length, alpha, p1)
    xs = np.round(xs).astype("int")
    ys = np.round(ys).astype("int")
    return (xs, ys, alpha)
def generate_holes(row_len, hole_prob=0.5, hole_dim=(2, 4), hole_frame=12):
    """Boolean keep-mask of length `row_len` with randomly punched holes.

    Each position independently starts a hole with probability
    `hole_prob`/100; hole lengths are drawn uniformly from `hole_dim`.
    Holes within `hole_frame` of either end are extended to the border so
    no tiny stub of points is left there.

    Note: the default `hole_dim` was a mutable list; it is now a tuple
    (never mutated, so behavior is unchanged and list callers still work).
    """
    indexes = np.ones(row_len)  # 1 = keep the point, 0 = hole
    holes_pos = np.random.choice([0, 1], row_len, p=(1 - hole_prob / 100, hole_prob / 100))
    holes_pos = np.where(holes_pos == 1)[0]
    for h in holes_pos:
        hole = random.randint(hole_dim[0], hole_dim[1])  # random hole length
        indexes[h:h + hole] = 0
    if len(holes_pos):
        if holes_pos[0] <= hole_frame:  # avoid leaving few points at the beginning
            indexes[:holes_pos[0]] = 0
        if holes_pos[-1] >= row_len - hole_frame:  # avoid leaving few points at the end
            indexes[holes_pos[-1]:] = 0
    return indexes.astype("bool")
def create_mask(points, H=800, W=800, radius=(3, 4), hole_prob=0.5, hole_dim=(4, 6), hole_frame=12):
    """Render all rows into a boolean H x W mask.

    For each (start, end) pair in `points`, rasterizes the row line,
    punches random holes into it, and stamps filled circles of random
    radius (within `radius`) at the surviving positions. Returns the mask
    together with the per-row (x, y, alpha) data actually drawn.

    Note: the mutable list defaults for `radius`/`hole_dim` are now
    tuples; they were never mutated, so behavior is unchanged.
    """
    mask = np.ones((H, W), "float")
    row_lines = []
    for p in points:
        row_line = get_row_line(p[0], p[1])
        # Drop the hole positions from the rasterized line.
        indexes = generate_holes(len(row_line[0]), hole_prob, hole_dim, hole_frame)
        row_line = row_line[0][indexes], row_line[1][indexes], row_line[2]
        for x, y in zip(row_line[0], row_line[1]):
            cv2.circle(mask, (x, y), random.randint(radius[0], radius[1]), color=0, thickness=-1)
        row_lines.append(row_line)
    return mask.astype("bool"), row_lines
##################### Waypoints creation #####################
def gen_wp(line1,line2,index=0):
    """
    Compute wp between two adjacent lines from the points in index position (0: line starting point, -1: line ending point).

    The waypoint sits half the inter-row distance beyond the midpoint of
    the two endpoints, along the mean row direction, as integer pixels.
    """
    p0 = (line1[0][index],line1[1][index])
    p1 = (line2[0][index],line2[1][index])
    mx = np.mean((p0[0],p1[0])) # middle point x
    my = np.mean((p0[1],p1[1])) # middle point y
    alpha = np.mean((line1[-1],line2[-1])) # mean angle
    dist = compute_distance(p0,p1)
    l = dist/2
    if index<0: # at the end of the line, we should move in the opposite direction
        l = -l
    x,y = line_polar_to_cart(l,alpha,(mx,my))
    return (int(round(x)),int(round(y)))
def gen_waypoints(row_lines):
    """Waypoints for every adjacent pair of rows (none before the first
    row or after the last one), at both ends of each pair."""
    waypoints = []
    for idx in range(1, len(row_lines)):
        prev_line = row_lines[idx - 1]
        cur_line = row_lines[idx]
        waypoints.append(gen_wp(prev_line, cur_line, index=0))
        waypoints.append(gen_wp(prev_line, cur_line, index=-1))
    return waypoints
##################### Ground truths managing #####################
def get_points(file, img_shape, mirror=False):
    """Read waypoint boxes from a YOLO-style label file.

    Each line holds `class cx cy w h` with coordinates normalized to the
    image size; returns a list of (p1, p2) corner pairs in pixels
    (upper-left/lower-right, or mirrored corners when `mirror` is True,
    which depends on the row orientation).
    """
    points = []
    img_shape = img_shape[:-1][::-1]  # (H, W, C) -> (W, H)
    # Fix: the original leaked the file handle (open().read() without close).
    with open(file) as fh:
        lines = fh.read().split('\n')[:-1]
    for r in lines:
        r = r.split()
        center = (float(r[1]) * img_shape[0], float(r[2]) * img_shape[1])
        width = float(r[3]) * img_shape[0]
        height = float(r[4]) * img_shape[1]
        if mirror:  # depends on orientation of rows
            p1 = round(center[0] + width / 2), round(center[1] - height / 2)
            p2 = round(center[0] - width / 2), round(center[1] + height / 2)
        else:
            p1 = round(center[0] - width / 2), round(center[1] - height / 2)
            p2 = round(center[0] + width / 2), round(center[1] + height / 2)
        points.append((p1, p2))
    return points
def rescale_points(points, pad, pad_ax, point_pad_ax, r):
    """Shift waypoint coordinates by the leading padding amount on the
    padded axis, then rescale everything by 1/r (rounded to int)."""
    shifted = np.array(points.copy())
    offset = pad[pad_ax][0]
    shifted[:, :, point_pad_ax] += offset
    scaled = np.round(shifted / r)
    return scaled.astype("int")
def rescale_img(img,points=None,H=800,W=800):
    """
    Rescale the image and the wp to the target dimension (PLEASE USE INTEGER W/H RATIOS ONLY).

    Pads the needed axis symmetrically to reach the target aspect ratio,
    resizes to (W, H) and clips values to [0, 1]. When `points` is given,
    the waypoints are transformed accordingly and returned as well.
    """
    ratio = W/H
    w_new = img.shape[0]*ratio
    h_new = img.shape[1]/ratio
    # Pad whichever axis must grow to match the target aspect ratio.
    p = np.max([h_new - img.shape[0],w_new - img.shape[1]])
    pad_ax = np.argmax([h_new - img.shape[0],w_new - img.shape[1]])
    pad = [(0,0)]*3
    pad[pad_ax] = ((int)(np.ceil(p/2)),(int)(np.floor(p/2)))
    img2 = np.pad(img,pad)
    img2 = cv2.resize(img2,(W,H),interpolation=cv2.INTER_AREA)
    img2 = np.clip(img2, 0, 1)
    if points is not None:
        # Scale factor and the coordinate axis affected by the padding.
        if pad_ax:
            r = img.shape[0]/H
            point_pad_ax = 0
        else:
            r = img.shape[1]/W
            point_pad_ax = 1
        points = rescale_points(points,pad,pad_ax,point_pad_ax,r)
        return img2,points
    return img2
##################### Others #####################
def random_zoom(mask,wp,centers,points,zoom_ratio=[80,100],H=800,W=800):
    """
    Apply random out-zoom to the generated mask. Max zoom_ratio must be <= 100.

    Shrinks the mask by a truncated-normal ratio, pads it back to (H, W)
    with white (background) and applies the same scale/offset transform to
    the waypoints, row centers and row endpoints.
    """
    ratio = stats.truncnorm((zoom_ratio[0]-100)/5,(zoom_ratio[1]-100)/5,loc=100,scale=5).rvs(1)[0]/100
    H_reshaped,W_reshaped = (int)(H*ratio),(int)(W*ratio)
    mask_reshaped = np.clip(cv2.resize(mask.astype("uint8"),(W_reshaped,H_reshaped),interpolation=cv2.INTER_NEAREST),0,1)
    #white padding around the parcel
    delta = (H-H_reshaped,W-W_reshaped)
    pad_y = ((int)(np.ceil(delta[0]/2)),(int)(np.floor(delta[0]/2)))
    pad_x = ((int)(np.ceil(delta[1]/2)),(int)(np.floor(delta[1]/2)))
    padding = (pad_y,pad_x)
    mask_reshaped = np.pad(mask_reshaped,padding,'constant', constant_values=1.)
    # waypoints transformation
    wp_reshaped = np.round(np.array(wp)*ratio+(pad_x[0],pad_y[0])).astype("int")
    centers_reshaped = np.round(np.array(centers)*ratio+(pad_x[0],pad_y[0])).astype("int")
    points_reshaped = np.round(np.array(points)*ratio+(pad_x[0],pad_y[0])).astype("int")
    return mask_reshaped, wp_reshaped, centers_reshaped, points_reshaped
def save_img(img, num, extension='png', data_path="mask_datasets"):
    """Write the [0, 1] mask to <data_path>/img<num>.<extension> as uint8."""
    filename = 'img{}.{}'.format(num, extension)
    target = os.path.join(data_path, filename)
    cv2.imwrite(target, (img * 255).astype("uint8"))
| [
"francesco.salvetti@polito.it"
] | francesco.salvetti@polito.it |
898a7ec7de098353dcc1946616d6bfee3d923b3b | c84a545cd9dea05deea442525b69c2bc574d264c | /hw_3/task_3.py | e630a57c9a270065c55193c01a2068a02c79f849 | [] | no_license | NikeSmitt/geekbrains_algorithms_main | d1ecb756897f2baff0829f9a008bd5a006ce5603 | 70cde5c79380a5dd12135c5e984afb610f4ad449 | refs/heads/main | 2023-03-26T16:43:58.127480 | 2021-03-18T16:14:24 | 2021-03-22T06:33:45 | 341,490,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 714 | py | # В массиве случайных целых чисел поменять местами минимальный и максимальный элементы.
# Swap the minimum and maximum elements of an array of random integers.
import random

SIZE = 10
MIN_ITEM = 0
MAX_ITEM = 100

src_array = [random.randint(MIN_ITEM, MAX_ITEM) for _ in range(SIZE)]

# Locate the positions of the smallest and largest values in one pass.
min_elem_idx = 0
max_elem_idx = 0
for idx, value in enumerate(src_array):
    if value < src_array[min_elem_idx]:
        min_elem_idx = idx
    elif value > src_array[max_elem_idx]:
        max_elem_idx = idx

print(src_array)
src_array[min_elem_idx], src_array[max_elem_idx] = src_array[max_elem_idx], src_array[min_elem_idx]
print(f'Indexes: min = {min_elem_idx} <-> max = {max_elem_idx}')
print(src_array)
| [
"grim322@bk.ru"
] | grim322@bk.ru |
a99f813133a37978c9359920c85b038b5cc70642 | 158e6afdc1cfb397c5694f7cb885f3a8a84b4e44 | /datasets/dataset_utils.py | 6ec6d002ede16e176d96b4dd4ee8f9237c0e7e02 | [] | no_license | xuepo99/zero-shot-style-transfer | b9bca1e2aba8ff6dc8a0e133ba25473d8e9eac50 | c696863dadf3c92d8df6aec17fc8abb58602f809 | refs/heads/master | 2021-05-08T05:30:56.925416 | 2017-09-30T04:48:29 | 2017-09-30T04:48:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,361 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import yaml
import tensorflow as tf
slim = tf.contrib.slim  # TF 1.x slim API alias
# Default filename for the per-dataset metadata (sample counts etc.).
_META_DATA_FILENAME = 'dataset_meta_data.txt'
# TFRecord shards are named "<dataset>_<split>_*.tfrecord".
_FILE_PATTERN = '%s_%s_*.tfrecord'
_ITEMS_TO_DESCRIPTIONS = {
    'image': 'A color image of varying size.',
    'shape': 'The shape of the image.'
}
def int64_feature(values):
    """Wrap an int (or list/tuple of ints) in a tf.train Int64List Feature."""
    if not isinstance(values, (tuple, list)):
        values = [values]
    int_list = tf.train.Int64List(value=values)
    return tf.train.Feature(int64_list=int_list)
def bytes_feature(values):
    """Wrap a single bytes value in a tf.train BytesList Feature."""
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[values]))
def image_to_tfexample(image_data, image_format, image_shape, image_filename):
    """Build a tf.train.Example holding one encoded image plus its metadata."""
    return tf.train.Example(features=tf.train.Features(feature={
        'image/encoded': bytes_feature(image_data),
        'image/format': bytes_feature(image_format),
        'image/shape': int64_feature(image_shape),
        'image/filename': bytes_feature(image_filename),
    }))
def write_dataset_meta_data(dataset_dir, dataset_meta_data,
                            filename=_META_DATA_FILENAME):
    """Serialize the dataset meta-data dict to YAML inside dataset_dir."""
    meta_filename = os.path.join(dataset_dir, filename)
    # NOTE(review): the file is opened in binary mode while yaml.dump writes
    # str by default — presumably Python 2-era code; confirm under Python 3.
    with open(meta_filename, 'wb') as f:
        yaml.dump(dataset_meta_data, f)
    print('Finish writing the dataset meta data.')
def has_dataset_meta_data_file(dataset_dir, filename=_META_DATA_FILENAME):
    """Whether the meta-data file already exists under dataset_dir."""
    meta_path = os.path.join(dataset_dir, filename)
    return tf.gfile.Exists(meta_path)
def read_dataset_meta_data(dataset_dir, filename=_META_DATA_FILENAME):
    """Load and return the YAML meta-data dict stored in dataset_dir."""
    meta_filename = os.path.join(dataset_dir, filename)
    with open(meta_filename, 'rb') as f:
        # NOTE(review): yaml.load without an explicit Loader can execute
        # arbitrary tags — fine only for trusted local files; consider
        # yaml.safe_load.
        dataset_meta_data = yaml.load(f)
    print('Finish loading the dataset meta data of [%s].' %
          dataset_meta_data.get('dataset_name'))
    return dataset_meta_data
def get_split(dataset_name,
              split_name,
              dataset_dir,
              file_pattern=None,
              reader=None):
    """Build a slim Dataset for one split of a TFRecord image dataset.

    Looks up the sample count in the dataset meta-data file and wires up a
    TF-Example decoder for the image/shape/filename features. Raises
    ValueError for an unknown split or a missing meta-data file.
    """
    if split_name not in ['train', 'validation']:
        raise ValueError('split name %s was not recognized.' % split_name)
    if not file_pattern:
        file_pattern = _FILE_PATTERN
    file_pattern = os.path.join(dataset_dir, file_pattern % (
        dataset_name, split_name))
    # read the dataset meta data
    if has_dataset_meta_data_file(dataset_dir):
        dataset_meta_data = read_dataset_meta_data(dataset_dir)
        num_samples = dataset_meta_data.get('num_of_' + split_name)
    else:
        raise ValueError('No dataset_meta_data file available in %s' % dataset_dir)
    if reader is None:
        reader = tf.TFRecordReader
    # Mapping from serialized Example fields to parsed tensors.
    keys_to_features = {
        'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
        'image/format': tf.FixedLenFeature((), tf.string, default_value='png'),
        'image/shape': tf.FixedLenFeature((3,), tf.int64, default_value=(224, 224, 3)),
        'image/filename': tf.FixedLenFeature([], tf.string, default_value=''),
    }
    items_to_handlers = {
        'image': slim.tfexample_decoder.Image(
            'image/encoded', 'image/format'),
        'shape': slim.tfexample_decoder.Tensor('image/shape'),
        'filename': slim.tfexample_decoder.Tensor('image/filename')
    }
    decoder = slim.tfexample_decoder.TFExampleDecoder(
        keys_to_features, items_to_handlers)
    return slim.dataset.Dataset(
        data_sources=file_pattern,
        reader=reader,
        decoder=decoder,
        num_samples=num_samples,
        items_to_descriptions=_ITEMS_TO_DESCRIPTIONS)
class ImageReader(object):
    """Helper class that provides TensorFlow image coding utilities."""
    def __init__(self):
        # Reusable decode graph: feed raw bytes, fetch the decoded tensor.
        self._decode_data = tf.placeholder(dtype=tf.string)
        self._decode_image = tf.image.decode_image(self._decode_data, channels=0)
    def read_image_dims(self, sess, image_data):
        """Return the shape of the encoded image (height, width, channels)."""
        image = self.decode_image(sess, image_data)
        return image.shape
    def decode_image(self, sess, image_data):
        """Decode raw image bytes; asserts the result is a 3-channel image."""
        image = sess.run(self._decode_image,
                         feed_dict={self._decode_data: image_data})
        assert len(image.shape) == 3
        assert image.shape[2] == 3
        return image
class ImageCoder(object):
    """Helper class that provides TensorFlow image coding utilities;
    also works for corrupted data with an incorrect extension."""
    def __init__(self):
        # Decode-then-reencode graph: any readable image comes back out as
        # a 100%-quality RGB JPEG string.
        self._decode_data = tf.placeholder(dtype=tf.string)
        self._decode_image = tf.image.decode_image(self._decode_data, channels=0)
        self._encode_jpeg = tf.image.encode_jpeg(self._decode_image, format='rgb', quality=100)
    def decode_image(self, sess, image_data):
        """Validate and re-encode image bytes.

        Returns (status, jpeg_string, shape); on any failure status is
        False and the other two values are None.
        """
        # verify the image from the image_data
        status = False
        try:
            # decode image and verify the data
            image = sess.run(self._decode_image,
                             feed_dict={self._decode_data: image_data})
            image_shape = image.shape
            assert len(image_shape) == 3
            assert image_shape[2] == 3
            # encode as RGB JPEG image string and return
            image_string = sess.run(self._encode_jpeg, feed_dict={self._decode_data: image_data})
            status = True
        except BaseException:
            # Broad on purpose: corrupted inputs may raise many error types;
            # the caller only needs the False status.
            image_shape, image_string = None, None
        return status, image_string, image_shape
"shenglv1989@gmail.com"
] | shenglv1989@gmail.com |
94da68a8c082923b0cb9e15792bef2b28bfbd2b0 | c529434b9dc4263ca0f9b35f586825399917270e | /book.py | f8d567e76c6e14f37b1a34883e0800935a87cd28 | [] | no_license | cyberomin/Library | 256aade093be5661c1c678074572d5395b34f30e | e60965dfdca04979f8c3db787b3e1953f9692bd8 | refs/heads/master | 2016-09-11T07:23:47.534318 | 2012-09-25T12:45:31 | 2012-09-25T12:45:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,269 | py | import tornado.ioloop
import tornado.web
import pymongo
import os
import time
connection = pymongo.Connection("localhost", 27017)
db = connection["library"]
collection = db["books"]
class MainHandler(tornado.web.RequestHandler):
    # Serves the landing page (index.html contains the "add book" form).
    def get(self):
        self.render("index.html")
class BookHandler(tornado.web.RequestHandler):
    """Persists a new book posted from the form, then redirects to the list."""

    def post(self):
        # The creation timestamp doubles as a (mostly) unique book id.
        record = {
            "book_id": time.time(),
            "title": self.get_argument("title"),
            "author": self.get_argument("author"),
            "year": self.get_argument("year"),
        }
        collection.insert(record)
        self.redirect("/books")
class BookResult(tornado.web.RequestHandler):
    # Lists every book stored in the "books" collection.
    def get(self):
        result = collection.find()
        self.render("books.html",books=result)
class BookDelete(tornado.web.RequestHandler):
    """Deletes the book whose `book_id` matches the URL segment."""

    def get(self,id):
        # `book_id` is stored as a float (time.time() in BookHandler), but the
        # URL regex captures a string; without the conversion the filter never
        # matches and nothing is ever deleted. Debug `print` removed.
        collection.remove({"book_id": float(id)})
# Route table: form page, create, list, and delete-by-id endpoints.
application = tornado.web.Application([
    (r"/", MainHandler),(r"/book", BookHandler),(r"/books", BookResult), (r"/delete/([0-9\.]+)", BookDelete)
], debug=True, static_path = os.path.join( os.path.dirname(__file__), "static" ) )
if __name__ == "__main__":
    # Serve on port 8000 until interrupted.
    application.listen(8000)
    tornado.ioloop.IOLoop.instance().start()
"cyberomin@yahoo.com"
] | cyberomin@yahoo.com |
5f34b64d875d3784f0e1740ec07bff206fac3a41 | 555eb9c234f86911df70188914d45c358c67bb62 | /tensorflow/python/keras/engine/base_layer_utils.py | b97326eea6a28ba9f1e466c1d59a43c7108bba19 | [
"Apache-2.0"
] | permissive | obeshor/tensorflow | 64b99bfec161e8680535104e7e90834b1060c5c3 | 0fd570848f7cd08904907640111d435dcb7fba8a | refs/heads/master | 2020-05-18T09:44:13.516187 | 2019-04-30T20:33:02 | 2019-04-30T21:32:19 | 184,335,557 | 2 | 1 | Apache-2.0 | 2019-04-30T21:43:01 | 2019-04-30T21:43:00 | null | UTF-8 | Python | false | false | 24,570 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains private utilities used mainly by the base Layer class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as collections_lib
import threading
import enum
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.eager import context
from tensorflow.python.framework import auto_control_deps
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import control_flow_util_v2
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import init_ops_v2
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.util import nest
from tensorflow.python.util import tf_contextlib
_call_context = threading.local()
class CallConvention(enum.Enum):
  """Calling conventions for passing `Layer` inputs to `Layer.call`."""
  # The Layer takes inputs as its first argument, named "inputs" for
  # compatibility with the signature of Layer.__call__. This is the mode assumed
  # for Layers which are not subclassed Models.
  EXPLICIT_INPUTS_ARGUMENT = 1
  # The Layer takes a single positional argument, not named "inputs". It's
  # treated like an "inputs" argument.
  SINGLE_POSITIONAL_ARGUMENT = 2
  # The Layer has multiple positional arguments to which its inputs should be
  # bound.
  POSITIONAL_ARGUMENTS_ARE_INPUTS = 3
def create_mean_metric(value, name=None):
  """Creates a `Mean` metric and updates it with `value`.

  Returns a `(metric_obj, update_result)` tuple, where `update_result` is the
  value returned by calling the metric in a replica-local-safe way.
  """
  # TODO(psv): Remove this import when b/110718070 is fixed.
  # Imported locally to avoid a circular dependency with the metrics module.
  from tensorflow.python.keras import metrics as metrics_module  # pylint: disable=g-import-not-at-top
  from tensorflow.python.keras.distribute import distributed_training_utils  # pylint: disable=g-import-not-at-top
  metric_obj = metrics_module.Mean(name=name)
  return (metric_obj,
          distributed_training_utils.call_replica_local_fn(metric_obj, value))
def make_variable(name,
                  shape=None,
                  dtype=dtypes.float32,
                  initializer=None,
                  trainable=None,
                  caching_device=None,
                  validate_shape=True,
                  constraint=None,
                  use_resource=None,
                  collections=None,
                  synchronization=tf_variables.VariableSynchronization.AUTO,
                  aggregation=tf_variables.VariableAggregation.NONE,
                  partitioner=None):  # pylint: disable=unused-argument
  """Temporary util to create a variable (relies on `variable_scope.variable`).
  Some reuse-related technicalities prevent us from using
  `variable_scope.get_variable()` directly, so we use a subcomponent
  that has fewer constraints (`variable_scope.variable()`).
  In the longer term, it seems like a similar "default variable creator" method
  should exist in `Trackable` instead. When this happens, we can get
  rid of this temporary solution.
  TODO(fchollet): remove this method when no longer needed.
  Arguments:
    name: Variable name.
    shape: Variable shape.
    dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
    initializer: Initializer instance (callable).
    trainable: Whether the variable should be part of the layer's
      "trainable_variables" (e.g. variables, biases)
      or "non_trainable_variables" (e.g. BatchNorm mean, stddev).
      Note, if the current variable scope is marked as non-trainable
      then this parameter is ignored and any added variables are also
      marked as non-trainable. `trainable` defaults to `True` unless
      `synchronization` is set to `ON_READ`.
    caching_device: Passed to `tf.Variable`.
    validate_shape: Passed to `tf.Variable`.
    constraint: Constraint instance (callable).
    use_resource: Whether to use a `ResourceVariable`.
    collections: List of graph collections keys. The new variable is added to
      these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
    synchronization: Indicates when a distributed a variable will be
      aggregated. Accepted values are constants defined in the class
      `tf.VariableSynchronization`. By default the synchronization is set to
      `AUTO` and the current `DistributionStrategy` chooses
      when to synchronize. If `synchronization` is set to `ON_READ`,
      `trainable` must not be set to `True`.
    aggregation: Indicates how a distributed variable will be aggregated.
      Accepted values are constants defined in the class
      `tf.VariableAggregation`.
    partitioner: Not handled at this time.
  Returns:
    Variable instance.
  """
  # A non-callable initializer is treated as the literal initial value.
  initializing_from_value = False
  if initializer is not None and not callable(initializer):
    initializing_from_value = True
  # init_scope lifts variable creation out of any function-building graph so
  # the variable lives in the outermost (eager or graph) context.
  with ops.init_scope():
    if initializing_from_value:
      init_val = initializer
      variable_dtype = None
    else:
      # Instantiate initializer if provided initializer is a type object.
      if isinstance(
          initializer,
          (type(init_ops.Initializer), type(init_ops_v2.Initializer))):
        initializer = initializer()
      # Defer actual initialization until the variable is constructed.
      init_val = lambda: initializer(shape, dtype=dtype)
      variable_dtype = dtype.base_dtype
  if use_resource is None:
    use_resource = True

  # TODO(apassos,rohanj) figure out how to remove collections from here so we
  # can remove the V1.
  v = tf_variables.VariableV1(
      initial_value=init_val,
      name=name,
      trainable=trainable,
      caching_device=caching_device,
      dtype=variable_dtype,
      validate_shape=validate_shape,
      constraint=constraint,
      use_resource=use_resource,
      collections=collections,
      synchronization=synchronization,
      aggregation=aggregation)
  return v
def get_default_graph_uid_map():
  """Return (creating it if absent) the layer-name UID map for the default graph."""
  # TODO(fchollet): refactor this into backend.
  graph = ops.get_default_graph()
  uid_map = backend.PER_GRAPH_LAYER_NAME_UIDS.get(graph)
  if uid_map is None:
    uid_map = collections_lib.defaultdict(int)
    backend.PER_GRAPH_LAYER_NAME_UIDS[graph] = uid_map
  return uid_map
def unique_layer_name(name, name_uid_map=None, avoid_names=None, namespace='',
                      zero_based=False):
  """Makes a layer name (or arbitrary string) unique within a TensorFlow graph.

  Arguments:
    name: String name to make unique.
    name_uid_map: An optional defaultdict(int) to use when creating unique
      names. If None (default), uses a per-Graph dictionary.
    avoid_names: An optional set or dict with names which should not be used. If
      None (default) does not avoid any names.
    namespace: Gets a name which is unique within the (graph, namespace). Layers
      which are not Networks use a blank namespace and so get graph-global
      names.
    zero_based: If True, name sequences start with no suffix (e.g. "dense",
      "dense_1"). If False, naming is one-based ("dense_1", "dense_2").

  Returns:
    Unique string name.

  Example:

  ```python
  _unique_layer_name('dense')  # dense_1
  _unique_layer_name('dense')  # dense_2
  ```
  """
  if name_uid_map is None:
    name_uid_map = get_default_graph_uid_map()
  if avoid_names is None:
    avoid_names = set()
  key = (namespace, name)
  # Keep bumping the per-(namespace, name) counter until the candidate is
  # not one of the names to avoid.
  while True:
    if zero_based:
      count = name_uid_map[key]
      candidate = '%s_%d' % (name, count) if count else name
      name_uid_map[key] = count + 1
    else:
      name_uid_map[key] += 1
      candidate = '%s_%d' % (name, name_uid_map[key])
    if candidate not in avoid_names:
      return candidate
def collect_previous_mask(input_tensors):
  """Retrieves the output mask(s) of the previous node.

  Arguments:
    input_tensors: An arbitrary structure of Tensors.

  Returns:
    A mask tensor or list of mask tensors (None where no mask is attached).
  """
  return nest.map_structure(
      lambda x: getattr(x, '_keras_mask', None), input_tensors)
def have_all_keras_metadata(tensors):
  """Whether every tensor in `tensors` carries `_keras_history` metadata."""
  for tensor in nest.flatten(tensors):
    if not hasattr(tensor, '_keras_history'):
      return False
  return True
def generate_placeholders_from_shape(shape):
  # Placeholder with the backend's default float dtype for the given shape.
  return array_ops.placeholder(shape=shape, dtype=backend.floatx())
def create_keras_history(tensors):
  """Wraps TensorFlow Operations for compatibility with the Functional API.
  This method checks to see if a Tensor in `tensors` is missing Keras metadata
  and has its origin in a Keras `Input` Layer. If so, this method will replace
  the raw TensorFlow Operations that created this tensor with
  `TensorFlowOpLayer` instances that create identical operations.
  Any Tensors not originating from a Keras `Input` Layer will be treated as
  constants when constructing `TensorFlowOpLayer` instances.
  Arguments:
    tensors: A structure of Tensors, some of which come from raw TensorFlow
      operations and need to have Keras metadata assigned to them.
  Returns:
    keras_tensors: The Tensors found that came from a Keras Layer.
  """
  # The recursive helper does the real work; the processed-ops set starts empty.
  _, created_layers = _create_keras_history_helper(tensors, set(), [])
  return created_layers
def _create_keras_history_helper(tensors, processed_ops, created_layers):
  """Helper method for `create_keras_history`.
  Arguments:
    tensors: A structure of Tensors for which to create Keras metadata.
    processed_ops: Set. TensorFlow operations that have already been wrapped in
      `TensorFlowOpLayer` instances.
    created_layers: List. The `TensorFlowOpLayer` instances created.
  Returns:
    Tuple. First element is the updated set of TensorFlow Operations that
    have been wrapped in `TensorFlowOpLayer` instances. Second element is
    a list of the `TensorFlowOpLayer` instances created.
  """
  # Import of `base_layer` needed in order to create `TensorFlowOpLayer`.
  # Cannot be imported at top because of circular dependencies.
  # TODO(omalleyt): Resolve circular dependency.
  from tensorflow.python.keras.engine import base_layer  # pylint: disable=g-import-not-at-top
  tensor_list = nest.flatten(tensors)
  for tensor in tensor_list:
    # Tensors that already carry Keras metadata need no wrapping.
    if getattr(tensor, '_keras_history', None) is not None:
      continue
    op = tensor.op  # The Op that created this Tensor.
    if op not in processed_ops:
      # Recursively set `_keras_history`.
      op_inputs = list(op.inputs)
      constants = {}
      layer_inputs = []
      for i, op_input in enumerate(op_inputs):
        if uses_keras_history(op_input):
          layer_inputs.append(op_input)
        else:
          # Treat any value not originating from a `keras.Input` as
          # a constant. Variables cannot be supported.
          if (distribution_strategy_context.in_cross_replica_context() and
              not ops.executing_eagerly_outside_functions()):
            # In Legacy Graph mode, evaluating here makes Session be
            # configured improperly.
            constants[i] = op_input
          else:
            # Evaluate the constant eagerly and capture its value.
            constants[i] = backend.function([], op_input)([])
      # Wrap the ancestors first so metadata exists bottom-up.
      processed_ops, created_layers = _create_keras_history_helper(
          layer_inputs, processed_ops, created_layers)
      name = op.name
      node_def = op.node_def.SerializeToString()
      op_layer = base_layer.TensorFlowOpLayer(
          node_def, constants=constants, name=name)
      created_layers.append(op_layer)
      # Recording the inbound node attaches `_keras_history` to op.outputs.
      op_layer._add_inbound_node(  # pylint: disable=protected-access
          layer_inputs, op.outputs)
      processed_ops.update([op])
  return processed_ops, created_layers
def needs_keras_history(tensors):
  """Check if any Tensors need to be wrapped in TensorFlowOpLayers.

  This will never return True inside a sublayer, because sublayers
  do not need to create Keras History. Otherwise, this returns True
  if one or more of `tensors` originates from a `keras.Input` and
  does not have `_keras_history` set.

  Arguments:
    tensors: An arbitrary nested structure of Tensors.

  Returns:
    Bool, whether at least one Tensor needs to be wrapped.
  """
  flat_tensors = nest.flatten(tensors)
  if is_in_call_context():
    # Sublayers never create Keras history themselves.
    return False
  if all(getattr(t, '_keras_history', None) is not None
         for t in flat_tensors):
    # KerasHistory already set on every tensor.
    return False
  return uses_keras_history(tensors)
def is_in_call_context():
  """Returns true if inside of a model/layer '__call__'."""
  # `in_call` is set by the `call_context` context manager; the thread-local
  # default is False when no `call` is on the stack.
  return getattr(_call_context, 'in_call', False)
def is_in_frozen_context():
  """Returns if currently executing inside a `call` of a frozen Layer.
  A Layer is considered frozen if `layer.trainable=False`.
  Returns:
    Whether currently inside the `call` of a frozen Layer.
  """
  # `frozen` is set by the `call_context` context manager.
  return getattr(_call_context, 'frozen', False)
def uses_keras_history(tensors):
  """Check if at least one Tensor originates from a `keras.Input`.

  Walks the op graph backwards (breadth-first) from `tensors`. Any Tensor
  whose ancestry includes a `keras.Input` carries a `_keras_history`
  attribute; Tensors already proven not to are flagged with
  `_keras_history_checked` and skipped.

  Arguments:
    tensors: An arbitrary nested structure of Tensors.

  Returns:
    Bool, whether at least one Tensor originates from a `keras.Input`.
  """
  visited = set()
  frontier = nest.flatten(tensors)
  while frontier:
    next_frontier = set()
    for tensor in frontier:
      if getattr(tensor, '_keras_history_checked', None) is not None:
        # Previously proven not to descend from a `keras.Input`.
        continue
      if getattr(tensor, '_keras_history', None) is not None:
        return True
      try:
        next_frontier.update(tensor.op.inputs)
      except AttributeError:
        # In case `tensor` is a Variable created in an Eager context.
        pass
    visited.update(frontier)
    frontier = list(next_frontier - visited)
  # Mark that these Tensors have been checked once for `_keras_history`,
  # and should not be checked again for performance reasons.
  mark_checked(tensors)
  return False
def mark_checked(tensors):
  """Marks that these Tensors should not be tracked.

  This prevents Layers from attempting to create TensorFlowOpLayers
  for these Tensors.

  Arguments:
    tensors: An arbitrary structure of Tensors.
  """
  for tensor in nest.flatten(tensors):
    tensor._keras_history_checked = True  # pylint: disable=protected-access
@tf_contextlib.contextmanager
def call_context(layer):
  """Scope that marks when we are currently inside a Layer/Model's `call`."""
  # Remember the enclosing state so nested calls unwind correctly.
  previous_state = (is_in_call_context(), is_in_frozen_context())
  _call_context.in_call = True
  if not layer.trainable:
    _call_context.frozen = True
  try:
    yield
  finally:
    _call_context.in_call, _call_context.frozen = previous_state
def training_arg_passed_to_call(argspec, args, kwargs):
  """Returns whether a user passed the `training` argument in `__call__`."""
  if 'training' in kwargs:
    return True
  # `argspec.args` starts with ['self', 'inputs']; positional args bind to
  # the parameters that follow those two.
  positionally_bound = dict(zip(argspec.args[2:], args))
  return 'training' in positionally_bound
class AutoAddUpdates(object):
  """Automatically track stateful ops with `add_update`.
  This context manager is used to automatically add stateful ops to a Layer
  or Model's `.updates`. This ensures that stateful ops are run in the Keras
  training loop. It also allows for these stateful ops to be disabled by
  setting `trainable=False`.
  Example:
  ```
  with AutoAddUpdates(layer, inputs) as auto_updates:
    outputs = layer.call(inputs)
    auto_updates.set_outputs(outputs)
  ```
  Attributes:
    layer: Layer or Model instance to add the updates to.
    inputs: The inputs to this Layer or Model, to be used for input-conditional
      updates.
    outputs: The outputs of this Layer or Model.
  """
  def __init__(self, layer, inputs):
    self.layer = layer
    self.inputs = inputs
    self.outputs = []
  def set_outputs(self, outputs):
    # Must be set exactly once, after `layer.call` has run inside the scope.
    if self.outputs:
      raise RuntimeError('`set_outputs` should only be called once on an'
                         '`AutoAddUpdates` instance.')
    self.outputs = outputs
  def __enter__(self):
    # Only run in V2 Function mode.
    if (context.executing_eagerly() or
        not ops.executing_eagerly_outside_functions()):
      return self
    # Remember how many ops exist now; anything added during the scope is new.
    self._graph = ops.get_default_graph()
    self._num_operations = len(self._graph.get_operations())
    return self
  def __exit__(self, error_type, unused_value, unused_traceback):
    if error_type:
      # Allow errors that occurred inside this context manager to pass through
      # normally.
      return
    # Only run in V2 Function mode.
    if (context.executing_eagerly() or
        not ops.executing_eagerly_outside_functions()):
      return
    if (self._graph is not ops.get_default_graph() or
        self._graph.name != 'keras_graph'):
      # Only auto-track updates when the Keras Graph is the only one used.
      return
    # Ops appended to the graph while the scope was active.
    new_operations = self._graph.get_operations()[self._num_operations:]
    new_stateful_ops = set()
    # pylint: disable=protected-access
    for op in new_operations:
      # While loop is not supported in general for automatic control
      # dependencies.
      if control_flow_util.IsInWhileLoop(op):
        continue
      # Track stateful ops via `add_update`.
      is_stateful_op = (
          op.type not in self._graph._registered_ops or
          auto_control_deps.op_is_stateful(
              self._graph._registered_ops[op.type]))
      # Ignore ReadVariableOps as they are not needed to be run separately.
      # This ensures existing Layers don't get extra updates.
      if is_stateful_op and op.type != 'ReadVariableOp':
        new_stateful_ops.add(op)
    # Updates the user registered explicitly (non-tuple entries only).
    explicit_updates = set(
        [u for u in self.layer.updates if not isinstance(u, tuple)])
    # pylint: enable=protected-access
    # Don't add updates that will already be run by virtue of being consumed by
    # other stateful ops or by the Layer's outputs. This ensures that existing
    # Layers like `BatchNormalization` continue to return the same values for
    # `.update` calls.
    minimum_ops = set()
    targets = new_stateful_ops.union(
        set(nest.flatten(self.outputs)), explicit_updates)
    for op in new_stateful_ops:
      # Scrub any ops that are consumed by the outputs or other stateful ops.
      reachable = tf_utils.get_reachable_from_inputs(op)
      if not (targets - {op}).intersection(reachable):
        minimum_ops.add(op)
    new_stateful_ops = minimum_ops
    # Don't double-track updates added via explicitly calling `add_update`.
    # Also don't double-track updates already tracked in sublayers.
    new_stateful_ops = new_stateful_ops - explicit_updates
    # Decide whether to track as input-conditional or unconditional.
    input_reachable_ops = tf_utils.get_reachable_from_inputs(
        self.inputs, targets=new_stateful_ops)
    unconditional_updates = new_stateful_ops - input_reachable_ops
    conditional_updates = new_stateful_ops - unconditional_updates
    if unconditional_updates:
      self.layer.add_update(list(unconditional_updates))
    if conditional_updates:
      self.layer.add_update(list(conditional_updates), inputs=self.inputs)
def _get_var_read_dtype(input_list, should_cast):
  """Gets the dtype that AutoCastVariables should be read in (or None)."""
  if not should_cast or not input_list:
    return None
  first_dtype = input_list[0].dtype
  return first_dtype.base_dtype if first_dtype.is_floating else None
def autocast_context_manager(input_list, should_cast):
  """Returns a context manager to autocast AutoCastVariables.
  Under this context manager, if `should_cast` is True, AutoCastVariables will
  be casted. If `should_cast` is False, AutoCastVariables will not be casted,
  which can be used to disable autocasting if nested under another
  call to `autocast_context_manager`.
  Args:
    input_list: The inputs to the layer with the AutoCastVariables.
    should_cast: Whether AutoCastVariables should be casted.
  Returns:
    A context manager to automatically cast AutoCastVariables.
  """
  # None disables casting; otherwise variables read in the inputs' float dtype.
  var_read_dtype = _get_var_read_dtype(input_list, should_cast)
  return ops.get_default_graph()._enable_auto_casting_variables(  # pylint: disable=protected-access
      var_read_dtype)
def is_subclassed(layer):
  """Returns True if `layer` is defined outside of Keras' own modules."""
  module = layer.__module__
  return 'keras.engine' not in module and 'keras.layers' not in module
def check_graph_consistency(tensor, method):
  """Checks that tensors passed to `add_*` method match the Keras graph.
  When one of the `add_*` method is called inside a V2 conditional branch,
  the underlying tensor gets created in a FuncGraph managed by control_flow_v2.
  We need to raise clear error messages in such cases.
  Arguments:
    tensor: Tensor to check.
    method: Caller method, one of {'add_metric', 'add_loss', 'add_update'}.
  Raises:
    RuntimeError: In case of an out-of-graph tensor.
  """
  if ops.executing_eagerly_outside_functions() and hasattr(tensor, 'graph'):
    # A tensor born in one of these FuncGraphs came from a `tf.cond` /
    # `tf.while_loop` branch and cannot be tracked by the outer Keras graph.
    if isinstance(tensor.graph,
                  (control_flow_util_v2.CondBranchFuncGraph,
                   control_flow_util_v2.WhileCondFuncGraph,
                   control_flow_util_v2.WhileBodyFuncGraph)):
      # Pick a method-specific before/after snippet for the error message.
      if method == 'add_metric':
        bad_example = """
      def call(self, inputs, training=None):
        if training:
          metric = compute_metric(inputs)
          self.add_metric(metric, name='my_metric', aggregation='mean')
        return inputs
      """
        correct_example = """
      def call(self, inputs, training=None):
        if training:
          metric = compute_metric(inputs)
        else:
          metric = 0.
        self.add_metric(metric, name='my_metric', aggregation='mean')
        return inputs
      """
      elif method == 'add_loss':
        bad_example = """
      def call(self, inputs, training=None):
        if training:
          loss = compute_loss(inputs)
          self.add_loss(loss)
        return inputs
      """
        correct_example = """
      def call(self, inputs, training=None):
        if training:
          loss = compute_loss(inputs)
        else:
          loss = 0.
        self.add_loss(loss)
        return inputs
      """
      else:
        bad_example = """
      def call(self, inputs, training=None):
        if training:
          self.add_update(self.w.assign_add(1))
        return inputs
      """
        correct_example = """
      def call(self, inputs, training=None):
        if training:
          increment = 1
        else:
          increment = 0
        self.add_update(self.w.assign_add(increment))
        return inputs
      """
      raise RuntimeError(
          'You are using the method `{method}` in a control flow branch '
          'in your layer, e.g.:\n{bad_example}\n'
          'This is not currently supported. '
          'You should either use static control flow (`tf.cond`) '
          'or move your call to {method} out of the control flow branch, '
          'e.g.:\n{correct_example}\n'
          'You can also resolve this by marking your layer '
          'as dynamic (eager-only) by passing '
          '`dynamic=True` to the layer constructor. '
          'Any kind of control flow is supported with dynamic layers. '
          'Note that using `dynamic=True` requires you '
          'to implement static shape inference '
          'in the `compute_output_shape(input_shape)` method.'.format(
              method=method,
              bad_example=bad_example,
              correct_example=correct_example))
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
4702670109e4a19c6f148777dbb9c4f3056f8575 | 49ce55ef14f47b2c295c24a9c7ce4d68a2001335 | /cadastros/migrations/0005_categorias_lojas.py | 8cc804d8471517e42287000a9c4a26d3af06b714 | [] | no_license | ertprs/comerga2 | 0bf28e81968155c3daa9732537d8518d3a8a4d65 | 8eab4d300133affc5f9285eff6b8fcecf9bf6b5b | refs/heads/master | 2022-11-11T04:03:00.828034 | 2020-06-26T00:40:58 | 2020-06-26T00:40:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 616 | py | # Generated by Django 3.0.3 on 2020-04-29 21:23
from django.db import migrations, models
# Auto-generated Django migration: creates the `categorias_lojas` table.
# Hand edits are normally discouraged for generated migrations.
class Migration(migrations.Migration):
    dependencies = [
        ('cadastros', '0004_lojas_valor_frete'),
    ]
    operations = [
        migrations.CreateModel(
            name='categorias_lojas',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('categoria', models.CharField(max_length=50)),
                # Service fee: up to 99.99 (4 digits, 2 decimal places).
                ('taxa_servico', models.DecimalField(decimal_places=2, max_digits=4)),
            ],
        ),
    ]
| [
"richard_mbs@hotmail.com"
] | richard_mbs@hotmail.com |
8c38e613928a8e982b638c4f95ef0d9328621f80 | 2c1e7bbc853097b3b7cc5ea5676a807a8abc84b2 | /env/lib/python3.6/site-packages/dialogflow_v2/types.py | 37ed4fe65fca3f66a7ab54959f6ccb853289b5ed | [
"MIT"
] | permissive | NickDST/Interactive-Assistant-Winter | 4c304d791f14d6b1bb8c60c47cfdeff76b1bcf8c | 7b4ea5bea45201a8a091134cdfab9e8bd3419d65 | refs/heads/master | 2023-01-20T09:08:32.618098 | 2020-01-06T15:39:22 | 2020-01-06T15:39:22 | 232,119,652 | 0 | 0 | MIT | 2023-01-09T12:06:44 | 2020-01-06T14:35:51 | Python | UTF-8 | Python | false | false | 1,963 | py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import sys
from google.api_core.protobuf_helpers import get_messages
from google.api import http_pb2
from google.longrunning import operations_pb2
from google.protobuf import any_pb2
from google.protobuf import descriptor_pb2
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
from google.protobuf import struct_pb2
from google.rpc import status_pb2
from google.type import latlng_pb2
from dialogflow_v2.proto import agent_pb2
from dialogflow_v2.proto import context_pb2
from dialogflow_v2.proto import entity_type_pb2
from dialogflow_v2.proto import intent_pb2
from dialogflow_v2.proto import session_entity_type_pb2
from dialogflow_v2.proto import session_pb2
from dialogflow_v2.proto import webhook_pb2
# Re-export every protobuf message class from the listed modules under the
# `google.cloud.dialogflow_v2.types` namespace, and collect their names.
names = []
for module in (
        http_pb2,
        agent_pb2,
        context_pb2,
        entity_type_pb2,
        intent_pb2,
        session_entity_type_pb2,
        session_pb2,
        webhook_pb2,
        operations_pb2,
        any_pb2,
        descriptor_pb2,
        empty_pb2,
        field_mask_pb2,
        struct_pb2,
        status_pb2,
        latlng_pb2,
):
    for name, message in get_messages(module).items():
        # Rebrand each message so it appears to live in this types module.
        message.__module__ = 'google.cloud.dialogflow_v2.types'
        setattr(sys.modules[__name__], name, message)
        names.append(name)
# Public API of this module is exactly the re-exported message names.
__all__ = tuple(sorted(names))
| [
"learningnickk@gmail.com"
] | learningnickk@gmail.com |
dc483a9023fe74ff7067e506302695ca17823a9f | 720e4d4a4c2f7c0b8e7e7292bd861d80705786e6 | /capstone/program/Client.py | 5d4e86686c64507607c4a2a637472e68559ed2f8 | [] | no_license | tLiMiT/Capstone | f0005a2d6133351ec4f90db73b958e59d7646955 | 6e2308a431133da308a000193486058d3bf28bdf | refs/heads/master | 2020-05-28T03:00:29.176293 | 2014-04-09T20:58:55 | 2014-04-09T20:58:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 594 | py | # Client.py
#
#
import os, sys
import signal
import Configuration as configuration
from PyQt4 import QtCore, QtGui, QtNetwork
#import simplejson as json
#####################################################################
# Globals
#####################################################################
DEBUG = 1
#####################################################################
# Main
#####################################################################
if __name__ == '__main__':
    # Perform correct KeyboardInterrupt handling: restore the default SIGINT
    # handler so Ctrl+C terminates the process even under the Qt event loop.
    signal.signal(signal.SIGINT, signal.SIG_DFL)
| [
"tim.liming@gmail.com"
] | tim.liming@gmail.com |
80ecbb277308e7fb1010e5ec65eb4683e140c3fe | feba3c32aac7f17d8fbaf6ef7bb4d229844f8247 | /machine_learning/clustering/hierarchical_clustering/AgglomerativeClustering/main.py | c44aad4338e74004ce5203e18522385184a3123b | [] | no_license | lisunshine1234/mlp-algorithm-python | d48aa1336ae7c4925a0e30f4f09fa6de21f83d0e | 898359a10f65f16e94f3bb27cc61f3837806ca68 | refs/heads/master | 2023-05-01T11:11:47.465491 | 2021-05-24T13:53:40 | 2021-05-24T13:53:40 | 291,934,886 | 0 | 0 | null | 2021-05-24T13:42:15 | 2020-09-01T08:00:17 | Python | UTF-8 | Python | false | false | 4,604 | py | import numpy as np
import run as r
'''
[id]
145
[name]
AgglomerativeClustering
[input]
x_train 训练集 训练集标签数据集 二维数组 必须 定数
y_train 测试集 测试集数据集 二维数组 必须 定数
n_clusters 簇数 默认为2,要查找的集群数。如果'None'不是'distance_threshold',则必须为'None',可选整数 整数 不必须 定数
affinity 亲和力 默认为'euclidean',用于计算链接的度量。可以是'euclidean','l1','l2','manhattan','cosine'或'precomputed'。如果链接为'ward',则仅接受'euclidean'。如果为'precomputed',则需要距离矩阵(而不是相似度矩阵)作为拟合方法的输入,可选'euclidean' 字符串 不必须 定数
memory memory 默认为None,用于缓存树计算的输出。默认情况下,不进行缓存。如果给出了字符串,则它是缓存目录的路径,可选整数,字符串 字符串 不必须 定数
connectivity 连通性 默认为None,连接矩阵。为每个样本定义遵循给定数据结构的相邻样本。这可以是连通性矩阵本身,也可以是将数据转换为连通性矩阵(例如从kneighbors_graph派生)的可调用对象。默认值为None,即分层聚类算法是非结构化的,可选数组 不定数组 不必须 定数
compute_full_tree 计算全树 默认为auto,尽早在n_clusters处停止树的构建。还要注意的是,当更改群集数量并使用缓存时,计算完整树可能是有利的。如果'True'不是'distance_threshold',则必须为'None'。默认情况下,'compute_full_tree'是'auto',当'True'不是'distance_threshold'或'None'次于100或'n_clusters'之间的最大值时,等于'0.02 * n_samples'。否则,'auto'等于'False',可选布尔值,'auto' 字符串 不必须 定数
linkage 链接标准 默认为ward,使用哪个链接标准。链接标准确定要在观察组之间使用的距离。该算法将合并最小化此标准的成对集群。-ward将合并的簇的方差最小化。-平均使用两组的每个观测值的距离的平均值。-完全或最大链接使用两组所有观测值之间的最大距离。-single使用两组的所有观测值之间的最小距离,可选'ward','average','single','complete' 字符串 不必须 定数
distance_threshold 距离阈值 默认为None,链接距离阈值,超过该距离时,群集将不会合并。如果不是'None',则'n_clusters'必须为'None',而'compute_full_tree'必须为'True',可选浮点数 浮点数 不必须 定数
[output]
n_clusters_ 簇数 该算法找到的簇数。如果为'distance_threshold=None',则等于给定的'n_clusters' 整数
labels_ 标签 每个点的聚类标签 一维数组
n_leaves_ 叶子数 层次树中的叶数 整数
n_connected_components_ 组件连接数 图中估计的已连接组件数 整数
children_ children_ 每个非叶节点的子级。小于'n_samples'的值对应于作为原始样本的树的叶子。大于或等于'i'的节点'n_samples'是非叶子节点,并且具有子节点'children_[i - n_samples]'。或者,在第i次迭代中,children [i] [0]和children [i] [1]合并以形成节点'n_samples + i 二维数组
[outline]
聚集聚类以递归方式合并这对最小增加给定链接距离的聚类对。
[describe]
聚集聚类以递归方式合并这对最小增加给定链接距离的聚类对。
'''
def main(x_train, y_train,
         n_clusters=2, affinity="euclidean", memory=None, connectivity=None, compute_full_tree='auto', linkage='ward', distance_threshold=None
         ):
    """Run agglomerative clustering via `run.run` and return its result dict.

    String-typed parameters arrive serialized (e.g. "[[1,2],[3,4]]") and are
    evaluated back into Python objects before being forwarded.
    """
    # SECURITY NOTE: `eval` executes arbitrary code, so these string
    # parameters must only ever come from trusted callers. For plain
    # literals, `ast.literal_eval` would be a safer drop-in.
    def _deserialize(value):
        # Evaluate serialized parameters; pass everything else through.
        return eval(value) if isinstance(value, str) else value

    x_train = _deserialize(x_train)
    y_train = _deserialize(y_train)
    n_clusters = _deserialize(n_clusters)
    connectivity = _deserialize(connectivity)
    distance_threshold = _deserialize(distance_threshold)
    return r.run(x_train=x_train, y_train=y_train, n_clusters=n_clusters,
                 affinity=affinity,
                 memory=memory,
                 connectivity=connectivity,
                 compute_full_tree=compute_full_tree,
                 linkage=linkage,
                 distance_threshold=distance_threshold)
if __name__ == '__main__':
    # Manual smoke test against a local CSV (hard-coded Windows path).
    import numpy as np
    import json
    array = np.loadtxt('D:\\123_2.csv', delimiter=',')
    # Only use the first 20 rows to keep the run quick.
    array = array[0:20, :]
    # Last column is the label; the rest are features.
    y = array[:, -1].tolist()
    x = np.delete(array, -1, axis=1).tolist()
    array = array.tolist()
    back = main(x, y)
    print(back)
    for i in back:
        print(i + ":" + str(back[i]))
    # NOTE(review): return value of json.dumps is discarded — presumably this
    # only checks the result is JSON-serializable; confirm intent.
    json.dumps(back)
"178513111@qq.com"
] | 178513111@qq.com |
e5f870139c8ea96d9c658a11948ae275d0e20d55 | e2d756cdef92155373a2ad21c21c8882bb22fa6c | /pyexamples/examples/pdf/example.py | b14649b333ec67291c7b2405ba4b328ec7c7b7fa | [] | no_license | fred-yu-2013/avatar | 87d1e371f3ecb907cc1facbabc0163fdbf4ef321 | 65058dd243c401934054779ab31c46806507a67e | refs/heads/master | 2020-03-30T20:59:10.771106 | 2016-02-21T05:50:06 | 2016-02-21T05:50:06 | 7,955,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,014 | py | # -*- coding: utf-8 -*-
import StringIO
import wx
from pyPdf import PdfFileWriter, PdfFileReader
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import letter, A4, landscape
from reportlab.lib.utils import ImageReader
def covert_page():
""" 读取a.pdf文件的一个页面,添加一行字,存到b.pdf
"""
packet = StringIO.StringIO()
# create a new PDF with Reportlab
can = canvas.Canvas(packet, pagesize=letter)
can.drawString(10, 100, "Hello world")
can.save()
#move to the beginning of the StringIO buffer
packet.seek(0)
new_pdf = PdfFileReader(packet)
# read your existing PDF
existing_pdf = PdfFileReader(file("document1.pdf", "rb"))
output = PdfFileWriter()
# add the "watermark" (which is the new pdf) on the existing page
page = existing_pdf.getPage(0)
page.mergePage(new_pdf.getPage(0))
output.addPage(page)
# finally, write "output" to a real file
outputStream = file("destination.pdf", "wb")
output.write(outputStream)
outputStream.close()
def bmp2pdf():
""" bmp -> pdf
"""
c = canvas.Canvas('bmp2pdf.pdf', pagesize=landscape(A4))
(w, h) = landscape(A4)
width, height = letter
#c.drawImage(filename, inch, height - 2 * inch) # Who needs consistency?
c.drawImage('bmp2pdf.bmp', 0, 0, w, h)
c.showPage()
c.save()
def StringIO2pdf():
""" bmp -> StringIO -> ImageReader -> pdf
"""
with open('bmp2pdf.bmp', 'rb') as f:
buf = StringIO.StringIO(f.read())
ir = ImageReader(buf)
c = canvas.Canvas('StringIO2pdf.pdf', pagesize=landscape(A4))
(w, h) = landscape(A4)
width, height = letter
#c.drawImage(filename, inch, height - 2 * inch) # Who needs consistency?
c.drawImage(ir, 0, 0, w, h)
c.showPage()
c.save()
class MainFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, -1, 'wxBitmap2pdf')
self.wxBitmap2pdf()
self.Show()
def wxBitmap2pdf(self):
""" bmp -> StringIO -> ImageReader -> pdf
"""
bitmap = wx.Bitmap('bmp2pdf.bmp', wx.BITMAP_TYPE_BMP)
image = wx.ImageFromBitmap(bitmap)
buf = StringIO.StringIO(image.GetDataBuffer())
# bitmap.CopyToBuffer(buf)
# bitmap.SaveFile(buf, wx.BITMAP_TYPE_BMP)
fbuf = wx.FFileOutputStream(buf)
image.SaveFile(buf, wx.BITMAP_TYPE_BMP)
# bitmap.SaveFile(buf, wx.BITMAP_TYPE_BMP)
# image.
# ir = ImageReader(buf)
# c = canvas.Canvas('wxBitmap2pdf.pdf', pagesize=landscape(A4))
# (w, h) = landscape(A4)
# width, height = letter
# #c.drawImage(filename, inch, height - 2 * inch) # Who needs consistency?
# c.drawImage(ir, 0, 0, w, h)
# c.showPage()
# c.save()
if __name__ == '__main__':
# covert_page()
# bmp2pdf()
# StringIO2pdf()
# INFO: Test with wx.
app = wx.PySimpleApp()
frame = MainFrame()
app.MainLoop()
| [
"chentj070921@yeah.net"
] | chentj070921@yeah.net |
0f5fc401e8158ec04382d8aaaa0e1344c53b050f | 6b0f5e42914d3c173f3f193f516b6c354572f25c | /s5/5.8_calcular_edad.py | 84a23c8c9e946d1bedb710c8ba6dd96468e2ecfd | [] | no_license | cesarco/MisionTic | 91c72774bbbefe4b5f265ea1ffb5839589f1447e | 8a2df5446bd6f77e4819d12af9c950921294ac1c | refs/heads/master | 2023-07-18T22:48:44.462973 | 2021-09-21T19:53:11 | 2021-09-21T19:53:11 | 390,785,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | print('programa para calcular la edad')
y = float(input('Digite su año de nacimiento'))
edad = 2021 - y
print('Su edad es: ', y) | [
"rodriguezcesar_125@outlook.es"
] | rodriguezcesar_125@outlook.es |
344f3f42d412b4739b449dc2636318d7bc673afe | 9c4d5fbc4f43b938f9abf5dac903aad82a6b792e | /getofiosxe.py | 3411c9b9d2efbd2ed984d75f1b1b399177a338d8 | [] | no_license | Anand111993/Cisco_Devnet_Practice | 18912a76ace1228d35be0f24b4cb7251dae87431 | d6a55910ab9639c6d78a3d325bff5c6e8118a464 | refs/heads/main | 2023-04-19T10:50:22.723899 | 2021-05-03T06:30:44 | 2021-05-03T06:30:44 | 363,837,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | from ncclient import manager
import xmltodict
host = 'sandbox-iosxe-latest-1.cisco.com'
port = 830
username = 'developer'
password = 'C1sco12345'
get_rply = manager.connect(host=host, port=port, username=username, password=password, hostkey_verify=False,
look_for_keys=False)
for rply in get_rply.server_capabilities:
print(rply)
get_rply.close_session() | [
"noreply@github.com"
] | Anand111993.noreply@github.com |
1d90ee6dc0cce81b7ee6e5ebc395a18ae771e9a8 | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/app/util/navigation/__init__.pyi | 50c892e49919e06c449a9ea0c91e410ecd8bb2e3 | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 195 | pyi | from .GoToAddressLabelDialog import GoToAddressLabelDialog as GoToAddressLabelDialog
from .GoToQuery import GoToQuery as GoToQuery
from .GoToServiceImpl import GoToServiceImpl as GoToServiceImpl
| [
"tsunekou1019@gmail.com"
] | tsunekou1019@gmail.com |
65f3545961c9f0bfa1503676816c493caf570322 | 5785283842cf91ed4ec7504336ace4856786e645 | /manage.py | 7d34d2b26899c87a6b210ffe91541e6099219fdb | [] | no_license | HanaRsm/taskdayone | a5a6d53ed0d822ef08af5c06271bfe145e747c25 | f4ff31f8d30a9ff5b500515bce2c946fda9121d2 | refs/heads/master | 2020-03-22T03:50:35.668553 | 2018-07-02T14:46:53 | 2018-07-02T14:46:53 | 139,455,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "taskdayone.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"h.alrassam@live.com"
] | h.alrassam@live.com |
695b3cb3035008a1da7f9885276b2351ac915ccb | 3e3178e235f328e7f7f7aab2b2a62c3d88f40ea8 | /disp2_ws/build/agent_dispatcher/catkin_generated/pkg.develspace.context.pc.py | 623c89c750fdbab6b9d05849c9a8df29e6e7372a | [] | no_license | annodomini1/mag_nal | f9809ac655f4e3007bbf233af87da9829c17fa6b | ef98ce18849c426b09f25548051ae3627e3bac0b | refs/heads/master | 2022-12-25T07:26:25.882938 | 2020-10-01T13:12:45 | 2020-10-01T13:12:45 | 299,103,205 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "agent_dispatcher"
PROJECT_SPACE_DIR = "/home/martin/disp2_ws/devel"
PROJECT_VERSION = "0.0.0"
| [
"martin93.knap@yahoo.com"
] | martin93.knap@yahoo.com |
40b8332ba9e127f5c02645a472d0bb8b7504ae6d | 97af6e799b61c1ba76651e72fcdc7e8f86dbc513 | /src/lib/pysoarlib/__init__.py | f1ece3082db181cce3dc03befc429fbb6b3d66bc | [
"MIT"
] | permissive | igorviniciusavanci/soar | 6d00dfa66ff9ac2bd296146f829fef21c60e5f8f | 87329247bb252fdb283f8e05471486433862bbad | refs/heads/master | 2023-04-13T18:10:52.179291 | 2021-04-11T07:01:33 | 2021-04-11T07:01:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,480 | py | """ Helper classes and functions for creating a soar agent and working with SML
Depends on the Python_sml_ClientInterface, so make sure that SOAR_HOME is on the PYTHONPATH
SoarAgent and AgentConnector are used to create an agent
WMInterface is a standardized interface for adding/removing structures from working memory
SoarWME is a wrapper for creating working memory elements
SVSCommands will generate svs command strings for some common use cases
Also adds helper methods to the Identifier class to access children more easily
(See IdentifierExtensions)
"""
import Python_sml_ClientInterface as sml
__all__ = ["WMInterface", "SoarWME", "SVSCommands", "AgentConnector", "SoarAgent", "TimeConnector"]
# Extend the sml Identifier class definition with additional utility methods
from .IdentifierExtensions import *
sml.Identifier.GetChildString = get_child_str
sml.Identifier.GetChildInt = get_child_int
sml.Identifier.GetChildFloat = get_child_float
sml.Identifier.GetChildId = get_child_id
sml.Identifier.GetAllChildIds = get_all_child_ids
sml.Identifier.GetAllChildValues = get_all_child_values
sml.Identifier.GetAllChildWmes = get_all_child_wmes
sml.Identifier.__lt__ = lambda self, other: self.GetIdentifierSymbol() < other.GetIdentifierSymbol()
from .WMInterface import WMInterface
from .SoarWME import SoarWME
from .SVSCommands import SVSCommands
from .AgentConnector import AgentConnector
from .SoarAgent import SoarAgent
from .TimeConnector import TimeConnector
| [
"kieran.gill1997@gmail.com"
] | kieran.gill1997@gmail.com |
1caf740ad3e307091cbf9e198e933bae1d1c7a34 | 16f8fbcfd7e67abd42f4128a11d5b6134b304101 | /App/models.py | 72c756dca8b1a387d7f3bd351fb869212125a9ea | [] | no_license | xiaodong-Ren/mytest | 2b64d5d8a084e5bdd5a5dcae42440866a9a0cd0b | 472d88f7f0ab3bd2a7a7998bbe15fb28b26a4c9c | refs/heads/master | 2022-11-15T09:06:36.763718 | 2020-07-22T13:35:16 | 2020-07-22T13:35:16 | 276,074,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | # -*- coding: utf-8 -*-
from App import db
ROLE_USER = 0
ROLE_ADMIN = 1
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
nickname=db.Column(db.String(60), index=True, unique=True)
email = db.Column(db.String(120), index=True, unique=True)
role = db.Column(db.SmallInteger, default=ROLE_USER)
def __repr__(self):
return "<User %r>" % self.nickname | [
"2550828616@qq.com"
] | 2550828616@qq.com |
a0a362fb3cf297d127447c05947dad5d44f76ce3 | ebee11af2d66615a2c5c97b4dbffcfc142ee40bb | /torchgen/static_runtime/config.py | bfcab625e2e366812c0a4ece160deff367cf4487 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | timgates42/pytorch | e600e945a366223232ff4d4ddcafe659fdefc0cf | a25df29cc4a64bfc75cf3415cb941ae66ef22201 | refs/heads/master | 2023-03-15T17:57:17.622007 | 2022-07-13T17:01:11 | 2022-07-13T20:24:37 | 227,502,054 | 0 | 0 | NOASSERTION | 2019-12-12T02:22:11 | 2019-12-12T02:22:11 | null | UTF-8 | Python | false | false | 13,607 | py | from torchgen.model import NativeFunctionsGroup, NativeFunctionsViewGroup
from typing import Dict, Union
def func_name_base_str(g: Union[NativeFunctionsGroup, NativeFunctionsViewGroup]) -> str:
if isinstance(g, NativeFunctionsGroup):
return str(g.functional.func.name.name.base)
else:
return str(g.view.root_name)
is_hand_written_ops_ = frozenset(
(
"abs",
"add",
"addmm",
"all",
"any",
"argmin",
"bmm",
"clamp",
"clamp_min",
"cumsum",
"div",
"fmod",
"index_select",
"leaky_relu",
"linear",
"log",
"matmul",
"mul",
"narrow_copy",
"nonzero",
"pow",
"remainder",
"sigmoid",
"sign",
"sub",
"tanh",
"detach",
"expand_as",
"flatten",
"narrow",
"reshape_as",
"select",
"slice",
"softmax",
"split",
"squeeze",
"transpose",
"view",
"where",
)
)
def is_hand_written(g: NativeFunctionsGroup) -> bool:
name_base = func_name_base_str(g)
return name_base in is_hand_written_ops_
def override_test_values(arg_map: Dict[str, str], op_name: str, index: int) -> None:
assert index == 0 or index == 1
if op_name == "addr":
if index == 0:
arg_map["self"] = "at::rand({6, 6})"
arg_map["vec1"] = "at::rand({6})"
arg_map["vec2"] = "at::rand({6})"
else:
arg_map["self"] = "at::rand({22, 22})"
arg_map["vec1"] = "at::rand({22})"
arg_map["vec2"] = "at::rand({22})"
return
if op_name == "mv":
if index == 0:
arg_map["self"] = "at::rand({6, 6})"
arg_map["vec"] = "at::rand({6})"
else:
arg_map["self"] = "at::rand({22, 22})"
arg_map["vec"] = "at::rand({22})"
return
if op_name == "addbmm":
if index == 0:
arg_map["self"] = "at::rand({6, 6})"
else:
arg_map["self"] = "at::rand({22, 22})"
return
if op_name == "cross":
if index == 0:
arg_map["self"] = "at::rand({3, 3, 3})"
arg_map["other"] = "at::rand({3, 3, 3})"
else:
arg_map["self"] = "at::rand({22, 3, 22})"
arg_map["other"] = "at::rand({22, 3, 22})"
return
if op_name == "take":
if index == 0:
arg_map["index"] = "at::randint(0, 216, {20}, torch::kInt64)"
else:
arg_map["index"] = "at::randint(0, 1000, {100}, torch::kInt64)"
return
if op_name == "take_along_dim":
if index == 0:
arg_map["indices"] = "at::argsort(self0, 1, true)"
else:
arg_map["indices"] = "at::argsort(self1, 1, true)"
return
if op_name == "masked_select":
if index == 0:
arg_map["mask"] = "at::randn({6, 6, 6}) > 0.5"
else:
arg_map["mask"] = "at::rand({22, 22, 22}) > 0.5"
return
if op_name == "orgqr":
if index == 0:
arg_map["input2"] = "at::rand({6, 6})"
else:
arg_map["input2"] = "at::rand({22, 22})"
return
if op_name == "ormqr":
if index == 0:
arg_map["input2"] = "at::rand({6, 6})"
else:
arg_map["input2"] = "at::rand({22, 22})"
return
if op_name == "quantile":
if index == 0:
arg_map["q"] = "at::rand({6})"
arg_map["interpolation"] = '"linear"'
else:
arg_map["q"] = "at::rand({22})"
arg_map["interpolation"] = '"linear"'
return
if op_name == "nanquantile":
if index == 0:
arg_map["q"] = "at::rand({6})"
arg_map["interpolation"] = '"linear"'
else:
arg_map["q"] = "at::rand({22})"
arg_map["interpolation"] = '"linear"'
return
if op_name == "multi_margin_loss":
if index == 0:
arg_map["self"] = "at::rand({6, 6})"
arg_map["target"] = "at::randint(6, {6}, torch::kInt64)"
arg_map["weight"] = "at::rand({6})"
else:
arg_map["self"] = "at::rand({22, 22})"
arg_map["target"] = "at::randint(22, {22}, torch::kInt64)"
arg_map["weight"] = "at::rand({22})"
return
if op_name == "multilabel_margin_loss":
if index == 0:
arg_map["self"] = "at::rand({6, 6})"
arg_map["target"] = "at::randint(6, {6, 6}, torch::kInt64)"
else:
arg_map["self"] = "at::rand({22, 22})"
arg_map["target"] = "at::randint(22, {22, 22}, torch::kInt64)"
return
if op_name == "nll_loss":
if index == 0:
arg_map["self"] = "at::rand({6, 6})"
arg_map["target"] = "at::randint(6, {6}, torch::kInt64)"
arg_map["weight"] = "at::rand({6})"
else:
arg_map["self"] = "at::rand({22, 22})"
arg_map["target"] = "at::randint(22, {22}, torch::kInt64)"
arg_map["weight"] = "at::rand({22})"
return
if op_name == "nll_loss2d":
if index == 0:
arg_map["self"] = "at::rand({6, 6, 6, 6})"
arg_map["target"] = "at::randint(6, {6, 6, 6}, torch::kInt64)"
arg_map["weight"] = "at::rand({6})"
else:
arg_map["self"] = "at::rand({22, 22, 22, 22})"
arg_map["target"] = "at::randint(22, {22, 22, 22}, torch::kInt64)"
arg_map["weight"] = "at::rand({22})"
return
if op_name in (
"fft_fft",
"fft_ifft",
"fft_rfft",
"fft_irfft",
"fft_hfft",
"fft_ihfft",
):
arg_map["norm"] = '"forward"'
return
if op_name == "linalg_tensorinv":
if index == 0:
arg_map["self"] = "at::rand({6, 6, 6, 6})"
arg_map["ind"] = "2"
else:
arg_map["self"] = "at::rand({22, 22, 22, 22})"
arg_map["ind"] = "2"
return
if op_name == "addmv":
if index == 0:
arg_map["self"] = "at::rand({2})"
arg_map["mat"] = "at::rand({2, 2})"
arg_map["vec"] = "at::rand({2})"
else:
arg_map["self"] = "at::rand({35})"
arg_map["mat"] = "at::rand({35, 35})"
arg_map["vec"] = "at::rand({35})"
return
if op_name == "acosh":
if index == 0:
arg_map["self"] = "at::rand({2, 2, 2}) + at::ones({2, 2, 2})"
else:
arg_map["self"] = "at::rand({5, 5, 5}) + at::ones({5, 5, 5})"
return
if op_name == "adaptive_max_pool2d_backward":
if index == 0:
arg_map["grad_output"] = "at::randint(-3, 2, {2,2,2})"
arg_map["self"] = "at::randint(-3, 2, {2,2,2})"
arg_map["indices"] = "at::randint(0, 1, {2,2,2}, at::kLong)"
else:
arg_map["grad_output"] = "at::randint(-3, 3, {3,3,3})"
arg_map["self"] = "at::randint(-3, 2, {3,3,3})"
arg_map["indices"] = "at::randint(0, 1, {3,3,3}, at::kLong)"
return
if op_name == "adaptive_max_pool3d_backward":
if index == 0:
arg_map["grad_output"] = "at::randint(-3, 2, {2,2,2,2})"
arg_map["self"] = "at::randint(-3, 2, {2,2,2,2})"
arg_map["indices"] = "at::randint(0, 1, {2,2,2,2}, at::kLong)"
else:
arg_map["grad_output"] = "at::randint(-3, 3, {3,3,3,3})"
arg_map["self"] = "at::randint(-3, 2, {3,3,3,3})"
arg_map["indices"] = "at::randint(0, 1, {3,3,3,3}, at::kLong)"
return
if op_name == "gather":
if index == 0:
arg_map["self"] = "at::randint(1, 100, {2,2,2}, at::kInt)"
arg_map["dim"] = "1"
arg_map["index"] = "at::randint(0, 1, {2,2,2}, torch::kInt64)"
arg_map["sparse_grad"] = "false"
else:
arg_map["self"] = "at::randint(1, 100, {5,5,5}, at::kInt)"
arg_map["dim"] = "1"
arg_map["index"] = "at::randint(0, 4, {5,5,5}, torch::kInt64)"
arg_map["sparse_grad"] = "false"
return
if op_name == "gelu":
if index == 0:
arg_map["self"] = "at::rand({6, 6, 6})"
arg_map["approximate"] = '"tanh"'
else:
arg_map["self"] = "at::rand({22, 22, 22})"
arg_map["approximate"] = '"tanh"'
return
if op_name == "gelu_backward":
if index == 0:
arg_map["grad_output"] = "at::rand({6, 6, 6})"
arg_map["self"] = "at::rand({6, 6, 6})"
arg_map["approximate"] = '"tanh"'
else:
arg_map["grad_output"] = "at::rand({22, 22, 22})"
arg_map["self"] = "at::rand({22, 22, 22})"
arg_map["approximate"] = '"tanh"'
return
if op_name == "index_add":
if index == 0:
arg_map["self"] = "at::rand({2})"
arg_map["dim"] = "0"
arg_map["index"] = "at::randint(0, 1, {2}, at::kInt)"
arg_map["source"] = "at::rand({2})"
arg_map["alpha"] = "2"
else:
arg_map["self"] = "at::rand({16})"
arg_map["dim"] = "0"
arg_map["index"] = "at::randint(0, 10, {16}, at::kInt)"
arg_map["source"] = "at::rand({16})"
arg_map["alpha"] = "2"
return
if op_name == "index_copy":
if index == 0:
arg_map["self"] = "at::rand({2})"
arg_map["dim"] = "0"
arg_map["index"] = "at::randint(0, 1, {2}, at::kLong)"
arg_map["source"] = "at::rand({2})"
else:
arg_map["self"] = "at::rand({32})"
arg_map["dim"] = "0"
arg_map["index"] = "at::randint(0, 10, {32}, at::kLong)"
arg_map["source"] = "at::rand({32})"
return
if op_name == "linalg_cross":
if index == 0:
arg_map["self"] = "at::rand({6, 3, 6})"
arg_map["other"] = "at::rand({6, 3, 6})"
arg_map["dim"] = "1"
else:
arg_map["self"] = "at::rand({22, 3, 22})"
arg_map["other"] = "at::rand({22, 3, 22})"
arg_map["dim"] = "1"
return
if op_name == "nll_loss_backward":
if index == 0:
arg_map["grad_output"] = "at::rand({})"
arg_map["self"] = "at::rand({6})"
arg_map["target"] = "at::randint(0, 5, {6}, torch::kInt64)"
arg_map["weight"] = "at::rand({6})"
arg_map["reduction"] = "1"
arg_map["ignore_index"] = "1"
arg_map["total_weight"] = "at::rand({})"
else:
arg_map["grad_output"] = "at::rand({})"
arg_map["self"] = "at::rand({36})"
arg_map["target"] = "at::randint(0, 11, {36}, torch::kInt64)"
arg_map["weight"] = "at::rand({36})"
arg_map["reduction"] = "1"
arg_map["ignore_index"] = "1"
arg_map["total_weight"] = "at::rand({})"
return
if op_name in ["scatter", "scatter_add", "_scatter_reduce"]:
if index == 0:
arg_map["self"] = "at::randint(1, 100, {2,2,2}, torch::kInt64)"
arg_map["index"] = "at::randint(0, 1, {2,2,2}, torch::kInt64)"
arg_map["src"] = "at::randint(1, 100, {2,2,2}, torch::kInt64)"
else:
arg_map["self"] = "at::randint(1, 100, {5,5,5}, torch::kInt64)"
arg_map["index"] = "at::randint(0, 1, {5,5,5}, torch::kInt64)"
arg_map["src"] = "at::randint(1, 100, {5,5,5}, torch::kInt64)"
if "reduce" in arg_map:
arg_map["reduce"] = '"sum"' if op_name == "_scatter_reduce" else '"add"'
return
if op_name == "scatter_reduce":
arg_map["reduce"] = '"mean"'
if index == 0:
arg_map["index"] = "at::randint(6, {6, 6, 6}, torch::kInt64)"
else:
arg_map["index"] = "at::randint(22, {22, 22, 22}, torch::kInt64)"
return
if op_name == "special_zeta":
if index == 0:
arg_map["self"] = "at::rand({2,2,2}, at::kDouble) + at::ones({2,2,2})"
arg_map["other"] = "at::rand({2,2,2}, at::kDouble) + at::ones({2,2,2})"
else:
arg_map["self"] = "at::rand({5,5,5}, at::kDouble) + at::ones({5,5,5})"
arg_map["other"] = "at::rand({5,5,5}, at::kDouble) + at::ones({5,5,5})"
return
if op_name == "_convert_indices_from_csr_to_coo":
if index == 0:
arg_map["crow_indices"] = "torch::tensor({1}, torch::kInt32)"
arg_map["col_indices"] = "torch::tensor({0, 1, 0}, torch::kInt32)"
arg_map["out_int32"] = "false"
else:
arg_map["crow_indices"] = "torch::tensor({0}, torch::kInt32)"
arg_map[
"col_indices"
] = "torch::tensor({0, 1, 0, 2, 1, 2, 0, 1, 0, 2, 1, 2}, torch::kInt32)"
arg_map["out_int32"] = "false"
return
if op_name == "_convert_indices_from_coo_to_csr":
if index == 0:
arg_map["self"] = "at::randint(0, 3, {2}, at::kInt)"
arg_map["size"] = "10"
arg_map["out_int32"] = "false"
else:
arg_map["self"] = "at::randint(0, 3, {12}, at::kInt)"
arg_map["size"] = "24"
arg_map["out_int32"] = "false"
return
if op_name in ("diagonal", "linalg_diagonal"):
arg_map["offset"] = "0"
arg_map["dim0"] = "1"
arg_map["dim1"] = "2"
return
| [
"pytorchmergebot@users.noreply.github.com"
] | pytorchmergebot@users.noreply.github.com |
a90df0f8301847cbcdea57ba21e0c8f2f9530cd0 | b19c5899fe3804067ed8a3a7985b134e85fcef92 | /my_memory_card.py | 6c4b1406ade7cc41be12f20adc99c5100f5886c4 | [
"CC0-1.0"
] | permissive | Artem554-dev/nddaf | 8a8ea365955417e59f6da4689580011c43f4efdc | c2ba6d9c4e8eb1593946d645bc99118d2d26e1ca | refs/heads/main | 2023-04-06T08:43:05.671082 | 2021-04-18T13:47:56 | 2021-04-18T13:47:56 | 359,150,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,224 | py | from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QLabel, QMessageBox, QRadioButton, QHBoxLayout, QGroupBox
from random import shuffle
app = QApplication([])
window = QWidget()
window.resize(500,400)
otv = QPushButton("Ответить", window)
otv.move(200,300)
la_qes = QLabel("Вопрос", window)
la_qes.move(200,20)
RadioGroupBox = QGroupBox("Варианты ответа")
rd = QRadioButton("12",window)
rd.move(200,100)
rd1 = QRadioButton("13",window)
rd1.move(300,100)
rd2 = QRadioButton("1",window)
rd2.move(100,200)
rd3 = QRadioButton("2",window)
rd3.move(300,200)
number = 0
all_ans = 0
ri_ans = 0
AnsGroup = QGroupBox("Результаты теста")
la_prav = QLabel('Правельно или нет', window)
la_otv = QLabel('Ответы',window)
la_prav.move(0,0)
la_otv.move(200,200)
sled = QPushButton('Следующий вопрос',window)
sled.move(200,300)
window.setWindowTitle("Memory Card")
class Question():
def __init__(self, question, right_answer, answer2, answer3, answer4):
self.question = question
self.rightanswer = right_answer
self.answer2 = answer2
self.answer3 = answer3
self.answer4 = answer4
question_list = []
question_list.append(Question('press F в какой игре была первой','в call of duty','в Payday 2','',''))
question_list.append(Question('сколько частей сталкер','3','2','1','4'))
question_list.append(Question('сколько надо class-D чтобы почистить камеру scp-173','3','2','4','1'))
question_list.append(Question('сколько парталов может быть одновреммено открыто в portal 2','4','2','1','3'))
question_list.append(Question('Какая часть call of duty была лудшей','modern Warfarem 2','modern Warfarem 4','modern Warfarem','modern Warfarem 3'))
question_list.append(Question('сколько частей метро','4','2','1','3'))
question_list.append(Question('какая видео карта лучшая','3080 ti','2080 ti','3080','2080'))
question_list.append(Question('какой процессор лучший','AMD','intel 7','нет','нет'))
question_list.append(Question('','','','',''))
question_list.append(Question('','','','',''))
question_list.append(Question('','','','',''))
question_list.append(Question('','','','',''))
question_list.append(Question('','','','',''))
question_list.append(Question('','','','',''))
question_list.append(Question('','','','',''))
def Question():
la_prav.hide()
la_otv.hide()
sled.hide()
rd.show()
rd1.show()
rd2.show()
rd3.show()
otv.show()
la_qes.show()
def Answer():
la_prav.show()
la_otv.show()
sled.show()
rd.hide()
rd1.hide()
rd2.hide()
rd3.hide()
otv.hide()
la_qes.hide()
answer = [rd, rd1, rd2, rd3]
def ask(question , rigt_answer, answer2, answer3, answer4):
shuffle(answer)
answer[0].setText(rigt_answer)
answer[1].setText(answer2)
answer[2].setText(answer3)
answer[3].setText(answer4)
la_qes.setText(question)
la_otv.setText(rigt_answer)
Question()
def check_answer():
global all_ans
global ri_ans
all_ans += 1
if answer[0].isChecked():
ri_ans += 1
la_prav.setText("Правильно!\n Статистика:"+str(ri_ans)+"/"+str(all_ans))
else:
if answer[1].isChecked() or answer[2].isChecked() or answer[3].isChecked():
la_prav.setText("Нихтправельно!\n Статистика:"+str(ri_ans)+"/"+str(all_ans))
Answer()
def next_question():
global number
if number >= len(question_list):
number = 0
q = question_list[number]
ask(q.question,q.rightanswer,q.answer2,q.answer3,q.answer4)
number += 1
window.setWindowTitle("Memory Card")
ask("Варианты ответа","2","1","13","12")
otv.clicked.connect(check_answer)
sled.clicked.connect(next_question)
window.show()
app.exec() | [
"noreply@github.com"
] | Artem554-dev.noreply@github.com |
baa029633fc67b046e1cb9d2e0e8ac45b91aead8 | 9e1e4fe4fc57b93ca75ff5343565e226a3f16978 | /setup.py | 77e65612710e6031100987135da7b610b1129833 | [
"Apache-2.0"
] | permissive | pradeepkr103/checkmate | 4a5a0f508df635c0c31f56a89c8441041916ac0d | 3b95601b6e35e62cd157a1ac408e0ca55c46d014 | refs/heads/master | 2020-12-12T01:03:40.945084 | 2020-01-15T05:01:22 | 2020-01-15T05:01:22 | 234,003,051 | 0 | 0 | Apache-2.0 | 2020-01-15T05:10:05 | 2020-01-15T05:10:04 | null | UTF-8 | Python | false | false | 852 | py | from setuptools import setup
setup(
name="checkmate",
version="0.1.0",
description="Checkmate prevents you from OOMing when training big deep neural nets",
packages=["checkmate"], # find_packages()
python_requires=">=3.5",
install_requires=[
"matplotlib", # this is only used once in the core checkmate package
"numpy",
"pandas",
"toposort",
"psutil",
],
extras_require={
"eval": [
"graphviz",
"keras_segmentation @ https://github.com/ajayjain/image-segmentation-keras/archive/master.zip#egg=keras_segmentation-0.2.0remat",
"python-dotenv",
"ray>=0.7.5",
"redis",
"scipy",
"seaborn",
"tqdm",
],
"test": ["graphviz", "pytest", "tensorflow>=2.0.0"],
},
)
| [
"noreply@github.com"
] | pradeepkr103.noreply@github.com |
6f7c3eebdf06407cee5d8e9e62976c7a454ff836 | e3a25b40812b6b70f10b52a6f66f9348dcc251a6 | /algorithm/0402codeAD/구슬고르기복습.py | ae0fd9e3bf77bee8c5bdacf9d2e3d4790c8f7305 | [] | no_license | yoonwoo123/python101 | 75643cb5dcf411c9ddcf988bf09bb88e4523206c | 637dce64a6320a6f46eb941e33e8e9f6ee41c910 | refs/heads/master | 2020-04-14T11:30:43.018126 | 2019-07-25T08:28:31 | 2019-07-25T08:28:31 | 163,815,689 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,620 | py | import sys
sys.stdin = open("구슬고르기_input.txt")
# 김은경 샘 코드
def DFS1(n): # 중복순열
if n>N:
for i in range(1, N+1): print(arr[i], end=' ')
print()
return
for i in range(1, 7):
arr[n]=i
DFS1(n+1)
def DFS3(n): # 순열
if n>N:
for i in range(1, N+1): print(arr[i], end=' ')
print()
return
for i in range(1, 7):
if chk[i]:continue
chk[i]=1 # 순열은 체크 해야함
arr[n]=i
DFS3(n+1)
chk[i]=0 # 순열은 체크해제도 해야함
def DFS2(n, start): # 중복조합
if n>N:
for i in range(1, N+1): print(arr[i], end=' ')
print()
return
for i in range(start, 7): # 시작은 스타트부터
arr[n]=i
DFS2(n+1, i) # 스타트업데이트는 start가 아닌 i
def DFS4(n, start): # 조합
if n>N:
for i in range(1, N+1): print(arr[i], end=' ')
print()
return
for i in range(start, 7):
arr[n]=i
DFS4(n+1, i+1) # 조합은 i 가 아닌 i + 1 중요!
#main---------------------------------
N, M = map(int, input().split())
arr =[0] * (N+1)
chk = [0] * 7
if M ==1: DFS1(1)
elif M ==3 : DFS3(1)
elif M == 2: DFS2(1, 1)
elif M ==4: DFS4(1,1)
# def ovperm(n, k):
# if n == k:
# for g in p:
# print(g, end=" ")
# print()
# else:
# for i in range(k, n):
# a[i], a[k] = a[k], a[i]
# p[k] = a[i]
# perm(n, k+1)
# # perm(n-1, k+1)
# a[i], a[k] = a[k], a[i]
#
# def DFS(no): # chk를 하면 순열 chk를 하지 않으면 중복순열
# if no >= N:
# for i in range(N):
# print(b[i], end=" ")
# print()
# return
# for i in range(6):
# # if chk[i]:continue # 1이면 continue, 0이면 진행
# # chk[i] = 1
# b[no] = a[i]
# DFS(no + 1)
# # chk[i] = 0
#
# def comb(no):
# if no >= N:
# for i in range(N):
# print(b[i], end=" ")
# print()
# return
# b[no] = a[no]
# comb(no + 1)
# b[no] = 0
# comb(no + 1)
#
# # def combs(no, start): # a[no]번째 구슬을 상자에 담거나 담지 않는 모든 경우
# # for i in range(N): print(b[i], end=" ")
# # print()
# # if no >= N or start >= N:
# # return
# # for i in range(start, N):
# # b[no] = a[i]
# # combs(no+1, i+1)
# # b[no] = 0
#
# N = int(input())
# a = [n for n in range(1, 7)]
# b = [0] * N
# chk = [0] * N
# # DFS(0)
# # comb(0)
# DFS(0)
| [
"lkkjasd@korea.ac.kr"
] | lkkjasd@korea.ac.kr |
d9c8492de65e97d31698c6af19a73287bb786556 | c3ee04757a79b2a29979c9719866e7ff779bae13 | /greeting/app/service.py | b879d716b282cae2fc6e8d0a692950b513e56344 | [] | no_license | ryklebaron/dockercourse | e5423c459a6d66e157e57c59c3479aca71feb534 | dabee2c25edc36eea6fbf32d7615f6ff7f6eb573 | refs/heads/master | 2021-08-14T06:54:34.934267 | 2017-11-14T21:55:25 | 2017-11-14T21:55:25 | 110,748,593 | 1 | 0 | null | 2017-11-14T21:46:37 | 2017-11-14T21:46:37 | null | UTF-8 | Python | false | false | 774 | py | import cherrypy
import os
import simplejson
STATIC_DIR = os.path.join(os.path.abspath("."), u"static")
class AjaxApp(object):
@cherrypy.expose
def index(self):
return open(os.path.join(STATIC_DIR, u'index.html'))
@cherrypy.expose
def submit(self, name):
cherrypy.response.headers['Content-Type'] = 'application/json'
format = cherrypy.config.get('greeting.format')
return simplejson.dumps({'title': format.format(name)})
# return simplejson.dumps({'title': "Hello, %s" % name})
cherrypy.config.update("service.conf")
config = {'/static': {
'tools.staticdir.on' : True,
'tools.staticdir.dir' : STATIC_DIR
}}
cherrypy.tree.mount(AjaxApp(), '/', config=config)
cherrypy.engine.start()
cherrypy.engine.block()
| [
"bertusbaron@gmail.com"
] | bertusbaron@gmail.com |
64470eabc4f21b566772c5520ce1936bf45aabd6 | 4d80d84b1bbc3c7cd395a521485cd61aab3a21d5 | /abstract_test.py | ddb35b9208619bccd74453e50175bde8e560e57a | [] | no_license | saverymax/blobrunner | b83bc752cb8b10e65bc9cf47d54fbdbb81729f38 | 3b0b9894af8a1e4200b11862de2c06c62c697890 | refs/heads/master | 2021-04-27T00:51:52.689172 | 2018-03-05T22:39:59 | 2018-03-05T22:39:59 | 122,662,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,428 | py | from textblob import TextBlob
text = "Drosophila suzukii (Matsumura) (Diptera: Drosophilidae) is a damaging pest of fruit. Reproductively diapausing adults overwinter in woodlands and remain active on warmer winter days. It is unknown if this adult phase of the lifecycle feeds during the winter period, and what the food source may be. This study characterized the flora in the digestive tract of D. suzukii using a metagenomics approach. Live D. suzukii were trapped in four woodlands in the south of England and their guts dissected for DNA extraction and amplicon-based metagenomics sequencing (internal transcribed spacer and 16S rRNA). Analysis at genus and family taxonomic levels showed high levels of diversity with no differences in digestive tract bacterial or fungal biota between woodland sites of winter-form D. suzukii. Female D. suzukii at one site appeared to have higher bacterial diversity in the alimentary canal than males, but there was a site, sex interaction. Many of the biota were associated with cold, wet climatic conditions and decomposition. This study provides the first evidence that winter-form D. suzukii may be opportunistic feeders during the winter period and are probably exploiting food sources associated with moisture on decomposing vegetation during this time. A core gut microbiome has been identified for winter-form D. suzukii."
abstract = TextBlob(text)
print(abstract.sentiment)
| [
"saverymax@gmail.com"
] | saverymax@gmail.com |
192ae1a138fa814f372817b7d5f2c2c679b339cc | e0e4981d0859b7094fdd9e3001f943c4154b42ea | /NextHack/tstools/model/__init__.py | dd909eadd7d91d46850d9b1bf3f92bd4a7fc070d | [] | no_license | AleksandrRuzanov/andreychubin95 | 5248db430538994f559f47f2c6700000d33fba58 | e69a0c7fcdc5f41543007dcd046859962ce793a9 | refs/heads/main | 2023-08-16T13:40:05.975002 | 2021-10-04T20:10:41 | 2021-10-04T20:10:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | from .optuna_lgbm import OptunaTuner, OptunaTunerCV
from .automl import UnitAutoML, UnitAutoMLCV
from .feature_selector import feature_selection_by_feature_importance, feature_selection_by_corr
from .classifier import ClassifierLeaf
| [
"72697554+andreychubin95@users.noreply.github.com"
] | 72697554+andreychubin95@users.noreply.github.com |
ad0ec5ccf1bcf916dc9da6286c6f775e3851e0c4 | 89d8e4b3031f963188552313e79d953e14ced124 | /Week 3/excercise_3_4_4_9.py | 314641681f5e36bbe6f2587e99878c59cc254c40 | [] | no_license | ebrisum/Basictrack_2021_1a | 240128ecbb0a7221d7231a997c21ca45ccfe4f1b | d798f4bf2f8f4717d5fd5a0e248bb4195077bf66 | refs/heads/master | 2023-01-06T15:11:32.418425 | 2020-11-12T12:29:03 | 2020-11-12T12:29:03 | 296,370,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | a = 4
b = 3
c = 5
if a>c:
a, c = c, a
if b>c:
b, c = c, b
hypotenuse = (a * b + b * b) ** .5
if abs(c-hypotenuse) < 1e-7:
print("The triangle is right-angled")
else:
print("This is not a right-angled triangle")
| [
"warnerherrema@gmail.com"
] | warnerherrema@gmail.com |
4d93fc164dfa92b8b6ef0228026653a95f26e97e | 2e2b9f321aeb0fd9f0d8da3c8dd1c0d2590a7e1f | /fig/posindex/posindex_start.py | 4cb3b990190db20e4eadb417aab04f7e2bf8675f | [
"CC-BY-4.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | BenYI/Topological_Nulls | 570deb08102d06cb588dd682d3ab03c63e744a63 | 2d8c13e932169bce7d616c83be784443a9260362 | refs/heads/master | 2020-08-06T18:44:34.675313 | 2019-10-22T19:11:36 | 2019-10-22T19:11:36 | 213,110,424 | 0 | 0 | NOASSERTION | 2019-10-06T04:50:56 | 2019-10-06T04:50:55 | null | UTF-8 | Python | false | false | 3,072 | py | # posindex_start.py: generate a plot of the vector field around a 3d magnetic null.
# A yellow transparend sphere is located at the origin, and the vector field of the
# null is shown by it's integral curves, as well as vectors located on the surface
# of a ball around the null
#
#
#
import os, sys, inspect # For importing the submodules in a platform-independend robust way
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib
import bpy # This module is only available inside of Blender's version of python, comment when debugging
from functools import partial # create new funtions from old functions (and strip the kwargs)
# Make sure that the path to BlenDaViz and the integration library are in the front of the path.
code_folder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"../../code")))
if code_folder not in sys.path:
sys.path.insert(0, code_folder)
import integrate as ig
import BlenDaViz as bz
startlen = .35
# generate the points at which vectors are to be evaluated
ncirc=15
ncircvec=50
vecpoints = [[np.array((np.sqrt(1-z**2)*np.cos(t), np.sqrt(1-z**2)*np.sin(t), z))
for t in np.linspace(np.pi, 3*np.pi, ncircvec+1)[:ncircvec]]
for z in np.cos(np.linspace(0,np.pi,ncirc+2)[1:-1])]
vecpoints.append([np.array((0,0,1))])
vecpoints.append([np.array((0,0,-1))])
#generate the points from which streamlines are to be traced
nstreams=15
streampoints = []
for r in np.linspace(0.01, 1.5, 5):
streampoints.extend(ig.circlePoints(np.array((0,0,1)), radius = r, slide=8,
npoints = nstreams, rot=r))
streampoints.extend(ig.circlePoints(np.array((0,0,1)), radius = r, slide=-8,
npoints = nstreams, rot=r))
fn=partial(ig.ZeroField, index=1)
#def fn(xx): return np.array((0,0,1))
streams = ig.stream_multi(streampoints, vvfn=fn, tol=1e-7, iterMax=1000000, intdir = 'back')
lines = []
for stream in streams:
print('plotting stream...')
lines.append(bz.plot(stream.x, stream.y, stream.z, color = (1,1,1), radius=0.01))
vecs = []
vc = cm.get_cmap('plasma', 10) # vertical colors
for num1, veccirc in enumerate(vecpoints):
cdict = {'red': [[0.0, vc(num1)[0], vc(num1)[0]],
[1.0, .5, .5]],
'green': [[0.0, vc(num1)[1], vc(num1)[1]],
[1.0, .5, .5]],
'blue': [[0.0, vc(num1)[2], vc(num1)[2]],
[1.0, .5, .5]]}
cmap = matplotlib.colors.LinearSegmentedColormap('map'+str(num1), segmentdata=cdict)
norm = plt.Normalize(vmin=0, vmax=ncircvec)
s_m = plt.cm.ScalarMappable(cmap = cmap, norm = norm)
for num2, point in enumerate(veccirc):
#print(num2,point)
color = s_m.to_rgba(num2)[:-1] # throw out the a of rgba
vecs.append(bz.vec(point, fn(point), length=3*ig.norm(fn(point)), color=color))
bpy.data.scenes['Scene'].render.filepath = '../posindex_start.png'
bpy.ops.render.render(write_still=True)
| [
"smiet@physics.leidenuniv.nl"
] | smiet@physics.leidenuniv.nl |
8f5b53674caa26cd827c4943842f96a981027ade | 386a5b505d77c9798aaab78495d0f00c349cf660 | /python/function/harmonic.py | f23e439bde8eccf7c61bf23d64a8e3c28998c89d | [] | no_license | namratarane20/MachineLearning | 2da2c87217618d124fd53f607c20641ba44fb0b7 | b561cc74733b655507242cbbf13ea09a2416b9e2 | refs/heads/master | 2023-01-20T18:54:15.662179 | 2020-03-09T14:12:44 | 2020-03-09T14:12:44 | 237,597,461 | 0 | 0 | null | 2023-01-05T12:37:12 | 2020-02-01T10:22:20 | Python | UTF-8 | Python | false | false | 431 | py | #this program is used print the nth harmonic value
from data import functional
try:
value = int(input("enter the value: "))
if value > 0: # if value is more than 0 it will run the method
functional.harmonic(value)
else:print("enter more than 0")
except ValueError: # if is not numerical value it will throw the error
print("enter the proper input") | [
"namrata.ashok@impelsys.com"
] | namrata.ashok@impelsys.com |
79dceca566689bfcd1ce4894deae5b484f460a10 | 35c25b89d51f43ec1dc7e2c6856b098b22c71a6b | /map_objects/game_map.py | 31f13c563756beb55c8b6969a6187b81809ba608 | [
"MIT"
] | permissive | Ross-Clark/rouge | 744564b1a6ac8382ab99f9b888c74abb66c80477 | 3830b7b178a018a59a8ae70530503a9e24ebb907 | refs/heads/master | 2022-02-15T21:39:49.690054 | 2019-09-05T22:53:48 | 2019-09-05T22:53:48 | 205,878,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,887 | py | from random import randint
from map_objects.tile import Tile
from map_objects.rectangle import Rect
class GameMap:
def __init__(self, width, height):
self.width = width
self.height = height
self.tiles = self.initialize_tiles()
def initialize_tiles(self):
tiles = [[Tile(True) for y in range(self.height)] for x in range(self.width)]
return tiles
def make_map(self, max_rooms, room_min_size, room_max_size, map_width, map_height, player):
rooms = []
num_rooms = 0
for r in range(max_rooms):
#rand w h
w = randint(room_min_size, room_max_size)
h = randint(room_min_size, room_max_size)
#rand pos in bound
x = randint(0, map_width - w - 1)
y = randint(0, map_height - h - 1)
#Rect
new_room = Rect(x, y, w, h)
#check intersection
for other_room in rooms:
if new_room.intersect(other_room):
break
else:
#no intersect
self.create_room(new_room)
(new_x, new_y) = new_room.center()
if num_rooms == 0:
# starting room
player.x = new_x
player.y = new_y
else:
#after room 0
# connect to previous room
#center of previous room
(prev_x, prev_y) = rooms[num_rooms - 1].center()
#coin flip
if randint(0,1) == 1:
#move h then v
self.create_h_tunnel(prev_x, new_x, prev_y)
self.create_v_tunnel(prev_y, new_y, new_x)
else:
#v then h
self.create_v_tunnel(prev_y, new_y, prev_x)
self.create_h_tunnel(prev_x, new_x, new_y)
#add room to list
rooms.append(new_room)
num_rooms += 1
print(num_rooms)
def create_room(self, room):
# makes rect room
for x in range(room.x1 + 1, room.x2):
for y in range(room.y1 +1, room.y2):
self.tiles[x][y].blocked = False
self.tiles[x][y].block_sight = False
def create_h_tunnel(self, x1, x2, y):
for x in range(min(x1, x2), max(x1, x2) + 1):
self.tiles[x][y].blocked = False
self.tiles[x][y].block_sight = False
def create_v_tunnel(self, y1, y2, x):
for y in range(min(y1, y2), max(y1, y2) + 1):
self.tiles[x][y].blocked = False
self.tiles[x][y].block_sight = False
def is_blocked(self, x, y):
if self.tiles[x][y].blocked:
return True
return False
| [
"ross.clark2@nhs.net"
] | ross.clark2@nhs.net |
b97cffb7e0a43919de823cb6cf823479aa0bc268 | a2ee667a402d821831ce1532c3a2e62305624388 | /extras/sample_site/sample_site/urls.py | b391adc051ec60ccb38c358017504169712765ab | [
"MIT"
] | permissive | cltrudeau/django-flowr | 9c1c7c8a43d881f962e8dd58ca424daa3ee1348a | ea2d69fda94d1998f48301954f8dc69f0b553553 | refs/heads/master | 2023-07-05T20:28:05.370538 | 2023-06-29T19:36:41 | 2023-06-29T19:36:41 | 40,761,586 | 3 | 0 | MIT | 2022-12-26T19:50:47 | 2015-08-15T13:37:23 | Python | UTF-8 | Python | false | false | 185 | py | from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^flowr/', include('flowr.urls')),
]
| [
"ctrudeau@arsensa.com"
] | ctrudeau@arsensa.com |
55523ffc80b4031a4c382d5eb39568dd12d3a8ee | 5ca4d0bc0622fc55f2f5eb4e0b367d7a6f83e946 | /model.py | 06f1265f7210e9526fb9c8752e9265fc1fa3a089 | [] | no_license | klintan/relation-network | ed479e12dae0ef00a0b748ec62c02ec0033bd29f | 84f5060a8b1536103ecd65bb1629459a94d03442 | refs/heads/master | 2020-07-14T02:25:48.499808 | 2017-06-15T08:50:44 | 2017-06-15T08:50:44 | 94,296,278 | 2 | 1 | null | 2017-06-14T06:17:23 | 2017-06-14T06:17:23 | null | UTF-8 | Python | false | false | 2,931 | py | import numpy as np
import keras
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Activation, Flatten, Input, Embedding,\
LSTM, Bidirectional, Lambda, Concatenate, Add
from keras.layers.convolutional import Conv2D, MaxPooling2D, AveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras.optimizers import Adam
import prepare
def bn_layer(x, conv_unit):
''''
Convolutional layers with batch normalization and RELU activation
'''
def f(inputs):
md = Conv2D(x, (conv_unit, conv_unit), padding='same')(inputs)
md = BatchNormalization()(md)
return Activation('relu')(md)
return f
def conv_net(inputs):
''''
Batch normalization layer, RELU activation
'''
model = bn_layer(32, 3)(inputs)
model = MaxPooling2D((2, 2), 2)(model)
model = bn_layer(32, 3)(model)
model = MaxPooling2D((2, 2), 2)(model)
model = bn_layer(32, 3)(model)
model = MaxPooling2D((2, 2), 2)(model)
model = bn_layer(32, 3)(model)
model = MaxPooling2D((2, 2), 2)(model)
model = bn_layer(64, 3)(model)
return model
def model():
input1 = Input((50, 200, 3))
input2 = Input((mxlen,))
cnn_features = conv_net(input1)
embedding_layer = prepare.embedding_layer(prepare.tokenizer.word_index, prepare.get_embeddings_index(), mxlen)
embedding = embedding_layer(input2)
bi_lstm = Bidirectional(LSTM(lstm_unit, implementation=2, return_sequences=False))
lstm_encode = bi_lstm(embedding)
shapes = cnn_features.shape
w, h = shapes[1], shapes[2]
features = []
for k1 in range(w):
for k2 in range(h):
def get_feature(t):
return t[:, k1, k2, :]
get_feature_layer = Lambda(get_feature)
features.append(get_feature_layer(cnn_features))
relations = []
concat = Concatenate()
for feature1 in features:
for feature2 in features:
relations.append(concat([feature1, feature2, lstm_encode]))
g_MLP = get_MLP(4, get_dense(4))
f_MLP = get_MLP(2, get_dense(2))
mid_relations = []
for r in relations:
mid_relations.append(g_MLP(r))
combined_relation = Add()(mid_relations)
rn = dropout_dense(combined_relation)
rn = dropout_dense(rn)
pred = Dense(1, activation='sigmoid')(rn)
model = Model(inputs=[input1, input2], outputs=pred)
optimizer = Adam(lr=3e-5)
model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
return model
def get_dense(n):
r = []
for k in range(n):
r.append(Dense(MLP_unit, activation='relu'))
return r
def get_MLP(n, denses):
def g(x):
d = x
for k in range(n):
d = denses[k](d)
return d
return g
def dropout_dense(x):
y = Dense(MLP_unit)(x)
y = Dropout(0.5)(y)
y = Activation('relu')(y)
return y
| [
"andreas.klintberg@meltwater.com"
] | andreas.klintberg@meltwater.com |
25fab6766a13625e8a902e830a22564c4687e10a | 3b41a9a0f17f9330514856933e05188532d92ed5 | /Projects/Programming Languages/test/runtest.py | 7180ebe455005f202853d16359a3df23c8157954 | [] | no_license | vikrsri/EECS-354 | 13703f8a8c345cfaf3980e17b81f7f917c89b37a | 3a26127070cf814741f92425e82a37de492122c9 | refs/heads/master | 2023-03-17T14:43:29.536719 | 2020-06-19T01:43:37 | 2020-06-19T01:43:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,662 | py | #!/usr/bin/python
import os
import sys
import random
from popen2 import Popen3
def run(out):
langs = ["c", "ruby", "shell", "mysql"]
if len(sys.argv) > 1:
newlangs = []
for lang in sys.argv[1:]:
if lang in langs:
newlangs.append(lang)
else:
print >> sys.stderr, 'Ignoring language %s' % lang
langs = newlangs
total_passed = 0
total_tests = 0
cwd = os.getcwd()
for lang in langs:
print >> out, "Testing %s...\n%s" % (lang, "-"*70);
os.chdir('%s/%s' % (os.path.dirname(__file__), lang))
print >> out, 'Building...'
p = Popen3('./build')
out.write(p.fromchild.read())
status = p.wait()
testcount = 4
successcount = 0
linecount = 0
if status:
print >> out, 'Failed to build.'
else:
if lang == 'c': testcount = 3
if lang == 'mysql': testcount = 5
if lang == 'shell': testcount = 8
command = os.popen('./testrig', 'r')
while True:
line = command.readline()
if not line: break
linecount += 1
if linecount > testcount:
print >> out, 'Error: saw too many lines. Make sure you aren\'t printing anything.'
successcount = 0
break
if lang == 'python':
if (line.strip() == 'True'): successcount += 1
else:
if (line.strip() == 'correct'): successcount += 1
command.close()
os.chdir(cwd)
print >> out, "Passed %i out of %i tests.\n" % (successcount, testcount)
total_passed += successcount
total_tests += testcount
os.chdir(cwd)
return total_passed, total_tests
if __name__ == '__main__':
run(sys.stdout)
| [
"lestr3959@gmail.com"
] | lestr3959@gmail.com |
ccbcf2954113910e0990e81ce01c63a41d7fe0f6 | 8c4217a069d8f913f0c740428c8e1510034fa23d | /tryex/tryex3.py | 5d1375dfa99c988c89eb8395fe67d47df5e87043 | [
"MIT"
] | permissive | rivwoxx/python-Le | 8d204f0c13137ce7c0c80fffd95c54d3c4b55123 | dbb653255dab7d11b87f25eec94bcce63a86aa42 | refs/heads/master | 2023-03-08T10:20:48.197013 | 2021-02-22T04:29:23 | 2021-02-22T04:29:23 | 252,054,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | try:
num = float(input("Enter a num "))
num2 = float(input("Enter another num "))
nu3 = num / num2
print(nu3)
except ValueError:
print("Incorrect value")
except ZeroDivisionError as err: #you can store the error as a variable :)
print(err)
print("Cannot divide by 0") | [
"rivera_augusto@protonmail.com"
] | rivera_augusto@protonmail.com |
b03483ef22230e81bd37b045f7357185d5c56335 | 609e784ab6556693d5227c9648ce0ee21942262c | /tmp/tags/views.py | 0b843810d1cc6376f24f78bc9ab77973afb67f2c | [] | no_license | konstantin-mohin/django_blog | 56eebe2224d1a24666b778a9d0bdcfa116018d02 | 0321d18f9a8a82e5eeb42729361ebe8e84100f93 | refs/heads/master | 2016-09-05T14:57:36.149551 | 2015-09-17T09:09:47 | 2015-09-17T09:09:47 | 42,627,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | # -*- coding: utf-8 -*-
#!/usr/bin/env python
from django.template import RequestContext
from django.shortcuts import render
from articles.models import Article
from tags.models import Tags
def tag(request, slug_title):
tag = Tags.objects.get(slug_title=slug_title)
art_with_tag = []
for n in Article.objects.filter(show=True):
if n.tags.filter(slug_title=slug_title).count():
art_with_tag.append(n)
return render(request,
'tag.html',
{
'tag': tag,
'art_with_tag': art_with_tag,
},
)
| [
"k.logvin@webinerds.com"
] | k.logvin@webinerds.com |
4cf7a8a6e08943e6d6c2f28ff0ea86222cd00c72 | 427964e7876ed17f553acf6c30d38a00a98f9b16 | /CnnArchitectures/DenseNet.py | 1c334e76618e7033e49ef4b90c90dc4e8bf80b8c | [] | no_license | NonlinearNimesh/DeepLearning | 8f7fa872b19c67dad9a5695eef766c47a13a5d2e | f80e8fdbb4d919a7d989e36c83792808afeeff07 | refs/heads/main | 2023-08-14T17:29:44.492113 | 2021-09-29T14:51:14 | 2021-09-29T14:51:14 | 398,557,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,449 | py | # -*- coding: utf-8 -*-
"""Untitled49.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1ZoS_lKVmkm8eA0cMxvxo4JFld-tgSv61
"""
import tensorflow
from tensorflow.keras.layers import Input, BatchNormalization, ReLU, Conv2D, Dense, MaxPool2D, AvgPool2D, GlobalAvgPool2D, Concatenate
def bn_rl_conv(x, filters, kernel_size):
x = BatchNormalization()(x)
x = ReLU()(x)
x = Conv2D(filters=filters,
kernel_size=kernel_size,
padding='same')(x)
return x
def dense_block(tensor, k, reps):
for _ in range(reps):
x = bn_rl_conv(tensor, filters=4*k, kernel_size=1)
x = bn_rl_conv(x, filters=k, kernel_size=3)
tensor = Concatenate()([tensor, x])
return tensor
def transition_layer(x, theta):
f = int(tensorflow.keras.backend.int_shape(x)[-1] * theta)
x = bn_rl_conv(x, filters=f, kernel_size=1)
x = AvgPool2D(pool_size=2, strides=2, padding='same')(x)
return x
k = 32
theta = 0.5
repetitions = 6, 12, 24, 16
input = Input(shape=(224, 224, 3))
x = Conv2D(2*k, 7, strides=2, padding='same')(input)
x = MaxPool2D(3, strides=2, padding='same')(x)
for reps in repetitions:
d = dense_block(x, k, reps)
x = transition_layer(d, theta)
x = GlobalAvgPool2D()(d)
output = Dense(1000, activation='softmax')(x)
from tensorflow.keras import Model
model = Model(input, output)
| [
"noreply@github.com"
] | NonlinearNimesh.noreply@github.com |
55feae8fb18f3bb08da3149dae14dedb9de8a084 | 108602ae4f1bdef5df0fb614a7d44a4ebd634713 | /dqn_agent.py | 6ce25c3caad5b2158ce89bda001e6a7dff84321f | [
"MIT"
] | permissive | xunyiljg/rl_navigation | c8086ad701a75c0a4bf0725bb3093500769add0e | 696ec77d41c74aefb67880d38cf21dfcbac6ef62 | refs/heads/master | 2020-05-09T13:39:51.664619 | 2019-01-06T14:25:24 | 2019-01-06T14:25:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,011 | py | import numpy as np
import random
from collections import namedtuple, deque
from model import Qnetwork
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
UPDATE_EVERY = 4
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent:
def __init__(self,state_size,action_size,gamma=0.99,lr=5e-4,
buffer_size=int(1e5),batch_size=64,tau=1e-3):
# defining local and target networks
self.qnet_local = Qnetwork(state_size,action_size).to(device)
self.qnet_target = Qnetwork(state_size,action_size).to(device)
# set local and target parameters equal to each other
self.soft_update(tau=1.0)
# experience replay buffer
self.memory = ReplayBuffer(buffer_size,batch_size)
# defining variables
self.state_size = state_size
self.action_size = action_size
self.buffer_size = buffer_size
self.batch_size = batch_size
self.gamma = gamma
self.lr = lr
self.tau = tau
self.t_step = 0
# optimizer
self.optimizer = optim.Adam(self.qnet_local.parameters(),lr=self.lr)
def step(self,state,action,reward,next_state,done):
""" saves the step info in the memory buffer and perform a learning iteration
Input :
state,action,reward,state,done : non-batched numpy arrays
Output :
none
"""
# add sample to the memory buffer
self.memory.add(state,action,reward,next_state,done)
# Learn every UPDATE_EVERY time steps.
self.t_step = (self.t_step + 1) % UPDATE_EVERY
# use replay buffer to learn if it has enough samples
if self.t_step == 0:
if len(self.memory) > self.batch_size:
experiences = self.memory.sample()
self.learn(experiences)
def learn(self,experiences):
""" perform a learning iteration by using sampled experience batch
Input :
experience : tuple from the memory buffer
states, actions, rewards, next_states, dones = experiences
eg : states.shape = [N,state_size]
Output :
none
"""
states, actions, rewards, next_states, dones,wj,choose = experiences
#states, actions, rewards, next_states, dones = experiences
# set optimizer grdient to zero
self.optimizer.zero_grad()
# predicted action value
q_pred = self.qnet_local.forward(states).gather(1,actions)
# target action value
## use double DQNs, refer https://arxiv.org/abs/1509.06461
next_action_local = self.qnet_local.forward(next_states).max(1)[1]
q_target = rewards + self.gamma*(1-dones)*self.qnet_target.forward(next_states)[range(self.batch_size),next_action_local].unsqueeze(1)
# compute td error
td_error = q_target-q_pred
# update td error in Replay buffer
self.memory.update_td_error(choose,td_error.detach().cpu().numpy().squeeze())
# defining loss
loss = ((wj*td_error)**2).mean()
# running backprop and optimizer step
loss.backward()
self.optimizer.step()
# run soft update
self.soft_update(self.tau)
def act(self,state,eps=0.):
""" return the local model's predicted action for the given state
Input :
state : [state_size]
Output :
action : scalar action as action space is discrete with dim = 1
"""
state = torch.from_numpy(state).float().unsqueeze(dim=0).to(device) # converts numpy array to torch tensor
self.qnet_local.eval() # put net in test mode
with torch.no_grad():
max_action = np.argmax(self.qnet_local(state)[0].cpu().data.numpy())
self.qnet_local.train() # put net back in train mode
rand_num = np.random.rand() # sample a random number uniformly between 0 and 1
# implementing epsilon greedy policy
if rand_num < eps:
return np.random.randint(self.action_size)
else:
return max_action
def soft_update(self,tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
"""
for target_param, local_param in zip(self.qnet_target.parameters(), self.qnet_local.parameters()):
target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
class ReplayBuffer:
def __init__(self,buffer_size,batch_size):
self.buffer = deque(maxlen=buffer_size)
self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done","td_error"])
self.batch_size = batch_size
self.epsilon = 1e-6
self.alpha = 2
self.beta = .3
def add(self,state,action,reward,next_state,done):
max_td_error = max([e.td_error for e in self.buffer if e is not None]+[0])
e = self.experience(state,action,reward,next_state,done,max_td_error)
self.buffer.append(e)
def update_td_error(self,choose,td_errors):
abs_td_errors = np.abs(td_errors)
for j,td_error in zip(choose,abs_td_errors):
self.buffer[j] = self.buffer[j]._replace(td_error=td_error)
def sample(self,random=False):
if random:
choose = np.random.choice(range(len(self.buffer)),self.batch_size,replace=False)
experiences = [self.buffer[i] for i in choose]
else:
# prioritised experience replay
pi = np.array([e.td_error for e in self.buffer if e is not None]) + self.epsilon
Pi = pi**self.alpha
Pi = Pi/np.sum(Pi)
wi = (len(self.buffer)*Pi)**(-self.beta)
wi_ = wi/np.max(wi)
choose = np.random.choice(range(len(self.buffer)),self.batch_size,replace=False,p=Pi)
experiences = [self.buffer[j] for j in choose]
wj = [wi_[j] for j in choose]
wj = torch.from_numpy(np.vstack(wj)).float().to(device)
states = torch.from_numpy(np.vstack([e.state for e in experiences if e is not None])).float().to(device)
actions = torch.from_numpy(np.vstack([e.action for e in experiences if e is not None])).long().to(device)
rewards = torch.from_numpy(np.vstack([e.reward for e in experiences if e is not None])).float().to(device)
next_states = torch.from_numpy(np.vstack([e.next_state for e in experiences if e is not None])).float().to(device)
dones = torch.from_numpy(np.vstack([e.done for e in experiences if e is not None]).astype(np.uint8)).float().to(device)
return (states,actions,rewards,next_states,dones,wj,choose)
def __len__(self):
return len(self.buffer) | [
"1.jaskiratsingh@gmail.com"
] | 1.jaskiratsingh@gmail.com |
df352c02f8bee9de395d16310479d3c9d6a91d18 | 15dd0a271841e709ed14d71302fac876fe0ebcea | /func.py | 47a806bacaf8be643208c02bc520e87b9d9e569b | [] | no_license | srenoes/message-board | f987f82955f5195f73825af2c3e8937e4d36096a | 1ad90a7385c50d7a647f7fa23b5c8b32d0d7cc25 | refs/heads/master | 2021-01-06T11:10:43.360449 | 2020-02-17T06:19:34 | 2020-02-17T06:19:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,580 | py | def read(data):
''
''' This function should receive a dictionary (see the json file format) and return a list of rows
For example:
data = {
"names": ["Son", "Thao"],
"posted_at": ["16/02/2020 22:37", "16/02/2020 22:39"],
"messages": ["Hello", "Hi"]
}
rows = [
("Son", "16/02/2020 22:37", "Hello"),
("Thao", "16/02/2020 22:39", "Hi")
]
Bonus: You might want to show the latest messages first. How would you do that?
'''
# Your code here
return rows
def write(rows, name, message):
from datetime import datetime
''' This function should receive a list of rows, a name and a message (string), and return a dictionary.
For example:
rows = [
("Thao", "16/02/2020 22:39", "Hi"),
("Son", "16/02/2020 22:37", "Hello")
]
name = "Anonymous"
message = "Hello world"
data = {
"names": ["Son", "Thao", "Anonymous"],
"posted_at": ["16/02/2020 22:37", "16/02/2020 22:39", "<current system date time>"],
"messages": ["Hello", "Hi", "Hello world"]
}
Note also the format of the date time should be the same as in the original data.
'''
# Your code here
return data
def checkLen(rows):
''
''' This function should return the length of rows.
We also want to show maximum 20 messages on our message board.
If there are more than 20 in the data file, you should show the 20 latest messages.
'''
# Your code here
| [
"sonchuynh.mail@gmail.com"
] | sonchuynh.mail@gmail.com |
013c3b9bc66f363cd2fc654f7a1b812dd457c05b | 5287260a60dab00625044264010eeb160e2b8a12 | /recents_page.py | 6f0a28c745fb7e88851281c70077d9778109dae8 | [] | no_license | Lemju28100/water-marker | cfcf984238b8de5fc79d3716318c137b1da830a3 | 82881f0d3768ef4171ca5a86bf57ceff1d14fbfc | refs/heads/master | 2023-03-23T23:17:12.427167 | 2021-03-01T22:07:28 | 2021-03-01T22:07:28 | 335,047,157 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,143 | py | from functools import partial
from kivy.uix.screenmanager import Screen
from kivy.graphics.vertex_instructions import (Rectangle, Ellipse, Line)
from kivy.graphics.context_instructions import Color
from kivy.uix.image import Image
from kivy.uix.gridlayout import GridLayout
from kivy.uix.button import Button
import os
from PIL import Image as Im
import tkinter as tk
from tkinter import filedialog
class RecentsPage(Screen):
def __init__(self, page_controller, user, **kw):
super().__init__(**kw)
self.user = user
self.user_path = f'{os.getcwd()}/users/{self.user}'
# Set bg color
self.bg_image = Image(source='data/home_background.png', allow_stretch=True,
keep_ratio = False)
self.add_widget(self.bg_image)
# Load images
self.load_all_images()
back_button = Button(text='BACK TO HOME', font_size=14, size_hint = (.15, .1), pos_hint = {'x':.825, 'y':.07},
on_release=partial(self.back_to_home, page_controller))
self.add_widget(back_button)
def load_all_images(self):
images_box = GridLayout(cols=4, size_hint = (.8, 1), spacing=10, padding=5)
images_path = f'{self.user_path}/images'
dir_list = os.listdir(images_path)
for i in range(len(dir_list)):
img = Image(source=f'{images_path}/{dir_list[i]}', size_hint=(1, 1),
keep_ratio = False, allow_stretch=True, on_press_down=self.save_image_as)
images_box.add_widget(img)
self.add_widget(images_box)
def back_to_home(self, page_controller, event):
page_controller.initialize_home_page()
def save_image_as(self, image):
image_source = image.source
image_photo = Im.open(image_source)
root = tk.Tk()
root.withdraw()
root = tk.Tk()
root.withdraw()
files = [('PNG Image', '*.png'), ('JPEG Image', '*.jpeg')]
f = filedialog.asksaveasfile(filetypes=files, defaultextension=files)
if f is None:
return
else:
image_photo.save(fp=f.name)
| [
"lemju28100@yahoo.com"
] | lemju28100@yahoo.com |
630b3166bf1c291c17baf1f9c0acf8c0e55b1c10 | 9dce92a54a6986c0f230bf013fb359bce88f160a | /student/api/permissions.py | 3c1fc850d624b71c31486cbc9307eb85150ee919 | [] | no_license | fIenETICmn/teacher_student_api | b0f9b670fdd3d08ed7061f4075214cb1077c2181 | 73d6fb67d861b7316eb8629aaff0312e5a68cb39 | refs/heads/master | 2023-01-29T17:16:57.641260 | 2020-12-10T12:24:37 | 2020-12-10T12:24:37 | 319,307,026 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | # from rest_framework import permissions
#
#
# class IsOwnerOrReadOnly(permissions.BasePermission):
#
# def has_object_permission(self, request, view, obj):
# # Read permissions are allowed to any request,
# # so we'll always allow GET, HEAD or OPTIONS requests.
# if request.method in permissions.SAFE_METHODS:
# return True
#
# # Write permissions are only allowed to the owner of the snippet.
# return obj.owner == request.user | [
"simarpreet@codefront.io"
] | simarpreet@codefront.io |
3ba4121f15970cc64516bc97d97459ac7c242252 | 758c94ad42e754938792470bd93889a46b3dfd53 | /DeepSymphony/models/SeqAE/SeqAE_model.py | f70f3c9035f8bdcf55865bdd7f40ae285357aaf5 | [] | no_license | HaozeYang/DeepSymphony | acf865df0047ac5f4d4be9ee5dc4a96cb943cd01 | 7fd7a97ac5814bda489fe98adff3771d8259eca5 | refs/heads/master | 2021-04-15T19:11:34.411291 | 2017-11-26T23:47:57 | 2017-11-26T23:47:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,696 | py | from DeepSymphony.common.HParam import HParam
from DeepSymphony.common.Helpers import GreedyEmbeddingDecisionHelper
import os
import shutil
import tensorflow as tf
from tensorflow.contrib import rnn, seq2seq, layers
class SeqAEHParam(HParam):
# basic
encoder_cells = [256]
decoder_cells = [256]
embed_dim = 200
basic_cell = rnn.LSTMCell
# training
learning_rate = 1e-3
clip_norm = 3.
batch_size = 32
iterations = 300
# logs
workdir = './temp/SeqAE/'
tensorboard_dir = 'tensorboard/'
overwrite_workdir = False
# debug
debug = False
def __init__(self, **kwargs):
self.register_check('timesteps')
self.register_check('gen_timesteps')
self.register_check('vocab_size')
super(SeqAEHParam, self).__init__(**kwargs)
assert(self.encoder_cells == self.decoder_cells)
# in next version, maybe we can have
# encoder: layer1 layer2 layer3
# v
# final_state(code)
# v
# decoder: layer1 layer2 layer3
class SeqAE(object):
    """
    Sequential Autoencoder
    compress the sequence into a code
    seq = (A, B, C)
    Encoder:
    rnn rnn rnn --> code
    A B C
    Decoder:
    Training: (TrainingHelper)
    A B C
    code-> rnn rnn rnn
    <start> A B
    Testing: (GreedyEmbeddingHelper)
    y_1 y_2 y_3
    random-> rnn rnn rnn
    <start> y_1 y_2
    """
    # NOTE(review): this class targets TensorFlow 1.x (tf.contrib, sessions)
    # and contains Python 2 print statements; it will not run unmodified on
    # Python 3 / TF 2.x.
    def __init__(self, hparam):
        """Store hyper-parameters and reserve the two control-token ids."""
        self.built = False
        # Default the checkpoint path into the work directory when the
        # caller did not supply one explicitly.
        if not hasattr(hparam, 'weight_path'):
            hparam.weight_path = \
                os.path.join(hparam.workdir, 'SeqAE.ckpt')
        # hparam.encoder_cells.append(hparam.vocab_size+2)
        # hparam.decoder_cells.append(hparam.vocab_size+2)
        self.hparam = hparam
        # The embedding table has vocab_size+2 rows: the last two indices
        # are the <start> and <eos> control tokens.
        self.start_token = hparam.vocab_size
        # since we are using fixed sequence here,
        # we do not add eos_token to the data
        self.eos_token = hparam.vocab_size+1
    def build(self):
        """Construct the TF1 graph: encoder, both decoders, loss, train op."""
        hparam = self.hparam
        # seqs: (batch_size, timesteps) int32 token ids fed at run time.
        seqs = tf.placeholder("int32", [hparam.batch_size,
                                        hparam.timesteps],
                              name="seqs")
        start_tokens = tf.ones([hparam.batch_size],
                               dtype=tf.int32) * self.start_token
        embeddings = tf.Variable(
            tf.random_uniform([hparam.vocab_size+2,
                               hparam.embed_dim],
                              -1.0, 1.0))
        # pad_seqs: (bs, 1+ts)
        pad_seqs = tf.concat([tf.expand_dims(start_tokens, 1), seqs], 1)
        # pad_seqs_emb: (bs, 1+ts, dim)
        pad_seqs_emb = tf.nn.embedding_lookup(embeddings, pad_seqs)
        encoder = rnn.MultiRNNCell([hparam.basic_cell(c)
                                    for c in hparam.encoder_cells])
        # The encoder reads the un-shifted sequence; its final state is the
        # compressed code.
        encoder_outputs, encoder_final_state = \
            tf.nn.dynamic_rnn(encoder,
                              pad_seqs_emb[:, 1:, :], # (A, B, C)
                              dtype=tf.float32)
        code = encoder_final_state
        # Teacher forcing: the training decoder sees the sequence shifted
        # right by the <start> token.
        train_helper = tf.contrib.seq2seq.TrainingHelper(
            pad_seqs_emb[:, :-1, :], # (<start>, A, B)
            tf.ones([hparam.batch_size], tf.int32) * hparam.timesteps)
        # this code use the output of RNN directly, while we add an additional
        # decision layer on the top of RNN.
        # pred_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
        #     embeddings,
        #     start_tokens=start_tokens,
        #     end_token=self.eos_token)
        # NOTE(review): GreedyEmbeddingDecisionHelper is not defined in this
        # class; it must be provided elsewhere in this module. It re-applies
        # the 'decision' projection (reuse=True) while decoding greedily.
        pred_helper = GreedyEmbeddingDecisionHelper(
            decision_scope='decision',
            reuse=True,
            output_dim=hparam.vocab_size+2,
            embedding=embeddings,
            start_tokens=start_tokens,
            end_token=self.eos_token)
        def decode(helper, scope, initial_state, timesteps, reuse=None):
            # Shared decoder: an RNN stack plus a linear 'decision' layer
            # that maps RNN outputs to vocabulary logits; pred is the
            # per-step argmax over those logits.
            with tf.variable_scope(scope, reuse=reuse):
                cells = rnn.MultiRNNCell([hparam.basic_cell(c)
                                          for c in hparam.decoder_cells])
                decoder = seq2seq.BasicDecoder(
                    cell=cells, helper=helper,
                    initial_state=initial_state)
                final_outputs, final_state, final_sequence_lengths = \
                    seq2seq.dynamic_decode(
                        decoder=decoder, output_time_major=False,
                        maximum_iterations=timesteps,
                        # impute_finished=True,
                    )
                scores = layers.linear(final_outputs.rnn_output,
                                       hparam.vocab_size+2,
                                       scope='decoder/decision')
                pred = tf.argmax(scores, axis=2)
                return scores, pred
        scores, train_pred, = decode(train_helper,
                                     'decode',
                                     initial_state=code,
                                     timesteps=hparam.timesteps)
        # Reconstruction loss: cross-entropy between the decoder logits and
        # the original (un-shifted) input sequence.
        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=seqs,
            logits=scores)
        loss = tf.reduce_mean(loss)
        optimizer = tf.train.AdamOptimizer(
            learning_rate=hparam.learning_rate)
        train_op = tf.contrib.slim.learning.create_train_op(
            loss, optimizer, clip_gradient_norm=hparam.clip_norm)
        tf.summary.scalar('loss', loss)
        # same decoder, but use different helper
        pred_scores, pred = decode(pred_helper,
                                   'decode',
                                   initial_state=code,
                                   timesteps=hparam.gen_timesteps,
                                   reuse=True)
        # train
        self.seqs = seqs
        self.loss = loss
        self.train_op = train_op
        # debug
        self.pad_seqs_emb = pad_seqs_emb
        self.embeddings = embeddings
        self.train_pred = train_pred
        # generate
        self.code = code
        self.pred = pred
        self.built = True
    def train(self, fetch_data, continued=None):
        """Run the training loop.

        `fetch_data(batch_size)` must return an int batch shaped like the
        `seqs` placeholder. When `continued` is falsy the work directory is
        (re)created and variables are initialised from scratch; otherwise
        the latest checkpoint is restored.
        """
        if self.built is False:
            self.build()
        hparam = self.hparam
        if not continued:
            if os.path.exists(hparam.workdir):
                if hparam.overwrite_workdir:
                    shutil.rmtree(hparam.workdir)
                else:
                    raise Exception("The workdir exists.")
            os.makedirs(hparam.workdir)
            os.makedirs(os.path.join(hparam.workdir, hparam.tensorboard_dir))
        with tf.Session() as sess:
            train_writer = tf.summary.FileWriter(
                os.path.join(hparam.workdir, hparam.tensorboard_dir),
                sess.graph)
            merged = tf.summary.merge_all()
            saver = tf.train.Saver()
            if continued:
                print 'restoring'
                saver.restore(sess, hparam.weight_path)
            else:
                sess.run(tf.global_variables_initializer())
            # debug
            if hparam.debug:
                seqs = fetch_data(hparam.batch_size)
                code, pad_seqs_emb, embeddings, loss =\
                    sess.run([self.code,
                              self.pad_seqs_emb,
                              self.embeddings,
                              self.loss],
                             feed_dict={self.seqs: seqs})
                print code
                print pad_seqs_emb
                print embeddings
                import ipdb
                ipdb.set_trace()
            # Resume the step counter from the graph's global step so that
            # `iterations` always means "additional steps from here".
            global_step = tf.get_collection(tf.GraphKeys.GLOBAL_STEP)[0]
            i = begin = global_step.eval()
            while i < hparam.iterations+begin:
                seqs = fetch_data(hparam.batch_size)
                _, loss, summary = sess.run([self.train_op,
                                             self.loss,
                                             merged],
                                            feed_dict={self.seqs: seqs})
                print('Step %d: loss = %.2f' % (i, loss))
                train_writer.add_summary(summary, i)
                i += 1
            saver.save(sess, hparam.weight_path)
    def collect(self, fetch_data, samples=10):
        """Encode `samples` batches; return (list of codes, list of batches)."""
        hparam = self.hparam
        with tf.Session() as sess:
            saver = tf.train.Saver()
            saver.restore(sess, hparam.weight_path)
            collection = []
            seqs = []
            for _ in range(samples):
                seq = fetch_data(hparam.batch_size)
                code = sess.run([self.code],
                                feed_dict={self.seqs: seq})
                collection.append(code)
                seqs.append(seq)
        return collection, seqs
    def generate(self, code):
        """Greedy-decode token sequences from an encoder `code` state."""
        with tf.Session() as sess:
            saver = tf.train.Saver()
            saver.restore(sess, self.hparam.weight_path)
            prediction = sess.run(self.pred,
                                  feed_dict={self.code: code})
        return prediction
    def eval(self, seqs):
        """Reconstruct `seqs`; return (greedy predictions, teacher-forced predictions)."""
        with tf.Session() as sess:
            saver = tf.train.Saver()
            saver.restore(sess, self.hparam.weight_path)
            test_pred, train_pred = \
                sess.run([self.pred, self.train_pred],
                         feed_dict={self.seqs: seqs})
        return test_pred, train_pred
| [
"laishaovan@gmail.com"
] | laishaovan@gmail.com |
b4be61d8b86b193478f3cf286e713cde26bb27d9 | 7e7a1a1c7f5a2069b50b90b247d89faef17b7eef | /test/unit/test_make.py | 32d6c20acbcdafa123544c60d5ce8704b4b77154 | [
"BSD-3-Clause"
] | permissive | JulianVolodia/bfg9000 | e1d13e07ef43577ce871cbdf28d7854eaad9985e | c04867cd7fc4861bc67fe38f9ca47ee6cc43edef | refs/heads/master | 2021-01-11T12:16:38.842893 | 2016-12-11T21:16:52 | 2016-12-12T01:18:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,162 | py | import os
import unittest
from six.moves import cStringIO as StringIO
from bfg9000 import path
from bfg9000 import safe_str
from bfg9000.backends.make.syntax import *
from bfg9000.platforms import platform_name
# Make treats ':' specially in target/dependency position; the POSIX backend
# escapes it as '\:' while the Windows backend leaves it unescaped.
esc_colon = ':' if platform_name() == 'windows' else '\\:'
def quoted(s):
    """Wrap *s* in single quotes (plain str concatenation, so *s* must be str)."""
    quote = "'"
    return quote + s + quote
class TestMakeWriter(unittest.TestCase):
    """Exercise Writer.write() for every value type in every Syntax mode."""

    def _assert_written(self, value, syntax, expected):
        """Write *value* under *syntax* into a fresh Writer and compare output."""
        out = Writer(StringIO())
        out.write(value, syntax)
        self.assertEqual(out.stream.getvalue(), expected)

    # plain strings: '$' doubles everywhere; ':', ' ', '|' and ',' escaping
    # depends on the syntax context
    def test_write_string_target(self):
        self._assert_written('foo: $bar|baz,quux', Syntax.target,
                             'foo' + esc_colon + '\\ $$bar|baz,quux')

    def test_write_string_dependency(self):
        self._assert_written('foo: $bar|baz,quux', Syntax.dependency,
                             'foo' + esc_colon + '\\ $$bar\\|baz,quux')

    def test_write_string_function(self):
        self._assert_written('foo: $bar|baz,quux', Syntax.function,
                             quoted('foo: $$bar|baz$,quux'))

    def test_write_string_shell(self):
        self._assert_written('foo: $bar|baz,quux', Syntax.shell,
                             quoted('foo: $$bar|baz,quux'))

    def test_write_string_clean(self):
        self._assert_written('foo: $bar|baz,quux', Syntax.clean,
                             'foo: $$bar|baz,quux')

    # escaped strings pass through untouched in every mode
    def test_write_escaped_string_target(self):
        self._assert_written(safe_str.escaped_str('foo: $bar|baz,quux'),
                             Syntax.target, 'foo: $bar|baz,quux')

    def test_write_escaped_string_dependency(self):
        self._assert_written(safe_str.escaped_str('foo: $bar|baz,quux'),
                             Syntax.dependency, 'foo: $bar|baz,quux')

    def test_write_escaped_string_function(self):
        self._assert_written(safe_str.escaped_str('foo: $bar|baz,quux'),
                             Syntax.function, 'foo: $bar|baz,quux')

    def test_write_escaped_string_shell(self):
        self._assert_written(safe_str.escaped_str('foo: $bar|baz,quux'),
                             Syntax.shell, 'foo: $bar|baz,quux')

    def test_write_escaped_string_clean(self):
        self._assert_written(safe_str.escaped_str('foo: $bar|baz,quux'),
                             Syntax.clean, 'foo: $bar|baz,quux')

    # jbos (juxtaposition of safe strings): each piece keeps its own
    # escaping rules
    def test_write_jbos_target(self):
        value = safe_str.jbos('$foo', safe_str.escaped_str('$bar'))
        self._assert_written(value, Syntax.target, '$$foo$bar')

    def test_write_jbos_dependency(self):
        value = safe_str.jbos('$foo', safe_str.escaped_str('$bar'))
        self._assert_written(value, Syntax.dependency, '$$foo$bar')

    def test_write_jbos_function(self):
        value = safe_str.jbos('$foo', safe_str.escaped_str('$bar'))
        self._assert_written(value, Syntax.function, quoted('$$foo') + '$bar')

    def test_write_jbos_shell(self):
        value = safe_str.jbos('$foo', safe_str.escaped_str('$bar'))
        self._assert_written(value, Syntax.shell, quoted('$$foo') + '$bar')

    def test_write_jbos_clean(self):
        value = safe_str.jbos('$foo', safe_str.escaped_str('$bar'))
        self._assert_written(value, Syntax.clean, '$$foo$bar')

    # paths: srcdir-rooted paths expand through the $(srcdir) make variable
    def test_write_path_target(self):
        self._assert_written(path.Path('foo', path.Root.srcdir), Syntax.target,
                             os.path.join('$(srcdir)', 'foo'))

    def test_write_path_dependency(self):
        self._assert_written(path.Path('foo', path.Root.srcdir),
                             Syntax.dependency,
                             os.path.join('$(srcdir)', 'foo'))

    def test_write_path_function(self):
        self._assert_written(path.Path('foo', path.Root.srcdir),
                             Syntax.function,
                             quoted(os.path.join('$(srcdir)', 'foo')))

    def test_write_path_shell(self):
        self._assert_written(path.Path('foo', path.Root.srcdir), Syntax.shell,
                             quoted(os.path.join('$(srcdir)', 'foo')))

    def test_write_path_clean(self):
        self._assert_written(path.Path('foo', path.Root.srcdir), Syntax.clean,
                             os.path.join('$(srcdir)', 'foo'))
| [
"jporter@mozilla.com"
] | jporter@mozilla.com |
38c85766058c3a603f20b655c986aabb0a4c7fbd | fd251579cc129172fa648d812bb39e5a4ca7ef4d | /Tp3/dag.py | b459ccb0c63b9d50555ee9d3eb64e1593b60867d | [] | no_license | enzomasson25/Maths531 | 40e08f9d79ae1f4e2eefcd025d527e42c6127d47 | ddbbfd50828a4bab8263d0355ff25102c3a6cbe6 | refs/heads/master | 2020-09-21T18:51:05.056221 | 2019-12-12T08:36:43 | 2019-12-12T08:36:43 | 224,890,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,948 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 10 14:04:24 2019
@author: 33762
"""
#==============================ETAPE==============================
class etape:
    """A PERT-chart step (graph node).

    Attributes:
        numero: step identifier (string).
        date_plus_tard: latest date for the step.
        date_plus_tot: earliest date for the step.
        taches: outgoing tasks (tache objects) leaving this step.
    """
    def __init__(self, numero, date_plus_tard, date_plus_tot, taches=None):
        """Build a step; `taches` defaults to a fresh empty list.

        Fix: the original default was a shared mutable list (`taches=[]`),
        so every default-constructed step aliased the same task list; a
        None sentinel gives each instance its own list.
        """
        self.numero = numero
        self.date_plus_tard = date_plus_tard
        self.date_plus_tot = date_plus_tot
        self.taches = taches if taches is not None else []
    def __str__(self):
        return self.numero
    def get_au_plus_tot(self):
        """Earliest date of this step."""
        return self.date_plus_tot
    def get_au_plus_tard(self):
        """Latest date of this step."""
        return self.date_plus_tard
    def get_number(self):
        """Step identifier."""
        return self.numero
    def get_next_steps(self):
        """Steps reached directly through this step's outgoing tasks."""
        return [tache.etape_suivante for tache in self.taches]
    def get_previous_steps(self, noeudAChercher):
        """Return every step reachable from here that leads directly to
        `noeudAChercher`."""
        return [noeud for noeud in self.parcourir()
                if noeudAChercher in noeud.get_next_steps()]
    def parcourir(self, liste_passage=None, liste_noeuds=None):
        """Depth-first traversal from this step; returns the visited steps.

        `liste_passage` tracks steps already visited during the recursion.
        Fix: both defaults were shared mutable lists in the original code,
        so visited-state leaked between *separate* top-level calls and a
        second traversal returned an incomplete list; None sentinels make
        each top-level call independent. (`liste_noeuds` is kept only for
        signature compatibility — it is rebuilt unconditionally, exactly
        as the original did.)
        """
        if liste_passage is None:
            liste_passage = []
        liste_noeuds = [self]
        for suivant in self.get_next_steps():
            if suivant not in liste_passage:
                liste_passage.append(suivant)
                liste_noeuds += suivant.parcourir(liste_passage)
        return liste_noeuds
#==============================TACHE==============================
class tache:
    """A PERT task (graph edge) joining two steps.

    Stores the task name, its duration, and the steps it connects:
    ``etape_precedente`` (where it leaves from) and ``etape_suivante``
    (where it arrives).
    """
    def __init__(self, nom, duree, etape_suivante=None, etape_precedente=None):
        """Create a task; its endpoint steps may be attached later."""
        self.nom = nom
        self.duree = duree
        self.etape_suivante = etape_suivante
        self.etape_precedente = etape_precedente
    def __str__(self):
        """A task prints as its name."""
        return self.nom
    def set_etape_pre(self, etape_pre):
        """Attach (or replace) the step this task starts from."""
        self.etape_precedente = etape_pre
    def get_name(self):
        """Name of the task."""
        return self.nom
    def get_duration(self):
        """Duration of the task."""
        return self.duree
    def get_begin_step(self):
        """Step the task leaves from (may be None)."""
        return self.etape_precedente
    def get_end_step(self):
        """Step the task arrives at (may be None)."""
        return self.etape_suivante
#==============================MAIN==============================
def critique(pert, chemin=None):
    """Follow the critical path from step `pert`.

    A successor is on the critical path when its earliest date equals its
    latest date. Returns the list of steps walked, or None when a step has
    successors but none of them is critical (original behaviour, kept).

    Fix: the original default `chemin=[]` was a shared mutable list, so a
    second top-level call kept appending to the path built by the first
    one; a None sentinel makes every call start from a fresh list.
    """
    if chemin is None:
        chemin = []
    chemin.append(pert)
    for etape in pert.get_next_steps():
        if etape is not None:  # was `not etape == None`; identity test is the idiom
            if etape.get_au_plus_tard() == etape.get_au_plus_tot():
                return critique(etape, chemin)
    if pert.get_next_steps() == []:
        return chemin
#==============================TESTS==============================
# Build the sample PERT graph: steps are created with
# etape(numero, date_plus_tard, date_plus_tot, [outgoing tasks]), so each
# task must exist before the step it leaves from.
etape8 = etape('8',220,220)
tacheH = tache('H',10,etape8)
etape7 = etape('7',210,210,[tacheH])
tacheG = tache('G',60,etape7)
# 'T' tasks are zero-duration dummy links into step 7.
tacheT = tache('T',0,etape7)
tacheT2 = tache('T',0,etape7)
etape5 = etape('5',50,210,[tacheT2])
etape6 = etape('6',150,150,[tacheG])
etape3 = etape('3',150,210,[tacheT])
tacheE = tache('E',10,etape5)
tacheF = tache('F',30,etape6)
tacheC = tache('C',30,etape3)
etape4 = etape('4',40,200,[tacheE])
etape2 = etape('2',120,120,[tacheC,tacheF])
tacheD = tache('D',10,etape4)
tacheB = tache('B',90,etape2)
etape1 = etape('1',30,30,[tacheB,tacheD])
tacheA = tache('A',30,etape1)
etape0 = etape('0',0,0,[tacheA])
# Wire each task back to the step it leaves from.
# NOTE(review): tacheF never gets set_etape_pre(etape2) — presumably an
# oversight; its etape_precedente stays None.
tacheA.set_etape_pre(etape0)
tacheB.set_etape_pre(etape1)
tacheC.set_etape_pre(etape2)
tacheD.set_etape_pre(etape1)
tacheE.set_etape_pre(etape4)
tacheG.set_etape_pre(etape6)
tacheH.set_etape_pre(etape7)
tacheT.set_etape_pre(etape3)
tacheT2.set_etape_pre(etape5)
print(critique(etape0)) | [
"noreply@github.com"
] | enzomasson25.noreply@github.com |
4cc497fce98493517f1cd08498300cfe6ea024ff | b5225ecbef0965f20a48c5d7a7d84e1ecaea380d | /prime number.py | 314cadc58c8bb689a7913d21488c914e6991bb86 | [
"MIT"
] | permissive | Vignesh-29/letsupgrade-python | a7e192d7b6ab838f8f0e2c1598866cac029e4bfb | dad34b9f5d6bb90fd9bd166c756af3465da268ac | refs/heads/master | 2022-12-11T01:43:35.695276 | 2020-09-14T16:43:19 | 2020-09-14T16:43:19 | 293,293,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | for i in range(1,201):
count=0
for j in range(1,i+1):
if(i%j==0):
count=count+1
if(count==2):
print(i) | [
"noreply@github.com"
] | Vignesh-29.noreply@github.com |
5ceba797824aee8f07f5eeba0944278ceb0d95db | 2b3390fead721863ef8056cbfe1fa0b513fa6055 | /finger_painting.py | 8f969bf700a47aeedc359a858bca5a752cf36c13 | [] | no_license | ncorwin/image_analisys_final | c1808150218daae10b1db0c29001d83ff18981d8 | 25cba97a1259d28d0cf96425458816bb4b68719e | refs/heads/master | 2021-01-10T07:02:55.238193 | 2016-03-08T05:20:27 | 2016-03-08T05:20:27 | 53,369,990 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,775 | py | #!/usr/bin/env python
import numpy as np
import cv2 as cv2
import cv2.cv as cv
import sys
import math as m
x=0
############################################################################
def draw(location, mask, img):
    """Stamp a magenta dot on the persistent drawing mask, then blend it
    with the current frame.

    A location of (0, 0) means "no fingertip detected" and leaves the mask
    untouched (note: any point with x == 0 or y == 0 is skipped as well).
    """
    col, row = location
    magenta = (255, 0, 255)  # BGR
    if col != 0 and row != 0:
        cv2.circle(mask, location, 5, magenta, -1)
    composite = cv2.bitwise_and(img, mask)
    return np.array(composite, dtype="uint8")
############################################################################
def remove_bg1(raw_img):
    """Running-average background estimate (manual implementation).

    Keeps module-level state: `x` counts frames seen so far and `bg_img`
    holds the accumulated background; both are updated on every call.

    Fix: the original declared `global bg_img` *after* assigning `bg_img`
    inside this scope, which Python 3 rejects with "name 'bg_img' is
    assigned to before global declaration" (a SyntaxError). Both globals
    are now declared up front; the arithmetic is unchanged.
    """
    global x, bg_img
    # weight based on total number of frames that have passed
    x += 1.0
    alpha = (1. - 1. / x)
    # first frame: seed the background with the raw frame (alpha is 0 then,
    # so the update below reduces to bg_img = raw_img anyway)
    if x == 1:
        bg_img = raw_img
    # continuously updated average to create the background
    bg_img = np.add(alpha * np.array(bg_img, dtype=float),
                    (1. - alpha) * np.array(raw_img, dtype=float))
    bg_img = np.array(bg_img, dtype="uint8")
    return bg_img
def remove_bg(raw_img, avg):
    """Fold the current frame into the running-average accumulator `avg`
    (very small weight, so the background adapts slowly) and return the
    background as a uint8 image together with the updated accumulator.
    """
    cv2.accumulateWeighted(raw_img, avg, 0.0005)
    background = cv2.convertScaleAbs(avg)
    return background, avg
'''
######foreground practice###
raw_hsv = cv2.cvtColor(raw_img,cv2.COLOR_BGR2HSV)
bg_hsv = cv2.cvtColor(bg_img,cv2.COLOR_BGR2HSV)
hmask = cv2.absdiff(raw_hsv[:,:,0], bg_hsv[:,:,0])
smask = cv2.absdiff(raw_hsv[:,:,1], bg_hsv[:,:,1])
vmask = cv2.absdiff(raw_hsv[:,:,2], bg_hsv[:,:,2])
ret,hmask_thresh = cv2.threshold(hmask,20.,1.,cv2.THRESH_BINARY)
ret,smask_thresh = cv2.threshold(smask,20.,1.,cv2.THRESH_BINARY)
ret,vmask_thresh = cv2.threshold(vmask,20.,1.,cv2.THRESH_BINARY)
mask1 = np.multiply(np.multiply(hmask_thresh, smask_thresh), vmask_thresh)
# bgsub = cv2.BackgroundSubtractorMOG(10, 2, .1)
# fgmask = bgsub.apply(raw_img)
# print np.max(fgmask)
####filter for only skin tones
# h= cv2.cvtColor(img,cv2.COLOR_BGR2HSV)[:,:,0]
# s =cv2.cvtColor(img,cv2.COLOR_BGR2HSV)[:,:,1]
# ret, raw_h = cv2.threshold(h, 100., 1., cv2.THRESH_BINARY)
# ret, raw_s = cv2.threshold(s, 20, 1., cv2.THRESH_BINARY)
# hs_mask = np.multiply(raw_h, raw_s)
###Filter out colors with extreme values and no red for skin###
# ret, rmask = cv2.threshold(img[:,:,2],40,1., cv2.THRESH_BINARY)
# ret, r2mask = cv2.threshold(img[:,:,2],235.,1., cv2.THRESH_BINARY_INV)
# rb_mask = np.multiply(rmask, r2mask)
# img[:,:,0 ]= np.multiply(img[:,:,0], rb_mask)
# img[:,:,1 ]= np.multiply(img[:,:,1], rb_mask)
# img[:,:,2 ]= np.multiply(img[:,:,2], rb_mask)
# bmask = cv2.absdiff(img[:,:,0], bg_img[:,:,0])
# gmask = cv2.absdiff(img[:,:,1], bg_img[:,:,1])
'''
def foreground(bg_img, raw_img):
    """Build a binary foreground mask by differencing the current frame
    against the estimated background.

    Combines an HSV hue/saturation difference mask with a blurred greyscale
    difference mask, then cleans the result with dilate/erode passes.
    Displays the masked frame in a "fg_img" window as a side effect and
    returns the mask as a uint8 array.
    """
    #take the background and subtract somehow from the foreground
    img = raw_img*1
    # Per-channel absolute differences in HSV space, thresholded to {0,1}.
    raw_hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
    bg_hsv = cv2.cvtColor(bg_img,cv2.COLOR_BGR2HSV)
    #raw_hsv = cv2.GaussianBlur(raw_hsv, (5,5), 2)
    #bg_hsv = cv2.GaussianBlur(bg_hsv, (5,5), 2)
    hmask = cv2.absdiff(raw_hsv[:,:,0], bg_hsv[:,:,0])
    smask = cv2.absdiff(raw_hsv[:,:,1], bg_hsv[:,:,1])
    vmask = cv2.absdiff(raw_hsv[:,:,2], bg_hsv[:,:,2])
    ret,hmask_thresh = cv2.threshold(hmask,1.,1.,cv2.THRESH_BINARY)
    ret,smask_thresh = cv2.threshold(smask,1.,1.,cv2.THRESH_BINARY)
    ret, vmask_thresh = cv2.threshold(vmask,1.,1.,cv2.THRESH_BINARY)
    # Only hue AND saturation differences are combined; the value channel
    # is computed but deliberately left out (see commented line below).
    hsv_mask = np.multiply(hmask_thresh, smask_thresh)
    #hsv_mask = np.multiply(hsv_mask, vmask_thresh)
    #hsv_mask = hmask_thresh
    ##Greyscale mask that kinda worked except for bright lighting
    raw_gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    bg_gray = cv2.cvtColor(bg_img,cv2.COLOR_BGR2GRAY)
    raw_gray = cv2.GaussianBlur(raw_gray, (5,5), 2)
    bg_gray = cv2.GaussianBlur(bg_gray, (5,5), 2)
    # absdiff in both directions, ANDed via multiplication.
    mask_g1 =cv2.absdiff(bg_gray,raw_gray)
    mask_g2 =cv2.absdiff(raw_gray,bg_gray)
    ret,mask_g1 = cv2.threshold(mask_g1,10.,1.,cv2.THRESH_BINARY)
    ret,mask_g2 = cv2.threshold(mask_g2,10.,1.,cv2.THRESH_BINARY)
    mask = np.multiply(mask_g1, mask_g2)
    # Per-channel BGR difference mask: computed below but unused in the
    # final combination (the bitwise_xor line is commented out).
    #mask2_b = cv2.absdiff(img[:,:,0], bg_img[:,:,0])
    #mask2_g = cv2.absdiff(img[:,:,1], bg_img[:,:,1])
    #mask2_r = cv2.absdiff(img[:,:,2], bg_img[:,:,2])
    mask2_b = cv2.absdiff(bg_img[:,:,0], img[:,:,0])
    mask2_g = cv2.absdiff(bg_img[:,:,1], img[:,:,1])
    mask2_r = cv2.absdiff(bg_img[:,:,2], img[:,:,2])
    ret,mask2_b = cv2.threshold(mask2_b,5.,1.,cv2.THRESH_BINARY)
    ret,mask2_g = cv2.threshold(mask2_g,5.,1.,cv2.THRESH_BINARY)
    ret,mask2_r = cv2.threshold(mask2_r,5.,1.,cv2.THRESH_BINARY)
    mask2 = np.multiply(mask2_b, mask2_g)
    mask2 = np.multiply(mask2, mask2_r)
    ###make changes here
    # Final mask = HSV mask AND greyscale mask.
    mask = mask*1.0
    mask = np.multiply(hsv_mask, mask)
    #mask = cv2.bitwise_xor(mask, mask2)
    # Morphological clean-up: 7 dilations then 6 erosions (net growth),
    # scaling to 0..255 and back because the mask holds 0/1 floats.
    for i in range(7):
        mask = cv2.dilate(mask*255., (50,50))/255.
    for i in range(6):
        mask = cv2.erode(mask*255, (50,50))/255.
    # for i in range(3):
    #     mask = cv2.dilate(mask*255., (50,50))/255.
    # Apply the mask to every channel for the debug display window.
    fg_img = img*1.0
    fg_img[:,:,0 ]= np.multiply(img[:,:,0], mask)
    fg_img[:,:,1 ]= np.multiply(img[:,:,1], mask)
    fg_img[:,:,2 ]= np.multiply(img[:,:,2], mask)
    cv2.imshow("fg_img", np.array(fg_img, dtype= "uint8"))
    return np.array(mask, dtype = "uint8")
def main():
    """Finger-painting loop: capture frames, find the largest foreground
    contour, detect a fingertip on its convex hull, and paint with it.

    NOTE(review): this function uses Python 2 print statements
    (`print tip[0]`, `print hull.shape`) and so requires Python 2 despite
    the file-level hints.
    """
    ###read the campera input###
    # Probe the first few camera indices and keep the first capture object.
    for i in range(5):
        cap = cv2.VideoCapture(i)
        #cap = cv2.VideoCapture(1)
        if cap: break
    #cap = cv2.VideoCaptur(1)
    null, raw_img =cap.read()
    #initialize the array 'avg'
    avg = np.float32(raw_img)
    out_img = cv2.cvtColor(raw_img, cv2.COLOR_BGR2RGB)
    out_img = cv2.cvtColor(out_img, cv2.COLOR_RGB2BGR)
    ###############################################################
    # Persistent canvas the fingertip paints onto, loaded from disk.
    draw_layer = cv2.imread('draw_layer.jpg')
    #draw_img = np.multiply(draw_layer, raw_img)
    ################################################################
    ###Main Loop###
    while True:
        null, raw_img = cap.read()
        # Update the background model and extract the foreground mask.
        bg_img, avg = remove_bg(raw_img, avg)
        fg_mask = foreground(bg_img, raw_img)
        out_img = cv2.cvtColor(raw_img, cv2.COLOR_BGR2RGB)
        out_img = cv2.cvtColor(out_img, cv2.COLOR_RGB2BGR)
        #fg_gray = cv2.cvtColor(fg_img,cv2.COLOR_BGR2GRAY)
        contours, hier = cv2.findContours(fg_mask,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
        # Keep the index of the last contour larger than 6000 px^2 (assumed
        # to be the hand).
        contour_list = []
        idx = -1
        for i in range(len(contours)):
            if(cv2.contourArea(contours[i])>6000): #and cv2.contourArea(contours[i])<60000):
                contour_list.append(i)
                idx = i
        hull = []
        if idx != -1:
            #print len(contour_list), cv2.contourArea(contours[idx])
            cv2.drawContours(raw_img, contours, idx, [200,50,50], thickness = 3)
            hull = cv2.convexHull(contours[idx])
            cv2.drawContours(raw_img, hull, -1, [10,10,225], thickness = 10)
            #defects = cv2.convexityDefects(contours[idx], hull)
        #########################################################################
        # Fingertip heuristic: a hull vertex whose two neighbours on each
        # side (with wrap-around at the ends) are all closer than `measure`
        # pixels is treated as a tip.
        tip = []
        for i in range(len(hull)):
            measure = 10
            nx,ny = hull[i,0]
            if i < 2:
                nx1,ny1 = hull[i+1,0]
                nx2,ny2 = hull[i+2,0]
                nx3,ny3 = hull[len(hull)-1,0]
                nx4,ny4 = hull[len(hull)-2,0]
            elif i >= len(hull) - 2:
                nx1,ny1 = hull[0,0]
                nx2,ny2 = hull[1,0]
                nx3,ny3 = hull[i-1,0]
                nx4,ny4 = hull[i-2,0]
            else:
                nx1,ny1 = hull[i+1,0]
                nx2,ny2 = hull[i+2,0]
                nx3,ny3 = hull[i-1,0]
                nx4,ny4 = hull[i-2,0]
            # Euclidean distances from the candidate vertex to each neighbour.
            diff1 = abs(m.sqrt(m.pow(nx1-nx, 2)+m.pow(ny1-ny, 2)))
            diff2 = abs(m.sqrt(m.pow(nx2-nx, 2)+m.pow(ny2-ny, 2)))
            diff3 = abs(m.sqrt(m.pow(nx3-nx, 2)+m.pow(ny3-ny, 2)))
            diff4 = abs(m.sqrt(m.pow(nx4-nx, 2)+m.pow(ny4-ny, 2)))
            if diff1 < measure and diff2 < measure and diff3 < measure and diff4 < measure:
                tip.append((nx, ny))
        if len(tip) > 0:
            print tip[0]
            cv2.circle(raw_img, tip[0], 25, (0,0,0), -1)
            draw_img = draw(tip[0], draw_layer, out_img)
        else:
            # (0, 0) is the "no tip" sentinel understood by draw().
            draw_img = draw((0,0), draw_layer, out_img)
        #########################################################################
        if len(hull) != 0:
            #print hull[0:,0]
            print hull.shape
        # Mirror the previews horizontally so they behave like a mirror.
        cv2.imshow("raw_img", np.fliplr(raw_img))
        cv2.imshow("draw_img", np.fliplr(draw_img))
        #cv2.imshow("bg_img", bg_img)
        #break statement for image processing
        if (cv2.waitKey(1) & 0xFF == ord('q')):
            break
# Script entry point: run the painting loop, then tidy up.
main()
# NOTE(review): `cap` is local to main(), so this line raises NameError once
# main() returns — the capture device is never actually released here.
cap.release()
cv2.destroyAllWindows()
| [
"nathancorwin2015@u.northwestern.edu"
] | nathancorwin2015@u.northwestern.edu |
6c718683e9d32aee735955ed83f3b5efc08ab4f0 | 27d8bbb908334523474730be15990d06d6d925d9 | /npy2.py | 7a1cbdc2c9d566b27ba65ee010b1ff4bc8718a6e | [] | no_license | ciaocamilo/ejemplos-python | 08dfccc32140ce858990f4e9e1986aea0e641a49 | c6e404ab7ac2c54e713d89b08dba6e2938719f1c | refs/heads/main | 2023-07-07T21:35:26.248781 | 2021-08-10T06:25:41 | 2021-08-10T06:25:41 | 394,547,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,024 | py | # from numpy import random as r
# print(r.choice(['Andres','Juan','Pedro', 'Mateo'], size= r.choice([1,2],p=[0.1,0.9]) , p=[0.5,0.2,0.2,0.1], replace=False))
import numpy as np
# a = np.array([34, 25, 7]) # Crear una matriz de rango 1
# print(a)
# print(type(a))
# print(a.shape)
# print(a[0], a[1], a[2])
# a[0] = 5 # Cambiar un elemento de la matriz
# print(a)
# b = np.array([[1,2,3],[4,5,6]])
# print(b)
# print(b.shape)
# print(b[0, 0], b[0, 1], b[1, 0])
# matriz = np.zeros((5,5))
# print(matriz)
# b = np.ones((1,2))
# print(b)
# c = np.full((2,5), 8)
# print(c)
# d = np.eye(10)
# print(d)
# e = np.random.random((3,4))
# print(e)
# import numpy as np
# Crear la siguiente matriz de rango 2 con forma (3, 4)
# [[ 1 2 3]
# [ 5 6 7]
# [ 9 10 11 ]]
# a = np.array([[1,2,3], [5,6,7], [9,10,11]])
# Usar el rebanado para sacar el subconjunto que consiste en las 2 primeras filas
# y las columnas 1 y 2; b es el siguiente conjunto de forma (2, 2):
# [[2 3]
# [6 7]]
# b = a[:2, 1:3]
# print(b)
# print(a)
# print()
# print(np.fliplr(a))
# print(a[0, 1]) # Prints "2"
# b[0, 0] = 77 # b[0, 0] es la misma pieza de datos que a[0, 1]
# print(a[0, 1])
# import numpy as np
# a = np.array([[1,2], [3, 4], [5, 6]])
# bool_idx = (a > 2)
# print(bool_idx)
# print(a[bool_idx])
# print(a[a == 5])
# x = np.array([1, 2]) # Dejar que numpy elija el tipo de datos
# print(x.dtype) # Prints "int64"
# x = np.array([1.0, 2.0]) # Dejar que numpy elija el tipo de datos
# print(x.dtype)
# x = np.array([1, 2], dtype = np.int64) # Forzar un tipo de datos en particular
# print(x.dtype)
# x = np.array([[1,2],[3,4]], dtype=np.float64)
# y = np.array([[5,6],[7,8]], dtype=np.float64)
# Suma de elementos; ambos producen la matriz
# [[ 6.0 8.0]
# [10.0 12.0]]
# print(x)
# print()
# print(y)
# print()
# print(x + y)
# print()
# print(np.add(x, y))
# print(x - y)
# print(np.subtract(x, y))
# print(x * y)
# print(np.multiply(x, y))
# print(x / y)
# print(np.divide(x, y))
# print(np.sqrt(x))
# x = np.array([[1,2],[3,4]])
# y = np.array([[5,6],[7,8]])
# v = np.array([9,10])
# w = np.array([11, 12])
# Producto interno de los vectores; ambos producen 219
# print(v.dot(w))
# print(np.dot(v, w))
# print(x.dot(v))
# print(np.dot(x, v))
# print(x.dot(y))
# print(np.dot(x, y))
# x = np.array([[1,2],[3,4]])
# print(np.sum(x)) # Calcular la suma de todos los elementos; imprime "10"
# print(np.sum(x, axis=0)) # Calcula la suma de cada columna; imprime "[4 6]"
# print(np.sum(x, axis=1)) # Calcula la suma de cada fila; imprime "[3 7]"
# x = np.array([[1,2,8], [3,4,7]])
# print(x) # Prints "[[1 2]
# # [3 4]]"
# print()
# print(x.T) # Prints "[[1 3]
# # [2 4]]"
# Sumaremos el vector v a cada fila de la matriz x,
# almacenando el resultado en la matriz y
# x = np.array([[1,2,3], [4,5,6], [7,8,9], [10, 11, 12]])
# v = np.array([1, 0, 1])
# y = np.empty_like(x) # Crear una matriz vacía con la misma forma que x
# # Agrega el vector v a cada fila de la matriz x con un bucle explícito
# for i in range(4):
# y[i, :] = x[i, :] + v
# # Ahora y es lo siguiente
# # [[ 2 2 4]
# # [ 5 5 7]
# # [ 8 8 10]
# # [11 11 13]]
# print(x)
# print()
# print(v)
# print()
# print(y)
# Añadiremos el vector v a cada fila de la matriz x,
# almacenando el resultado en la matriz y
# x = np.array([[1,2,3], [4,5,6], [7,8,9], [10, 11, 12]])
# v = np.array([1, 0, 1])
# vv = np.tile(v, (4, 1)) # Amontonar 4 copias de V una encima de la otra
# print(vv) # Prints "[[1 0 1]
# # [1 0 1]
# # [1 0 1]
# # [1 0 1]]"
# y = x + vv # Agrega x y vv elementalmente
# print()
# print(y) # Prints "[[ 2 2 4
# [ 5 5 7]
# [ 8 8 10]
# [11 11 13]]"
# We will add the vector v to each row of the matrix x,
# storing the result in the matrix y
# x = np.array([[1,2,3], [4,5,6], [7,8,9], [10, 11, 12]])
# v = np.array([[1, 0],[3, 0]])
# print(v)
# print(3 in np.sum(v,axis=0))
# #y = x + v # Añada v a cada fila de x utilizando la radiodifusión
# #print(y) # Prints "[[ 2 2 4]
# # [ 5 5 7]
# # [ 8 8 10]
# # [11 11 13]]"
# Calcular el producto exterior de los vectores
# v = np.array([1,2,3]) # v tiene forma (3,)
# # w = np.array([4,5]) # w tiene forma (2,)
# # # Para calcular un producto exterior, primero reformamos v para que sea una columna
# # # vector de forma (3, 1); podemos entonces emitirlo contra w para rendir
# # # una salida de la forma (3, 2), que es el producto exterior de v y w:
# # # [[ 4 5]
# # # [ 8 10]
# # # [12 15]]
# # print(np.reshape(v, (3, 1)) * w)
# x = np.array([[1,2,3], [4,5,6]])
# # x tiene forma (2, 3) y v tiene forma (3,) por lo que transmiten a (2, 3),
# # dando la siguiente matriz:
# # [[2 4 6]
# # [5 7 9]]
# print(x + v)
| [
"ing_camiloacg@hotmail.com"
] | ing_camiloacg@hotmail.com |
858c51c8c9f563c0c5054b8d8466a2f7140398c7 | 52d9c6d005b2e91f489fdd817059b1217efd711d | /_downloads/8591a6f0671d02c692445320b45c6776/date_demo_rrule.py | 56d2ebf97b135046b79231488af7e5385b733588 | [] | no_license | yuhaihaiyu/matplotlib.github.com | 2a785654d4a0e4a9b6d1876b0aae96b6b5d20fc5 | fbe748c706e92f9ccb660eab656deaebe179a6af | refs/heads/master | 2023-07-31T08:04:16.716267 | 2021-10-02T06:00:49 | 2021-10-02T06:00:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 75 | py | ../../stable/_downloads/8591a6f0671d02c692445320b45c6776/date_demo_rrule.py | [
"quantum.analyst@gmail.com"
] | quantum.analyst@gmail.com |
79d7ceb13c98a1bc6103dccdd8ebd2065138aeee | f351f6d0b790a57c15570c82d190f9e06fa49052 | /pages/views.py | cca5c04235777c1ec37067d467784854134ce647 | [] | no_license | masayamatu/docker-test-django | f077812f281061fff17b41d3e4f578b5e7995011 | 80cb67e31cf213a63b81bfd54860da44f6b0c686 | refs/heads/master | 2023-01-09T17:01:19.452157 | 2020-11-08T13:29:12 | 2020-11-08T13:29:12 | 311,064,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | from django.shortcuts import render
from django.http import HttpResponse
def home_page_view(request):
    """Serve a bare-bones 'Hello,World' response for the home page."""
    greeting = HttpResponse('Hello,World')
    return greeting
# Create your views here.
| [
"m.masa1104@outlook.jp"
] | m.masa1104@outlook.jp |
2cf12fc9c4d87c62769dfa58d378fcde7e11f059 | 4ec55bb92b9ef58d7e9033500cc237d8fb97735f | /app1/migrations/0002_newuser_file.py | 3ae53b55264eb091b472ed71ded7a16936e175b7 | [] | no_license | Aravindcp866/Doorstep-Labs | 4d237e259d54889b7b0bdf0be713f23f9d821bcb | c3739b001b6c901a8fdcd22ef01190117e4fec1a | refs/heads/master | 2023-06-20T07:48:35.447744 | 2021-07-16T06:09:54 | 2021-07-16T06:09:54 | 386,523,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | # Generated by Django 3.2.4 on 2021-06-25 17:41
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds an optional `file` upload field
    # (stored under MEDIA_ROOT/documents) to the app1.NewUser model.
    # Applies on top of app1's initial migration.
    dependencies = [
        ('app1', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='newuser',
            name='file',
            # null=True keeps existing rows valid when the column is added.
            field=models.FileField(null=True, upload_to='documents'),
        ),
    ]
| [
"aravindcp14@gmail.com"
] | aravindcp14@gmail.com |
d679f6827b0f1f6f4622c5dc9032480ab7ff815d | bf363511741b1ddca00163bd3b4bdf87078d8d1b | /gpu_cluster/controllers/gpu_container_controller.py | 4b269d2f043a0d98fd04a6de97c6a39a0339111c | [
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | acm-uiuc/gpu-cluster-backend | f49eeba8438a0a7d0784ec942c4cf50cb16fde2c | 15b414f7333271ba8e91632cfa4f76bd6393c966 | refs/heads/master | 2021-01-24T07:30:10.214821 | 2018-09-07T06:29:58 | 2018-09-07T06:29:58 | 93,349,527 | 2 | 0 | NOASSERTION | 2019-03-25T04:01:30 | 2017-06-05T00:15:42 | Python | UTF-8 | Python | false | false | 2,318 | py | from ..database import db_session
from ..models import Instance
from .container_controller import ContainerController
from nvdocker import NVDockerClient
class GPUContainerController(ContainerController):
    def __init__(self, config):
        # Delegate shared controller setup to the base class, then attach an
        # NVIDIA-aware Docker client used for all GPU container operations.
        super().__init__(config)
        self.docker_client = NVDockerClient()
def create_container(image, user="", token_required=False, budget=-1, num_gpus=1):
# Get 2 open ports for UI and Monitor
uport = self.get_port()
mport = self.get_port()
while uport == mport:
mport = self.get_port()
# Get select a gpu(s) that are least in use
num_available_gpus = len(docker_client.list_gpus())
if num_gpus > num_available_gpus:
num_gpus = num_available_gpus
gpus = []
memory_usage = docker_client.gpu_memory_usage()
for g in num_gpus:
for gpu, used in memory_usage.items():
if used < memory_usage[gpu[-1]]:
gpus.append(gpu)
# Assemble config for container
container_config = {
"ports": {
'8888/tcp': uport,
'6006/tcp': mport
},
"working_dir": "/vault/" + user,
"visible_devices": gpus,
"detach": True,
"auto_remove": True
}
#create container
c_id = docker_client.create_container(image, **container_config).id
#assemble endpoints for UI, monitor and get the access token if needed
uurl = ""
murl = ""
token = ""
if token_required:
token = docker_client.exec_run(c_id, 'python3 /opt/cluster-container/jupyter_get.py')
uurl = "http://vault.acm.illinois.edu:{}/?token={}".format(uport, token.decode("utf-8") )
murl = "http://vault.acm.illinois.edu:" + str(mport)
else:
uurl = "http://vault.acm.illinois.edu:" + str(uport)
murl = "http://vault.acm.illinois.edu:" + str(mport)
#TODO insert budget
budget = -1
db_session.add(Instance(c_id, uport, mport, uurl, murl, user, budget, token))
db_session.commit()
return c_id, uurl, murl
def kill_container(self, c_id):
self.docker_client.stop_container(c_id) | [
"naren@narendasan.com"
] | naren@narendasan.com |
751baeeaf78e31e7c30ff0263dce2e8a7717fb44 | 48517a9b7ec7b0f0bf0a03291b7d1e3def751c0a | /Pibow/corner_to_corner_3_v2.py | 4b9f958fcb0bdf6453d970f0e489ffbcd7e54229 | [
"MIT"
] | permissive | Breakfast-for-Pigeons/Unicorn-HAT | 1ae033bf11c05b9cc739b1eacfc77665506e0bc8 | 9ff1388ee627a8e81f361929e9e9b708db4e2832 | refs/heads/master | 2021-06-06T12:22:48.162031 | 2020-10-22T17:31:51 | 2020-10-22T17:31:51 | 74,648,524 | 1 | 0 | null | 2018-10-02T17:37:31 | 2016-11-24T07:28:23 | Python | UTF-8 | Python | false | false | 7,514 | py | #!/usr/bin/python3
"""
Corner to Corner 3 version 2- Pibow
Moves a square from the lower right corner to the upper left corner.
Instead of cycling through all the colors, a specific color must be sent
to the function as an argument.
....................
Author: Paul Ryan
This program was written on a Raspberry Pi using the Geany IDE.
"""
########################################################################
# Import modules #
########################################################################
from time import sleep
import unicornhat
from bfp_unicornhat import print_header
from bfp_unicornhat import stop
########################################################################
# Import Variables #
########################################################################
from bfp_unicornhat import C1
from bfp_unicornhat import C2
from bfp_unicornhat import C3
from bfp_unicornhat import C4
from bfp_unicornhat import C5
from bfp_unicornhat import C6
from bfp_unicornhat import C7
from bfp_unicornhat import C8
########################################################################
# Functions #
########################################################################
def corner_to_corner_3_v2(color):
    """
    Moves a square from the lower right corner to the upper left corner.

    The lit square grows one L-shaped ring per frame from pixel (7, 7)
    until it fills the 8x8 grid, then is blanked one ring per frame from
    the x == 7 / y == 7 edges back toward (0, 0).

    Arguments:
        color: an RGB tuple used for every lit pixel.
    """
    sleep_speed = 0.1
    off = (0, 0, 0)

    # Growing phase: frame n (n = 7 .. 0) lights column x == n for rows
    # 7 down to n, plus row y == n for columns n+1 .. 7 -- exactly the
    # ring that extends the previous frame's square toward (0, 0).
    for n in range(7, -1, -1):
        for y in range(7, n - 1, -1):
            unicornhat.set_pixel(n, y, color)
        for x in range(n + 1, 8):
            unicornhat.set_pixel(x, n, color)
        unicornhat.show()
        sleep(sleep_speed)

    # Shrinking phase: frame n (n = 7 .. 0) blanks column x == n for
    # rows 0 .. n, plus row y == n for columns n-1 .. 0, collapsing the
    # lit area toward (0, 0) until the display is dark.
    for n in range(7, -1, -1):
        for y in range(0, n + 1):
            unicornhat.set_pixel(n, y, off)
        for x in range(n - 1, -1, -1):
            unicornhat.set_pixel(x, n, off)
        unicornhat.show()
        sleep(sleep_speed)
if __name__ == '__main__':
    # The eight demo colours imported from bfp_unicornhat.
    COLORS = [C1, C2, C3, C4, C5, C6, C7, C8]
    try:
        # STEP01: Print header
        print_header()
        # STEP02: Print instructions in white text
        print("\033[1;37;40mPress Ctrl-C to stop the program.")
        # STEP03: Run the animation once for each colour.
        for COLOR in COLORS:
            corner_to_corner_3_v2(COLOR)
        # STEP04: Exit the program.
        stop()
    except KeyboardInterrupt:
        # Ctrl-C: clear the display and exit cleanly.
        stop()
| [
"noreply@github.com"
] | Breakfast-for-Pigeons.noreply@github.com |
ac2efb8e5d10ce86e7f53fa7b307771e71496fda | 76bcdd8142a60d427ca891b6aa9670273f570013 | /kClosest.py | 10ac86921d07393457f025e825f7c29b50ab2fb5 | [] | no_license | vedapuriswar/CompetitiveProgramming | 790cb666a510ea150565b65c81644b8e463bca74 | b1100e34f664b0db43a613384da3487159c75c18 | refs/heads/master | 2021-04-07T15:49:31.819230 | 2020-03-27T18:46:07 | 2020-03-27T18:46:07 | 248,688,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | def closest(points, k):
point_distance = []
for i in range(len(points)):
point_distance.append(points[i][0] ** 2 + points[i][1] ** 2)
heap = point_distance[0:k]
indices = []
for i in range(k):
indices.append(i)
for point in point_distance[k:]:
n = max(heap)
if point < n:
heap.append(point)
indices.append(point_distance.index(point))
heap.remove(n)
indices.remove(point_distance.index(n))
return indices
arr = [(-2, -4), (0, -2), (-1, 0), (3, -5), (-2, -3), (3, 2)]
index = closest(arr, 3)
for i in range(len(index)):
print(arr[i]) | [
"noreply@github.com"
] | vedapuriswar.noreply@github.com |
0034eb86b4ec01f361c94d75a724909d7872812b | cab8e03b8044741e8670e7eb9b7a44f26093878b | /tasks/merge/merge_hdf5_h5py/merge_h5py.py | e6415e36371cf2cf9b12a2b877ee5f3ed0981daf | [
"MIT"
] | permissive | samanvp/table-testing | 01e9233154b4df63a730f2f7aec1a27d90bd46cc | 877576409010152d361096f07b25e2b781966846 | refs/heads/master | 2020-03-22T16:09:18.207033 | 2018-06-21T06:02:39 | 2018-06-25T14:58:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 957 | py | import argparse
import h5py
def merge_hdf5s(hdf5_paths, output_path):
    """Horizontally concatenate the "data" matrices of several HDF5 files.

    Every input file must contain a 2-D dataset named "data" with the
    same number of rows, columns, and dtype as the first input; the
    output file gets a single "data" dataset whose columns are the
    inputs' columns in argument order.

    Args:
        hdf5_paths: paths of the input HDF5 files (at least one).
        output_path: path of the HDF5 file to create.
    """
    # Read the layout from the first input; use a context manager so the
    # handle is closed (the original leaked every file it opened).
    with h5py.File(hdf5_paths[0], "r") as first:
        n_rows, n_cols = first["data"].shape
        dtype = first["data"].dtype

    with h5py.File(output_path, "w") as output_hfile:
        output_hfile.create_dataset(
            name="data",
            shape=(n_rows, n_cols * len(hdf5_paths)),
            dtype=dtype,
        )
        for i, hdf5_path in enumerate(hdf5_paths):
            with h5py.File(hdf5_path, "r") as hfile:
                # Copy this input's full column block.  The original
                # wrote only one column per file ([:, i:i+1]) even
                # though it allocated n_cols per file, silently dropping
                # data whenever an input had more than one column.
                output_hfile["data"][:, i * n_cols:(i + 1) * n_cols] = hfile["data"][:]
def main():
    """Parse command-line arguments and run the HDF5 merge."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--input_paths",
        required=True,
        nargs="+",
        help="Paths to the input matrices.",
    )
    arg_parser.add_argument(
        "--output_path",
        required=True,
        help="Where to put the result matrix file.",
    )
    parsed = arg_parser.parse_args()
    merge_hdf5s(parsed.input_paths, parsed.output_path)
if __name__ == "__main__":
    # Allow the module to be run directly as a CLI script.
    main()
| [
"mckinsel@users.noreply.github.com"
] | mckinsel@users.noreply.github.com |
8ed7b7edbb81eb185891281cece14b3ff64d6a77 | a434b7240f641b4cd33fb3dcc4c2001978cbec36 | /dj_snips/wsgi.py | 380669ab68e87711dce6b07603d02e860f53a025 | [] | no_license | kdenny/snips-django-app | e65fe4ff5584e5e05b52d9cdc5684d895756144a | 219015829189667c80ba988315c597e85b21458b | refs/heads/master | 2020-05-21T01:11:02.185576 | 2019-05-09T18:18:39 | 2019-05-09T18:18:39 | 185,850,836 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | """
WSGI config for dj_snips project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dj_snips.settings")
application = get_wsgi_application()
| [
"kevin.denny@piano.io"
] | kevin.denny@piano.io |
4d735bd623ed2657f9136b757674c32a0b45d16d | 251dd8b6865fb499c1e76757ea0d6a11d197bd97 | /python/src/zero/optimizers.py | a8de21b0082344d855a3e9c368d3eeaab6313af7 | [
"MIT"
] | permissive | ichi-pg/deep-learning | 666e3cc45accbf4f2f6efd303db2e010a05d9540 | e253d11bafa34bb0260bb655268054534cc8cff6 | refs/heads/master | 2022-04-05T00:36:02.526057 | 2020-02-20T00:21:43 | 2020-02-20T00:21:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,490 | py | from layers import *
from keras.datasets import mnist
from keras.utils import to_categorical
class SGD:
    """Plain stochastic gradient descent: p <- p - lr * grad."""

    def __init__(self, lr=0.01):
        self.lr = lr

    def update(self, params, grads):
        """Apply one descent step to every entry of ``params`` in place."""
        for name in params:
            params[name] -= self.lr * grads[name]
class Momentum:
    """SGD with classical momentum: v <- m*v - lr*grad; p <- p + v."""

    def __init__(self, lr=0.01, momentum=0.9):
        self.lr = lr
        self.momentum = momentum
        self.v = None  # per-parameter velocities, allocated on first update

    def update(self, params, grads):
        """Advance every parameter one momentum step, in place."""
        if self.v is None:
            # Lazily create zero velocities shaped like each parameter.
            self.v = {name: np.zeros_like(value) for name, value in params.items()}
        for name in params:
            self.v[name] = self.momentum * self.v[name] - self.lr * grads[name]
            params[name] += self.v[name]
class AdaGrad:
    """Adaptive SGD: each step is scaled by 1/sqrt(accumulated squared grads)."""

    def __init__(self, lr=0.01):
        self.lr = lr
        self.h = None  # running sum of squared gradients, allocated lazily

    def update(self, params, grads):
        """Apply one AdaGrad step to every entry of ``params`` in place."""
        if self.h is None:
            self.h = {name: np.zeros_like(value) for name, value in params.items()}
        for name in params:
            self.h[name] += grads[name] * grads[name]
            # 1e-7 guards against division by zero on the first step.
            params[name] -= self.lr * grads[name] / (np.sqrt(self.h[name]) + 1e-7)
class Dropout:
    """Standard (non-inverted) dropout: units are dropped while training,
    and activations are scaled by (1 - dropout_ratio) at test time."""

    def __init__(self, dropout_ratio=0.5):
        self.dropout_ratio = dropout_ratio
        self.mask = None  # boolean keep-mask from the last training forward pass

    def forward(self, x, train_flg=True):
        if not train_flg:
            # Inference: scale by the expected keep probability instead
            # of sampling a mask.
            return x * (1.0 - self.dropout_ratio)
        # Training: keep each unit independently with p = 1 - dropout_ratio.
        self.mask = np.random.rand(*x.shape) > self.dropout_ratio
        return x * self.mask

    def backward(self, dout):
        # Gradients flow only through the units that were kept.
        return dout * self.mask
# Adam is (roughly) Momentum + AdaGrad combined.
# Load MNIST, flatten each 28x28 image to a 784-vector, and scale
# pixel values into [0, 1].
(x_train, t_train), (x_test, t_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_train = x_train.astype('float32')
x_train /= 255
x_test = x_test.reshape(10000, 784)
x_test = x_test.astype('float32')
x_test /= 255
# One-hot encode the digit labels.
t_train = to_categorical(t_train)
t_test = to_categorical(t_test)
train_size = x_train.shape[0]
batch_size = 100
# Two-layer net (784 -> 50 -> 10) trained with AdaGrad on random
# mini-batches; losses are recorded every iteration.
net = TwoLayerNet(784, 50, 10)
opt = AdaGrad()
train_loss_list = []
for i in range(10000):
    # np.random.choice samples with replacement here -- a batch may
    # contain duplicate examples.
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]
    grads = net.gradient(x_batch, t_batch)
    opt.update(net.params, grads)
    loss = net.loss(x_batch, t_batch)
    train_loss_list.append(loss)
# Plot the training-loss curve and save it to disk.
x = np.arange(len(train_loss_list))
plt.plot(x, train_loss_list)
plt.savefig("optimizers.png")
| [
"d.ikeda@sakurasoft.co.jp"
] | d.ikeda@sakurasoft.co.jp |
3b55b8f646478e3049eb57a17d9c7789ea13c266 | a18d549ab7518128e2e7c3ba8d34c5a873c99ef7 | /backend/cukiernia/migrations/0004_auto_20210411_1311.py | 767139ab11b8d811a092d2469c569859420243d7 | [
"MIT"
] | permissive | MatMark/ZIwG | 614e0ea4ab85c4e8554fbfdf8fb6a246cdf3c9af | 76cda0d1f72d3f5220ad13457d5a1ea0a0b9771a | refs/heads/main | 2023-06-11T06:44:50.996823 | 2021-06-30T20:23:38 | 2021-06-30T20:23:38 | 345,768,410 | 3 | 0 | MIT | 2021-06-30T20:23:39 | 2021-03-08T19:18:23 | Vue | UTF-8 | Python | false | false | 548 | py | # Generated by Django 3.2 on 2021-04-11 13:11
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration: set ``max_length=150`` on the
    ``url`` fields of CarouselPhoto and ProductPhoto."""

    # Must be applied on top of the previous cukiernia migration.
    dependencies = [
        ('cukiernia', '0003_auto_20210411_1234'),
    ]

    operations = [
        migrations.AlterField(
            model_name='carouselphoto',
            name='url',
            field=models.CharField(max_length=150),
        ),
        migrations.AlterField(
            model_name='productphoto',
            name='url',
            field=models.CharField(max_length=150),
        ),
    ]
| [
"kamilzdeb17@gmail.com"
] | kamilzdeb17@gmail.com |
74da093fa588a02d5807c167da00010a43ff273f | f321337033e25e85ba35fa0526d4c898592fc790 | /assignment1/settings.py | 438242ee7c5a1ac5eaac7829645c6c9610199248 | [] | no_license | kuss123/django-assignment-1 | 02da1876b8a08830dce4eee0db8480b83537a907 | ec46e6620616815152b2abd72edadd07757f0647 | refs/heads/master | 2022-11-16T13:31:49.530341 | 2020-07-19T15:25:18 | 2020-07-19T15:25:18 | 280,893,328 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,115 | py | """
Django settings for assignment1 project.
Generated by 'django-admin startproject' using Django 3.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the secret key is committed to source control -- fine for
# a toy assignment, but load it from the environment for anything real.
SECRET_KEY = 'y-dqsik+sk$9x_mn1j=wz_^o7k^66fwf0p(ixy0nb%(s9z1$q*'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Empty while DEBUG is True: Django then only accepts localhost-style hosts.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'assignment1.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'assignment1.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| [
"halkstar12@gmail.com"
] | halkstar12@gmail.com |
c63458239ad3eb84a928f51c59ee9fa3569bdd61 | 7f7eddabf922c994a490f469255cf8e1ddd51612 | /fgen_4panel.py | dc60092c0847fdad757f884b19d25ac92b0078f8 | [] | no_license | cycle13/dadriaan-era5 | 20778efa91c4ee768b60a8dada1370d68471d338 | da341623a3ddd78ae764edc1bbdfbe01dea7edaa | refs/heads/main | 2022-12-24T00:42:44.061284 | 2020-04-07T20:21:42 | 2020-04-07T20:21:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,346 | py | from era5_plot_params import Params
p = Params()
p.init()
from siphon.catalog import TDSCatalog
from siphon.http_util import session_manager
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import numpy as np
import scipy.ndimage as ndimage
import xarray as xr
import matplotlib.gridspec as gridspec
import metpy.calc as mpcalc
import metpy.plots as mpplt
from metpy.units import units
import datetime
# Set the requested date
rd = datetime.datetime.strptime(p.opt['tstring'],'%Y-%m-%d %H:%M:%S')
# What date string
yyyymm = rd.strftime('%Y%m')
yyyymmdd = rd.strftime('%Y%m%d')
# What 3D product strings
prod3d = ['_u.','_v.','_z.','_t.']
# Set RDA credentials
session_manager.set_session_options(auth=p.opt['creds'])
# The dataset catalog
cat = TDSCatalog('https://rda.ucar.edu/thredds/catalog/files/g/ds633.0/e5.oper.an.pl/'+yyyymm+'/catalog.xml')
# Get all of the datasets in the catalog
files = cat.datasets
# Turn this list of files into a list
allfiles = list(files)
# Loop through the files and save the ones we want to load
casefiles = [i for i in allfiles if yyyymmdd in i]
# Find the indexes in the list of files we want to load
indexes = [allfiles.index(f) for f in casefiles]
# Trim down files further based on product
li = []
for cf in indexes:
for p3 in prod3d:
if p3 in files[cf].name:
li.append(cf)
# Load using list comprehension, creating list of xarray dataset objects
singlesets = [files[i].remote_access(use_xarray=True) for i in li]
# Combine all of the datasets (all files into a single dataset)
ds = xr.combine_by_coords(singlesets)
print(ds)
# Subset the dataset. We want all levels, at a specific time, and reduce lat/lon
ds = ds.sel(time=rd,latitude=slice(60,15),longitude=slice(230,300))
# Coordinate reference system
crs = ccrs.LambertConformal(central_longitude=-100.0, central_latitude=45.0)
# Set a new figure window
fig = plt.figure(1, figsize=(22, 15))
# Use gridspec
gs = gridspec.GridSpec(nrows=2,ncols=2,height_ratios=[1,1],hspace=0.03,wspace=0.03)
# Setup axis
def axis_setup(ax):
    """Clip *ax* to the configured zoom box and draw base-map features.

    The extent comes from p.opt['zoom'] (interpreted in PlateCarree
    coordinates); 50m-resolution coastlines plus state and country
    borders are overlaid.  Returns the same axes for chaining.
    """
    ax.set_extent(p.opt['zoom'],ccrs.PlateCarree())
    ax.add_feature(cfeature.COASTLINE.with_scale('50m'), linewidth=0.5)
    ax.add_feature(cfeature.STATES, linewidth=0.5)
    ax.add_feature(cfeature.BORDERS, linewidth=0.5)
    return ax
# Combine 1D latitude and longitudes into a 2D grid of locations
lon_2d, lat_2d = np.meshgrid(ds['longitude'], ds['latitude'])
# Calculate the grid deltas for frontogenesis calculation
dx, dy = mpcalc.lat_lon_grid_deltas(lon_2d, lat_2d)
# Smooth the height data
#heights_500 = ndimage.gaussian_filter(mpcalc.geopotential_to_height(ds['Z'].sel(level=500.0)), sigma=1.5, order=0)
heights_600 = ndimage.gaussian_filter(mpcalc.geopotential_to_height(ds['Z'].sel(level=600.0)), sigma=1.5, order=0)
heights_700 = ndimage.gaussian_filter(mpcalc.geopotential_to_height(ds['Z'].sel(level=700.0)), sigma=1.5, order=0)
heights_850 = ndimage.gaussian_filter(mpcalc.geopotential_to_height(ds['Z'].sel(level=850.0)), sigma=1.5, order=0)
heights_925 = ndimage.gaussian_filter(mpcalc.geopotential_to_height(ds['Z'].sel(level=925.0)), sigma=1.5, order=0)
# Contour levels for heights
h5lev = np.arange(4800.0,5800.0,30.0)
h6lev = np.arange(3500.0,4500.0,30.0)
h7lev = np.arange(2500.0,3500.0,30.0)
h85lev = np.arange(1000.0,2000.0,30.0)
h92lev = np.arange(0.0,1000.0,30.0)
# Compute 700 hPa frontogenesis
# First compute potential temperature, then compute frontogenesis
theta_600 = mpcalc.potential_temperature(600.0*units.hPa,ds['T'].sel(level=600.0))
theta_700 = mpcalc.potential_temperature(700.0*units.hPa,ds['T'].sel(level=700.0))
theta_850 = mpcalc.potential_temperature(850.0*units.hPa,ds['T'].sel(level=850.0))
theta_925 = mpcalc.potential_temperature(925.0*units.hPa,ds['T'].sel(level=925.0))
front_600 = mpcalc.frontogenesis(theta_600,ds['U'].sel(level=600.0),ds['V'].sel(level=600.0),dx,dy)
front_700 = mpcalc.frontogenesis(theta_700,ds['U'].sel(level=700.0),ds['V'].sel(level=700.0),dx,dy)
front_850 = mpcalc.frontogenesis(theta_850,ds['U'].sel(level=850.0),ds['V'].sel(level=850.0),dx,dy)
front_925 = mpcalc.frontogenesis(theta_925,ds['U'].sel(level=925.0),ds['V'].sel(level=925.0),dx,dy)
# A conversion factor to get frontogensis units of K per 100 km per 3 h
convert_to_per_100km_3h = 1000*100*3600*3
# UPPER LEFT PANEL- 600 hPa height/frontogenesis
ax1 = plt.subplot(gs[0,0],projection=crs)
axis_setup(ax1)
cf1 = ax1.contourf(lon_2d, lat_2d, front_600*convert_to_per_100km_3h, np.arange(-8, 8.5, 0.5), extend='both', cmap='bwr'
, transform=ccrs.PlateCarree())
c1 = ax1.contour(lon_2d, lat_2d, heights_600, h6lev, colors='black', linewidths=2,
transform=ccrs.PlateCarree())
#ax1.clabel(c1, fontsize=10, inline=1, inline_spacing=1, fmt='%i', rightside_up=True)
ax1.set_title('600-hPa Pettersen Fgen and Heights', fontsize=16)
cb1 = fig.colorbar(cf1, ax=ax1, orientation='horizontal', shrink=0.74, pad=.05)
cb1.set_label('degK/100km/3h', size='x-large')
# UPPER RIGHT PANEL- 700 hPa height/frontogenesis
ax2 = plt.subplot(gs[0,1],projection=crs)
axis_setup(ax2)
cf2 = ax2.contourf(lon_2d, lat_2d, front_700*convert_to_per_100km_3h, np.arange(-8, 8.5, 0.5), extend='both', cmap='bwr'
, transform=ccrs.PlateCarree())
c2 = ax2.contour(lon_2d, lat_2d, heights_700, h7lev, colors='black', linewidths=2,
transform=ccrs.PlateCarree())
#ax2.clabel(c1, fontsize=10, inline=1, inline_spacing=1, fmt='%i', rightside_up=True)
ax2.set_title('700-hPa Pettersen Fgen and Heights', fontsize=16)
cb2 = fig.colorbar(cf2, ax=ax2, orientation='horizontal', shrink=0.74, pad=.05)
cb2.set_label('degK/100km/3h', size='x-large')
# LOWER LEFT PANEL- 850 hPa height/frontogenesis
ax3 = plt.subplot(gs[1,0],projection=crs)
axis_setup(ax3)
cf3 = ax3.contourf(lon_2d, lat_2d, front_850*convert_to_per_100km_3h, np.arange(-8, 8.5, 0.5), extend='both', cmap='bwr'
, transform=ccrs.PlateCarree())
c3 = ax3.contour(lon_2d, lat_2d, heights_850, h85lev, colors='black', linewidths=2,
transform=ccrs.PlateCarree())
#ax3.clabel(c1, fontsize=10, inline=1, inline_spacing=1, fmt='%i', rightside_up=True)
ax3.set_title('850-hPa Pettersen Fgen and Heights', fontsize=16)
cb3 = fig.colorbar(cf3, ax=ax3, orientation='horizontal', shrink=0.74, pad=.05)
cb3.set_label('degK/100km/3h', size='x-large')
# LOWER RIGHT PANEL- 925 hPa height/frontogenesis
ax4 = plt.subplot(gs[1,1],projection=crs)
axis_setup(ax4)
cf4 = ax4.contourf(lon_2d, lat_2d, front_925*convert_to_per_100km_3h, np.arange(-8, 8.5, 0.5), extend='both', cmap='bwr'
, transform=ccrs.PlateCarree())
c4 = ax4.contour(lon_2d, lat_2d, heights_925, h92lev, colors='black', linewidths=2,
transform=ccrs.PlateCarree())
#ax4.clabel(c1, fontsize=10, inline=1, inline_spacing=1, fmt='%i', rightside_up=True)
ax4.set_title('925-hPa Pettersen Fgen and Heights', fontsize=16)
cb4 = fig.colorbar(cf4, ax=ax4, orientation='horizontal', shrink=0.74, pad=.05)
cb4.set_label('degK/100km/3h', size='x-large')
# Set figure title
fig.suptitle(rd.strftime('%d %B %Y %H:%MZ')+' F%02d' % (int(p.opt['fnum'])), fontsize=24)
# Save figure
plt.savefig('fgen_'+rd.strftime('%Y%m%d%H')+'_'+'%02d' % (int(p.opt['fnum']))+'.png')
| [
"daniel.adriaansen@gmail.com"
] | daniel.adriaansen@gmail.com |
4a625cc49e2d484363ea090f357a0e45dc2e536a | 9e28200b71d43de1e122a964e88f1b547bfde465 | /question_leetcode/702.py | e39791835a7143e971e1b37e879656148c9a064b | [] | no_license | paul0920/leetcode | 6f8a7086eefd3e9bccae83752ef41cbfee1acaea | 474886c5c43a6192db2708e664663542c2e39548 | refs/heads/master | 2023-08-19T14:10:10.494355 | 2021-09-16T20:26:50 | 2021-09-16T20:26:50 | 290,560,326 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 879 | py | # """
# This is ArrayReader's API interface.
# You should not implement it, or speculate about its implementation
# """
# class ArrayReader(object):
# def get(self, index):
# """
# :type index: int
# :rtype int
# """
class Solution(object):
    def search(self, reader, target):
        """Locate ``target`` in the sorted, unbounded ArrayReader.

        :type reader: ArrayReader
        :type target: int
        :rtype: int -- index of target, or -1 when absent
        """
        # Exponentially grow an upper bound until reader[hi - 1] >= target.
        hi = 1
        while reader.get(hi - 1) < target:
            hi *= 2
        # Classic "lo + 1 < hi" binary search on [0, hi].
        lo = 0
        while lo + 1 < hi:
            mid = (lo + hi) // 2
            if reader.get(mid) < target:
                lo = mid
            else:
                hi = mid
        if reader.get(lo) == target:
            return lo
        if reader.get(hi) == target:
            return hi
        return -1
| [
"39969716+paul0920@users.noreply.github.com"
] | 39969716+paul0920@users.noreply.github.com |
ed520b0d516ca6383e62b173f9f1ee63e68d876b | 8f6950feda7e58cb196971a56a51aef52377473e | /tracking/inference.py | 2c2f0267b111c543c3a83a2898ddcbaea3ab10a7 | [] | no_license | rajprateek/AI_BayesInference | 128d7773e0940028242735f801f749b152606534 | 213f2b89b24f575b73235f03797d582b6e5c3c9b | refs/heads/master | 2020-04-29T12:23:29.290416 | 2015-03-28T22:52:29 | 2015-03-28T22:52:29 | 33,053,040 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 26,114 | py | # inference.py
# ------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to
# http://inst.eecs.berkeley.edu/~cs188/pacman/pacman.html
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
import itertools
import util
import random
import busters
import game
class InferenceModule:
    """
    Abstract base for modules that track a belief distribution over one
    ghost's position.  Subclasses implement the actual inference; this
    class supplies shared plumbing for querying the game state.
    """

    ############################################
    # Useful methods for all inference modules #
    ############################################

    def __init__(self, ghostAgent):
        """Remember which ghost agent this module tracks."""
        self.ghostAgent = ghostAgent
        self.index = ghostAgent.index
        self.obs = []  # most recent noisy-distance observation

    def getJailPosition(self):
        """Prison cell for this ghost (one cell per ghost index)."""
        return (2 * self.ghostAgent.index - 1, 1)

    def getPositionDistribution(self, gameState):
        """
        Distribution over the ghost's successor positions from the given
        gameState.  The ghost must already have been placed in the state
        with setGhostPosition below.
        """
        currentPos = gameState.getGhostPosition(self.index)
        dist = util.Counter()
        for action, prob in self.ghostAgent.getDistribution(gameState).items():
            dist[game.Actions.getSuccessor(currentPos, action)] = prob
        return dist

    def setGhostPosition(self, gameState, ghostPosition):
        """
        Place the tracked ghost at ghostPosition in the supplied
        gameState and return it.

        Only this module's (deep-copied) view is altered -- the true
        game progression, and the noisy distance readings stored when
        the state was created, are unaffected.
        """
        conf = game.Configuration(ghostPosition, game.Directions.STOP)
        gameState.data.agentStates[self.index] = game.AgentState(conf, False)
        return gameState

    def observeState(self, gameState):
        """Pull this ghost's noisy distance reading and pass it to observe."""
        distances = gameState.getNoisyGhostDistances()
        if len(distances) >= self.index:  # skip when the observation is missing
            self.obs = distances[self.index - 1]
            self.observe(self.obs, gameState)

    def initialize(self, gameState):
        """Initialize beliefs uniformly over all legal positions."""
        # Legal positions exclude the ghost prison cells in the bottom left.
        self.legalPositions = [p for p in gameState.getWalls().asList(False) if p[1] > 1]
        self.initializeUniformly(gameState)

    ######################################
    # Methods that need to be overridden #
    ######################################

    def initializeUniformly(self, gameState):
        """Set the belief state to a uniform prior over all positions."""
        pass

    def observe(self, observation, gameState):
        """Update beliefs from a distance observation and gameState."""
        pass

    def elapseTime(self, gameState):
        """Update beliefs for one elapsing time step."""
        pass

    def getBeliefDistribution(self):
        """
        Return the current belief distribution over ghost locations,
        conditioned on all evidence so far.
        """
        pass
class ExactInference(InferenceModule):
"""
The exact dynamic inference module should use forward-algorithm
updates to compute the exact belief function at each time step.
"""
def initializeUniformly(self, gameState):
"Begin with a uniform distribution over ghost positions."
self.beliefs = util.Counter()
for p in self.legalPositions: self.beliefs[p] = 1.0
self.beliefs.normalize()
def observe(self, observation, gameState):
"""
Updates beliefs based on the distance observation and Pacman's position.
The noisyDistance is the estimated manhattan distance to the ghost you are tracking.
The emissionModel below stores the probability of the noisyDistance for any true
distance you supply. That is, it stores P(noisyDistance | TrueDistance).
self.legalPositions is a list of the possible ghost positions (you
should only consider positions that are in self.legalPositions).
A correct implementation will handle the following special case:
* When a ghost is captured by Pacman, all beliefs should be updated so
that the ghost appears in its prison cell, position self.getJailPosition()
You can check if a ghost has been captured by Pacman by
checking if it has a noisyDistance of None (a noisy distance
of None will be returned if, and only if, the ghost is
captured).
"""
noisyDistance = observation
emissionModel = busters.getObservationDistribution(noisyDistance)
pacmanPosition = gameState.getPacmanPosition()
"*** YOUR CODE HERE ***"
# Replace this code with a correct observation update
# Be sure to handle the "jail" edge case where the ghost is eaten
# and noisyDistance is None
allPossible = util.Counter()
for p in self.legalPositions:
trueDistance = util.manhattanDistance(p, pacmanPosition)
if (noisyDistance is None):
allPossible[p]=0
else:
if emissionModel[trueDistance] > 0: allPossible[p] = emissionModel[trueDistance]*self.beliefs[p]
if (noisyDistance is None):
allPossible[self.getJailPosition()] =1.0
"*** END YOUR CODE HERE ***"
allPossible.normalize()
self.beliefs = allPossible
def elapseTime(self, gameState):
    """
    Advance the belief distribution one time step.

    For every legal previous position, ask the (possibly
    Pacman-position-dependent) transition model where the ghost could
    move next, weight that distribution by the current belief in the
    previous position, and accumulate the results.  The accumulated
    counter is normalized and becomes the new belief state.

    Implementation detail: self.setGhostPosition places the tracked
    ghost at a candidate previous position so the transition model can
    be queried for that position.
    """
    updated = util.Counter()
    for prevPos in self.legalPositions:
        # Distribution over successor positions given the ghost was at prevPos.
        transition = self.getPositionDistribution(self.setGhostPosition(gameState, prevPos))
        priorWeight = self.beliefs[prevPos]
        for nextPos, transProb in transition.items():
            updated[nextPos] = updated.get(nextPos, 0) + transProb * priorWeight
    updated.normalize()
    self.beliefs = updated
def getBeliefDistribution(self):
    """Return the current belief state: a counter mapping each board
    position to the probability that the ghost occupies it."""
    return self.beliefs
class ParticleFilter(InferenceModule):
    """
    A particle filter for approximately tracking a single ghost.

    Beliefs are represented by an unweighted list of particles, each of
    which is simply a legal board position.  Useful helpers include
    random.choice (uniform choice from a list) and util.sample (sample a
    key from a Counter, treating its values as probabilities).
    """

    def __init__(self, ghostAgent, numParticles=300):
        InferenceModule.__init__(self, ghostAgent)
        self.setNumParticles(numParticles)

    def setNumParticles(self, numParticles):
        # Number of particles used to approximate the belief distribution.
        self.numParticles = numParticles

    def initializeUniformly(self, gameState):
        """
        Initialize self.listOfParticles with particles spread evenly (not
        randomly) across self.legalPositions, giving a uniform prior.

        Particles MUST be stored as a plain list of positions; storing
        them in a Counter/dict (with per-position weights) is incorrect.
        """
        # Floor division keeps the per-position count an integer under
        # both Python 2 and Python 3.  (The original "/" silently became
        # a float threshold under Python 3, changing how many particles
        # were created.)
        perPosition = self.numParticles // len(self.legalPositions)
        self.listOfParticles = []
        for position in self.legalPositions:
            self.listOfParticles.extend([position] * perPosition)

    def observe(self, observation, gameState):
        """
        Update the particle set from a noisy distance observation.

        Special cases handled:
          1) A noisyDistance of None means the ghost was captured: every
             particle is moved to self.getJailPosition().
          2) If all particles receive zero weight, the particle set is
             recreated from the uniform prior.
        """
        noisyDistance = observation
        emissionModel = busters.getObservationDistribution(noisyDistance)
        pacmanPosition = gameState.getPacmanPosition()
        if noisyDistance is None:
            # Captured ghost: all particles collapse to the jail cell.
            self.listOfParticles = [self.getJailPosition()] * self.numParticles
        else:
            # Weight each particle position by how well it explains the
            # observation; particles at the same position pool their
            # weight.  (The previous implementation shadowed the builtin
            # `list` and indexed dict.keys(), which breaks on Python 3.)
            weights = util.Counter()
            for position in self.listOfParticles:
                trueDistance = util.manhattanDistance(position, pacmanPosition)
                weights[position] += emissionModel[trueDistance]
            if weights.totalCount() == 0:
                # Evidence contradicted every particle: restart from the
                # uniform prior.
                self.initializeUniformly(gameState)
            else:
                weights.normalize()
                self.listOfParticles = [util.sample(weights)
                                        for _ in range(self.numParticles)]
        self.beliefs = self.getBeliefDistribution()

    def elapseTime(self, gameState):
        """
        Advance every particle one time step by sampling its successor
        from the transition model conditioned on the particle's position
        (and Pacman's known location).
        """
        movedParticles = []
        for oldPos in self.listOfParticles:
            newPosDist = self.getPositionDistribution(self.setGhostPosition(gameState, oldPos))
            movedParticles.append(util.sample(newPosDist))
        self.listOfParticles = movedParticles
        self.beliefs = self.getBeliefDistribution()

    def getBeliefDistribution(self):
        """
        Return the belief state as a normalized util.Counter mapping each
        position to the fraction of particles located there.
        """
        beliefs = util.Counter()
        for position in self.listOfParticles:
            beliefs[position] += 1.0 / self.numParticles
        self.beliefs = beliefs
        return self.beliefs
class MarginalInference(InferenceModule):
    """Wrapper around the shared JointInference module that exposes the
    marginal belief for a single ghost."""

    def initializeUniformly(self, gameState):
        """Set the belief state to its prior value; only the first
        ghost's wrapper initializes the shared joint module."""
        if self.index == 1:
            jointInference.initialize(gameState, self.legalPositions)
        jointInference.addGhostAgent(self.ghostAgent)

    def observeState(self, gameState):
        """Fold the latest distance observations into the joint belief
        (performed once, by the first ghost's wrapper)."""
        if self.index == 1:
            jointInference.observeState(gameState)

    def elapseTime(self, gameState):
        """Advance the joint belief one time step (performed once, by the
        first ghost's wrapper)."""
        if self.index == 1:
            jointInference.elapseTime(gameState)

    def getBeliefDistribution(self):
        """Sum the joint distribution over the other ghosts to obtain the
        marginal belief for this ghost."""
        marginal = util.Counter()
        jointDistribution = jointInference.getBeliefDistribution()
        for jointPositions, probability in jointDistribution.items():
            marginal[jointPositions[self.index - 1]] += probability
        return marginal
class JointParticleFilter:
    """
    Tracks a joint distribution over tuples of positions of ALL ghosts.

    Beliefs are represented by an unweighted list of particles; each
    particle is a tuple containing one board position per ghost.
    """

    def __init__(self, numParticles=600):
        self.setNumParticles(numParticles)

    def setNumParticles(self, numParticles):
        # Number of particles used to approximate the joint belief.
        self.numParticles = numParticles

    def initialize(self, gameState, legalPositions):
        """Store information about the game, then initialize particles."""
        self.numGhosts = gameState.getNumAgents() - 1
        self.ghostAgents = []
        self.legalPositions = legalPositions
        self.initializeParticles()

    def initializeParticles(self):
        """
        Initialize self.particles consistently with a uniform prior.

        The cartesian product of legal positions (ghosts may occupy the
        same square) is shuffled -- itertools.product returns a fixed
        order -- and then cycled until exactly self.numParticles
        particles exist.  Particles must be stored as a plain list.
        """
        import itertools
        import random
        jointAssignments = list(itertools.product(self.legalPositions, repeat=self.numGhosts))
        random.shuffle(jointAssignments)
        self.particles = []
        # Cycle through the shuffled assignments until the particle list
        # is full, then trim.  (The previous manual bookkeeping had an
        # off-by-one: it created numParticles - 1 particles whenever
        # numParticles was not an exact multiple of the assignment count.)
        while len(self.particles) < self.numParticles:
            self.particles.extend(jointAssignments)
        del self.particles[self.numParticles:]

    def addGhostAgent(self, agent):
        """Each ghost agent is registered separately and stored (in case they are different)."""
        self.ghostAgents.append(agent)

    def getJailPosition(self, i):
        # One jail cell per ghost, laid out along the bottom row.
        return (2 * i + 1, 1)

    def observeState(self, gameState):
        """
        Resample the particle set using the likelihood of the noisy
        distance observations for every ghost.

        Special cases handled:
          1) A ghost whose noisyDistance is None has been captured: it is
             placed in its jail cell in every particle.
          2) If every particle receives zero weight, the particles are
             recreated from the prior, after which captured ghosts are
             re-jailed in each particle.
        """
        pacmanPosition = gameState.getPacmanPosition()
        noisyDistances = gameState.getNoisyGhostDistances()
        if len(noisyDistances) < self.numGhosts: return
        emissionModels = [busters.getObservationDistribution(dist) for dist in noisyDistances]
        weights = util.Counter()
        for particle in self.particles:
            weight = 1.0
            for i in range(self.numGhosts):
                if noisyDistances[i] is None:
                    # Captured ghost: pin it to jail.  The observation is
                    # certain, so it contributes a factor of 1.
                    particle = self.getParticleWithGhostInJail(particle, i)
                else:
                    trueDistance = util.manhattanDistance(particle[i], pacmanPosition)
                    weight *= emissionModels[i][trueDistance]
            weights[particle] += weight
        if weights.totalCount() == 0:
            # Evidence contradicted every particle: restart from the
            # prior, then re-jail any ghosts known to be captured.
            self.initializeParticles()
            for i in range(self.numGhosts):
                if noisyDistances[i] is None:
                    self.particles = [self.getParticleWithGhostInJail(p, i)
                                      for p in self.particles]
        else:
            weights.normalize()
            self.particles = [util.sample(weights) for _ in range(self.numParticles)]

    def getParticleWithGhostInJail(self, particle, ghostIndex):
        """Return a copy of *particle* (a tuple) with ghost *ghostIndex*
        moved to its jail position."""
        particle = list(particle)
        particle[ghostIndex] = self.getJailPosition(ghostIndex)
        return tuple(particle)

    def elapseTime(self, gameState):
        """
        Sample each particle's successor state.

        For every ghost i, build the distribution over its next positions
        given the particle's full previous joint position (the transition
        model may depend on Pacman's known location), then draw the
        ghost's new position from that distribution.
        """
        newParticles = []
        for oldParticle in self.particles:
            newParticle = list(oldParticle)  # A list of ghost positions
            for i in range(self.numGhosts):
                # getPositionDistributionForGhost expects the 0-based
                # ghost index (it adds 1 internally, as its own comment
                # notes).  The previous code passed i + 1, which queried
                # the wrong ghost's position and indexed out of range for
                # the last ghost.
                newPosDist = getPositionDistributionForGhost(
                    setGhostPositions(gameState, list(oldParticle)), i, self.ghostAgents[i])
                newParticle[i] = util.sampleFromCounter(newPosDist)
            newParticles.append(tuple(newParticle))
        self.particles = newParticles

    def getBeliefDistribution(self):
        """Convert the particle list into a normalized util.Counter over
        joint position tuples."""
        beliefs = util.Counter()
        for particle in self.particles:
            beliefs[particle] += 1.0 / self.numParticles
        self.beliefs = beliefs
        return self.beliefs
# A single JointParticleFilter instance is shared globally across all
# MarginalInference instances (one wrapper per ghost).
jointInference = JointParticleFilter()
def getPositionDistributionForGhost(gameState, ghostIndex, agent):
    """
    Return a util.Counter over the positions the given ghost may move to
    next, according to the supplied agent and gameState.
    """
    # Agent index 0 is Pacman; callers use 0 for the first ghost, so the
    # index is shifted by one when querying the game state.
    currentPosition = gameState.getGhostPosition(ghostIndex + 1)
    positionDistribution = util.Counter()
    actionDistribution = agent.getDistribution(gameState)
    for action, probability in actionDistribution.items():
        nextPosition = game.Actions.getSuccessor(currentPosition, action)
        positionDistribution[nextPosition] = probability
    return positionDistribution
def setGhostPositions(gameState, ghostPositions):
    """Place every ghost at the corresponding entry of ghostPositions and
    return the (mutated) gameState."""
    for ghostIndex, position in enumerate(ghostPositions):
        configuration = game.Configuration(position, game.Directions.STOP)
        # Agent slot 0 is Pacman, so ghost i lives at slot i + 1.
        gameState.data.agentStates[ghostIndex + 1] = game.AgentState(configuration, False)
    return gameState
| [
"rajprateek@gatech.edu"
] | rajprateek@gatech.edu |
181a1ab6eeadd7134316b33e7db2d9fa161c70d8 | f48411683e966017ef5cb511f5be226bebb87742 | /15-HTTPRequests/http-class.py | afbadf19d826db4b04a2ab476e73b51ed1d80201 | [] | no_license | JacquesBeets/python-bootcamp | b9caef172cecf8d1a7aec86ad0fd8930fe9b7cb3 | 38a869fdd11b096a95021d0486e45a9d4d194cf9 | refs/heads/master | 2020-03-19T05:05:29.779517 | 2018-06-15T10:43:25 | 2018-06-15T10:43:25 | 135,898,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 840 | py | import requests
url = "https://icanhazdadjoke.com/"

# Request JSON explicitly; without the Accept header this API returns HTML.
response = requests.get(url, headers={"Accept": "application/json"})

if response.ok:
    print("========================================================================================\n")
    print(f"Your response to {url} came back with status code {response.status_code}\n")
    print("========================================================================================\n")
    # Parse the body only on success: a failed request is not guaranteed
    # to carry JSON and would not contain a 'joke' key.  (The original
    # code parsed and printed unconditionally, crashing on error
    # responses.)
    data = response.json()
    print(data['joke'])
else:
    print("===========================================================================================================\n")
    print(f"We encountered an error with the request to {url}. Status Code {response.status_code}\n")
    print("===========================================================================================================\n")
"jacques.beets@gmail.com"
] | jacques.beets@gmail.com |
884d53f18a0d62423dd609816415f397968d7fc4 | 8e7f852ec991fd9ad7facdb48f53484e8fbc43db | /ducttapp/api/user/list_user.py | c16a6c69b7b9f766163acc478c9ca14cbc69b834 | [] | no_license | anhducc13/flask-app | e1726da6194dbd71a24a8acc7346c4c44c20ad3c | 05aa051696c296f20df131da5af01b5cd8946620 | refs/heads/master | 2021-06-15T21:45:17.388569 | 2019-08-19T00:12:47 | 2019-08-19T00:12:47 | 194,702,388 | 0 | 1 | null | 2021-05-06T19:37:25 | 2019-07-01T15:50:21 | Python | UTF-8 | Python | false | false | 1,719 | py | from flask_restplus import Resource, fields
from flask_jwt_extended import jwt_required
from flask import request
from . import ns
from ducttapp import models, services
from ducttapp.helpers.decorators import user_management_required
# Serializer describing a single user record returned by the API.
user_model = ns.model(
    name='user_model',
    model={
        'id': fields.Integer(),
        'email': fields.String(),
        'username': fields.String(),
        'is_admin': fields.Boolean(),
        'is_active': fields.Boolean(),
        'updated_at': fields.DateTime(),
        'fullname': fields.String(),
        'phone_number': fields.String(),
        'gender': fields.Boolean(),
        'birthday': fields.DateTime(),
        'avatar': fields.String(),
        # NOTE(review): presumably a list of role ids -- confirm against
        # the service layer.
        'roles': fields.List(fields.Integer())
    }
)

# Paginated envelope: total row count plus the current page of users.
user_list_model = ns.model('ListUserModel', {
    'total': fields.Integer,
    'results': fields.List(fields.Nested(user_model))
})
@ns.route('/list/')
class UserList(Resource):
    """Paginated, searchable, sortable listing of user accounts."""

    @ns.marshal_with(user_list_model)
    @ns.doc(
        params={
            '_page': 'page number',
            '_limit': 'size in page',
            'q': 'key search',
            '_sort': 'Sort field',
            '_order': 'Sort type',
            'is_active': 'Filter status'
        }
    )
    @jwt_required
    @user_management_required
    def get(self):
        """Return one page of users matching the query-string filters."""
        args = request.args
        # Fall back to a default whenever a parameter is absent or empty.
        page = args.get('_page') or 1
        limit = args.get('_limit') or 10
        keyword = args.get('q') or ''
        sort_field = args.get('_sort') or 'username'
        sort_order = args.get('_order') or 'descend'
        active_filters = args.getlist('is_active[]')
        return services.admin.get_all_users(
            page, limit, keyword, sort_field, sort_order, active_filters)
| [
"trantienduc10@gmail.com"
] | trantienduc10@gmail.com |
14f22e821064a45449db0b40104e10a512772137 | 83f5674b39413032c40982f11a59901cb8c68031 | /models/es/blogger.py | 6744bef473ecccbc709c3df88ba0945658e08d51 | [] | no_license | Jade-sweet/mini_crawler | 7de0c158b3dc5753e0728952f13a2ba0badf88bb | 948bd3d681fa9980ccc2a3e382988dc0b99cfcf6 | refs/heads/master | 2023-01-30T05:42:52.591215 | 2020-12-12T10:32:14 | 2020-12-12T10:32:14 | 320,804,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 908 | py | # -*- coding: utf-8 -*-
from elasticsearch_dsl import Document, Keyword, Text, Integer
from elasticsearch_dsl.connections import connections
from conf.elasticSearchSettings import ELASTICSEARCH_SERVER
from utils.elasticSearchHelper import executeCommand
# Establish the global Elasticsearch connection (used implicitly by the
# Document subclasses defined below) against the first configured server.
connections.create_connection(hosts=[ELASTICSEARCH_SERVER[0]])
class SinaBloggerType(Document):
    """
    Elasticsearch document mapping for a Sina (Weibo) blogger profile.
    """
    uid = Keyword()             # blogger's unique id (also the document id)
    name = Text()               # display name
    headPortrait = Keyword()    # avatar / profile-image URL
    introduction = Text()       # self-description / bio
    fansCount = Integer()       # number of followers
    attentionCount = Integer()  # number of accounts the blogger follows
    postCount = Integer()       # number of posts
    storageTime = Keyword()     # when this record was stored

    class Index:
        """
        Index configuration; the index name is sina_blogger_info.
        """
        name = 'sina_blogger_info'
        settings = {
            "number_of_shards": 2,
        }

    @staticmethod
    def toSave(item):
        # Persist the item into the sina_blogger_info index, keyed by uid.
        # NOTE(review): assumes dict(item) yields the document body and
        # that item exposes a .uid attribute -- confirm with callers.
        body = dict(item)
        executeCommand('sina_blogger_info', '_doc', item.uid, body)
| [
"1092824310@qq.com"
] | 1092824310@qq.com |
a7eeda35b40746f1096f785416c9b8d3a352dd5f | 442ff7dac3ace0a78166afe16f408d48ffe8e2bc | /hand_pose_estimators/CVPR2020_hand3d/data/RHD_dataset.py | 625af0df6ff66fac64ca0773c090485078990573 | [] | no_license | Whiskysu/mm-hand | 02adb9140b45e09e599c229296298ee00d8c0d68 | 1af2ea3a0787a3f38742dceb39afc39d0825f370 | refs/heads/master | 2023-08-27T23:59:43.491273 | 2020-10-07T18:05:59 | 2020-10-07T18:05:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,022 | py | import numpy as np
import cv2
import pickle
import os
from easydict import EasyDict as edict
import sys
# torch is used throughout this module (torch.tensor, torch.from_numpy,
# torch.stack, ...) but was never imported; without it the module raises
# NameError at runtime.
import torch
# Per-bone label palette for the joint/segmentation map: each entry is a
# 3-channel "color" whose channels all carry the same integer id (1-16),
# so the rendered map doubles as a per-bone label image.
PALM_COLOR = [1] * 3
THUMB_COLOR1 = [2] * 3
THUMB_COLOR2 = [3] * 3
THUMB_COLOR3 = [4] * 3
INDEX_COLOR1 = [5] * 3
INDEX_COLOR2 = [6] * 3
INDEX_COLOR3 = [7] * 3
MIDDLE_COLOR1 = [8] * 3
MIDDLE_COLOR2 = [9] * 3
MIDDLE_COLOR3 = [10] * 3
RING_COLOR1 = [11] * 3
RING_COLOR2 = [12] * 3
RING_COLOR3 = [13] * 3
PINKY_COLOR1 = [14] * 3
PINKY_COLOR2 = [15] * 3
PINKY_COLOR3 = [16] * 3
def generate_jointsmap(uv_coord, depth, width, height, channel=3):
    """
    Rasterize a 21-keypoint hand skeleton into a per-bone label map.

    Each bone is drawn as a filled ellipse ("capsule") carrying a
    per-bone integer label; where bones overlap, the bone whose average
    endpoint depth is smaller (nearer the camera) wins via a simple
    z-buffer.

    Args:
        uv_coord: indexable collection of 21 (x, y) image coordinates.
        depth: indexable collection of 21 per-joint depth values.
        width, height: size of the output map in pixels.
        channel: number of (identical) output channels.

    Returns:
        (height, width, channel) float array of bone labels; background 0.
    """
    INF = sys.maxsize
    canvas = np.zeros((height, width, channel))                # label output
    zbuffer = np.full((height, width, channel), INF, dtype=np.float64)
    bones = [
        ((0, 17), [160] * channel),
        ((0, 1), [170] * channel),
        ((0, 5), [180] * channel),
        ((0, 9), [190] * channel),
        ((0, 13), [200] * channel),
        ((17, 18), [130] * channel),
        ((18, 19), [140] * channel),
        ((19, 20), [150] * channel),
        ((1, 2), [10] * channel),
        ((2, 3), [20] * channel),
        ((3, 4), [30] * channel),
        ((5, 6), [40] * channel),
        ((6, 7), [50] * channel),
        ((7, 8), [60] * channel),
        ((9, 10), [70] * channel),
        ((10, 11), [80] * channel),
        ((11, 12), [90] * channel),
        ((13, 14), [100] * channel),
        ((14, 15), [110] * channel),
        ((15, 16), [120] * channel),
    ]
    for connection, color in bones:
        bone_layer = np.full(zbuffer.shape, INF, dtype=np.float64)
        x1, y1 = uv_coord[connection[0]]
        x2, y2 = uv_coord[connection[1]]
        avg_depth = (depth[connection[0]] + depth[connection[1]]) / 2
        center_x = (x1 + x2) / 2.0
        center_y = (y1 + y2) / 2.0
        length = ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
        # np.math.* was an undocumented, deprecated alias removed in
        # NumPy >= 1.25; use the vectorized equivalents instead.
        angle = float(np.degrees(np.arctan2(y1 - y2, x1 - x2)))
        radius = 5
        polygon = cv2.ellipse2Poly((int(center_x), int(center_y)),
                                   (int(length / 2), radius),
                                   int(angle), 0, 360, 1)
        cv2.fillConvexPoly(bone_layer, polygon, [avg_depth] * channel)
        # Pixels where this bone is strictly nearer than anything drawn
        # so far.  (Testing "zbuffer == avg_depth" AFTER taking the
        # minimum, as the original did, could relabel unrelated pixels
        # whose stored depth happened to equal this bone's average.)
        winner = bone_layer < zbuffer
        zbuffer = np.minimum(zbuffer, bone_layer)
        canvas[winner] = color[0]
    return canvas
class RHDdataset():
    """
    Dataset wrapper for Rendered Hand Pose (RHD) style data prepared by
    the project's preprocessing scripts.

    Expects ``opt.dataroot`` to contain ``annotation.pickle`` plus image
    folders (``color``, ``depth``, ``mask``) whose folder and file names
    are the keys of the pickle.
    """

    def __init__(self, opt):
        """Load the annotation pickle and index all image paths.

        :param opt: parsed options object; ``dataroot``, ``isTrain`` and
            ``data_mode`` are read by this class.
        """
        super(RHDdataset, self).__init__()
        self.opt = opt
        self.root_dir = self.opt.dataroot
        with open(os.path.join(self.root_dir, "annotation.pickle"), "rb") as handle:
            self.annotations = pickle.load(handle)
        self.color_images = []
        self.depth_images = []
        self.mask_images = []
        # The pickle is keyed by folder name ("color"/"depth"/"mask"),
        # then by image file name; mirror that layout into the three
        # per-folder path lists above.
        for folder in self.annotations.keys():
            for image in self.annotations[folder].keys():
                img_path = os.path.join(self.root_dir, folder, image)
                attr = getattr(self, "{}_images".format(folder))
                attr.append(img_path)

    def __len__(self):
        return len(self.color_images)

    def __getitem__(self, item):
        # Dispatch on train/test mode and the configured data mode.
        if self.opt.isTrain:
            if self.opt.data_mode == 'hpm2d':
                return self.get_item_kp(item)
            elif self.opt.data_mode == 'hpm3d':
                return self.get_item_z(item)
            else:
                return self.get_item_aligned(item)
        else:
            return self.get_item_test(item)

    def get_item_test(self, item):
        """Build an evaluation batch in the hand3d convention: a leading
        batch axis of 1 and keypoints reordered via :meth:`convert`."""
        image_path = self.color_images[item]
        image_labels = self.get_labels(image_path)
        # Normalize the RGB image to roughly [-0.5, 0.5].
        image = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB) / 255 - 0.5
        keypoint_xyz21 = image_labels['xyz']
        keypoint_xyz21 = self.convert(keypoint_xyz21)
        keypoint_vis21 = np.ones(21)
        keypoint_xyz21_rel = keypoint_xyz21 - keypoint_xyz21[0]
        # Scale reference: length between keypoints 11 and 12 after
        # reordering (the index finger's root bone in hand3d ordering).
        index_root_bone_length = np.linalg.norm(keypoint_xyz21_rel[12] -
                                                keypoint_xyz21_rel[11])
        keypoint_uv21 = self.convert(image_labels['uv_coord'])
        batch = {}
        batch['image'] = np.expand_dims(image, 0)
        batch['keypoint_xyz21'] = np.expand_dims(keypoint_xyz21, 0)
        batch['keypoint_scale'] = np.expand_dims(index_root_bone_length, 0)
        batch['keypoint_vis21'] = np.expand_dims(keypoint_vis21, 0)
        batch['keypoint_uv21'] = keypoint_uv21
        return batch

    @staticmethod
    def convert(xyz: np.ndarray):
        """Reorder the 21 keypoints to hand3d's joint definition."""
        mapping = [0, 4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9,
                   16, 15, 14, 13, 20, 19, 18, 17]
        return xyz[mapping]

    def get_item_z(self, item):
        """Training pair for the 3-D branch: keypoint heatmaps (A) and
        per-joint depth (B)."""
        image_path = self.color_images[item]
        image_labels = self.get_labels(image_path)
        joint_imgs = self.get_heatmaps(image_labels['uv_coord'], [256, 256], 5)
        batch = {}
        batch['A'] = joint_imgs
        batch['B'] = torch.tensor(image_labels['depth'])
        return batch

    def get_item_kp(self, item):
        """Training pair for the 2-D branch: RGB image (A) and keypoint
        heatmaps (B)."""
        image_path = self.color_images[item]
        image = self.to_tensor(self.norm(cv2.imread(image_path), 'bgr'))
        image_labels = self.get_labels(image_path)
        joint_imgs = self.get_heatmaps(image_labels['uv_coord'], [256, 256], 5)
        batch = {}
        batch['A'] = image
        batch['B'] = joint_imgs
        return batch

    def get_item_aligned(self, item):
        """Training pair: rendered skeleton label map (A) and the
        corresponding RGB image (B)."""
        color_image_path = self.color_images[item]
        color_image = self.to_tensor(self.norm(cv2.imread(color_image_path), 'bgr'))
        color_labels = self.get_labels(color_image_path)
        joint_map = self.to_tensor(self.norm(
            generate_jointsmap(color_labels['uv_coord'], color_labels['depth'],
                               256, 256, 3), 'bgr'))
        batch = {}
        batch['A'] = joint_map
        batch['B'] = color_image
        return batch

    def _get_item_default(self, item):
        # NOTE(review): self.mode is never assigned anywhere in this
        # class -- confirm whether this legacy entry point is still used.
        if self.mode == "Aligned":
            return self._get_item_aligned(item)

    def _get_item_aligned(self, item):
        """Legacy paired sample: masked RGB image plus decoded depth
        image, with the full label dictionaries attached."""
        color_image_path = self.color_images[item]
        depth_image_path = self.depth_images[item]
        color_image = self.to_tensor(self.norm(
            cv2.imread(color_image_path), 'bgr',
            mask=cv2.imread(self.mask_images[item], cv2.IMREAD_GRAYSCALE)))
        depth_image = self.to_tensor(self.norm(cv2.imread(depth_image_path), 'depth'))
        color_labels = self.get_labels(color_image_path)
        depth_labels = self.get_labels(depth_image_path)
        batch = dict()
        if self.opt.heatmaps:
            batch["heatmaps"] = self.get_heatmaps(color_labels['uv_coord'], color_image.shape[1:])
        color_labels['depth'] = torch.squeeze(torch.from_numpy(self.norm(color_labels['depth'], 'linear')), -1)
        depth_labels['depth'] = torch.squeeze(torch.from_numpy(self.norm(depth_labels['depth'], 'linear')), -1)
        color_labels['uv_coord'] = torch.from_numpy(color_labels['uv_coord']).type(torch.FloatTensor)
        depth_labels['uv_coord'] = torch.from_numpy(depth_labels['uv_coord']).type(torch.FloatTensor)
        batch["A"] = color_image
        batch["B"] = depth_image
        batch["labelsA"] = color_labels
        batch["labelsB"] = depth_labels
        batch["A_paths"] = color_image_path
        batch["B_paths"] = depth_image_path
        return batch

    def get_heatmaps(self, uv_coords, shape, sigma=5):
        """Stack one Gaussian heatmap per keypoint into a single tensor.

        :param uv_coords: iterable of (x, y) image coordinates.
        :param shape: (height, width) of each heatmap.
        :param sigma: Gaussian spread.  Defaults to 5, the value every
            explicit caller in this file passes; ``_get_item_aligned``
            previously crashed with a TypeError because it omitted this
            required argument.
        """
        heatmaps = []
        for x, y in uv_coords:
            heatmaps.append(
                self.to_tensor(
                    self.gen_heatmap(x, y, shape, sigma).astype(np.float32)))
        heatmaps = torch.stack(heatmaps)
        heatmaps = heatmaps.squeeze(1)
        return heatmaps

    @staticmethod
    def to_tensor(image):
        """Convert an HWC numpy image into a CHW torch tensor.

        A 3-channel input is assumed to be BGR and is converted to RGB;
        a single-channel input gains a leading channel axis.
        """
        shape = image.shape
        if shape[-1] == 3:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            image = image.transpose(2, 0, 1)
            image = torch.from_numpy(image)
        else:
            # Grayscale: add a channel axis.
            image = torch.from_numpy(image)
            image = image.unsqueeze(0)
        return image

    def norm(self, input, encoding, mask=None):
        """
        Normalize a given image.

        :param input: image array, or a raw depth map for 'linear'.
        :param encoding: 'bgr' (min-max normalize, optionally masked),
            'depth' (STB-style depth packed in the image's r/g channels),
            or 'linear' (plain min-max normalization).
        :param mask: optional 8-bit mask applied after 'bgr' normalization.
        :return: float32 normalized array.
        :raises NotImplementedError: for an unknown encoding.
        """
        if encoding == 'bgr':
            norm_image = cv2.normalize(input, None, alpha=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
            if mask is not None:
                norm_image = cv2.bitwise_and(norm_image, norm_image, mask=mask)
            return norm_image
        elif encoding == 'depth':
            # Depth decoding formula given by the dataset authors:
            # depth = r + 256 * g.
            b, g, r = input[:, :, 0], input[:, :, 1], input[:, :, 2]
            input = r + g * 256.0
            norm_image = cv2.normalize(input, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
            return norm_image
        elif encoding == 'linear':
            return cv2.normalize(input, None, 0, 1, cv2.NORM_MINMAX, dtype=cv2.CV_32F)
        else:
            # The original "raise NotImplemented" raised a TypeError
            # (NotImplemented is a sentinel value, not an exception).
            raise NotImplementedError("unknown encoding: {}".format(encoding))

    def get_labels(self, image_path):
        """Look up the annotation dict for an image path.

        NOTE(review): splits on '/' even though paths are built with
        os.path.join -- this would break on Windows.
        """
        *_, folder, name = image_path.split('/')
        return self.annotations[folder][name]

    def gen_heatmap(self, x, y, shape, sigma):
        """Single-keypoint heatmap (based on the DGGAN description): a
        Gaussian centered on (x, y), clamped to at most 1 with near-zero
        values (< 0.0099) cut to exactly 0."""
        center_map = self.gaussian_kernel(shape[0], shape[1], x, y, sigma)
        center_map[center_map > 1] = 1
        center_map[center_map < 0.0099] = 0
        return center_map

    @staticmethod
    def draw(image, uv_coord, bbox=None):
        """
        Draw numbered keypoints (and an optional bounding box) onto an
        image, in place, for debugging.

        :param bbox: optional (x_min, x_max, y_min, y_max) box.
        :return: the annotated image.
        """
        for i, p in enumerate(uv_coord):
            x, y = p
            cv2.circle(image, (int(x), int(y)), 2, 255, 1)
            cv2.putText(image, str(i), (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 0.8, 255)
        if bbox is not None:
            cv2.rectangle(image, (bbox[0], bbox[3]), (bbox[1], bbox[2]), 255, 2)
        return image

    @staticmethod
    def gaussian_kernel(width, height, x, y, sigma):
        """Dense 2-D Gaussian of the given size with peak value 1 at (x, y)."""
        gridy, gridx = np.mgrid[0:height, 0:width]
        D2 = (gridx - x) ** 2 + (gridy - y) ** 2
        return np.exp(-D2 / 2.0 / sigma / sigma)
if __name__ == "__main__":
    # Ad-hoc visual smoke test: pull one batch from the configured
    # dataset and dump its 'A' image two ways (via torchvision and via
    # OpenCV after tensor2im) for manual inspection.
    from options.train_options import TrainOptions
    from data import create_dataset
    from util import util
    import cv2
    from torchvision.utils import save_image
    opt = TrainOptions().parse()
    dataset = create_dataset(opt)
    for data in dataset:
        image = data['A']
        save_image(image, "test_data2.png", normalize=True, range=(0, 255))
        image_numpy = util.tensor2im(image)
        cv2.imwrite("test_data3.png", image_numpy)
        # Only the first batch is needed for a smoke test.
        break
| [
"wuzhenyusjtu@gmail.com"
] | wuzhenyusjtu@gmail.com |
fec9af424e5192af1a758a4184341390bd59a6f7 | b47fb5884e25ec189ab123c620fc651702774e61 | /assets/migrations/0001_initial.py | 4c5e27db2ad79e50e511e7dbbf26e703ea3e135a | [] | no_license | cs4224485/CMDB | e4782ac81b8c8394a1445c4a9f85777f7859354d | 41710e97fc79ae228f9f654fc5879910e91e1e25 | refs/heads/master | 2020-09-20T18:16:41.479379 | 2019-11-28T02:49:57 | 2019-11-28T02:49:57 | 224,557,148 | 0 | 0 | null | 2019-11-28T02:52:40 | 2019-11-28T02:49:28 | JavaScript | UTF-8 | Python | false | false | 19,877 | py | # Generated by Django 2.1.1 on 2019-06-28 02:36
from django.conf import settings
import django.contrib.auth.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial, auto-generated schema for the CMDB asset-management app
    # (generated by Django's `makemigrations`). Do not hand-edit the schema
    # here — create a follow-up migration instead.

    initial = True

    dependencies = [
        ('auth', '0009_alter_user_last_name_max_length'),
    ]

    operations = [
        # Asset: the master record every concrete device/software row hangs off.
        migrations.CreateModel(
            name='Asset',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('asset_type', models.CharField(choices=[('server', '服务器'), ('networkdevice', '网络设备'), ('storagedevice', '存储设备'), ('securitydevice', '安全设备'), ('securitydevice', '机房设备'), ('software', '软件资产')], default='server', max_length=64)),
                ('name', models.CharField(max_length=64, unique=True)),
                ('sn', models.CharField(max_length=128, unique=True, verbose_name='资产SN号')),
                ('management_ip', models.GenericIPAddressField(blank=True, null=True, verbose_name='管理IP')),
                ('trade_date', models.DateField(blank=True, null=True, verbose_name='购买时间')),
                ('expire_date', models.DateField(blank=True, null=True, verbose_name='过保修期')),
                ('price', models.FloatField(blank=True, null=True, verbose_name='价格')),
                ('status', models.SmallIntegerField(choices=[(0, '在线'), (1, '已下线'), (2, '未知'), (3, '故障'), (4, '备用')], default=0)),
                ('memo', models.TextField(blank=True, null=True, verbose_name='备注')),
                ('create_date', models.DateTimeField(auto_now_add=True)),
                ('update_date', models.DateTimeField(auto_now=True)),
            ],
            options={
                'verbose_name': '资产总表',
                'verbose_name_plural': '资产总表',
            },
        ),
        migrations.CreateModel(
            name='BusinessUnit',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=64, unique=True, verbose_name='业务线')),
                ('memo', models.CharField(blank=True, max_length=64, verbose_name='备注')),
                ('parent_unit', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='parent_level', to='assets.BusinessUnit')),
            ],
            options={
                'verbose_name': '业务线',
                'verbose_name_plural': '业务线',
            },
        ),
        migrations.CreateModel(
            name='Contract',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sn', models.CharField(max_length=128, unique=True, verbose_name='合同号')),
                ('name', models.CharField(max_length=64, verbose_name='合同名称')),
                ('memo', models.TextField(blank=True, null=True, verbose_name='备注')),
                ('price', models.IntegerField(verbose_name='合同金额')),
                ('detail', models.TextField(blank=True, null=True, verbose_name='合同详细')),
                ('start_date', models.DateField(blank=True)),
                ('end_date', models.DateField(blank=True)),
                ('license_num', models.IntegerField(blank=True, verbose_name='license数量')),
                ('create_date', models.DateField(auto_now_add=True)),
                ('update_date', models.DateField(auto_now=True)),
            ],
            options={
                'verbose_name': '合同',
                'verbose_name_plural': '合同',
            },
        ),
        # Hardware component tables: each row belongs to exactly one Asset.
        migrations.CreateModel(
            name='CPU',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('cpu_model', models.CharField(blank=True, max_length=128, verbose_name='CPU型号')),
                ('cpu_count', models.SmallIntegerField(verbose_name='物理cpu个数')),
                ('cpu_core_count', models.SmallIntegerField(verbose_name='cpu核数')),
                ('memo', models.TextField(blank=True, null=True, verbose_name='备注')),
                ('create_date', models.DateTimeField(auto_now_add=True)),
                ('update_date', models.DateTimeField(blank=True, null=True)),
                ('asset', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='assets.Asset')),
            ],
            options={
                'verbose_name': 'CPU部件',
                'verbose_name_plural': 'CPU部件',
            },
        ),
        migrations.CreateModel(
            name='Disk',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sn', models.CharField(blank=True, max_length=128, null=True, verbose_name='SN号')),
                ('slot', models.CharField(max_length=64, verbose_name='插槽位')),
                ('model', models.CharField(blank=True, max_length=128, null=True, verbose_name='磁盘型号')),
                ('capacity', models.FloatField(verbose_name='磁盘容量GB')),
                ('iface_type', models.CharField(choices=[('SATA', 'SATA'), ('SAS', 'SAS'), ('SCSI', 'SCSI'), ('SSD', 'SSD')], default='SAS', max_length=64, verbose_name='接口类型')),
                ('memo', models.TextField(blank=True, null=True, verbose_name='备注')),
                ('create_date', models.DateTimeField(auto_now_add=True)),
                ('update_date', models.DateTimeField(blank=True, null=True)),
                ('asset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='assets.Asset')),
            ],
            options={
                'verbose_name': '硬盘',
                'verbose_name_plural': '硬盘',
            },
        ),
        migrations.CreateModel(
            name='EventLog',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, verbose_name='事件名称')),
                ('event_type', models.SmallIntegerField(choices=[(1, '硬件变更'), (2, '新增配件'), (3, '设备下线'), (4, '设备上线'), (5, '定期维护'), (6, '业务上线\\更新\\变更'), (7, '其它')], verbose_name='事件类型')),
                ('component', models.CharField(blank=True, max_length=255, null=True, verbose_name='事件子项')),
                ('detail', models.TextField(verbose_name='事件详情')),
                ('date', models.DateTimeField(auto_now_add=True, verbose_name='事件时间')),
                ('memo', models.TextField(blank=True, null=True, verbose_name='备注')),
                ('asset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='assets.Asset')),
            ],
            options={
                'verbose_name': '事件纪录',
                'verbose_name_plural': '事件纪录',
            },
        ),
        migrations.CreateModel(
            name='IDC',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=64, unique=True, verbose_name='机房名称')),
                ('memo', models.CharField(blank=True, max_length=128, null=True, verbose_name='备注')),
            ],
            options={
                'verbose_name': '机房',
                'verbose_name_plural': '机房',
            },
        ),
        migrations.CreateModel(
            name='Manufactory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('manufactory', models.CharField(max_length=64, unique=True, verbose_name='厂商名称')),
                ('support_num', models.CharField(blank=True, max_length=30, verbose_name='支持电话')),
                ('memo', models.CharField(blank=True, max_length=128, verbose_name='备注')),
            ],
            options={
                'verbose_name': '厂商',
                'verbose_name_plural': '厂商',
            },
        ),
        migrations.CreateModel(
            name='NetworkDevice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sub_asset_type', models.SmallIntegerField(choices=[(0, '路由器'), (1, '交换机'), (2, '负载均衡'), (4, 'VPN设备')], default=0, verbose_name='网络设备类型')),
                ('vlan_ip', models.GenericIPAddressField(blank=True, null=True, verbose_name='VlanIP')),
                ('intranet_ip', models.GenericIPAddressField(blank=True, null=True, verbose_name='内网IP')),
                ('model', models.CharField(blank=True, max_length=128, null=True, verbose_name='型号')),
                ('firmware', models.CharField(blank=True, max_length=128, null=True, verbose_name='固件')),
                ('port_num', models.SmallIntegerField(blank=True, null=True, verbose_name='端口个数')),
                ('device_detail', models.TextField(blank=True, null=True, verbose_name='设置详细配置')),
                ('asset', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='assets.Asset')),
            ],
            options={
                'verbose_name': '网络设备',
                'verbose_name_plural': '网络设备',
            },
        ),
        migrations.CreateModel(
            name='NIC',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=64, null=True, verbose_name='网卡名')),
                ('sn', models.CharField(blank=True, max_length=128, null=True, verbose_name='SN号')),
                ('model', models.CharField(blank=True, max_length=128, null=True, verbose_name='网卡型号')),
                ('macaddress', models.CharField(max_length=64, unique=True, verbose_name='MAC')),
                ('ipaddress', models.GenericIPAddressField(blank=True, null=True, verbose_name='IP')),
                ('netmask', models.CharField(blank=True, max_length=64, null=True)),
                ('bonding', models.CharField(blank=True, max_length=64, null=True)),
                ('memo', models.CharField(blank=True, max_length=128, null=True, verbose_name='备注')),
                ('create_date', models.DateTimeField(auto_now_add=True)),
                ('update_date', models.DateTimeField(blank=True, null=True)),
                ('asset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='assets.Asset')),
            ],
            options={
                'verbose_name': '网卡',
                'verbose_name_plural': '网卡',
            },
        ),
        migrations.CreateModel(
            name='RaidAdaptor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sn', models.CharField(blank=True, max_length=128, null=True, verbose_name='SN号')),
                ('slot', models.CharField(max_length=64, verbose_name='插口')),
                ('model', models.CharField(blank=True, max_length=64, null=True, verbose_name='型号')),
                ('memo', models.TextField(blank=True, null=True, verbose_name='备注')),
                ('create_date', models.DateTimeField(auto_now_add=True)),
                ('update_date', models.DateTimeField(blank=True, null=True)),
                ('asset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='assets.Asset')),
            ],
        ),
        migrations.CreateModel(
            name='RAM',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sn', models.CharField(blank=True, max_length=128, null=True, verbose_name='SN号')),
                ('model', models.CharField(max_length=128, verbose_name='内存型号')),
                ('slot', models.CharField(max_length=64, verbose_name='插槽')),
                ('capacity', models.IntegerField(verbose_name='内存大小(MB)')),
                ('memo', models.CharField(blank=True, max_length=128, null=True, verbose_name='备注')),
                ('create_date', models.DateTimeField(auto_now_add=True)),
                ('update_date', models.DateTimeField(blank=True, null=True)),
                ('asset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='assets.Asset')),
            ],
            options={
                'verbose_name': 'RAM',
                'verbose_name_plural': 'RAM',
            },
        ),
        migrations.CreateModel(
            name='SecurityDevice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sub_asset_type', models.SmallIntegerField(choices=[(0, '防火墙'), (1, '入侵检测设备'), (2, '互联网网关'), (4, '运维审计系统')], default=0, verbose_name='服务器类型')),
                ('asset', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='assets.Asset')),
            ],
        ),
        migrations.CreateModel(
            name='Server',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sub_asset_type', models.SmallIntegerField(choices=[(0, 'PC服务器'), (1, '刀片机'), (2, '小型机')], default=0, verbose_name='服务器类型')),
                ('created_by', models.CharField(choices=[('auto', 'Auto'), ('manual', 'Manual')], default='auto', max_length=32)),
                ('model', models.CharField(blank=True, max_length=128, null=True, verbose_name='型号')),
                ('raid_type', models.CharField(blank=True, max_length=512, null=True, verbose_name='raid类型')),
                ('os_type', models.CharField(blank=True, max_length=64, null=True, verbose_name='操作系统类型')),
                ('os_distribution', models.CharField(blank=True, max_length=64, null=True, verbose_name='发型版本')),
                ('os_release', models.CharField(blank=True, max_length=64, null=True, verbose_name='操作系统版本')),
                ('asset', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='assets.Asset')),
                ('hosted_on', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='hosted_on_server', to='assets.Server')),
            ],
            options={
                'verbose_name': '服务器',
                'verbose_name_plural': '服务器',
            },
        ),
        migrations.CreateModel(
            name='Software',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sub_asset_type', models.SmallIntegerField(choices=[(0, 'OS'), (1, '办公\\开发软件'), (2, '业务软件')], default=0, verbose_name='服务器类型')),
                ('license_num', models.IntegerField(verbose_name='授权数')),
                ('version', models.CharField(help_text='eg. CentOS release 6.5 (Final)', max_length=64, unique=True, verbose_name='软件/系统版本')),
            ],
            options={
                'verbose_name': '软件/系统',
                'verbose_name_plural': '软件/系统',
            },
        ),
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=32, unique=True, verbose_name='Tag name')),
                ('create_date', models.DateField(auto_now_add=True)),
            ],
        ),
        # UserProfile extends the built-in auth User via multi-table inheritance.
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('user_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
                ('name', models.CharField(max_length=32, verbose_name='姓名')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            bases=('auth.user',),
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
        # Foreign keys added after model creation to break the circular
        # dependency between Asset/Tag/EventLog and UserProfile.
        migrations.AddField(
            model_name='tag',
            name='creator',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='assets.UserProfile'),
        ),
        migrations.AddField(
            model_name='eventlog',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='assets.UserProfile', verbose_name='事件源'),
        ),
        migrations.AddField(
            model_name='asset',
            name='admin',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='assets.UserProfile', verbose_name='资产管理员'),
        ),
        migrations.AddField(
            model_name='asset',
            name='business_unit',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='assets.BusinessUnit', verbose_name='所属业务线'),
        ),
        migrations.AddField(
            model_name='asset',
            name='contract',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='assets.Contract', verbose_name='合同'),
        ),
        migrations.AddField(
            model_name='asset',
            name='idc',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='assets.IDC', verbose_name='IDC机房'),
        ),
        migrations.AddField(
            model_name='asset',
            name='manufactory',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='assets.Manufactory', verbose_name='制造商'),
        ),
        migrations.AddField(
            model_name='asset',
            name='tags',
            field=models.ManyToManyField(blank=True, to='assets.Tag'),
        ),
        # Composite uniqueness: a physical slot can hold only one component per asset,
        # and a MAC address appears only once per asset.
        migrations.AlterUniqueTogether(
            name='ram',
            unique_together={('asset', 'slot')},
        ),
        migrations.AlterUniqueTogether(
            name='raidadaptor',
            unique_together={('asset', 'slot')},
        ),
        migrations.AlterUniqueTogether(
            name='nic',
            unique_together={('asset', 'macaddress')},
        ),
        migrations.AlterUniqueTogether(
            name='disk',
            unique_together={('asset', 'slot')},
        ),
    ]
| [
"414804000@qq.com"
] | 414804000@qq.com |
0095d1257f4e58f49ca655484cf6dc68f2f557e0 | 52d4280866bf3b5ea61aa2d1e77705bb4fa1b11f | /setup/doctype/company/company.py | c3fd3566578a0d3da7f99d205736c3d954cf6537 | [] | no_license | rangsitww/test3 | b382deec071c0eb40cc4ebc152156b74bf55d0ab | a342b868c29c80f1c0814bdb674413c8c94c929f | refs/heads/master | 2022-12-14T09:12:44.292566 | 2020-09-11T17:45:37 | 2020-09-11T17:45:37 | 294,721,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,781 | py | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, os, json
from frappe import _
from frappe.utils import get_timestamp
from frappe.utils import cint, today, formatdate
import frappe.defaults
from frappe.cache_manager import clear_defaults_cache
from frappe.model.document import Document
from frappe.contacts.address_and_contact import load_address_and_contact
from frappe.utils.nestedset import NestedSet
from past.builtins import cmp
import functools
class Company(NestedSet):
    """ERPNext Company doctype controller.

    Companies form a tree (NestedSet) via `parent_company`; creating a company
    bootstraps its chart of accounts, warehouses, cost centers and country
    fixtures as on_update side effects.
    """
    # Field NestedSet uses to build the company tree.
    nsm_parent_field = 'parent_company'

    def onload(self):
        """Attach address/contact info and a transactions-exist flag for the form view."""
        load_address_and_contact(self, "company")
        self.get("__onload")["transactions_exist"] = self.check_if_transactions_exist()

    def check_if_transactions_exist(self):
        """Return True if any submitted sales/purchase document references this company."""
        exists = False
        for doctype in ["Sales Invoice", "Delivery Note", "Sales Order", "Quotation",
            "Purchase Invoice", "Purchase Receipt", "Purchase Order", "Supplier Quotation"]:
                # docstatus=1 means submitted; LIMIT 1 keeps the probe cheap.
                if frappe.db.sql("""select name from `tab%s` where company=%s and docstatus=1
                    limit 1""" % (doctype, "%s"), self.name):
                    exists = True
                    break

        return exists

    def validate(self):
        """Run all field-level validations; default accounts are only (re)set for new companies."""
        self.update_default_account = False
        if self.is_new():
            self.update_default_account = True
        self.validate_abbr()
        self.validate_default_accounts()
        self.validate_currency()
        self.validate_coa_input()
        self.validate_perpetual_inventory()
        self.check_country_change()
        self.set_chart_of_accounts()

    def validate_abbr(self):
        """Derive the abbreviation from the name when missing and enforce uniqueness."""
        if not self.abbr:
            # Default abbreviation: first letter of each word, upper-cased.
            self.abbr = ''.join([c[0] for c in self.company_name.split()]).upper()

        self.abbr = self.abbr.strip()

        # if self.get('__islocal') and len(self.abbr) > 5:
        # 	frappe.throw(_("Abbreviation cannot have more than 5 characters"))

        if not self.abbr.strip():
            frappe.throw(_("Abbreviation is mandatory"))

        if frappe.db.sql("select abbr from tabCompany where name!=%s and abbr=%s", (self.name, self.abbr)):
            frappe.throw(_("Abbreviation already used for another company"))

    def create_default_tax_template(self):
        """Create the country-specific default sales tax template for this company."""
        from erpnext.setup.setup_wizard.operations.taxes_setup import create_sales_tax
        create_sales_tax({
            'country': self.country,
            'company_name': self.name
        })

    def validate_default_accounts(self):
        """Ensure every configured default account actually belongs to this company."""
        accounts = [
            "default_bank_account", "default_cash_account",
            "default_receivable_account", "default_payable_account",
            "default_expense_account", "default_income_account",
            "stock_received_but_not_billed", "stock_adjustment_account",
            "expenses_included_in_valuation", "default_payroll_payable_account"
        ]

        for field in accounts:
            if self.get(field):
                for_company = frappe.db.get_value("Account", self.get(field), "company")
                if for_company != self.name:
                    frappe.throw(_("Account {0} does not belong to company: {1}").format(self.get(field), self.name))

    def validate_currency(self):
        """Block a default-currency change once transactions exist."""
        if self.is_new():
            return
        self.previous_default_currency = frappe.get_cached_value('Company',  self.name,  "default_currency")
        if self.default_currency and self.previous_default_currency and \
            self.default_currency != self.previous_default_currency and \
            self.check_if_transactions_exist():
                frappe.throw(_("Cannot change company's default currency, because there are existing transactions. Transactions must be cancelled to change the default currency."))

    def on_update(self):
        """Bootstrap accounts, warehouses, fixtures and defaults after save."""
        NestedSet.on_update(self)
        # Only build the chart of accounts once — i.e. when none exists yet.
        if not frappe.db.sql("""select name from tabAccount
                where company=%s and docstatus<2 limit 1""", self.name):
            if not frappe.local.flags.ignore_chart_of_accounts:
                frappe.flags.country_change = True
                self.create_default_accounts()
                self.create_default_warehouses()

        if frappe.flags.country_change:
            install_country_fixtures(self.name)
            self.create_default_tax_template()

        if not frappe.db.get_value("Department", {"company": self.name}):
            from erpnext.setup.setup_wizard.operations.install_fixtures import install_post_company_fixtures
            install_post_company_fixtures(frappe._dict({'company_name': self.name}))

        if not frappe.db.get_value("Cost Center", {"is_group": 0, "company": self.name}):
            self.create_default_cost_center()

        if not frappe.local.flags.ignore_chart_of_accounts:
            self.set_default_accounts()
            if self.default_cash_account:
                self.set_mode_of_payment_account()

        if self.default_currency:
            frappe.db.set_value("Currency", self.default_currency, "enabled", 1)

        # Keep the per-site perpetual-inventory cache in sync with this company.
        if hasattr(frappe.local, 'enable_perpetual_inventory') and \
            self.name in frappe.local.enable_perpetual_inventory:
            frappe.local.enable_perpetual_inventory[self.name] = self.enable_perpetual_inventory

        frappe.clear_cache()

    def create_default_warehouses(self):
        """Create the standard warehouse tree (All Warehouses > Stores/WIP/Finished Goods)."""
        for wh_detail in [
            {"warehouse_name": _("All Warehouses"), "is_group": 1},
            {"warehouse_name": _("Stores"), "is_group": 0},
            {"warehouse_name": _("Work In Progress"), "is_group": 0},
            {"warehouse_name": _("Finished Goods"), "is_group": 0}]:

            if not frappe.db.exists("Warehouse", "{0} - {1}".format(wh_detail["warehouse_name"], self.abbr)):
                warehouse = frappe.get_doc({
                    "doctype":"Warehouse",
                    "warehouse_name": wh_detail["warehouse_name"],
                    "is_group": wh_detail["is_group"],
                    "company": self.name,
                    # Leaf warehouses hang under "All Warehouses"; the group root has no parent.
                    "parent_warehouse": "{0} - {1}".format(_("All Warehouses"), self.abbr) \
                        if not wh_detail["is_group"] else ""
                })
                warehouse.flags.ignore_permissions = True
                warehouse.flags.ignore_mandatory = True
                warehouse.insert()

    def create_default_accounts(self):
        """Build the chart of accounts and pick the first receivable/payable leaves as defaults."""
        from erpnext.accounts.doctype.account.chart_of_accounts.chart_of_accounts import create_charts
        frappe.local.flags.ignore_root_company_validation = True
        create_charts(self.name, self.chart_of_accounts, self.existing_company)

        frappe.db.set(self, "default_receivable_account", frappe.db.get_value("Account",
            {"company": self.name, "account_type": "Receivable", "is_group": 0}))
        frappe.db.set(self, "default_payable_account", frappe.db.get_value("Account",
            {"company": self.name, "account_type": "Payable", "is_group": 0}))

    def validate_coa_input(self):
        """Normalize the chart-of-accounts source fields depending on the chosen mode."""
        if self.create_chart_of_accounts_based_on == "Existing Company":
            self.chart_of_accounts = None
            if not self.existing_company:
                frappe.throw(_("Please select Existing Company for creating Chart of Accounts"))

        else:
            self.existing_company = None
            self.create_chart_of_accounts_based_on = "Standard Template"
            if not self.chart_of_accounts:
                self.chart_of_accounts = "Standard"

    def validate_perpetual_inventory(self):
        """Warn (non-blocking) when perpetual inventory is enabled without a default inventory account."""
        if not self.get("__islocal"):
            if cint(self.enable_perpetual_inventory) == 1 and not self.default_inventory_account:
                frappe.msgprint(_("Set default inventory account for perpetual inventory"),
                    alert=True, indicator='orange')

    def check_country_change(self):
        """Set frappe.flags.country_change when the country field changed on an existing company."""
        frappe.flags.country_change = False

        if not self.get('__islocal') and \
            self.country != frappe.get_cached_value('Company',  self.name,  'country'):
            frappe.flags.country_change = True

    def set_chart_of_accounts(self):
        ''' If parent company is set, chart of accounts will be based on that company '''
        if self.parent_company:
            self.create_chart_of_accounts_based_on = "Existing Company"
            self.existing_company = self.parent_company

    def set_default_accounts(self):
        """Fill empty default-account fields by looking up well-known accounts by type/name."""
        default_accounts = {
            "default_cash_account": "Cash",
            "default_bank_account": "Bank",
            "round_off_account": "Round Off",
            "accumulated_depreciation_account": "Accumulated Depreciation",
            "depreciation_expense_account": "Depreciation",
            "capital_work_in_progress_account": "Capital Work in Progress",
            "asset_received_but_not_billed": "Asset Received But Not Billed",
            "expenses_included_in_asset_valuation": "Expenses Included In Asset Valuation"
        }

        if self.enable_perpetual_inventory:
            default_accounts.update({
                "stock_received_but_not_billed": "Stock Received But Not Billed",
                "default_inventory_account": "Stock",
                "stock_adjustment_account": "Stock Adjustment",
                "expenses_included_in_valuation": "Expenses Included In Valuation",
                "default_expense_account": "Cost of Goods Sold"
            })

        # Only auto-fill defaults on newly created companies (see validate()).
        if self.update_default_account:
            for default_account in default_accounts:
                self._set_default_account(default_account, default_accounts.get(default_account))

        if not self.default_income_account:
            income_account = frappe.db.get_value("Account",
                {"account_name": _("Sales"), "company": self.name, "is_group": 0})

            if not income_account:
                income_account = frappe.db.get_value("Account",
                    {"account_name": _("Sales Account"), "company": self.name})

            self.db_set("default_income_account", income_account)

        # NOTE(review): writes the field back to its own (empty) value — looks like
        # a no-op carried over from upstream; confirm before changing.
        if not self.default_payable_account:
            self.db_set("default_payable_account", self.default_payable_account)

        if not self.default_payroll_payable_account:
            payroll_payable_account = frappe.db.get_value("Account",
                {"account_name": _("Payroll Payable"), "company": self.name, "is_group": 0})

            self.db_set("default_payroll_payable_account", payroll_payable_account)

        if not self.default_employee_advance_account:
            employe_advance_account = frappe.db.get_value("Account",
                {"account_name": _("Employee Advances"), "company": self.name, "is_group": 0})

            self.db_set("default_employee_advance_account", employe_advance_account)

        if not self.write_off_account:
            write_off_acct = frappe.db.get_value("Account",
                {"account_name": _("Write Off"), "company": self.name, "is_group": 0})

            self.db_set("write_off_account", write_off_acct)

        if not self.exchange_gain_loss_account:
            exchange_gain_loss_acct = frappe.db.get_value("Account",
                {"account_name": _("Exchange Gain/Loss"), "company": self.name, "is_group": 0})

            self.db_set("exchange_gain_loss_account", exchange_gain_loss_acct)

        if not self.disposal_account:
            disposal_acct = frappe.db.get_value("Account",
                {"account_name": _("Gain/Loss on Asset Disposal"), "company": self.name, "is_group": 0})

            self.db_set("disposal_account", disposal_acct)

    def _set_default_account(self, fieldname, account_type):
        """Set *fieldname* to the company's first leaf account of *account_type*, if unset."""
        if self.get(fieldname):
            return

        account = frappe.db.get_value("Account", {"account_type": account_type, "is_group": 0, "company": self.name})

        if account:
            self.db_set(fieldname, account)

    def set_mode_of_payment_account(self):
        """Link the company's default cash account to the 'Cash' Mode of Payment, once."""
        cash = frappe.db.get_value('Mode of Payment', {'type': 'Cash'}, 'name')
        if cash and self.default_cash_account \
                and not frappe.db.get_value('Mode of Payment Account', {'company': self.name, 'parent': cash}):
            mode_of_payment = frappe.get_doc('Mode of Payment', cash)
            mode_of_payment.append('accounts', {
                'company': self.name,
                'default_account': self.default_cash_account
            })
            mode_of_payment.save(ignore_permissions=True)

    def create_default_cost_center(self):
        """Create the root cost center (company name) and a 'Main' leaf, and set them as defaults."""
        cc_list = [
            {
                'cost_center_name': self.name,
                'company':self.name,
                'is_group': 1,
                'parent_cost_center':None
            },
            {
                'cost_center_name':_('Main'),
                'company':self.name,
                'is_group':0,
                'parent_cost_center':self.name + ' - ' + self.abbr
            },
        ]
        for cc in cc_list:
            cc.update({"doctype": "Cost Center"})
            cc_doc = frappe.get_doc(cc)
            cc_doc.flags.ignore_permissions = True

            if cc.get("cost_center_name") == self.name:
                cc_doc.flags.ignore_mandatory = True
            cc_doc.insert()

        frappe.db.set(self, "cost_center", _("Main") + " - " + self.abbr)
        frappe.db.set(self, "round_off_cost_center", _("Main") + " - " + self.abbr)
        frappe.db.set(self, "depreciation_cost_center", _("Main") + " - " + self.abbr)

    def after_rename(self, olddn, newdn, merge=False):
        """Keep company_name and user defaults in sync after a document rename."""
        frappe.db.set(self, "company_name", newdn)

        frappe.db.sql("""update `tabDefaultValue` set defvalue=%s
            where defkey='Company' and defvalue=%s""", (newdn, olddn))

        clear_defaults_cache()

    def abbreviate(self):
        """Regenerate the abbreviation from the first letters of the company name."""
        self.abbr = ''.join([c[0].upper() for c in self.company_name.split()])

    def on_trash(self):
        """
        Trash accounts and cost centers for this company if no gl entry exists
        """
        NestedSet.validate_if_child_exists(self)
        frappe.utils.nestedset.update_nsm(self)

        rec = frappe.db.sql("SELECT name from `tabGL Entry` where company = %s", self.name)
        if not rec:
            frappe.db.sql("""delete from `tabBudget Account`
                where exists(select name from tabBudget
                    where name=`tabBudget Account`.parent and company = %s)""", self.name)

            for doctype in ["Account", "Cost Center", "Budget", "Party Account"]:
                frappe.db.sql("delete from `tab{0}` where company = %s".format(doctype), self.name)

        if not frappe.db.get_value("Stock Ledger Entry", {"company": self.name}):
            frappe.db.sql("""delete from `tabWarehouse` where company=%s""", self.name)

        frappe.defaults.clear_default("company", value=self.name)
        for doctype in ["Mode of Payment Account", "Item Default"]:
            frappe.db.sql("delete from `tab{0}` where company = %s".format(doctype), self.name)

        # clear default accounts, warehouses from item
        warehouses = frappe.db.sql_list("select name from tabWarehouse where company=%s", self.name)
        if warehouses:
            frappe.db.sql("""delete from `tabItem Reorder` where warehouse in (%s)"""
                % ', '.join(['%s']*len(warehouses)), tuple(warehouses))

        # reset default company
        frappe.db.sql("""update `tabSingles` set value=""
            where doctype='Global Defaults' and field='default_company'
            and value=%s""", self.name)

        # reset default company
        frappe.db.sql("""update `tabSingles` set value=""
            where doctype='Chart of Accounts Importer' and field='company'
            and value=%s""", self.name)

        # delete BOMs
        boms = frappe.db.sql_list("select name from tabBOM where company=%s", self.name)
        if boms:
            frappe.db.sql("delete from tabBOM where company=%s", self.name)
            for dt in ("BOM Operation", "BOM Item", "BOM Scrap Item", "BOM Explosion Item"):
                frappe.db.sql("delete from `tab%s` where parent in (%s)"""
                    % (dt, ', '.join(['%s']*len(boms))), tuple(boms))

        frappe.db.sql("delete from tabEmployee where company=%s", self.name)
        frappe.db.sql("delete from tabDepartment where company=%s", self.name)
        frappe.db.sql("delete from `tabTax Withholding Account` where company=%s", self.name)
        frappe.db.sql("delete from `tabSales Taxes and Charges Template` where company=%s", self.name)
        frappe.db.sql("delete from `tabPurchase Taxes and Charges Template` where company=%s", self.name)
@frappe.whitelist()
def enqueue_replace_abbr(company, old, new):
    """Queue replace_abbr() as a background job rather than running it in-request."""
    frappe.enqueue(
        'erpnext.setup.doctype.company.company.replace_abbr',
        company=company, old=old, new=new)
@frappe.whitelist()
def replace_abbr(company, old, new):
    """Replace the company abbreviation and rename every dependent record.

    Renames Warehouse/Account/Cost Center/Department/tax-template records whose
    name ends in the old abbreviation so they end in *new* instead.
    """
    new = new.strip()
    if not new:
        frappe.throw(_("Abbr can not be blank or space"))

    frappe.only_for("System Manager")

    frappe.db.set_value("Company", company, "abbr", new)

    def _rename_record(doc):
        # doc is a 1-tuple (name,); only rename when the suffix matches the old abbr
        # (or the name has no " - " suffix at all).
        parts = doc[0].rsplit(" - ", 1)
        if len(parts) == 1 or parts[1].lower() == old.lower():
            # NOTE: `dt` resolves to the loop variable of the for-loop below
            # (closure over the enclosing scope), not a parameter.
            frappe.rename_doc(dt, doc[0], parts[0] + " - " + new, force=True)

    def _rename_records(dt):
        # rename is expensive so let's be economical with memory usage
        doc = (d for d in frappe.db.sql("select name from `tab%s` where company=%s" % (dt, '%s'), company))
        for d in doc:
            _rename_record(d)

    for dt in ["Warehouse", "Account", "Cost Center", "Department",
            "Sales Taxes and Charges Template", "Purchase Taxes and Charges Template"]:
        _rename_records(dt)
    frappe.db.commit()
def get_name_with_abbr(name, company):
    """Return *name* suffixed with the company abbreviation, adding it only if absent."""
    abbr = frappe.get_cached_value('Company', company, "abbr")
    segments = name.split(" - ")

    # Append the abbr unless the name already ends with it (case-insensitive).
    if segments[-1].lower() != abbr.lower():
        segments.append(abbr)

    return " - ".join(segments)
def install_country_fixtures(company):
    """Run the regional setup module for the company's country, if one exists.

    Looks for erpnext/regional/<country>/setup.py and calls its setup(company, False).
    Failures are logged and re-raised as a user-facing error.
    """
    company_doc = frappe.get_doc("Company", company)
    path = frappe.get_app_path('erpnext', 'regional', frappe.scrub(company_doc.country))
    if os.path.exists(path.encode("utf-8")):
        try:
            module_name = "erpnext.regional.{0}.setup.setup".format(frappe.scrub(company_doc.country))
            frappe.get_attr(module_name)(company_doc, False)
        except Exception as e:
            frappe.log_error(str(e), frappe.get_traceback())
            frappe.throw(_("Failed to setup defaults for country {0}. Please contact support@erpnext.com").format(frappe.bold(company_doc.country)))
def update_company_current_month_sales(company):
    """Recompute and store the company's submitted sales-invoice total for the current month.

    Writes the sum of base_grand_total into Company.total_monthly_sales
    (0 when there are no submitted invoices this month).
    """
    current_month_year = formatdate(today(), "MM-yyyy")

    # company is inlined via frappe.db.escape (returns a quoted literal),
    # so the only bind parameter here is the month string.
    results = frappe.db.sql('''
        SELECT
            SUM(base_grand_total) AS total,
            DATE_FORMAT(`posting_date`, '%m-%Y') AS month_year
        FROM
            `tabSales Invoice`
        WHERE
            DATE_FORMAT(`posting_date`, '%m-%Y') = '{current_month_year}'
            AND docstatus = 1
            AND company = {company}
        GROUP BY
            month_year
    '''.format(current_month_year=current_month_year, company=frappe.db.escape(company)),
        as_dict = True)

    monthly_total = results[0]['total'] if len(results) > 0 else 0

    frappe.db.set_value("Company", company, "total_monthly_sales", monthly_total)
def update_company_monthly_sales(company):
    '''Cache past year monthly sales of every company based on sales invoices'''
    from frappe.utils.goal import get_monthly_results
    import json

    # Submitted, non-draft invoices only; company is escaped into the filter string.
    filter_str = "company = {0} and status != 'Draft' and docstatus=1".format(frappe.db.escape(company))
    month_to_value_dict = get_monthly_results("Sales Invoice", "base_grand_total",
        "posting_date", filter_str, "sum")

    # Stored as a JSON string on the Company doc for dashboard charts.
    frappe.db.set_value("Company", company, "sales_monthly_history", json.dumps(month_to_value_dict))
def update_transactions_annual_history(company, commit=False):
    """Recompute the company's per-day transaction counts and cache them as JSON.

    When *commit* is True the change is committed immediately (used when called
    outside a request transaction, e.g. from get_timeline_data).
    """
    history = get_all_transactions_annual_history(company)
    frappe.db.set_value(
        "Company", company, "transactions_annual_history", json.dumps(history))
    if commit:
        frappe.db.commit()
def cache_companies_monthly_sales_history():
    """Refresh the cached monthly-sales and annual-transaction history of every company.

    Intended to be run from the scheduler; commits once after all companies are done.
    """
    for row in frappe.get_list("Company"):
        company_name = row['name']
        update_company_monthly_sales(company_name)
        update_transactions_annual_history(company_name)
    frappe.db.commit()
@frappe.whitelist()
def get_children(doctype, parent=None, company=None, is_root=False):
    """Return child companies of *parent* for the tree view.

    :param doctype: tree doctype name (interpolated into the table name).
    :param parent: parent company name; None or "All Companies" means root level.
    :param company: unused, accepted for tree-view API compatibility.
    :param is_root: unused, accepted for tree-view API compatibility.
    :return: list of dicts with `value` (name) and `expandable` (is_group).
    """
    # PEP 8: compare to None with `is`, not `==`.
    if parent is None or parent == "All Companies":
        parent = ""

    # SECURITY NOTE: `doctype` is formatted directly into the SQL table name and is
    # not escaped — callers are whitelisted UI code, but this should be validated
    # against known tree doctypes before widening access.
    return frappe.db.sql("""
        select
            name as value,
            is_group as expandable
        from
            `tab{doctype}` comp
        where
            ifnull(parent_company, "")={parent}
        """.format(
            doctype = doctype,
            parent=frappe.db.escape(parent)
        ), as_dict=1)
@frappe.whitelist()
def add_node():
	"""Create a new Company node from the tree-view form arguments.

	Reads the request's form dict, normalizes it via `make_tree_args`,
	and treats the synthetic "All Companies" root as no parent.
	"""
	from frappe.desk.treeview import make_tree_args

	node_args = make_tree_args(**frappe.form_dict)

	if node_args.parent_company == 'All Companies':
		node_args.parent_company = None

	frappe.get_doc(node_args).insert()
def get_all_transactions_annual_history(company):
	"""Return per-day document counts for *company* over the past year.

	Counts Quotations, Sales Orders, Delivery Notes, Sales Invoices,
	Issues and Projects created/posted in the last 365 days.

	Returns:
		dict: {unix_timestamp_of_day: document_count}
	"""
	items = frappe.db.sql('''
		select transaction_date, count(*) as count
		from (
			select name, transaction_date, company
			from `tabQuotation`
			UNION ALL
			select name, transaction_date, company
			from `tabSales Order`
			UNION ALL
			select name, posting_date as transaction_date, company
			from `tabDelivery Note`
			UNION ALL
			select name, posting_date as transaction_date, company
			from `tabSales Invoice`
			UNION ALL
			select name, creation as transaction_date, company
			from `tabIssue`
			UNION ALL
			select name, creation as transaction_date, company
			from `tabProject`
		) t
		where
			company=%s
		and
			transaction_date > date_sub(curdate(), interval 1 year)
		group by
			transaction_date
		''', (company,), as_dict=True)  # explicit 1-tuple instead of bare (company)

	# Key each day's count by its unix timestamp (heatmap/dashboard format).
	return {get_timestamp(d["transaction_date"]): d["count"] for d in items}
def get_timeline_data(doctype, name):
	'''returns timeline data based on linked records in dashboard'''

	def _load(raw):
		# A usable cache entry is a JSON object string; treat anything
		# else (empty, malformed, non-object) as a cache miss.
		if raw and '{' in raw:
			try:
				return json.loads(raw)
			except ValueError:
				return None
		return None

	cached = _load(frappe.get_cached_value('Company', name, "transactions_annual_history"))
	if cached is not None:
		return cached

	# Cache miss: rebuild the annual history (with commit) and read it back.
	update_transactions_annual_history(name, True)
	history = frappe.get_cached_value('Company', name, "transactions_annual_history")
	return json.loads(history) if history and '{' in history else {}
@frappe.whitelist()
def get_default_company_address(name, sort_key='is_primary_address', existing_address=None):
	"""Return the name of the default Address linked to Company *name*.

	Args:
		name: Company name.
		sort_key: which flag wins — 'is_primary_address' or
			'is_shipping_address'; any other value returns None.
		existing_address: if given and still linked to the company,
			it is returned unchanged.

	Returns:
		str | None: address name, or None when nothing matches.
	"""
	# Whitelist check also prevents injecting arbitrary SQL via sort_key,
	# which is interpolated as a column name below.
	if sort_key not in ['is_shipping_address', 'is_primary_address']:
		return None

	out = frappe.db.sql(""" SELECT
			addr.name, addr.%s
		FROM
			`tabAddress` addr, `tabDynamic Link` dl
		WHERE
			dl.parent = addr.name and dl.link_doctype = 'Company' and
			dl.link_name = %s and ifnull(addr.disabled, 0) = 0
		""" %(sort_key, '%s'), (name,)) #nosec

	if existing_address:
		if existing_address in [d[0] for d in out]:
			return existing_address

	if out:
		# Pick the address whose flag value is highest (first on ties, same
		# as the old descending stable sort). The old code relied on the
		# Python-2-only builtin `cmp`; `max` with a key is the py3 form.
		return max(out, key=lambda addr: addr[1])[0]
	else:
		return None
"rangsit.tm@gmail.com"
] | rangsit.tm@gmail.com |
8430cc72e5197e5860a888821e49c94d25cf10d5 | 46df6cf9190bdd2120e5bab6f64e320fbafaf578 | /07_youdao.py | 5f1c602ef7a4733576c1289f208ddbc4ed9983af | [] | no_license | jupiter555/spider | 4b5f50c6ba8de67dd19bc01533dfd43164299e5b | 8aac13210aac668c1f65685ddcb8f7e70a78b4ad | refs/heads/master | 2020-09-11T02:45:16.328119 | 2019-11-21T00:30:57 | 2019-11-21T00:30:57 | 221,915,319 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,240 | py | import requests
import time
import random
from hashlib import md5
class YoudaoSpider(object):
    """Scraper for the Youdao web-translation endpoint.

    Reproduces the browser's anti-scraping signature: ``sign`` is the MD5
    of a fixed client id + the word + a timestamp-based salt + a fixed
    secret, all captured from the site's JavaScript.
    """

    def __init__(self):
        # Endpoint captured from the browser's network panel (F12).
        self.post_url = 'http://fanyi.youdao.com/translate_o?smartresult=dict&smartresult=rule'
        self.proxies = {
            'http': 'http://309435365:szayclhp@120.26.167.133:16817',
            'https': 'https://309435365:szayclhp@120.26.167.133:16817'
        }
        self.headers = {
            # The three headers the server checks most aggressively.
            "Cookie": "OUTFOX_SEARCH_USER_ID=-1503804236@10.108.160.19; OUTFOX_SEARCH_USER_ID_NCOO=909810129.8233626; JSESSIONID=aaaZeLbI5jXLT_44xqU5w; ___rl__test__cookies=1573807897878",
            "Referer": "http://fanyi.youdao.com/",
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36",
        }

    def get_salt_sign_ts(self, word):
        """Build the (ts, salt, sign) triple the endpoint expects.

        ts is the current time in milliseconds; salt appends one random
        digit; sign is the MD5 hexdigest of the signed payload.
        """
        ts = str(int(time.time() * 1000))
        salt = ts + str(random.randint(0, 9))
        payload = "fanyideskweb" + word + salt + "n%A-rKaT5fb[Gy?;N5@Tj"
        sign = md5(payload.encode()).hexdigest()
        return ts, salt, sign

    def attack_youdao(self, word):
        """POST *word* to the translation endpoint and print the result."""
        ts, salt, sign = self.get_salt_sign_ts(word)
        formdata = {
            "i": word,
            "from": "AUTO",
            "to": "AUTO",
            "smartresult": "dict",
            "client": "fanyideskweb",
            "salt": salt,
            "sign": sign,
            "ts": ts,
            "bv": "57d46cf581e5c43f8109a84cf9227e5e",
            "doctype": "json",
            "version": "2.1",
            "keyfrom": "fanyi.web",
            "action": "FY_BY_REALTlME",
        }
        response = requests.post(
            url=self.post_url,
            data=formdata,
            headers=self.headers
        ).json()
        print('翻译结果:', response['translateResult'][0][0]['tgt'])

    def run(self):
        """Prompt for a word on stdin and translate it."""
        self.attack_youdao(input('请输入要翻译的单词:'))
if __name__ == '__main__':
    # Interactive entry point: prompt for a word and translate it.
    YoudaoSpider().run()
| [
"noreply@github.com"
] | jupiter555.noreply@github.com |
d894aba882ce315c64cd83f4449c90e0cc5f8af1 | 2ed9c7634072be34f29975875b04cd8fea5d68c6 | /TestCases/Register/test_register_page_case.py | 193d93e540e696bcbc1645cb738e0eaca29bfba4 | [] | no_license | a550306923/ui_auto_frame_v2 | a6f48e619beaf11e66bf4c658bf6854ab207af26 | 2ce5fe6cacdacb6c7ed56b06cdf6e202ba0e88d6 | refs/heads/master | 2022-09-18T21:58:34.137446 | 2020-06-04T10:05:18 | 2020-06-04T10:05:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,533 | py | # coding:utf-8
import pytest
import allure
from Base.assert_method import Assert_method
@allure.feature("Register_page_case")
class Test_register_page_case:
    """UI test cases DLZC7-DLZC12 for the registration page.

    Page objects (`login_page_class_load`, `register_page_class_load`) and
    the WebDriver (`function_driver`) are injected by pytest fixtures;
    every case opens the login page and clicks through to the register
    page before asserting. Failed assertions capture a screenshot via
    `Assert_method`.
    """

    @allure.story("Register")
    @allure.severity("normal")
    @allure.description("测试登录")
    @allure.link("https://www.baidu.com", name="连接跳转百度")
    @allure.testcase("https://www.sina.com", name="测试用例位置")
    @allure.title("执行测试用例用于登录模块")
    def test_DLZC7(self, login_page_class_load, register_page_class_load, function_driver):
        # The username input's attribute (placeholder text) should read
        # "手机号码" (mobile number) on the register page.
        login_page_class_load.login_by_config_url()
        login_page_class_load.click_register_btn()
        username_input_attribute_value = register_page_class_load.get_username_attribute_value()
        Assert_method.assert_equal_screen_shot(function_driver, (username_input_attribute_value, "手机号码"))

    @allure.story("Register")
    @allure.severity("normal")
    @allure.description("测试登录")
    @allure.link("https://www.baidu.com", name="连接跳转百度")
    @allure.testcase("https://www.sina.com", name="测试用例位置")
    @allure.title("执行测试用例用于登录模块")
    def test_DLZC8(self, login_page_class_load, register_page_class_load, function_driver):
        # Clicking the "other register" button should navigate away; the
        # page-check helper is expected to report False afterwards.
        login_page_class_load.login_by_config_url()
        login_page_class_load.click_register_btn()
        register_page_class_load.click_other_register_btn()
        res = register_page_class_load.check_page_is_other_page()
        Assert_method.assert_false_screen_shot(function_driver, res)

    @allure.story("Register")
    @allure.severity("normal")
    @allure.description("测试登录")
    @allure.link("https://www.baidu.com", name="连接跳转百度")
    @allure.testcase("https://www.sina.com", name="测试用例位置")
    @allure.title("执行测试用例用于登录模块")
    def test_DLZC9(self, login_page_class_load, register_page_class_load, function_driver):
        # The "back to login" link should land on a page titled "登录" (login).
        login_page_class_load.login_by_config_url()
        login_page_class_load.click_register_btn()
        register_page_class_load.click_login_btn()
        login_title = register_page_class_load.get_login_page_title()
        Assert_method.assert_equal_screen_shot(function_driver, (login_title, "登录"))

    @allure.story("Register")
    @allure.severity("normal")
    @allure.description("测试登录")
    @allure.link("https://www.baidu.com", name="连接跳转百度")
    @allure.testcase("https://www.sina.com", name="测试用例位置")
    @allure.title("执行测试用例用于登录模块")
    def test_DLZC10(self, login_page_class_load, register_page_class_load, function_driver):
        # Submit the register form with all fields empty and check the
        # resulting error text.
        # NOTE(review): the expected value "登录" ("login") looks copy-pasted
        # from test_DLZC9 — an empty-form submit should presumably expect a
        # validation message (cf. test_DLZC11's "用户名不能为空"); confirm.
        login_page_class_load.login_by_config_url()
        login_page_class_load.click_register_btn()
        register_page_class_load.click_register_btn()
        error_text = register_page_class_load.get_error_text()
        Assert_method.assert_equal_screen_shot(function_driver, (error_text, "登录"))

    @allure.story("Register")
    @allure.severity("normal")
    @allure.description("测试登录")
    @allure.link("https://www.baidu.com", name="连接跳转百度")
    @allure.testcase("https://www.sina.com", name="测试用例位置")
    @allure.title("执行测试用例用于登录模块")
    def test_DLZC11(self, login_page_class_load, register_page_class_load, function_driver):
        # Requesting a verification code without a username should show
        # "用户名不能为空" (username must not be empty).
        login_page_class_load.login_by_config_url()
        login_page_class_load.click_register_btn()
        register_page_class_load.click_code_btn()
        error_text = register_page_class_load.get_error_text()
        Assert_method.assert_equal_screen_shot(function_driver, (error_text, "用户名不能为空"))

    @allure.story("Register")
    @allure.severity("normal")
    @allure.description("测试登录")
    @allure.link("https://www.baidu.com", name="连接跳转百度")
    @allure.testcase("https://www.sina.com", name="测试用例位置")
    @allure.title("执行测试用例用于登录模块")
    def test_DLZC12(self, login_page_class_load, register_page_class_load, function_driver):
        # Registering with an invalid phone number (literal 1) should show
        # "手机号码格式不正确" (phone number format incorrect).
        login_page_class_load.login_by_config_url()
        login_page_class_load.click_register_btn()
        register_page_class_load.username_send_keys(1)
        register_page_class_load.click_register_btn()
        error_text = register_page_class_load.get_error_text()
        Assert_method.assert_equal_screen_shot(function_driver, (error_text, "手机号码格式不正确"))
if __name__ == '__main__':
    # Allow running this module directly instead of via the pytest CLI.
    pytest.main(["test_register_page_case.py"])
| [
"848257135@qq.com"
] | 848257135@qq.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.