blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
efc7afc0ee9bfd7f2b6a3219c8f25345badca682 | 37ae9741d39012e517dbbff98669bd4b18149b6c | /machine.py | 5371509d6b1dbb05b60c75d487bf52c753005060 | [] | no_license | amaterasu1577/vending-machine | f106f793486f6158526af773e6be8041e918276f | ed520d8e1322e11b9fe0630a58468a4d67d504df | refs/heads/master | 2020-06-06T01:35:15.278570 | 2019-06-18T20:44:41 | 2019-06-18T20:44:41 | 192,602,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,220 | py | """
Implement a vending machine
"""
from enum import Enum
class Slot:
    """A single product slot of the vending machine.

    Holds the selection code, the product name and its price (same unit
    as the Coin values).  New slots start with no stock; inventory is
    added later through Machine.refill().
    """
    def __init__(self, code, name, price):
        self.code = code      # selection code used by the machine keypad
        self.name = name      # product label
        self.price = price    # unit price
        self.quantity = 0     # stock on hand, initially empty
class Coin(Enum):
    """Accepted coin denominations, mapped to their monetary value."""
    NICKEL = 5     # 5-unit coin
    DIME = 10      # 10-unit coin
    QUARTER = 25   # 25-unit coin
    DOLLAR = 100   # 100-unit coin
class Machine:
    """A vending machine holding product slots, inserted credit and coins."""
    # Default number of slots in a machine
    NB_SLOT = 4

    def __init__(self, slot_list):
        """Index the given slot objects by their selection code."""
        self.slots = {slot.code: slot for slot in slot_list}
        self.amount = 0   # credit inserted so far, not yet spent
        self.coins = {}   # coin inventory (set via refill_coins; not otherwise used here)

    def refill(self, code, count):
        """Add ``count`` items of stock to the slot identified by ``code``."""
        self.slots[code].quantity += count

    def refill_coins(self, coins_list):
        """Refill the machine's coin inventory.

        Bug fix: the docstring previously duplicated refill()'s text.
        """
        self.coins = coins_list

    def insert_coin(self, value):
        """Credit the value of an inserted coin toward the next purchase."""
        self.amount += value

    def press(self, code):
        """Vend one item from slot ``code`` if credit and stock allow.

        Bug fix: the original decremented quantity without checking stock,
        so pressing an empty slot drove its quantity negative.
        """
        slot = self.slots[code]
        if self.amount >= slot.price and slot.quantity > 0:
            slot.quantity -= 1
            self.amount -= slot.price
| [
"stephane.a.menanteau@socgen.com"
] | stephane.a.menanteau@socgen.com |
da808e1db71b784d8abea5fb5736304d99e9c6d7 | e7a1d1f501181daadcb9bbf4acffa469a41e1778 | /scouts/models.py | 825039ef17e6be7bec76a7494b17a874df968e26 | [] | no_license | Aziguy/Django_basic | 7f94b5586cd5303a2f2ba94d79c2e467bbc87aff | f495a6a41f15241397dcc629c43a99175e6c5470 | refs/heads/master | 2023-08-18T10:03:14.445030 | 2021-10-10T10:56:06 | 2021-10-10T10:56:06 | 415,558,645 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | from django.db import models
# Create your models here.
class Employee(models.Model):
    """Personnel record with portrait, contact details and unique identifiers."""
    # Portrait image; stored under MEDIA_ROOT/userprofile.
    photo = models.ImageField(blank=False, upload_to='userprofile')
    first_name = models.CharField(max_length=50)
    last_name = models.CharField(max_length=50)
    # Uniqueness enforced at the database level for all identifying fields.
    id_number = models.CharField(max_length=20, unique=True)
    email = models.EmailField(max_length=100, unique=True)
    # NOTE(review): "adress" looks like a typo for "address"; renaming the
    # field would require a schema migration, so it is kept as-is.
    adress = models.CharField(max_length=100, unique=True)
    phone_number = models.CharField(max_length=50, unique=True)

    class Meta:
        verbose_name = "Employee"
        verbose_name_plural = "Employees"

    def __str__(self):
        # Same "Lastname Firstname" display as the original concatenation.
        return '{} {}'.format(self.last_name, self.first_name)
"monsieur_hernandez@hotmail.com"
] | monsieur_hernandez@hotmail.com |
cb22349d4e848f634014ed843624b6925d920894 | 15b34f197e35b802501fa092ff652b2c3d6076d9 | /if-test2/iftest2.py | e67a6821f989a681ce6fc4785b45704d157a874e | [] | no_license | wolf-ja/mycode | e7b7d0d87b911f26fa5060e419d87d9d72b00773 | 6b6fda3f8db5e6c68b8fe8a7900ccfa9cec19779 | refs/heads/main | 2023-02-27T18:25:34.220343 | 2021-02-10T21:50:39 | 2021-02-10T21:50:39 | 337,128,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 760 | py | #!/usr/bin/env python3
from ipaddress import ip_address, IPv4Address, IPv6Address

# Conventional gateway address this script warns about.
GATEWAY = IPv4Address('192.168.70.1')

ip_text = input('Apply an IP address: ')  # prompt the user for input

if not ip_text:  # no data was provided at all
    print('You did not provide input.')
else:
    try:
        # ip_address() raises ValueError on malformed input; the original
        # crashed with an unhandled exception here.
        ipchk = ip_address(ip_text)
    except ValueError:
        print('You did not provide a valid IPv4 Address.')
    else:
        print(ipchk)
        print(type(ipchk))
        # Bug fix: the original compared an IPv4Address object to a str
        # (always False, so the gateway branch was unreachable) and then
        # concatenated the address object to a str (TypeError).  Compare
        # address objects and convert with str() before concatenating.
        if ipchk == GATEWAY:
            print('Looks like the IP address of the Gateway was set: '
                  + str(ipchk) + ' This is not recommended.')
        elif isinstance(ipchk, IPv4Address):
            print('Looks like the IP address was set: ' + str(ipchk))
        else:
            print('You did not provide a valid IPv4 Address.')
"jasonwolf.wolf@gmail.com"
] | jasonwolf.wolf@gmail.com |
b0262c38debe45cff08a45da06eb141b99fed10c | 85fdc5d6cd34304fe827a49ed31023ccf094f9cc | /content/views.py | 4101cf44f843f85633b7c353fe7cb56eb13c2c4a | [] | no_license | ajoulikelion8/toyproject_1day3ggang | 6670bc4df6111a7a191f3209c3acc9e6647de407 | 7559e00946b5a03768dba6784c38ceb395342ea0 | refs/heads/master | 2022-11-22T16:14:24.018037 | 2020-07-12T17:39:54 | 2020-07-12T17:39:54 | 271,964,140 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,495 | py | from django.shortcuts import render,redirect,get_object_or_404
from django.utils import timezone
from .forms import PostForm,UpdateForm
from content.models import Todolist
from account.models import User
# Create your views here.
def mainpage(request):
    """Render the static landing page."""
    return render(request,'content/mainpage.html')
def wholelist(request):
    """Render the page listing every Todolist entry.

    NOTE(review): this passes the manager (``Todolist.objects``) rather
    than a queryset; presumably the template iterates via ``lists.all`` --
    confirm against the template.
    """
    lists = Todolist.objects
    return render(request,'content/wholelist.html',{'lists':lists})
def create(request):
    """Show the creation form on GET; persist a new entry on valid POST.

    An invalid POST re-renders the page with the bound form so validation
    errors are displayed.
    """
    if request.method != 'POST':
        # Plain page request: present an empty form.
        return render(request, 'content/create.html', {'form': PostForm()})
    bound = PostForm(request.POST or None, request.FILES or None)
    if bound.is_valid():
        bound.save()
        return redirect('wholelist')
    return render(request, 'content/create.html', {'form': bound})
def delete(request, list_id):
    """Delete the entry with the given pk, then return to the list view.

    NOTE(review): uses ``objects.get`` like the original, so an unknown pk
    raises DoesNotExist (HTTP 500) rather than 404.
    """
    doomed = Todolist.objects.get(pk=list_id)
    doomed.delete()
    return redirect('wholelist')
def update(request, list_id):
    """Edit an existing Todolist entry.

    GET shows a form pre-filled with the entry's current values (bug fix:
    the GET branch previously built an unbound ``UpdateForm()``, so the
    edit page appeared empty); a valid POST saves and redirects back to
    the list, while an invalid POST re-renders with the bound form.
    """
    obj = get_object_or_404(Todolist, pk=list_id)
    if request.method == 'POST':
        form = UpdateForm(request.POST, instance=obj)
        if form.is_valid():
            form.save()
            return redirect('wholelist')
    else:
        # Pre-populate the form from the object being edited.
        form = UpdateForm(instance=obj)
    return render(request, 'content/update.html', {'form': form, 'obj': obj})
def detail(request,list_id):
    """Show a single entry; returns 404 when the pk does not exist."""
    list_detail = get_object_or_404(Todolist,pk=list_id)
    return render(request,'content/detail.html',{'list':list_detail})
def aboutus(request):
    """Render the static about-us page."""
    return render(request,'content/aboutus.html')
| [
"rbfl9611@gmail.com"
] | rbfl9611@gmail.com |
c770e3b327455e13849eeee61191a2598e34255f | e1a56ac7e85030de9ed440db0d276612fc8ad02e | /wsperf.py | ac4c5131cd39821c4f0630ba1f46a55189edb2fd | [] | no_license | hoangtrucit/wsperf | cfeb9ee794475ecffcf96e9b1929ca69ed2a8942 | 3d9dd986b1fb7dd0af38540191cc9ea73f119770 | refs/heads/master | 2021-10-20T19:30:06.236857 | 2019-03-01T13:52:34 | 2019-03-01T13:52:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,838 | py | import os, sys, argparse
from twisted.internet import reactor
from twisted.internet.utils import getProcessOutput, getProcessValue
from twisted.internet.defer import DeferredList
import analyze
# NOTE(review): Python 2 code (print statement, xrange) -- needs porting
# before it can run under Python 3.
if __name__ == '__main__':
   # Default to the wsperf binary sitting next to this script.
   default_wsperf = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'wsperf')
   parser = argparse.ArgumentParser(description = 'wsperf test driver')
   parser.add_argument('--wsuri', dest = 'wsuri', type = str, default = 'ws://127.0.0.1:9000', help = 'The WebSocket URI the testee is listening on, e.g. ws://127.0.0.1:9000.')
   parser.add_argument('--workers', dest = 'workers', type = int, default = 4, help = 'Number of wsperf worker processes to spawn.')
   parser.add_argument('--threads', dest = 'threads', type = int, default = 0, help = 'Number of wsperf worker threads to spawn at each worker [0: run on main thread, >0: spawn that many background worker threads].')
   parser.add_argument('--conns', dest = 'conns', type = int, default = 50000, help = 'Number of WebSocket connections to open from each worker.')
   parser.add_argument('--lowmark', dest = 'lowmark', type = int, default = 250, help = 'Low watermark for each worker.')
   parser.add_argument('--highmark', dest = 'highmark', type = int, default = 500, help = 'High watermark for each worker.')
   parser.add_argument('--resultfile', dest = 'resultfile', type = str, default = r'result_%d.json', help = 'Result file pattern.')
   parser.add_argument('--wsperf', dest = 'wsperf', type = str, default = default_wsperf, help = 'Full path to wsperf executable.')
   parser.add_argument('--skiprun', dest = 'skiprun', action = "store_true", default = False, help = 'Skip test run.')
   parser.add_argument('--skipanalyze', dest = 'skipanalyze', action = "store_true", default = False, help = 'Skip analyze results.')
   options = parser.parse_args()
   # One result file per worker: result_0.json, result_1.json, ...
   resultfiles = [(options.resultfile % i) for i in xrange(options.workers)]
   if options.skiprun:
      ## here we don't start a reactor.
      if not options.skipanalyze:
         analyze.printResults(resultfiles)
   else:
      # Spawn one wsperf subprocess per worker and collect a Deferred each.
      df = []
      for i in range(options.workers):
         args = [options.wsuri,
                 str(options.threads),
                 str(options.conns),
                 str(options.lowmark),
                 str(options.highmark),
                 options.resultfile % i]
         ## run wsperf executable
         d = getProcessOutput(options.wsperf, args, os.environ)
         ## accumulate any output
         df.append(d)
      # Fire once every worker process has finished (or failed).
      d = DeferredList(df, consumeErrors = True)
      def onok(res):
         if not options.skipanalyze:
            analyze.printResults(resultfiles)
         reactor.stop()
      def onerr(err):
         print err
         reactor.stop()
      d.addCallbacks(onok, onerr)
      reactor.run()
| [
"tobias.oberstein@tavendo.de"
] | tobias.oberstein@tavendo.de |
195ac95f63e61157f163bece66445bf2cac32366 | e58ecbf6af1cafbff42e2cc33abcbbf6e4ee7475 | /tests/accounting/test_call_fee_scalar.py | a6ed4f4b831b346ef58636e8757486598b762f01 | [
"MIT"
] | permissive | celeduc/ethereum-alarm-clock | 1edbbe207e0f9a7ea34a792728a2b6dceda455dd | fd202f5e96b753e6ce6bcee9a67363c468c10c7b | refs/heads/master | 2020-02-26T17:23:54.054416 | 2015-11-09T06:11:28 | 2015-11-09T06:11:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 802 | py | test_values = (
(20, (
(4, 145),
(8, 138),
(12, 129),
(16, 117),
(20, 100),
(24, 83),
(28, 71),
(32, 62),
(36, 55),
)),
(500, (
(50, 148),
(125, 143),
(275, 132),
(400, 117),
(475, 105),
(500, 100),
(525, 95),
(600, 83),
(700, 71),
(900, 55),
(1200, 41),
)),
)
# Contracts the test fixture deploys before running this module's tests.
deploy_contracts = [
    "CallLib",
]
def test_call_fee_scalar_values(CallLib):
    """Check getCallFeeScalar against the expected table for every
    (base_gas_price, gas_price) pair in ``test_values``."""
    for base_gas_price, pairs in test_values:
        for gas_price, expected in pairs:
            assert CallLib.getCallFeeScalar(base_gas_price, gas_price) == expected
| [
"pipermerriam@gmail.com"
] | pipermerriam@gmail.com |
df6eb988e4cdd97624b0cbc2b3fba8c61b9c07d1 | b5cc07fcb8e89193c01cd9f8688b8d7b4fa506d7 | /transcripcion.py | a9fff7b4d7f1c3ec3065d1a6931a116cb0a51ae5 | [] | no_license | saisua/Wordscores | f063cd3c5cd7e1dbfda427a2e4d1fce0eb0dedef | 49eb10e2da5ba14f6f52cc381828f730768c3a88 | refs/heads/main | 2023-02-02T09:19:35.402693 | 2020-12-18T04:13:11 | 2020-12-18T04:13:11 | 322,485,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,786 | py | print("Empezando imports (transcripción)...")
from argparse import ArgumentParser
from pydub import AudioSegment
from numpy import array
import speech_recognition as sp_recon
from speech_recognition import Recognizer
from urllib.request import Request
import json
import sys
from io import BytesIO
from multiprocessing import Pool, cpu_count, Manager
print("FIN (transcripción)")
def default(p):
    """Worker entry point: transcribe one file.

    ``p`` is a mapping with keys 0 (file path), 1 (shared result list)
    and 2 (True when the file is already audio, False for video).
    """
    path, results, is_audio = p[0], p[1], p[2]
    print(f"Handling {path}:")
    with open(path, 'rb') as fh:
        # Videos need their audio track extracted first.
        source = fh if is_audio else audio_from_video(fh)
        results.append(text_from_audio(source))
def main():
    """Parse CLI arguments and transcribe every input file in a process pool."""
    #if(len(argv) > 1):
    args = __create_parser()
    #else:
    #    class args:
    #        audio:bool=False
    #        Archivos:list=[input("File name: ") or "TestEsAudio.wav"]
    #        verbose:bool=True
    #        proc_num:int=8
    # Manager list so worker processes can append results across processes.
    results = Manager().list()
    print(f"Starting pool of {(num_proc := min(cpu_count(), args.proc_num, len(args.Archivos)))} process to "
          f"handle {len(args.Archivos)} {'audio' if args.audio else 'video'} files")
    with Pool(num_proc) as p:
        # One task per input file; .get() blocks until all are done.
        p.map_async(default, ({0:f, 1:results, 2:args.audio} for f in args.Archivos)).get()
    #control.wait()
    print(results)
def text_from_audio(audio: "Audio[FileIO]") -> str:
    """Transcribe a WAV file object with CMU Sphinx (Spanish, es-ES)."""
    print("Extracting text...")
    recognizer = sp_recon.Recognizer()
    with sp_recon.AudioFile(audio) as wrapped:
        recorded = recognizer.record(wrapped)
        return recognizer.recognize_sphinx(recorded, language="es-ES", show_all=False)
def audio_from_video(video: "Video[FileIO]") -> "Audio[FileIO]":
    """Extract a video's audio track as an in-memory WAV stream."""
    print("Extracting audio...", end=' > ')
    wav = AudioSegment.from_file_using_temporary_files(video).export(BytesIO(), format="wav")
    wav.seek(0)  # rewind so the caller reads from the start
    return wav
def __create_parser():
    """Build the argparse parser and return the parsed CLI arguments.

    Flags: --audio/-a (inputs are audio, not video), --verbose/-v,
    -p/--proc_num (process limit), plus one or more positional file paths.
    Help texts are intentionally in Spanish (user-facing).
    """
    ### Any functionality will be added as needed
    description = """
    Transcribe a partir de un video o audio, mediante reconocimiento del lenguaje natural
    """
    parser = ArgumentParser(description=description)
    parser.add_argument(
        "--audio", "-a",
        action="store_true",
        help="Indica que los archivos son audio, en vez de video (por defecto:video)")
    parser.add_argument(
        "--verbose", "-v",
        action="store_true",
        help="Muestra por pantalla información adicional")
    parser.add_argument(
        "-p", "--proc_num",
        type=int,
        default=1,
        help="Límite de procesos a lanzar"
    )
    parser.add_argument(
        dest="Archivos",
        nargs='+',
        type=str,
        help="Los archivos a partir de los cuales transcribir el audio"
    )
    return parser.parse_args()
if __name__ == "__main__":
main() | [
"noreply@github.com"
] | noreply@github.com |
a0bcf79be4876bd759e9b7a8b1c790f4a0cce311 | e84f94069339bcabb26a7fe82365e6822c5cea91 | /good/admin.py | 4fae9b90d04a25127ef5b4272c341678d3edb389 | [] | no_license | GMS34onv/testapp | b66092cc8ec0f3ec10dcccb72ef016467d69e277 | a7bbee1148758fca2a699f3f21a8aaa9c41684a5 | refs/heads/master | 2023-01-08T20:06:17.207749 | 2020-11-07T12:39:36 | 2020-11-07T12:39:36 | 310,841,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | from django.contrib import admin
from .models import Participants
# Make the Participants model manageable through the Django admin.
admin.site.register(Participants)
"zs827348@fg8.so-net.ne.jp"
] | zs827348@fg8.so-net.ne.jp |
e45e164e98b1895ddc938a79d25184cb7d64155e | 5dec53054c22354e702eb93a5efce219c5251de7 | /mypaddle_recongnize_digits.py | 70be6ae157090537b2725a195968ad1be2753678 | [] | no_license | cawind2/mypaddle | b10d2ea34204382069e572c289a6a90a038b6035 | f0408db958654a87a39f01f849abd5e27a468e8c | refs/heads/master | 2020-05-07T16:47:58.377619 | 2019-04-12T03:17:46 | 2019-04-12T03:17:46 | 180,698,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,887 | py | # -*- coding:utf8 -*-
# digits
from __future__ import print_function
import os
from PIL import Image
import numpy
import paddle
import paddle.fluid as fluid
BATCH_SIZE = 64  # number of MNIST images per mini-batch
PASS_NUM = 5  # number of training passes (epochs) over the data set
def loss_net(hidden, label):
    """Append a 10-way softmax classifier head.

    Returns (prediction, mean cross-entropy loss, accuracy) layers.
    """
    prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
    avg_loss = fluid.layers.mean(
        fluid.layers.cross_entropy(input=prediction, label=label))
    acc = fluid.layers.accuracy(input=prediction, label=label)
    return prediction, avg_loss, acc
def multilayer_perceptron(img, label):
    """MLP: two fully-connected tanh layers (200 units each) into loss_net."""
    first = fluid.layers.fc(input=img, size=200, act='tanh')
    second = fluid.layers.fc(input=first, size=200, act='tanh')
    return loss_net(second, label)
def softmax_regression(img, label):
    """Simplest model: feed the raw image straight into the softmax head."""
    return loss_net(img, label)
def convolutional_neural_network(img, label):
    """LeNet-5-style CNN: two conv+pool stages (with batch norm between)
    feeding the shared softmax head in loss_net."""
    # Stage 1: 20 filters of 5x5, 2x2 max pooling, ReLU.
    conv_pool_1 = fluid.nets.simple_img_conv_pool(
        input=img,
        filter_size=5,
        num_filters=20,
        pool_size=2,
        pool_stride=2,
        act="relu")
    conv_pool_1 = fluid.layers.batch_norm(conv_pool_1)
    # Stage 2: 50 filters of 5x5, 2x2 max pooling, ReLU.
    conv_pool_2 = fluid.nets.simple_img_conv_pool(
        input=conv_pool_1,
        filter_size=5,
        num_filters=50,
        pool_size=2,
        pool_stride=2,
        act="relu")
    return loss_net(conv_pool_2, label)
def train(nn_type,
          use_cuda,
          save_dirname=None,
          model_filename=None,
          params_filename=None):
    """Train the selected network on MNIST and save the best inference model.

    nn_type selects 'softmax_regression', 'multilayer_perceptron' or the
    CNN (default).  After each epoch the test set is evaluated and, when
    save_dirname is given, an inference model is saved.
    """
    if use_cuda and not fluid.core.is_compiled_with_cuda():
        return
    # X is the input: an MNIST image is a 28x28 two-dimensional image.
    img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
    # Label is the image's true label: Label=(l0,l1,...,l9) is 10-dimensional,
    # with exactly one component equal to 1 and the rest 0 (one-hot).
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    # The simplest softmax regression passes the input through one fully
    # connected layer and applies softmax to produce the probability that
    # the sample belongs to each of the N classes.
    if nn_type == 'softmax_regression':
        net_conf = softmax_regression
    # Multilayer Perceptron (MLP): softmax regression is only a two-layer
    # network (input and output) with limited capacity, so hidden layers
    # are inserted between input and output for better accuracy.
    elif nn_type == 'multilayer_perceptron':
        net_conf = multilayer_perceptron
    else:
        # LeNet-5 is a simple convolutional network: the 2-D input goes
        # through two conv+pool stages and fully connected layers, with a
        # softmax classifier as the output layer.
        net_conf = convolutional_neural_network
    prediction, avg_loss, acc = net_conf(img, label)
    test_program = fluid.default_main_program().clone(for_test=True)
    optimizer = fluid.optimizer.Adam(learning_rate=0.001)
    optimizer.minimize(avg_loss)
    def train_test(train_test_program, train_test_feed, train_test_reader):
        # Evaluate mean loss and accuracy over the whole test reader.
        acc_set = []
        avg_loss_set = []
        for test_data in train_test_reader():
            acc_np, avg_loss_np = exe.run(
                program=train_test_program,
                feed=train_test_feed.feed(test_data),
                fetch_list=[acc, avg_loss])
            acc_set.append(float(acc_np))
            avg_loss_set.append(float(avg_loss_np))
        # get test acc and loss
        acc_val_mean = numpy.array(acc_set).mean()
        avg_loss_val_mean = numpy.array(avg_loss_set).mean()
        return avg_loss_val_mean, acc_val_mean
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)
    train_reader = paddle.batch(
        paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=500),
        batch_size=BATCH_SIZE)
    test_reader = paddle.batch(
        paddle.dataset.mnist.test(), batch_size=BATCH_SIZE)
    feeder = fluid.DataFeeder(feed_list=[img, label], place=place)
    exe.run(fluid.default_startup_program())
    main_program = fluid.default_main_program()
    epochs = [epoch_id for epoch_id in range(PASS_NUM)]
    lists = []
    step = 0
    for epoch_id in epochs:
        for step_id, data in enumerate(train_reader()):
            metrics = exe.run(
                main_program,
                feed=feeder.feed(data),
                fetch_list=[avg_loss, acc])
            if step % 100 == 0:
                print("Pass %d, Batch %d, Cost %f" % (step, epoch_id,
                                                      metrics[0]))
            step += 1
        # test for epoch
        avg_loss_val, acc_val = train_test(
            train_test_program=test_program,
            train_test_reader=test_reader,
            train_test_feed=feeder)
        print("Test with Epoch %d, avg_cost: %s, acc: %s" %
              (epoch_id, avg_loss_val, acc_val))
        lists.append((epoch_id, avg_loss_val, acc_val))
        if save_dirname is not None:
            fluid.io.save_inference_model(
                save_dirname, ["img"], [prediction],
                exe,
                model_filename=model_filename,
                params_filename=params_filename)
    # find the best pass
    best = sorted(lists, key=lambda list: float(list[1]))[0]
    print('Best pass is %s, testing Avgcost is %s' % (best[0], best[1]))
    print('The classification accuracy is %.2f%%' % (float(best[2]) * 100))
def infer(use_cuda,
          save_dirname=None,
          model_filename=None,
          params_filename=None):
    """Load the saved inference model and classify image/infer_3.png."""
    if save_dirname is None:
        return
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)
    def load_image(file):
        # Grayscale, resize to 28x28, reshape to NCHW and scale to [-1, 1].
        im = Image.open(file).convert('L')
        im = im.resize((28, 28), Image.ANTIALIAS)
        im = numpy.array(im).reshape(1, 1, 28, 28).astype(numpy.float32)
        im = im / 255.0 * 2.0 - 1.0
        return im
    cur_dir = os.path.dirname(os.path.realpath(__file__))
    tensor_img = load_image(cur_dir + '/image/infer_3.png')
    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        # Use fluid.io.load_inference_model to obtain the inference program desc,
        # the feed_target_names (the names of variables that will be feeded
        # data using feed operators), and the fetch_targets (variables that
        # we want to obtain data from using fetch operators).
        [inference_program, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(
             save_dirname, exe, model_filename, params_filename)
        # Construct feed as a dictionary of {feed_target_name: feed_target_data}
        # and results will contain a list of data corresponding to fetch_targets.
        results = exe.run(
            inference_program,
            feed={feed_target_names[0]: tensor_img},
            fetch_list=fetch_targets)
        # The most probable class is the last index after an argsort.
        lab = numpy.argsort(results)
        print("Inference result of image/infer_3.png is: %d" % lab[0][0][-1])
def main(use_cuda, nn_type):
    """Train the chosen network, then run inference with the saved model."""
    model_filename = None
    params_filename = None
    save_dirname = "recognize_digits_" + nn_type + ".inference.model"
    # call train() with is_local argument to run distributed train
    train(
        nn_type=nn_type,
        use_cuda=use_cuda,
        save_dirname=save_dirname,
        model_filename=model_filename,
        params_filename=params_filename)
    infer(
        use_cuda=use_cuda,
        save_dirname=save_dirname,
        model_filename=model_filename,
        params_filename=params_filename)
if __name__ == '__main__':
    # CPU-only by default; pick one of the three network variants below.
    use_cuda = False
    #predict = 'softmax_regression' # uncomment for Softmax
    #predict = 'multilayer_perceptron' # uncomment for MLP
    predict = 'convolutional_neural_network' # uncomment for LeNet5
    main(use_cuda=use_cuda, nn_type=predict)
| [
"konglan@hotmail.com"
] | konglan@hotmail.com |
090878f19ffe408b52f9598216f4a2f609c8d58e | e9685369da45e5c502ce5540891e6018eadba252 | /backend/server/apps/tasks/api/serializers.py | 8f557e1bb12bfa5ff5230105c2b8d8284b099ec9 | [
"MIT"
] | permissive | Turi-fly/simple-tasks | 9703a2dd405081b129222cf6a325a5b591709d8c | ae759a8100f6604b6d8fc00f19cf3aedbd945f3d | refs/heads/master | 2022-04-10T15:26:01.590888 | 2018-11-14T08:45:46 | 2018-11-14T08:45:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 774 | py | from rest_framework import serializers
import tasks.models as models
import cerberus
class TaskSerializer(serializers.ModelSerializer):
    """Serializer for Task objects; ``params`` is validated against a
    cerberus schema requiring integer 'arg1' and 'arg2'."""

    class Meta:
        model = models.Task
        read_only_fields = ('id', 'state', 'result', 'task_id',)
        fields = ('id', 'state', 'params', 'result', 'task_id')

    def validate_params(self, params):
        """Reject empty params, then enforce the {'arg1': int, 'arg2': int} schema."""
        if params is None or params == '':
            raise serializers.ValidationError("Params cannot be empty")
        schema = {
            'arg1': {'type': 'integer', 'required': True},
            'arg2': {'type': 'integer', 'required': True},
        }
        validator = cerberus.Validator(schema)
        if not validator.validate(params):
            # Surface cerberus' per-field error details to the client.
            raise serializers.ValidationError(validator.errors)
        return params
| [
"pplonski86@gmail.com"
] | pplonski86@gmail.com |
57696caf005b8a54da7aaaf3df859c70b66f1d82 | 073f6b363b128179e3b698f7298937890da58d79 | /simplegrid.py | 9b78ee6c16c85559c1e2a10bea2ec6a753ede5bf | [] | no_license | janagood/My-Advent-of-Code-2020 | df84910f88f771a417cda6a0ecde1fb0c3142e87 | 61630fabd2f38fbc64ab0106429e536d272d136c | refs/heads/main | 2023-02-19T02:26:31.092070 | 2021-01-16T22:09:15 | 2021-01-16T22:09:15 | 321,767,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,261 | py | '''
simple grid rows/columns of characters
'''
class SimpleGrid:
    """A 2-D grid of single characters addressed as (x, y).

    x is the column (0..width-1) and y the row (0..height-1).
    """

    def __init__(self, width, height, display=None, BLANK='.'):
        """Create a blank grid, or load one from ``display``.

        display: optional list of row strings; row lengths may vary, in
        which case the grid width becomes the longest row's length (the
        ``width`` argument is then ignored) and short rows are padded
        with BLANK.
        """
        self.width = width
        if display is not None:
            # Lengths of lines vary -- use the widest.
            self.width = max(len(line) for line in display)
        self.height = height
        self.BLANK = BLANK
        self.grid = [[BLANK] * self.width for _ in range(self.height)]
        if display is not None:
            for y, line in enumerate(display[:self.height]):
                for x, ch in enumerate(line):
                    self.set_display(x, y, ch)

    def set_display(self, x, y, ch):
        """Put character ch at (x, y)."""
        self.grid[y][x] = ch

    def get_display(self, x, y):
        """Return the character at (x, y)."""
        return self.grid[y][x]

    def get_neighbors(self, x, y, chk, diag=False):
        """Return in-bounds neighbor coordinates of (x, y) whose cell
        equals ``chk``; diag=True also checks the four diagonals.

        Bug fix: the original used raw offsets, so edge cells wrapped
        around via Python's negative indexing (or raised IndexError past
        the right/bottom edge).  Out-of-grid neighbors are now excluded.
        """
        if diag:
            candidates = [(x - 1, y - 1), (x + 1, y - 1), (x - 1, y + 1), (x + 1, y + 1),
                          (x, y - 1), (x + 1, y), (x, y + 1), (x - 1, y)]
        else:
            candidates = [(x, y - 1), (x + 1, y), (x, y + 1), (x - 1, y)]
        return [(x2, y2) for x2, y2 in candidates
                if 0 <= x2 < self.width and 0 <= y2 < self.height
                and self.get_display(x2, y2) == chk]

    def is_clear(self, x, y):
        """True when (x, y) holds the BLANK character."""
        return self.get_display(x, y) == self.BLANK

    def count(self, ch):
        """Number of cells containing ch."""
        return sum(row.count(ch) for row in self.grid)

    def add_border(self, ch):
        """Surround the grid with a one-cell border of ch.

        Width and height each grow by 2.  Bug fix: the original reused
        one list object as both the top and bottom border rows, so
        writing to one of them silently changed the other.
        """
        for y in range(self.height):
            self.grid[y] = [ch] + self.grid[y] + [ch]
        self.width += 2
        # Distinct list objects for top and bottom rows (no aliasing).
        self.grid = [[ch] * self.width] + self.grid + [[ch] * self.width]
        self.height += 2

    def __eq__(self, other):
        """Grids are equal when dimensions and every cell match."""
        return (self.width == other.width
                and self.height == other.height
                and self.grid == other.grid)

    def __repr__(self):
        return ''.join(''.join(row) + '\n' for row in self.grid)
| [
"noreply@github.com"
] | noreply@github.com |
3e1776247d845fc32f4cfe242171b4dcbe3a6657 | 182f8408c5ae6373716717906a6317ae42314497 | /Python Scripts/Helper/marker_tracking_opencv.py | 465567570aeb9304de20661c47d23d2472db3713 | [] | no_license | RaedShabbir/3DPositionalTracking | c0ab9bc65930cf023c5184537f9279f538ccf550 | 66a77423350c15566451460af8a68df48a6495bc | refs/heads/master | 2021-06-12T09:32:54.745728 | 2021-02-23T18:29:56 | 2021-02-23T18:29:56 | 142,172,794 | 15 | 4 | null | null | null | null | UTF-8 | Python | false | false | 969 | py | import numpy as np
import cv2
import cv2.aruco as aruco
import sys
# Open the default webcam (device 0).
video = cv2.VideoCapture(0)
#Error checking
if not video.isOpened():
    print ("ERROR: COULD NOT OPEN VIDEO ")
    sys.exit()
# 4x4 ArUco marker dictionary with 50 distinct ids.
aruco_dict = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
while(True):
    #read frame
    ok, frame = video.read()
    if not ok:
        print ("ERROR: COULD NOT READ FIRST FRAME FROM FILE")
        sys.exit()
    #frame operations
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    parameters = aruco.DetectorParameters_create()
    #list ids and respective corners
    corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
    # Draw accepted markers in blue and rejected candidates in red.
    frame = aruco.drawDetectedMarkers(frame, corners, borderColor=(255,0,0))
    frame = aruco.drawDetectedMarkers(frame, rejectedImgPoints, borderColor=(0,0,255))
    cv2.imshow('frame', frame)
    # Quit on the 'q' key.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
video.release()
cv2.destroyAllWindows()
| [
"raedshabbir@gmail.com"
] | raedshabbir@gmail.com |
35ca70a8b5270904c42a2735dff91b71f75adeab | 03f97c2af0830e5806b652c059b7a5c838c46e58 | /primsmstsub/python/avishek1013.py | 1863f10893abac7f032b8b6125b8bd50b5dfc151 | [] | no_license | saketrule/hackerrank_top_solutions_dataset | 28f2539bfce14b6f7ddf6364c23cd00ca61681eb | 12699befcaf1ebd727c66104b1d5715ef58573b3 | refs/heads/master | 2020-05-22T18:35:48.609982 | 2017-03-31T14:38:42 | 2017-03-31T14:38:42 | 84,714,263 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 865 | py | import heapq
# NOTE(review): Python 2 code (raw_input, dict.has_key, print statement).
# Prim's algorithm: read N nodes / M weighted edges, then a start node S,
# and print the total weight of a minimum spanning tree.
N,M = map(int,raw_input().split())
edges = {}
adj = {}
for m in range(M):
    x,y,r = map(int,raw_input().split())
    # Keep only the cheapest edge for each ordered pair (handles parallel edges).
    if (edges.has_key((x,y)) and edges[(x,y)] > r) or not edges.has_key((x,y)):
        edges[(x,y)] = r
    if (edges.has_key((y,x)) and edges[(y,x)] > r) or not edges.has_key((y,x)):
        edges[(y,x)] = r
S = int(raw_input())
# Build the adjacency list as (weight, neighbor) pairs for the heap.
for edge in edges.keys():
    x,y,r = edge[0],edge[1],edges[edge]
    if adj.has_key(x):
        adj[x].append((r,y))
    else:
        adj[x] = [(r,y)]
V = set([S])
queue = adj[S]
heapq.heapify(queue)
weight = 0
# Repeatedly take the cheapest edge that reaches a new vertex.
while len(V) < N:
    r,y = heapq.heappop(queue)
    if y not in V:
        weight += r
        V.add(y)
        for e in adj[y]:
            heapq.heappush(queue,e)
print weight
| [
"anonymoussaketjoshi@gmail.com"
] | anonymoussaketjoshi@gmail.com |
74f2bdf58cb20622e1de728a20875fc2c71867af | 1323bc9ccc02ccc169e7abb9e36c3238f11e967c | /mysite/blog/models.py | c0c96e4f8842405eb7403f45d0e1f6f4025c809c | [] | no_license | defrank/old-personal-website | 1f870a52ce70d1f63fb769c0885ee19c59937341 | df063b2bc961daf68dbbd07628071604e7b73d50 | refs/heads/master | 2021-06-16T03:05:52.033742 | 2017-04-12T18:50:06 | 2017-04-12T18:50:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,819 | py | # $Id: models.py,v 1.1 2013-06-30 17:08:56-07 dmf - $
# Derek Frank
#
# NAME
# models.py - blog
#
# DESCRIPTION
# Models definition for mysite blog.
#
from django.db import models
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from os.path import basename, splitext
from urlparse import urlsplit
from utils import get_default_user as _user, markdown_to_html
####
## MODELS
# NOTE(review): legacy Python 2 / Django 1.x style (__unicode__, models.permalink).
class Entry(models.Model):
    """
    A blog post entry.
    Author, title, and body are required. Other fields optional.
    """
    author = models.ForeignKey(User, related_name='entries', default=_user)
    title = models.CharField(_(u'title'), max_length=128)
    timestamp = models.DateTimeField(_(u'entry date and time'))
    # When True, the body is treated as markdown and rendered to HTML.
    html = models.BooleanField(_('html in body?'))
    body = models.TextField(_(u'body text'))
    def body_html(self):
        # Render markdown (with the entry's attached images) when html is
        # enabled; otherwise return the raw body text unchanged.
        if self.html:
            images = self.images.all()
            return markdown_to_html(self.body, images)
        return self.body
    @models.permalink
    def get_absolute_url(self):
        return ('blog.views.archive_id', [str(self.id)])
    def __unicode__(self):
        return u'%s' % self.title
    class Meta:
        # Newest entries first.
        ordering = ('-timestamp', 'author', 'title')
        verbose_name = 'entry'
        verbose_name_plural = 'entries'
class Image(models.Model):
    """
    A blog post's image.
    Entry and either the image or url field are required. Other fields optional.
    """
    entry = models.ForeignKey(Entry, related_name='images')
    title = models.CharField(_(u'title'), max_length=64, blank=True)
    def get_upload_dir(instance, filename):
        # Store uploads under blog/<author>/entry<pk>/<filename>.
        return 'blog/%s/entry%d/%s' % (instance.entry.author.username, instance.entry.pk, filename)
    image = models.ImageField(_(u'uploaded image'), upload_to=get_upload_dir, blank=True, null=True)
    url = models.URLField(_(u'url address'), blank=True)
    def filename(self):
        # Display name precedence: explicit title, uploaded file's name,
        # remote URL's basename, finally a synthetic "imageN".
        if self.title:
            return self.title
        elif self.image:
            return basename(self.image.url)
        elif self.url:
            return basename(urlsplit(self.url).path)
        return 'image%d' % self.id
    def get_absolute_url(self):
        # Prefer the uploaded file's URL over the remote URL; '' when neither.
        if self.image:
            return '%s' % self.image.url
        elif self.url:
            return '%s' % self.url
        return ''
    def __unicode__(self):
        if self.image:
            name = basename(self.image.path)
            if self.title:
                # Title plus the uploaded file's extension.
                return u'%s%s' % (self.title, splitext(name)[-1])
            return u'%s' % name
        elif self.url:
            name = urlsplit(self.url).netloc
            if self.title:
                return u'%s (hosted on %s)' % (self.url, name)
            return u'unknown (hosted on %s)' % name
        elif self.title:
            return u'%s' % self.title
        return u'image %d for blog entry %d: %s' % (self.id, self.entry.id, self.entry)
    class Meta:
        ordering = ('-entry', 'title')
class Link(models.Model):
    """
    A link to something of interest.
    All fields required.
    """
    author = models.ForeignKey(User, related_name='links', default=_user)
    title = models.CharField(_(u'title'), max_length=128)
    # Set automatically when the link is first saved.
    timestamp = models.DateTimeField(_(u'post date and time'), auto_now_add=True)
    url = models.URLField(_(u'url address'))
    # Two-letter category codes and their display labels.
    CATEGORY_CHOICES = (
        (u'CO', u'Cosmos'),
        (u'TC', u'Technology'),
        (u'LH', u'Life Hacks'),
        (u'PO', u'Politics'),
        (u'BG', u'Blogs'),
    )
    category = models.CharField(_(u'category'), max_length=2, choices=CATEGORY_CHOICES)
    description = models.TextField(_('description'), blank=True)
    def get_absolute_url(self):
        # Links point straight at the external resource.
        return '%s' % self.url
    def __unicode__(self):
        return u'%s' % self.title
    class Meta:
        ordering = ('category', '-timestamp', 'title')
####
## ADMIN
class ImageInline(admin.TabularInline):
    """Edit an entry's images inline on the Entry admin page."""
    model = Image
    extra = 0  # no blank extra image rows by default
class EntryAdmin(admin.ModelAdmin):
    """Admin configuration for blog entries, with inline image editing."""
    def get_author(self, obj):
        # Readable author column for the change list.
        return '%s' % obj.author
    get_author.short_description = u'Author'
    list_display = ('title', 'timestamp', 'get_author')
    fieldsets = (
        (None, {'fields': ('author',)}),
        ('Entry information', {'fields': ('title', 'timestamp')}),
        #('Entry information', {'fields': ('title',)}),
        ('Entry content', {'fields': ('html', 'body',)}),
    )
    inlines = (ImageInline,)
## Link
class LinkAdmin(admin.ModelAdmin):
    """Admin configuration for posted links."""
    def get_author(self, obj):
        # Readable author column for the change list.
        return '%s' % obj.author
    get_author.short_description = u'Author'
    list_display = ('title', 'category', 'timestamp', 'get_author')
####
## REGISTER
# Expose the blog models in the Django admin site.
admin.site.register(Entry, EntryAdmin)
admin.site.register(Link, LinkAdmin)
| [
"derekmfrank@gmail.com"
] | derekmfrank@gmail.com |
f68edc296aad188b58a8795023a0db3bed2f7007 | 2091bc632121d7ea50440eac86147146d673edb6 | /search/majority_element2.py | 41f5d3eb7217beb02f884bf427129b651746cfea | [] | no_license | teamday/Learning | f6e2fb6553bdf6ba7c492948e771ae4b622e3465 | cf99d0e40388183d1b54a0a0f4c1a9f3b92ccb40 | refs/heads/master | 2022-12-24T19:28:23.768178 | 2020-10-10T07:35:16 | 2020-10-10T07:35:16 | 86,290,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | #!/usr/bin/env python
class Solution(object):
    """Boyer-Moore majority vote, extended to elements appearing > n/3 times."""

    def majorityElement(self, nums):
        """Return every value occurring more than ``len(nums) // 3`` times.

        :type nums: List[int]
        :rtype: List[int]
        """
        if not nums:
            return []
        # At most two distinct values can each exceed n/3 occurrences,
        # so two candidate/vote pairs suffice.
        cand_a = cand_b = None
        votes_a = votes_b = 0
        for value in nums:
            if value == cand_a:
                votes_a += 1
            elif value == cand_b:
                votes_b += 1
            elif votes_a == 0:
                cand_a, votes_a = value, 1
            elif votes_b == 0:
                cand_b, votes_b = value, 1
            else:
                # value cancels one vote from each current candidate
                votes_a -= 1
                votes_b -= 1
        # Verify the surviving candidates actually pass the threshold.
        threshold = len(nums) // 3
        return [cand for cand in (cand_a, cand_b) if nums.count(cand) > threshold]
if __name__ == "__main__":
    # Ad-hoc smoke test when run as a script (guarded so importing this
    # module no longer prints): only 3 appears more than 5 // 3 times.
    solver = Solution()
    print(solver.majorityElement([1, 2, 3, 3, 4]))
"roman@itech.ua"
] | roman@itech.ua |
4cdaf7a82e2c58ba6dd327460842b08cd2a84836 | fb63b9a6f0fb2a61718133b6c73cf88d6d86b473 | /tests/unit/test_conditions.py | 6e54ec94e09d80b3d8b1c82bb90e99a82b98fea8 | [
"MIT"
] | permissive | Sazpaimon/bloop | e3f15d55253b077e6bb4764e3a3cf614726f33e9 | e5eee6a1c5c46ecbb9a6a3517cca345d756ecc53 | refs/heads/master | 2021-07-12T12:16:59.748176 | 2017-09-23T01:33:21 | 2017-09-23T01:33:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51,169 | py | import logging
import operator
import pytest
from bloop.conditions import (
AndCondition,
BaseCondition,
BeginsWithCondition,
BetweenCondition,
ComparisonCondition,
ComparisonMixin,
Condition,
ConditionRenderer,
ContainsCondition,
InCondition,
InvalidCondition,
NotCondition,
OrCondition,
Proxy,
Reference,
ReferenceTracker,
get_marked,
get_snapshot,
iter_columns,
iter_conditions,
printable_column_name,
render,
)
from bloop.models import BaseModel, Column
from bloop.signals import object_deleted, object_loaded, object_saved
from bloop.types import Binary, Boolean, Integer, List, Map, Set, String
from ..helpers.models import Document, User
class MockColumn(Column):
    """Column stub: String type, "d_"-prefixed dynamo name, fake model.

    Gives the tests a column whose repr reads ``M.<name>`` without
    needing a real model class.
    """

    def __init__(self, name):
        super().__init__(String(), name="d_" + name)
        self.model_name = name
        # Throwaway class literally named "M" so reprs render as M.<name>
        self.model = type("M", (), {})
c = MockColumn("c")
d = MockColumn("d")
def condition_for(operation, column=None):
    """Build and return a single condition for *operation*."""
    built = conditions_for(operation, column=column)
    return built[0]
def conditions_for(*operations, column=None):
    """Build one condition per requested operation.

    Output order is fixed regardless of argument order: Condition()
    (requested via ``None``), and, or, not, begins_with, between,
    contains, in, then the comparison operators ``< <= > >= != ==``.
    """
    column = column or MockColumn("c")
    value = 0
    values = [1, 2]

    def comparison(op):
        # Shared helper: a comparison against the default value
        return ComparisonCondition(op, column, value)

    built = []
    if None in operations:
        built.append(Condition())
    if "and" in operations:
        built.append(AndCondition(comparison("=="), comparison("!=")))
    if "or" in operations:
        built.append(OrCondition(comparison("=="), comparison("!=")))
    if "not" in operations:
        built.append(NotCondition(comparison("==")))
    if "begins_with" in operations:
        built.append(BeginsWithCondition(column, value))
    if "between" in operations:
        built.append(BetweenCondition(column, *values))
    if "contains" in operations:
        built.append(ContainsCondition(column, value))
    if "in" in operations:
        built.append(InCondition(column, values))
    for op in ("<", "<=", ">", ">=", "!=", "=="):
        if op in operations:
            built.append(comparison(op))
    return built
def non_meta_conditions(column=None):
    """Every leaf condition (functions and comparisons); no and/or/not."""
    leaf_operations = (
        "begins_with", "between", "contains", "in",
        ">", "<", ">=", "<=", "==", "!=",
    )
    return conditions_for(*leaf_operations, column=column)
def meta_conditions(column=None):
    """The three conditions that wrap other conditions: and, or, not."""
    return conditions_for("and", "or", "not", column=column)
def empty_conditions():
    """One condition of each kind that renders as nothing (len == 0)."""
    return [Condition(), AndCondition(), OrCondition(), NotCondition(Condition())]
@pytest.fixture
def reference_tracker(engine):
    """A fresh ReferenceTracker bound to the shared engine fixture."""
    return ReferenceTracker(engine)
@pytest.fixture
def renderer(engine):
    """A fresh ConditionRenderer bound to the shared engine fixture."""
    return ConditionRenderer(engine)
# TRACKING SIGNALS ================================================================================== TRACKING SIGNALS
# Columns are sorted by model name
# The atomic condition expected for a never-synced / just-deleted User:
# every column must be absent.
empty_user_condition = (
    User.age.is_(None) &
    User.email.is_(None) &
    User.id.is_(None) &
    User.joined.is_(None) &
    User.name.is_(None)
)
def test_on_deleted(engine):
"""When an object is deleted, the snapshot expects all columns to be empty"""
user = User(age=3, name="foo")
object_deleted.send(engine, engine=engine, obj=user)
assert get_snapshot(user) == empty_user_condition
# It doesn't matter if the object had non-empty values saved from a previous sync
object_saved.send(engine, engine=engine, obj=user)
assert get_snapshot(user) == (
User.age.is_({"N": "3"}) &
User.name.is_({"S": "foo"})
)
# The deleted signal still clears everything
object_deleted.send(engine, engine=engine, obj=user)
assert get_snapshot(user) == empty_user_condition
# But the current values aren't replaced
assert user.age == 3
assert user.name == "foo"
def test_on_loaded_partial(engine):
"""When an object is loaded, the state after loading is snapshotted for future atomic calls"""
# Creating an instance doesn't snapshot anything
user = User(age=3, name="foo")
assert get_snapshot(user) == empty_user_condition
# Pretend the user was just loaded. Because only
# age and name are marked, they will be the only
# columns included in the snapshot. A normal load
# would set the other values to None, and the
# snapshot would expect those.
object_loaded.send(engine, engine=engine, obj=user)
# Values are stored dumped. Since the dumped flag isn't checked as
# part of equality testing, we can simply construct the dumped
# representations to compare.
assert get_snapshot(user) == (
User.age.is_({"N": "3"}) &
User.name.is_({"S": "foo"})
)
def test_on_loaded_full(engine):
"""Same as the partial test, but with explicit Nones to simulate a real engine.load"""
user = User(age=3, email=None, id=None, joined=None, name="foo")
object_loaded.send(engine, engine=engine, obj=user)
assert get_snapshot(user) == (
User.age.is_({"N": "3"}) &
User.email.is_(None) &
User.id.is_(None) &
User.joined.is_(None) &
User.name.is_({"S": "foo"})
)
def test_on_modified():
"""When an object's values are set or deleted, those columns are marked for tracking"""
# Creating an instance doesn't mark anything
user = User()
assert get_marked(user) == set()
user.id = "foo"
assert get_marked(user) == {User.id}
# Deleting the value does not clear it from the set of marked columns
del user.id
assert get_marked(user) == {User.id}
# Even when the delete fails, the column is marked.
# We're tracking intention, not state change.
with pytest.raises(AttributeError):
del user.age
assert get_marked(user) == {User.id, User.age}
def test_on_saved(engine):
"""Saving is equivalent to loading w.r.t. tracking.
The state after saving is snapshotted for future atomic operations."""
user = User(name="foo", age=3)
object_saved.send(engine, engine=engine, obj=user)
# Since "name" and "age" were the only marked columns saved to DynamoDB,
# they are the only columns that must match for an atomic save. The
# state of the other columns wasn't specified, so it's not safe to
# assume the intended value (missing vs empty)
assert get_snapshot(user) == (
User.age.is_({"N": "3"}) &
User.name.is_({"S": "foo"})
)
# END TRACKING SIGNALS ========================================================================== END TRACKING SIGNALS
# REFERENCE TRACKER ================================================================================ REFERENCE TRACKER
def test_ref_index_always_increments(reference_tracker):
    """Don't risk forgetting to increment it - ALWAYS increment after getting."""
    first = reference_tracker.next_index
    second = reference_tracker.next_index
    # Each read hands out the next index; no explicit increment step exists.
    assert (first, second) == (0, 1)
def test_ref_same_name(reference_tracker):
"""Don't create two references for the same name string"""
name = "foo"
expected_ref = "#n0"
ref = reference_tracker._name_ref(name)
same_ref = reference_tracker._name_ref(name)
assert ref == same_ref == expected_ref
assert reference_tracker.attr_names[ref] == name
assert reference_tracker.name_attr_index[name] == ref
assert reference_tracker.counts[ref] == 2
def test_ref_path_empty(reference_tracker):
"""Path reference without a path (column only) is just a name ref"""
column = MockColumn("column")
expected_name = "d_column"
expected_ref = "#n0"
ref = reference_tracker._path_ref(column)
assert ref == expected_ref
assert reference_tracker.attr_names[ref] == expected_name
assert reference_tracker.name_attr_index[expected_name] == ref
assert reference_tracker.counts[ref] == 1
def test_ref_path_complex(reference_tracker):
"""Path reference with integer and string indexes. Strings include duplicates and literal periods."""
column = MockColumn("column")["foo"][3][4]["repeat"]["has.period"]["repeat"]
expected_ref = "#n0.#n1[3][4].#n2.#n3.#n2"
expected_names = {
"#n0": "d_column",
"#n1": "foo",
"#n2": "repeat",
"#n3": "has.period"
}
ref = reference_tracker._path_ref(column)
assert ref == expected_ref
assert reference_tracker.attr_names == expected_names
def test_ref_path_reuse(reference_tracker):
"""paths are re-used, even across columns"""
first = MockColumn("first")[3]["foo"]
second = MockColumn("second")[3]["foo"]
expected_first = "#n0[3].#n1"
expected_second = "#n2[3].#n1"
expected_names = {
"#n0": "d_first",
"#n1": "foo",
"#n2": "d_second"
}
first_ref = reference_tracker._path_ref(first)
second_ref = reference_tracker._path_ref(second)
assert first_ref == expected_first
assert second_ref == expected_second
assert reference_tracker.attr_names == expected_names
def test_ref_path_periods(reference_tracker):
"""Path segments with periods aren't de-duped with each individual segment"""
column = MockColumn("column")["foo"]["foo.bar"]["bar"]
expected_ref = "#n0.#n1.#n2.#n3"
expected_names = {
"#n0": "d_column",
"#n1": "foo",
"#n2": "foo.bar",
"#n3": "bar",
}
ref = reference_tracker._path_ref(column)
assert ref == expected_ref
assert reference_tracker.attr_names == expected_names
def test_ref_value(reference_tracker):
"""no path, value not dumped"""
column = User.age
value = 3
expected_ref = ":v0"
expected_value = {"N": "3"}
expected_values = {":v0": expected_value}
ref, value = reference_tracker._value_ref(column, value)
assert ref == expected_ref
assert value == expected_value
assert reference_tracker.attr_values == expected_values
def test_ref_value_path(reference_tracker):
"""has path, value not dumped"""
column = Document.data["Description"]["Body"]
value = "value"
expected_ref = ":v0"
expected_value = {"S": value}
expected_values = {":v0": expected_value}
ref, value = reference_tracker._value_ref(column, value)
assert ref == expected_ref
assert value == expected_value
assert reference_tracker.attr_values == expected_values
def test_ref_value_dumped(reference_tracker):
"""no path, value already dumped"""
column = Document.id
# This shouldn't be dumped, so we use an impossible value for the type
dumped_value = object()
expected_ref = ":v0"
expected_values = {":v0": dumped_value}
ref, value = reference_tracker._value_ref(column, dumped_value, dumped=True)
assert ref == expected_ref
assert value == dumped_value
assert reference_tracker.attr_values == expected_values
def test_ref_value_dumped_path(reference_tracker):
"""has path, value already dumped"""
column = Document.data["Description"]
# Description's typedef is Map, wich can't dump an object
# This shouldn't be dumped, so we use an impossible value for the type
dumped_value = object()
expected_ref = ":v0"
expected_values = {":v0": dumped_value}
ref, value = reference_tracker._value_ref(column, dumped_value, dumped=True)
assert ref == expected_ref
assert value == dumped_value
assert reference_tracker.attr_values == expected_values
def test_ref_any_column_name(reference_tracker):
"""Render a reference to the column name (and path) when there's no value"""
column = Document.data["Description"]["Body"]
expected_ref = Reference(name="#n0.#n1.#n2", type="name", value=None)
expected_names = {
"#n0": "data",
"#n1": "Description",
"#n2": "Body"
}
ref = reference_tracker.any_ref(column=column)
assert ref == expected_ref
assert reference_tracker.attr_names == expected_names
def test_ref_any_value_is_column(reference_tracker):
"""Render a reference to a value that is also a column"""
column = Document.id["Description"]["Rating"]
# value has its own path
value = Document.data["Description"]["Body"]
expected_ref = Reference(name="#n0.#n1.#n2", type="name", value=None)
expected_names = {
"#n0": "data",
"#n1": "Description",
"#n2": "Body"
}
ref = reference_tracker.any_ref(column=column, value=value)
assert ref == expected_ref
assert reference_tracker.attr_names == expected_names
def test_ref_any_value_not_column(reference_tracker):
"""Render a reference to a regular value"""
column = Document.id
value = 3
expected_value = {"N": "3"}
expected_ref = Reference(name=":v0", type="value", value=expected_value)
expected_values = {":v0": expected_value}
ref = reference_tracker.any_ref(column=column, value=value)
assert ref == expected_ref
assert reference_tracker.attr_values == expected_values
def test_ref_pop_none(reference_tracker):
"""pop_refs without args doesn't pop any refs"""
# Add a name and value ref so we can make sure nothing is cleared
name = reference_tracker.any_ref(column=Document.id).name
value = reference_tracker.any_ref(column=Document.id, value=3).name
reference_tracker.pop_refs()
assert name in reference_tracker.attr_names
assert value in reference_tracker.attr_values
def test_ref_pop_unknown(reference_tracker):
"""Popping an unknown ref doesn't do anything"""
# Add a name and value ref so we can make sure nothing is cleared
name = reference_tracker.any_ref(column=Document.id).name
value = reference_tracker.any_ref(column=Document.id, value=3).name
unknown_name_ref = Reference(name="foo", type="value", value=None)
unknown_value_ref = Reference(name="bar", type="name", value=None)
reference_tracker.pop_refs(unknown_name_ref, unknown_value_ref)
assert name in reference_tracker.attr_names
assert value in reference_tracker.attr_values
def test_ref_pop_name(reference_tracker):
"""References aren't removed until they're popped as many times as they're used"""
name_ref = reference_tracker.any_ref(column=Document.id)
same_name_ref = reference_tracker.any_ref(column=Document.id)
assert reference_tracker.counts[name_ref.name] == 2
# Still in attr_names, name_attr_index
reference_tracker.pop_refs(same_name_ref)
assert reference_tracker.counts[name_ref.name] == 1
assert reference_tracker.attr_names[name_ref.name] == "id"
assert reference_tracker.name_attr_index["id"] == name_ref.name
# Not in attr_names, name_attr_index
reference_tracker.pop_refs(same_name_ref)
assert reference_tracker.counts[name_ref.name] == 0
assert name_ref.name not in reference_tracker.attr_names
assert "id" not in reference_tracker.name_attr_index
# Count doesn't go below 0
reference_tracker.pop_refs(name_ref)
assert reference_tracker.counts[name_ref.name] == 0
def test_ref_pop_value(reference_tracker):
"""Same pop test, for values"""
value_ref = reference_tracker.any_ref(column=Document.id, value=3)
# Have to fake this out a bit, because there's no de-duping for values
# This test exists to guard incorrect pop behavior, in case values are
# ever de-duped.
reference_tracker.counts[value_ref.name] += 1
assert reference_tracker.counts[value_ref.name] == 2
# Still in attr_names, name_attr_index
reference_tracker.pop_refs(value_ref)
assert reference_tracker.counts[value_ref.name] == 1
assert reference_tracker.attr_values[value_ref.name] == {"N": "3"}
# Not in attr_names, name_attr_index
reference_tracker.pop_refs(value_ref)
assert reference_tracker.counts[value_ref.name] == 0
assert value_ref.name not in reference_tracker.attr_values
# Count doesn't go below 0
reference_tracker.pop_refs(value_ref)
assert reference_tracker.counts[value_ref.name] == 0
# END REFERENCE TRACKER ======================================================================== END REFERENCE TRACKER
# RENDERER ================================================================================================== RENDERER
def test_render_missing_object(engine):
    """Rendering an update or an atomic condition requires an object."""
    for kwargs in ({"update": True}, {"atomic": True}):
        with pytest.raises(InvalidCondition):
            render(engine, **kwargs)
@pytest.mark.parametrize("kwarg_name, expression_key", [
("filter", "FilterExpression"),
("key", "KeyConditionExpression"),
("condition", "ConditionExpression"),
])
def test_render_condition_only(kwarg_name, expression_key, engine, caplog):
"""Only renders the given condition"""
condition = (User.email == "@") & (User.name.is_(None))
rendered = render(engine, **{kwarg_name: condition})
assert rendered == {
"ExpressionAttributeNames": {"#n0": "email", "#n2": "name"},
"ExpressionAttributeValues": {":v1": {"S": "@"}},
expression_key: "((#n0 = :v1) AND (attribute_not_exists(#n2)))"
}
assert caplog.record_tuples == [
("bloop.conditions", logging.DEBUG, "popping last usage of Reference(name=':v3', type='value', value=None)"),
("bloop.conditions", logging.DEBUG, "rendering \"==\" as attribute_not_exists"),
]
def test_render_projection_only(engine):
columns = [User.id, User.email, User.id, User.age]
rendered = render(engine, projection=columns)
assert rendered == {
"ExpressionAttributeNames": {"#n0": "id", "#n1": "email", "#n2": "age"},
"ProjectionExpression": "#n0, #n1, #n2",
}
def test_render_atomic_only_new(engine):
"""Atomic condition on a new object only -> all attribute_not_exists"""
rendered = render(engine, obj=User(), atomic=True)
assert rendered == {
"ExpressionAttributeNames": {"#n0": "age", "#n2": "email", "#n4": "id", "#n6": "j", "#n8": "name"},
"ConditionExpression": (
"((attribute_not_exists(#n0)) AND (attribute_not_exists(#n2)) AND"
" (attribute_not_exists(#n4)) AND (attribute_not_exists(#n6)) AND"
" (attribute_not_exists(#n8)))"
)
}
def test_render_atomic_only_partial(engine):
"""Atomic condition on an object already partially synced"""
user = User(id="user_id", age=3, email=None)
# Sync gives us an atomic condition
object_saved.send(engine, engine=engine, obj=user)
# Unlike a new save, this one has no expectation about the values of "joined" or "name"
rendered = render(engine, obj=user, atomic=True)
assert rendered == {
"ExpressionAttributeNames": {"#n0": "age", "#n2": "email", "#n4": "id"},
"ExpressionAttributeValues": {":v1": {"N": "3"}, ":v5": {"S": "user_id"}},
"ConditionExpression": "((#n0 = :v1) AND (attribute_not_exists(#n2)) AND (#n4 = :v5))"
}
def test_render_atomic_and_condition(engine):
"""Atomic condition and condition are ANDed together (condition first)"""
user = User(id="user_id", age=3, email=None)
# Sync gives us an atomic condition
object_saved.send(engine, engine=engine, obj=user)
# Value ref isn't re-used
condition = User.email.contains("@")
rendered = render(engine, obj=user, condition=condition, atomic=True)
assert rendered == {
"ExpressionAttributeNames": {"#n0": "email", "#n2": "age", "#n5": "id"},
"ExpressionAttributeValues": {":v1": {"S": "@"}, ":v3": {"N": "3"}, ":v6": {"S": "user_id"}},
"ConditionExpression": "((contains(#n0, :v1)) AND (#n2 = :v3) AND (attribute_not_exists(#n0)) AND (#n5 = :v6))"
}
def test_render_update_only(engine):
user = User(email="@", age=3)
rendered = render(engine, obj=user, update=True)
assert rendered == {
"ExpressionAttributeNames": {"#n0": "age", "#n2": "email"},
"ExpressionAttributeValues": {":v1": {"N": "3"}, ":v3": {"S": "@"}},
"UpdateExpression": "SET #n0=:v1, #n2=:v3",
}
def test_render_complex(engine):
"""Render a filter condition, key condition, projection, condition, atomic and update"""
user = User(id="uid", age=3, email=None)
# Sync gives us an atomic condition on id, age, email (sorted)
object_saved.send(engine, engine=engine, obj=user)
filter_condition = User.email.contains("@")
key_condition = User.age == 4
# projection isn't sorted by name
projection = [User.name, User.id]
condition = User.age <= User.id
# SET name, REMOVE age
# (in addition to REMOVE email, from email=None)
user.name = "bill"
del user.age
rendered = render(engine, obj=user,
filter=filter_condition, projection=projection, key=key_condition,
atomic=True, condition=condition, update=True)
# Render order: filter, projection, key, (condition & atomic), update
assert rendered == {
"ExpressionAttributeNames": {"#n0": "email", "#n2": "name", "#n3": "id", "#n4": "age"},
"ExpressionAttributeValues": {
":v1": {"S": "@"},
":v5": {"N": "4"},
":v6": {"N": "3"},
":v8": {"S": "uid"},
":v11": {"S": "bill"}
},
"FilterExpression": "(contains(#n0, :v1))",
"ProjectionExpression": "#n2, #n3",
"KeyConditionExpression": "(#n4 = :v5)",
"ConditionExpression": "((#n4 <= #n3) AND (#n4 = :v6) AND (attribute_not_exists(#n0)) AND (#n3 = :v8))",
"UpdateExpression": "SET #n2=:v11 REMOVE #n4, #n0",
}
@pytest.mark.parametrize("func_name, expression_key", [
("render_condition_expression", "ConditionExpression"),
("render_filter_expression", "FilterExpression"),
("render_key_expression", "KeyConditionExpression"),
])
def test_render_simple_conditions(func_name, expression_key, renderer):
"""condition, filter, key expression rendering simply defers to the condition"""
condition = User.name.between("foo", User.age)
render = getattr(renderer, func_name)
render(condition)
assert renderer.rendered == {
"ExpressionAttributeNames": {"#n0": "name", "#n2": "age"},
"ExpressionAttributeValues": {":v1": {"S": "foo"}},
expression_key: "(#n0 BETWEEN :v1 AND #n2)"
}
def test_render_projection_dedupes_names(renderer):
"""Duplicate columns are filtered when rendering the projection expression"""
columns = [User.id, User.email, User.id, User.age]
renderer.render_projection_expression(columns)
assert renderer.rendered == {
"ExpressionAttributeNames": {"#n0": "id", "#n1": "email", "#n2": "age"},
"ProjectionExpression": "#n0, #n1, #n2",
}
def test_render_update_no_changes(renderer):
"""When there aren't any marked *non-key* columns on an object, there's no update expression"""
user = User(id="user_id")
renderer.render_update_expression(user)
assert not renderer.rendered
def test_render_update_set_only(renderer):
"""Only updates are where values were set (none of the values were None or rendered as None)"""
user = User(email="@", age=3)
renderer.render_update_expression(user)
assert renderer.rendered == {
"ExpressionAttributeNames": {"#n0": "age", "#n2": "email"},
"ExpressionAttributeValues": {":v1": {"N": "3"}, ":v3": {"S": "@"}},
"UpdateExpression": "SET #n0=:v1, #n2=:v3",
}
def test_render_update_remove_only(renderer):
"""Only updates were del'd values, values set to None, or values that render as None"""
document = Document()
# Renders as None
document.data = dict()
# Deleted, even though it wasn't set
with pytest.raises(AttributeError):
del document.numbers
# Explicit None
document.value = None
renderer.render_update_expression(document)
assert renderer.rendered == {
"ExpressionAttributeNames": {"#n0": "data", "#n2": "numbers", "#n4": "value"},
"UpdateExpression": "REMOVE #n0, #n2, #n4",
}
def test_render_update_set_and_remove(renderer):
"""Some values set, some values removed"""
document = Document()
# Renders as None -> removed
document.data = dict()
# Deleted, even though it wasn't set
with pytest.raises(AttributeError):
del document.numbers
# Both set
document.value = 3
document.another_value = 4
renderer.render_update_expression(document)
# Ordering is alphabetical by model name: another_value, data, numbers, value
# REMOVE statements will cause a skip in index (because value renders empty and pops the ref)
assert renderer.rendered == {
"ExpressionAttributeNames": {"#n0": "another_value", "#n2": "data", "#n4": "numbers", "#n6": "value"},
"ExpressionAttributeValues": {":v1": {"N": "4"}, ":v7": {"N": "3"}},
"UpdateExpression": "SET #n0=:v1, #n6=:v7 REMOVE #n2, #n4",
}
# END RENDERER ========================================================================================== END RENDERER
# CONDITIONS ============================================================================================== CONDITIONS
def test_abstract_base(renderer):
    """BaseCondition's abstract hooks raise NotImplementedError.

    Exercises the three hooks visible here: __len__, __repr__, and render.
    """
    condition = BaseCondition(None)
    with pytest.raises(NotImplementedError):
        len(condition)
    with pytest.raises(NotImplementedError):
        repr(condition)
    with pytest.raises(NotImplementedError):
        condition.render(renderer)
def test_empty_condition():
assert Condition().operation is None
@pytest.mark.parametrize("condition", empty_conditions())
def test_len_empty(condition):
assert len(condition) == 0
def test_iter_empty():
    """An empty Condition yields only itself, and no columns at all."""
    empty = Condition()
    assert set(iter_conditions(empty)) == {empty}
    assert next(iter_columns(empty), None) is None
def test_render_empty(renderer):
condition = Condition()
condition.render(renderer)
assert not renderer.rendered
@pytest.mark.parametrize("condition", non_meta_conditions())
def test_len_non_empty(condition):
assert len(condition) == 1
@pytest.mark.parametrize("condition", non_meta_conditions())
def test_len_non_meta(condition):
"""Non-meta conditions *must* have exactly 1 condition"""
assert len(condition) == 1
@pytest.mark.parametrize("condition", meta_conditions())
def test_len_meta(condition):
    """not wraps a single condition; and/or were each built with two."""
    expected = 1 if condition.operation == "not" else 2
    assert len(condition) == expected
def test_len_cyclic():
"""Cyclic conditions count the cyclic reference"""
# Here's the structure to create:
# root
# / \
# a b
# / \
# c root
root = AndCondition()
a = ComparisonCondition("<", MockColumn("a"), 3)
b = OrCondition()
c = ComparisonCondition(">", MockColumn("c"), 3)
root.values.extend([a, b])
b.values.extend([c, root])
assert len(root) == 4
def test_len_unpack_not():
"""Even though not(not(x)) -> x shouldn't exist, its length should be the inner length"""
lt, gt = conditions_for("<", ">")
outer = NotCondition(lt)
condition = NotCondition(outer)
assert len(condition) == len(outer) == 1
# Swap inner for an AND with length 2
and_ = AndCondition(lt, gt)
outer.values[0] = and_
assert len(condition) == len(outer) == len(and_) == 2
@pytest.mark.parametrize("condition", conditions_for(
"begins_with", "between", "contains", "in",
">", "<", ">=", "<=", "==", "!=",
"and", "or"))
def test_invert_wraps(condition):
"""everything but not and () are wrapped in a not"""
wrapped = ~condition
assert wrapped.operation == "not"
assert wrapped.values[0] is condition
def test_invert_empty():
    """Inverting the empty condition is a no-op: ~() -> ()"""
    no_op = Condition()
    inverted = ~no_op
    assert inverted is no_op
def test_invert_simplifies():
"""~~x -> x"""
condition = ComparisonCondition(">", MockColumn("c"), 3)
assert (~~condition) is condition
def test_invert_empty_not():
    """Inverting a NotCondition unwraps it: ~not(x) -> x.

    Compared via .operation: the inner condition here is "==".
    """
    condition = condition_for("not")
    assert (~condition).operation == condition.values[0].operation
# CONDITIONS AND/IAND ============================================================================ CONDITIONS AND/IAND
@pytest.mark.parametrize("empty", empty_conditions())
def test_and_empty_conditions(empty):
"""When conditions are falsey (literal empty or meta with no inner value), simplify instead of nesting:
()_1 & ()_2 -> ()_1
x & () -> x
() & x -> x
"""
also_empty = Condition()
not_empty = condition_for(">")
assert (empty & not_empty) is not_empty
assert (not_empty & empty) is not_empty
assert (empty & also_empty) is empty
assert (also_empty & empty) is also_empty
def test_and_both_and():
"""(a & b) & (c & d) -> (a & b & c & d)"""
a, b, c, d = [condition_for(">") for _ in range(4)]
left = AndCondition(a, b)
right = AndCondition(c, d)
assert (left & right).operation == "and"
assert (left & right).values == [a, b, c, d]
assert (right & left).values == [c, d, a, b]
@pytest.mark.parametrize("other", non_meta_conditions())
def test_and_simplifies(other):
"""When only one condition is an and, the other is put in a new and, in the correct place
(a & b) & (c > 2) -> (a & b & (c > 2))
(a > 2) & (b & c) -> ((a > 2) & b & c)
"""
a, b, = [condition_for(">"), condition_for("<")]
and_condition = AndCondition(a, b)
assert (and_condition & other).operation == "and"
assert (and_condition & other).values == [a, b, other]
assert (other & and_condition).values == [other, a, b]
def test_and_basic():
a = condition_for(">")
b = condition_for("<")
assert (a & b).operation == "and"
assert (a & b).values == [a, b]
assert (b & a).values == [b, a]
@pytest.mark.parametrize("empty", empty_conditions())
def test_iand_empty_conditions(empty):
"""Similar to and, empty values don't change the non-empty values. LHS always wins if both empty."""
also_empty = Condition()
not_empty = condition_for(">")
# None of the following modify the object
original_empty = empty
empty &= also_empty
assert empty is original_empty
original_also_empty = also_empty
also_empty &= empty
assert also_empty is original_also_empty
original_not_empty = not_empty
not_empty &= empty
assert not_empty is original_not_empty
# The only modifying __iand__
empty &= not_empty
assert empty is not_empty
def test_iand_both_and():
"""other's conditions are appended to self's conditions"""
a, b, c, d = [condition_for(">") for _ in range(4)]
left = AndCondition(a, b)
right = AndCondition(c, d)
original_left = left
left &= right
assert left is original_left
assert left.values == [a, b, c, d]
assert right.values == [c, d]
@pytest.mark.parametrize("other", non_meta_conditions())
def test_iand_simplifies(other):
"""Similar to and, other value is pushed into the and (on LHS) or front of a new and (on RHS)"""
a, b, = [condition_for(">"), condition_for("<")]
and_condition = AndCondition(a, b)
original_other = other
other &= and_condition
assert other is not original_other
assert other.values == [original_other, a, b]
original_and_condition = and_condition
and_condition &= original_other
assert and_condition is original_and_condition
assert and_condition.values == [a, b, original_other]
def test_iand_basic():
a = condition_for(">")
b = condition_for("<")
original_a = a
original_b = b
a &= original_b
assert a is not original_a
assert a.operation == "and"
assert a.values == [original_a, original_b]
b &= original_a
assert b is not original_b
assert b.operation == "and"
assert b.values == [original_b, original_a]
# CONDITIONS OR/IOR ================================================================================ CONDITIONS OR/IOR
# Tests for Condition.__or__ / __ior__: identity rules for empty conditions,
# flattening of nested OrConditions, and in-place vs new-object semantics.
@pytest.mark.parametrize("empty", empty_conditions())
def test_or_empty_conditions(empty):
    """When conditions are falsey (literal empty or meta with no inner value), simplify instead of nesting:
    ()_1 | ()_2 -> ()_1
    x | () -> x
    () | x -> x
    """
    also_empty = Condition()
    not_empty = condition_for(">")
    assert (empty | not_empty) is not_empty
    assert (not_empty | empty) is not_empty
    assert (empty | also_empty) is empty
    assert (also_empty | empty) is also_empty
def test_or_both_or():
    """(a | b) | (c | d) -> (a | b | c | d)"""
    a, b, c, d = [condition_for(">") for _ in range(4)]
    left = OrCondition(a, b)
    right = OrCondition(c, d)
    assert (left | right).operation == "or"
    assert (left | right).values == [a, b, c, d]
    assert (right | left).values == [c, d, a, b]
@pytest.mark.parametrize("other", non_meta_conditions())
def test_or_simplifies(other):
    """When only one condition is an or, the other is put in a new or, in the correct place
    (a | b) | (c > 2) -> (a | b | (c > 2))
    (a > 2) | (b | c) -> ((a > 2) | b | c)
    """
    a, b, = [condition_for(">"), condition_for("<")]
    or_condition = OrCondition(a, b)
    assert (or_condition | other).operation == "or"
    assert (or_condition | other).values == [a, b, other]
    assert (other | or_condition).values == [other, a, b]
def test_or_basic():
    """Two non-meta conditions combine into a fresh OrCondition, preserving order."""
    a = condition_for(">")
    b = condition_for("<")
    assert (a | b).operation == "or"
    assert (a | b).values == [a, b]
    assert (b | a).values == [b, a]
@pytest.mark.parametrize("empty", empty_conditions())
def test_ior_empty_conditions(empty):
    """Similar to or, empty values don't change the non-empty values. LHS always wins if both empty."""
    also_empty = Condition()
    not_empty = condition_for(">")
    # None of the following modify the object
    original_empty = empty
    empty |= also_empty
    assert empty is original_empty
    original_also_empty = also_empty
    also_empty |= empty
    assert also_empty is original_also_empty
    original_not_empty = not_empty
    not_empty |= empty
    assert not_empty is original_not_empty
    # The only modifying __ior__
    empty |= not_empty
    assert empty is not_empty
def test_ior_both_or():
    """other's conditions are appended to self's conditions"""
    a, b, c, d = [condition_for(">") for _ in range(4)]
    left = OrCondition(a, b)
    right = OrCondition(c, d)
    original_left = left
    left |= right
    assert left is original_left
    assert left.values == [a, b, c, d]
    assert right.values == [c, d]
@pytest.mark.parametrize("other", non_meta_conditions())
def test_ior_simplifies(other):
    """Similar to or, other value is pushed into the or (on LHS) or front of a new or (on RHS)"""
    a, b, = [condition_for(">"), condition_for("<")]
    or_condition = OrCondition(a, b)
    original_other = other
    other |= or_condition
    assert other is not original_other
    assert other.values == [original_other, a, b]
    original_or_condition = or_condition
    or_condition |= original_other
    assert or_condition is original_or_condition
    assert or_condition.values == [a, b, original_other]
def test_ior_basic():
    """|= between two non-meta conditions creates a new OrCondition (no in-place variant exists)."""
    a = condition_for(">")
    b = condition_for("<")
    original_a = a
    original_b = b
    a |= original_b
    assert a is not original_a
    assert a.operation == "or"
    assert a.values == [original_a, original_b]
    b |= original_a
    assert b is not original_b
    assert b.operation == "or"
    assert b.values == [original_b, original_a]
# CONDITIONS REPR ==================================================================================== CONDITIONS REPR
# One parametrized table covering repr() of every condition type; the "M.c"
# prefix comes from the MockColumn fixture's model name.
@pytest.mark.parametrize("condition, expected", [
    # and
    (AndCondition(), "( & )"),
    (AndCondition("foo"), "('foo' &)"),
    (AndCondition("a", "b", "c"), "('a' & 'b' & 'c')"),
    # or
    (OrCondition(), "( | )"),
    (OrCondition("foo"), "('foo' |)"),
    (OrCondition("a", "b", "c"), "('a' | 'b' | 'c')"),
    # not
    (NotCondition("a"), "(~'a')"),
    # comparisons
    (ComparisonCondition("<", column=c, value=3), "(M.c < 3)"),
    (ComparisonCondition(">", column=c, value=3), "(M.c > 3)"),
    (ComparisonCondition("<=", column=c, value=3), "(M.c <= 3)"),
    (ComparisonCondition(">=", column=c, value=3), "(M.c >= 3)"),
    (ComparisonCondition("==", column=c, value=3), "(M.c == 3)"),
    (ComparisonCondition("!=", column=c, value=3), "(M.c != 3)"),
    # begins_with, contains
    (BeginsWithCondition(column=c, value=2), "begins_with(M.c, 2)"),
    (ContainsCondition(column=c, value=2), "contains(M.c, 2)"),
    # between
    (BetweenCondition(column=c, lower=2, upper=3), "(M.c between [2, 3])"),
    # in
    (InCondition(column=c, values=[]), "(M.c in [])"),
    (InCondition(column=c, values=[2, 3]), "(M.c in [2, 3])"),
    (InCondition(column=c, values=[MockColumn("d"), 3]), "(M.c in [<Column[M.d]>, 3])"),
    # empty
    (Condition(), "()")
])
def test_repr(condition, expected):
    """repr of each condition matches its documented human-readable form."""
    assert repr(condition) == expected
# CONDITIONS EQUALITY ============================================================================ CONDITIONS EQUALITY
# Equality is structural: operation, values, column, and column path must all match.
def test_eq_empty():
    """Two distinct empty Conditions compare equal (identity is not required)."""
    empty = Condition()
    assert empty == empty
    also_empty = Condition()
    assert empty is not also_empty
    assert empty == also_empty
def test_eq_wrong_type():
    """AttributeError returns False"""
    assert not (Condition() == object())
@pytest.mark.parametrize("other", [
    BaseCondition("op", values=list("xy"), column=c["wrong"]["path"]),
    BaseCondition("??", values=list("xy"), column=c["foo"]["bar"]),
    BaseCondition("op", values=list("xy"), column=None),
    # Need to attach a path to the wrong proxy object
    BaseCondition("op", values=list("xy"), column=Proxy(obj=None, path=["foo", "bar"])),
    BaseCondition("op", values=list("xyz"), column=c["foo"]["bar"]),
    BaseCondition("op", values=list("yx"), column=c["foo"]["bar"]),
])
def test_eq_one_wrong_field(other):
    """All four of operation, value, column, and path must match"""
    self = BaseCondition("op", values=list("xy"), column=c["foo"]["bar"])
    assert not (self == other)
@pytest.mark.parametrize("other", [
    BaseCondition("op", values=[c]),
    BaseCondition("op", values=["x"]),
    BaseCondition("op", values=[c, c]),
    BaseCondition("op", values=["x", "x"]),
    BaseCondition("op", values=["x", c]),
    BaseCondition("op", values=[d, "x"]),
])
def test_eq_values_mismatch(other):
    """Values must match element-wise: length, order, and column-vs-literal kind."""
    condition = BaseCondition("op", values=[c, "x"])
    assert not (condition == other)
# CONDITIONS RENDER ================================================================================ CONDITIONS RENDER
# Rendering produces DynamoDB expression strings plus the shared
# ExpressionAttributeNames (#nX) / ExpressionAttributeValues (:vX) maps.
@pytest.mark.parametrize("condition, as_str, expected_names, expected_values", [
    # Comparison - all operations
    (User.age == 3, "(#n0 = :v1)", {"#n0": "age"}, {":v1": {"N": "3"}}),
    (User.age != 3, "(#n0 <> :v1)", {"#n0": "age"}, {":v1": {"N": "3"}}),
    (User.age < 3, "(#n0 < :v1)", {"#n0": "age"}, {":v1": {"N": "3"}}),
    (User.age > 3, "(#n0 > :v1)", {"#n0": "age"}, {":v1": {"N": "3"}}),
    (User.age <= 3, "(#n0 <= :v1)", {"#n0": "age"}, {":v1": {"N": "3"}}),
    (User.age >= 3, "(#n0 >= :v1)", {"#n0": "age"}, {":v1": {"N": "3"}}),
    # Comparison - against None -> attribute_* functions
    (User.age.is_(None), "(attribute_not_exists(#n0))", {"#n0": "age"}, None),
    (User.age.is_not(None), "(attribute_exists(#n0))", {"#n0": "age"}, None),
    # Comparison - against things that become None -> attribute_* functions
    (Document.data == dict(), "(attribute_not_exists(#n0))", {"#n0": "data"}, None),
    (Document.data != dict(), "(attribute_exists(#n0))", {"#n0": "data"}, None),
    # Comparison - against another Column
    (User.name == User.email, "(#n0 = #n1)", {"#n0": "name", "#n1": "email"}, None),
    # BeginsWith - against value, Column
    (User.name.begins_with("foo"), "(begins_with(#n0, :v1))", {"#n0": "name"}, {":v1": {"S": "foo"}}),
    (User.name.begins_with(User.email), "(begins_with(#n0, #n1))", {"#n0": "name", "#n1": "email"}, None),
    # Between - against value, Column
    (User.age.between(3, 4), "(#n0 BETWEEN :v1 AND :v2)", {"#n0": "age"}, {":v1": {"N": "3"}, ":v2": {"N": "4"}}),
    (User.age.between(3, User.age), "(#n0 BETWEEN :v1 AND #n0)", {"#n0": "age"}, {":v1": {"N": "3"}}),
    (User.age.between(User.age, 4), "(#n0 BETWEEN #n0 AND :v1)", {"#n0": "age"}, {":v1": {"N": "4"}}),
    # Contains - against value, Column
    (User.name.contains("foo"), "(contains(#n0, :v1))", {"#n0": "name"}, {":v1": {"S": "foo"}}),
    (User.name.contains(User.email), "(contains(#n0, #n1))", {"#n0": "name", "#n1": "email"}, None),
    # In - mixed values, Column
    (User.age.in_(3, User.age, 4), "(#n1 IN (:v0, #n1, :v2))", {"#n1": "age"}, {":v0": {"N": "3"}, ":v2": {"N": "4"}})
])
def test_render_valid_condition(condition, as_str, expected_names, expected_values, renderer):
    """Each condition renders to the exact expression string and name/value maps."""
    assert condition.render(renderer) == as_str
    if expected_names:
        assert renderer.rendered["ExpressionAttributeNames"] == expected_names
    else:
        assert "ExpressionAttributeNames" not in renderer.rendered
    if expected_values:
        assert renderer.rendered["ExpressionAttributeValues"] == expected_values
    else:
        assert "ExpressionAttributeValues" not in renderer.rendered
@pytest.mark.parametrize("condition", [
    # Value is None
    User.age < None,
    User.age > None,
    User.age <= None,
    User.age >= None,
    User.email.begins_with(None),
    # At least one None
    User.age.between(3, None),
    User.age.between(None, 4),
    User.age.between(None, None),
    User.email.contains(None),
    # No values
    User.age.in_(),
    # At least one None
    User.age.in_(None, 4),
    User.age.in_(3, None),
    User.age.in_(None, None),
    # Not literal None, but becomes None
    Document.nested_numbers.contains([]),
    # Empty meta conditions
    AndCondition(),
    OrCondition()
])
def test_render_invalid_condition(condition, renderer):
    """After a condition fails to render, all of its name and value refs should be popped."""
    with pytest.raises(InvalidCondition):
        condition.render(renderer)
    assert not renderer.rendered
def test_render_nested_meta_condition(renderer):
    """Test meta conditions AND, OR, NOT"""
    has_name = User.name.is_not(None)
    is_foo = User.name == "foo"
    is_3 = User.age != 3
    is_email_address = User.email.contains("@")
    # There's no ref with '1' because the first equality condition (is_not) renders a value ref, and then pops it.
    expected = "(((attribute_exists(#n0)) AND (#n0 = :v2)) OR (NOT (#n3 <> :v4)) OR (contains(#n5, :v6)))"
    expected_names = {"#n0": "name", "#n3": "age", "#n5": "email"}
    expected_values = {":v2": {"S": "foo"}, ":v4": {"N": "3"}, ":v6": {"S": "@"}}
    condition = (has_name & is_foo) | (~is_3) | is_email_address
    assert condition.render(renderer) == expected
    assert renderer.rendered == {
        "ExpressionAttributeNames": expected_names,
        "ExpressionAttributeValues": expected_values
    }
@pytest.mark.parametrize("condition_cls", [AndCondition, OrCondition])
def test_render_and_or_simplify(condition_cls, renderer):
    """When AND/OR have exactly one condition, they only render that condition (without an AND/OR)"""
    inner = User.age < 3
    condition = condition_cls(inner)
    expected = "(#n0 < :v1)"
    assert condition.render(renderer) == expected
    assert renderer.rendered == {
        "ExpressionAttributeNames": {"#n0": "age"},
        "ExpressionAttributeValues": {":v1": {"N": "3"}}
    }
# END CONDITIONS ====================================================================================== END CONDITIONS
# COMPARISON MIXIN ================================================================================== COMPARISON MIXIN
# ComparisonMixin turns Python operators and helper methods into condition
# objects; these tests pin the operation name, column, and values produced.
def test_mixin_repr():
    assert repr(ComparisonMixin()) == "<ComparisonMixin>"
def test_mixin_path():
    """Indexing a mixin builds a Proxy that records the object and the path taken."""
    mixin = ComparisonMixin()
    proxy = mixin["some_attribute"][3]
    assert isinstance(proxy, Proxy)
    assert proxy._obj is mixin
    assert proxy._path == ["some_attribute", 3]
@pytest.mark.parametrize("op, expected", [
    (operator.eq, "=="),
    (operator.ne, "!="),
    (operator.lt, "<"),
    (operator.gt, ">"),
    (operator.le, "<="),
    (operator.ge, ">="),
])
def test_mixin_magic_comparisons(op, expected):
    """==, !=, <, >, <=, >= create condition objects with the corresponding operation"""
    condition = op(c, 3)
    assert condition.operation == expected
    assert condition.column is c
    assert condition.values == [3]
def test_mixin_begins_with():
    condition = c.begins_with(3)
    assert condition.operation == "begins_with"
    assert condition.column is c
    assert condition.values == [3]
def test_mixin_between():
    condition = c.between(3, 4)
    assert condition.operation == "between"
    assert condition.column is c
    assert condition.values == [3, 4]
def test_mixin_contains():
    condition = c.contains(3)
    assert condition.operation == "contains"
    assert condition.column is c
    assert condition.values == [3]
def test_mixin_in_():
    condition = c.in_(3, 4)
    assert condition.operation == "in"
    assert condition.column is c
    assert condition.values == [3, 4]
def test_mixin_is_():
    """is_ / is_not are aliases for == / != respectively."""
    condition = c.is_(3)
    assert condition.operation == "=="
    assert condition.column is c
    assert condition.values == [3]
    condition = c.is_not(3)
    assert condition.operation == "!="
    assert condition.column is c
    assert condition.values == [3]
@pytest.mark.parametrize("op, typedefs, args", [
    (
        "begins_with",
        [
            Integer(), List(String), Map(s=String), Boolean(),
            Set(Integer), Set(Binary), Set(String)
        ],
        ("one-arg",)
    ),
    (
        "contains",
        [
            Integer(), Boolean(), Map(s=String)
        ],
        ("one-arg",)
    ),
    (
        "between",
        [
            Set(String), Set(Binary), Set(String),
            List(String), Map(s=String), Boolean()
        ],
        ("first-arg", "second-arg")
    )
])
def test_unsupported_mixin_function_conditions(op, typedefs, args):
    # Each (op, typedef) pairing is invalid and must raise InvalidCondition.
    class Model(BaseModel):
        id = Column(Integer, hash_key=True)
    for typedef in typedefs:
        column = Column(typedef, name="d")
        column.model = Model
        column.model_name = "c"
        with pytest.raises(InvalidCondition):
            getattr(column, op)(*args)
    # NOTE(review): this call sits outside both the loop and the
    # pytest.raises context and asserts nothing -- it looks like leftover
    # debugging; confirm whether it was meant to be inside the raises block.
    column.begins_with(object())
@pytest.mark.parametrize("typedef", [
    Set(Integer), Set(Binary), Set(String),
    List(String), Map(s=String), Boolean()
])
@pytest.mark.parametrize("op", [
    operator.lt,
    operator.gt,
    operator.le,
    operator.ge
])
def test_unsupported_mixin_comparison_conditions(op, typedef):
    # Ordering comparisons are invalid on set/list/map/bool-typed columns.
    class Model(BaseModel):
        id = Column(Integer, hash_key=True)
    column = Column(typedef, name="d")
    column.model = Model
    column.model_name = "c"
    with pytest.raises(InvalidCondition):
        op(column, "value")
def test_printable_column_no_path():
    """Model.column"""
    assert printable_column_name(User.email) == "email"
def test_printable_column_mixed_path():
    """Model.column[3].foo[1]"""
    assert printable_column_name(User.id, path=[3, "foo", "bar", 0, 1]) == "id[3].foo.bar[0][1]"
def test_printable_column_included_path():
    """Path is part of the 'column' that's provided"""
    assert printable_column_name(User.id[3]["foo"]["bar"][0][1]) == "id[3].foo.bar[0][1]"
def test_printable_column_both_paths():
    """When both paths are provided, the explicit path wins"""
    assert printable_column_name(User.id["not used"], path=[3, "foo", "bar", 0, 1]) == "id[3].foo.bar[0][1]"
# END COMPARISON MIXIN ========================================================================== END COMPARISON MIXIN
# PROXY ======================================================================================================== PROXY
# Proxy wraps a column and a document path, delegating attribute access to
# the wrapped object except for its own _obj/_path bookkeeping.
def test_proxy_delegates_getattr():
    sentinel = object()
    column = MockColumn("col")
    column.attribute = sentinel
    proxy = column["some"]["path"]
    assert proxy.attribute is sentinel
def test_proxy_masks_protected_path_attr():
    """If a proxied object has a _path or _obj attribute, it's not returned through the proxy"""
    sentinel = object()
    column = MockColumn("col")
    column._obj = sentinel
    column._path = sentinel
    proxy = column["some"]["path"]
    assert proxy._obj is not column._obj
    assert proxy._path is not column._path
def test_proxy_repr():
    """repr shows the proxied column plus the path in dotted/indexed form."""
    column = MockColumn("col")
    proxy = column["some"][2]["path"]
    assert repr(proxy) == "<Proxy[M.col.some[2].path]>"
# END PROXY ================================================================================================ END PROXY
# ITERATORS ================================================================================================ ITERATORS
# iter_conditions walks a condition tree (safely handling cycles);
# iter_columns yields every distinct column referenced anywhere in the tree.
@pytest.mark.parametrize("condition", non_meta_conditions())
def test_iter_conditions_non_meta(condition):
    """These conditions aren't and/or/not, so they can't yield any inner conditions"""
    assert set(iter_conditions(condition)) == {condition}
@pytest.mark.parametrize("condition", meta_conditions())
def test_iter_conditions_non_cyclic_meta(condition):
    """Yield the inner conditions for each of these meta conditions"""
    expected = condition.values
    actual = list(iter_conditions(condition))
    assert actual == expected
def test_iter_conditions_cyclic():
    """Cyclic conditions can be iterated safely"""
    # Here's the structure to create:
    #   root
    #  /    \
    # a      b
    #       / \
    #      c   root (cycle back to the top)
    root = AndCondition()
    a = ComparisonCondition("<", MockColumn("a"), 3)
    b = OrCondition()
    c = ComparisonCondition(">", MockColumn("c"), 3)
    root.values.extend([a, b])
    b.values.extend([c, root])
    expected = {root, a, b, c}
    actual = set(iter_conditions(root))
    assert actual == expected
@pytest.mark.parametrize("condition", [*non_meta_conditions(column=User.age), *meta_conditions(column=User.age)])
def test_iter_columns_single(condition):
    assert set(iter_columns(condition)) == {User.age}
def test_iter_columns_nested():
    """Nested AND, OR, NOT are unpacked"""
    a = User.age == 3
    b = User.name == "foo"
    c = User.email.in_(User.age, User.id, "bar")
    # Here's the structure to create:
    #   __root__
    #  /   |    \
    # a  branch  \
    #      |     leaf
    #      b    /    \
    #          c      root (cycle back to the top)
    branch = ~b
    root = a & branch
    leaf = c | root
    root.values.append(leaf)
    assert set(iter_columns(root)) == {User.age, User.name, User.email, User.id}
# END ITERATORS ======================================================================================== END ITERATORS
| [
"joe.mcross@gmail.com"
] | joe.mcross@gmail.com |
5fad4659f82009f792d6692934e99164917d5c9d | 5d7f56534d86e405f47b9b97546950ee9469f68a | /Python/Greedy_1이될때까지.py | 05fafb2b784f90cf610f5ba5a289d70ccf0bbbe3 | [] | no_license | naminyeop/Algorithm | 6a371a2ae8b1dbdfcd7cb04d16d4039004e542ad | 0acd5145a606fcb3e54b0e9947ddf55a6951be45 | refs/heads/master | 2022-05-25T09:20:50.596547 | 2022-03-15T05:34:30 | 2022-03-15T05:34:30 | 77,616,690 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py |
# Reduce N to 1 in as few greedy steps as possible: divide by K whenever N is
# divisible, otherwise subtract 1. Echoes N and K, then prints the step count.
N, K = list(map(int, input().split()))
print(N)  # e.g. 17
print(K)  # e.g. 4
num = N
count = 0
while num != 1:
    if num % K == 0:
        # Bug fix: use floor division. The original `num = num / K` produced a
        # float, which loses precision (and can loop incorrectly) for large N.
        num //= K
    else:
        num -= 1
    count += 1
print(count)
"naminyeop@naver.com"
] | naminyeop@naver.com |
e95450b4b2a062095da6f2a52983a8128ebe702a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02640/s043458506.py | aa5a66ce9487ea4e0b7b83b41044d3742b278eb9 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | # Crane and Turtle
# Crane and Turtle: X animals have Y legs in total; cranes have 2 legs and
# turtles have 4. Print 'Yes' iff some number of turtles t in [0, X] gives
# exactly Y legs, else 'No'.
X, Y = [int(i) for i in input().split()]
answer = 'No'
for turtles in range(X + 1):
    total_legs = 2 * (X + turtles)  # 2 legs each, plus 2 extra per turtle
    if total_legs == Y:
        answer = 'Yes'
        break
    if total_legs > Y:
        # Leg counts only grow from here; no solution exists.
        break
print(answer)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
16d8e89e918f02b740fb31f6d8b1d19b9d2dfda4 | e6e57bf7d4eda37f1188ab72ff249675f40029ee | /cs61a/projects/ants/ants.py | a8a120ed09ea374651365b3c40741aaa2b2431a7 | [] | no_license | juanpedrovel/bomboclap | 4e186331ef1c26c8522e44c21d6a33358471786b | 99db02266c31dd14357ef6a575d35fcf55718617 | refs/heads/master | 2020-04-19T21:16:38.141830 | 2019-01-31T00:31:24 | 2019-01-31T00:31:24 | 168,436,881 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,739 | py | """CS 61A presents Ants Vs. SomeBees."""
import random
from ucb import main, interact, trace
from collections import OrderedDict
################
# Core Classes #
################
class Place(object):
    """A Place holds insects and has an exit to another Place.

    Places form a doubly-linked tunnel: following `exit` leads toward the
    ant queen; following `entrance` leads back toward the bees' Hive.
    """
    def __init__(self, name, exit=None):
        """Create a Place with the given NAME and EXIT.
        name -- A string; the name of this Place.
        exit -- The Place reached by exiting this Place (may be None).
        """
        self.name = name
        self.exit = exit
        self.bees = []  # A list of Bees
        self.ant = None  # An Ant
        self.entrance = None  # A Place
        # Phase 1: Add an entrance to the exit
        # Wire the reverse link so the exit knows this Place feeds into it.
        # BEGIN Problem 2
        if self.exit:
            self.exit.entrance = self
        # END Problem 2
    def add_insect(self, insect):
        """Add an Insect to this Place.
        There can be at most one Ant in a Place, unless exactly one of them is
        a container ant (Problem 9), in which case there can be two. If add_insect
        tries to add more Ants than is allowed, an assertion error is raised.
        There can be any number of Bees in a Place.
        """
        if insect.is_ant:
            if self.ant is None:
                self.ant = insect
            else:
                # BEGIN Problem 9
                # Exactly one of the two ants may be a container; the
                # container always ends up as self.ant with the other inside.
                if self.ant.can_contain(insect):
                    self.ant.contain_ant(insect)
                elif insect.can_contain(self.ant):
                    insect.contain_ant(self.ant)
                    self.ant = insect
                else:
                    # self.ant is known non-None here, so this always fails.
                    assert self.ant is None, 'Two ants in {0}'.format(self)
                # END Problem 9
        else:
            self.bees.append(insect)
        insect.place = self
    def remove_insect(self, insect):
        """Remove an INSECT from this Place.
        A target Ant may either be directly in the Place, or be contained by a
        container Ant at this place. The true QueenAnt may not be removed. If
        remove_insect tries to remove an Ant that is not anywhere in this
        Place, an AssertionError is raised.
        A Bee is just removed from the list of Bees.
        """
        if insect.is_ant:
            # Special handling for QueenAnt
            # BEGIN Problem 13
            # The one true queen is immortal: silently refuse to remove her.
            if isinstance(insect, QueenAnt) and insect.TrueQueen:
                return
            # END Problem 13
            # Special handling for container ants
            if self.ant is insect:
                # Bodyguard was removed. Contained ant should remain in the game
                if hasattr(self.ant, 'is_container') and self.ant.is_container:
                    self.ant = self.ant.contained_ant
                else:
                    self.ant = None
            else:
                # Contained ant was removed. Bodyguard should remain
                if hasattr(self.ant, 'is_container') and self.ant.is_container \
                        and self.ant.contained_ant is insect:
                    self.ant.contained_ant = None
                else:
                    assert False, '{0} is not in {1}'.format(insect, self)
        else:
            self.bees.remove(insect)
        insect.place = None
    def __str__(self):
        return self.name
class Insect(object):
    """An Insect, the base class of Ant and Bee, has armor and a Place."""

    # Class-level defaults; subclasses override as needed.
    is_ant = False
    damage = 0
    is_watersafe = False

    def __init__(self, armor, place=None):
        """Create an Insect with an ARMOR amount and a starting PLACE."""
        self.armor = armor
        self.place = place  # maintained by Place.add_insect / Place.remove_insect

    def reduce_armor(self, amount):
        """Reduce armor by AMOUNT, and remove the insect from its place if it
        has no armor remaining.

        >>> test_insect = Insect(5)
        >>> test_insect.reduce_armor(2)
        >>> test_insect.armor
        3
        """
        remaining = self.armor - amount
        self.armor = remaining
        if remaining <= 0:
            self.place.remove_insect(self)

    def action(self, colony):
        """The action performed each turn; subclasses override.

        colony -- The AntColony, used to access game state information.
        """

    def __repr__(self):
        return '{0}({1}, {2})'.format(type(self).__name__, self.armor, self.place)
class Bee(Insect):
    """A Bee moves from place to place, following exits and stinging ants."""
    name = 'Bee'
    damage = 1
    is_watersafe = True  # Bees fly over Water, so they never drown
    def sting(self, ant):
        """Attack an ANT, reducing its armor by 1."""
        ant.reduce_armor(self.damage)
    def move_to(self, place):
        """Move from the Bee's current Place to a new PLACE."""
        self.place.remove_insect(self)
        place.add_insect(self)
    def blocked(self):
        """Return True if this Bee cannot advance to the next Place."""
        # Phase 4: Special handling for NinjaAnt
        # BEGIN Problem 7
        # Blocked iff an ant is present AND that ant blocks the path
        # (NinjaAnt sets blocks_path = False).
        return not(self.place.ant is None or not self.place.ant.blocks_path)
        # END Problem 7
    def action(self, colony):
        """A Bee's action stings the Ant that blocks its exit if it is blocked,
        or moves to the exit of its current place otherwise.
        colony -- The AntColony, used to access game state information.
        """
        destination = self.place.exit
        # Extra credit: Special handling for bee direction
        # BEGIN EC
        # TODO(review): unimplemented extra-credit stub; a scared bee is
        # presumably meant to move toward self.place.entrance instead.
        "*** YOUR CODE HERE ***"
        # END EC
        if self.blocked():
            self.sting(self.place.ant)
        elif self.armor > 0 and destination is not None:
            self.move_to(destination)
class Ant(Insect):
    """An Ant occupies a place and does work for the colony."""

    is_ant = True
    implemented = False  # only implemented Ant subclasses may be instantiated
    food_cost = 0
    blocks_path = True   # most ants stop bees from advancing past them
    is_container = False

    def __init__(self, armor=1):
        """Create an Ant with an ARMOR quantity."""
        super().__init__(armor)

    def can_contain(self, other):
        """Plain ants can never hold another ant; containers override this."""
        return False
class HarvesterAnt(Ant):
    """HarvesterAnt produces 1 additional food per turn for the colony."""

    name = 'Harvester'
    implemented = True
    food_cost = 2
    armor = 1

    def action(self, colony):
        """Deposit one unit of food into COLONY's stockpile.

        colony -- The AntColony, used to access game state information.
        """
        colony.food = colony.food + 1
class ThrowerAnt(Ant):
    """ThrowerAnt throws a leaf each turn at the nearest Bee in its range."""

    name = 'Thrower'
    implemented = True
    damage = 1
    food_cost = 3
    armor = 1

    def nearest_bee(self, hive, min_range=0, max_range=float('inf')):
        """Return a random Bee from the closest occupied Place that lies
        between min_range and max_range hops (inclusive) toward HIVE,
        following entrances. The hive itself is never searched.

        Returns None if no Bee is in range.
        """
        place, distance = self.place, 0
        while place is not hive and distance <= max_range:
            if place.bees and distance >= min_range:
                return random_or_none(place.bees)
            place, distance = place.entrance, distance + 1
        return None

    def throw_at(self, target):
        """Throw a leaf at the TARGET Bee, reducing its armor."""
        if target is not None:
            target.reduce_armor(self.damage)

    def action(self, colony):
        """Throw a leaf at the nearest Bee in range."""
        self.throw_at(self.nearest_bee(colony.hive))
def random_or_none(s):
    """Return a random element of sequence S, or return None if S is empty."""
    return random.choice(s) if s else None
##############
# Extensions #
##############
class ShortThrower(ThrowerAnt):
    """A ThrowerAnt that only throws leaves at Bees at most 3 places away."""

    name = 'Short'
    implemented = True
    max_range = 3
    food_cost = 2

    def nearest_bee(self, hive):
        """Search only up to max_range places toward the hive."""
        return super().nearest_bee(hive, max_range=self.max_range)
class LongThrower(ThrowerAnt):
    """A ThrowerAnt that only throws leaves at Bees at least 5 places away."""

    name = 'Long'
    implemented = True
    min_range = 5
    food_cost = 2

    def nearest_bee(self, hive):
        """Ignore any Bee closer than min_range places."""
        return super().nearest_bee(hive, min_range=self.min_range)
class FireAnt(Ant):
    """FireAnt cooks any Bee in its Place when it expires."""

    name = 'Fire'
    damage = 3
    implemented = True
    food_cost = 5
    armor = 1

    def reduce_armor(self, amount):
        """Reduce armor by AMOUNT. If that brings armor to zero or below,
        first deal self.damage to every Bee in this Place, then expire.
        """
        if self.armor <= amount:
            # Iterate over a snapshot: reduce_armor may remove bees from
            # self.place.bees while we loop.
            for bee in list(self.place.bees):
                bee.reduce_armor(self.damage)
        super().reduce_armor(amount)
class HungryAnt(Ant):
    """HungryAnt will take three turns to digest a Bee in its place.
    While digesting, the HungryAnt can't eat another Bee.
    """

    name = 'Hungry'
    implemented = True
    time_to_digest = 3
    food_cost = 4
    armor = 1

    def __init__(self, armor=1):
        # Bug fix: the original never called the base initializer, so the
        # armor argument was silently ignored and self.place was left unset
        # until the ant was added to a Place.
        Ant.__init__(self, armor)
        self.digesting = 0  # turns remaining before it can eat again

    def eat_bee(self, bee):
        """Instantly kill BEE and begin digesting for time_to_digest turns."""
        bee.reduce_armor(bee.armor)
        self.digesting = self.time_to_digest

    def action(self, colony):
        """Digest if busy; otherwise eat a random Bee sharing this Place."""
        if self.digesting:
            self.digesting -= 1
        elif self.place.bees:
            self.eat_bee(random_or_none(self.place.bees))
class NinjaAnt(Ant):
    """NinjaAnt does not block the path and damages all bees in its place."""

    name = 'Ninja'
    damage = 1
    implemented = True
    food_cost = 5
    armor = 1
    blocks_path = False  # bees walk right past (and over) a NinjaAnt

    def action(self, colony):
        """Strike every Bee currently passing through this Place."""
        # Snapshot the list: killed bees are removed from place.bees mid-loop.
        for bee in list(self.place.bees):
            bee.reduce_armor(self.damage)
# BEGIN Problem 8
# The WallAnt class
# END Problem 8
class WallAnt(Ant):
    """WallAnt does nothing each turn but absorbs damage with large armor."""

    name = 'Wall'
    implemented = True
    food_cost = 4

    def __init__(self, armor=4):
        """Create a WallAnt with 4 armor by default."""
        super().__init__(armor)
class BodyguardAnt(Ant):
    """BodyguardAnt provides protection to other Ants."""

    name = 'Bodyguard'
    implemented = True
    is_container = True
    food_cost = 4

    def __init__(self, armor=2):
        Ant.__init__(self, armor)
        self.contained_ant = None  # The Ant hidden in this bodyguard

    def can_contain(self, other):
        """Return True iff this bodyguard is empty and OTHER is not itself
        a container ant (containers cannot nest).
        """
        # Idiom fix: use 'is None' rather than '== None' and return the
        # boolean expression directly instead of an if/else returning
        # True/False.
        return self.contained_ant is None and not other.is_container

    def contain_ant(self, ant):
        """Shelter ANT inside this bodyguard."""
        self.contained_ant = ant

    def action(self, colony):
        """A bodyguard takes no action itself; its contained ant (if any) acts."""
        if self.contained_ant:
            self.contained_ant.action(colony)
class TankAnt(BodyguardAnt):
    """TankAnt provides both offensive and defensive capabilities."""

    name = 'Tank'
    damage = 1
    implemented = True
    food_cost = 6
    is_container = True

    def action(self, colony):
        """Damage every Bee in this Place, then let the contained ant act."""
        # Snapshot the list: killed bees are removed from place.bees mid-loop.
        for bee in list(self.place.bees):
            bee.reduce_armor(self.damage)
        super().action(colony)
class Water(Place):
    """Water is a place that can only hold watersafe insects."""

    def add_insect(self, insect):
        """Add INSECT to this place; a non-watersafe insect immediately
        drowns (its armor is reduced to 0).
        """
        super().add_insect(insect)
        if not insect.is_watersafe:
            insect.reduce_armor(insect.armor)
# BEGIN Problem 12
class ScubaThrower(ThrowerAnt):
    """A ThrowerAnt that is watersafe, so it survives being placed on Water."""
    name = 'Scuba'
    implemented = True
    is_watersafe = True  # exempt from Water.add_insect's drowning check
    food_cost = 6
# END Problem 12
class QueenAnt(ScubaThrower):
    """The Queen of the colony. The game is over if a bee enters her place."""

    name = 'Queen'
    implemented = True
    food_cost = 7
    # Class-level flag: True until the first (true) queen is constructed.
    TrueQueen = True

    def __init__(self, armor=1):
        # Bug fix: the original never called the base initializer, so the
        # armor argument was silently ignored and self.place was left unset
        # until the ant was added to a Place.
        ScubaThrower.__init__(self, armor)
        if QueenAnt.TrueQueen:
            # The first queen constructed becomes the one true queen; later
            # queens read the (now False) class attribute and are impostors.
            QueenAnt.TrueQueen = False
            self.TrueQueen = True
        self.ants_behind = []  # ants whose damage has already been doubled

    def action(self, colony):
        """A queen ant throws a leaf, but also doubles the damage of ants
        in her tunnel.

        Impostor queens do only one thing: reduce their own armor to 0.
        """
        if not self.TrueQueen:
            # Bypass QueenAnt.reduce_armor so an impostor actually dies.
            Insect.reduce_armor(self, self.armor)
            return
        # Double each ant behind the queen exactly once, including ants
        # hidden inside container ants.
        current_place = self.place.exit
        while current_place is not None:
            place_ant = current_place.ant
            if place_ant is not None:
                if place_ant not in self.ants_behind:
                    place_ant.damage *= 2
                    self.ants_behind.append(place_ant)
                if place_ant.is_container and place_ant.contained_ant:
                    contained = place_ant.contained_ant
                    if contained not in self.ants_behind:
                        contained.damage *= 2
                        self.ants_behind.append(contained)
            current_place = current_place.exit
        ThrowerAnt.action(self, colony)

    def reduce_armor(self, amount):
        """Reduce armor by AMOUNT, and if the True QueenAnt has no armor
        remaining, signal the end of the game.
        """
        if self.TrueQueen and self.armor <= amount:
            bees_win()
        else:
            Insect.reduce_armor(self, amount)
class AntRemover(Ant):
    """Allows the player to remove ants from the board in the GUI."""

    name = 'Remover'
    implemented = False

    def __init__(self):
        # Zero armor: placing it on an ant removes that ant in the GUI.
        super().__init__(0)
##################
# Status Effects #
##################
def make_slow(action, bee):
    """Return a new action method that calls ACTION every other turn.

    The slowed bee acts only on even colony times; on odd times it does
    nothing for that turn.

    action -- An action method of some Bee
    bee -- the affected Bee (unused here; kept for the effect signature)
    """
    def slowed_action(colony):
        if colony.time % 2 == 0:
            action(colony)
    return slowed_action
def make_scare(action, bee):
    """Return a new action method that makes the bee go backwards.
    action -- An action method of some Bee
    """
    # BEGIN Problem EC
    # TODO(review): unimplemented extra-credit stub. A correct version must
    # cooperate with Bee.action's (also unimplemented) direction handling --
    # presumably by moving the bee toward place.entrance for the effect's
    # duration; confirm the intended contract before implementing.
    "*** YOUR CODE HERE ***"
    # END Problem EC
def apply_effect(effect, bee, duration):
    """Apply a status effect to a BEE that lasts for DURATION turns.

    The bee's action is rebound: for the next DURATION calls the
    effect-modified action runs; afterwards the previously-bound action is
    used again (by delegation, so stacked effects compose correctly).

    effect -- a function (action, bee) -> new action method
    """
    original_action = bee.action
    effected_action = effect(original_action, bee)

    def action(colony):
        nonlocal duration
        if duration > 0:
            duration -= 1
            effected_action(colony)
        else:
            original_action(colony)
    # Note: Hornet.__setattr__ silently ignores this rebinding, which makes
    # Hornets immune to status effects.
    bee.action = action
class SlowThrower(ThrowerAnt):
    """ThrowerAnt that causes Slow on Bees."""
    name = 'Slow'
    # BEGIN Problem EC
    implemented = False  # Change to True to view in the GUI
    # END Problem EC
    def throw_at(self, target):
        # Applies the slow status for 3 turns instead of dealing damage.
        if target:
            apply_effect(make_slow, target, 3)
class ScaryThrower(ThrowerAnt):
    """ThrowerAnt that intimidates Bees, making them back away instead of advancing."""
    name = 'Scary'
    # BEGIN Problem EC
    implemented = False  # Change to True to view in the GUI
    # END Problem EC
    def throw_at(self, target):
        # BEGIN Problem EC
        # TODO(review): unimplemented extra-credit stub; presumably should
        # call apply_effect(make_scare, target, ...) once make_scare and
        # Bee.action's direction handling exist.
        "*** YOUR CODE HERE ***"
        # END Problem EC
class LaserAnt(ThrowerAnt):
    """Optional ant intended to damage all insects in front of it.
    NOTE(review): insects_in_front and calculate_damage are unimplemented
    stubs (empty dict / zero damage), so this ant is inert as written.
    """
    # This class is optional. Only one test is provided for this class.
    name = 'Laser'
    # BEGIN Problem OPTIONAL
    implemented = False  # Change to True to view in the GUI
    # END Problem OPTIONAL
    def __init__(self, armor=1):
        ThrowerAnt.__init__(self, armor)
        self.insects_shot = 0  # running hit count, used by calculate_damage
    def insects_in_front(self, hive):
        # Should map each insect in front of this ant to its distance.
        # BEGIN Problem OPTIONAL
        return {}
        # END Problem OPTIONAL
    def calculate_damage(self, distance):
        # Should derive damage from DISTANCE and self.insects_shot.
        # BEGIN Problem OPTIONAL
        return 0
        # END Problem OPTIONAL
    def action(self, colony):
        insects_and_distances = self.insects_in_front(colony.hive)
        for insect, distance in insects_and_distances.items():
            damage = self.calculate_damage(distance)
            insect.reduce_armor(damage)
            if damage:
                self.insects_shot += 1
##################
# Bees Extension #
##################
class Wasp(Bee):
    """Class of Bee that has higher damage."""
    name = 'Wasp'
    damage = 2  # stings for 2 instead of the Bee default of 1
class Hornet(Bee):
    """Class of bee that is capable of taking two actions per turn, although
    its overall damage output is lower. Immune to status effects.
    """
    name = 'Hornet'
    damage = 0.25
    def action(self, colony):
        # Act twice per turn, skipping the second action if the first one
        # got this hornet killed.
        for i in range(2):
            if self.armor > 0:
                super().action(colony)
    def __setattr__(self, name, value):
        # Silently drop attempts to rebind 'action' on the instance.
        # NOTE(review): presumably status effects are applied by replacing
        # bee.action (see apply_effect), which this blocks -- hence the
        # "immune to status effects" claim; confirm once apply_effect exists.
        if name != 'action':
            object.__setattr__(self, name, value)
class NinjaBee(Bee):
"""A Bee that cannot be blocked. Is capable of moving past all defenses to
assassinate the Queen.
"""
name = 'NinjaBee'
def blocked(self):
return False
class Boss(Wasp, Hornet):
"""The leader of the bees. Combines the high damage of the Wasp along with
status effect immunity of Hornets. Damage to the boss is capped up to 8
damage by a single attack.
"""
name = 'Boss'
damage_cap = 8
action = Wasp.action
def reduce_armor(self, amount):
super().reduce_armor(self.damage_modifier(amount))
def damage_modifier(self, amount):
return amount * self.damage_cap/(self.damage_cap + amount)
class Hive(Place):
    """The Place from which the Bees launch their assault.
    assault_plan -- An AssaultPlan; when & where bees enter the colony.
    """
    def __init__(self, assault_plan):
        self.name = 'Hive'
        self.assault_plan = assault_plan
        self.bees = []
        # Every bee in the plan starts out sitting in the hive.
        for bee in assault_plan.all_bees:
            self.add_insect(bee)
        # The following attributes are always None for a Hive
        self.entrance = None
        self.ant = None
        self.exit = None
    def strategy(self, colony):
        """Release this turn's wave of bees into random tunnel entrances."""
        exits = [p for p in colony.places.values() if p.entrance is self]
        for bee in self.assault_plan.get(colony.time, []):
            bee.move_to(random.choice(exits))
            colony.active_bees.append(bee)
class AntColony(object):
    """An ant collective that manages global game state and simulates time.
    Attributes:
    time -- elapsed time
    food -- the colony's available food total
    queen -- the place where the queen resides
    places -- A list of all places in the colony (including a Hive)
    bee_entrances -- A list of places that bees can enter
    """
    def __init__(self, strategy, hive, ant_types, create_places, dimensions, food=2):
        """Create an AntColony for simulating a game.
        Arguments:
        strategy -- a function to deploy ants to places
        hive -- a Hive full of bees
        ant_types -- a list of ant constructors
        create_places -- a function that creates the set of places
        dimensions -- a pair containing the dimensions of the game layout
        """
        self.time = 0
        self.food = food
        self.strategy = strategy
        self.hive = hive
        # Keyed by ant name so strategies can deploy ants by type name.
        self.ant_types = OrderedDict((a.name, a) for a in ant_types)
        self.dimensions = dimensions
        self.active_bees = []
        self.configure(hive, create_places)
    def configure(self, hive, create_places):
        """Configure the places in the colony."""
        self.queen = QueenPlace('AntQueen')
        self.places = OrderedDict()
        self.bee_entrances = []
        def register_place(place, is_bee_entrance):
            # Callback handed to the layout builder: records every place and
            # wires bee entrances back to the hive.
            self.places[place.name] = place
            if is_bee_entrance:
                place.entrance = hive
                self.bee_entrances.append(place)
        register_place(self.hive, False)
        create_places(self.queen, register_place, self.dimensions[0], self.dimensions[1])
    def simulate(self):
        """Simulate an attack on the ant colony (i.e., play the game).

        Returns True if the ants win, False if the bees do; a win/loss is
        signalled from inside the loop via the GameOver exceptions.
        """
        num_bees = len(self.bees)
        try:
            while True:
                self.hive.strategy(self)  # Bees invade
                self.strategy(self)  # Ants deploy
                for ant in self.ants:  # Ants take actions
                    if ant.armor > 0:
                        ant.action(self)
                # Iterate over a copy: dead bees are removed during the loop.
                for bee in self.active_bees[:]:  # Bees take actions
                    if bee.armor > 0:
                        bee.action(self)
                    if bee.armor <= 0:
                        num_bees -= 1
                        self.active_bees.remove(bee)
                if num_bees == 0:
                    raise AntsWinException()
                self.time += 1
        except AntsWinException:
            print('All bees are vanquished. You win!')
            return True
        except BeesWinException:
            print('The ant queen has perished. Please try again.')
            return False
    def deploy_ant(self, place_name, ant_type_name):
        """Place an ant if enough food is available.
        This method is called by the current strategy to deploy ants.

        Returns the deployed ant, or (implicitly) None when food is short.
        """
        constructor = self.ant_types[ant_type_name]
        if self.food < constructor.food_cost:
            print('Not enough food remains to place ' + ant_type_name)
        else:
            ant = constructor()
            self.places[place_name].add_insect(ant)
            self.food -= constructor.food_cost
            return ant
    def remove_ant(self, place_name):
        """Remove an Ant from the Colony."""
        place = self.places[place_name]
        if place.ant is not None:
            place.remove_insect(place.ant)
    @property
    def ants(self):
        """All ants currently placed in the colony."""
        return [p.ant for p in self.places.values() if p.ant is not None]
    @property
    def bees(self):
        """All bees currently in any place of the colony."""
        return [b for p in self.places.values() for b in p.bees]
    @property
    def insects(self):
        """All ants followed by all bees."""
        return self.ants + self.bees
    def __str__(self):
        status = ' (Food: {0}, Time: {1})'.format(self.food, self.time)
        return str([str(i) for i in self.ants + self.bees]) + status
class QueenPlace(Place):
    """QueenPlace at the end of the tunnel, where the queen resides."""
    def add_insect(self, insect):
        """Add an Insect to this Place.
        Can't actually add Ants to a QueenPlace. However, if a Bee attempts to
        enter the QueenPlace, a BeesWinException is raised, signaling the end
        of a game.
        """
        # NOTE(review): the '{0}' placeholder in this assert message is never
        # formatted with the insect -- confirm whether .format(insect) was
        # intended.
        assert not insect.is_ant, 'Cannot add {0} to QueenPlace'
        raise BeesWinException()
def ants_win():
    """Signal that Ants win."""
    raise AntsWinException()
def bees_win():
    """Signal that Bees win."""
    raise BeesWinException()
def ant_types():
    """Return a list of all implemented Ant classes."""
    # Breadth-first walk of Ant's subclass tree; only classes whose
    # `implemented` flag is True are exposed.
    all_ant_types = []
    new_types = [Ant]
    while new_types:
        new_types = [t for c in new_types for t in c.__subclasses__()]
        all_ant_types.extend(new_types)
    return [t for t in all_ant_types if t.implemented]
class GameOverException(Exception):
    """Base game over Exception."""
    pass
class AntsWinException(GameOverException):
    """Exception to signal that the ants win."""
    pass
class BeesWinException(GameOverException):
    """Exception to signal that the bees win."""
    pass
def interactive_strategy(colony):
    """A strategy that starts an interactive session and lets the user make
    changes to the colony.
    For example, one might deploy a ThrowerAnt to the first tunnel by invoking
    colony.deploy_ant('tunnel_0_0', 'Thrower')
    """
    print('colony: ' + str(colony))
    msg = '<Control>-D (<Control>-Z <Enter> on Windows) completes a turn.\n'
    # `interact` is expected to come from the `utils` star-import near the
    # bottom of this file -- confirm.
    interact(msg)
def start_with_strategy(args, strategy):
    """Reads command-line arguments and starts a game with those options."""
    # NOTE(review): the `args` parameter is immediately shadowed by
    # parse_args() below, which reads sys.argv -- the caller's args are
    # effectively ignored; confirm this is intended.
    import argparse
    parser = argparse.ArgumentParser(description="Play Ants vs. SomeBees")
    parser.add_argument('-d', type=str, metavar='DIFFICULTY',
                        help='sets difficulty of game (test/easy/medium/hard/extra-hard)')
    parser.add_argument('-w', '--water', action='store_true',
                        help='loads a full layout with water')
    parser.add_argument('--food', type=int,
                        help='number of food to start with when testing', default=2)
    args = parser.parse_args()
    # Defaults: normal assault, dry 3x9 layout.
    assault_plan = make_normal_assault_plan()
    layout = dry_layout
    tunnel_length = 9
    num_tunnels = 3
    food = args.food
    if args.water:
        layout = wet_layout
    # Difficulty selects the assault plan and the number of tunnels.
    if args.d in ['t', 'test']:
        assault_plan = make_test_assault_plan()
        num_tunnels = 1
    elif args.d in ['e', 'easy']:
        assault_plan = make_easy_assault_plan()
        num_tunnels = 2
    elif args.d in ['n', 'normal']:
        assault_plan = make_normal_assault_plan()
        num_tunnels = 3
    elif args.d in ['h', 'hard']:
        assault_plan = make_hard_assault_plan()
        num_tunnels = 4
    elif args.d in ['i', 'extra-hard']:
        assault_plan = make_extra_hard_assault_plan()
        num_tunnels = 4
    hive = Hive(assault_plan)
    dimensions = (num_tunnels, tunnel_length)
    return AntColony(strategy, hive, ant_types(), layout, dimensions, food).simulate()
###########
# Layouts #
###########
def wet_layout(queen, register_place, tunnels=3, length=9, moat_frequency=3):
    """Register a mix of wet and dry places."""
    # Every moat_frequency-th step is Water; moat_frequency == 0 disables
    # water entirely (used by dry_layout). The last place of each tunnel is
    # registered as a bee entrance.
    for tunnel in range(tunnels):
        exit = queen
        for step in range(length):
            if moat_frequency != 0 and (step + 1) % moat_frequency == 0:
                exit = Water('water_{0}_{1}'.format(tunnel, step), exit)
            else:
                exit = Place('tunnel_{0}_{1}'.format(tunnel, step), exit)
            register_place(exit, step == length - 1)
def dry_layout(queen, register_place, tunnels=3, length=9):
    """Register dry tunnels."""
    wet_layout(queen, register_place, tunnels, length, 0)
#################
# Assault Plans #
#################
class AssaultPlan(dict):
    """The Bees' plan of attack for the Colony.

    Attacks come in timed waves: the plan is a dict mapping a time (int) to
    the wave of Bees (list) that enters the colony at that time.
    """
    def add_wave(self, bee_type, bee_armor, time, count):
        """Add a wave at *time* with *count* Bees of the given armor.

        Returns self so waves can be chained fluently.
        """
        wave = self.setdefault(time, [])
        for _ in range(count):
            wave.append(bee_type(bee_armor))
        return self
    @property
    def all_bees(self):
        """Place all Bees in the hive and return the list of Bees."""
        bees = []
        for wave in self.values():
            bees.extend(wave)
        return bees
def make_test_assault_plan():
    """Two single armor-3 Bees, at t=2 and t=3 (the 'test' difficulty)."""
    return AssaultPlan().add_wave(Bee, 3, 2, 1).add_wave(Bee, 3, 3, 1)
def make_easy_assault_plan():
    """Single armor-3 Bees every other turn plus one of each special bee."""
    plan = AssaultPlan()
    for time in range(3, 16, 2):
        plan.add_wave(Bee, 3, time, 1)
    plan.add_wave(Wasp, 3, 4, 1)
    plan.add_wave(NinjaBee, 3, 8, 1)
    plan.add_wave(Hornet, 3, 12, 1)
    plan.add_wave(Boss, 15, 16, 1)
    return plan
def make_normal_assault_plan():
    """Pairs of armor-3 Bees plus specials, then a heavier boss stage."""
    plan = AssaultPlan()
    for time in range(3, 16, 2):
        plan.add_wave(Bee, 3, time, 2)
    plan.add_wave(Wasp, 3, 4, 1)
    plan.add_wave(NinjaBee, 3, 8, 1)
    plan.add_wave(Hornet, 3, 12, 1)
    plan.add_wave(Wasp, 3, 16, 1)
    # Boss Stage
    for time in range(21, 30, 2):
        plan.add_wave(Bee, 3, time, 2)
    plan.add_wave(Wasp, 3, 22, 2)
    plan.add_wave(Hornet, 3, 24, 2)
    plan.add_wave(NinjaBee, 3, 26, 2)
    plan.add_wave(Hornet, 3, 28, 2)
    plan.add_wave(Boss, 20, 30, 1)
    return plan
def make_hard_assault_plan():
    """Armor-4 bees in pairs, specials doubled, and an armor-30 Boss."""
    plan = AssaultPlan()
    for time in range(3, 16, 2):
        plan.add_wave(Bee, 4, time, 2)
    plan.add_wave(Hornet, 4, 4, 2)
    plan.add_wave(Wasp, 4, 8, 2)
    plan.add_wave(NinjaBee, 4, 12, 2)
    plan.add_wave(Wasp, 4, 16, 2)
    # Boss Stage
    for time in range(21, 30, 2):
        plan.add_wave(Bee, 4, time, 3)
    plan.add_wave(Wasp, 4, 22, 2)
    plan.add_wave(Hornet, 4, 24, 2)
    plan.add_wave(NinjaBee, 4, 26, 2)
    plan.add_wave(Hornet, 4, 28, 2)
    plan.add_wave(Boss, 30, 30, 1)
    return plan
def make_extra_hard_assault_plan():
    """Armor-5 bees, an early Hornet wave, and two armor-30 Bosses."""
    plan = AssaultPlan()
    plan.add_wave(Hornet, 5, 2, 2)
    for time in range(3, 16, 2):
        plan.add_wave(Bee, 5, time, 2)
    plan.add_wave(Hornet, 5, 4, 2)
    plan.add_wave(Wasp, 5, 8, 2)
    plan.add_wave(NinjaBee, 5, 12, 2)
    plan.add_wave(Wasp, 5, 16, 2)
    # Boss Stage
    for time in range(21, 30, 2):
        plan.add_wave(Bee, 5, time, 3)
    plan.add_wave(Wasp, 5, 22, 2)
    plan.add_wave(Hornet, 5, 24, 2)
    plan.add_wave(NinjaBee, 5, 26, 2)
    plan.add_wave(Hornet, 5, 28, 2)
    plan.add_wave(Boss, 30, 30, 2)
    return plan
from utils import *
@main
def run(*args):
    """Entry point: wrap reduce_armor with expiry reporting, then start a game."""
    # class_method_wrapper / print_expired_insects / main come from the
    # `utils` star-import above.
    Insect.reduce_armor = class_method_wrapper(Insect.reduce_armor,
        pre=print_expired_insects)
    start_with_strategy(args, interactive_strategy)
"juanpedrovel@gmail.com"
] | juanpedrovel@gmail.com |
6c04b90eea1e953de658699112f87c4514392784 | c7e2d1d8258de451afae648b4669b6ca23ea233a | /instrument_drivers/EmgWizard/setup.py | 1218274fa4da75e50df2040d6d1594c3dcfb6cb5 | [] | no_license | PaulKumar33/esc499 | 7aeb71238d7754e3e70253b5cbc66e39b8d5a420 | 00b8253794c3c342a57a0a64abf41cac11b8d909 | refs/heads/master | 2021-04-24T00:49:10.483544 | 2020-04-15T20:17:08 | 2020-04-15T20:17:08 | 250,046,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,021 | py | #!/usr/bin/python
from distutils.core import setup
import py2exe
manifest = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1"
manifestVersion="1.0">
<assemblyIdentity
version="0.64.1.0"
processorArchitecture="x86"
name="Controls"
type="win32"
/>
<description>Your Application</description>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="X86"
publicKeyToken="6595b64144ccf1df"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>
"""
"""
installs manifest and icon into the .exe
but icon is still needed as we open it
for the window icon (not just the .exe)
changelog and logo are included in dist
"""
# py2exe build configuration: embed the Common-Controls manifest defined
# above as a win32 resource (type 24, id 1) of the generated executable.
setup(
    windows = [
        {
            "script": "my_app.py",
            "other_resources": [(24,1,manifest)]
        }
    ]
)
"paul.kumar@mail.utoronto.ca"
] | paul.kumar@mail.utoronto.ca |
db31907a01d52a39d25f3846037ce013121f0968 | ea4de36ab7c7eb2c6945a76a213eb5e3d01c17d8 | /stockmgmt/form.py | 0f49be6ebb2af29c03c8df50bc019495659c6999 | [] | no_license | saadchaudharry/inventory_django | 3fa0a2df6597bb1f3e1956dfc2f685508ae43838 | 93fd6acb98062f4202e3c008d59261da94e30eaf | refs/heads/master | 2023-03-01T01:01:58.283701 | 2021-02-08T16:00:48 | 2021-02-08T16:00:48 | 337,126,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,826 | py | from django import forms
from .models import Stock, StockHistory
class StockCreateForm(forms.ModelForm):
    """Create a Stock row; category and item_name are both required."""
    class Meta:
        model = Stock
        fields = ['category', 'item_name', 'quantity']
    def clean_category(self):
        """Reject an empty category."""
        category = self.cleaned_data.get('category')
        if not category:
            raise forms.ValidationError('This field is required')
        # Duplicate-category check kept disabled by the original author:
        # for instance in Stock.objects.all():
        #     if instance.category == category:
        #         raise forms.ValidationError(f'{category} is already created')
        return category
    def clean_item_name(self):
        """Reject an empty item name."""
        item_name = self.cleaned_data.get('item_name')
        if not item_name:
            raise forms.ValidationError('This field is required')
        return item_name
# search form
class StockSearchForm(forms.ModelForm):
    """Filter stock by category/item name, optionally exporting to CSV."""
    export_to_CSV = forms.BooleanField(required=False)
    class Meta:
        model = Stock
        fields = ['category', 'item_name']
# update item
class StockUpdateForm(forms.ModelForm):
    """Edit an existing stock row."""
    class Meta:
        model = Stock
        fields = ['category', 'item_name', 'quantity']
# issue form
class IssueForm(forms.ModelForm):
    """Record stock issued out and to whom."""
    class Meta:
        model = Stock
        fields = ['issue_quantity', 'issue_to']
# receive form
class ReceiveForm(forms.ModelForm):
    """Record stock received and by whom."""
    class Meta:
        model = Stock
        fields = ['receive_quantity', 'receive_by']
# reorder threshold
class ReorderLevelForm(forms.ModelForm):
    """Set the quantity at which an item should be re-ordered."""
    class Meta:
        model = Stock
        fields = ['reorder_level']
class StockHistorySearchForm(forms.ModelForm):
    """Filter stock history, optionally exporting to CSV."""
    export_to_CSV = forms.BooleanField(required=False)
    # start_date = forms.DateTimeField(required=False)
    # end_date = forms.DateTimeField(required=False)
    class Meta:
        model = StockHistory
        fields = ['category', 'item_name']
| [
"saadchaudhary646@gmail.com"
] | saadchaudhary646@gmail.com |
5f2fbef875601c7ebaef35f393a8bee9bbd99875 | 06a4fb59f955853781bc9e98098e387a7beadf55 | /project/component/reminders.py | edbc5a3a69ad81f0de855c74a1cc57f484a7bb09 | [] | no_license | BishtShubham03/RemindTasking | 2178cd180da5d0aebff796186ed8429bd3620748 | 34ebbb4947697b0d28c8614028f84db8fa24d83e | refs/heads/master | 2021-04-25T23:23:42.365366 | 2017-10-17T09:57:41 | 2017-10-17T09:57:41 | 107,243,627 | 0 | 1 | null | 2017-10-23T05:18:19 | 2017-10-17T09:01:18 | Python | UTF-8 | Python | false | false | 1,276 | py |
from project.component.util import process_reply
import time
import os
import json
from time import strftime
from datetime import datetime
from config import REMINDER_FOLDER
def _convert_ampm(quantity):
if type(quantity) is str:
temp_quant = datetime.strptime(quantity, "%I:%M %p")
elif type(quantity) is datetime:
return quantity.strftime("%I:%M %p")
return datetime.strftime(temp_quant, "%H:%M")
def set_reminder(auth, refresh_token, params, resolved_query, speech):
    """Build a reminder message from the query and append it to today's log.

    auth / refresh_token are accepted for interface compatibility but are
    unused here. Returns the response dict expected by the caller.
    """
    import logging

    print('in reminder func', speech, params)
    # Normalise the parsed "HH:MM:SS" time into a 12-hour "HH:MM AM/PM" form.
    time_object = datetime.strptime(params['time'], "%H:%M:%S")
    params['time'] = _convert_ampm(time_object)
    text = process_reply(resolved_query, params)
    ret = 'Reminder : ' + text[10:]
    print(ret)
    data = ('reminder at ' + strftime("%H:%M:%S") + ' : ' + text[10:]
            + ' -by- ' + speech[-13:])
    file_name = REMINDER_FOLDER.rstrip('/') + '/' + time.strftime("%Y%m%d") + '.txt'
    # The original called `root.info(...)` but `root` was never defined
    # (guaranteed NameError); use a module logger instead.
    logging.getLogger(__name__).info('feedback data storing %s', file_name)
    if not os.path.exists(REMINDER_FOLDER):
        os.makedirs(REMINDER_FOLDER)
    with open(file_name, 'a+') as f:
        json.dump(data, f)
        f.write('\n')
    return {'rmsg': ret, 'contexts': '', 'buttontext': [], 'table': []}
| [
"shubhambisht03@gmail.com"
] | shubhambisht03@gmail.com |
510b351cc1af18f3ed0180c70ef1242ca5bac1d8 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2171/48117/297146.py | bda92c1ed48fa43c5286605774a8b0ab0e50019c | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,525 | py | class Node():
    def __init__(self, item):
        """Create a list cell holding *item* with no successor yet."""
        self.item = item
        self.next = None
class LinkList():
    """Singly linked list with two tail inserts: `add` builds a plain list,
    `append` keeps the structure circular (tail points back at the head)."""

    def __init__(self, node=None):
        """Start the list with an optional existing head node."""
        self.head = node

    def isEmpty(self):
        """Return True when the list holds no nodes."""
        return self.head is None

    def append(self, newItem):
        """Insert newItem at the tail, preserving circularity."""
        fresh = Node(newItem)
        if self.isEmpty():
            self.head = fresh
            fresh.next = self.head
            return
        tail = self.head
        while tail.next is not self.head:
            tail = tail.next
        tail.next = fresh
        fresh.next = self.head

    def add(self, newItem):
        """Insert newItem at the tail of a plain (non-circular) list."""
        fresh = Node(newItem)
        if self.isEmpty():
            self.head = fresh
            return
        tail = self.head
        while tail.next is not None:
            tail = tail.next
        tail.next = fresh
# For each test case: read n values, split them into even and odd linked
# lists, then print the evens followed by the odds.
questNum = int(input())
for quest in range(questNum):
    n = int(input())
    s = input().split(' ')
    for i in range(n):
        s[i] = int(s[i])
    # Build a linked list of the n input values.
    p = LinkList()
    for i in range(n):
        p.add(s[i])
    p1 = p.head
    odd = LinkList()
    ou = LinkList()
    # NOTE(review): every `while x.next != None` loop below stops at the node
    # BEFORE the tail, so the last element of each list is never processed --
    # confirm against the expected output. Also, if either partition ends up
    # empty (or n == 1), the print loops dereference a None head.
    while p1.next != None:
        if p1.item % 2 == 0:
            ou.add(p1.item)
        else:
            odd.add(p1.item)
        p1 = p1.next
    ou1 = ou.head
    odd1 = odd.head
    while ou1.next != None:
        print(ou1.item, end=' ')
        ou1 = ou1.next
    while odd1.next != None:
        print(odd1.item, end = ' ')
        odd1 = odd1.next
    print()
"1069583789@qq.com"
] | 1069583789@qq.com |
21e8e5573f1c6037a1404e7518ad11fd5494c097 | b2319c5e14c94edfb5a39e4c490c1ae6183651ed | /deepgoweb/apps/deepgo/migrations/0013_auto_20190902_0904.py | edb4c8e1436fbb064813d7d04f2b93874adbe234 | [] | no_license | coolmaksat/deepgoweb | 6d67f45059d7bdb4548d50c182a038c6f9c70a31 | fd4904b6b18dd2af06e000679f406b7353a3534f | refs/heads/master | 2021-06-12T14:42:14.513686 | 2021-04-17T10:23:39 | 2021-04-17T10:23:39 | 161,017,035 | 0 | 0 | null | 2018-12-09T07:49:26 | 2018-12-09T07:49:26 | null | UTF-8 | Python | false | false | 2,974 | py | # Generated by Django 2.2.4 on 2019-09-02 09:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated (2019-09-02): introduces the Taxonomy and Annotation
    # models, removes the old sequence/UniProt columns from Protein, and
    # replaces them with accession/name/taxonomy fields. Operation order
    # matters to the migration planner -- regenerate rather than hand-edit.
    dependencies = [
        ('deepgo', '0012_auto_20190505_0848'),
    ]
    operations = [
        migrations.CreateModel(
            name='Taxonomy',
            fields=[
                ('id', models.PositiveIntegerField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=127)),
            ],
        ),
        migrations.RemoveField(
            model_name='protein',
            name='ppi_embedding',
        ),
        migrations.RemoveField(
            model_name='protein',
            name='sequence',
        ),
        migrations.RemoveField(
            model_name='protein',
            name='sequence_md5',
        ),
        migrations.RemoveField(
            model_name='protein',
            name='uni_accession',
        ),
        migrations.RemoveField(
            model_name='protein',
            name='uni_entry_id',
        ),
        migrations.AddField(
            model_name='protein',
            name='acc_id',
            field=models.CharField(default='PROTEIN', max_length=15, unique=True),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='protein',
            name='gene',
            field=models.CharField(blank=True, max_length=31, null=True),
        ),
        migrations.AddField(
            model_name='protein',
            name='name',
            field=models.CharField(default='name', max_length=127),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='protein',
            name='pro_id',
            field=models.CharField(db_index=True, default='PROTEIN', max_length=31),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='protein',
            name='reviewed',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='protein',
            name='id',
            field=models.PositiveIntegerField(primary_key=True, serialize=False),
        ),
        migrations.CreateModel(
            name='Annotation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('go_id', models.PositiveIntegerField(db_index=True)),
                ('score', models.PositiveIntegerField()),
                ('protein', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='annotations', to='deepgo.Protein')),
            ],
        ),
        migrations.AddField(
            model_name='protein',
            name='taxon',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='proteins', to='deepgo.Taxonomy'),
        ),
    ]
| [
"coolmaksat@gmail.com"
] | coolmaksat@gmail.com |
e7e38176c88a147c9c2da58bb5fa2c210b0c7680 | 982d2163a9ed2e8494a1bbf533979eaacc850b84 | /sentiment_analysis/sentiment.py | 275994cc22b4b9c3973a0dba164807e88fab2b3f | [] | no_license | IshadiGamage/ForexForecast | 7318d5732789e072e4b87dd5a1ba6ac10e953364 | 2b4114992fc5b17501e07af4f14899da79a67aca | refs/heads/master | 2020-03-20T00:36:08.753149 | 2018-06-28T04:49:15 | 2018-06-28T04:49:15 | 137,049,845 | 1 | 0 | null | 2018-08-03T03:47:58 | 2018-06-12T09:36:19 | Jupyter Notebook | UTF-8 | Python | false | false | 2,294 | py |
import nltk.classify.util
from nltk.classify import NaiveBayesClassifier
from nltk.corpus import names
def word_feats(words):
    """Map every element of *words* to True -- the bag-of-words feature
    dict shape that NLTK classifiers expect."""
    return {word: True for word in words}
def get_words_from_file(path):
    """Return the contents of *path* as a list of lines (split on '\\n').

    A trailing newline in the file yields a trailing empty string, exactly
    as in the original implementation. The original opened the file twice
    and leaked the second handle; a single ``with`` block reads once and
    guarantees the handle is closed.
    """
    with open(path, "r") as text_file:
        return text_file.read().split('\n')
# positive_vocab = [ 'awesome', 'outstanding', 'fantastic', 'terrific', 'good', 'nice', 'great', ':)' ]
# negative_vocab = [ 'bad', 'terrible','useless', 'hate', ':(' ]
# neutral_vocab = [ 'movie','the','sound','was','is','actors','did','know','words','not' ]
# Load one vocabulary list per sentiment class from plain text files.
positive_vocab = get_words_from_file('Positive.txt')
negative_vocab = get_words_from_file('Negative.txt')
neutral_vocab = get_words_from_file('Interesting.txt')
# print(positive_vocab)
# POLARITY_DATA_DIR = os.path.join('polarityData', 'rt-polaritydata')
# RT_POLARITY_NEG_FILE = os.path.join(POLARITY_DATA_DIR, 'Negative.txt')
# RT_POLARITY_POS_FILE = os.path.join(POLARITY_DATA_DIR, 'Positive.txt')
# RT_POLARITY_CON_FILE = os.path.join(POLARITY_DATA_DIR, 'Constraining.txt')
# Label each vocabulary entry and train a Naive Bayes classifier.
# NOTE(review): each entry is a whole line (a string), so word_feats(...)
# iterates its CHARACTERS, producing character-level features -- confirm
# this is intended.
positive_features = [(word_feats(pos), 'pos') for pos in positive_vocab]
negative_features = [(word_feats(neg), 'neg') for neg in negative_vocab]
neutral_features = [(word_feats(neu), 'neu') for neu in neutral_vocab]
# print(positive_features)
train_set = positive_features + negative_features + neutral_features
# print(train_set)
classifier = NaiveBayesClassifier.train(train_set)
# Predict: classify each word of the sample sentence independently and
# tally the per-class votes.
neg = 0
pos = 0
neu = 0
sentence = "ABLE ADVANCES ACHIEVES"
sentence = sentence.lower()
words = sentence.split(' ')
print(word_feats(words))
for word in words:
    classResult = classifier.classify( word_feats(word))
    print(classResult)
    if classResult == 'pos':
        pos = pos + 1
    if classResult == 'neg':
        neg = neg + 1
    if classResult == 'neu':
        neu = neu + 1
# https://stackoverflow.com/questions/48335460/why-did-nltk-naivebayes-classifier-misclassify-one-record
print(float(pos))
print(float(neg))
print(float(neu))
# print('Positive: ' + str(float(pos)/len(words)))
# print('Negative: ' + str(float(neg)/len(words)))
# print('Neutral: ' + str(float(neu) /len(words)))
| [
"h.n.sampathwith@gmail.com"
] | h.n.sampathwith@gmail.com |
ca594cd86cd5ebc0e85ce7bd32c8f43cf4eed9cd | 496959f1bd3b8525b356edf1825b719d7b54770d | /tests/tests.py | 3e6c2305639608b5502cb774b4bcc23a47aab856 | [
"MIT"
] | permissive | akhlul/django-plaintext-password | 75c3bae77b176819d561e383efc564a696ff347d | 752cf0316cdc45dc9bed5f9107614881d613647f | refs/heads/master | 2023-04-12T06:37:06.810214 | 2021-05-13T08:10:02 | 2021-05-13T08:10:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,548 | py | import pytest
from django.contrib.auth.hashers import (
check_password,
get_hashers_by_algorithm,
identify_hasher,
make_password,
)
from hypothesis import given
from hypothesis.strategies import text
from plaintext_password import PlaintextPasswordHasher
PASSWORD = "password123"
@given(text())
def test_makes_plaintext_password(password):
    # Hashing with the plaintext hasher yields "plaintext$$<password>".
    hashed_password = make_password(password, hasher="plaintext")
    assert hashed_password == f"plaintext$${password}"
@given(text())
def test_check_password(password):
    # check_password accepts a correctly formatted plaintext hash.
    assert check_password(password, f"plaintext$${password}")
@pytest.mark.parametrize("password", {"$password$", "!password!"})
def test_check_password_with_unsafe_char(password):
    # Characters meaningful to the hash format ($, !) must round-trip too.
    hashed_password = make_password(password, hasher="plaintext")
    assert check_password(password, hashed_password)
@given(text())
def test_end_to_end(password):
    # Full round trip: make_password output verifies with check_password.
    hashed_password = make_password(password, hasher="plaintext")
    assert check_password(password, hashed_password)
def test_identify_hasher():
    # Django resolves the "plaintext" prefix to our hasher class.
    assert isinstance(identify_hasher("plaintext$$password"), PlaintextPasswordHasher)
@pytest.mark.parametrize("hasher", get_hashers_by_algorithm().keys())
def test_make_password_performance(hasher, benchmark):
    # pytest-benchmark: compare hashing cost across all configured hashers.
    benchmark(make_password, PASSWORD, hasher=hasher)
@pytest.mark.parametrize("hasher", get_hashers_by_algorithm().keys())
def test_check_password_performance(hasher, benchmark):
    # Benchmark verification against a pre-computed hash per hasher.
    encoded_password = make_password(PASSWORD, hasher=hasher)
    benchmark(check_password, PASSWORD, encoded_password)
| [
"git@theorangeone.net"
] | git@theorangeone.net |
59c872b273d740f63d60d20c00f25ec6200c950b | 96bf2ec5c1536831b6a3e08675286770d44d858c | /root/os/DSAA/DataStructuresAndAlgorithms/python/recursion_arrangememt_implement.py | 7718e364df1f45330957efb06bd7943e4e891a09 | [
"MIT"
] | permissive | chyidl/chyidlTutorial | aa15d6c8526e87a34ed63f79bd541d10ee43b820 | d7f74280725149be11d818b4fbca6cb23ffa4e25 | refs/heads/master | 2022-05-11T13:33:47.015661 | 2022-05-04T13:42:20 | 2022-05-04T13:42:20 | 156,686,624 | 4 | 3 | MIT | 2021-04-14T13:58:38 | 2018-11-08T10:02:35 | Python | UTF-8 | Python | false | false | 1,400 | py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:encoding=utf-8
#
# recursion_arrangememt_implement.py
# python
#
# __________
# / ___ ___ \
# / / \/ \ \
# \ \___/\___/ /\
# \____\/____/||
# / /\\\\\//
# | 🔥 |\\\\\\
# \ \\\\\\
# \______/\\\\
# _||_||_
#
# Created by Chyi Yaqing on 03/24/19 21:28.
# Copyright © 2019. Chyi Yaqing. All rights reserved.
#
# Distributed under terms of the MIT
"""
排列:从n个元素中任取m个元素,并按照一定的顺序进行排列,称为排列
全排列: 当n==m时,称为全排列
permutation, also called an "arrangement number" or "order", is a
rearrangement of the elements of an ordered list S into a one-to-one
correspondence with S itself. A string of length n has n! permutation.
Algorithm Paradigm: Backtracking
Time Complexity: O(n*n)
"""
# Python program to print all permutations
def toString(alist):
    """Join a list of strings into a single string."""
    return ''.join(alist)
# Function to print permutations of string
def permute(alist, left, right):
    """Print every permutation of alist[left:right+1], one per line.

    Elements are swapped in place and swapped back after each recursive
    call (backtracking), so alist is restored when the call returns.
    """
    if left == right:
        print(''.join(alist))
        return
    for pos in range(left, right + 1):
        alist[left], alist[pos] = alist[pos], alist[left]
        permute(alist, left + 1, right)
        alist[left], alist[pos] = alist[pos], alist[left]
# Driver program to test the above function
alist = ["A", "B", "C"]
n = len(alist)
permute(alist, 0, n-1)  # prints the 3! = 6 permutations of A, B, C
| [
"chyidl.com@gmail.com"
] | chyidl.com@gmail.com |
ae9e12ea332f93c1785a40bb98ab1c626d69b741 | 6108302db96647da5132391bcd17f3188a82c08e | /backjoon/1260.py | 824c6eb4fa15b58a2b72e8c5d8180eaa95662f72 | [] | no_license | iamyoungjin/algorithms | b5873db95a5022c67be210754c1083ecb3ea6a91 | 551121ead833f6f29ac2dcb1d3007a9b50c8c365 | refs/heads/master | 2023-05-06T15:32:15.311206 | 2021-05-19T10:47:48 | 2021-05-19T10:47:48 | 293,109,783 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 958 | py | #https://www.acmicpc.net/problem/1260
# N : 정점의 개수
# M : 간선의 개수
# V : 탐색을 시작할 정점의 번호
# N vertices, M edges, V = start vertex (BOJ 1260).
N,M,V=map(int,input().split())
matrix=[[0]*(N+1) for i in range(N+1)]  # adjacency matrix, vertices 1..N
for i in range(M):
    a,b = map(int,input().split())
    matrix[a][b]=matrix[b][a]=1  # undirected edge
# print('matrix changed--->',matrix)
visit_list=[0]*(N+1)
# print('visit_list--->',visit_list) #[0,0,0,0,0]
def dfs(V):
    """Depth-first traversal; prints vertices in visiting order,
    preferring smaller vertex numbers."""
    visit_list[V]=1  # mark this vertex as visited
    print(V, end=' ')
    for i in range(1,N+1):
        if(visit_list[i]==0 and matrix[V][i]==1):
            dfs(i)
def bfs(V):
    """Breadth-first traversal. Reuses visit_list as left by dfs():
    every reachable vertex is 1 after dfs ran, and bfs flips it back
    to 0 as it enqueues, so both traversals share one array."""
    queue=[V]  # vertices pending a visit
    visit_list[V]=0  # mark visited (0 = visited in the bfs pass)
    while queue:
        V=queue.pop(0)
        print(V, end=' ')
        for i in range(1, N+1):
            if(visit_list[i]==1 and matrix[V][i]==1):
                queue.append(i)
                visit_list[i]=0
dfs(V)
print()
bfs(V)
"nnna08@likelion.org"
] | nnna08@likelion.org |
4b702800cbd23351c3aa3180552ec07b6a7a7da6 | 17e77e661335e91326d72e53bfa068bd5fb79b1a | /_ots/GL3/scripts/windllexport.py | 2992cac6e97f66af7ec940c625022761003c953c | [] | no_license | wibus/ExperimentalTheatre | cfd92546aed84138d21ad9845bf4518c828badf8 | 46c0e83d2319c5731fc7a7dbad6b0efee95d289c | refs/heads/master | 2020-04-04T07:41:51.935537 | 2019-03-19T08:03:57 | 2019-03-19T08:03:57 | 7,976,973 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,283 | py | if __name__ == "__main__":
import os
outfilename = 'gl3w.h.out'
infilename = 'gl3w.h'
alreadyconfigured = False
with open(outfilename, 'w') as out:
for line in open(infilename, 'r'):
if 'GL3_EXPORT' in line:
alreadyconfigured = True
break
line = line.replace(
"""#include \"glcorearb.h\"""",
"""#include \"glcorearb.h\"
#ifdef _WINDOWS
# if defined(GL3_LIBRARY)
# define GL3_EXPORT __declspec(dllexport)
# else
# define GL3_EXPORT __declspec(dllimport)
# endif
#else
# define GL3_EXPORT
#endif""")
line = line.replace(
'extern PFN',
'extern GL3_EXPORT PFN')
out.write(line)
    if alreadyconfigured:
        #os.remove(outfilename)
        # NOTE(review): when already configured, the partially written .out
        # file is left behind (the remove above is commented out) -- confirm
        # whether that is intentional.
        print('File already configured for Windows dll export')
    else:
        # Replace the original header with the rewritten one.
        os.remove(infilename)
        os.rename(outfilename,infilename)
        print('File now configured for Windows dll export')
| [
"william.bussiere@gmail.com"
] | william.bussiere@gmail.com |
a4779ef284503e231a27a8925693ca55db0ab77c | 99a32aa3b7048d9002c453e9af832802125c0d43 | /config.py | d29adfca3cf01b07db2ad21fcea750ac93af6611 | [] | no_license | huzaifarasheedmir/flask-backend | dabeb9bc7e1377663e05077d968bf20a31f0b6f6 | 86d44e7496f9697d782d66a87934c264eb5ff1b1 | refs/heads/master | 2021-01-18T17:36:11.701002 | 2018-03-10T17:57:58 | 2018-03-10T18:04:33 | 71,997,453 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,572 | py | import os
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
class Config:
    """Base settings shared by every environment."""
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
    SSL_DISABLE = False
    SQLALCHEMY_COMMIT_ON_TEARDOWN = True
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SQLALCHEMY_POOL_RECYCLE = 20
    PRESERVE_CONTEXT_ON_EXCEPTION = False
    @staticmethod
    def init_app(app):
        """Hook for environment-specific app initialisation (no-op here)."""
        pass
class DevelopmentConfig(Config):
    """Local development: MySQL on localhost unless DEV_DATABASE_URL is set."""
    DEBUG = True
    PORT = 8081
    # NOTE(review): credentials are hard-coded and duplicated in
    # ProductionConfig -- consider moving them to environment variables.
    DEV_DB_PARAMS = {
        'DB_USER': "root",
        'DB_PASSWORD': "123456789",
        'DB_HOST': '127.0.0.1:3306',
        'DB_DATABASE': "mydb"
    }
    MYSQL_DB = "mysql+mysqldb://{DB_USER}:{DB_PASSWORD}@{DB_HOST}/{DB_DATABASE}".format(**DEV_DB_PARAMS)
    SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or MYSQL_DB
class TestingConfig(Config):
    """Unit-test settings: in-memory SQLite."""
    TESTING = True
    PORT = 1000
    SQLALCHEMY_DATABASE_URI = "sqlite:///:memory:"
    FIXTURES_DIR = os.path.join(BASE_DIR, 'tests/fixtures')
class ProductionConfig(Config):
    """Production settings: MySQL unless DATABASE_URL overrides."""
    PORT = 8080
    PROD_DB_PARAMS = {
        'DB_USER': "root",
        'DB_PASSWORD': "123456789",
        'DB_HOST': '127.0.0.1:3306',
        'DB_DATABASE': "mydb"
    }
    MYSQL_DB = "mysql+mysqldb://{DB_USER}:{DB_PASSWORD}@{DB_HOST}/{DB_DATABASE}".format(**PROD_DB_PARAMS)
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or MYSQL_DB
# Name -> config class lookup, presumably used by an app factory.
config = {
    'development': DevelopmentConfig,
    'testing': TestingConfig,
    'production': ProductionConfig,
    'default': DevelopmentConfig
}
| [
"huzaifa@wanclouds.net"
] | huzaifa@wanclouds.net |
c1395406230bb6f5616f9eabc0e1b9a4999b8e2a | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/9/wo-.py | bace8c7b0b9cb45c71200a51bd1870340df7d916 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    # Python 2 script. Print the quoted payload of a `wO-` line: tokens are
    # only printed when wrapped by literal `"` tokens at both ends.
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            #data to print: drop the opening and closing quote tokens
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            print
def main(fileName):
    # Interpret each line of the file: `wO-` prints its arguments, anything
    # else prints ERROR and stops.
    # NOTE(review): a blank line would raise IndexError on data[0] --
    # confirm the input files never contain blank lines.
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'wO-':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
if __name__ == '__main__':
    main(sys.argv[1])
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
dea54da218080aeb093fb550b488d32c7e2bd61b | 499742cb14fe4567e40019aa7f50896f47e5208b | /Scraped_midi_data/midi_crawl.py | 32105da5d071ca63c81ac22d3d2acedbe34fc49d | [] | no_license | FinchMF/Computational-Music-Box | 0e7da9bd23ffcf14f06c380106c88f886a7ee8de | 57c1b47a07a39bcceb7e86bae39cdc33ddd7bfd3 | refs/heads/master | 2021-01-05T10:17:26.705502 | 2020-03-12T22:34:14 | 2020-03-12T22:34:14 | 240,987,091 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | import requests
import re
from bs4 import BeautifulSoup
import wget
class Crawler:
link =
def __init__(self, composer, link):
self.composer = composer
self.link = link | [
"matt.fiinch92@gmail.com"
] | matt.fiinch92@gmail.com |
c2a4f5d1df47a8bbc2d9299c027cf474b3047220 | d436b439b20507d8d15a89925891f9573a29d140 | /code/protein/transcell_PCA_default/generate_parallel_command_pro20.py | 5fb712d6b0eb7d0f67e55d3e4d7842647ec07a71 | [] | no_license | Bin-Chen-Lab/transcell | 46f8abd5330a1f1e354e039bd53d85ba93bfc15c | 2237672b1b0c6096a1c7d60d652bc3f8f05079f7 | refs/heads/master | 2023-04-08T15:07:19.270438 | 2023-03-31T04:27:43 | 2023-03-31T04:27:43 | 320,335,150 | 1 | 2 | null | 2023-09-08T08:00:10 | 2020-12-10T16:56:09 | Python | UTF-8 | Python | false | false | 347 | py | import pandas as pd
import numpy as np
import csv
pro_20 = pd.read_csv('/home/ubuntu/chenlab_deeplearning/chenlab_deeplearning_V2/DL_yeh/GeneExp_prediction/output/protein/other_models/LASSO.csv')
pro_20 = pro_20['Unnamed: 0']
j = 0
for i in pro_20:
print("python pro_kfold_average_ks5000_parallel.py %s > %d.out" % (i,j) )
j = j + 1
| [
"m793281@gmail.com"
] | m793281@gmail.com |
3c91eb2e783415b65421ba84ddc6c8cbb7bfd91b | 9f98c8811e9aa992bc3181c8611d72920439a2ee | /CS111/PS1/ps1pr5.py | b6201047345f8737c801034dcd5b141814482212 | [] | no_license | vnikov/Fall-2017 | 7bf0aafa47a3b06b65cbbb089a017a07fe738f58 | 2198574a526f4cc14f1b911c02f15814a0ee969f | refs/heads/master | 2020-05-04T00:31:53.417111 | 2018-05-23T18:19:17 | 2018-05-23T18:19:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,528 | py | #
# ps1pr5.py - Problem Set 1, Problem 5
#
# Functions with numeric inputs
#
# name: Phumin Walaipatchara
# email: phuminw@bu.edu
#
def mirror(s):
""" return a mirrored version of s that is twice the length of the original string """
return s + s[-1::-1]
def is_mirror(s):
""" returns True if s is a mirrored string and False otherwise """
return s[0:len(s) // 2] == s[-1:(len(s) // 2) - 1:-1]
def replace_end(values, new_end_vals):
""" return a new list in which the elements in new_end_vals have replaced the last n elements of the list values, where n is the length of new_end_vals """
if len(new_end_vals) >= len(values):
return new_end_vals
else:
return values[0:len(values) - len(new_end_vals)] + new_end_vals
def repeat_elem(values, index, num_times):
""" returns a new list in which the element of values at position index has been repeated num_times times """
return values[0:index] + [values[index]] * num_times + values[index + 1:]
def test():
print(mirror('bacon'))
print(mirror('XYZ'))
print(is_mirror('baconnocab'))
print(is_mirror('baconcona'))
print(is_mirror('assa'))
print(is_mirror('asdf'))
print(replace_end([1, 2, 3, 4, 5], [7, 8, 9]))
print(replace_end([1, 2, 3, 4, 5], [12]))
print(replace_end([0, 2, 4, 6], [4, 3, 2, 1, 0]))
print(replace_end([0, 2, 4, 6], [4, 3, 2, 1]))
print(repeat_elem([10, 11, 12, 13], 2, 4))
print(repeat_elem([10, 11, 12, 13], 2, 6))
print(repeat_elem([5, 6, 7], 1, 3))
| [
"phuminw@dhcp-wifi-8021x-168-122-214-226.bu.edu"
] | phuminw@dhcp-wifi-8021x-168-122-214-226.bu.edu |
0be528bd16f73bebc7d7b2d35ced4d9dc303cb72 | 25d94c5e2abf1921d9f392c748b942bd0ba4f291 | /code/8/main.py | a60cbaea49e97cd51bd5e0c3b9afd11d71d60bc8 | [] | no_license | diksha/fss16noobs | 2d9d523b7118ed2497d372aac8fa96db3b78726c | 8e72f2c27ab211b0422c0594bec20bd7d6a5d910 | refs/heads/master | 2020-12-03T07:43:15.572583 | 2016-12-08T06:20:24 | 2016-12-08T06:20:24 | 66,399,337 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,556 | py | from nsgaii import NSGAII
import dtlz
from init import MODEL
from hypervolume import *
from stats import rdivDemo as stat
def print_generation(population, generation_num):
print("Generation: {}".format(generation_num))
models1 = [dtlz.dtlz1, dtlz.dtlz3, dtlz.dtlz5, dtlz.dtlz7]
print 'NSGAII RUNNING: agupta25'
for models in models1:
for num_obj in [2, 4, 6, 8]:
hvol = []
repeat = []
for num_dec in [10, 20, 40]:
for dom in ["bdom", "cdom"]:
vol = 0
rep = 0
repeats = 20
hvDisplay = [models.__name__ + " " + str(num_obj) + " " + str(num_dec) + " " + dom]
runsDisplay = [models.__name__ + " " + str(num_obj) + " " + str(num_dec) + " " + dom]
print hvDisplay
for x in range(repeats): #repeats
problem = MODEL(models, num_dec, num_obj, dom)
print 'Model defined entering nsgaii'
#Evolution(problem, max_iterations, pop_size)
nsga = NSGAII(problem, 100, 100, dom)
hypervolume, runs = nsga.run()
print 'Printing hypervolume'
print hypervolume
print runs
hvDisplay.append(hypervolume)
runsDisplay.append(runs)
rep += runs
hvol.append(info_hv)
run_stats.append(info_runs)
print hvDisplay
print runsDisplay
stat(hvol)
stat(repeat)
print
| [
"agupta25@ncsu.edu"
] | agupta25@ncsu.edu |
1c89fc5d3752c2a7fcee2127200e8e25d044077d | 8cd5c8d035b292caacb4d5aeb4bce0ca19ee3061 | /DataScience/Python/scripts/temp_script.py | 04002e9f9c92412c9ef91111bd7c3092dca4b80e | [] | no_license | toddjm/projects | 33be8c753d24a1ba1f719d661fed36d264957c45 | 709497603a6dcfb43feabfc812b3ad8bb3f183c9 | refs/heads/master | 2023-02-19T03:13:46.538278 | 2023-02-07T22:33:00 | 2023-02-07T22:33:00 | 47,042,227 | 1 | 1 | null | 2015-12-01T00:01:22 | 2015-11-28T22:44:22 | Python | UTF-8 | Python | false | false | 400 | py | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
from matplotlib import pyplot as plt
import pandas as pd
pd.read_csv('/Users/emily/Downloads/Wahoo.csv')
pd.read_csv('/Users/emily/Downloads/Wahoo.csv', skiprows=19)
wahoo_data = pd.read_csv('/Users/emily/Downloads/Wahoo.csv', skiprows=19)
wahoo_data.cad_cadence
cadence = wahoo_data.cad_cadence
plt.plot(cadence)
| [
"todd.minehardt@gmail.com"
] | todd.minehardt@gmail.com |
742bb40e2ce4ec25a75d16d1778f148017ec516c | a87fa6b99466f74b8c6a00c596055de9a76bcda3 | /TorchTuner.py | d303af0a1ba5d49f4ef594c77449a9b88529b508 | [] | no_license | sbetageri/TorchTuner | 9dd7f6bb695349d633e58369f23aaadcf8876217 | a3e832345feb7104a1a47dbbf9f01c452ede4800 | refs/heads/master | 2020-04-12T20:50:05.328219 | 2019-02-25T17:43:29 | 2019-02-25T17:43:29 | 162,748,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,572 | py | import os
import json
import torch
import pprint
import numpy as np
import torch.nn as nn
import matplotlib.pyplot as plt
import seaborn as sns
from torch.utils.data import DataLoader
from copy import deepcopy
from tqdm import tqdm
class TorchTuner:
## Names for parameters
_OPTIM_FUNC = 'optimizer_func'
_OPTIM_PARAM = 'optimizer_param'
_EPOCHS = 'epochs'
_BATCH_SIZE = 'batch_size'
_PARAM_IDX = 'param_id'
_RESULTS = 'results'
_TR_LOSS = 'train_loss'
_TR_ACC = 'train_acc'
_VAL_LOSS = 'val_loss'
_VAL_ACC = 'val_acc'
_MODEL_PATH = 'model_path'
_BEST_LOSS = 'best_loss'
_BEST_ACC = 'best_acc'
_MODE = 'mode'
_MAX_MODE = 'max'
_MIN_MODE = 'min'
CUR_EPOCH = 'current_epoch'
TOT_EPOCH = 'total_epoch'
OPTIMIZER_STATE = 'optimizer_state_dict'
MODEL_STATE = 'model_state_dict'
MODEL_NAME = 'model_name'
BATCH_SIZE = 'batch_size'
MODEL_PREFIX = 'model_'
EXT = '.pth'
PARAM_EXT = '.json'
MODEL_NAME_SEP = '_'
def __init__(self,
model=None,
model_name='myModel',
criterion_func=None,
accuracy_func=None,
train_dataset=None,
test_dataset=None,
val_percentage=0.15,
res_dir=None):
'''Initialise torch tuner
:param model: Model to be tested, defaults to None
:param model: Custom PyTorch model, optional
:param criterion_func: Criterion Function, defaults to None
:param criterion_func: torch.nn, optional
:param accuracy_func: Accuracy funciton, defaults to None
:param accuracy_func: Custom function, optional
:param train_dataset: Training dataset, defaults to None
:param train_dataset: torch.utils.data.Dataset, optional
:param test_dataset: Test dataset, defaults to None
:param test_dataset: torch.utils.data.Dataset, optional
:param val_percentage: Percentage of training set to be used as test set, defaults to 0.15, ie, 15%
:param val_percentage: float, optional
:param res_dir: Directory where models and results are saved, defaults to None
:param res_dir: string, optional
'''
self.name = model_name
self.params = []
self.param_name_prefix = 'param_'
self.model = model
self.criterion = criterion_func()
self.accuracy_func = accuracy_func
self.train_dataset = train_dataset
self.test_dataset = test_dataset
self.results = {}
self.res_dir = res_dir
if train_dataset is not None:
self.train_sampler, self.val_sampler = self._getTrainValSampler(train_dataset, val_percentage)
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
## TODO
## Test loader will be interesting
# self.test_loader = DataLoader(dataset=self.test_dataset, batch_size=batch_size, shuffle=True)
# Reporting apparatus
self.pp = pprint.PrettyPrinter(indent=4)
def _getTrainValSampler(self, dataset, val_percentage):
indices = list(range(len(dataset)))
split = int(val_percentage * len(dataset))
np.random.seed(9)
np.random.shuffle(indices)
train_indices, val_indices = indices[split:], indices[:split]
train_sampler = torch.utils.data.sampler.SubsetRandomSampler(train_indices)
val_sampler = torch.utils.data.sampler.SubsetRandomSampler(val_indices)
return train_sampler, val_sampler
def evaluateModel(self,
param_id = None,
optimizer_func=None,
optimizer_param=None,
epochs=9,
batch_size=4,
mode=None):
'''Evaluates a model and returns loss and accuracy
Builds and executes the entire training and validation pipeline.
Runs implicitly on the GPU
:param optimizer_func: function to obtain optimizer, defaults to None
:param optimizer_func: torch.optim, optional
:param optimizer_param: parameters for optimizer, defaults to None
:param optimizer_param: dict, optional
:param epochs: number of epochs, defaults to 9
:param epochs: int, optional
:param batch_size: size of batch, defaults to 4
:param batch_size: int, optional
:return: Log of evaluation metrics
:rtype: Dictionary
'''
train_loader = DataLoader(dataset=self.train_dataset,
batch_size=batch_size,
sampler=self.train_sampler)
val_loader = DataLoader(dataset=self.train_dataset,
batch_size=batch_size,
sampler=self.val_sampler)
# Training metrics
tr_loss = []
tr_acc = []
# Validation metrics
val_loss = []
val_acc = []
best_acc = 0.0
best_loss = 0.0
model_path = ''
# Move to GPU
model = deepcopy(self.model)
model = model.to(self.device)
optimizer = optimizer_func(model.parameters(), **optimizer_param)
criterion = self.criterion
for e in range(epochs):
running_acc = 0
running_loss = 0.0
model.train(True)
for data, label in train_loader:
data = data.to(self.device)
label = label.to(self.device)
optimizer.zero_grad()
output = model(data)
output = output.view(output.size()[0], -1)
label = label.view(label.size()[0], -1)
loss = criterion(output, label)
loss.backward()
optimizer.step()
running_loss += loss.item()
running_acc += self.accuracy_func(output, label)
running_loss /= (len(train_loader) * batch_size)
running_acc /= (len(train_loader) * batch_size)
running_acc *= 100
tr_loss.append(running_loss)
tr_acc.append(running_acc)
running_loss = 0.0
running_acc= 0
model.eval()
with torch.no_grad():
for data, label in val_loader:
data = data.to(self.device)
label = label.to(self.device)
output = model(data)
output = output.view(output.size()[0], -1)
label = label.view(label.size()[0], -1)
loss = criterion(output, label)
running_loss += loss.item()
running_acc += self.accuracy_func(output, label)
running_loss /= (len(val_loader) * batch_size)
running_acc /= (len(val_loader) * batch_size)
running_acc *= 100
val_loss.append(running_loss)
val_acc.append(running_acc)
if mode == TorchTuner._MAX_MODE:
if len(val_acc) == 1:
model_path = self.saveModel(model, param_id, optimizer, e, epochs, batch_size)
best_acc = val_acc[-1]
best_loss = val_loss[-1]
elif val_acc[-1] > val_acc[-2]:
best_acc = val_acc[-1]
best_loss = val_loss[-1]
print('Accuracy improved. Saving model')
model_path = self.saveModel(model, param_id, optimizer, e, epochs, batch_size)
else:
if len(val_loss) == 1:
best_acc = val_acc[-1]
best_loss = val_loss[-1]
model_path = self.saveModel(model, param_id, optimizer, e, epochs, batch_size)
elif val_loss[-1] < val_loss[-2]:
best_acc = val_acc[-1]
best_loss = val_loss[-1]
print('Loss decreased. Saving model')
model_path = self.saveModel(model, param_id, optimizer, e, epochs, batch_size)
return {
TorchTuner._TR_LOSS : tr_loss,
TorchTuner._TR_ACC : tr_acc,
TorchTuner._VAL_LOSS : val_loss,
TorchTuner._VAL_ACC : val_acc,
TorchTuner._BEST_ACC : best_acc,
TorchTuner._BEST_LOSS : best_loss,
TorchTuner._MODEL_PATH : model_path
}
def addModel(self,
model=None,
criterion_func=None,
accuracy_func=None,
clear_prev=False):
'''Change model
Change the underlying model for which the hyperparameters need to be tested
:param model: Pytorch model, defaults to None
:param model: Custom model, optional
:param criterion_func: Loss function, defaults to None
:param criterion_func: torch.nn, optional
:param accuracy_func: Evaluation metric function, defaults to None
:param accuracy_func: function, optional
'''
self.model = model
self.criterion_func = criterion_func
self.accuracy_func = accuracy_func
if clear_prev:
self.results = {}
def addHyperparameters(self,
optimizer_func=None,
optimizer_param=None,
epochs=9,
batch_size=8,
mode=None):
'''Add hyperparams for evaluation
:param optimizer_func: Optimizer, defaults to None
:param optimizer_func: torch.optim, optional
:param optimizer_param: Parameters to optimizer, defaults to None
:param optimizer_param: Dict of params, optional
:param epochs: Number of epochs to run evaluation metric on, defaults to 9
:param epochs: int, optional
:param batch_size: Number of data-points to consider during evaluation, defaults to 8
:param batch_size: int, optional
:param mode: Defines which metric to use when saving model, defaults to 'max', max accuracy
:param mode: str, optional
'''
param_idx = self.param_name_prefix + str(len(self.params) + 1)
param = {
TorchTuner._PARAM_IDX : param_idx,
TorchTuner._OPTIM_FUNC : optimizer_func,
TorchTuner._OPTIM_PARAM : optimizer_param,
TorchTuner._EPOCHS : epochs,
TorchTuner._MODE : mode
}
self.params.append(param)
## How should the parameters be?
def evaluateHyperparams(self):
'''Evaluate hyperparameters
Evaluate hyperparams and log results
'''
self.results = deepcopy(self.params)
for param in self.results:
result = self.evaluateModel(**param)
param[TorchTuner._RESULTS] = result
def saveHyperparam(self,
out_file=''):
'''Save hyperparameters to json file
:param out_file: Path to output file, defaults to './param.json'
:param out_file: str, optional
'''
# Change results to savable format
for param in self.results:
param[TorchTuner._OPTIM_FUNC] = str(param[TorchTuner._OPTIM_FUNC])
out_file = out_file + TorchTuner.PARAM_EXT
with open(out_file, 'w') as fp:
json.dump(self.results, indent=4, sort_keys=True, fp=fp)
def saveModel(self,
model,
param_id,
optimizer,
cur_epoch,
total_epoch,
batch_size):
## We're passing model again because model here is supposed to the model on the GPU.
save_dict = {
TorchTuner.MODEL_STATE : model.state_dict(),
TorchTuner.OPTIMIZER_STATE : optimizer.state_dict(),
TorchTuner.CUR_EPOCH : cur_epoch,
TorchTuner.TOT_EPOCH : total_epoch,
TorchTuner.BATCH_SIZE : batch_size,
}
model_name = self.name + TorchTuner.MODEL_NAME_SEP + param_id + TorchTuner.EXT
res_dir = os.path.abspath(self.res_dir)
model_path = os.path.join(res_dir, model_name)
torch.save(save_dict, model_path)
return model_path
def testModel(self, param):
result = param[TorchTuner._RESULTS]
model_path = result[TorchTuner._MODEL_PATH]
checkpoint = torch.load(model_path)
model_state_dict = checkpoint[TorchTuner.MODEL_STATE]
model = deepcopy(self.model)
model = model.to(self.device)
model.load_state_dict(model_state_dict)
batch_size = checkpoint[TorchTuner.BATCH_SIZE]
test_loader = DataLoader(dataset=self.test_dataset,
batch_size=batch_size,
shuffle=True)
running_acc = 0.0
running_loss = 0.0
for data, label in test_loader:
data = data.to(self.device)
label = label.to(self.device)
output = model(data)
output = output.view(output.size()[0], -1)
label = label.view(label.size()[0], -1)
loss = self.criterion(output, label)
running_loss += loss.item()
running_acc += self.accuracy_func(output, label)
running_loss /= (len(test_loader) * batch_size)
running_acc /= (len(test_loader) * batch_size)
running_acc *= 100
return running_loss, running_acc | [
"sbetageri111@gmail.com"
] | sbetageri111@gmail.com |
45e87ed9a82e88d8e774f45921ed3227fd68165e | 4dbd12da17cc45a5482afc8cea02051e798731a9 | /courses_project/apps/courses/urls.py | ab576aa8a6f94c45e5f11e2186a1af9f96e0ddaa | [] | no_license | tsicroxe/django_projects | 71b9bec6d834f53fde892606799b4bc96ba45a91 | c11036c78d120e5ffa51055e2999dbe05b0d36eb | refs/heads/master | 2021-01-11T07:03:53.045558 | 2016-12-07T20:46:05 | 2016-12-07T20:46:05 | 71,937,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | from django.conf.urls import url
from . import views
from views import index, create, destroy
#from django.contrib import admin
urlpatterns = [
#url(r'^admin/', admin.site.urls),
url(r'^$', views.index, name='index'),
url(r'^create$', views.create, name='create'),
url(r'^(?P<id>\d+)/destroy$', views.destroy, name='destroy'),
]
| [
"arbanakus@gmail.com"
] | arbanakus@gmail.com |
a294a17f9386d4eb97ad9baaf9e5ea92c5597236 | 03f4a903626bc0ff873f7d8598db53880a943b2e | /tabulations/models.py | e336c495daef34935b879ec7e50bc7032a76eb1d | [] | no_license | capmayer/menyu_project | 645b50a9f2bf617b7e8147157c18a2ac50c8f457 | d8b91e867546f9b86e683856850da3ee43b52dc8 | refs/heads/master | 2021-06-11T12:01:51.737442 | 2017-02-14T02:25:16 | 2017-02-14T02:25:16 | 69,711,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,650 | py | import uuid
from django.db import models
from django.contrib.auth.models import User
from menus.models import Product
from seats.models import Seat
class Tabulation(models.Model): #cria a comanda ao entrar, mesmo sem ter pedido
uuid = models.UUIDField(default=uuid.uuid4, editable=False)
establishment = models.ForeignKey(User, on_delete=models.CASCADE)
origin = models.ForeignKey(Seat)
date = models.DateTimeField(auto_now_add=True)
STATES = (
('op', 'Open'),
('cl', 'Close'),
)
state = models.CharField(max_length=2, choices=STATES, default='op')
value = models.FloatField(default=0)
registered = models.BooleanField(default=False)
last_modified = models.DateTimeField(auto_now=True, auto_now_add=False)
def __str__(self):
return "Mesa: "+ str(self.origin) + " - "+str(self.date)
class Order(models.Model): #pedido sao criados apos comanda já ter sido feita
uuid = models.UUIDField(default=uuid.uuid4, editable=False)
quantity = models.IntegerField()
product = models.ForeignKey(Product, on_delete=models.CASCADE)
tabulation = models.ForeignKey(Tabulation, on_delete=models.CASCADE, related_name='orders')
STATES = (
('wa', 'Waiting'),
('re', 'Ready'),
('de', 'Delivery'),
)
last_modified = models.DateTimeField(auto_now=True, auto_now_add=False)
state = models.CharField(max_length=2, choices=STATES, default='wa')
def __str__(self):
#return str(self.quantity) + " - MESA: " + str(self.tabulation.origin)
return str(self.quantity) + " - "+str(self.product) + " MESA: " + str(self.tabulation.origin)
| [
"henriqmayer@gmail.com"
] | henriqmayer@gmail.com |
5f0d8572f2375ec6cedc737525299a3606379bc2 | 0d2781f258917f37ace2f6258bad52e8b345d092 | /tests/GoogleDrive/test.py | 92e4d368ea9afe67bd7a48f1bb55eb952bcc7307 | [] | no_license | brenorobazza/rPacks | d8738c77c51e8bf927a7ff3b586f892a8855d8a6 | ad30984990b0fd095ad95110b66ce68432bde1c4 | refs/heads/master | 2023-07-02T04:46:11.406995 | 2021-07-30T18:24:50 | 2021-07-30T18:24:50 | 347,108,416 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | from Drive.GoogleDrive import GoogleAPI
def main():
service = GoogleAPI('credentials.json', 'token.pickle')
service.download_folder_content('folder_name')
if __name__ == '__main__':
main() | [
"breno.robazza@hotmail.com"
] | breno.robazza@hotmail.com |
d38ee31baa1e219ba3b238f1cc080660f19c9acc | c64bc713f1d3f566e47665c6b4803f176960f7c6 | /app/models.py | a57ce3d63f18b75d6f82478223594c613901aa47 | [
"MIT"
] | permissive | estherndichu/python_wk2 | 01115a59590d9a67bc15b5d3d5743c01e821188a | 1a76cd5aa1e87b361a049c794202a56dad510924 | refs/heads/master | 2023-03-13T02:58:05.510615 | 2021-03-07T08:32:40 | 2021-03-07T08:32:40 | 340,323,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 705 | py | class Source:
'''
Source class to define Source Objects
'''
def __init__(self,id,name,description,url,category,country,language):
self.id =id
self.name = name
self.description = description
self.url = url
self.category = category
self.country = country
self.language = language
class Articles():
'''
Articles class that defines Article objects
'''
def __init__(self,id,author,title,description,url,image,date):
self.id = id
self.author = author
self.title = title
self.description = description
self.url = url
self.image = image
self.date = date | [
"itskuijenga@gmail.com"
] | itskuijenga@gmail.com |
c45354d7a9c4f46d4f14484b4ff0aefa5926ed2d | 9d97bbb97860982d4c5be61a64641d3c97028502 | /depswarm/_internal/exceptions/genericexceptions.py | 976072b08ac56f1ed5d065a5610520153016b559 | [
"BSD-3-Clause"
] | permissive | eerotal/depswarm | a4649efd1a0cdee1c8c86b5e6ac9c4e7b00bb61a | dce266310237a7dd424633646189dc760e39411e | refs/heads/master | 2023-03-19T13:46:34.924559 | 2021-03-09T08:17:32 | 2021-03-09T08:17:32 | 345,084,502 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | """Generic exceptions for various cases."""
class UnsupportedVersionException(Exception):
"""Exception for indicating an unsupported version."""
class MissingDependencyException(Exception):
"""Exception for indicating a missing dependency."""
| [
"eerotal@mbnet.fi"
] | eerotal@mbnet.fi |
c6d91a77eb6e1c4c9d2c070e1c106b3f8a4de8ce | 6eb521cf6327ff42d2d4a3651447734c6dc62a1b | /pythonic_martini/utils.py | d2c0754bc875f836412c0318a79fa8f5cac8d29c | [] | no_license | minghao2016/pythonic_martini | bf3badfc7eb033b95263dc1523c21fad43171c09 | b1510879cff440215c91a14ec31f7d73c253cec8 | refs/heads/master | 2023-02-28T12:45:35.799180 | 2021-02-02T23:04:26 | 2021-02-02T23:04:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,543 | py |
import numpy as np
from . import quaternion
def actual_atoms_added(filename, keyword):
"""Count the number of lines with <keyword> in the <filename> - usually gro or pdb file
"""
num_keyword = 0
with open(filename, 'r') as f:
for line in f:
if keyword in line:
num_keyword += 1
return num_keyword
def actual_molecules_added(filename, itpfilename, start_linenumber=0):
"""Calculate the number of molecules, defined in <itpfilename>,
in <filename> - usually gro or pdb file
"""
# get atom names from the <molname>.itp
atom_names=[]
start=False
with open(itpfilename, 'r') as f:
for line in f:
if not start:
if '[ atoms ]' in line:
start = True
continue
if start:
if line.split()==[]:
start = False
break
atom_names += [line.split()[3]]
# get all the atom names from the <outfilename>
with open(filename, 'r') as f:
lines = f.readlines()
all_names = [] # e.g. PAM, ALA, GLU
for line in lines[start_linenumber:]:
if line.split()==[]:
continue
all_names += [line.split()[0].lstrip('0123456789')]
# now count number of atom_names sequence in all_names
actual_nmol = 0
natoms = len(atom_names)
i = 0
while i<len(all_names):
if all_names[i:i+natoms] == atom_names:
i += natoms
actual_nmol += 1
else:
i += 1
return actual_nmol
def make_molecule_template(mol_names, mol_numbers, atom_names, atom_positions, filename):
'''Create the molecule scaffold structure to be used by VMD to make the all-atom structure
molecules: list of [molecule name, position list]
in order to how it should appear in the structure file.
'''
lines = ['COMPND UNNAMED']
for i,(mol_name,mol_number,atom_name,atom_position) in enumerate(zip(mol_names,mol_numbers,atom_names,atom_positions)):
atom_number = i+1
lines += ['ATOM'+ '{:>7} '.format(atom_number)+ '{:<4}'.format(atom_name)+ '{}'.format(mol_name)+ '{:>6} '.format(mol_number)+ '{:>8.2f}'.format(atom_position[0])+ '{:>8.2f}'.format(atom_position[1])+ '{:>8.2f}'.format(atom_position[2])+ ' 1.00 0.00'+ '{:>12}'.format(atom_name[0])]
lines += ['END']
with open(filename, 'w') as f:
f.write('\n'.join(lines))
| [
"amayank@umich.edu"
] | amayank@umich.edu |
c8b3a20fa81bc2a10ac839ee93aa3622a97f9a82 | de070f933453e2d15651af1ccc697acf25507bd7 | /deid/version.py | 785a6ee84d0483f6912ea07c5584e25f6da00280 | [
"MIT"
] | permissive | liu3xing3long/deid | cd968b1b5d8e678ad2c41f2b9f1c4572f5f88013 | 491a8ea301d9d47cd4e62eaab31584c26afcc534 | refs/heads/master | 2021-05-14T11:33:12.193255 | 2017-12-22T21:28:32 | 2017-12-22T21:28:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,871 | py | '''
Copyright (c) 2017 Vanessa Sochat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
__version__ = "0.1.1"
AUTHOR = 'Vanessa Sochat'
AUTHOR_EMAIL = 'vsochat@stanford.edu'
NAME = 'deid'
PACKAGE_URL = "https://github.com/pydicom/deid"
KEYWORDS = 'open source, stanford, python, deidentify, dicom'
DESCRIPTION = "deidentify dicom and other images with python and pydicom"
LICENSE = "LICENSE"
INSTALL_REQUIRES = (
('matplotlib', {'min_version': None}),
('requests', {'min_version': '2.12.4'}),
('retrying', {'min_version': '1.3.3'}),
('simplejson', {'min_version': '3.10.0'}),
('six', {'min_version': '1.10'}),
('pygments', {'min_version': '2.1.3'}),
('python-dateutil',{'min_version': None }),
('urllib3',{'min_version': "1.15" }),
('validator.py',{'min_version': None })
)
DEPENDENCY_LINKS = ['https://github.com/pydicom/pydicom/tarball/master']
| [
"vsochat@stanford.edu"
] | vsochat@stanford.edu |
5e0cccf3d265b399532d7db63b9810ddd9f4d114 | 5e5148f2d11b30baea4f58cc0fc060369c65a337 | /facebook.py | 1b08a0372991f21c8eb7c3eb3a5d94a21760f6b9 | [] | no_license | aryan1521/fb | 829be83d13847a3d96fbd65774728f39db905c66 | 6eb25de22ae00e8039d44e6b544f39e2e1339f19 | refs/heads/master | 2020-03-24T05:14:13.088874 | 2018-07-26T18:46:07 | 2018-07-26T18:46:07 | 142,480,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,758 | py | #detect the fake profiles in online social networks using Neural Network
importsys
importcsv
importos
importdatetime
importmath
importnumpyasnp
importpandasaspd
importmatplotlib.pyplotasplt
fromdatetimeimportdatetime
importsexmachine.detectorasgender
fromsklearn.preprocessingimportImputer
fromsklearnimportcross_validation
fromsklearnimportmetrics
fromsklearnimportpreprocessing
fromsklearn.linear_modelimportLinearRegression
from sklearn.ensembleimportRandomForestClassifier
fromsklearn.decompositionimportPCA
fromsklearn.cross_validationimportStratifiedKFold,train_test_split
fromsklearn.grid_searchimportGridSearchCV
fromsklearn.metricsimportaccuracy_score
fromsklearn.learning_curveimportlearning_curve
fromsklearn.metricsimportroc_curve,auc,roc_auc_score
fromsklearn.metricsimportconfusion_matrix
fromsklearn.metricsimportclassification_report
%matplotlibinline
frompybrain.structureimportSigmoidLayer
frompybrain.datasetsimportClassificationDataSet
frompybrain.utilitiesimportpercentError
frompybrain.tools.shortcutsimportbuildNetwork
frompybrain.supervised.trainersimportBackpropTrainer
frompybrain.structure.modulesimportSoftmaxLayer
frompybrain.tools.xml.networkwriterimportNetworkWriter
frompybrain.tools.xml.networkreaderimportNetworkReader
# function for reading the datasets from csv files
def read_datasets():
    """Read genuine and fake user profiles from CSV files.

    Returns
    -------
    tuple
        ``(x, y)`` where ``x`` is the concatenated DataFrame of genuine and
        fake profiles and ``y`` is the parallel label list
        (1 = genuine, 0 = fake), aligned with the row order of ``x``.

    NOTE(review): the CSV paths were blank in the pasted transcript — fill
    in the real file locations before running.
    """
    genuine_users = pd.read_csv("")  # TODO: path to genuine-profiles CSV
    fake_users = pd.read_csv("")     # TODO: path to fake-profiles CSV
    x = pd.concat([genuine_users, fake_users])
    # Bug fix: the transcript built y as fake labels followed by genuine
    # labels, while x places genuine rows first — the labels were misaligned
    # with the rows. Order y to match the concatenation above.
    y = len(genuine_users) * [1] + len(fake_users) * [0]
    return x, y
# function for predicting sex using the name of a person
def predict_sex(name):
    """Map a Series of full names to an integer sex code.

    Codes: female=-2, mostly_female=-1, unknown=0, mostly_male=1, male=2.

    Parameters
    ----------
    name : pandas.Series of str
        Full names; only the first (space-separated) token is classified.

    Returns
    -------
    pandas.Series of int
        The sex code for each name.
    """
    sex_predictor = gender.Detector(unknown_value=u"unknown",
                                    case_sensitive=False)
    # First token of the full name. The transcript showed split('') which is
    # invalid for pandas — a single-space separator is the evident intent.
    first_name = name.str.split(' ').str.get(0)
    sex = first_name.apply(sex_predictor.get_gender)
    sex_dict = {'female': -2, 'mostly_female': -1, 'unknown': 0,
                'mostly_male': 1, 'male': 2}
    sex_code = sex.map(sex_dict).astype(int)
    return sex_code
# function for feature engineering
def extract_features(x):
    """Build the numeric feature matrix used by the classifier.

    Adds an integer language code and a predicted sex code, then keeps only
    the count-based profile features.

    NOTE(review): assumes ``x`` has 'lang' and 'name' columns plus the count
    columns listed below — confirm against the CSV schema.
    """
    # Deterministic language -> integer mapping (alphabetical order).
    lang_list = list(enumerate(np.unique(x['lang'])))
    lang_dict = {name: i for i, name in lang_list}
    # Renamed the lambda argument: the transcript reused 'x', shadowing the
    # DataFrame parameter.
    x.loc[:, 'lang_code'] = x['lang'].map(lambda lang: lang_dict[lang]).astype(int)
    x.loc[:, 'sex_code'] = predict_sex(x['name'])
    feature_columns_to_use = ['statuses_count', 'followers_count',
                              'friends_count', 'favourites_count',
                              'listed_count', 'sex_code', 'lang_code']
    x = x.loc[:, feature_columns_to_use]
    return x
# function for plotting a confusion matrix
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
    """Render ``cm`` as a labelled heat map.

    Class order matches the label encoding used throughout this file:
    row/column 0 = Fake, row/column 1 = Genuine.
    """
    target_names = ['Fake', 'Genuine']
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(target_names))
    plt.xticks(tick_marks, target_names, rotation=45)
    plt.yticks(tick_marks, target_names)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# function for plotting the ROC curve
def plot_roc_curve(y_test, y_pred):
    """Plot the ROC curve for the predictions and print the FPR/TPR arrays."""
    false_positive_rate, true_positive_rate, thresholds = roc_curve(y_test,
                                                                    y_pred)
    print("False Positive rate: ", false_positive_rate)
    print("True Positive rate: ", true_positive_rate)
    roc_auc = auc(false_positive_rate, true_positive_rate)
    plt.title('Receiver Operating Characteristic')
    plt.plot(false_positive_rate, true_positive_rate, 'b',
             label='AUC = %0.2f' % roc_auc)
    # The transcript showed loc='lowerright', which matplotlib rejects;
    # 'lower right' is the valid location string.
    plt.legend(loc='lower right')
    plt.plot([0, 1], [0, 1], 'r--')  # chance (random-classifier) diagonal
    plt.xlim([-0.1, 1.2])
    plt.ylim([-0.1, 1.2])
    plt.ylabel('True Positive Rate')
    plt.xlabel('False Positive Rate')
    plt.show()
# Function for training the classifier using a PyBrain neural network
def train(X, y):
    """Train a feed-forward neural network and evaluate a held-out split.

    Parameters
    ----------
    X : pandas.DataFrame
        Feature matrix produced by ``extract_features``.
    y : list of int
        Class labels (0 = fake, 1 = genuine).

    Returns
    -------
    tuple
        ``(true_classes, predictions)`` for the 20% test split.
    """
    ds = ClassificationDataSet(len(X.columns), 1, nb_classes=2)
    for k in xrange(len(X)):
        ds.addSample(X.iloc[k], np.array(y[k]))
    tstdata, trndata = ds.splitWithProportion(0.20)
    # One-hot encode the targets for the softmax output layer.
    trndata._convertToOneOfMany()
    tstdata._convertToOneOfMany()
    hidden_size = 5
    # Reuse a previously saved network if one exists; otherwise build a new
    # one with a single hidden layer.
    if os.path.isfile('fnn.xml'):
        fnn = NetworkReader.readFrom('fnn.xml')
    else:
        fnn = buildNetwork(trndata.indim, hidden_size, trndata.outdim,
                           outclass=SoftmaxLayer)
    trainer = BackpropTrainer(fnn, dataset=trndata, momentum=0.05,
                              learningrate=0.1, verbose=False,
                              weightdecay=0.01)
    trainer.trainUntilConvergence(verbose=False, validationProportion=0.15,
                                  maxEpochs=100, continueEpochs=10)
    # NOTE(review): the network is saved as 'oliv.xml' but loaded from
    # 'fnn.xml' above, so the cached model is never actually reused —
    # confirm the intended filename.
    NetworkWriter.writeToFile(fnn, 'oliv.xml')
    predictions = trainer.testOnClassData(dataset=tstdata)
    # Bug fix: the transcript ended with a bare expression; the caller
    # unpacks two values, so return them explicitly.
    return tstdata['class'], predictions
# ---- Driver script (originally notebook cells; printed outputs from the
# ---- transcript are preserved as comments) ----
print("reading datasets.....\n")
x, y = read_datasets()
x.describe()

print("extracting featues.....\n")
x = extract_features(x)
print(x.columns)
# Output: Index([u'statuses_count', u'followers_count', u'friends_count',
#                u'favourites_count', u'listed_count', u'sex_code',
#                u'lang_code'], dtype='object')
print(x.describe())

print("training datasets.......\n")
y_test, y_pred = train(x, y)
print('Classification Accuracy on Test dataset:', accuracy_score(y_test, y_pred))
print('Percent Error on Test dataset:', percentError(y_pred, y_test))
# Output: Classification Accuracy on Test dataset: 0.934280639432
#         Percent Error on Test dataset: 6.57193605684

cm = confusion_matrix(y_test, y_pred)
print('Confusion matrix, without normalization')
print(cm)
plot_confusion_matrix(cm)
# Row-normalise so each true class sums to 1.
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')

s = roc_auc_score(y_test, y_pred)
print("roc_auc_score:", s)
plot_roc_curve(y_test, y_pred)
#Detect fake profiles in online social networks using Support Vector
Machine
importsys
importcsv
importdatetime
importnumpyasnp
importpandasaspd
importmatplotlib.pyplotasplt
fromdatetimeimportdatetime
importsexmachine.detectorasgender
fromsklearn.preprocessingimportImputer
fromsklearnimportcross_validation
fromsklearnimportmetrics
fromsklearnimportpreprocessing
fromsklearn.linear_modelimportLinearRegression
fromsklearn.svmimportSVC
fromsklearn.metricsimportroc_curve,auc
fromsklearn.cross_validationimportStratifiedKFold,train_test_split
fromsklearn.grid_searchimportGridSearchCV
fromsklearn.metricsimportaccuracy_score
fromsklearn.learning_curveimportlearning_curve
fromsklearn.metricsimportclassification_report
fromsklearn.metricsimportconfusion_matrix
%matplotlibinline
#function for reading dataset from csv files
defread_datasets():
genuine_users=pd.read_csv("")
fake_users=pd.read_csv("")
#printgenuine_users.columns
#printgenuine_users.describe()
#printfake_users.describe()
x=pd.concat([genuine_users,fake_users])
y=len(fake_users)*[0]+len(genuine_users)*[1]
returnx,y
function for predicting sex using name of person
In[59]: defpredict_sex(name):
sex_predictor=gender.Detector(unknown_value=u"unknown",case_sensitiv
e=False)
first_name=name.str.split('').str.get(0)
sex=first_name.apply(sex_predictor.get_gender)
sex_dict={'female':-2,'mostly_female':-1,'unknown':0,'mostly_mal
e':1,'male':2}
sex_code=sex.map(sex_dict).astype(int)
returnsex_code
function for feature engineering
defextract_features(x):
lang_list=list(enumerate(np.unique(x['lang'])))
lang_dict={name:ifori,nameinlang_list}
x.loc[:,'lang_code']=x['lang'].map(lambdax:lang_dict[x]).astype(i
nt)
x.loc[:,'sex_code']=predict_sex(x['name'])
feature_columns_to_use=['statuses_count','followers_count','friend
s_count','favourites_count','listed_count','sex_code','lang_code']
x=x.loc[:,feature_columns_to_use]
returnx
function for ploting learning curve
defplot_learning_curve(estimator,title,X,y,ylim=None,cv=None,
n_jobs=1,train_sizes=np.linspace(.1,1.0,5)):
plt.figure()
plt.title(title)
ifylimisnotNone:
plt.ylim(*ylim)
plt.xlabel("Trainingexamples")
plt.ylabel("Score")
train_sizes,train_scores,test_scores=learning_curve(
estimator,X,y,cv=cv,n_jobs=n_jobs,train_sizes=train_sizes)
train_scores_mean=np.mean(train_scores,axis=1)
train_scores_std=np.std(train_scores,axis=1)
test_scores_mean=np.mean(test_scores,axis=1)
test_scores_std=np.std(test_scores,axis=1)
plt.grid()
plt.fill_between(train_sizes,train_scores_mean-train_scores_std,
train_scores_mean+train_scores_std,alpha=0.1,
color="r")
plt.fill_between(train_sizes,test_scores_mean-test_scores_std,
test_scores_mean+test_scores_std,alpha=0.1,colo
r="g")
plt.plot(train_sizes,train_scores_mean,'o-',color="r",
label="Trainingscore")
plt.plot(train_sizes,test_scores_mean,'o-',color="g",
label="Cross-validationscore")
plt.legend(loc="best")
returnplt
function for plotting confusion matrix
defplot_confusion_matrix(cm,title='Confusionmatrix',cmap=plt.cm.Blue
s):
target_names=['Fake','Genuine']
plt.imshow(cm,interpolation='nearest',cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks=np.arange(len(target_names))
plt.xticks(tick_marks,target_names,rotation=45)
plt.yticks(tick_marks,target_names)
plt.tight_layout()
plt.ylabel('Truelabel')
plt.xlabel('Predictedlabel')
#function for plotting ROC curve
defplot_roc_curve(y_test,y_pred):
false_positive_rate,true_positive_rate,thresholds=roc_curve(y_tes
t,y_pred)
print"FalsePositiverate:",false_positive_rate
print"TruePositiverate:",true_positive_rate
roc_auc=auc(false_positive_rate,true_positive_rate)
plt.title('ReceiverOperatingCharacteristic')
plt.plot(false_positive_rate,true_positive_rate,'b',
label='AUC=%0.2f'%roc_auc)
plt.legend(loc='lowerright')
plt.plot([0,1],[0,1],'r--')
plt.xlim([-0.1,1.2])
plt.ylim([-0.1,1.2])
plt.ylabel('TruePositiveRate')
plt.xlabel('FalsePositiveRate')
plt.show()
#Function for training data using Support Vector Machine
deftrain(X_train,y_train,X_test):
"""TrainsandpredictsdatasetwithaSVMclassifier"""
#Scalingfeatures
X_train=preprocessing.scale(X_train)
X_test=preprocessing.scale(X_test)
Cs=10.0**np.arange(-2,3,.5)
gammas=10.0**np.arange(-2,3,.5)
param=[{'gamma':gammas,'C':Cs}]
cvk=StratifiedKFold(y_train,n_folds=5)
classifier=SVC()
clf=GridSearchCV(classifier,param_grid=param,cv=cvk)
clf.fit(X_train,y_train)
print("Thebestclassifieris:",clf.best_estimator_)
clf.best_estimator_.fit(X_train,y_train)
#Estimatescore
scores=cross_validation.cross_val_score(clf.best_estimator_,X_trai
n,y_train,cv=5)
printscores
print('Estimatedscore:%0.5f(+/-%0.5f)'%(scores.mean(),scores.st
d()/2))
title='LearningCurves(SVM,rbfkernel,$\gamma=%.6f$)'%clf.best_e
stimator_.gamma
plot_learning_curve(clf.best_estimator_,title,X_train,y_train,c
v=5)
plt.show()
#Predictclass
y_pred=clf.best_estimator_.predict(X_test)
returny_test,y_pred
print("readingdatasets.....\n")
x,y=read_datasets()
int"extractingfeatues.....\n"
x=extract_features(x)
printx.columns
printx.describe()
print"splitingdatasetsintrainandtestdataset...\n"
X_train,X_test,y_train,y_test=train_test_split(x,y,test_size=0.20,random_state=44)
Index([u'statuses_count',u'followers_count',u'friends_count',
u'favourites_count',u'listed_count',u'sex_code',u'lang_code'],
dtype='object')
splitingdatasetsintrainandtestdataset...
In[79]: print("trainingdatasets.......\n")
y_test,y_pred=train(X_train,y_train,X_test)
print('ClassificationAccuracyonTestdataset:',accuracy_score(y_test,y_pred))
('Thebestclassifieris:',SVC(C=1.0,cache_size=200,class_weight=None,coef0=0.0,decision_function_shape=None,degree=3,gamma=31.622776601683793,kernel='rbf',max_iter=-1,probability=False,random_state=None,
shrinking=True,tol=0.001,verbose=False))
Estimatedscore:0.93301(+/-0.00651)
ClassificationAccuracyonTestdataset: 0.904255319149
In[82]: cm=confusion_matrix(y_test,y_pred)
print('Confusionmatrix,withoutnormalization')
print(cm)
plot_confusion_matrix(cm)
cm_normalized=cm.astype('float')/cm.sum(axis=1)[:,np.newaxis]
print('Normalizedconfusionmatrix')
print(cm_normalized)
plot_confusion_matrix(cm_normalized,title='Normalizedconfusionmatrix')
print(classification_report(y_test,y_pred,target_names=['Fake','Genuin
e']))
plot_roc_curve(y_test,y_pred)
importsys
importcsv
importdatetime
importnumpyasnp
importpandasaspd
importmatplotlib.pyplotasplt
fromdatetimeimportdatetime
importsexmachine.detectorasgender
fromsklearn.preprocessingimportImputer
fromsklearnimportcross_validation
fromsklearnimportmetrics
fromsklearnimportpreprocessing
fromsklearn.metricsimportroc_curve,auc
from sklearn.ensembleimportRandomForestClassifier
fromsklearn.cross_validationimportStratifiedKFold,train_test_split
fromsklearn.grid_searchimportGridSearchCV
fromsklearn.metricsimportaccuracy_score
fromsklearn.learning_curveimportlearning_curve
fromsklearn.metricsimportclassification_report
fromsklearn.metricsimportconfusion_matrix
%matplotlibinline
function for reading dataset from csv files
defread_datasets():
#Readsusersprofilefromcsvfiles
#we do not have the dataset of profiles
genuine_users=pd.read_csv("")
fake_users=pd.read_csv(")
#printgenuine_users.columns
#printgenuine_users.describe()
#printfake_users.describe()
x=pd.concat([genuine_users,fake_users])
y=len(fake_users)*[0]+len(genuine_users)*[1]
returnx,y
#function for predicting sex using name of person
defpredict_sex(name):
sex_predictor=gender.Detector(unknown_value=u"unknown",case_sensitive=False)
first_name=name.str.split('').str.get(0)
sex=first_name.apply(sex_predictor.get_gender)
sex_dict={'female':-2,'mostly_female':-1,'unknown':0,'mostly_mal
e':1,'male':2}
sex_code=sex.map(sex_dict).astype(int)
returnsex_code
#function for feature engineering
defextract_features(x):
lang_list=list(enumerate(np.unique(x['lang'])))
lang_dict={name:ifori,nameinlang_list}
x.loc[:,'lang_code']=x['lang'].map(lambdax:lang_dict[x]).astype(i
nt)
x.loc[:,'sex_code']=predict_sex(x['name'])
feature_columns_to_use=['statuses_count','followers_count','friend
s_count','favourites_count','listed_count','sex_code','lang_code']
x=x.loc[:,feature_columns_to_use]
returnx
#function for ploting learning curve
defplot_learning_curve(estimator,title,X,y,ylim=None,cv=None,n_jobs=1,train_sizes=np.linspace(.1,1.0,5)):
plt.figure()
plt.title(title)
ifylimisnotNone:
plt.ylim(*ylim)
plt.xlabel("Trainingexamples")
plt.ylabel("Score")
train_sizes,train_scores,test_scores=learning_curve(
estimator,X,y,cv=cv,n_jobs=n_jobs,train_sizes=train_sizes)
train_scores_mean=np.mean(train_scores,axis=1)
train_scores_std=np.std(train_scores,axis=1)
test_scores_mean=np.mean(test_scores,axis=1)
test_scores_std=np.std(test_scores,axis=1)
plt.grid()
plt.fill_between(train_sizes,train_scores_mean-train_scores_std,
train_scores_mean+train_scores_std,alpha=0.1,
color="r")
plt.fill_between(train_sizes,test_scores_mean-test_scores_std,test_scores_mean+test_scores_std,alpha=0.1,color="g")
plt.plot(train_sizes,train_scores_mean,'o-',color="r",
label="Trainingscore")
plt.plot(train_sizes,test_scores_mean,'o-',color="g",
label="Cross-validationscore")
plt.legend(loc="best")
returnplt
function for plotting confusion matrix
defplot_confusion_matrix(cm,title='Confusionmatrix',cmap=plt.cm.Blues)
target_names=['Fake','Genuine']
plt.imshow(cm,interpolation='nearest',cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks=np.arange(len(target_names))
plt.xticks(tick_marks,target_names,rotation=45)
plt.yticks(tick_marks,target_names)
plt.tight_layout()
plt.ylabel('Truelabel')
plt.xlabel('Predictedlabel')
function for plotting ROC curve
In[62]: defplot_roc_curve(y_test,y_pred):
false_positive_rate,true_positive_rate,thresholds=roc_curve(y_tes
t,y_pred)
print("FalsePositiverate:",false_positive_rate)
print("TruePositiverate:",true_positive_rate)
roc_auc=auc(false_positive_rate,true_positive_rate)
plt.title('ReceiverOperatingCharacteristic')
plt.plot(false_positive_rate,true_positive_rate,'b',
label='AUC=%0.2f'%roc_auc)
plt.legend(loc='lowerright')
plt.plot([0,1],[0,1],'r--')
plt.xlim([-0.1,1.2])
plt.ylim([-0.1,1.2])
plt.ylabel('TruePositiveRate')
plt.xlabel('FalsePositiveRate')
plt.show()
#Function for training data using Random Forest
deftrain(X_train,y_train,X_test):
clf=RandomForestClassifier(n_estimators=40,oob_score=True)
clf.fit(X_train,y_train)
print("Thebestclassifieris:",clf)
#Estimatescore
scores=cross_validation.cross_val_score(clf,X_train,y_train,cv=5)
printscores
print('Estimatedscore:%0.5f(+/-%0.5f)'%(scores.mean(),scores.st
d()/2))
title='LearningCurves(RandomForest)'
plot_learning_curve(clf,title,X_train,y_train,cv=5)
plt.show()
#Predict
y_pred=clf.predict(X_test)
returny_test,y_pred
In[64]: print"readingdatasets.....\n"
x,y=read_datasets()
x.describe()
print"extractingfeatues.....\n"
x=extract_features(x)
printx.columns
printx.describe()
Index([u'statuses_count',u'followers_count',u'friends_count',
u'favourites_count',u'listed_count',u'sex_code',u'lang_code'],
dtype='object')
print"splitingdatasetsintrainandtestdataset...\n"
X_train,X_test,y_train,y_test=train_test_split(x,y,test_size=0.20,ran
dom_state=44)
print"trainingdatasets.......\n"
y_test,y_pred=train(X_train,y_train,X_test)
print'ClassificationAccuracyonTestdataset:',accuracy_score(y_test,
y_pred)
('Thebestclassifieris:',RandomForestClassifier(bootstrap=True,clas
s_weight=None,criterion='gini',
max_depth=None,max_features='auto',max_leaf_nodes=None,
min_samples_leaf=1,min_samples_split=2,
min_weight_fraction_leaf=0.0,n_estimators=40,n_jobs=1,
oob_score=True,random_state=None,verbose=0,warm_start=False))
cm=confusion_matrix(y_test,y_pred)
print('Confusionmatrix,withoutnormalization')
print(cm)
plot_confusion_matrix(cm)
cm_normalized=cm.astype('float')/cm.sum(axis=1)[:,np.newaxis]
print('Normalizedconfusionmatrix')
print(cm_normalized)
plot_confusion_matrix(cm_normalized,title='Normalizedconfusionmatrix')
print(classification_report(y_test,y_pred,target_names=['Fake','Genuin
e']))
plot_roc_curve(y_test,y_pred)
| [
"noreply@github.com"
] | noreply@github.com |
fe6dd014b931c2fa05012f4752ab16a238f922a9 | 3455a90e9586cd11c8584bb284ad48d023263c75 | /backup_script.py | 6b5f4e518b40b31defc70b161ba872cac91ca9c0 | [
"MIT"
] | permissive | fefi42/rebel_backup | 69656b39d372e5d246aa81029ea2f3817e0ee193 | 7495e32f868665a91be4383fb5f4f0b9c012f150 | refs/heads/master | 2020-12-22T14:23:44.251672 | 2019-12-27T10:12:03 | 2019-12-27T10:12:03 | 236,821,783 | 0 | 0 | MIT | 2020-01-28T19:28:48 | 2020-01-28T19:28:47 | null | UTF-8 | Python | false | false | 786 | py | #!/usr/bin/env python3
import os
from dotenv import load_dotenv
import json
from datetime import datetime
from rebel_management_utilities import get_all_members
from os import path
def make_backup(api_key):
    """Dump all Action Network members to a timestamped JSON file in backups/."""
    members = get_all_members(api_key)
    # NOTE(review): the timestamp contains ':' which is invalid in Windows
    # file names — fine on Linux/macOS, confirm deployment target.
    current_date_str = datetime.now().strftime("%d-%m-%Y_%H:%M:%S")
    target = path.join('backups', f'backup_rebels_{current_date_str}.json')
    with open(target, 'w', encoding='utf-8') as backup_file:
        json.dump({'members': members}, backup_file, ensure_ascii=False, indent=4)
def load_api_key():
    """Return ACTION_NETWORK_API_KEY from the environment (.env supported).

    Raises OSError when the variable is missing or empty.
    """
    load_dotenv()
    api_key = os.getenv("ACTION_NETWORK_API_KEY")
    if api_key:
        return api_key
    raise OSError('ACTION_NETWORK_API_KEY not found in .env')
if __name__ == '__main__':
    # Entry point: load credentials, then write one fresh backup file.
    api_key = load_api_key()
    make_backup(api_key)
| [
"lubbers.sc@gmail.com"
] | lubbers.sc@gmail.com |
093ac8ffb84f49d42b53ec3aadd14af6122609d1 | 18e8e40a161ee7220ccb54e1a496c804355d8001 | /backend/app/models/models.py | 8d2e875d10217772b89758ed82dd3c1f457bf999 | [] | no_license | heidudu/test_docker | 1f0fb245bdcfd48c32c78bb90854b7b1fc32238a | 6136d320065add19ebf01f8929ed3dc881bfad53 | refs/heads/master | 2022-12-10T03:41:42.457794 | 2019-09-22T03:43:19 | 2019-09-22T03:43:19 | 176,408,290 | 0 | 0 | null | 2022-12-08T01:42:25 | 2019-03-19T02:37:35 | TSQL | UTF-8 | Python | false | false | 9,413 | py | from app import db
import datetime
import shortuuid
import re
# Up-vote association table: many-to-many link between users and the
# replies they have up-voted.
ups = db.Table('ups',
    db.Column('user_id', db.String(64), db.ForeignKey('user.id'), primary_key=True),
    db.Column('reply_id', db.String(64), db.ForeignKey('reply.id'), primary_key=True),
)
# Bookmark ("collect") association table: many-to-many link between users
# and the topics they have collected, with a per-row creation timestamp.
collects = db.Table('collects',
    db.Column('user_id', db.String(64), db.ForeignKey('user.id'), primary_key=True),
    db.Column('topic_id', db.String(64), db.ForeignKey('topic.id'), primary_key=True),
    # Pass the function itself (not its result) so SQLAlchemy calls it per
    # INSERT; datetime.datetime.now() here would freeze the import-time value.
    db.Column('create_at', db.DateTime, default=datetime.datetime.now)
)
class User(db.Model):
    """Forum user account.

    Holds profile data plus relationships to the topics and replies the
    user authored and the private messages they sent or received.
    """
    __tablename__ = 'user'
    # The default MUST be a callable: evaluating .random() at class-definition
    # time would bake a single value in, so every row after the first would
    # collide on the primary key.
    id = db.Column(db.String(64), default=lambda: shortuuid.ShortUUID().random(length=24), primary_key=True)
    loginname = db.Column(db.String(64), unique=True)
    avatar_url = db.Column(db.String(150), unique=True)
    score = db.Column(db.Integer)
    # Callable for the same reason: record each row's insert time, not the
    # process start-up time.
    create_at = db.Column(db.DateTime, default=datetime.datetime.now)
    accesstoken = db.Column(db.String(64), unique=True)
    topics = db.relationship('Topic', backref='author', lazy='dynamic')
    replies = db.relationship('Reply', backref='author', lazy='dynamic')
    send_messages = db.relationship("Message", foreign_keys='Message.send_user_id', back_populates="send_user", lazy='dynamic')
    receive_messages = db.relationship("Message", foreign_keys='Message.receive_user_id', back_populates="receive_user", lazy='dynamic')

    @property
    def recent_topics(self):
        """Last five topics this user started, newest first, as plain dicts."""
        data = []
        topics = self.topics.order_by(Topic.create_at.desc()).limit(5)
        if topics:
            for i in topics:
                data.append({
                    "id": i.id,
                    "author": {
                        "loginname": self.loginname,
                        "avatar_url": self.avatar_url
                    },
                    "title": i.title,
                    "last_reply_at": i.last_reply_at
                })
        return data

    @property
    def recent_replies(self):
        """Up to five topics this user recently replied in, one entry per topic."""
        data = []
        replies = self.replies.group_by(Reply.topic_id).order_by(Reply.create_at.desc()).limit(5)
        if replies:
            for i in replies:
                data.append({
                    "id": i.topic_id,
                    "author": {
                        "loginname": i.topic.author.loginname,
                        "avatar_url": i.topic.author.avatar_url
                    },
                    "title": i.topic.title,
                    "last_reply_at": i.topic.last_reply_at
                })
        return data

    @property
    def author_serialize(self):
        """Minimal author payload embedded in topic/reply/message JSON."""
        return {
            'loginname': self.loginname,
            'avatar_url': self.avatar_url,
        }

    @staticmethod
    def get_by_accesstoken(accesstoken):
        """Look up a user by access token; returns None when not found."""
        return User.query.filter(User.accesstoken == accesstoken).first()

    def __repr__(self):
        return '<User %r>' % self.loginname
class Topic(db.Model):
    """Forum topic (thread) started by a user."""
    __tablename__ = 'topic'
    # Callable default: evaluating .random() at class-definition time would
    # give every new row the same primary key (collision after first insert).
    id = db.Column(db.String(64), default=lambda: shortuuid.ShortUUID().random(length=24), primary_key=True)
    title = db.Column(db.String(150))
    content = db.Column(db.Text)
    author_id = db.Column(db.String(64), db.ForeignKey('user.id'))
    top = db.Column(db.Boolean, default=False)   # pinned topic
    good = db.Column(db.Boolean, default=False)  # featured ("good") topic
    reply_count = db.Column(db.Integer)
    visit_count = db.Column(db.Integer)
    # Callables so each row records its own insert time rather than the
    # timestamp of process start-up.
    create_at = db.Column(db.DateTime, default=datetime.datetime.now)
    # NOTE(review): update_at is only set on insert; if it should track
    # edits, add onupdate=datetime.datetime.now — confirm intended behavior.
    update_at = db.Column(db.DateTime, default=datetime.datetime.now)
    last_reply_at = db.Column(db.DateTime, default=datetime.datetime.now)
    tab = db.Column(db.String(64))
    replies = db.relationship('Reply', backref='topic', lazy='dynamic')
    collects = db.relationship('User', secondary=collects, backref=db.backref('collects', lazy='dynamic'), lazy='dynamic')
    messages = db.relationship('Message', backref='topic', lazy='dynamic')

    @property
    def serialize(self):
        """Dict representation used by the JSON API."""
        return {
            'id': self.id,
            'author_id': self.author_id,
            'tab': self.tab,
            'content': self.content,
            'title': self.title,
            'last_reply_at': self.last_reply_at,
            'good': self.good,
            'top': self.top,
            'reply_count': self.reply_count,
            'visit_count': self.visit_count,
            'create_at': self.create_at,
            'author': self.author.author_serialize
        }

    @staticmethod
    def get_topics(tab, limit, page):
        """Return one page of serialized topics for the given tab.

        tab: 'all' (everything except 'dev'), one of the concrete tab
        names, or 'good' (featured topics). Unknown tabs yield [].
        """
        data = []
        if tab == 'all':
            data = Topic.query.filter(Topic.tab != 'dev').order_by(Topic.last_reply_at.desc()).paginate(page, limit, False).items
        elif tab in ['dev', 'share', 'job', 'ask']:
            data = Topic.query.filter(Topic.tab == tab).order_by(Topic.last_reply_at.desc()).paginate(page, limit, False).items
        elif tab == 'good':
            data = Topic.query.filter(Topic.good == True).order_by(Topic.last_reply_at.desc()).paginate(page, limit, False).items
        return [i.serialize for i in data]

    def include_replies(self, accesstoken):
        """Serialize the topic together with all of its replies.

        If accesstoken identifies a user, replies that user has up-voted
        are marked with is_uped=True.
        """
        data = self.serialize
        # Hoisted out of the loop: the viewer lookup does not depend on the
        # reply being serialized (the original re-queried per reply).
        user = None
        if accesstoken:
            user = User.query.filter(User.accesstoken == accesstoken).first()
        replies_list = []
        for i in self.replies.order_by(Reply.create_at.desc()).all():
            reply = i.serialize
            if user and (user.id in reply['ups']):
                reply['is_uped'] = True
            replies_list.append(reply)
        data.update({'replies': replies_list})
        return data

    def __repr__(self):
        return '<Topic %r>' % self.id
class Reply(db.Model):
    """Reply to a topic; may itself answer another reply (reply_id)."""
    __tablename__ = 'reply'
    # Callable default — a value computed here would be shared by all rows
    # and collide on the primary key after the first insert.
    id = db.Column(db.String(64), default=lambda: shortuuid.ShortUUID().random(length=24), primary_key=True)
    content = db.Column(db.Text)
    topic_id = db.Column(db.String(64), db.ForeignKey('topic.id'))
    author_id = db.Column(db.String(64), db.ForeignKey('user.id'))
    reply_id = db.Column(db.String(64), db.ForeignKey('reply.id'))
    # Callable so each reply records its own creation time.
    create_at = db.Column(db.DateTime, default=datetime.datetime.now)
    ups = db.relationship('User', secondary=ups, backref=db.backref('ups', lazy='dynamic'), lazy='dynamic')
    messages = db.relationship('Message', backref='reply', lazy='dynamic')

    @property
    def serialize(self):
        """Dict representation; is_uped is filled in per-viewer by callers."""
        return {
            'id': self.id,
            'author': self.author.author_serialize,
            'content': self.content,
            'create_at': self.create_at,
            'reply_id': self.reply_id,
            'is_uped': False,
            'ups': [i.id for i in self.ups],
        }

    def __repr__(self):
        return '<Reply %r>' % self.id
# Message types:
#   reply  - xx replied to your topic
#   reply2 - xx replied to you inside a topic
#   at     - xx @-mentioned you
class Message(db.Model):
    """Notification delivered to a user when someone replies to or mentions them."""
    __tablename__ = 'message'
    # Callable default — a value computed at class-definition time would be
    # shared by every row and collide on the primary key.
    id = db.Column(db.String(64), default=lambda: shortuuid.ShortUUID().random(length=24), primary_key=True)
    type = db.Column(db.String(64))
    # User who receives the notification.
    receive_user_id = db.Column(db.String(64), db.ForeignKey('user.id'))
    # User whose action triggered the notification.
    send_user_id = db.Column(db.String(64), db.ForeignKey('user.id'))
    topic_id = db.Column(db.String(64), db.ForeignKey('topic.id'))
    reply_id = db.Column(db.String(64), db.ForeignKey('reply.id'))
    has_read = db.Column(db.Boolean, default=False)
    # Callable so each message records its own creation time.
    create_at = db.Column(db.DateTime, default=datetime.datetime.now)
    send_user = db.relationship('User', back_populates='send_messages', foreign_keys=[send_user_id])
    receive_user = db.relationship('User', back_populates='receive_messages', foreign_keys=[receive_user_id])

    @property
    def serialize(self):
        """Dict representation used by the JSON API.

        NOTE(review): assumes self.topic and self.reply are always set for
        these message types — confirm callers never create a Message
        without both.
        """
        return {
            'id': self.id,
            'type': self.type,
            'has_read': self.has_read,
            'author': self.send_user.author_serialize,
            'topic': {
                'id': self.topic.id,
                'title': self.topic.title,
                'last_reply_at': self.topic.last_reply_at
            },
            'reply': {
                'id': self.reply.id,
                'content': self.reply.content,
                'ups': [i.id for i in self.reply.ups],
                'create_at': self.reply.create_at,
            },
            'create_at': self.create_at
        }

    @staticmethod
    def send_message(users, topic_id, reply_id, receive_user_id):
        """Create 'at' messages for @-mentioned users plus one reply/reply2
        message for the replied-to user.

        users: iterable of '@name' mentions extracted from the reply body.
        The replied-to user is skipped in the mention pass so they do not
        receive a duplicate notification.
        """
        topic = Topic.query.filter(Topic.id == topic_id).first()
        reply = Reply.query.filter(Reply.id == reply_id).first()
        user = User.query.filter(User.id == receive_user_id).first()
        # @-mention notifications (excluding the user already getting a reply)
        for i in users:
            username = i.replace('@', '')
            at_user = User.query.filter(User.loginname == username).first()
            if at_user and at_user.loginname != user.loginname:
                message = Message(type='at', receive_user=at_user, send_user=reply.author, topic=topic, reply=reply)
                db.session.add(message)
                db.session.commit()
            else:
                continue
        # 'reply' when the topic author is notified, 'reply2' for a reply-to-reply.
        reply_message = Message(type='reply' if topic.author.id == receive_user_id else 'reply2', receive_user=user,
                                send_user=reply.author, topic=topic, reply=reply)
        db.session.add(reply_message)
        db.session.commit()
        return True

    def __repr__(self):
        return '<Message %r>' % self.id
| [
"54heidudu@gmail.com"
] | 54heidudu@gmail.com |
20f88117321e1a091b904143e980357032924fe5 | 52e542e4e3757ab6a3374cf2f7402501576a327a | /Basic/Diagonal gate.py | 82464679d59d0c482d0f421c398ea189d8f8da05 | [
"CC0-1.0"
] | permissive | QuForce/Quantum-Computing-UK-Repository | 7c7b9b84811b9f385d88136e1c42795c403161de | 1a10c1207cb9d6882fe4cd295d52a36373f1fb2f | refs/heads/main | 2023-08-05T23:03:40.719601 | 2021-09-19T11:19:12 | 2021-09-19T11:19:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 753 | py | from qiskit import QuantumRegister, ClassicalRegister
from qiskit import QuantumCircuit, execute,IBMQ
from qiskit.tools.monitor import job_monitor
from qiskit.circuit.library import Diagonal
import numpy as np
pi = np.pi
IBMQ.enable_account('ENTER API KEY HERE')
provider = IBMQ.get_provider(hub='ibm-q')
backend = provider.get_backend('ibmq_qasm_simulator')
diagonals = [-1,-1,-1,-1]
q = QuantumRegister(2,'q')
c = ClassicalRegister(2,'c')
circuit = QuantumCircuit(q,c)
circuit.h(q[0])
circuit.h(q[1])
circuit += Diagonal(diagonals)
circuit.h(q[0])
circuit.h(q[1])
circuit.measure(q,c) # Qubit Measurment
print(circuit)
job = execute(circuit, backend, shots=8192)
job_monitor(job)
counts = job.result().get_counts()
print(counts)
| [
"ScarboroughTechnologies@users.github.com"
] | ScarboroughTechnologies@users.github.com |
62a92212c4991a2ebd127c5af61f91d87f002943 | c75c158acf4adfc9abba2b6d5d39273fba270d98 | /setup.py | 9127e50c9edcf65a6058d78f60541d552bc3ad2d | [] | no_license | Vagrants/blackbird-chrony | bf030939522568e0de2e040af4141e84e65653c5 | 134490110d1d25f70fbf5dc21e642f4092bf43ac | refs/heads/master | 2020-06-06T04:45:54.702619 | 2015-01-13T07:22:41 | 2015-01-13T07:22:41 | 29,177,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 826 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (The cookie was misspelled "codig", which makes it ineffective per PEP 263.)

import os

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup


def read(fname):
    """Return the contents of *fname*, resolved relative to this file."""
    # Context manager closes the handle; explicit encoding avoids depending
    # on the locale default.
    with open(os.path.join(os.path.dirname(__file__), fname), encoding='utf-8') as f:
        return f.read()


setup(
    name='blackbird-chrony',
    version='0.1.0',
    description=(
        'get chrony information.'
    ),
    long_description=read('PROJECT.txt'),
    classifiers=[
        'Development Status :: 4 - Beta',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
    ],
    author='makocchi',
    author_email='makocchi@gmail.com',
    url='https://github.com/Vagrants/blackbird-chrony',
    data_files=[
        ('/opt/blackbird/plugins', ['chrony.py']),
        ('/etc/blackbird/conf.d', ['chrony.cfg'])
    ],
    test_suite='tests',
)
| [
"makocchi.ca@gmail.com"
] | makocchi.ca@gmail.com |
22b6bfcc234ff82793ffe531306dadb23a566f70 | 9db0aa95e3c8cf44784c1ba5534b9f30bbedc54e | /chapter2/list&generator.py | 908c345829db202fc348167e12c5496eb21aa10d | [] | no_license | wanggis/fluent_python | 9395141655638dce406f5a9a4eb3def7d29f5a81 | 8b73a82f4c6db17088bbf90202bc0216c502fd27 | refs/heads/master | 2021-01-09T12:51:51.081557 | 2020-02-22T09:22:15 | 2020-02-22T09:22:15 | 242,307,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,243 | py | """
列表推导和生成器表达式
python 也从ABC那里继承了用统一的风格去处理序列数据类型
author:wanggis
"""
import array
# 列表推导是快速构建列表(list)的快捷方式(这是其唯一的作用,原书),而生成器表达式则可以用来创建其他任何类型的序列
# 列表推导的作用是为了让代码变得更加简洁,切记不可以乱用
symbols = "$¥€£¢¤"
codes = []
# 传统方法将字符串变成unicode码位的列表
for symbol in symbols:
codes.append(ord(symbol))
print("传统方法:", codes)
# 利用列表推导的方法进行转换
codes = [ord(symbol) for symbol in symbols]
print("列表推导:", codes)
# python3 的改进:列表推导不会再出现变量泄露的问题
x = "ABC"
dummy = [ord(x) for x in x]
# 输出的仍然是“ABC”,同时列表推导也创建了正确的列表
print(x)
print(dummy)
# 列表推导同filter和map的比较
# 会另附两者的速度比较的代码 详见list_comp_speed.py
beyond_ascii = [ord(s) for s in symbols if ord(s) > 127]
print("列表推导的方法:", beyond_ascii)
beyond_ascii = list(filter(lambda c: c > 127, map(ord, symbols)))
print("filter/map 的方法:", beyond_ascii)
# 使用列表推导计算笛卡尔积
# 生成不同颜色,尺寸的衣服
colors = ["black", "white"]
sizes = ['S', 'M', 'L']
clothes = [(color, size) for color in colors
for size in sizes]
print(clothes)
# 生成器表达式
# 虽然可以用列表推导的方式来初始化元组、数组或者其他的序列类型,但是生成器表达式才是更好的选择
# 生成器表达式背后遵循了迭代器协议,可以逐个地产出元素,而不是先建立一个完整的列表,然后再把这个列表传递到某个构造函数里
# 优点:能够进一步的节省内存
# 语法和列表生成器类似,只是将方括号变成了圆括号
# 用生成器表达式初始化元组和数组
print(tuple(ord(symbol) for symbol in symbols)) # 元组
print(array.array("I", (ord(symbol) for symbol in symbols))) # 数组
# 使用生成器表达式计算笛卡尔积
for tshirt in ("%s %s" % (c, s) for c in colors for s in sizes):
print(tshirt)
| [
"ws260308526519@163.com"
] | ws260308526519@163.com |
7b26852ba9120a2024df011559067008fa6240b1 | bb9445cdc8b518d035735375151564c496a1a330 | /meta.py | 079516c96dce160b0c8b56a017153a523f4fa622 | [] | no_license | sundararajan93/regex-examples | ec0ab0b61c0e38ebf012793d579b9084cd79fe23 | 27ac11c249b6917f35463165ab47dadbd0cf4213 | refs/heads/main | 2023-06-09T01:43:14.467906 | 2021-07-01T16:25:49 | 2021-07-01T16:25:49 | 382,092,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 775 | py | #!/usr/bin/python3
import re
text_pattern = "123asdf45346abc2344ABCaabc."
pattern = re.compile(r".")
matches = pattern.finditer(text_pattern)
print(". - to print all the pattern except newline")
for i in matches:
print(i.group())
print("To print the actual . you can escape with \\")
dot = re.compile(r"\.")
find_dot = dot.finditer(text_pattern)
for i in find_dot:
print(i.group())
print("To find match in start of the text")
start_with = re.compile("^123")
find_start_match = start_with.finditer(text_pattern)
for i in find_start_match:
print(i)
print("To find the word ends with a pattern of text")
text_new = "Kumar Singh"
text_pattern_new = re.compile("Singh$")
find_end_match = text_pattern_new.findall(text_new)
for i in find_end_match:
print(i) | [
"geeksundar1993@gmail.com"
] | geeksundar1993@gmail.com |
abfe6cbaac9ddeffce0019053b066e6517c9ec1f | 4bf3aaf77c309a489100b98a8c03532632df152c | /Python/BOJ/13460.py | b39eb5f4d88753e6b925be54efe84dd74b2b14ff | [] | no_license | murane/PS | 7fbfc54d962231949efc67f1a35c4b0119de0780 | e938c6c503aeac08bf65e1e66709172b0e5da6ef | refs/heads/master | 2023-05-06T22:51:54.105811 | 2021-05-30T03:34:53 | 2021-05-30T03:34:53 | 293,699,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,785 | py | import sys
from collections import deque
r=sys.stdin.readline
N,M=map(int,r().split())
board=[]
D=[(1,0),(-1,0),(0,1),(0,-1)]
for _ in range(N):
board.append(list(r().strip()))
for i in range(N):
for j in range(M):
if board[i][j]=="R":
R=[i,j]
board[i][j]="."
elif board[i][j]=="B":
B=[i,j]
board[i][j]="."
def move(x,y,d):
    # Roll a marble from (x, y) in direction d until it hits a wall '#' or
    # falls into the hole 'O'.
    # Returns (fell_in_hole, steps_counted, final_position); the step count
    # is only used by the caller to break ties when both marbles would land
    # on the same cell.
    dist=0
    while True:
        nextPos=board[x+d[0]][y+d[1]]
        if nextPos=='.':
            x,y=x+d[0],y+d[1]
        elif nextPos=='O':
            # Fell into the hole; the final position no longer matters.
            return True,0,[-1,-1]
        elif nextPos=='#':
            return False,dist,[x,y]
        dist+=1
def bfs():
    """BFS over (red, blue) marble positions.

    Returns the minimum number of board tilts (at most 10, per the problem
    statement) needed to drop only the red marble into the hole, or -1 when
    it cannot be done within 10 tilts.
    """
    q=deque()
    q.append([R,B,0])
    visit=set()
    visit.add((tuple(R),tuple(B)))
    while q:
        red,blue,cnt=q.popleft()
        # BFS pops states in non-decreasing cnt order, so the first state
        # that already spent 10 tilts means every remaining state needs >10.
        # (The original had this cap commented out, allowing answers > 10.)
        if cnt>=10: return -1
        tmpRed,tmpBlue=red,blue
        for i in range(4): # try all four tilt directions
            flgR,distR,red=move(tmpRed[0],tmpRed[1],D[i]) # roll both marbles
            flgB,distB,blue=move(tmpBlue[0],tmpBlue[1],D[i])
            if flgR and not flgB:
                return cnt+1 # red fell in, blue did not: success
            elif flgB: continue # blue fell in: this tilt is invalid
            elif not flgR and not flgB: # neither fell in
                if red==blue: # both landed on the same cell
                    # the marble that rolled farther arrived second; push it
                    # back one step in the tilt direction
                    if distR>distB:
                        red=red[0]-D[i][0],red[1]-D[i][1]
                    else:
                        blue=blue[0]-D[i][0],blue[1]-D[i][1]
                if (tuple(red),tuple(blue)) not in visit:
                    q.append([red,blue,cnt+1]) # enqueue the new state
                    visit.add((tuple(red),tuple(blue)))
    return -1
print(bfs())
| [
"murane@naver.com"
] | murane@naver.com |
44d232ea537415baef8dba91163e78eed759afb0 | d6882f1f09350f7f03a2d615c9c64462a10438c7 | /WX_FUNC/models.py | d622d17f1cd1385ab5b4d5ae8cee9a0e3e7b30e0 | [] | no_license | yantaobattler/WXGZ | be7ce8d3dfffb1bebf32508b0530284304dc6df2 | 24e562e9fd6171a8aa75689a0413604626c15963 | refs/heads/master | 2020-04-26T04:43:05.603952 | 2019-08-07T11:39:01 | 2019-08-07T11:39:01 | 173,307,840 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,251 | py | from django.db import models
# Create your models here.
# 微信信息主流水表
class MainTransLog(models.Model):
FromUserName = models.CharField(max_length=100)
CreateTime = models.CharField(max_length=30)
MsgType = models.CharField(max_length=20)
MsgId = models.CharField(max_length=70)
TransTime = models.DateTimeField(auto_now_add=True)
# 空气质量-城市列表
class CityList(models.Model):
CityName = models.CharField(max_length=10, primary_key=True)
CityURL = models.CharField(max_length=50)
# 用户状态表
class UserList(models.Model):
FromUserName = models.CharField(max_length=100, default='')
UserStatus = models.CharField(max_length=3, default='00')
# 机器人对话流水表
class RobotLog(models.Model):
FromUserName = models.CharField(max_length=100, default='')
MsgId = models.CharField(max_length=70, default='')
RobotType = models.CharField(max_length=2) # 1-图灵 2-青云客 3-小i L-localAI
TransTime = models.DateTimeField(auto_now_add=True)
MsgType = models.CharField(max_length=20)
ReqDict = models.CharField(max_length=2000) # req_dict用字符串保存起来
RspDict = models.CharField(max_length=2000) # rsp_dict用字符串保存起来
| [
"yantao2212@126.com"
] | yantao2212@126.com |
f68f7372f7b3e018651f455bc865e5b6a6787a7a | 3031baa0c181cd084b7af2337de99188fdea19ab | /build/c4che/chalk_cache.py | b730211da4cbfc8740043c152171b79af69a43be | [] | no_license | oschebell/pebble_watch_hello_world | 0105ea483daba9dac0e0e54e67e35a0483f918d5 | c12a25bf032753ad9cb17df561e34eceafbac2f5 | refs/heads/master | 2020-12-08T06:28:19.932540 | 2016-09-07T13:25:02 | 2016-09-07T13:25:02 | 67,608,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,933 | py | AR = 'arm-none-eabi-ar'
ARFLAGS = 'rcs'
AS = 'arm-none-eabi-gcc'
BINDIR = '/usr/local/bin'
BLOCK_MESSAGE_KEYS = []
BUILD_DIR = 'chalk'
BUILD_TYPE = 'rocky'
BUNDLE_BIN_DIR = 'chalk'
BUNDLE_NAME = 'helloworld.pbw'
CC = ['arm-none-eabi-gcc']
CCLNK_SRC_F = []
CCLNK_TGT_F = ['-o']
CC_NAME = 'gcc'
CC_SRC_F = []
CC_TGT_F = ['-c', '-o']
CC_VERSION = ('4', '7', '2')
CFLAGS = ['-std=c99', '-mcpu=cortex-m3', '-mthumb', '-ffunction-sections', '-fdata-sections', '-g', '-fPIE', '-Os', '-D_TIME_H_', '-Wall', '-Wextra', '-Werror', '-Wno-unused-parameter', '-Wno-error=unused-function', '-Wno-error=unused-variable']
CFLAGS_MACBUNDLE = ['-fPIC']
CFLAGS_cshlib = ['-fPIC']
CPPPATH_ST = '-I%s'
DEFINES = ['RELEASE', 'PBL_PLATFORM_CHALK', 'PBL_COLOR', 'PBL_ROUND', 'PBL_MICROPHONE', 'PBL_SMARTSTRAP', 'PBL_HEALTH', 'PBL_COMPASS', 'PBL_SMARTSTRAP_POWER', 'PBL_DISPLAY_WIDTH=180', 'PBL_DISPLAY_HEIGHT=180', 'PBL_SDK_3']
DEFINES_ST = '-D%s'
DEST_BINFMT = 'elf'
DEST_CPU = 'arm'
DEST_OS = 'darwin'
INCLUDES = ['chalk']
LD = 'arm-none-eabi-ld'
LIBDIR = '/usr/local/lib'
LIBPATH_ST = '-L%s'
LIB_DIR = 'node_modules'
LIB_JSON = []
LIB_ST = '-l%s'
LINKFLAGS = ['-mcpu=cortex-m3', '-mthumb', '-Wl,--gc-sections', '-Wl,--warn-common', '-fPIE', '-Os']
LINKFLAGS_MACBUNDLE = ['-bundle', '-undefined', 'dynamic_lookup']
LINKFLAGS_cshlib = ['-shared']
LINKFLAGS_cstlib = ['-Wl,-Bstatic']
LINK_CC = ['arm-none-eabi-gcc']
MESSAGE_KEYS = {'CHUNK_SIZE_BYTES': 2, 'DATA': 5, 'TOTAL_CHUNKS': 3, 'CHANNEL_READY': 1, 'CURRENT_CHUNK': 4}
MESSAGE_KEYS_DEFINITION = '/Users/user/workspace/pebble/helloworld/build/src/message_keys.auto.c'
MESSAGE_KEYS_HEADER = '/Users/user/workspace/pebble/helloworld/build/include/message_keys.auto.h'
MESSAGE_KEYS_JSON = '/Users/user/workspace/pebble/helloworld/build/js/message_keys.json'
NODE = '/usr/local/bin/node'
NODE_PATH = '/Users/user/Library/Application Support/Pebble SDK/SDKs/current/node_modules'
PEBBLE_SDK_COMMON = '/Users/user/Library/Application Support/Pebble SDK/SDKs/current/sdk-core/pebble/common'
PEBBLE_SDK_PLATFORM = '/Users/user/Library/Application Support/Pebble SDK/SDKs/current/sdk-core/pebble/chalk'
PEBBLE_SDK_ROOT = '/Users/user/Library/Application Support/Pebble SDK/SDKs/current/sdk-core/pebble'
PLATFORM = {'TAGS': ['chalk', 'color', 'round', 'mic', 'strap', 'strappower', 'compass', 'health', '180w', '180h'], 'ADDITIONAL_TEXT_LINES_FOR_PEBBLE_H': [], 'MAX_APP_BINARY_SIZE': 65536, 'MAX_RESOURCES_SIZE': 1048576, 'MAX_APP_MEMORY_SIZE': 65536, 'MAX_WORKER_MEMORY_SIZE': 10240, 'NAME': 'chalk', 'BUNDLE_BIN_DIR': 'chalk', 'BUILD_DIR': 'chalk', 'MAX_RESOURCES_SIZE_APPSTORE': 262144, 'DEFINES': ['PBL_PLATFORM_CHALK', 'PBL_COLOR', 'PBL_ROUND', 'PBL_MICROPHONE', 'PBL_SMARTSTRAP', 'PBL_HEALTH', 'PBL_COMPASS', 'PBL_SMARTSTRAP_POWER', 'PBL_DISPLAY_WIDTH=180', 'PBL_DISPLAY_HEIGHT=180']}
PLATFORM_NAME = 'chalk'
PREFIX = '/usr/local'
PROJECT_INFO = {'appKeys': {'CHUNK_SIZE_BYTES': 2, 'DATA': 5, 'TOTAL_CHUNKS': 3, 'CHANNEL_READY': 1, 'CURRENT_CHUNK': 4}, u'sdkVersion': u'3', u'projectType': u'rocky', u'uuid': u'a77120ad-50d9-4c81-b08c-80ae8f3840fa', 'messageKeys': {'CHUNK_SIZE_BYTES': 2, 'DATA': 5, 'TOTAL_CHUNKS': 3, 'CHANNEL_READY': 1, 'CURRENT_CHUNK': 4}, 'companyName': u'MakeAwesomeHappen', u'enableMultiJS': True, u'watchapp': {u'watchface': True}, 'versionLabel': u'1.0', 'longName': u'helloworld', u'displayName': u'helloworld', 'shortName': u'helloworld', u'resources': {u'media': []}, 'name': u'helloworld'}
REQUESTED_PLATFORMS = []
RESOURCES_JSON = []
RPATH_ST = '-Wl,-rpath,%s'
SDK_VERSION_MAJOR = 5
SDK_VERSION_MINOR = 83
SHLIB_MARKER = None
SIZE = 'arm-none-eabi-size'
SONAME_ST = '-Wl,-h,%s'
STLIBPATH_ST = '-L%s'
STLIB_MARKER = None
STLIB_ST = '-l%s'
SUPPORTED_PLATFORMS = ['basalt', 'chalk', 'diorite']
TARGET_PLATFORMS = ['diorite', 'chalk', 'basalt']
TIMESTAMP = 1473254568
cprogram_PATTERN = '%s'
cshlib_PATTERN = 'lib%s.so'
cstlib_PATTERN = 'lib%s.a'
macbundle_PATTERN = '%s.bundle'
| [
"oschebell@gmail.com"
] | oschebell@gmail.com |
6991f93609414d052bc98ac1958b0e8425140975 | 22c977bbfaf24d9ed61eb7cc6f2b3b1daeb79513 | /users/migrations/0001_initial.py | 7c67e4ce8e0c9589d22bda4e36c75bf7e1ac0144 | [
"MIT"
] | permissive | qlma/app | 68a8a8949bfc36cbc51e430c87b2ce6e83040759 | 2febd62b45577febf49d738accaa40fc076e38c4 | refs/heads/master | 2023-04-22T01:57:15.852782 | 2021-05-17T18:29:56 | 2021-05-17T18:29:56 | 271,262,615 | 1 | 3 | MIT | 2021-05-09T21:15:37 | 2020-06-10T11:48:12 | Python | UTF-8 | Python | false | false | 3,737 | py | # Generated by Django 2.2.12 on 2020-05-26 08:20
import os
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
from django.utils import timezone
class Migration(migrations.Migration):
    """Initial schema for the users app: a custom auth user plus a Profile."""
    # First migration of this app.
    initial = True
    dependencies = [
        ('auth', '0011_update_proxy_permissions'),
    ]
    operations = [
        # Custom user model replacing django.contrib.auth's default user;
        # mirrors AbstractUser fields plus a user_type discriminator.
        migrations.CreateModel(
            name='CustomUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
                # Role discriminator; note choices use int keys but the field
                # itself is a CharField (as generated) -- values stored as text.
                ('user_type', models.CharField(choices=[(1, 'Student'), (2, 'Teacher'), (3, 'Parent'), (4, 'Admin')], default=1, max_length=10)),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
        # One-to-one profile attached to the user (avatar, address, flag
        # set once the email address is confirmed).
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(default='default.jpg', upload_to='profile_pics')),
                ('address', models.CharField(blank=True, max_length=150, help_text='Users home address.', verbose_name='address')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('email_confirmed', models.BooleanField(default=False))
            ],
        ),
    ]
| [
"jouni.leino@siili.com"
] | jouni.leino@siili.com |
bd6d984d5e8b2f93ad6ba4528669a6a099a0321a | 8f94a4749e2fb07466dc60bb506d0f77e3d59af1 | /Backend/apps/hero/migrations/0004_auto_20190912_0040.py | 7e2c69bb9e70bb32ed870b14e828a7d5ebba7019 | [] | no_license | tagmetag/YouHero | 37cb60f37fa4212ddb0e51fd40bcc6431c2228bd | a8886e070deaf7c28ef4d7c67e8e235db6b30e0a | refs/heads/master | 2022-06-19T09:54:44.809803 | 2019-09-13T06:48:32 | 2019-09-13T06:48:32 | 206,221,428 | 0 | 0 | null | 2022-05-25T04:17:39 | 2019-09-04T03:17:17 | Python | UTF-8 | Python | false | false | 393 | py | # Generated by Django 2.2.4 on 2019-09-12 07:40
from django.db import migrations
class Migration(migrations.Migration):
    """Drops the Hero.type field and deletes the HeroType model."""
    dependencies = [
        ('hero', '0003_auto_20190912_0036'),
    ]
    operations = [
        # Remove the referencing field first so the model can be deleted.
        migrations.RemoveField(
            model_name='hero',
            name='type',
        ),
        migrations.DeleteModel(
            name='HeroType',
        ),
    ]
| [
"toanlcz@necsv-hcm.com"
] | toanlcz@necsv-hcm.com |
4359e97122a31f5b3c7094707f3696ce0e80e163 | fff1b6228578ffc34b02c556c4a93f5b712364e9 | /twitter_load.py | d80470ce79c3da5a3c9a771fd7e18e966fcfe1f6 | [] | no_license | Leobouloc/twitter_analytics | 447e3393469896ac626dbdd6cc0d318285679394 | a36017fee97d93bb92fb388a5de703c4d0327a91 | refs/heads/master | 2021-04-26T16:42:54.585244 | 2016-09-19T00:10:38 | 2016-09-19T00:10:38 | 37,005,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,353 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 14 09:56:30 2015
@author: leo
"""
import pandas as pd
import os
from os.path import join, exists
from os import listdir
from time import sleep
import twitter
from access_keys import key, secret, key_access, secret_access
def init_new_table(path, list_text_name):
    """Build '<name>.csv' from '<name>.txt' ("name : twitter" per line).

    For every non-comment line (lines starting with '#' are skipped) the
    Twitter API is queried for the account's follower count, and the
    result is written to a ';'-separated CSV next to the text file.
    Python 2 only (print statements).
    """
    print 'creating csv table...'
    print path
    f = open (join(path, list_text_name))
    text = f.read()
    f.close()
    api = twitter.Api(consumer_key = key,
                      consumer_secret = secret,
                      access_token_key = key_access,
                      access_token_secret = secret_access)
    rappers_list = text.split('\n')
    rapper_dict_list = []
    for x in rappers_list:
        # Skip comment lines; NOTE(review): an empty line would raise
        # IndexError on x[0] -- the input file presumably has none.
        if x[0] != '#':
            print x
            rapper_dict = dict()
            rapper_dict['name'] = x.split(' : ')[0]
            rapper_dict['twitter'] = x.split(' : ')[1]
            # One API call per account to fetch the follower count.
            a = api.GetUser(screen_name = rapper_dict['twitter'])
            rapper_dict['followers_count'] = a.AsDict()['followers_count']
            rapper_dict_list += [rapper_dict]
#    import pdb
#    pdb.set_trace()
    # Assemble the DataFrame row by row, indexed by artist name.
    table = pd.DataFrame()
    for x in rapper_dict_list:
        print x
        table.loc[x['name'], 'twitter'] = x['twitter']
        table.loc[x['name'], 'followers_count'] = x['followers_count']
    table.to_csv(join(path, list_text_name.replace('.txt', '.csv')), sep = ';')
    print 'csv table created'
######################################################
def read_cursor(path_cursors, twitter_name):
    """Return the most recent pagination cursor saved for `twitter_name`.

    The cursor file stores one cursor per line; the last line is the
    latest. If the file cannot be read (typically because it does not
    exist yet) it is initialised with Twitter's start cursor '-1'.

    Returns the cursor as a string.
    """
    cursor_path = join(path_cursors, twitter_name + '_cursor.txt')
    try:
        # Narrowed from a bare `except:` so programming errors are not
        # silently turned into a cursor reset; `with` closes the handle
        # even if the read fails.
        with open(cursor_path, 'r') as f:
            cursor = f.read().split('\n')[-1]
    except IOError:
        with open(cursor_path, 'w') as f:
            f.write('-1')
        cursor = '-1'
    return str(cursor)
def append_cursor(path_cursors, twitter_name, cursor):
    """Append `cursor` on a new line of the per-account cursor file."""
    cursor_file = join(path_cursors, twitter_name + '_cursor.txt')
    with open(cursor_file, 'a') as handle:
        handle.write('\n' + str(cursor))
def append_ids(path_ids, twitter_name, ids):
    """Append one follower id per line to the per-account ids file.

    The previous try/except fallback duplicated the same code in 'w'
    mode; it was dead code because mode 'a' already creates a missing
    file, and the handle leaked if a write raised. A single `with`
    block keeps the exact same behavior.
    """
    ids_file = join(path_ids, twitter_name + '_ids.txt')
    with open(ids_file, 'a') as handle:
        for follower_id in ids:
            handle.write(str(follower_id) + '\n')
def step(path_cursors, path_ids, twitter_name):
    """Fetch one page of follower ids and persist ids + next cursor.

    Returns True while more pages remain (Twitter signals the end of
    pagination with cursor '0'). Python 2 only (print statement).
    """
    api = twitter.Api(consumer_key = key,
                      consumer_secret = secret,
                      access_token_key = key_access,
                      access_token_secret = secret_access)
    # Resume from the last cursor written for this account ('-1' = start).
    cursor = read_cursor(path_cursors, twitter_name)
    a = api.GetFollowerIDsPaged(screen_name = twitter_name, cursor = cursor)
    # a is (prev_cursor, next_cursor, data); the ids/next cursor live in
    # the third element as returned by python-twitter.
    ids = a[2]['ids']
    cursor = a[2]['next_cursor']
    append_ids(path_ids, twitter_name, ids)
    append_cursor(path_cursors, twitter_name, cursor)
    print cursor
    return str(cursor) != '0'
def load_all(to_load):
    """Crawl follower ids for every account in the `to_load` list.

    Reads (or builds via init_new_table) data/loading_queue/<to_load>.csv,
    then pages through each account's followers with step(), sleeping 60s
    between pages to respect the Twitter rate limit. Accounts whose cursor
    file already contains '0' (finished) are skipped.
    Python 2 only (print statement).
    """
    path = '.'
    path_cursors = join(path, 'data', 'cursors', to_load)
    path_ids = join(path, 'data', 'ids', to_load)
    path_queue = join(path, 'data', 'loading_queue')
    if not os.path.exists(path_cursors):
        os.makedirs(path_cursors)
    if not os.path.exists(path_ids):
        os.makedirs(path_ids)
##    while True:
#    try:
    # Build the CSV from the .txt list on first run.
    if to_load + '.csv' in listdir(path_queue):
        table = pd.read_csv(join(path_queue, to_load + '.csv'), sep = ';')
    else:
        init_new_table(path_queue, to_load + '.txt')
        table = pd.read_csv(join(path_queue, to_load + '.csv'), sep = ';')
    for twitter_name in table.twitter:
        print ' >>>', twitter_name
        # A cursor file containing a '0' line means pagination finished
        # on a previous run; any read failure means "not started yet".
        try:
            f = open(join(path_cursors, twitter_name + '_cursor.txt'), 'r')
            text = f.read()
            f.close()
            if '0' not in text.split('\n'):
                go = True
            else:
                go = False
        except:
            go = True
        # One API page per minute until step() reports the final cursor.
        while go:
            go = step(path_cursors, path_ids, twitter_name)
            sleep(60)
#    except:
#        sleep(60)
if __name__ == '__main__':
    # Entry point: crawl follower ids for the "top_100_fr" account list.
    to_load = 'top_100_fr'
    load_all(to_load)
| [
"Leo@bouloc.eu"
] | Leo@bouloc.eu |
777d0b5264527d6b4ef0bb7494f087c9d16875d8 | 17f61afad098620e108ebe07094e44165fac55e3 | /11_env/Scripts/static-script.py | c4331f1fffbe0c5a9fdeeb8d512e828303ae5632 | [] | no_license | LCM233/DjangoProjFirstDemo | d34ff3e69a3f6367040f00fd65d296dd691be5ee | d8faf9c113ab5628ce176d761799c347926a9d36 | refs/heads/main | 2023-01-05T12:05:04.009096 | 2020-11-01T06:21:57 | 2020-11-01T06:21:57 | 309,035,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | #!d:\learning_log\11_env\scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'static3==0.7.0','console_scripts','static'
__requires__ = 'static3==0.7.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Normalise argv[0]: strip the setuptools wrapper suffix
    # ("-script.py"/"-script.pyw"/".exe") so the entry point sees a
    # clean program name, then dispatch to the static3 console script
    # and exit with its return code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('static3==0.7.0', 'console_scripts', 'static')()
    )
| [
"805862573@qq.com"
] | 805862573@qq.com |
bbeeb11cca17267c9792370c3b4d60f7661610f6 | 59ac2b3a03e7bbe274b816b720ceab8b45e2c8c1 | /Codes/Chapter07/code/spline.py | cf923715e105ca1ef5d116b739cb9c2b5919a407 | [] | no_license | ltauxe/Essentials-of-Paleomagnetism | 5422317466c63675210f5369da6b6909c1b83a3a | 1ae9f73ef3880cbf248343969f54efcdbc746225 | refs/heads/master | 2022-01-13T15:49:04.277237 | 2021-12-28T17:41:04 | 2021-12-28T17:41:04 | 16,193,127 | 4 | 5 | null | 2015-07-18T10:08:41 | 2014-01-24T02:47:24 | PostScript | UTF-8 | Python | false | false | 5,284 | py | """
Cubic spline approximation class.
Last Modified 9/9/97 by Johann Hibschman <johann@physics.berkeley.edu>
Updated to numpy 11/16/06 by Lisa Tauxe
To create a default ("natural") spline, simply use sp = Spline(x,y).
To specify the slope of the function at either of the endpoints,
use the "low_slope" and "high_slope" keywords.
Example usage:
>>> x = arange(10, typecode=Float) * 0.3
>>> y = cos(x)
>>> sp = Spline(x, y)
>>> print sp(0.5), cos(0.5)
0.878364380585 0.87758256189
Uses "searchsorted" from the Numeric module, aka "binarysearch" in older
versions.
"""
import func
#from Numeric import *
import numpy
BadInput = "Bad xa input to routine splint."
class Spline(func.FuncOps):
    """Cubic-spline interpolant over the points (x_array, y_array).

    Second derivatives are precomputed once in calc_ypp (the classic
    Numerical-Recipes "spline" tridiagonal solve); evaluation then does a
    binary search plus the cubic formula ("splint"). Endpoint slopes may
    be clamped via low_slope/high_slope; otherwise a natural spline
    (zero second derivative at the ends) is built.
    x_array is assumed sorted ascending -- TODO confirm; call() relies
    on numpy.searchsorted.
    """
    def __init__(self, x_array, y_array, low_slope=None, high_slope=None):
        self.x_vals = x_array
        self.y_vals = y_array
        self.low_slope = low_slope
        self.high_slope = high_slope
        # must be careful, so that a slope of 0 still works...
        if low_slope is not None:
            self.use_low_slope = 1
        else:
            self.use_low_slope = 0 # i.e. false
        if high_slope is not None:
            self.use_high_slope = 1
        else:
            self.use_high_slope = 0
        self.calc_ypp()
    def calc_ypp(self):
        """Solve the tridiagonal system for the second derivatives y2_vals."""
        x_vals = self.x_vals
        y_vals = self.y_vals
        n = len(x_vals)
        # 'f' = single-precision work arrays.
        y2_vals = numpy.zeros(n, 'f')
        u = numpy.zeros(n-1, 'f')
        # Lower boundary: clamped slope or natural spline.
        if self.use_low_slope:
            u[0] = (3.0/(x_vals[1]-x_vals[0])) * \
                   ((y_vals[1]-y_vals[0])/
                    (x_vals[1]-x_vals[0])-self.low_slope)
            y2_vals[0] = -0.5
        else:
            u[0] = 0.0
            y2_vals[0] = 0.0 # natural spline
        # Forward sweep of the tridiagonal decomposition.
        for i in range(1, n-1):
            sig = (x_vals[i]-x_vals[i-1]) / \
                  (x_vals[i+1]-x_vals[i-1])
            p = sig*y2_vals[i-1]+2.0
            y2_vals[i] = (sig-1.0)/p
            u[i] = (y_vals[i+1]-y_vals[i]) / \
                   (x_vals[i+1]-x_vals[i]) - \
                   (y_vals[i]-y_vals[i-1])/ \
                   (x_vals[i]-x_vals[i-1])
            u[i] = (6.0*u[i]/(x_vals[i+1]-x_vals[i-1]) -
                    sig*u[i-1]) / p
        # Upper boundary: clamped slope or natural spline.
        if self.use_high_slope:
            qn = 0.5
            un = (3.0/(x_vals[n-1]-x_vals[n-2])) * \
                 (self.high_slope - (y_vals[n-1]-y_vals[n-2]) /
                  (x_vals[n-1]-x_vals[n-2]))
        else:
            qn = 0.0
            un = 0.0 # natural spline
        y2_vals[n-1] = (un-qn*u[n-2])/(qn*y2_vals[n-1]+1.0)
        rng = list(range(n-1))
        rng.reverse()
        for k in rng: # backsubstitution step
            y2_vals[k] = y2_vals[k]*y2_vals[k+1]+u[k]
        self.y2_vals = y2_vals
    # compute approximation
    def __call__(self, arg):
        "Simulate a ufunc; handle being called on an array."
        if type(arg) == func.ArrayType:
            return func.array_map(self.call, arg)
        else:
            return self.call(arg)
    def call(self, x):
        "Evaluate the spline, assuming x is a scalar."
        # if out of range, return endpoint
        if x <= self.x_vals[0]:
            return self.y_vals[0]
        if x >= self.x_vals[-1]:
            return self.y_vals[-1]
        pos = numpy.searchsorted(self.x_vals, x)
        h = self.x_vals[pos]-self.x_vals[pos-1]
        if h == 0.0:
            # NOTE(review): BadInput is a plain string; raising it works
            # only on Python 2 -- Python 3 would raise TypeError instead.
            raise BadInput
        a = (self.x_vals[pos] - x) / h
        b = (x - self.x_vals[pos-1]) / h
        # Standard cubic-spline evaluation from the precomputed y2_vals.
        return (a*self.y_vals[pos-1] + b*self.y_vals[pos] + \
               ((a*a*a - a)*self.y2_vals[pos-1] + \
                (b*b*b - b)*self.y2_vals[pos]) * h*h/6.0)
class LinInt(func.FuncOps):
    """Piecewise-linear interpolant over the points (x_array, y_array).

    Evaluation clamps to the endpoint values outside the x range.
    """
    def __init__(self, x_array, y_array):
        self.x_vals = x_array
        self.y_vals = y_array
    # compute approximation
    def __call__(self, arg):
        "Simulate a ufunc; handle being called on an array."
        if type(arg) == func.ArrayType:
            return func.array_map(self.call, arg)
        return self.call(arg)
    def call(self, x):
        "Evaluate the interpolant, assuming x is a scalar."
        # Clamp out-of-range arguments to the endpoint values.
        if x <= self.x_vals[0]:
            return self.y_vals[0]
        if x >= self.x_vals[-1]:
            return self.y_vals[-1]
        idx = numpy.searchsorted(self.x_vals, x)
        span = self.x_vals[idx] - self.x_vals[idx-1]
        if span == 0.0:
            raise BadInput
        # Weights of the bracketing knots (sum to 1).
        left_w = (self.x_vals[idx] - x) / span
        right_w = (x - self.x_vals[idx-1]) / span
        return left_w*self.y_vals[idx-1] + right_w*self.y_vals[idx]
def spline_interpolate(x1, y1, x2):
    """
    Given a function at a set of points (x1, y1), interpolate to
    evaluate it at points x2.
    """
    return Spline(x1, y1)(x2)
def logspline_interpolate(x1, y1, x2):
    """
    Given a function at a set of points (x1, y1), interpolate to
    evaluate it at points x2. The spline is fit in log-log space.
    """
    # Fix: the module imports `numpy` (the old `from Numeric import *`
    # was removed), so the bare names log/exp were undefined and this
    # function raised NameError when called.
    sp = Spline(numpy.log(x1), numpy.log(y1))
    return numpy.exp(sp(numpy.log(x2)))
def linear_interpolate(x1, y1, x2):
    """
    Given a function at a set of points (x1, y1), interpolate to
    evaluate it at points x2.
    """
    return LinInt(x1, y1)(x2)
| [
"ltauxe@ucsd.edu"
] | ltauxe@ucsd.edu |
4ea909c619f0a0ec9d4cdb49fcf15a6be6954502 | 32a13b8ad89bfd2cf8c5907361dc21403f53437b | /Expedia_Hotel_Recommendations/Expedia_Validation.py | 78a6c476a9b17fe57060d2d68bd491e8067dbf29 | [] | no_license | Misfyre/Competitions | 674e06ee518cf64d8fdff71a21d4b96d9cc63086 | 60165279f1dd8768208ba7688a807c1dfe53dbca | refs/heads/master | 2021-01-10T22:58:38.508796 | 2016-10-10T14:37:58 | 2016-10-10T14:37:58 | 69,694,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,119 | py | import os
import pandas as pd
from heapq import nlargest
from operator import itemgetter
import ml_metrics as metrics
import numpy as np
import math
# Locations of the time-based train/test split and of the generated
# cross-validation prediction file.
train_path = os.path.expanduser('~/Desktop/Expedia/CV_Splits/split_train.csv')
test_path = os.path.expanduser('~/Desktop/Expedia/CV_Splits/split_test.csv')
cv_path = os.path.expanduser('~/Desktop/Expedia/CVs/cv_test.csv')
def _add_weight(table, key, cluster, weight):
    """Accumulate `weight` for `cluster` in the nested dict `table[key]`."""
    row = table.setdefault(key, dict())
    if cluster in row:
        row[cluster] += weight
    else:
        row[cluster] = weight


def prepare_arrays_validation(train_path):
    """Stream the training split once and build the weighted lookup tables.

    Each table maps hash(feature-combination) -> {hotel_cluster: weight};
    `popular_hotel_cluster` is a flat {hotel_cluster: weight} fallback.
    Bookings are weighted far more heavily than clicks, and more recent
    months more heavily than older ones (the `append_*` weights).

    Returns the nine tables in the order the caller unpacks them.
    The original repeated the same nested-dict accumulate pattern nine
    times inline; it is factored into _add_weight, and columns that were
    parsed but never used are no longer unpacked.
    """
    print ('')
    print ('Generating Validation Score...')
    print ('Preparing Arrays...')
    print ('')

    best_hotels_od_ulc = dict()
    best_hotels_search_dest = dict()
    best_hotels_uid_miss = dict()
    best_hotels_country = dict()
    best_hotels_user_ci = dict()
    best_hotels_city_ci = dict()
    best_s00 = dict()
    best_s01 = dict()
    popular_hotel_cluster = dict()

    f = open(train_path, "r")
    f.readline()  # skip the CSV header
    total = 0
    while 1:
        line = f.readline().strip()
        total += 1
        if total % 2000000 == 0:
            print('Read {} lines...'.format(total))
        if line == '':
            break
        arr = line.split(",")

        # Only the columns actually used below are unpacked.
        user_location_city = arr[5]
        orig_destination_distance = arr[6]
        user_id = arr[7]
        is_package = arr[9]
        srch_ci = arr[11]
        srch_destination_id = arr[16]
        is_booking = float(arr[18])
        hotel_country = arr[21]
        hotel_market = arr[22]
        hotel_cluster = arr[23]

        # Date the event by check-in when available, else by click time.
        if srch_ci != 'nan':
            book_year = int(srch_ci[:4])
            book_month = int(srch_ci[5:7])
        else:
            book_year = int(arr[0][:4])
            book_month = int(arr[0][5:7])
        srch_ci_month = book_month

        # Months since 2012-12; keep only events dated 2013-01..2015-12.
        append_0 = ((book_year - 2012)*12 + (book_month - 12))
        if not (append_0 > 0 and append_0 <= 36):
            continue
        # Hand-tuned recency/booking weights.
        append_1 = pow(math.log(append_0), 1.35) * (-0.1 + 0.95*pow(append_0, 1.46)) * (3.5 + 17.6*is_booking)
        append_2 = 3 + 5.56*is_booking

        # best_s00: bookings with a known distance, keyed on the full
        # user/city/destination/country/market combination.
        if user_location_city != '' and orig_destination_distance != '' and user_id != '' and srch_destination_id != '' and hotel_country != '' and is_booking == 1:
            _add_weight(best_s00,
                        hash('user_id' + str(user_id) + 'user_location_city' + str(user_location_city) + 'srch_destination_id' + str(srch_destination_id) + 'hotel_country' + str(hotel_country) + 'hotel_market' + str(hotel_market)),
                        hotel_cluster, append_0)
        # best_s01: same but without the city in the key.
        if user_location_city != '' and orig_destination_distance != '' and user_id != '' and srch_destination_id != '' and is_booking == 1:
            _add_weight(best_s01,
                        hash('user_id' + str(user_id) + 'srch_destination_id' + str(srch_destination_id) + 'hotel_country' + str(hotel_country) + 'hotel_market' + str(hotel_market)),
                        hotel_cluster, append_0)
        # best_hotels_od_ulc: the strong "data leak" key (city + exact
        # origin-destination distance); clicks count too.
        if user_location_city != '' and orig_destination_distance != '':
            _add_weight(best_hotels_od_ulc,
                        hash('user_location_city' + str(user_location_city) + 'orig_destination_distance' + str(orig_destination_distance) + 'hotel_market' + str(hotel_market)),
                        hotel_cluster, append_0)
        # best_hotels_uid_miss: bookings where the distance is missing.
        if user_location_city != '' and orig_destination_distance == '' and user_id != '' and srch_destination_id != '' and hotel_country != '' and is_booking == 1:
            _add_weight(best_hotels_uid_miss,
                        hash('user_id' + str(user_id) + 'user_location_city' + str(user_location_city) + 'srch_destination_id' + str(srch_destination_id) + 'hotel_country' + str(hotel_country) + 'hotel_market' + str(hotel_market)),
                        hotel_cluster, append_0)
        # best_hotels_search_dest: destination-level table with the
        # strong recency weight append_1.
        if srch_destination_id != '' and hotel_country != '' and hotel_market != '':
            _add_weight(best_hotels_search_dest,
                        hash('srch_destination_id' + str(srch_destination_id) + 'hotel_country' + str(hotel_country) + 'hotel_market' + str(hotel_market)),
                        hotel_cluster, append_1)
        # NOTE: srch_ci_month is an int, so `!= ''` is always true; the
        # comparison is kept for fidelity with the original condition.
        if user_location_city != '' and hotel_market != '' and srch_ci_month != '' and is_booking == 1:
            _add_weight(best_hotels_user_ci,
                        hash('user_id' + str(user_id) + 'hotel_market' + str(hotel_market) + 'srch_ci_month' + str(srch_ci_month)),
                        hotel_cluster, append_0)
            _add_weight(best_hotels_city_ci,
                        hash('user_id' + str(user_id) + 'user_location_city' + str(user_location_city) + 'hotel_market' + str(hotel_market) + 'srch_ci_month' + str(srch_ci_month)),
                        hotel_cluster, append_0)
        # best_hotels_country: market-level fallback.
        if hotel_market != '':
            _add_weight(best_hotels_country,
                        hash('hotel_market' + str(hotel_market) + 'is_package' + str(is_package)),
                        hotel_cluster, append_2)

        # Global popularity (last-resort fallback).
        if hotel_cluster in popular_hotel_cluster:
            popular_hotel_cluster[hotel_cluster] += append_0
        else:
            popular_hotel_cluster[hotel_cluster] = append_0

    f.close()
    return best_s00, best_s01, best_hotels_uid_miss, best_hotels_od_ulc, best_hotels_search_dest, best_hotels_user_ci, best_hotels_city_ci, best_hotels_country, popular_hotel_cluster
def _fill_from(out, filled, ranked):
    """Emit cluster ids from `ranked` (iterable of (cluster, score) pairs,
    best first) that are not already in `filled`, stopping once five
    clusters have been written for the current row."""
    for cluster, _score in ranked:
        if cluster in filled:
            continue
        if len(filled) == 5:
            break
        out.write(' ' + cluster)
        filled.append(cluster)


def gen_submission_validation(test_path, cv_path, best_s00, best_s01, best_hotels_uid_miss, best_hotels_od_ulc, best_hotels_search_dest, best_hotels_user_ci, best_hotels_city_ci, best_hotels_country, popular_hotel_cluster):
    """Write `cv_path` with five predicted hotel clusters per test row.

    The lookup tables are consulted in decreasing order of specificity
    (exact city+distance match first, global popularity last); the
    totalN counters report how often each level was reached. The nine
    inline copies of the "write top-k clusters" loop are factored into
    _fill_from; candidate ordering (including the sorted/unsorted
    tie-break differences of the original) is preserved at each call.
    """
    print ('')
    out = open(cv_path, "w")
    f = open(test_path, "r")
    f.readline()  # skip header
    total = 0
    total1 = total2 = total3 = total4 = total5 = total6 = total7 = 0
    out.write("id,hotel_cluster\n")
    topclasters = nlargest(5, sorted(popular_hotel_cluster.items()), key=itemgetter(1))
    while 1:
        line = f.readline().strip()
        total += 1
        if total % 300000 == 0:
            print('Write {} lines...'.format(total))
        if line == '':
            break
        arr = line.split(",")
        user_location_city = arr[6]
        orig_destination_distance = arr[7]
        user_id = arr[8]
        is_package = arr[10]
        srch_ci = arr[12]
        # Month of check-in, falling back to the click timestamp.
        if srch_ci != 'nan':
            srch_ci_month = int(srch_ci[5:7])
        else:
            srch_ci_month = int(arr[0][5:7])
        srch_destination_id = arr[17]
        hotel_country = arr[20]
        hotel_market = arr[21]

        # NOTE(review): `id` below is the *builtin* function, so every row
        # gets the same literal id string. Kept byte-for-byte because the
        # downstream scorer only reads the hotel_cluster column; the real
        # row identifier was presumably intended here -- confirm before
        # changing the output format.
        out.write(str(id) + ',')
        filled = []

        # Level 1: exact city + origin-destination distance match.
        hsh = hash('user_location_city' + str(user_location_city) + 'orig_destination_distance' + str(orig_destination_distance) + 'hotel_market' + str(hotel_market))
        if hsh in best_hotels_od_ulc:
            _fill_from(out, filled, nlargest(5, sorted(best_hotels_od_ulc[hsh].items()), key=itemgetter(1)))
            total1 += 1

        # Level 2: user history when the distance is missing.
        if orig_destination_distance == '':
            hsh = hash('user_id' + str(user_id) + 'user_location_city' + str(user_location_city) + 'srch_destination_id' + str(srch_destination_id) + 'hotel_country' + str(hotel_country) + 'hotel_market' + str(hotel_market))
            if hsh in best_hotels_uid_miss:
                _fill_from(out, filled, nlargest(4, sorted(best_hotels_uid_miss[hsh].items()), key=itemgetter(1)))
                total2 += 1

        # Level 3: coarser user key, only when the finer s00 key misses.
        hsh = hash('user_id' + str(user_id) + 'user_location_city' + str(user_location_city) + 'srch_destination_id' + str(srch_destination_id) + 'hotel_country' + str(hotel_country) + 'hotel_market' + str(hotel_market))
        hsh1 = hash('user_id' + str(user_id) + 'srch_destination_id' + str(srch_destination_id) + 'hotel_country' + str(hotel_country) + 'hotel_market' + str(hotel_market))
        if hsh1 in best_s01 and hsh not in best_s00:
            _fill_from(out, filled, nlargest(4, sorted(best_s01[hsh1].items()), key=itemgetter(1)))
            total3 += 1

        # Level 4: user + market + check-in month, when the city-level
        # variant misses.
        hsh = hash('user_id' + str(user_id) + 'user_location_city' + str(user_location_city) + 'hotel_market' + str(hotel_market) + 'srch_ci_month' + str(srch_ci_month))
        hsh1 = hash('user_id' + str(user_id) + 'hotel_market' + str(hotel_market) + 'srch_ci_month' + str(srch_ci_month))
        if hsh1 in best_hotels_user_ci and hsh not in best_hotels_city_ci:
            _fill_from(out, filled, nlargest(4, sorted(best_hotels_user_ci[hsh1].items()), key=itemgetter(1)))
            total4 += 1

        # Level 5: destination-level table. Note: items are *not*
        # pre-sorted here (ties keep dict order), matching the original.
        hsh = hash('srch_destination_id' + str(srch_destination_id) + 'hotel_country' + str(hotel_country) + 'hotel_market' + str(hotel_market))
        if hsh in best_hotels_search_dest:
            _fill_from(out, filled, nlargest(5, best_hotels_search_dest[hsh].items(), key=itemgetter(1)))
            total5 += 1

        # Level 6: market-level fallback (also unsorted ties).
        hsh = hash('hotel_market' + str(hotel_market) + 'is_package' + str(is_package))
        if hsh in best_hotels_country:
            _fill_from(out, filled, nlargest(5, best_hotels_country[hsh].items(), key=itemgetter(1)))
            total6 += 1

        # Level 7 (always reached): globally most popular clusters.
        _fill_from(out, filled, topclasters)
        total7 += 1

        out.write("\n")
    out.close()
    f.close()  # fix: the input handle was never closed
    print ('')
    print ('Total 1: {} ...'.format(total1))
    print ('Total 2: {} ...'.format(total2))
    print ('Total 3: {} ...'.format(total3))
    print ('Total 4: {} ...'.format(total4))
    print ('Total 5: {} ...'.format(total5))
    print ('Total 6: {} ...'.format(total6))
    print ('Total 7: {} ...'.format(total7))
print ('Loading Data...')
cv_submission_path = os.path.expanduser('~/Desktop/Expedia/CVs/cv_test.csv')
split_test_path = os.path.expanduser('~/Desktop/Expedia/CV_Splits/split_test.csv')
submission_cv = pd.read_csv(cv_submission_path, usecols = ['hotel_cluster'])
split_validation = pd.read_csv(split_test_path, usecols = ['hotel_cluster'], dtype = {'hotel_cluster':np.int16})
print ('Data Loaded...')
preds = []
for i in range(submission_cv.shape[0]):
arr = submission_cv.hotel_cluster[i].split(" ")
arr = list(arr[1:10])
arr = list(map(int, arr))
preds.append(arr)
target_test = [[l] for l in split_validation["hotel_cluster"]]
print ('')
print ('Score:',metrics.mapk(target_test, preds, k=5))
best_s00, best_s01, best_hotels_uid_miss, best_hotels_od_ulc, best_hotels_search_dest, best_hotels_user_ci, best_hotels_city_ci, best_hotels_country, popular_hotel_cluster = prepare_arrays_validation(train_path)
gen_submission_validation(test_path, cv_path, best_s00, best_s01, best_hotels_uid_miss, best_hotels_od_ulc, best_hotels_search_dest, best_hotels_user_ci, best_hotels_city_ci, best_hotels_country, popular_hotel_cluster)
| [
"sarris.nick@verizon.net"
] | sarris.nick@verizon.net |
5c167ea06ee00c6c36dc8d37289c5b58cffbe7f8 | ba1d012f951b0d96c43805d79195bfa1d9c7892e | /backend/base/views.py | 0a8e5a9d6b0084b94b8e9089aa5c0022bb405b54 | [] | no_license | ramoncelestino/react-django-blog | 89c22bf00b35ae35ca0a1e27e0d74e79a4b3dea3 | 7820bfb50bb9bdeaa0dce1d95a3b460c3074fae6 | refs/heads/main | 2023-04-28T01:04:24.984416 | 2021-05-01T01:26:13 | 2021-05-01T01:26:13 | 361,168,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,605 | py | from django.shortcuts import render
from django.http import JsonResponse
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .models import Person, Post
from .serializer import PersonSerializer, PostSerializer
from rest_framework import status
@api_view(['GET'])
def getPeople(request):
    """List every Person, optionally filtered by a case-insensitive
    substring match on ``name`` (query parameter ``?name=...``)."""
    queryset = Person.objects.all()
    search_term = request.GET.get('name')
    if search_term is not None:
        queryset = queryset.filter(name__icontains=search_term)
    return Response(PersonSerializer(queryset, many=True).data)
@api_view(['GET'])
def getPerson(request, pk):
    """Retrieve a single Person by primary key.

    NOTE(review): Person.objects.get raises DoesNotExist (-> HTTP 500)
    for an unknown pk; only the happy path is handled, as before.
    """
    # Removed leftover debug print(request.GET).
    person = Person.objects.get(id=pk)
    person_serializer = PersonSerializer(person)
    return Response(person_serializer.data)
@api_view(['GET', 'POST'])
def getPosts(request):
    """List all posts (GET, newest first) or create one (POST).

    POST returns 201 with the serialized post on success, otherwise 400
    with the validation errors. Leftover debug print() calls removed.
    """
    if request.method == 'GET':
        # Field is spelled 'created_data' (sic) in the model; keep it.
        posts = Post.objects.order_by('-created_data')
        post_serializer = PostSerializer(posts, many=True)
        return Response(post_serializer.data)
    elif request.method == 'POST':
        serializer = PostSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET'])
def getPost(request, pk):
    """Return the serialized Post with primary key `pk`."""
    return Response(PostSerializer(Post.objects.get(id=pk)).data)
| [
"ramoncelesteramos@gmail.com"
] | ramoncelesteramos@gmail.com |
a7ab00c52a80767aa12f6a9ce3a7834cca71ba6a | 026a1329782ff71ae92a6f3d0403dfdefd8fa518 | /database/search.py | 154a1f7bfbf9e4d1a4e2e29fb95934c48bad2f7b | [] | no_license | pkray91/laravel | ad3e75b406e319e6ee6d04c0a134248d9cb09431 | a05015e2c98dc31c533244f618e56fc8a4599eb7 | refs/heads/master | 2021-02-04T23:36:12.074340 | 2020-02-28T09:40:43 | 2020-02-28T09:40:43 | 243,720,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | import mysql.connector
# Connect, run the LIKE query, print each row, then release resources.
con = mysql.connector.connect(host="localhost", user="root", passwd="", database="python_db")
try:
    mycur = con.cursor()
    try:
        # Static query (no user input is interpolated, so no injection risk).
        query = "select * from employee where name like '%a%'"
        mycur.execute(query)
        data = mycur.fetchall()
        for x in data:
            print(x)
    finally:
        mycur.close()
finally:
    # Fix: the original script never closed the cursor or the connection.
    con.close()
"pk.ray91@gmail.com"
] | pk.ray91@gmail.com |
1afc9dd6c1437c57d0331dddae1fbd27c3586c23 | f645f17a1cbf996ca45e7c3c77c0a0c02fd8fc8c | /star.py | 36ab633fa60b8801a685fde2bf407ced6507af36 | [] | no_license | prophylacticoder/star_project | 313b2137013537fec4434d304dc6c49a4d0b0f79 | 37597067bf43d009ec645eb72e878137eb94ab96 | refs/heads/master | 2020-12-09T23:13:51.747729 | 2020-01-12T19:02:52 | 2020-01-12T19:02:52 | 233,443,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 750 | py | import sys
import pygame
import settings
import image
from pygame.sprite import Group
def run_game():
    """Initialise pygame, open the window, and run the star display loop."""
    pygame.init()
    star_settings = settings.Settings()
    screen = pygame.display.set_mode((star_settings.window_width,
                                      star_settings.window_height))
    pygame.display.set_caption('Stars')
    star_group = Group()  # TODO: populate with stars; currently unused
    star_image = image.StarImage(screen)
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                # Fix: `sys.quit` does not exist (AttributeError at
                # runtime); exit the process properly.
                sys.exit()
            if event.type == pygame.KEYDOWN:
                # Fix: bare K_q was an undefined name; it must be
                # qualified with the pygame module.
                if event.key == pygame.K_q:
                    sys.exit()
        screen.fill(star_settings.bg_collor)
        star_image.blitme()
        pygame.display.flip()


run_game()
| [
"gui.logicalt@gmail.com"
] | gui.logicalt@gmail.com |
0d9c8f3dbbc299c369c4ac837ee49b743180106e | 084db5e25626908a5352339900f12f0000a25a4a | /crediteuropebank/items.py | 9770e32bfb15c3f15bbc7ea5982eda1f5486b696 | [] | no_license | hristo-grudev/crediteuropebank | f60a4c444b9aca06b2e44b699c2ce84703a3382d | 82646cef961dfb318f33ef6a9dd44801a945494a | refs/heads/main | 2023-03-10T08:29:04.156974 | 2021-02-25T09:30:57 | 2021-02-25T09:30:57 | 342,191,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | import scrapy
class CrediteuropebankItem(scrapy.Item):
    """Item holding one scraped Credit Europe Bank article."""
    title = scrapy.Field()        # headline text
    description = scrapy.Field()  # body / summary text
    date = scrapy.Field()         # publication date (raw scraped string)
| [
"hr.grudev@gmail.com"
] | hr.grudev@gmail.com |
0f7551cab7997a659016eb7fa9880927bdea6e9e | 5948858f8f82a86ce7102761fc5393223a358c10 | /area_tabs.py | 51efe1085b37e95c5b50580665f12c6fd8f2701b | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | yshtalen42/simui | 2f5349d748006d54eb5398b866a88f39be15b91f | cc6c8fd0d920b17a517e25ffe3b89568a2e30bfe | refs/heads/master | 2020-08-08T05:55:12.927864 | 2019-10-08T02:32:16 | 2019-10-08T02:32:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,827 | py | import os
import json
from collections import OrderedDict
from utilities import model_list
from pydm import Display
from pydm.widgets import PyDMEmbeddedDisplay, PyDMTabWidget
from qtpy.QtWidgets import QVBoxLayout, QScrollArea, QWidget, QLabel, QApplication
from qtpy.QtCore import Slot
class AreaTabs(Display):
    """PyDM display with one tab per sector for a given subsystem.

    Expected macros: 'area' (name of the initially selected sector) and
    'subsystem' (directory containing dev_list_display.py). Tab contents
    are loaded lazily the first time a tab is shown.
    """

    def __init__(self, parent=None, macros=None, args=None):
        # Fix: `args` previously defaulted to a mutable list ([]); use
        # None and substitute a fresh list so instances never share it.
        args = [] if args is None else args
        super(AreaTabs, self).__init__(parent=parent, macros=macros, args=args)
        self.selected_area = macros['area']
        self.subsystem = macros['subsystem']
        self.setWindowTitle("{} {}".format(self.selected_area.upper(), self.formatted_subsystem()))
        self.setup_ui()

    def ui_filename(self):
        # This display builds its UI in code; there is no .ui file.
        return None

    def ui_filepath(self):
        return None

    def formatted_subsystem(self):
        """Return the subsystem name capitalized and singularized
        (a trailing 's' is stripped)."""
        formatted_subsystem = self.subsystem
        formatted_subsystem = formatted_subsystem[0].upper() + formatted_subsystem[1:]
        if formatted_subsystem[-1] == "s":
            formatted_subsystem = formatted_subsystem[:-1]
        return formatted_subsystem

    @Slot(int)
    def tab_changed(self, new_tab_index):
        """Update the window title and lazily load the embedded display
        of the newly selected tab."""
        if self.window() is not self:
            self.setWindowTitle("{} {}".format(self.tab_widget.tabText(new_tab_index), self.formatted_subsystem()))
            self.window().update_window_title()
        emb = self.tab_widget.currentWidget().findChildren(PyDMEmbeddedDisplay)[0]
        if emb.embedded_widget is not None:
            return  # already loaded once; nothing to do
        top = os.path.dirname(os.path.realpath(__file__))
        emb.filename = os.path.join(top, self.subsystem, "dev_list_display.py")

    def setup_ui(self):
        """Create the title label and one lazily-loaded tab per sector
        listed in utilities/sectors.json."""
        self.setLayout(QVBoxLayout())
        self.titleLabel = QLabel(self)
        self.titleLabel.setText("{} Displays".format(self.formatted_subsystem()))
        self.layout().addWidget(self.titleLabel)
        self.tab_widget = PyDMTabWidget(self)
        self.layout().addWidget(self.tab_widget)
        top = os.path.dirname(os.path.realpath(__file__))
        with open(os.path.join(top, 'utilities', 'sectors.json')) as f:
            sectors = json.load(f)
        for i, sector in enumerate(sectors):
            page = QWidget()
            page.setLayout(QVBoxLayout())
            page.layout().setContentsMargins(0,0,0,0)
            emb = PyDMEmbeddedDisplay()
            emb.macros = json.dumps(sector)
            emb.loadWhenShown = False
            emb.disconnectWhenHidden = True
            page.layout().addWidget(emb)
            self.tab_widget.addTab(page, sector['name'])
            if sector['name'] == self.selected_area:
                self.tab_widget.setCurrentIndex(i)
                self.tab_changed(i)
        self.tab_widget.currentChanged.connect(self.tab_changed)
"mgibbs@slac.stanford.edu"
] | mgibbs@slac.stanford.edu |
0a4e3dfacfddb5d405649e73397541348816d65c | b72f9d9f0769265cdea2b8caff145af9c532ea09 | /practice/abc058_b.py | 284a2391e2511feb8baa6dffec908c7c9b7fbf63 | [] | no_license | ritzcr/AtCoder | 3335fefa8fb1989a0f9da80fe6d0902b46aa2d1f | 15097b0c2568ace653e5080d789047531e50edde | refs/heads/master | 2021-02-12T19:16:41.757421 | 2020-07-05T06:30:57 | 2020-07-05T06:30:57 | 244,620,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | O = input()
E = input()
out = ""
for x in range(len(O)):
out += O[x]
if len(E) > x:
out += E[x]
print(out)
| [
"ritz@freex.ltd"
] | ritz@freex.ltd |
577a172932777b1f11fa614417a3865aa6b82962 | b7b2c0f5449b8eba2855b255953cffa8a71b6604 | /getting_stock_data.py | e512fdad25587930cde204435b0ea393cab09ec1 | [] | no_license | SrividyaGanapathi/Greenwich-Stock-Project | 42d1da475d83defddb664b41d63697d57d299893 | b1c6de5035f425cbdb8b1ca67d5cfd31a78054f8 | refs/heads/master | 2022-12-14T12:25:05.947019 | 2020-08-18T23:42:53 | 2020-08-18T23:42:53 | 288,576,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,291 | py | # coding: utf-8
# In[1]:
import pandas as pd
import yfinance as yf # Need to install yfinance first before import will work
import datetime
import numpy as np
# In[2]:
datapath = "path_redacted/greenwich_master_backtestsamplepublic.csv"
dat = pd.read_csv(datapath, encoding='latin-1')
# In[3]:
first_job_posting_time_by_ticker = dat.groupby('ticker')[['post_date']].first()
first_job_posting_time_by_ticker.head()
# In[4]:
"""
This cell shows how to access a single stock's information from yfinance:
# Get the data for the stock Apple by specifying the stock ticker, start date, and end date (in the form YYYY-MM-DD)
data = yf.download('AAPL','2016-01-01','2018-01-01')
"""
yf.download('AAPL','2016-01-01','2018-01-01')
# In[25]:
full_ticker_data = pd.DataFrame(columns=['Open', 'High', 'Low', 'Close', 'Adj Close', 'Volume', 'Ticker'])
for ticker, date in first_job_posting_time_by_ticker.iterrows():
start_date = date.post_date.split()[0]
end_date = datetime.date.today().strftime("%Y-%m-%d")
try:
ticker_data = yf.download(ticker, start_date, end_date)
except:
ticker_data = pd.DataFrame(columns=['Open', 'High', 'Low', 'Close', 'Adj Close', 'Volume'])
for col in ticker_data.columns:
ticker_data[col].values[:] = 'No Data Available'
ticker_data['Ticker'] = ticker
full_ticker_data = pd.concat([full_ticker_data, ticker_data])
# In[41]:
# Remove date as the index, and rename the column that corresponds to date to 'date'
full_ticker_data = full_ticker_data.reset_index().rename({'index':'date'}, axis=1)
# In[60]:
mismatching_tickers = []
for ticker in first_job_posting_time_by_ticker.index:
if ticker not in full_ticker_data.Ticker.unique():
mismatching_tickers.append(ticker)
tickers_with_no_data = pd.DataFrame(np.nan, columns=full_ticker_data.columns, index=mismatching_tickers)
tickers_with_no_data.Ticker = mismatching_tickers
tickers_with_no_data = tickers_with_no_data.reset_index(drop=True)
tickers_with_no_data['data_unavailable'] = 1
full_ticker_data['data_unavailable'] = 0
# In[61]:
all_tickers_data = pd.concat([full_ticker_data,tickers_with_no_data])
# In[63]:
all_tickers_data.to_csv("path_redacted/greenwich_stock_data.csv")
# In[64]:
all_tickers_data
| [
"srividyaganapathi2020@u.northwestern.edu"
] | srividyaganapathi2020@u.northwestern.edu |
99bdfe9a366785e4aeec11ab5445ebd4ec604dc1 | 48d8558cdbf06c4c634dd1e7b4e531f9eb1a9202 | /jumeg/gui/wxlib/jumeg_gui_wxlib_experiment_template.py | 1b5e6b35bf63698400347fd8dc8c8411c38a6892 | [] | permissive | pravsripad/jumeg | f8a0c70c63c57437059465c5a741423f0ed931fb | 36b3b4144495819b9282207a74f63dbdb7a32c89 | refs/heads/master_dev | 2023-05-27T23:35:09.794020 | 2023-04-14T09:27:27 | 2023-04-14T09:27:27 | 18,686,190 | 0 | 1 | BSD-3-Clause | 2023-01-17T11:49:19 | 2014-04-11T19:31:41 | Python | UTF-8 | Python | false | false | 11,435 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 27 14:14:42 2018
@author: fboers
-------------------------------------------------------------------------------
Updates:
2018-08-27.001 new structure, refrac
"""
import wx
from pubsub import pub
from jumeg.base.template.jumeg_template_experiments import JuMEG_ExpTemplate
from jumeg.base.ioutils.jumeg_ioutils_functions import JuMEG_IOUtils
from jumeg.gui.wxlib.utils.jumeg_gui_wxlib_utils_controls import JuMEG_wxControlGrid
__version__= "2019.05.14.001"
class JuMEG_wxExpTemplate(wx.Panel):
"""
JuMEG_wxExpTemplate
select Experiment Template [M100] and stage directory [/data/xyz/exp/M100] from Exp Template folder
Paremeters:
-----------
parent widged
template_path: path to experiment templates
pubsub : use wx.pupsub msg systen <False>
example: pub.sendMessage('EXPERIMENT_TEMPLATE',stage=stage,experiment=experiment,TMP=template_data)
or button event from <ExpTemplateApply> button for apply/update
verbose : <False>
bg : backgroundcolor <grey90>
ShowExp : show Experiment combobox <True>
ShowScan : show Scan combobox <True>
ShowStage : show Stage combobox <True>
"""
def __init__(self,parent,name="JUMEG_WX_EXPERIMENT_TEMPLATE",**kwargs):
super().__init__(parent,name=name)
self.TMP = JuMEG_ExpTemplate(**kwargs)
self._ctrl_names = ["EXPERIMENT", "SCAN","STAGE","UPDATE"]
self.prefixes = ["BT.","COMBO."]
self._pubsub_messages={"UPDATE":"UPDATE","SELECT_EXPERIMENT":"SELECT_EXPERIMENT"}
self.IOUtils = JuMEG_IOUtils()
self.ShowExp = True
self.ShowScan = True
self.ShowStage = True
self._init(**kwargs)
def _find_obj(self,prefix,postfix):
"""
find obj by name like <pref>.<self.GetName()>.<postfix>
:param prefix:
:param postfix:
:return: obj
Example:
--------
self.SetName("TEST01")
obj=self._fwn("BT","EXPERIMENT")
obj.GetName()
BT.TEST01.EXPERIMENT
"""
return self.FindWindowByName(self.GetName().upper()+"."+prefix+"_"+postfix)
@property
def wxExpBt(self): return self._find_obj("BT","EXPERIMENT")
@property
def wxExpCb(self): return self._find_obj("COMBO","EXPERIMENT")
@property
def wxExpScanBt(self): return self._find_obj("FLBT","SCAN")
@property
def wxExpScanCb(self): return self._find_obj("COMBO","SCAN")
@property
def wxExpStageBt(self): return self._find_obj("FLBT","STAGE")
@property
def wxExpStageCb(self): return self._find_obj("COMBO","STAGE")
@property
def wxExpUpdateBt(self): return self._find_obj("BT","UPDATE")
@property
def verbose(self): return self.TMP.verbose
@verbose.setter
def verbose(self,v): self.TMP.verbose = v
def GetExperiment(self):
return self.wxExpCb.GetValue()
def GetScan(self):
if self.wxExpScanCb:
return self.wxExpScanCb.GetValue()
return None
def GetStage( self ):
if self.wxExpStageCb:
p = self.TMP.isPath(self.wxExpStageCb.GetValue())
if p : return p
pub.sendMessage("MAIN_FRAME.MSG.INFO",data="<Experiment Template GetStage> <stage> no such file or directory:\n" + self.wxExpStageCb.GetValue())
return
def GetExperimentPath( self ):
try:
return self.GetStage() + "/"+ self.GetExperiment()
except:
pass
# --- pubsub msg
#--- ToDO new CLS
def GetMessageKey( self, msg ): return self._pubsub_messages.get(msg.upper())
def SetMessageKey( self, msg, v ): self._pubsub_messages[msg] = v.upper()
def GetMessage( self, msg ): return self.GetName()+ "." +self.GetMessageKey(msg)
def send_message(self,msg,evt):
""" sends a pubsub msg, can change the message via <MessageKey> but not the arguments
"EXPERIMENT_TEMPLATE.UPDATE",stage=self.GetStage(),scan=self.GetScan(),data_type='mne'
"""
if self.pubsub:
#print("PUBSUB MSG: "+self.GetMessage(msg))
pub.sendMessage(self.GetMessage(msg),stage=self.GetExperimentPath(),scan=self.GetScan(),data_type='mne')
else: evt.Skip()
def _init(self, **kwargs):
"""" init """
self._update_from_kwargs(**kwargs)
self._wx_init()
self.update(**kwargs)
self._ApplyLayout()
def _wx_init(self):
""" init WX controls """
self.SetBackgroundColour(self.bg)
# --- PBS Hosts
ctrls = []
if self.ShowExp:
ctrls.append(["BT", "EXPERIMENT", "Experiment", "update experiment template list",None])
ctrls.append(["COMBO","EXPERIMENT", "COMBO_EXPERIMENT", [], "select experiment templatew",None])
if self.ShowScan:
ctrls.append(["FLBT", "SCAN", "SCAN", "select scan",None])
ctrls.append(["COMBO","SCAN", "SCAN", [], "select experiment template",None])
if self.ShowStage:
ctrls.append(["FLBT", "STAGE", "Stage", "select stage", None])
ctrls.append(["COMBO","STAGE", "Stage", [], "select experiment satge",None])
ctrls.append(["BT", "UPDATE","Update", "update",None])
for i in range(len(ctrls)):
ctrls[i][1] = self.GetName().upper()+"."+ctrls[i][0]+"_"+ctrls[i][1]
self.CtrlGrid = JuMEG_wxControlGrid(self, label=None, drawline=False, control_list=ctrls, cols=len(ctrls) + 4,AddGrowableCol=[1,3,5],set_ctrl_prefix=False)
self.CtrlGrid.SetBackgroundColour(self.bg_pnl)
self.CtrlGrid.EnableDisableCtrlsByName(self._ctrl_names,False,prefix=self.prefixes)
#--- bind CTRLs in class
self.Bind(wx.EVT_BUTTON, self.ClickOnCtrl)
self.Bind(wx.EVT_COMBOBOX,self.ClickOnCtrl)
def _update_from_kwargs(self,**kwargs):
self.verbose = kwargs.get("verbose",self.verbose)
self.pubsub = kwargs.get("pubsub",True)
self.bg = kwargs.get("bg", wx.Colour([230, 230, 230]))
self.bg_pnl = kwargs.get("bg_pnl",wx.Colour([240, 240, 240]))
#---
self.ShowExp = kwargs.get("ShowExp", self.ShowExp)
self.ShowScan = kwargs.get("ShowScan",self.ShowScan)
self.ShowStage = kwargs.get("ShowStage",self.ShowStage)
#---
def update_template(self,name=None):
"""
update template experiment name in combobox and template data
:param name of experiment eg:default
"""
if name:
if name in self.TMP.get_sorted_experiments():
#print("OK update_template: {}".format(name) )
self.TMP.template_name = name
self.wxExpCb.SetValue(self.TMP.template_name)
self.TMP.template_update( name )
def update(self,**kwargs):
""" update kwargs and widgets """
self._update_from_kwargs(**kwargs)
self.UpdateExperimentComBo()
self.UpdateScanStageComBo( experiment = self.wxExpCb.GetValue() )
#print("EXP TEMPLATE MSG: 162 "+self.GetName()+"_UPDATE")
#pub.sendMessage(self.GetName()+"_UPDATE",data=True)
def UpdateExperimentComBo(self,evt=None):
""" update experiment combobox if selected """
self.CtrlGrid.UpdateComBox(self.wxExpCb,self.TMP.get_experiments(issorted=True))
self.CtrlGrid.EnableDisableCtrlsByName(self._ctrl_names[0], True, prefix=self.prefixes) # experiment ctrl first
self.wxExpCb.SetToolTip(wx.ToolTip("Template path: {}".format(self.TMP.template_path) ))
if self.verbose:
wx.LogMessage( self.TMP.pp_list2str(self.TMP.template_name_list,head="Template path: "+self.TMP.template_path))
def UpdateScanComBo( self,scan_list=None ):
"""
:param scan_list:
:return:
"""
if not self.wxExpScanCb: return
if not scan_list:
scan_list = self.TMP.get_sorted_scans()
self.CtrlGrid.UpdateComBox( self.wxExpScanCb,scan_list )
if not self.wxExpStageCb: return
state = bool(len(scan_list))
if state:
self.wxExpStageCb.SetValue( scan_list[0] )
self.CtrlGrid.EnableDisableCtrlsByName(self._ctrl_names[1],state,prefix=self.prefixes)
def UpdateStageComBo(self,stage_list=None):
"""
:param stage_list:
:return:
"""
if not self.wxExpStageCb:return
if not stage_list:
stage_list = self.TMP.stages
stage_list = self.IOUtils.expandvars(stage_list)
self.CtrlGrid.UpdateComBox(self.wxExpStageCb, stage_list)
state = bool(len(stage_list))
if state:
self.wxExpStageCb.SetValue(self.wxExpStageCb.GetItems()[0])
self.CtrlGrid.EnableDisableCtrlsByName(self._ctrl_names[2:],state, prefix=self.prefixes)
def UpdateScanStageComBo( self,experiment=None ):
"""
fill scan
Parameter
---------
experiment name
"""
if experiment:
if not self.TMP.template_update( experiment ):
self.TMP.template_data_reset()
if self.wxExpScanCb:
self.UpdateScanComBo()
if self.wxExpStageCb:
self.UpdateStageComBo()
else:
if self.wxExpScanCb: self.CtrlGrid.UpdateComBox(self.wxExpScanCb, [])
if self.wxExpStageCb: self.CtrlGrid.UpdateComBox(self.wxExpStageCb,[])
self.EnableDisableCtrlsByName(self._ctrl_names[1:],status=False,prefix=self.prefixes)
#---
def show_stage_dlg(self):
dlg = wx.DirDialog(None,"Choose Stage directory","",wx.DD_DEFAULT_STYLE | wx.DD_DIR_MUST_EXIST)
dlg.SetPath(self.wxExpStageCb.GetValue())
if (dlg.ShowModal() == wx.ID_OK):
if self.TMP.template_data:
l = [dlg.GetPath()]
l.extend(self.TMP.stages)
self.UpdateComBo(self.wxExpStageCb,l)
dlg.Destroy()
#---
def ClickOnCtrl(self,evt):
""" click on button or combobox send event
"""
obj = evt.GetEventObject()
if not obj: return
#print("gui wxlib exptemp ClickOnCtrl: "+self.GetName())
#print( obj.GetName() )
#--- ExpComBo
if obj.GetName() == self.wxExpCb.GetName():
self.UpdateScanStageComBo( obj.GetValue() )
evt.Skip()
#--- ExpBt
elif obj.GetName() == self.wxExpBt.GetName():
self.update()
#evt.Skip()
#--- ExpStageBt start change Dir DLG
elif obj.GetName() == self.wxExpStageBt.GetName():
self.show_stage_dlg()
#--- ExpBt
elif obj.GetName() == self.wxExpUpdateBt.GetName():
self.send_message("UPDATE",evt)
#evt.Skip()
else:
evt.Skip()
#---
def _ApplyLayout(self):
""" Apply Layout via wx.GridSizers"""
#--- Label + line
vbox = wx.BoxSizer(wx.VERTICAL)
vbox.Add( self.CtrlGrid,1, wx.ALIGN_LEFT|wx.EXPAND|wx.ALL,2)
self.SetSizer(vbox)
self.Fit()
self.SetAutoLayout(1)
self.GetParent().Layout()
| [
"f.boers@fz-juelich.de"
] | f.boers@fz-juelich.de |
98af4b5854ff7b1356d0574d83ba270fced069fc | 1e80dc7f4e42fde653a7390a4cde3f2dc82100dd | /model/export_data.py | 8fbd2318ceb138625875432c1ecf840166a64649 | [] | no_license | junishitsuka/enjo | bee31f27ac5e9a3dae33f4f358eb0b2a8d8dda16 | 10518215552be011b44daf249ac89978b5c9c166 | refs/heads/master | 2021-01-25T05:34:38.488267 | 2015-01-16T07:14:41 | 2015-01-16T07:14:41 | 23,534,494 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,298 | py | #! /usr/bin/python
# coding: utf-8
import MySQLdb, sys
AAS = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'new']
BURST_LIMIT = 5
TOPIC = 'spirits'
FEATURE = 11124 # 18 word: 11106
# base line
# SQL = 'select b.retweet_count, b.content, a.follower, a.friend, a.favorite, a.entryCount from enjo_basedata as b left join aas_twitter_com.aas_twitter_com_%s as a on b.name = a.authorId left join all_users as au on au.name = b.name where retweet_id = "0" and a.authorId is NOT NULL and (b.retweet_count >= ' + str(BURST_LIMIT) + ' or (b.retweet_count <= 2 and b.retweet_count <> 0)) order by b.retweet_count desc'
# proposal method
SQL = 'select b.retweet_count, b.content, a.follower, a.friend, a.favorite, a.entryCount, au.com_cluster, au.com_degree, au.com_betweenness, au.com_closeness, au.com_eigen, au.com_pagerank, au.com_hub, au.com_authority, c.member_num, c.word from enjo_basedata as b left join aas_twitter_com.aas_twitter_com_%s as a on b.name = a.authorId left join all_users as au on au.name = b.name left join all_communities as c on c.community_id = au.community_id where topic = "kenketsu" and retweet_id = "0" and a.authorId is NOT NULL and (b.retweet_count >= ' + str(BURST_LIMIT) + ' or (b.retweet_count <= 2 and b.retweet_count <> 0)) order by b.retweet_count desc'
def get_tweet(sql):
cursor.execute(sql)
return cursor.fetchall()
def output(t):
output = []
# if t[0] >= BURST_LIMIT:
# output.append('1')
# else:
# output.append('0')
output.append(t[0])
output.append(str(t[1].count('#')))
output.append(str(t[1].count('http')))
output.append(str(t[1].count('@')))
output.append(str(len(t[1])))
if t[1] != '' and t[1][0] == '@':
output.append('1')
else:
output.append('0')
output.extend([str(x) for x in t[2:]])
return output
def main():
f = open('train.csv', 'w')
f.write(','.join(['"%s"' % str(i) for i in xrange(FEATURE)]) + '\n')
for aas in AAS:
tweets = get_tweet(SQL % aas)
for t in tweets:
f.write('%s\n' % ','.join(output(t)).replace('None', 'NA'))
if __name__ == '__main__':
connector = MySQLdb.connect(db="ishitsuka", host="localhost", user="ishitsuka", passwd="ud0nud0n", charset="utf8")
cursor = connector.cursor()
main()
| [
"ishitsuka.jun@gmail.com"
] | ishitsuka.jun@gmail.com |
52b1e429db9ff264272850ea168eeb1c2de376d2 | a3e926f8547f04184c79bdd28b0f886a77778700 | /Lib/fontbakery/reporters/ghmarkdown.py | b7376a00473362e6d22d640af646c8bc5277277e | [
"Apache-2.0"
] | permissive | m4rc1e/fontbakery | 0150a17547b53d6dc79e81407b0374950f90cd16 | da4c4b69abdd41314f9bdb58d9e47722e0680816 | refs/heads/master | 2023-08-02T14:18:00.077821 | 2018-10-17T01:47:51 | 2018-10-17T03:53:06 | 67,598,331 | 0 | 0 | Apache-2.0 | 2018-10-18T09:34:10 | 2016-09-07T10:52:14 | C | UTF-8 | Python | false | false | 3,976 | py | import os
from fontbakery.reporters.serialize import SerializeReporter
from fontbakery.checkrunner import Status
LOGLEVELS=["ERROR","FAIL","WARN","SKIP","INFO","PASS"]
class GHMarkdownReporter(SerializeReporter):
def __init__(self, loglevels, **kwd):
super(GHMarkdownReporter, self).__init__(**kwd)
self.loglevels = loglevels
def emoticon(self, name):
return {
'ERROR': ':broken_heart:',
'FAIL': ':fire:',
'WARN': ':warning:',
'INFO': ':information_source:',
'SKIP': ':zzz:',
'PASS': ':bread:',
}[name]
def html5_collapsible(self, summary, details):
return ("<details>\n"
"<summary>{}</summary>\n"
"{}\n"
"</details>\n").format(summary, details)
def log_md(self, log):
if not self.omit_loglevel(log["status"]):
return "* {} **{}** {}\n".format(self.emoticon(log["status"]),
log["status"],
log["message"])
else:
return ""
def check_md(self, check):
checkid = check["key"][1].split(":")[1].split(">")[0]
check["logs"].sort(key=lambda c: c["status"])
logs = "".join(map(self.log_md, check["logs"]))
github_search_url = ("[{}](https://github.com/googlefonts/fontbakery/"
"search?q={})").format(checkid, checkid)
return self.html5_collapsible("{} <b>{}:</b> {}".format(self.emoticon(check["result"]),
check["result"],
check["description"]),
f"\n* {github_search_url}\n{logs}")
def omit_loglevel(self, msg):
return self.loglevels and (self.loglevels[0] > Status(msg))
def get_markdown(self):
checks = {}
family_checks = []
data = self.getdoc()
num_checks = 0
for section in data["sections"]:
for cluster in section["checks"]:
if not isinstance(cluster, list):
cluster = [cluster]
num_checks += len(cluster)
for check in cluster:
if self.omit_loglevel(check["result"]):
continue
if "filename" not in check.keys():
# That's a family check!
family_checks.append(check)
else:
key = os.path.basename(check["filename"])
if key not in checks:
checks[key] = []
checks[key].append(check)
md = "## Fontbakery report\n\n"
if family_checks:
family_checks.sort(key=lambda c: c["result"])
md += self.html5_collapsible("<b>[{}] Family checks</b>".format(len(family_checks)),
"".join(map(self.check_md, family_checks)) + "<br>")
for filename in checks.keys():
checks[filename].sort(key=lambda c: LOGLEVELS.index(c["result"]))
md += self.html5_collapsible("<b>[{}] {}</b>".format(len(checks[filename]),
filename),
"".join(map(self.check_md, checks[filename])) + "<br>")
if num_checks != 0:
summary_table = "### Summary\n\n" + \
("| {} " + " | {} ".join(LOGLEVELS) + " |\n").format(*[self.emoticon(k) for k in LOGLEVELS]) + \
("|:-----:|:----:|:----:|:----:|:----:|:----:|\n"
"| {} | {} | {} | {} | {} | {} |\n"
"").format(*[data["result"][k] for k in LOGLEVELS]) +\
("| {:.0f}% | {:.0f}% | {:.0f}% | {:.0f}% | {:.0f}% | {:.0f}% |\n"
"").format(*[100*data["result"][k]/num_checks for k in LOGLEVELS])
md += "\n" + summary_table
omitted = [l for l in LOGLEVELS if self.omit_loglevel(l)]
if omitted:
md += "\n" + \
"**Note:** The following loglevels were omitted in this report:\n" + \
"".join(map("* **{}**\n".format, omitted))
return md
| [
"fsanches@metamaquina.com.br"
] | fsanches@metamaquina.com.br |
ec2f7e998921e51d00c9adda6e733edc6486ebca | 048505015d37f2c7c91a44d9fbf7162066650c89 | /moneta/repository/migrations/0006_auto_20171104_1444.py | b156a8f615f18e9960b4dfec61f10a3504ab927f | [
"LicenseRef-scancode-cecill-b-en"
] | permissive | d9pouces/Moneta | 4c8c4943da75e34425d30f76e2768e95a558c391 | 8ccfbadeedd00e080a0cc17a78ba4c48bced52e9 | refs/heads/master | 2020-05-22T01:47:32.204364 | 2019-08-18T14:27:27 | 2019-08-18T14:27:27 | 24,261,346 | 6 | 5 | null | 2017-01-29T23:21:53 | 2014-09-20T11:39:07 | Python | UTF-8 | Python | false | false | 464 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-11-04 13:44
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('repository', '0005_auto_20171104_1300'),
]
operations = [
migrations.AlterModelOptions(
name='repository',
options={'verbose_name': 'repository', 'verbose_name_plural': 'repositories'},
),
]
| [
"github@19pouces.net"
] | github@19pouces.net |
5fbf0df80d5fddd5a0c95f9ce559d3bb2dcbacd8 | 640919b57421463cb0b0c0bd8fa6e9fb1b070276 | /all algorithm here/heapsort.py | 0a9a6977d743dc4ff4aa21fb7d0e3e5766b71a00 | [] | no_license | sazzadahmed/algorithm_implementation | 91267879dd45d7ce15bcf231b8eb38acd3e14468 | bf34a342092de8f79c0e72f64b00f1089229c597 | refs/heads/master | 2021-08-26T04:43:00.427634 | 2017-11-21T16:42:40 | 2017-11-21T16:42:40 | 110,460,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,023 | py | def heapify(arr,i,n):
'''
this is simple max heap code
:param arr:
:param i:
:param n:
:return:
'''
largest=i
left=2*i
right=2*i+1
if left<n and arr[left]>arr[largest]:
largest=left
if right<n and arr[right]>arr[largest]:
largest=right
if i!=largest:
arr[i],arr[largest]=arr[largest],arr[i]
heapify(arr,largest,n)
def heap_sort(arr,n):
'''
this is simple headsort code implementation
here a given array first heapfy it into max heap
then swap the last element and descrese it size again
with root and heapfy it again
and then continue this process untill full sort
:param arr:
:param n:
:return:
'''
for i in range(n/2,-1,-1):
heapify(arr,i,n)
for i in range(n-1,-1,-1):
arr[i],arr[0]=arr[0],arr[i]
heapify(arr,0,i)
arr=[1,2,1,3,52354,23,241,5445,465,456,3532,52334,3,3,546,34,2,1,5,34,12,34]
n=len(arr)
heap_sort(arr,n)
for i in range(n):
print(arr[i],' ') | [
"sazzadahmed41@gmail.com"
] | sazzadahmed41@gmail.com |
8e5407c9b58fcc3babfae6382aca08da2eacb8f3 | 1b982b7d70e03d3312b3ea6f6b22d44e5c8ecdcc | /m3_3_breakTest_003.py | 118f01ee90575fab0882093a2b37ad53087fab9e | [] | no_license | aitiwa/pythonTraining | 2cd3d897c607aa0d4469f34226964d821aaecd7a | 3dabd06105e34c119816f65fc59e81b3bf347116 | refs/heads/master | 2020-06-09T00:17:34.803649 | 2019-07-16T14:57:49 | 2019-07-16T14:57:49 | 193,332,850 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | print("caseStudy: 반복문과 Break")
print("m3_3_breakTest_003.py")
print()
print("1. a, i 변수 선언과 초기화")
print(' a = 0 ')
a = 0
print()
print("2. 중첩 for 반복문과 조건문")
print(' for i in range(10): ')
print(' print("i = ", i) ')
print(' for j in range(10): ')
print(' a = j ')
print(' print(a) ')
print(' ')
print(' if(a == 5): ')
print(' break ')
print(' print("a = ", a) ')
print()
print("3. 결과값->")
for i in range(10):
print("i = ", i)
for j in range(10):
a = j
print(a)
if(a == 5):
break
# if (a == 5):
# break
print("a = ", a)
print()
print('4. 프로그램 종료')
print(' print("Program End")')
print(" Program End") | [
"37678038+aitiwa@users.noreply.github.com"
] | 37678038+aitiwa@users.noreply.github.com |
ec768edb94d455bc1a76166a4c086a76a8aac1ba | 246c8ed0bdfe780668702f6624b5a48a22569faf | /core/core/util/files.py | 88f30240348cf5fbec40839f63ff9062ff80cb9b | [
"MIT"
] | permissive | HALOCORE/PapersHelper | 7e7e7d9f2fc5dbb76c094c50b1ce50597f843d3e | 4f0bc3ce1df084d05c8ef37dda02fa9f78b4ec36 | refs/heads/master | 2021-09-27T07:57:44.852497 | 2021-04-11T12:54:47 | 2021-04-11T12:54:47 | 232,870,678 | 0 | 0 | MIT | 2021-09-22T18:22:17 | 2020-01-09T17:52:43 | Python | UTF-8 | Python | false | false | 3,518 | py | import os
import re
from . import pdf
from . import tool
from . import datart
def filetree_get(params=None):
filetree = datart.file_tree_cache_read("")
return {"filetree": filetree}
def filetree_update(params=None):
filetree = datart.file_tree_cache_update("")
return {"filetree": filetree}
def file_search_get(params=None):
files = datart.file_search()
return {"files": files}
def file_summary_get(params):
fileid = params['fileid']
summary = datart.file_summary_read(fileid)
if summary is None:
summary = file_summary_gen(params)["summary"]
summary = datart.file_summary_write(fileid, summary)
return {"summary": summary}
def file_summary_check_get(params):
fileid = params['fileid']
return {}
def file_refs_get(params):
fileid = params['fileid']
refs = datart.file_refs_read(fileid)
if refs is None:
refs = file_refs_gen(params)["refs"]
refs = datart.file_refs_write(fileid, refs)
return {"refs": refs}
def file_refs_link_get(params):
fileid = params['fileid']
def file_refs_link_update(params):
fileid = params['fileid']
def file_check_get(params):
fileid = params['fileid']
def file_fulltxt_get(params):
fileid = params['fileid']
fulltxt = datart.file_fulltxt_read(fileid)
if fulltxt is None:
fulltxt = pdf.pdf_text_get(params)['content']
fulltxt = datart.file_fulltxt_write(fileid, fulltxt)
return {"fulltxt": fulltxt}
###########################################
def file_summary_gen(params):
summary = {
"confirmed": False,
"title": None,
"source": None,
"year": None,
"authors": [],
"institude": [],
"keywords": []
}
fileid = params["fileid"]
srcyear = fileid.split('/')[-1].split('-')[0]
summary["year"] = srcyear[-4:]
summary["source"] = srcyear[:-4]
txt = file_fulltxt_get(params)["fulltxt"]
lines = txt.splitlines()
summary["title"] = lines[0]
absidx = tool.first_line_index(
lines, lambda x : x.lower().find("abstract") >= 0)
if absidx != -1:
before_abs = tool.lines_trim(lines[:absidx])
summary["institude"] = before_abs
keysidx = tool.first_line_index(
lines, lambda x : x.lower().find("keywords") >= 0)
if absidx != -1:
kws = (lines[keysidx] + lines[keysidx+1]).replace("Keywords", "").split(";")
summary["keywords"] = tool.strip_elems(kws)
return {"summary": summary}
def file_refs_gen(params):
refs = {
"confirmed": False,
"entries": []
}
txt = file_fulltxt_get(params)["fulltxt"]
lines = txt.splitlines()
absidx = tool.first_line_index(
lines, lambda x : x.lower().endswith("references"))
reflines = tool.lines_trim(lines[absidx + 1:])
refstrs = []
refnos = []
current_ref = ""
for refline in reflines:
rl = refline.strip()
if rl == "": continue
pos = re.search(r"\[\d{1,3}\]", rl)
if pos != None:
if current_ref != "": refstrs.append(current_ref)
refnos.append(int(pos.group(0)[1:-1]))
current_ref = rl[len(pos.group(0)):]
else:
current_ref += rl + " "
if current_ref != "": refstrs.append(current_ref)
if len(refnos) != len(refstrs): print("# [WARN] references counting error.")
refs["entries"] = [(no, s) for no, s in zip(refnos, refstrs)]
return {"refs": refs}
| [
"bwcs@uchicago.edu"
] | bwcs@uchicago.edu |
03bae9b4d97783d87ddf203221a910c2c7ecd05e | ed6294b44f01df941facc45fcf4b7c50e16dd428 | /first_app/migrations/0002_auto_20210921_2320.py | 0ca1434328f26692a5e0a310364b8761a97c718b | [] | no_license | moh-sagor/Django-2nd-learning | 37a0fc0f082306ee6990efb5f1fa366deefd1134 | a8839103b6ca1ce5482a046943ef4f2cef86bca6 | refs/heads/master | 2023-08-23T05:57:15.309679 | 2021-10-15T16:46:09 | 2021-10-15T16:46:09 | 417,570,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 872 | py | # Generated by Django 3.2.7 on 2021-09-21 17:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('first_app', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='album',
name='release_date',
field=models.DateField(default=None),
),
migrations.AddField(
model_name='album',
name='star_num',
field=models.IntegerField(choices=[(1, 'Wrost'), (2, 'Bad'), (3, 'Not Bad'), (4, 'Good'), (5, 'Excellent!')], default=None),
),
migrations.AlterField(
model_name='album',
name='artist',
field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='first_app.musician'),
),
]
| [
"sagorhassain4@gmail.com"
] | sagorhassain4@gmail.com |
1b8fb8d7b10372b608afaa5f628de8f096425737 | f9c2f77fea6ffdf820867f02805c7a037627f235 | /PythonBasics/03_Volleyball.py | 0f1cb9aab9fd10836c1d9eb2eb0e9fc07e0f77e6 | [] | no_license | Nikolov-A/SoftUni | 6f253694757f195a5c0df8f24b12dbb4ad4d76c6 | 351b0b970da84e5d930a235fce76853c4dcaa365 | refs/heads/master | 2022-01-12T13:57:11.842394 | 2019-07-07T10:53:48 | 2019-07-07T10:53:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | from math import floor
year = input()
holiday = int(input())
weekend = int(input())
games_in_sofia = (48 - weekend) * (3 / 4)
games_in_home = weekend
games_in_holiday_sofia = holiday * (2 / 3)
total_games = games_in_sofia + games_in_home + games_in_holiday_sofia
if year == "leap":
additional_games = 0.15 * total_games
total_games = additional_games + total_games
print(f"{floor(total_games)}")
else:
print(f"{floor(total_games)}")
| [
"alexander.nikolov092@gmail.com"
] | alexander.nikolov092@gmail.com |
5dc989d74aca9e4a4f3ecdab57efbe8cd2f76af1 | 556cfa6d4ef67abbf3a01a53c5a40a2287524c02 | /imageCnnAndPooling.py | b464629cd1ed13046e779edbf69607c27430b8a1 | [] | no_license | xiahuadong1981/FingerveinRecognitionModel4 | f8c2c13772d788235839a5bc0a11345a3b5e3eab | 007ff5a3be514c90d729d50eb9d7648f40a5c0ad | refs/heads/master | 2020-08-19T20:33:29.458639 | 2018-04-12T07:14:19 | 2018-04-12T07:14:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,159 | py | # mnist cnn 99.2%
import tensorflow as tf
import os
import numpy as np
import scipy.io
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' #close the warning
# -------------------- load data --------------------
# Leftover from the MNIST tutorial this script was adapted from:
# from tensorflow.examples.tutorials.mnist import input_data
# mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Dataset dimensions: 40x80-pixel images flattened to 3200 features,
# 50 classes, 250 training samples, 50 test samples.
featurelNum = 40*80
classNum = 50
trainNum = 250  # NOTE(review): never used below
testNum = 50  # NOTE(review): never used below
nnlambda = 0.009  # NOTE(review): never used below (no regularization is applied)
# Features arrive flattened as one long vector; reshape to
# (features, samples) and transpose to (samples, features).
train = 'imageTrainData.mat'
trainData = scipy.io.loadmat(train)['trainFeatures'].ravel()
trainData = np.reshape(trainData,[featurelNum ,trainNum ])
trainData = np.transpose(trainData)
trainl = 'trainLabel.mat'
trainLabel = scipy.io.loadmat(trainl)['trainLabel'].ravel()
trainLabel = np.reshape(trainLabel,[trainNum, classNum ])
test = 'imageTestData.mat'
testData = scipy.io.loadmat(test)['testFeatures'].ravel()
testData = np.reshape(testData,[featurelNum ,classNum ])
testData = np.transpose(testData)
testl = 'testLabel.mat'
testLabel = scipy.io.loadmat(testl)['testLabel'].ravel()
testLabel = np.reshape(testLabel,[50,classNum ])
# ---------------- construct session and placeholders ----------------
sess = tf.InteractiveSession()
# x: flattened input images (batch, 3200); y_: one-hot labels (batch, 50).
x = tf.placeholder("float", shape=[None, 3200])
y_ = tf.placeholder("float", shape=[None, 50])
# NOTE(review): W and b are never used below — they look like leftovers
# from a softmax-regression version superseded by the CNN layers.
W = tf.Variable(tf.zeros([3200, 50]))
b = tf.Variable(tf.zeros([50]))
# sess.run(tf.initialize_all_variables())
# --- weight initialization helpers: a small positive stddev/bias is used
# to keep ReLU units from starting dead (0 gradients) ---
def weight_variable(shape):
    """Weight tensor of the given shape, truncated-normal init (stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Bias tensor of the given shape, initialized to the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
# --- Convolution and pooling helpers ---
def conv2d(x, W):
    """2-D convolution with stride 1 in every dimension and SAME padding."""
    unit_stride = [1, 1, 1, 1]
    return tf.nn.conv2d(x, W, strides=unit_stride, padding='SAME')
# NOTE(review): despite the name, this pools over 4x4 windows with stride 4,
# shrinking height and width by a factor of 4 per call (the downstream
# 3*5*64 flatten depends on this, so the 4x4 size appears intentional).
def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 4, 4, 1], strides=[1, 4, 4, 1],
                          padding='SAME')
# --- First convolutional layer ---
# 5x5 patches, 1 input channel -> 32 feature maps.
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
# Reshape the flat 3200-vector back to a 4-D image tensor:
# (batch, height=40, width=80, channels=1).
x_image = tf.reshape(x, [-1, 40, 80, 1])
# Convolve, add bias, ReLU, then 4x4 max-pool: 40x80 -> 10x20.
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
# --- Second convolutional layer ---
# 10x20 patches, 32 -> 64 feature maps.
W_conv2 = weight_variable([10, 20, 32, 64])
b_conv2 = bias_variable([64])
# Second conv + 4x4 pool: 10x20 -> 3x5 (SAME padding rounds 10/4 up to 3).
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
# --- Densely connected layer ---
# Pooled maps are 3x5x64 values; fully connected layer of 1024 ReLU units.
W_fc1 = weight_variable([3*5*64, 1024])
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 3*5*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# --- Dropout (reduces overfitting) ---
# keep_prob is the probability of keeping a unit; fed 1.0 at eval time,
# 0.5 while training.
keep_prob = tf.placeholder("float")
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# --- Readout layer: 1024 -> 50 class scores ---
W_fc2 = weight_variable([1024, 50])
b_fc2 = bias_variable([50])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
# --- Train and evaluate the model ---
# Cross-entropy on the softmax output, minimized with Adam (lr=1e-4).
cross_entropy = -tf.reduce_sum(y_*tf.log(y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
sess.run(tf.global_variables_initializer())
# Full-batch training (no mini-batches); accuracy reported every 100 steps.
for i in range(20000):
    # batch = mnist.train.next_batch(50)
    if i % 100 == 0:
        train_accuracy = accuracy.eval(feed_dict={
            x: trainData , y_: trainLabel, keep_prob: 1.0})
        print "setup_%d,_training_accuracy%g" % (i, train_accuracy)
        print "test_accuracy_%g" % accuracy.eval(feed_dict={
            x: testData, y_: testLabel, keep_prob: 1.0})
    train_step.run(feed_dict={x: trainData, y_: trainLabel, keep_prob: 0.5})
| [
"a963778829@outlook.com"
] | a963778829@outlook.com |
d1e937b3359d9a3c2805617112c8a151e694b813 | dd2ba3c51479ec64683806e020febabfedd463de | /deliverance/nav.py | 307a6d8b6a5a6236f3efef104ab56c5b8fcb9f9f | [
"MIT"
] | permissive | chuanzhidong/whole-foods-deliverance | 071cb673564bee617b6869e7fc7255bd71965f14 | 83f64fa76b50ef65fda1d69f3661820dd92d1f3f | refs/heads/master | 2022-04-18T05:50:57.977240 | 2020-04-18T06:59:10 | 2020-04-18T06:59:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,266 | py | import logging
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from config import BASE_URL, Patterns
from deliverance.exceptions import (NavigationException, RouteRedirect,
UnhandledRedirect)
from deliverance.utils import (wait_for_element, click_when_enabled, jitter,
remove_qs, wait_for_auth, handle_oos)
log = logging.getLogger(__name__)
def handle_redirect(driver, ignore_oos, valid_dest=None, timeout=None,
                    route=None):
    """React to an unexpected browser redirect based on the current URL.

    :param driver: selenium WebDriver to inspect and wait on
    :param ignore_oos: passed through to handle_oos() on out-of-stock pages
    :param valid_dest: optional list of URL fragments that count as valid
        destinations when the redirect target is unknown
    :param timeout: seconds to wait for a redirect to one of *valid_dest*
    :param route: the Route being navigated, if any; used to detect being
        bounced back to the route's starting URL
    :raises RouteRedirect: when redirected back to the route start
    :raises UnhandledRedirect: when the redirect cannot be classified, or
        the wait for a valid destination times out
    """
    current = remove_qs(driver.current_url)
    log.warning("Redirected to: '{}'".format(current))
    if Patterns.AUTH_URL in current:
        # Login page: block until the user authenticates.
        wait_for_auth(driver)
    elif Patterns.OOS_URL in current:
        # Out-of-stock interstitial.
        handle_oos(driver, ignore_oos)
    elif route and current == route.route_start:
        # Bounced back to the start of the route; refresh only if no
        # waypoint had been passed yet, then let the caller restart.
        if not route.waypoints_reached:
            driver.refresh()
        raise RouteRedirect()
    elif valid_dest and timeout:
        log.warning(
            'Handling unknown redirect (timeout in {}s)'.format(timeout)
        )
        try:
            WebDriverWait(driver, timeout).until(
                EC.url_matches('|'.join(valid_dest))
            )
        except TimeoutException:
            raise UnhandledRedirect(
                "Timed out waiting for redirect to a valid dest\n"
                "Current URL: '{}'".format(driver.current_url)
            )
    else:
        raise UnhandledRedirect()
class Waypoint:
    """One navigation step: an element locator to click, and the path
    (relative to the site base URL) expected afterwards. *optional*
    marks steps that may legitimately be skipped."""

    def __init__(self, locator, dest, optional=False):
        self.locator = locator
        self.dest = dest
        self.optional = optional

    def __str__(self):
        return f"<Waypoint {self.locator} -> '{self.dest}'>"
class Route:
    """An ordered sequence of Waypoint objects to navigate in order.

    *route_start* is the absolute URL the route begins from, *parser_args*
    holds the parsed CLI arguments (``ignore_oos`` is read while handling
    redirects) and the remaining positional arguments are the waypoints.
    """

    def __init__(self, route_start, parser_args, *args):
        self.route_start = route_start
        self.args = parser_args
        self.waypoints = args
        # Number of waypoints passed during the most recent navigate().
        self.waypoints_reached = 0

    def __len__(self):
        return len(self.waypoints)

    def __str__(self):
        return "<Route beginning at '{}' with {} stops>".format(
            self.route_start, len(self))

    def navigate_waypoint(self, driver, waypoint, timeout, valid_dest):
        """Click the waypoint's element and verify the browser landed
        either on the waypoint's dest or on one of *valid_dest*.

        :raises NavigationException: when neither destination was reached
        """
        log.info('Navigating ' + str(waypoint))
        elem = wait_for_element(driver, waypoint.locator, timeout=timeout)
        jitter(.4)
        click_when_enabled(driver, elem)
        try:
            # Wait for the clicked element to go stale (page navigated).
            WebDriverWait(driver, timeout).until(
                EC.staleness_of(elem)
            )
        except TimeoutException:
            pass
        current = remove_qs(driver.current_url)
        if current == BASE_URL + waypoint.dest:
            log.info("Navigated to '{}'".format(waypoint.dest))
        elif valid_dest and any(d in current for d in valid_dest):
            log.info("Navigated to valid dest '{}'".format(current))
        else:
            raise NavigationException(
                "Navigation to '{}' failed".format(waypoint.dest)
            )

    def navigate(self, driver, timeout=20):
        """Walk every waypoint in order, handling redirects along the way."""
        log.info('Navigating ' + str(self))
        self.waypoints_reached = 0
        if remove_qs(driver.current_url) != self.route_start:
            log.info('Navigating to route start: {}'.format(self.route_start))
            driver.get(self.route_start)
        for position, waypoint in enumerate(self.waypoints):
            # Destinations of all later waypoints are acceptable landing
            # spots (e.g. when the site skips an optional step).
            # enumerate() replaces the original list.index() lookup, which
            # re-scanned the list on every pass and returned the wrong
            # slice when a route contained duplicate waypoints.
            valid_dest = [wp.dest for wp in self.waypoints[position + 1:]]
            try:
                if remove_qs(driver.current_url) == BASE_URL + waypoint.dest:
                    log.warning("Already at dest: '{}'".format(waypoint.dest))
                else:
                    self.navigate_waypoint(driver, waypoint, timeout,
                                           valid_dest)
            except NavigationException:
                handle_redirect(driver,
                                ignore_oos=self.args.ignore_oos,
                                valid_dest=valid_dest,
                                timeout=timeout,
                                route=self)
            self.waypoints_reached += 1
        log.info('Route complete')
| [
"mark@cueradioshow.com"
] | mark@cueradioshow.com |
89783e213fd43bb014b05e4057a28b61359b16bc | f24a2574875042ad2f39bfea027098f1bee21050 | /Organization/forms.py | d7490521983d4f2a5d0cccaa6446735e5017b103 | [] | no_license | shashi634/QuizChamp | 9f0190b57add9781389aa9efef82b97ead0b9173 | 8bb9ed390e4cc0646c4b777e65a01b4825755a46 | refs/heads/master | 2020-08-28T10:27:09.570442 | 2019-11-10T10:21:01 | 2019-11-10T10:21:01 | 217,673,135 | 0 | 0 | null | 2019-11-02T17:19:34 | 2019-10-26T07:36:14 | JavaScript | UTF-8 | Python | false | false | 179 | py | from django import forms
from Organization.models import OrganizationLogo
class DocumentForm(forms.Form):
    """Simple upload form exposing a single file-picker field."""
    # Uploaded file; available as request.FILES['docfile'] on POST.
    docfile = forms.FileField(
        label='Select a file',
    )
| [
"shankar634@hotmail.com"
] | shankar634@hotmail.com |
d193ad5e153534e160e9202d845a191dd8b1645e | 06c1f5f2d2c234d90ce4760a54ea05e20fefc68b | /Charper 12 Practice Projects.py | 4140131620a7ad57c9d2cecb6924b50e5d18d809 | [] | no_license | lampard1990/Ruolin | 0a4f622d0743395d85d089729dbbf193a523e600 | fd5d18cdb6d9231dff4c7e47a68802647d370c58 | refs/heads/master | 2020-04-10T17:41:03.981939 | 2019-12-02T13:22:14 | 2019-12-02T13:22:14 | 161,180,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,116 | py | #Multiplication Table
import openpyxl
from openpyxl.styles import Font

# --- Multiplication table: 6x6 products with bold row/column headers ---
wb = openpyxl.Workbook()
sheet = wb.active
bold_font = Font(bold=True)
for i in range(1, 7):
    # Bold header in the first row and first column.
    top_header = sheet.cell(row=1, column=i + 1)
    top_header.value = i
    top_header.font = bold_font
    side_header = sheet.cell(row=i + 1, column=1)
    side_header.value = i
    side_header.font = bold_font
    for j in range(1, 7):
        sheet.cell(row=i + 1, column=j + 1).value = i * j
# NOTE: filename typo ('muitiplication') kept so existing consumers still
# find the output file.
wb.save('muitiplication_table.xlsx')

# --- Blank row insert: copy 'Before' into 'After', shifting rows >= N
# down by M blank rows ---
print('which row you want to start to insert')
N = int(input())
print('how many rows you want to start to insert')
M = int(input())
mysheet = openpyxl.load_workbook('BlankRowInsert.xlsx')
before_sheet = mysheet.active
before_sheet.title = 'Before'
mysheet.create_sheet(title='After')
after_sheet = mysheet['After']
n = before_sheet.max_row
for i in range(1, n + 1):
    # enumerate() yields 0-based column offsets, hence the c + 1 below.
    for c, cell in enumerate(before_sheet[i]):
        if i < N:
            after_sheet.cell(row=i, column=c + 1).value = cell.value
        else:
            after_sheet.cell(row=i + M, column=c + 1).value = cell.value
mysheet.save('BlankRowInsert.xlsx')

# --- Spreadsheet cell inverter: transpose 'Before' into 'Inverter' ---
# (the commented-out first attempt was removed; this is the working
# row/column-index version)
mysheet1 = openpyxl.load_workbook('Cell Inverter.xlsx')
dota1 = mysheet1['Before']
mysheet1.create_sheet(title='Inverter')
dota2 = mysheet1['Inverter']
k = dota1.max_row
j = dota1.max_column
for r in range(1, k + 1):
    for s in range(1, j + 1):
        dota2.cell(row=s, column=r).value = dota1.cell(row=r, column=s).value
mysheet1.save('Cell Inverter.xlsx')
| [
"noreply@github.com"
] | noreply@github.com |
b76bc0fe589088534fb3f9e7a4aa7d763579c8ad | dc197c13b3b5cac8f95a76295214a418d6ed744a | /test.py | d0bdc4acae8704d267d55b97764011d46cdf574e | [] | no_license | nganhkhoa67/python_assistant | 309f42c8207ccddbb0a66d80c6313c07263787a4 | a002ceafa439b18a8d57f1727d409eee264dff46 | refs/heads/master | 2023-04-15T11:55:09.211062 | 2021-05-01T13:29:03 | 2021-05-01T13:29:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | from neuralintents import GenericAssistant
from googletrans import Translator

translator = Translator()
assistant = GenericAssistant('intents.json', model_name="test_model")
assistant.train_model()
assistant.save_model()
done = False
while not done:
    raw_message = input("Enter a message: ")
    # Compare the raw input BEFORE translating: the original compared the
    # googletrans Translated object to "STOP", which is never equal, so
    # typing STOP could not end the loop.
    if raw_message == "STOP":
        done = True
    else:
        # Translate to English and hand the translated text to the assistant.
        message = translator.translate(raw_message, dest='en')
assistant.request(message.text) | [
"nvkha97@gmail.com"
] | nvkha97@gmail.com |
654bde5deddbb976c2e3fe5e7a9a4b33bd606463 | e780a5bd72f98ca2513c993d64a85b08578166a6 | /buildout-cache/eggs/Zope2-2.13.26-py2.7.egg/App/Permission.py | 26fc6c96cef75bd35a47508c6bf2a627db0822a3 | [] | no_license | vedantc98/Plone-test | 023246597ffe848e2a49b9f65742ff49127b190b | 9fd520fc78481e2c0b9b7ec427821e7f961c777e | refs/heads/master | 2021-03-30T22:14:33.368739 | 2018-03-11T19:22:58 | 2018-03-11T19:22:58 | 124,671,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,468 | py | ##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
'''Zope registerable permissions
'''
from AccessControl.class_init import InitializeClass
from AccessControl.SecurityInfo import ClassSecurityInfo
from Acquisition import Implicit
from OFS.role import RoleManager
from OFS.SimpleItem import Item
from Persistence import Persistent
class Permission(RoleManager,
                 Persistent,
                 Implicit,
                 Item
                 ):
    """A registerable, persistent Zope permission object.

    Holds a permission's id, title and name so it can be managed (roles,
    properties) like any other OFS item.
    """
    # Standard OFS metadata for this item type.
    meta_type = 'Zope Permission'
    icon = 'p_/Permission_icon'
    index_html = None
    security = ClassSecurityInfo()
    # Management tabs: role management plus the generic Item tabs.
    manage_options=(
        RoleManager.manage_options
        + Item.manage_options
        )
    def __init__(self, id, title, name):
        """Store the permission's id, title and name."""
        self.id=id
        self.title=title
        self.name=name
# Apply the security declarations collected in ClassSecurityInfo above.
InitializeClass(Permission)
| [
"vedantc98@gmail.com"
] | vedantc98@gmail.com |
66ce4924978a9b83853de0f8c0593d579a84fa2c | f443b315a245c15dfcd9335a799ead18c9732ca3 | /deploy.py | 83377b84f4a962f0fee732aedfc22dfa02e3840d | [] | no_license | oliverkpan/Iowatch | 1cd2d0279a395c0b16f3b410817197840dd4e42b | 46f1644913e087b441309072e8c915dcae7a5329 | refs/heads/master | 2023-01-21T11:00:09.850906 | 2020-11-27T01:06:33 | 2020-11-27T01:06:33 | 286,863,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,960 | py | import pandas as pd
import numpy as np
import pickle
import streamlit as st
from PIL import Image
# Load the pre-trained model once at import time; the context manager
# closes the file handle (the original left it open for the process life).
# NOTE(review): absolute, machine-specific path — consider a path relative
# to this file or a config value so the app runs on other machines.
with open('/Users/oliverpan/Desktop/catboost_updated3.pkl', 'rb') as pickle_in:
    classifier = pickle.load(pickle_in)
def welcome():
    """Smoke-test helper; always returns the fixed greeting string."""
    return 'welcome all'
def prediction(overall_qual, lot_area, garage_yr, garage_cars, bsmt_qual, sale_condition):
    """Run the loaded classifier on one feature row and return its output.

    The result is also printed for server-side debugging. (The original
    used a local variable that shadowed the function name.)
    """
    feature_row = [[overall_qual, lot_area, garage_yr, garage_cars, bsmt_qual, sale_condition]]
    predicted = classifier.predict(feature_row)
    print(predicted)
    return predicted
def main():
    """Render the Streamlit UI, collect the six inputs and show the price."""
    st.title("Predicting House Prices in Iowa")
    html_temp = """
    <div style ="background-color:green;padding:13px">
    <h1 style ="color:black;text-align:center;">Iowatch</h1>
    </div>
    """
    # Render the banner above (unsafe_allow_html is required for raw HTML).
    st.markdown(html_temp, unsafe_allow_html = True)
    # Input widgets for the six model features.
    # NOTE(review): lot_area and garage_yr come back as strings from
    # st.text_input (and garage_cars from the selectbox) — presumably the
    # model pipeline coerces them to numbers; verify against the model.
    overall_qual = st.slider('Overall Qual', min_value = 0, max_value = 10, key = '1')
    bsmt_qual = st.slider('Basement Quality', min_value = 0, max_value = 10, key = '2')
    sale_condition = st.slider('Sale Quality', min_value = 0, max_value = 10, key = '3')
    lot_area = st.text_input("Lot Area", "")
    garage_yr = st.text_input("Year Built", "")
    garage_cars = st.selectbox('Number of Garages', options = ['1','2','3','4'])
    result = ""
    # When 'Predict' is clicked, run the model and display the result.
    if st.button("Predict"):
        result = prediction(overall_qual, lot_area, garage_yr, garage_cars, bsmt_qual, sale_condition)
        st.success('The predicted house price is {}'.format(result))
if __name__=='__main__':
main() | [
"noreply@github.com"
] | noreply@github.com |
b598b5d2bb70234030c1dea7afad9329793ddf80 | d11719360ac70a63524658a90f250b6e3d9b4329 | /python_prototype/DFT_Rudimentary.py | 7bcc58f51d72cf0430f7ef3ddfc35d97ecf427d0 | [] | no_license | thetaprimeio/QHacks2021 | 52b0c55a7abc857cb943aec4ed775f05de9c4fe5 | 4d006b6f6d9b83204d4e6c63a7bb6aa424c09de8 | refs/heads/master | 2023-08-25T10:12:19.585882 | 2023-08-08T01:15:53 | 2023-08-08T01:15:53 | 332,239,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,847 | py | import math as m
import cmath as c
import csv
# Load the Western-scale note/frequency table into freq_list (eagerly, as a
# list of rows — presumably [note, modifier, frequency_hz] given how it is
# indexed later; confirm against the CSV). The context manager closes the
# file, which the original left open for the life of the process.
with open("frequency_notes.csv") as file_freq:
    freq_data = csv.reader(file_freq)
    freq_list = list(freq_data)
# Angular frequencies (rad/s) of the three component sinusoids.
# The ordinary frequency of each component is f = omega / (2*pi).
omega_1 = 1643
omega_2 = 2070
omega_3 = 2460

def sint(x):
    """Value at time x (seconds) of the sum of the three sinusoids."""
    return sum(m.sin(omega * x) for omega in (omega_1, omega_2, omega_3))
# Discrete sampling of sint: substitute x_k = 2*pi*k/N, where N is the
# number of sample points and k is an integer in [0, N), so the samples
# cover x in [0, 2*pi) for any N.
def sintk(k,N):
    """sint evaluated at the k-th of N evenly spaced points over [0, 2*pi)."""
    sample_point = 2 * m.pi * k / N
    return sint(sample_point)
# DFT-style frequency scan of the sintk samples: each integer p plays the
# role of an angular frequency; a large magnitude at p indicates a
# component near that frequency (cf. the omega_1..3 constants above).
# [N] = number of sample points
# [pmax] = exclusive upper bound on the scanned p values
def dft(N, pmax):
    """Scan p in [1000, pmax), printing detected frequencies and notes.

    For each p the correlation gp = sqrt(N) * sum_k f(x_k) * exp(2*pi*i*k*p/N)
    is accumulated; when |gp| > 1 the frequency p/(2*pi) Hz is printed along
    with any Western-scale note within 1 Hz of it. Returns gp for the last
    scanned p (pmax - 1).

    NOTE(review): the sqrt(N) factor grows |gp| with N rather than
    normalizing it, so the fixed |gp| > 1 threshold is N-dependent; a 1/N
    or 1/sqrt(N) normalization would make detection scale-free — confirm
    which convention was intended.
    """
    gp = 0 + 0j
    for p in range(1000,pmax):
        gp = 0 + 0j  # reset the accumulator for this candidate frequency
        for k in range(0,N):
            gp += m.sqrt(N)*sintk(k,N)*(c.exp(2*m.pi*1j*k*p/N))
        if abs(gp) > 1:
            print("------------")
            # Convert identified omega value to frequency in Hz
            print(str.format('{0:.2f}', p/(2*m.pi)) + " Hz")
            # Look for a match with a note in the Western scale
            for i in range(0,len(freq_list)):
                if abs(float(freq_list[i][2])-p/(2*m.pi)) < 1:
                    print("Note identified: " + freq_list[i][0] + freq_list[i][1])
            print("------------")
    print("DONE")
    return gp
# Expected to see greater DFT magnitude around 2pi frequency
dft(20_000, 3_000)
| [
"18bb6@caslab.queensu.ca"
] | 18bb6@caslab.queensu.ca |
48c2d5ae87dd342cc79ce18451312b0b3d5d7cba | c72f421c47bd009e2c8fdd0b3e15a5610485d860 | /lib/naoqi_proxy_python_classes/ALColorBlobDetection.py | 3bc4e75fdeb3aeb678981d2954873e2d0de0bba8 | [] | no_license | florianbaer/Gandalf_GateKeeper | d96b438988de8d771cde9575b23ae861f6fe18ae | d547ba95dca3546062fcbb600dacd8c5a49417c7 | refs/heads/master | 2020-08-05T16:05:49.586915 | 2019-12-19T18:13:45 | 2019-12-19T18:13:45 | 212,606,927 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,116 | py | #!/usr/bin/env python
# Class autogenerated from .\alcolorblobdetectionproxy.h
# by Sammy Pfeiffer's <Sammy.Pfeiffer at student.uts.edu.au> generator
# You need an ALBroker running
class ALColorBlobDetection(object):
    """Python wrapper for the NAOqi ``ALColorBlobDetection`` extractor.

    The remote proxy is created lazily: every public method goes through
    :meth:`_service`, which connects on first use and caches the proxy in
    ``self.proxy``. This factors out the identical connect-boilerplate the
    code generator duplicated at the top of every method; all method names,
    signatures and forwarded remote calls are unchanged.
    """

    def __init__(self, session):
        """Store the qi *session*; no broker connection is made yet."""
        self.session = session
        self.proxy = None

    def _service(self):
        """Return the service proxy, connecting to the broker on first use."""
        if not self.proxy:
            self.proxy = self.session.service("ALColorBlobDetection")
        return self.proxy

    def force_connect(self):
        """Unconditionally (re)connect the underlying service proxy."""
        self.proxy = self.session.service("ALColorBlobDetection")

    def getActiveCamera(self):
        """Return the id (int) of the extractor's active camera."""
        return self._service().getActiveCamera()

    def getAutoExposure(self):
        """Return True when the camera exposure mode is automatic."""
        return self._service().getAutoExposure()

    def getCircle(self):
        """Return the detected circle as [x, y, radius] in image-relative
        coordinates (x and radius divided by rows, y by cols)."""
        return self._service().getCircle()

    def getCurrentPeriod(self):
        """Return the current refresh period in milliseconds."""
        return self._service().getCurrentPeriod()

    def getCurrentPrecision(self):
        """Return the current precision of the extractor."""
        return self._service().getCurrentPrecision()

    def getEventList(self):
        """Return the ALMemory event names updated by this extractor."""
        return self._service().getEventList()

    def getFrameRate(self):
        """Return the extractor's current framerate."""
        return self._service().getFrameRate()

    def getMemoryKeyList(self):
        """Return the ALMemory keys updated by this extractor."""
        return self._service().getMemoryKeyList()

    def getMyPeriod(self, name):
        """Return the refresh period (ms) of subscriber *name*."""
        return self._service().getMyPeriod(name)

    def getMyPrecision(self, name):
        """Return the precision requested by subscriber *name*."""
        return self._service().getMyPrecision(name)

    def getOutputNames(self):
        """Return the ALMemory value names written by this extractor."""
        return self._service().getOutputNames()

    def getResolution(self):
        """Return the extractor's current camera resolution."""
        return self._service().getResolution()

    def getSubscribersInfo(self):
        """Return names and parameters of all current subscribers."""
        return self._service().getSubscribersInfo()

    def isPaused(self):
        """Return True when the extractor is paused."""
        return self._service().isPaused()

    def isProcessing(self):
        """Return True when the extractor is currently processing images."""
        return self._service().isProcessing()

    def pause(self, paused):
        """Set the extractor's pause status to *paused* (bool)."""
        return self._service().pause(paused)

    def ping(self):
        """Liveness check; always returns True on a healthy service."""
        return self._service().ping()

    def setActiveCamera(self, cameraId):
        """Make camera *cameraId* the active camera; True on success."""
        return self._service().setActiveCamera(cameraId)

    def setAutoExposure(self, mode):
        """Enable (True) or disable (False) camera auto exposure."""
        return self._service().setAutoExposure(mode)

    def setColor(self, r, g, b, colorThres):
        """Set the RGB color to track and the color threshold."""
        return self._service().setColor(r, g, b, colorThres)

    def setFrameRate(self, subscriberName, framerate):
        """Set *framerate* for subscriber *subscriberName*; True on success."""
        return self._service().setFrameRate(subscriberName, framerate)

    def setFrameRate2(self, framerate):
        """Set *framerate* for all subscribers; True on success.

        Forwards to the remote single-argument ``setFrameRate`` overload.
        """
        return self._service().setFrameRate(framerate)

    def setObjectProperties(self, minSize, span):
        """Set the minimum cluster size and the object span in meters."""
        return self._service().setObjectProperties(minSize, span)

    def setObjectProperties2(self, minSize, span, shape):
        """Like setObjectProperties, additionally constraining the shape.

        Forwards to the remote three-argument ``setObjectProperties``
        overload.
        """
        return self._service().setObjectProperties(minSize, span, shape)

    def setParameter(self, paramName, value):
        """DEPRECATED: set pause/resolution via a named parameter."""
        return self._service().setParameter(paramName, value)

    def setResolution(self, resolution):
        """Set the extractor resolution; returns True on success."""
        return self._service().setResolution(resolution)

    def subscribe(self, name, period, precision):
        """Subscribe module *name* with an explicit period (ms) and precision.

        Subscribing makes the extractor start writing its outputs (see
        getOutputNames()) to ALMemory.
        """
        return self._service().subscribe(name, period, precision)

    def subscribe2(self, name):
        """Subscribe module *name* with default period and precision.

        Forwards to the remote single-argument ``subscribe`` overload.
        """
        return self._service().subscribe(name)

    def unsubscribe(self, name):
        """Remove module *name*'s subscription."""
        return self._service().unsubscribe(name)

    def updatePeriod(self, name, period):
        """Update subscriber *name*'s refresh period (ms)."""
        return self._service().updatePeriod(name, period)

    def updatePrecision(self, name, precision):
        """Update subscriber *name*'s precision."""
        return self._service().updatePrecision(name, precision)

    def version(self):
        """Return the module's version string."""
        return self._service().version()
| [
"florian.baer@stud.hslu.ch"
] | florian.baer@stud.hslu.ch |
c6e0eb509fc5037954b5bb3c03ff4e8a8e1ffd26 | d4df04b3cca3a53c18cbe78cb57c6dde61b955e9 | /superset/migrations/versions/c501b7c653a3_add_missing_uuid_column.py | 786b41a1c72b82bb3339d96a96c4b291a0d50f6b | [
"Apache-2.0",
"OFL-1.1"
] | permissive | cruvigeo/incubator-superset | 66c940212bcc5c2265b990a9211dc04253693903 | 523bd8b79cfd48d1cb3a94f89c8095976844ce59 | refs/heads/master | 2023-03-17T21:28:08.151730 | 2022-04-25T04:53:52 | 2022-04-25T04:53:52 | 249,028,833 | 0 | 0 | Apache-2.0 | 2023-03-13T16:05:35 | 2020-03-21T17:42:12 | TypeScript | UTF-8 | Python | false | false | 3,178 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""add missing uuid column
Revision ID: c501b7c653a3
Revises: 070c043f2fdb
Create Date: 2021-02-18 09:13:00.028317
"""
# revision identifiers, used by Alembic.
revision = "c501b7c653a3"
down_revision = "070c043f2fdb"
import logging
from uuid import uuid4
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import mysql
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.orm import load_only
from sqlalchemy_utils import UUIDType
from superset import db
from superset.migrations.versions.b56500de1855_add_uuid_column_to_import_mixin import (
assign_uuids,
models,
update_dashboards,
)
def has_uuid_column(table_name, bind):
    """Return True when *table_name* already has a ``uuid`` column.

    Logs either way so migration output shows which tables are skipped
    versus altered. (The original bound a local that shadowed this
    function's name; an early return replaces it.)
    """
    inspector = Inspector.from_engine(bind)
    existing = {col["name"] for col in inspector.get_columns(table_name)}
    if "uuid" in existing:
        logging.info("Table %s already has uuid column, skipping...", table_name)
        return True
    logging.info("Table %s doesn't have uuid column, adding...", table_name)
    return False
def upgrade():
    """Add a ``uuid`` column to every tracked table that is missing one,
    backfill values, enforce uniqueness, and refresh the chart UUIDs
    embedded in Dashboard.position_json."""
    bind = op.get_bind()
    session = db.Session(bind=bind)
    for table_name, model in models.items():
        # this script adds missing uuid columns
        if has_uuid_column(table_name, bind):
            continue
        with op.batch_alter_table(table_name) as batch_op:
            batch_op.add_column(
                sa.Column(
                    "uuid",
                    UUIDType(binary=True),
                    primary_key=False,
                    default=uuid4,
                ),
            )
        # Populate the freshly added column with generated UUIDs.
        assign_uuids(model, session)
        # add uniqueness constraint
        with op.batch_alter_table(table_name) as batch_op:
            # batch mode is required for sqllite
            batch_op.create_unique_constraint(f"uq_{table_name}_uuid", ["uuid"])
    # add UUID to Dashboard.position_json; this function is idempotent
    # so we can call it for all objects
    slice_uuid_map = {
        slc.id: slc.uuid
        for slc in session.query(models["slices"])
        .options(load_only("id", "uuid"))
        .all()
    }
    update_dashboards(session, slice_uuid_map)
def downgrade() -> None:
"""
This script fixes b56500de1855_add_uuid_column_to_import_mixin.py by adding any
uuid columns that might have been skipped. There's no downgrade.
"""
pass
| [
"noreply@github.com"
] | noreply@github.com |
d76a4f8ce232044b6af8f621ce7371a61233bd02 | 8fac45868bb6dfd1410dd3447c28e4f736c0064e | /Task2.py | 251a1377b46f172aa573070873ef7885909106b1 | [] | no_license | sudheermeka/Unscramble-Computer-Science-Problems | 38fc934af6a1c30bce3517ca95788abb8b385a46 | 8f2edc9ccebc715479172c163bdd69bd4e82947a | refs/heads/master | 2022-05-27T20:49:41.423279 | 2020-04-26T01:50:33 | 2020-04-26T01:50:33 | 258,914,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,300 | py | """
Read file into texts and calls.
It's ok if you don't understand how to read files
"""
import csv
from datetime import datetime
with open('texts.csv', 'r') as f:
reader = csv.reader(f)
texts = list(reader)
with open('calls.csv', 'r') as f:
reader = csv.reader(f)
calls = list(reader)
time_spent_map = {}
for row in calls:
format_date = datetime.strptime(row[2].split(" ")[0], '%d-%m-%Y')
if format_date.month != 9 or format_date.year != 2016:
continue
if row[0] not in time_spent_map:
time_spent_map[row[0]] = 0
if row[1] not in time_spent_map:
time_spent_map[row[1]] = 0
time_spent_map[row[0]] += int(row[3])
time_spent_map[row[1]] += int(row[3])
number = None
max_time = None
for key, value in time_spent_map.items():
if number is None or max_time < value:
number = key
max_time = value
print("{} spent the longest time, {} seconds, on the phone during September 2016.".format(number, max_time))
"""
TASK 2: Which telephone number spent the longest time on the phone
during the period? Don't forget that time spent answering a call is
also time spent on the phone.
Print a message:
"<telephone number> spent the longest time, <total time> seconds, on the phone during
September 2016.".
"""
| [
"sudheer.meka@copart.com"
] | sudheer.meka@copart.com |
03f3d7e0dcf79c3d795b7e3443dc1547b2def213 | 9d428d6ff489bb1bf219962ab80afae387acc631 | /0x08-python-more_classes/4-rectangle.py | 0acbf02744b5876b3991f614fa6c37247b00aa19 | [] | no_license | NikkiE-Dev/holbertonschool-higher_level_programming | 3a0ac5d5d3c6aa4eb768612831aaeecd0e870ed0 | e7071922552cc8fc286fd0706de99674916b4b3f | refs/heads/main | 2023-06-29T09:09:56.896583 | 2021-07-07T14:09:22 | 2021-07-07T14:09:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,873 | py | #!/usr/bin/python3
""" This module is how to create a simple class."""
class Rectangle:
""" This is how you create an empty class."""
def __init__(self, width=0, height=0):
"""This is how you make a rectange """
if not isinstance(height, int):
raise TypeError("height must be an integer")
if height < 0:
raise ValueError("height must be >= 0")
if not isinstance(width, int):
raise TypeError("width must be an integer")
if width < 0:
raise ValueError("width must be >= 0")
self.__width = width
self.__height = height
@property
def width(self):
return self.__width
@width.setter
def width(self, value):
if not isinstance(value, int):
raise TypeError("width must be an integer")
if value < 0:
raise ValueError("width must be >= 0")
self.__width = value
@property
def height(self):
return self.__height
@height.setter
def height(self, value):
if not isinstance(value, int):
raise TypeError("height must be an integer")
if value < 0:
raise ValueError("height must be >= 0")
self.__height = value
def area(self):
return self.width * self.height
def perimeter(self):
if self.width == 0 or self.height == 0:
return 0
return (self.width * 2) + (self.height * 2)
def __str__(self):
if self.width == 0 or self.height == 0:
return ("")
rect = ""
for i in range(self.height):
for j in range(self.width):
rect += "#"
if i != self.height - 1:
rect += ("\n")
return rect
def __repr__(self):
return "Rectangle({}, {})".format(self.__width, self.__height)
| [
"2495@holbertonschool.com"
] | 2495@holbertonschool.com |
b955d56966ce8448cd5661fb2b6d09d87b3e1b83 | d331f2180ec14634374478329970baa6006dfefa | /Day03/list_delete.py | 7c297e691954f136c5d368cddc904e8f2c3db825 | [] | no_license | SungjinJo/git-python | 84d4cd1c185af20900be5cfc6ec93430751c7d89 | 532364021677145ece07106b60069add6b4b830a | refs/heads/master | 2023-07-17T17:08:48.265595 | 2021-09-05T02:43:39 | 2021-09-05T02:43:39 | 399,032,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 910 | py | '''
* 리스트 내부 요소 삭제
1. remove(): 삭제할 값을 직접 지정하여 삭제
2. 내장함수 del(): 삭제할 요소의 인덱스를 통해 삭제합니다.
3. clear(): 리스트 내부 요소 전체 삭제
'''
points = [88, 99, 56, 92, 100, 78]
points.remove(92)
print(points)
del(points[2])
print(points)
points.clear()
print(points)
pokemon = ['피카츄', '라이츄', '파이리', '꼬부기', '버터풀']
'''
- 삭제할 이름을 입력받아서 그에 해당하는 이름을 실제로
리스트에서 삭제한 후 삭제 후 정보를 출력하세요. (리스트 출력)
- remove()와 del()을 이용하여 각각 출력해 보세요.
'''
name = input('삭제할 이름을 입력하세요: ')
# remove 사용
# pokemon.remove(name)
# del 사용
for idx in range(5):
if name == pokemon[idx]:
del(pokemon[idx])
break
print('삭제 후 정보:',pokemon)
| [
"aoem9603@naver.com"
] | aoem9603@naver.com |
94e396780751dd22c01ca9f737b59599d35f3801 | 8dfefed8b288a65134e2f3587e96b6313ba138c2 | /Ranges/venv/bin/pip3 | b7bc29cf2419535c2f5f7f8bcb83fc98216d429c | [] | no_license | gsvetleen/Python-Basics | b80a04350149e04278d705ba4bc69555965c957d | 68dae06c1403d9a2dc82a447f394b82e994d88b2 | refs/heads/master | 2020-04-29T15:55:28.613717 | 2019-04-01T09:27:22 | 2019-04-01T09:27:22 | 176,243,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | #!/home/svetleen/Documents/PythonLearning/Basics/Ranges/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
| [
"gsvetleen@gmail.com"
] | gsvetleen@gmail.com | |
f8d55dbb4e67431691ffb05b657d57677eeaae24 | b24b44b68e625ae955f1182a1b7c6dd663384bb8 | /sub_arr_with_equal_0_1.py | beaa40d00dce4d9ac3d4f3825a9c0cba0dbb5ff6 | [] | no_license | avinashraghuthu/Arrays | 6379bf2e0ee50818859ccaeae5883461b4241e13 | e58441560ec0db9ebfa7c49eefe101d0bf48460b | refs/heads/master | 2021-10-24T15:32:24.651417 | 2021-10-18T13:54:25 | 2021-10-18T13:54:25 | 129,359,805 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | # https://www.geeksforgeeks.org/largest-subarray-with-equal-number-of-0s-and-1s/
def sub_arr_with_equal_0_1(arr):
arr_len = len(arr)
sum_map = {}
curr_sum = 0
end_index = -1
max_len = 0
for i in range(arr_len):
if arr[i] == 0:
curr_sum += -1
else:
curr_sum += 1
if curr_sum == 0:
max_len = i + 1
end_index = i
else:
if curr_sum in sum_map:
max_len = max(max_len, i - sum_map[curr_sum])
end_index = i
else:
sum_map[curr_sum] = i
if end_index != -1:
print("start index:", end_index - max_len + 1)
print("end index:", end_index)
else:
print("Not found")
arr = [1, 1, 1, 1, 1, 0, 0]
# arr = [1, 0, 0, 1, 0, 1, 1]
sub_arr_with_equal_0_1(arr)
| [
"avinash.raghuthu@gmail.com"
] | avinash.raghuthu@gmail.com |
a8e9cdc95648147b293fee0523d23d4854b8e6af | 815ae9857d75bb9c44cc746f9ebebd5177203fa4 | /facile_backlog/backlog/views.py | f437ae1585481fe1115584a7528aaacc97aaf83b | [] | no_license | dsaradini/facile_backlog | adbc0ec98c35c06ceb07c5f97ab087ed3974763b | 1eb53fed41c31da31556c5ec6292815bf4002d6a | refs/heads/master | 2021-01-01T05:47:23.398294 | 2015-11-20T13:10:56 | 2015-11-20T13:10:56 | 10,429,872 | 4 | 2 | null | 2015-11-20T13:10:57 | 2013-06-01T23:23:12 | JavaScript | UTF-8 | Python | false | false | 67,136 | py | # -*- coding: utf-8 -*-
import urllib
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.sites.models import RequestSite
from django.core import signing
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.forms import forms
from django.http import Http404
from django.http.response import (HttpResponseForbidden)
from django.shortcuts import get_object_or_404, redirect, render_to_response
from django.template import loader
from django.template.context import RequestContext
from django.utils.cache import patch_cache_control
from django.utils.translation import ugettext as _
from django.views import generic
from django.views.decorators.http import require_POST
from django.views.decorators.csrf import csrf_protect
from .models import (Project, Backlog, UserStory, AuthorizationAssociation,
create_event, Organization, status_for, STATUS_COLORS,
Status)
from .forms import (ProjectCreationForm, ProjectEditionForm,
BacklogCreationForm, BacklogEditionForm,
StoryEditionForm, StoryCreationForm, InviteUserForm,
OrgCreationForm, OrgEditionForm,
AuthorizationAssociationForm)
from ..core.models import User
from .pdf import generate_pdf
from .excel import export_excel
from ..util import get_websocket_url
# Target-type tags embedded in the signed invitation payload (see
# OrgInviteUser.send_notification) so the acceptance view can tell whether
# the invite concerns a project or an organization.
AUTH_TYPE_PROJECT = "prj"
AUTH_TYPE_ORG = "org"
def pie_element(name, value):
    """Build one slice of a status pie chart.

    ``name`` is a story status code; ``value`` is a mapping with
    ``'stories'`` (count) and ``'points'`` (weight used as the slice
    size, ``y``).
    """
    return dict(
        name=status_for(name),
        color=STATUS_COLORS[name],
        count=value['stories'],
        y=value['points'],
    )
def get_projects(user):
    """Projects visible to *user*, as given by ``Project.my_recent_projects``."""
    return Project.my_recent_projects(user)
def get_organizations(user):
    """Organizations visible to *user*, via ``Organization.my_organizations``."""
    return Organization.my_organizations(user)
def get_my_object_or_404(klass, user, pk):
    """Fetch ``klass`` by primary key, raising 404 unless readable.

    The object must expose ``can_read(user)`` and that check must pass;
    otherwise a 404 is raised so the object's existence is not leaked.
    """
    instance = get_object_or_404(klass, pk=pk)
    readable = hasattr(instance, "can_read") and instance.can_read(user)
    if not readable:
        raise Http404()
    return instance
class Dashboard(generic.TemplateView):
    """Landing page: recent events, own projects/orgs and guest projects."""
    template_name = "backlog/dashboard.html"

    def get_context_data(self, **kwargs):
        """Assemble the dashboard context for the current user.

        Adds the 10 most recent events, the user's standalone (org-less)
        active and archived projects, their organizations, and "guest"
        projects: projects the user can access inside organizations the
        user cannot access.
        """
        context = super(Dashboard, self).get_context_data(**kwargs)
        context['events'] = self.request.user.events.select_related(
            "project", "backlog", "user", "story", "story__project",
            "organization")[:10]
        all_projects = get_projects(self.request.user)
        # Only org-less projects here; org projects show on the org pages.
        context['projects'] = all_projects.filter(org=None, is_archive=False)
        context['archived_projects'] = all_projects.filter(
            org=None, is_archive=True)
        orgs = get_organizations(self.request.user)
        context['organizations'] = orgs
        # list projects where current user has access rights but not
        # having access to the organization
        my_org_pks = [o.pk for o in orgs]
        guest_p = [p for p in all_projects if
                   p.org_id and p.org_id not in my_org_pks]
        context['guest_projects'] = guest_p
        return context


dashboard = login_required(Dashboard.as_view())
class NoCacheMixin(object):
    """View mixin marking the response non-cacheable when ``no_cache``."""

    # Subclasses opt in by setting this to True.
    no_cache = False

    def dispatch(self, request, *args, **kwargs):
        response = super(NoCacheMixin, self).dispatch(
            request, *args, **kwargs)
        if self.no_cache:
            patch_cache_control(
                response, no_cache=True, no_store=True,
                must_revalidate=True)
        return response
class BackMixin(object):
    """Carry a ``_back`` return-URL parameter from the request to the form.

    ``self.back`` is read from POST or GET data depending on the HTTP
    method (``None`` for any other method) and handed to the form as the
    ``_back`` keyword argument.
    """

    def dispatch(self, request, *args, **kwargs):
        method = request.method
        if method == "POST":
            params = request.POST
        elif method == "GET":
            params = request.GET
        else:
            params = None
        self.back = params.get("_back", None) if params is not None else None
        return super(BackMixin, self).dispatch(request, *args, **kwargs)

    def get_form_kwargs(self):
        form_kwargs = super(BackMixin, self).get_form_kwargs()
        form_kwargs['_back'] = self.back
        return form_kwargs
class FilteredStoriesMixin(object):
    """Query-string driven filtering and sorting of story querysets.

    Subclasses implement :meth:`get_stories` to supply the base
    queryset; :meth:`get_queryset` then applies, from the request:
    ``s`` (sort field, ``-`` prefix for descending), ``q`` (free-text
    search on as_a / i_want_to / so_i_can / number), ``t`` (theme),
    ``st`` (status) and ``sa`` (include archived backlogs).
    """

    def __init__(self, *args, **kwargs):
        # Chain the MRO so sibling mixins/base views get their kwargs
        # (the previous version silently swallowed them).
        super(FilteredStoriesMixin, self).__init__(*args, **kwargs)
        self.sort = ""
        self.query = {}

    def setup_filter(self, request):
        """Capture sort/filter parameters from the request query string."""
        self.sort = request.GET.get('s', "")
        self.query = {
            'q': request.GET.get('q', ""),    # free-text search
            't': request.GET.get('t', ""),    # theme filter
            'st': request.GET.get('st', ""),  # status filter
            'sa': request.GET.get('sa', "")   # show archived backlogs
        }

    def get_stories_query(self):
        return self.query

    def get_stories_sort(self):
        return self.sort

    def get_stories(self):
        # Fixed: ``raise NotImplemented`` raised a TypeError because
        # NotImplemented is a singleton, not an exception class.
        raise NotImplementedError(
            "Subclasses must return the base story queryset")

    def get_queryset(self):
        """Return the filtered/sorted queryset for the list view."""
        query = self.get_stories_query()
        sort = self.get_stories_sort()
        stories_qs = self.get_stories().select_related("backlog", "project")
        # Unless 'sa' is set, hide stories living in archived backlogs.
        if not query['sa']:
            stories_qs = stories_qs.filter(backlog__is_archive=False)
        if sort:
            stories_qs = stories_qs.extra(order_by=["{0}".format(sort)])
        if query['t']:
            stories_qs = stories_qs.filter(
                theme__icontains=query['t']
            )
        if query['st']:
            stories_qs = stories_qs.filter(
                status=query['st']
            )
        if query['q']:
            # Free text matches any of the user-story sentence parts or
            # the story number.
            stories_qs = stories_qs.filter(
                Q(as_a__icontains=query['q']) |
                Q(i_want_to__icontains=query['q']) |
                Q(so_i_can__icontains=query['q']) |
                Q(number__icontains=query['q'])
            )
        return stories_qs
class OrgCreate(generic.CreateView):
    """Create a new organization.

    The creator is granted an active admin authorization and a main
    "Todo" backlog is created for the new organization.
    """
    template_name = "backlog/org_form.html"
    model = Organization
    form_class = OrgCreationForm

    def form_valid(self, form):
        """Persist the org plus its default admin grant and main backlog."""
        self.object = form.save()
        # The creator becomes an active administrator immediately.
        AuthorizationAssociation.objects.create(
            org=form.instance,
            user=self.request.user,
            is_admin=True,
            is_active=True
        )
        # Every organization starts with one main backlog.
        Backlog.objects.create(
            name=_("Main backlog"),
            description=_("This is the main backlog for the organization."),
            org=self.object,
            kind=Backlog.TODO,
            is_main=True,
            order=1,
        )
        create_event(
            self.request.user, organization=self.object,
            text="created this organization"
        )
        messages.success(self.request,
                         _("Organization successfully created."))
        return redirect(reverse("dashboard"))


org_create = login_required(OrgCreate.as_view())
class OrgMixin(NoCacheMixin):
    """
    Mixin to fetch an organization by a view.

    Resolves ``kwargs['org_id']`` into ``self.organization`` (404 when
    the user cannot read it).  When ``admin_only`` is set, non-admin
    readers get a 403 while everybody else gets a 404, so the
    organization's existence is not leaked.

    Note: this docstring used to sit *after* ``admin_only``, making it a
    no-op string statement instead of the class docstring.
    """
    # When True, only organization admins may use the view.
    admin_only = False

    def dispatch(self, request, *args, **kwargs):
        self.organization = get_my_object_or_404(Organization,
                                                 request.user,
                                                 pk=kwargs['org_id'])
        if self.admin_only and not self.organization.can_admin(request.user):
            if self.organization.can_read(request.user):
                return HttpResponseForbidden(_("Not authorized"))
            else:
                raise Http404
        self.request = request
        # Hook for subclasses needing work before the normal dispatch.
        self.pre_dispatch()
        return super(OrgMixin, self).dispatch(request, *args, **kwargs)

    def pre_dispatch(self):
        pass
class OrgDetail(OrgMixin, generic.DetailView):
    """Organization home page: projects and backlogs overview."""
    template_name = "backlog/org_detail.html"

    def get_object(self):
        # Already resolved by OrgMixin.dispatch from the URL kwargs.
        return self.organization

    def get_context_data(self, **kwargs):
        context = super(OrgDetail, self).get_context_data(**kwargs)
        context['organization'] = self.organization
        context['projects'] = self.organization.active_projects.order_by(
            "-last_modified")
        context['archived_projects'] = self.organization.projects.filter(
            is_archive=True
        ).order_by("-last_modified")
        # Main backlog first, archived backlogs last, then by rank.
        backlogs = self.organization.backlogs.order_by(
            "-is_main", "is_archive", "order"
        ).all()
        context['backlogs'] = backlogs
        context['archived_count'] = len(
            [b for b in backlogs if b.is_archive]
        )
        return context


org_detail = login_required(OrgDetail.as_view())
class OrgEdit(OrgMixin, generic.UpdateView):
    """Edit an organization's details (admins only)."""
    admin_only = True
    template_name = "backlog/org_form.html"
    form_class = OrgEditionForm

    def get_object(self):
        # Already resolved by OrgMixin.dispatch from the URL kwargs.
        return self.organization

    def form_valid(self, form):
        organization = form.save()
        create_event(self.request.user, organization=organization,
                     text="modified the organization")
        messages.success(self.request,
                         _("Organization successfully updated."))
        return redirect(reverse("dashboard"))


org_edit = login_required(OrgEdit.as_view())
class OrgDelete(OrgMixin, generic.DeleteView):
    """Confirm and perform deletion of an organization (admins only)."""
    admin_only = True
    template_name = "backlog/org_confirm_delete.html"

    def get_object(self):
        # Already resolved by OrgMixin.dispatch from the URL kwargs.
        return self.organization

    def delete(self, request, *args, **kwargs):
        self.organization.delete()
        messages.success(request, _("Organization successfully deleted."))
        return redirect(reverse('dashboard'))


org_delete = login_required(OrgDelete.as_view())
class OrgUsers(OrgMixin, generic.TemplateView):
    """List the users authorized on an organization."""
    template_name = "backlog/org_users.html"

    def get_context_data(self, **kwargs):
        data = super(OrgUsers, self).get_context_data(**kwargs)
        data['organization'] = self.organization
        return data


org_users = login_required(OrgUsers.as_view())
class OrgBacklogMixin(BackMixin):
    """
    Mixin to fetch an organization and backlog by a view.

    Resolves ``kwargs['org_id']``/``kwargs['backlog_id']`` into
    ``self.organization`` and ``self.backlog`` (404 on a missing or
    mismatched pair).  When ``admin_only`` is set, non-admin readers get
    a 403 while everybody else gets a 404.  Subclasses may implement
    ``pre_dispatch`` and return a response there to short-circuit the
    normal dispatch.

    Note: this docstring used to sit *after* ``admin_only``, making it a
    no-op string statement instead of the class docstring.
    """
    # When True, only organization admins may use the view.
    admin_only = False

    def dispatch(self, request, *args, **kwargs):
        org_id = kwargs['org_id']
        self.organization = get_my_object_or_404(Organization,
                                                 request.user, org_id)
        try:
            self.backlog = Backlog.objects.select_related().get(
                pk=kwargs['backlog_id'])
        except Backlog.DoesNotExist:
            raise Http404('Not found.')
        # The backlog must actually belong to the resolved organization.
        if self.backlog.org_id != self.organization.pk:
            raise Http404('No matches found.')
        if self.admin_only and not self.organization.can_admin(request.user):
            if self.organization.can_read(request.user):
                return HttpResponseForbidden(_("Not authorized"))
            else:
                raise Http404
        self.request = request
        render = self.pre_dispatch(request, **kwargs)
        if render:
            # A subclass guard produced a response: return it directly.
            return render
        return super(OrgBacklogMixin, self).dispatch(request, *args, **kwargs)

    def pre_dispatch(self, request, **kwargs):
        pass

    def get_context_data(self, **kwargs):
        context = super(OrgBacklogMixin, self).get_context_data(**kwargs)
        context['organization'] = self.organization
        context['backlog'] = self.backlog
        return context
class OrgBacklogCreate(OrgMixin, generic.CreateView):
    """Create a new backlog inside an organization (admins only)."""
    admin_only = True
    template_name = "backlog/backlog_form.html"
    model = Backlog
    form_class = BacklogCreationForm

    def get_form_kwargs(self):
        kwargs = super(OrgBacklogCreate, self).get_form_kwargs()
        # 'holder' tells the form which object owns the new backlog;
        # presumably BacklogCreationForm accepts org or project holders.
        kwargs['holder'] = self.organization
        return kwargs

    def get_context_data(self, **kwargs):
        context = super(OrgBacklogCreate, self).get_context_data(**kwargs)
        context['organization'] = self.organization
        return context

    def form_valid(self, form):
        self.object = form.save()
        create_event(
            self.request.user, organization=self.organization,
            text="created this backlog",
            backlog=self.object
        )
        messages.success(self.request,
                         _("Backlog successfully created."))
        return redirect(reverse("org_sprint_planning", args=(
            self.organization.pk,
        )))


org_backlog_create = login_required(OrgBacklogCreate.as_view())
class OrgBacklogEdit(OrgBacklogMixin, generic.UpdateView):
    """Edit an organization backlog (admins only)."""
    admin_only = True
    template_name = "backlog/backlog_form.html"
    form_class = BacklogEditionForm

    def get_object(self):
        # Already resolved by OrgBacklogMixin.dispatch.
        return self.backlog

    def form_valid(self, form):
        backlog = form.save()
        create_event(
            self.request.user, organization=self.organization,
            text="modified the backlog",
            backlog=self.object
        )
        messages.success(self.request,
                         _("Backlog successfully updated."))
        # Redirect to the backlog's holder: project, else org, else home.
        if backlog.project_id:
            return redirect(reverse("project_backlogs",
                                    args=(backlog.project_id,)))
        elif backlog.org_id:
            return redirect(reverse("org_detail",
                                    args=(backlog.org_id,)))
        return redirect(reverse("home"))


org_backlog_edit = login_required(OrgBacklogEdit.as_view())
class OrgBacklogs(OrgMixin, generic.TemplateView):
    """Organization sprint-planning board.

    Shows the organization's own backlogs side by side, plus the main
    backlog of a "project of interest" chosen via the ``project_id``
    query parameter and remembered per organization in the session.
    """
    # Session key whose value maps organization pk -> preferred project pk.
    SESSION_PREF_KEY = 'org_pref_proj'
    template_name = "backlog/org_sprint_planning.html"
    no_cache = True

    def store_preferred_project(self, project_id):
        # Remember the last project selected for this organization.
        preferred = self.request.session.get(self.SESSION_PREF_KEY, dict())
        preferred[self.organization.pk] = project_id
        self.request.session[self.SESSION_PREF_KEY] = preferred

    def load_preferred_project(self):
        # Return the remembered project pk for this organization, or None.
        preferred = self.request.session.get(self.SESSION_PREF_KEY, dict())
        return preferred.get(self.organization.pk, None)

    def pre_dispatch(self):
        # Explicit ?project_id=... wins and is persisted; otherwise fall
        # back to the session preference.
        self.project_id = self.request.GET.get("project_id", None)
        if self.project_id:
            self.store_preferred_project(self.project_id)
        else:
            self.project_id = self.load_preferred_project()

    def get_context_data(self, **kwargs):
        context = super(OrgBacklogs, self).get_context_data(**kwargs)
        context['organization'] = self.organization
        backlog = None
        if self.project_id:
            try:
                backlog = self.organization.projects.get(
                    pk=self.project_id
                ).main_backlog
            except Project.DoesNotExist:
                # Stale preference (project deleted/moved): show nothing.
                backlog = None
        else:
            # No preference: pick the first project that has a main backlog.
            first = self.organization.projects.filter(
                backlogs__is_main=True
            ).all()[:1]
            if first:
                backlog = first[0].main_backlog
        context['backlog_of_interest'] = backlog
        # Main backlogs of all active projects, used to list the projects
        # selectable as "project of interest".
        backlogs = Backlog.objects.filter(
            is_main=True,
            project__org=self.organization,
            project__is_archive=False,
        ).select_related("project").order_by("project__name")
        context['projects_with_main'] = [b.project for b in backlogs.all()]
        backlogs = self.organization.backlogs.filter(
            is_archive=False
        ).select_related("project").all()
        context['backlog_list'] = backlogs
        # Presumably 320px per backlog column with a two-column minimum
        # -- TODO confirm against the board template/CSS.
        context['backlog_width'] = 320 * (max(len(backlogs)+1, 2))
        context['ws_url'] = get_websocket_url(self.request)
        return context


org_backlogs = login_required(OrgBacklogs.as_view())
class OrgBacklogDelete(OrgBacklogMixin, generic.DeleteView):
    """Delete an *empty* organization backlog (admins only)."""
    admin_only = True
    template_name = "backlog/backlog_confirm_delete.html"

    def pre_dispatch(self, request, **kwargs):
        # Guard: refuse to delete a backlog that still holds stories.
        # Returning a response here short-circuits dispatch (see
        # OrgBacklogMixin.dispatch).
        if self.backlog.stories.exists():
            messages.error(request,
                           _("Backlog is not empty, unable to delete."))
            return redirect(reverse('org_sprint_planning',
                                    args=(self.backlog.org_id,)))

    def get_object(self):
        return self.backlog

    def delete(self, request, *args, **kwargs):
        self.backlog.delete()
        create_event(
            self.request.user, organization=self.organization,
            text=u"deleted backlog {0}".format(self.backlog.name),
        )
        messages.success(request,
                         _("Backlog successfully deleted."))
        return redirect(reverse('org_sprint_planning',
                                args=(self.organization.pk,)))


org_backlog_delete = login_required(OrgBacklogDelete.as_view())
class OrgBacklogArchive(OrgBacklogMixin, generic.DeleteView):
    """Archive an organization backlog after confirmation (admins only)."""
    admin_only = True
    template_name = "backlog/backlog_confirm_archive.html"

    def get_object(self):
        # Already resolved by OrgBacklogMixin.dispatch.
        return self.backlog

    def post(self, request, *args, **kwargs):
        backlog = self.backlog
        backlog.archive()
        create_event(
            self.request.user, organization=self.organization,
            text=u"archived backlog {0}".format(backlog.name),
        )
        messages.success(request, _("Backlog successfully archived."))
        return redirect(reverse('org_detail', args=(self.organization.pk,)))


org_backlog_archive = login_required(OrgBacklogArchive.as_view())
class OrgBacklogRestore(OrgBacklogMixin, generic.DeleteView):
    """Restore an archived organization backlog (admins only)."""
    admin_only = True
    template_name = "backlog/backlog_confirm_restore.html"

    def get_object(self):
        # Already resolved by OrgBacklogMixin.dispatch.
        return self.backlog

    def post(self, request, *args, **kwargs):
        backlog = self.backlog
        backlog.restore()
        create_event(
            self.request.user, organization=self.organization,
            text=u"restored backlog {0}".format(backlog.name),
        )
        messages.success(request, _("Backlog successfully restored."))
        return redirect(reverse('org_detail', args=(self.organization.pk,)))


org_backlog_restore = login_required(OrgBacklogRestore.as_view())
class OrgStories(OrgMixin, FilteredStoriesMixin, generic.ListView):
    """Paginated, filterable list of all stories in an organization."""
    template_name = "backlog/org_stories.html"
    paginate_by = 30

    def dispatch(self, request, *args, **kwargs):
        # Read filter/sort parameters before the list view runs.
        self.setup_filter(request)
        return super(OrgStories, self).dispatch(request, *args, **kwargs)

    def get_stories_query(self):
        return self.query

    def get_stories_sort(self):
        return self.sort

    def get_stories(self):
        # Base queryset consumed by FilteredStoriesMixin.get_queryset().
        return self.organization.stories

    def get_context_data(self, **kwargs):
        context = super(OrgStories, self).get_context_data(**kwargs)
        context['organization'] = self.organization
        if self.sort:
            # Split the sort spec into sign + field name for the template.
            if self.sort[0] == '-':
                context['sort_sign'] = "-"
                context['sort'] = self.sort[1:]
            else:
                context['sort_sign'] = "+"
                context['sort'] = self.sort
        context['query'] = self.query
        # NOTE(review): ``encoded_dict`` is not defined in this part of
        # the module -- presumably a helper defined elsewhere in the
        # file; confirm.
        context['current_query'] = urllib.urlencode(
            encoded_dict(self.query)
        )
        if self.sort:
            context['current_sort'] = urllib.urlencode({
                's': self.sort[1:] if self.sort[0] == '-' else self.sort
            })
        return context


org_stories = login_required(OrgStories.as_view())
class OrgInviteUser(OrgMixin, generic.FormView):
    """Invite a user (by email) to join an organization (admins only).

    Creates the user account if needed, records an inactive
    authorization on the organization and on each of its projects,
    then emails the invitee a signed activation link.
    """
    admin_only = True
    # Salt for the signed activation token embedded in the email link.
    salt = 'facile_user_invitation'
    template_name = "users/invite_user.html"
    email_template_name = "users/invitation_email.txt"
    email_subject_template_name = "users/invitation_email_subject.txt"
    form_class = InviteUserForm

    def get_context_data(self, **kwargs):
        data = super(OrgInviteUser, self).get_context_data(**kwargs)
        data['organization'] = self.organization
        return data

    def send_notification(self, user, is_admin):
        """Send the invitation email carrying a signed acceptance token."""
        context = {
            'site': RequestSite(self.request),
            'user': user,
            # The token embeds the target type/id and is verified on
            # acceptance with the same salt.
            'activation_key': signing.dumps({
                't': AUTH_TYPE_ORG,
                'id': self.organization.pk
            }, salt=self.salt),
            'secure': self.request.is_secure(),
            'organization': self.organization,
            'object': self.organization,
            'is_admin': is_admin,
        }
        body = loader.render_to_string(self.email_template_name,
                                       context).strip()
        subject = loader.render_to_string(self.email_subject_template_name,
                                          context).strip()
        send_mail(subject, body, settings.DEFAULT_FROM_EMAIL,
                  [user.email])

    def form_valid(self, form):
        """Create/upgrade the authorizations and send the invitation."""
        super(OrgInviteUser, self).form_valid(form)
        email = form.cleaned_data['email'].lower()
        admin = form.cleaned_data['admin']
        try:
            user = User.objects.get(email=email)
        except User.DoesNotExist:
            # Unknown address: create a placeholder account to attach
            # the pending authorization to.
            user = User.objects.create_user(email)
        try:
            auth = AuthorizationAssociation.objects.get(
                org=self.organization,
                user=user,
            )
            # Can upgrade to admin only (no downgrade)
            if not auth.is_admin and admin:
                auth.is_admin = True
                auth.save()
        except AuthorizationAssociation.DoesNotExist:
            # Inactive until the invitee accepts the invitation.
            AuthorizationAssociation.objects.create(
                org=self.organization,
                user=user,
                is_active=False,
                is_admin=admin,
            )
        # invite to all projects too
        for p in self.organization.projects.all():
            auth, create = AuthorizationAssociation.objects.get_or_create(
                project=p,
                user=user
            )
            if admin:
                auth.is_admin = admin
                auth.save()
        self.send_notification(user, admin)
        messages.success(self.request,
                         _('Invitation has been sent to {0}.'.format(email)))
        return redirect(self.get_success_url())

    def get_success_url(self):
        return reverse("org_users", args=(self.organization.pk,))


org_invite_user = login_required(OrgInviteUser.as_view())
class OrgRevokeAuthorization(OrgMixin, generic.DeleteView):
    """Revoke a user's authorization on an organization (admins only).

    Also removes the user's authorizations on every project of the
    organization and emails active users about the revocation.
    """
    admin_only = True
    template_name = "users/auth_confirm_delete.html"
    email_template_name = "users/revoke_email.txt"
    email_subject_template_name = "users/revoke_email_subject.txt"

    def dispatch(self, request, *args, **kwargs):
        # Resolve the authorization under revocation before dispatching.
        self.auth = get_object_or_404(AuthorizationAssociation,
                                      pk=kwargs['auth_id'])
        return super(OrgRevokeAuthorization, self).dispatch(request, *args,
                                                            **kwargs)

    def get_object(self, queryset=None):
        return self.auth

    def get_context_data(self, **kwargs):
        data = super(OrgRevokeAuthorization, self).get_context_data(**kwargs)
        data['organization'] = self.organization
        return data

    def send_notification(self, user):
        """Email the user that their access has been revoked."""
        context = {
            'site': RequestSite(self.request),
            'user': user,
            'secure': self.request.is_secure(),
            'organization': self.organization,
        }
        body = loader.render_to_string(self.email_template_name,
                                       context).strip()
        subject = loader.render_to_string(self.email_subject_template_name,
                                          context).strip()
        send_mail(subject, body, settings.DEFAULT_FROM_EMAIL,
                  [user.email])

    def delete(self, request, *args, **kwargs):
        user = self.auth.user
        # Only notify users who have actually activated their account.
        if user.is_active:
            self.send_notification(user)
        self.auth.delete()
        # NOTE: the in-memory instance keeps its field values after
        # delete(), so self.auth.org is still readable below.
        if self.auth.org:
            # Cascade the revocation to all the organization's projects.
            AuthorizationAssociation.objects.filter(
                project__in=self.auth.org.projects.all(),
                user=self.auth.user
            ).delete()
        messages.success(self.request,
                         _('User {0} has been revoked.'.format(user.email)))
        return redirect(reverse('org_users', args=(self.organization.pk,)))


org_auth_delete = login_required(OrgRevokeAuthorization.as_view())
class OrgEditAuthorization(OrgMixin, generic.UpdateView):
    """Edit a user's authorization on an organization (admins only).

    The admin flag is mirrored onto the user's authorizations on all of
    the organization's projects.
    """
    admin_only = True
    template_name = "users/auth_edit.html"
    form_class = AuthorizationAssociationForm

    def get_success_url(self):
        return reverse('org_users', args=(self.organization.pk,))

    def dispatch(self, request, *args, **kwargs):
        # Resolve the authorization being edited before dispatching.
        self.auth = get_object_or_404(AuthorizationAssociation,
                                      pk=kwargs['auth_id'])
        return super(OrgEditAuthorization, self).dispatch(request, *args,
                                                          **kwargs)

    def get_object(self, queryset=None):
        return self.auth

    def get_context_data(self, **kwargs):
        data = super(OrgEditAuthorization, self).get_context_data(**kwargs)
        data['organization'] = self.organization
        data['auth'] = self.auth
        return data

    def form_valid(self, form):
        # Saves the org-level authorization (sets self.object); its
        # redirect return value is intentionally replaced below.
        super(OrgEditAuthorization, self).form_valid(form)
        user = self.auth.user
        # Mirror the admin flag on all project-level authorizations.
        projects_auth = AuthorizationAssociation.objects.filter(
            user=self.object.user,
            project__in=self.organization.projects.values_list('pk', flat=True)
        )
        for auth in projects_auth.all():
            auth.is_admin = self.object.is_admin
            auth.save()
        create_event(self.request.user,
                     _("Authorization changed for "
                       "user {0}".format(user.email)),
                     organization=self.organization)
        messages.success(self.request,
                         _('Authorization for user {0} has '
                           'been changed.'.format(user.email)))
        return redirect(reverse('org_users', args=(self.organization.pk,)))


org_auth_edit = login_required(OrgEditAuthorization.as_view())
#############
# Projects #
###########
class ProjectMixin(NoCacheMixin):
    """
    Mixin to fetch a project by a view.

    Resolves ``kwargs['project_id']`` into ``self.project`` (404 when
    the user cannot read it).  When ``admin_only`` is set, non-admin
    readers get a 403 while everybody else gets a 404.

    Note: this docstring used to sit *after* ``admin_only``, making it a
    no-op string statement instead of the class docstring.
    """
    # When True, only project admins may use the view.
    admin_only = False
    no_cache = False

    def dispatch(self, request, *args, **kwargs):
        self.project = get_my_object_or_404(Project, request.user,
                                            pk=kwargs['project_id'])
        if self.admin_only and not self.project.can_admin(request.user):
            if self.project.can_read(request.user):
                return HttpResponseForbidden(_("Not authorized"))
            else:
                raise Http404
        self.request = request
        # Hook for subclasses needing work before the normal dispatch.
        self.pre_dispatch()
        # NoCacheMixin.dispatch (next in the MRO) already applies the
        # Cache-Control patch when ``no_cache`` is set; a redundant
        # second patch_cache_control call here has been removed.
        return super(ProjectMixin, self).dispatch(request, *args, **kwargs)

    def pre_dispatch(self):
        pass

    def get_context_data(self, **kwargs):
        context = super(ProjectMixin, self).get_context_data(**kwargs)
        context['project'] = self.project
        if self.project.org_id:
            context['organization'] = self.project.org
        return context
class ProjectDetail(ProjectMixin, generic.DetailView):
    """Project home page: events, dashboards and backlogs overview."""
    template_name = "backlog/project_detail.html"

    def get_object(self):
        # Already resolved by ProjectMixin.dispatch from the URL kwargs.
        return self.project

    def get_context_data(self, **kwargs):
        context = super(ProjectDetail, self).get_context_data(**kwargs)
        context['project'] = self.project
        context['events'] = self.project.events.select_related(
            "backlog", "backlog__project", "project", "org", "story",
            "user", "story__project")[:10]
        if self.project.dashboards:
            dashboards = self.project.dashboards.all()
            for d in dashboards:
                # Attach the full URL of each dashboard -- presumably for
                # sharing outside the app; confirm template usage.
                d.absolute_url = self.request.build_absolute_uri(
                    reverse("project_dashboard", args=(d.slug,)))
            context['dashboards'] = dashboards
        # Main backlog first, archived backlogs last, then by rank.
        backlogs = self.project.backlogs.order_by(
            "-is_main", "is_archive", "order"
        ).all()
        context['backlogs'] = backlogs
        context['archived_count'] = len(
            [b for b in backlogs if b.is_archive]
        )
        return context


project_detail = login_required(ProjectDetail.as_view())
class ProjectCreate(generic.CreateView):
    """Create a project, optionally inside an organization.

    For an organization project the creator must be an org admin and
    the org's authorizations are copied onto the new project; for a
    standalone project the creator gets an admin authorization.  A main
    backlog is always created.
    """
    template_name = "backlog/project_form.html"
    model = Project
    form_class = ProjectCreationForm

    def dispatch(self, request, *args, **kwargs):
        org_id = kwargs.pop("org_id", None)
        if org_id:
            try:
                self.org = Organization.my_organizations(
                    self.request.user).get(pk=org_id)
            except Organization.DoesNotExist:
                raise Http404
            if not self.org.can_admin(request.user):
                # Readers get a 403, strangers a 404 (no existence leak).
                if self.org.can_read(request.user):
                    return HttpResponseForbidden(_("Not authorized"))
                else:
                    raise Http404
        else:
            self.org = None
        self.request = request
        return super(ProjectCreate, self).dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super(ProjectCreate, self).get_context_data(**kwargs)
        context['organization'] = self.org
        return context

    def get_form_kwargs(self):
        kwargs = super(ProjectCreate, self).get_form_kwargs()
        kwargs['org'] = self.org
        kwargs['request'] = self.request
        return kwargs

    def form_valid(self, form):
        # CreateView.form_valid saves the project and sets self.object;
        # its redirect return value is intentionally replaced below.
        super(ProjectCreate, self).form_valid(form)
        # Every project starts with one main backlog.
        Backlog.objects.create(
            name=_("Main backlog"),
            description=_("This is the main backlog for the project."),
            project=self.object,
            kind=Backlog.TODO,
            is_main=True,
            order=1,
        )
        org = form.instance.org
        if org:
            # propagate authorizations (self should be in)
            for auth in AuthorizationAssociation.objects.filter(
                    org=org).all():
                AuthorizationAssociation.objects.create(
                    project=form.instance,
                    user_id=auth.user_id,
                    is_admin=auth.is_admin,
                    is_active=auth.is_active
                )
        else:
            # create self authorization
            AuthorizationAssociation.objects.create(
                project=form.instance,
                user=self.request.user,
                is_admin=True,
                is_active=True
            )
        create_event(
            self.request.user, project=self.object,
            organization=self.object.org,
            text="created this project"
        )
        messages.success(self.request,
                         _("Project successfully created."))
        return redirect(reverse("project_detail", args=(self.object.pk,)))


project_create = login_required(ProjectCreate.as_view())
class ProjectEdit(ProjectMixin, generic.UpdateView):
    """Edit a project's details (admins only)."""
    admin_only = True
    template_name = "backlog/project_form.html"
    form_class = ProjectEditionForm

    def get_object(self):
        # Already resolved by ProjectMixin.dispatch from the URL kwargs.
        return self.project

    def form_valid(self, form):
        updated = form.save()
        create_event(self.request.user, project=updated,
                     text="modified the project")
        messages.success(self.request, _("Project successfully updated."))
        return redirect(reverse("project_detail", args=(self.object.pk,)))


project_edit = login_required(ProjectEdit.as_view())
class ProjectDelete(ProjectMixin, generic.DeleteView):
    """Confirm and perform deletion of a project (admins only)."""

    admin_only = True
    template_name = "backlog/project_confirm_delete.html"

    def get_object(self):
        return self.project

    def delete(self, request, *args, **kwargs):
        self.project.delete()
        messages.success(request, _("Project successfully deleted."))
        return redirect(reverse('dashboard'))


project_delete = login_required(ProjectDelete.as_view())
class ProjectGenStats(ProjectMixin, generic.DeleteView):
    """Manually trigger (re)generation of a project's daily statistics."""

    admin_only = True
    template_name = "backlog/project_confirm_gen_stats.html"

    def get_object(self):
        return self.project

    def post(self, request, *args, **kwargs):
        project = self.project
        project.generate_daily_statistics()
        create_event(
            self.request.user,
            project=project,
            text=u"manually generated statistics for project backlog "
                 u"{0}".format(project.name),
        )
        messages.success(request, _("Statistics successfully generated."))
        return redirect(reverse('project_stats', args=(project.pk,)))


project_gen_stats = login_required(ProjectGenStats.as_view())
class ProjectUsers(ProjectMixin, generic.TemplateView):
    """List the users who have access to a project."""

    template_name = "backlog/project_users.html"

    def get_context_data(self, **kwargs):
        ctx = super(ProjectUsers, self).get_context_data(**kwargs)
        ctx.update(project=self.project)
        return ctx


project_users = login_required(ProjectUsers.as_view())
class ProjectStories(ProjectMixin, FilteredStoriesMixin, generic.ListView):
    """Paginated, filterable and sortable list of a project's stories."""
    template_name = "backlog/project_stories.html"
    paginate_by = 30

    def dispatch(self, request, *args, **kwargs):
        # Parse query/sort parameters before the normal dispatch runs.
        self.setup_filter(request)
        return super(ProjectStories, self).dispatch(request, *args, **kwargs)

    def get_stories_query(self):
        # Hook for FilteredStoriesMixin: current filter dict.
        return self.query

    def get_stories_sort(self):
        # Hook for FilteredStoriesMixin: current sort expression.
        return self.sort

    def get_stories(self):
        # Hook for FilteredStoriesMixin: base story set to filter.
        return self.project.stories

    def get_context_data(self, **kwargs):
        context = super(ProjectStories, self).get_context_data(**kwargs)
        context['project'] = self.project
        if self.sort:
            # A leading '-' means descending; the template receives the bare
            # field name plus an explicit sign.
            if self.sort[0] == '-':
                context['sort_sign'] = "-"
                context['sort'] = self.sort[1:]
            else:
                context['sort_sign'] = "+"
                context['sort'] = self.sort
        context['query'] = self.query
        # encoded_dict (defined later in this module) works around
        # urllib.urlencode's unicode handling on Python 2.
        context['current_query'] = urllib.urlencode(
            encoded_dict(self.query)
        )
        if self.sort:
            context['current_sort'] = urllib.urlencode({
                's': self.sort[1:] if self.sort[0] == '-' else self.sort
            })
        return context
project_stories = login_required(ProjectStories.as_view())
class ProjectBacklogs(ProjectMixin, generic.TemplateView):
    """Board view showing every non-archived backlog of a project."""

    template_name = "backlog/project_backlogs.html"
    no_cache = True

    def get_context_data(self, **kwargs):
        ctx = super(ProjectBacklogs, self).get_context_data(**kwargs)
        active_backlogs = [backlog
                           for backlog in self.project.backlogs.all()
                           if not backlog.is_archive]
        ctx['project'] = self.project
        ctx['backlog_list'] = active_backlogs
        ctx['ws_url'] = get_websocket_url(self.request)
        return ctx


project_backlogs = login_required(ProjectBacklogs.as_view())
class ProjectStats(ProjectMixin, generic.TemplateView):
    """Statistics charts for a project over the last N days.

    N comes from the ``days`` GET parameter; DEFAULT_DAYS is used when the
    parameter is absent or not a plain positive integer literal.
    """
    DEFAULT_DAYS = 45
    template_name = "backlog/project_stats.html"

    def dispatch(self, request, *args, **kwargs):
        days = request.GET.get('days', "")
        # isdigit() rejects "", signs and decimals -> fall back to default.
        self.days = int(days) if days.isdigit() else self.DEFAULT_DAYS
        return super(ProjectStats, self).dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super(ProjectStats, self).get_context_data(**kwargs)
        context['project'] = self.project
        # Last `days` statistic snapshots; presumably ordered newest-first
        # since base[0] feeds the pie charts -- TODO confirm model ordering.
        base = list(self.project.statistics.all()[:self.days])
        if not base:
            return context

        def compute_series(name, call):
            # One chart series per story status, colored consistently.
            return {
                'name': status_for(name),
                'color': STATUS_COLORS[name],
                'data': [s.time_series(call) for s in base],
            }
        context['all_points'] = [
            compute_series(Status.TODO, "all.by_status.to_do.points"),
            compute_series(Status.IN_PROGRESS,
                           "all.by_status.in_progress.points"),
            compute_series(Status.COMPLETED, "all.by_status.completed.points"),
            compute_series(Status.REJECTED, "all.by_status.rejected.points"),
            compute_series(Status.ACCEPTED, "all.by_status.accepted.points"),
        ]
        context['main_points'] = [
            compute_series(Status.TODO, "main.by_status.to_do.points"),
            compute_series(Status.IN_PROGRESS,
                           "main.by_status.in_progress.points"),
            compute_series(Status.COMPLETED,
                           "main.by_status.completed.points"),
            compute_series(Status.REJECTED, "main.by_status.rejected.points"),
            compute_series(Status.ACCEPTED, "main.by_status.accepted.points"),
        ]
        # Pie charts reflect the latest snapshot only.
        s = base[0]
        if 'main' in s.data:
            context['main_status_pie'] = [pie_element(k, v) for k, v in
                                          s.data['main']['by_status'].items()]
        context['project_status_pie'] = [pie_element(k, v) for k, v in
                                         s.data['all']['by_status'].items()]
        return context
project_stats = login_required(ProjectStats.as_view())
class ProjectEditAuthorization(ProjectMixin, generic.UpdateView):
    """Edit one user's authorization on a project (admins only)."""
    admin_only = True
    template_name = "users/auth_edit.html"
    form_class = AuthorizationAssociationForm

    def get_success_url(self):
        return reverse('project_users', args=(self.project.pk,))

    def dispatch(self, request, *args, **kwargs):
        # Resolve the authorization from the URL before normal dispatch.
        self.auth = get_object_or_404(AuthorizationAssociation,
                                      pk=kwargs['auth_id'])
        return super(ProjectEditAuthorization, self).dispatch(request, *args,
                                                              **kwargs)

    def get_object(self, queryset=None):
        return self.auth

    def get_context_data(self, **kwargs):
        data = super(ProjectEditAuthorization, self).get_context_data(**kwargs)
        data['project'] = self.project
        data['auth'] = self.auth
        return data

    def form_valid(self, form):
        super(ProjectEditAuthorization, self).form_valid(form)
        user = self.auth.user
        # NOTE(review): _() is applied to an already-formatted string below,
        # so a translation catalog lookup can never match; it should probably
        # be _("... {0} ...").format(user.email) -- TODO confirm with i18n.
        create_event(self.request.user,
                     _("Authorization changed for "
                       "user {0}".format(user.email)),
                     project=self.project)
        messages.success(self.request,
                         _('Authorization for user {0} has '
                           'been changed.'.format(user.email)))
        return redirect(reverse('project_users', args=(self.project.pk,)))
project_auth_edit = login_required(ProjectEditAuthorization.as_view())
# Backlogs
class ProjectBacklogMixin(BackMixin):
    admin_only = False
    # NOTE(review): the bare string below is not the class docstring (it is
    # not the first statement in the class body, so __doc__ stays None); it
    # is kept verbatim to avoid any change in evaluated code.
    """
    Mixin to fetch a project and backlog by a view.
    """
    def dispatch(self, request, *args, **kwargs):
        # Resolve project and backlog from the URL.  404 hides objects the
        # user should not know about; 403 is used when the user can read the
        # project but lacks admin rights.
        project_id = kwargs['project_id']
        self.project = get_my_object_or_404(Project, request.user, project_id)
        try:
            self.backlog = Backlog.objects.select_related().get(
                pk=kwargs['backlog_id'])
        except Backlog.DoesNotExist:
            raise Http404('Not found.')
        if self.backlog.project.pk != self.project.pk:
            # The backlog exists but belongs to another project.
            raise Http404('No matches found.')
        if self.admin_only and not self.project.can_admin(request.user):
            if self.project.can_read(request.user):
                return HttpResponseForbidden(_("Not authorized"))
            else:
                raise Http404
        self.request = request
        # pre_dispatch may short-circuit by returning a response itself.
        response = self.pre_dispatch(request, **kwargs)
        if not response:
            response = super(ProjectBacklogMixin,
                             self).dispatch(request, *args, **kwargs)
        self.post_dispatch(request, response)
        return response

    def pre_dispatch(self, request, **kwargs):
        # Hook: return a response to bypass normal dispatch, or None.
        pass

    def post_dispatch(self, request, response):
        # Hook: observe the response after dispatch completed.
        pass

    def get_context_data(self, **kwargs):
        context = super(ProjectBacklogMixin, self).get_context_data(**kwargs)
        context['project'] = self.project
        context['backlog'] = self.backlog
        return context
class ProjectBacklogCreate(ProjectMixin, generic.CreateView):
    """Create a new backlog inside a project (admins only)."""

    admin_only = True
    template_name = "backlog/backlog_form.html"
    model = Backlog
    form_class = BacklogCreationForm

    def get_form_kwargs(self):
        form_kwargs = super(ProjectBacklogCreate, self).get_form_kwargs()
        form_kwargs.update(holder=self.project)
        return form_kwargs

    def get_context_data(self, **kwargs):
        ctx = super(ProjectBacklogCreate, self).get_context_data(**kwargs)
        ctx.update(project=self.project)
        return ctx

    def form_valid(self, form):
        self.object = form.save()
        create_event(self.request.user, project=self.project,
                     text="created this backlog", backlog=self.object)
        messages.success(self.request, _("Backlog successfully created."))
        return redirect(reverse("project_backlogs", args=(self.project.pk,)))


project_backlog_create = login_required(ProjectBacklogCreate.as_view())
class ProjectBacklogEdit(ProjectBacklogMixin, generic.UpdateView):
    """Edit a backlog's properties (admins only)."""
    admin_only = True
    template_name = "backlog/backlog_form.html"
    form_class = BacklogEditionForm

    def get_object(self):
        return self.backlog

    def get_context_data(self, **kwargs):
        context = super(ProjectBacklogEdit, self).get_context_data(**kwargs)
        # Cancel returns to wherever the user came from (BackMixin's `back`).
        if self.back == "project" or not self.object:
            context['cancel_url'] = reverse("project_backlogs", args=(
                self.project.pk,
            ))
        else:
            context['cancel_url'] = reverse("project_detail", args=(
                self.project.pk,
            ))
        return context

    def form_valid(self, form):
        backlog = form.save()
        create_event(
            self.request.user, project=self.project,
            text="modified the backlog",
            backlog=self.object
        )
        messages.success(self.request,
                         _("Backlog successfully updated."))
        if self.back == 'project':
            # Back to the board, anchored on the edited backlog.
            return redirect("{0}#backlog-{1}".format(
                reverse("project_backlogs", args=(
                    self.project.pk,
                )), backlog.pk))
        return redirect(reverse("project_backlogs", args=(self.project.pk,)))
project_backlog_edit = login_required(ProjectBacklogEdit.as_view())
class ProjectBacklogDelete(ProjectBacklogMixin, generic.DeleteView):
    """Confirm and delete a backlog of a project (admins only)."""

    admin_only = True
    template_name = "backlog/backlog_confirm_delete.html"

    def get_object(self):
        return self.backlog

    def delete(self, request, *args, **kwargs):
        doomed = self.backlog
        # Delete first; the in-memory instance keeps its name for the event.
        doomed.delete()
        create_event(
            self.request.user, project=self.project,
            text=u"deleted backlog {0}".format(doomed.name),
        )
        messages.success(request, _("Backlog successfully deleted."))
        return redirect(reverse('project_backlogs', args=(self.project.pk,)))


project_backlog_delete = login_required(ProjectBacklogDelete.as_view())
class ProjectBacklogArchive(ProjectBacklogMixin, generic.DeleteView):
    """Confirm and archive a backlog (admins only)."""

    admin_only = True
    template_name = "backlog/backlog_confirm_archive.html"

    def get_object(self):
        return self.backlog

    def post(self, request, *args, **kwargs):
        archived = self.backlog
        archived.archive()
        create_event(
            self.request.user, project=self.project,
            text=u"archived backlog {0}".format(archived.name),
        )
        messages.success(request, _("Backlog successfully archived."))
        return redirect(reverse('project_detail', args=(self.project.pk,)))


project_backlog_archive = login_required(ProjectBacklogArchive.as_view())
class ProjectBacklogRestore(ProjectBacklogMixin, generic.DeleteView):
    """Bring an archived backlog back into the project (admins only)."""

    admin_only = True
    template_name = "backlog/backlog_confirm_restore.html"

    def get_object(self):
        return self.backlog

    def post(self, request, *args, **kwargs):
        restored = self.backlog
        restored.restore()
        create_event(
            self.request.user, project=self.project,
            text=u"restored backlog {0}".format(restored.name),
        )
        messages.success(request, _("Backlog successfully restored."))
        return redirect(reverse('project_detail', args=(self.project.pk,)))


project_backlog_restore = login_required(ProjectBacklogRestore.as_view())
class ProjectRestore(ProjectMixin, generic.DeleteView):
    """Restore a previously deleted project (admins only)."""

    admin_only = True
    template_name = "backlog/project_confirm_restore.html"

    def get_object(self):
        return self.project

    def post(self, request, *args, **kwargs):
        self.project.restore()
        create_event(
            self.request.user, project=self.project,
            text=u"restored project {0}".format(self.project.name),
        )
        messages.success(request, _("Project successfully restored."))
        # Organization projects return to the org page, personal ones to the
        # dashboard.
        if self.project.org_id:
            destination = reverse('org_detail', args=(self.project.org_id,))
        else:
            destination = reverse('dashboard')
        return redirect(destination)


project_restore = login_required(ProjectRestore.as_view())
#############
# Backlogs #
###########
class BacklogMixin(NoCacheMixin):
    admin_only = False
    # NOTE(review): bare string below is not the class docstring (not the
    # first statement in the class body); kept verbatim, typo included.
    """
    Mixin to fetch a a backlog by a view.
    """
    def dispatch(self, request, *args, **kwargs):
        # Fetch the backlog; 403 for readable-but-not-admin users, 404
        # otherwise (hides existence of the object).
        backlog_id = kwargs['backlog_id']
        self.backlog = get_my_object_or_404(Backlog, request.user, backlog_id)
        if self.admin_only and not self.backlog.can_admin(request.user):
            if self.backlog.can_read(request.user):
                return HttpResponseForbidden(_("Not authorized"))
            else:
                raise Http404
        self.request = request
        # pre_dispatch may return a response to short-circuit dispatch.
        render = self.pre_dispatch(request, **kwargs)
        if render:
            return render
        return super(BacklogMixin, self).dispatch(request, *args,
                                                  **kwargs)

    def pre_dispatch(self, request, **kwargs):
        # Hook for subclasses; return a response or None.
        pass

    def get_context_data(self, **kwargs):
        context = super(BacklogMixin, self).get_context_data(**kwargs)
        context['backlog'] = self.backlog
        return context
class BacklogSetMain(BacklogMixin, generic.FormView):
    """Mark a backlog as the single "main" backlog of its holder."""
    template_name = "backlog/backlog_confirm_main.html"
    form_class = forms.Form

    def get_object(self, queryset=None):
        return self.backlog

    def form_valid(self, form):
        # A backlog belongs to either an organization or a project; pick the
        # sibling set to clear and the page to return to accordingly.
        if self.backlog.org_id:
            backlog_list = self.backlog.org.backlogs
            follow = redirect(reverse('org_detail',
                                      args=(self.backlog.org_id,)))
        elif self.backlog.project_id:
            backlog_list = self.backlog.project.backlogs
            follow = redirect(reverse('project_detail',
                                      args=(self.backlog.project_id,)))
        else:
            raise ValueError("Backlog has no project nor organization")
        # Clear the flag on every sibling, then set it on this backlog.
        backlog_list.update(is_main=False)
        self.backlog.is_main = True
        self.backlog.save()
        # NOTE(review): organization/project receive raw ids here while other
        # call sites pass model instances -- confirm create_event accepts
        # both forms.
        create_event(
            self.request.user, backlog=self.backlog,
            organization=self.backlog.org_id,
            project=self.backlog.project_id,
            text="Set backlog as main",
        )
        messages.success(self.request,
                         _("Backlog successfully set as main."))
        return follow
backlog_set_main = login_required(BacklogSetMain.as_view())
class BacklogDetail(BacklogMixin, generic.TemplateView):
    """Detail page of one backlog with its ordered stories."""

    template_name = "backlog/backlog_detail.html"
    no_cache = True

    def get_context_data(self, **kwargs):
        ctx = super(BacklogDetail, self).get_context_data(**kwargs)
        ctx['stories'] = self.backlog.ordered_stories.select_related(
            "project", "backlog")
        ctx['ws_url'] = get_websocket_url(self.request)
        # ?simple=... switches the template to a stripped-down rendering.
        ctx['simple'] = self.request.GET.get("simple", False)
        return ctx


backlog_detail = login_required(BacklogDetail.as_view())
############
# Stories #
##########
class StoryMixin(BackMixin):
    """
    Mixin to fetch a story, backlog and project used by a view.
    """
    def dispatch(self, request, *args, **kwargs):
        project_id = kwargs['project_id']
        backlog_id = kwargs.get('backlog_id', None)
        # `direct` flags navigation that did not come through a board page.
        if request.method == "GET":
            self.direct = request.GET.get('direct', False)
        elif request.method == "POST":
            self.direct = request.POST.get('direct', False)
        self.project = get_my_object_or_404(Project, request.user, project_id)
        try:
            self.story = UserStory.objects.select_related().get(
                pk=kwargs['story_id'])
        except UserStory.DoesNotExist:
            raise Http404('Not found.')
        if self.story.project.pk != self.project.pk:
            raise Http404('No matches found.')
        if backlog_id and self.story.backlog.pk != int(backlog_id):
            raise Http404('No matches found.')
        if not self.story.can_read(request.user):
            raise Http404('No matches found.')
        self.project = self.story.project
        # The backlog is only bound when the URL carried a backlog id.
        self.backlog = self.story.backlog if backlog_id else None
        self.pre_dispatch()
        return super(StoryMixin, self).dispatch(request, *args, **kwargs)

    def pre_dispatch(self):
        # Hook for subclasses, called after the story has been resolved.
        pass

    def get_context_data(self, **kwargs):
        context = super(StoryMixin, self).get_context_data(**kwargs)
        context['project'] = self.project
        if self.back == "project":
            # Back to the board, anchored on this story.
            context['cancel_url'] = "{0}#story-{1}".format(
                reverse("project_backlogs", args=(self.project.pk,)),
                self.story.pk
            )
        else:
            context['cancel_url'] = reverse("story_detail", args=(
                self.project.pk, self.story.pk))
        context['story'] = self.story
        context['direct'] = self.direct
        return context
class StoryDetail(StoryMixin, generic.DetailView):
    """Display a single user story."""

    template_name = "backlog/story_detail.html"

    def get_object(self):
        return self.story

    def get_context_data(self, **kwargs):
        ctx = super(StoryDetail, self).get_context_data(**kwargs)
        ctx.update(project=self.project, story=self.story)
        return ctx


story_detail = login_required(StoryDetail.as_view())
class StoryCreate(ProjectBacklogMixin, generic.CreateView):
    """Create a story in a backlog, optionally copying an existing story."""
    template_name = "backlog/story_form.html"
    model = UserStory
    form_class = StoryCreationForm

    def dispatch(self, request, *args, **kwargs):
        # ?src_story_id=<pk> pre-fills the form from another story the user
        # can read; unknown or unreadable stories are silently ignored.
        src_story_id = request.GET.get('src_story_id', None)
        if src_story_id:
            try:
                story = UserStory.objects.get(pk=src_story_id)
                if not story.can_read(request.user):
                    story = None
            except UserStory.DoesNotExist:
                story = None
        else:
            story = None
        self.src_story = story
        return super(StoryCreate, self).dispatch(request, *args, **kwargs)

    def get_form_kwargs(self):
        kwargs = super(StoryCreate, self).get_form_kwargs()
        kwargs['project'] = self.project
        if self.backlog:
            kwargs['backlog'] = self.backlog
        kwargs['_back'] = self.back
        kwargs['source_story'] = self.src_story
        return kwargs

    def get_context_data(self, **kwargs):
        context = super(StoryCreate, self).get_context_data(**kwargs)
        context['project'] = self.project
        context['_back'] = self.back
        if self.back:
            context['cancel_url'] = self.back
        else:
            context['cancel_url'] = reverse("project_backlogs", args=(
                self.project.pk,))
        return context

    def form_valid(self, form):
        super(StoryCreate, self).form_valid(form)
        create_event(
            self.request.user, project=self.project,
            text="created this story",
            backlog=self.backlog,
            story=self.object,
        )
        messages.success(self.request,
                         _("Story successfully created."))
        if self.back:
            return redirect(self.back)
        # Default: back to the board, anchored on the new story.
        return redirect(
            "{0}#story-{1}".format(reverse("project_backlogs", args=(
                self.project.pk,
            )), self.object.pk))
story_create = login_required(StoryCreate.as_view())
class StoryEdit(StoryMixin, generic.UpdateView):
    """Edit a story; notifies watchers when status or points change."""
    template_name = "backlog/story_form.html"
    form_class = StoryEditionForm
    # Fields whose old values are captured so property_changed can compare.
    notify_changed = ('status', 'points')

    def get_object(self):
        return self.story

    def pre_dispatch(self):
        # Snapshot the watched fields before the form mutates the story.
        self._old_values = {}
        for k in self.notify_changed:
            self._old_values[k] = getattr(self.story, k)

    def get_back_url(self):
        # Destination after save/cancel, depending on where the user came
        # from (BackMixin's `back`), always anchored on this story.
        if self.back == "project":
            return "{0}#story-{1}".format(
                reverse("project_backlogs", args=(self.project.pk,)),
                self.object.pk)
        elif self.back == "organization":
            return "{0}#story-{1}".format(
                reverse("org_sprint_planning", args=(
                    self.object.project.org.pk,)
                ),
                self.object.pk)
        elif self.back == "backlog":
            return "{0}#story-{1}".format(
                reverse("backlog_detail", args=(self.object.backlog_id,)),
                self.object.pk)
        return self.object.get_absolute_url()

    def get_context_data(self, **kwargs):
        # NOTE(review): super(StoryMixin, ...) deliberately skips
        # StoryMixin.get_context_data (which computes a different cancel_url)
        # -- confirm this is intentional before "fixing" it.
        context = super(StoryMixin, self).get_context_data(**kwargs)
        context['cancel_url'] = self.get_back_url()
        context['backlog'] = self.object.backlog
        context['project'] = self.project
        return context

    def form_valid(self, form):
        story = form.save()
        create_event(
            self.request.user, project=self.project,
            text="modified the story",
            backlog=self.backlog,
            story=story,
        )
        # Fire notifications based on the values snapshotted in pre_dispatch.
        story.property_changed(self.request.user, **self._old_values)
        messages.success(self.request,
                         _("Story successfully updated."))
        return redirect(self.get_back_url())
story_edit = login_required(StoryEdit.as_view())
class StoryDelete(StoryMixin, generic.DeleteView):
    """Confirm and delete a user story."""

    template_name = "backlog/story_confirm_delete.html"

    def get_object(self):
        return self.story

    def delete(self, request, *args, **kwargs):
        doomed = self.story
        # Delete first; the in-memory instance keeps code/text for the event.
        doomed.delete()
        create_event(
            self.request.user, project=self.project,
            text=u"deleted story {0}, {1}".format(doomed.code, doomed.text),
            backlog=self.backlog
        )
        messages.success(request, _("Story successfully deleted."))
        return redirect(reverse('project_backlogs',
                                args=(self.project.pk,)))


story_delete = login_required(StoryDelete.as_view())
class StoriesMixin(object):
    """Resolve the story set for a backlog, project or organization.

    The holder is the first of backlog_id / project_id / org_id found in the
    request parameters.  A POST with none of them yields an empty story set;
    a GET with none of them is a 404.
    """
    def dispatch(self, request, *args, **kwargs):
        if request.method == "GET":
            source = request.GET
        elif request.method == "POST":
            source = request.POST
        else:
            source = {}
        backlog_id = source.get('backlog_id', None)
        project_id = source.get('project_id', None)
        org_id = source.get('org_id', None)
        if backlog_id:
            self.object = get_object_or_404(Backlog, pk=backlog_id)
            self.stories = self.object.stories
            # Backlogs carry a user-defined ordering.
            self.sort = "order"
            self.object_type = "backlog"
        elif project_id:
            self.object = get_object_or_404(Project, pk=project_id)
            self.stories = self.object.stories
            self.object_type = "project"
        elif org_id:
            self.object = get_object_or_404(Organization, pk=org_id)
            self.stories = self.object.stories
            self.object_type = "org"
        elif request.method != "POST":
            raise Http404
        else:
            self.object = None
            self.object_type = "none"
            self.stories = UserStory.objects.none()
        if self.object and not self.object.can_read(request.user):
            raise Http404
        self.stories = self.stories.select_related(
            "project", "backlog", "project__org")
        self.pre_dispatch()
        return super(StoriesMixin, self).dispatch(request, *args, **kwargs)

    def pre_dispatch(self):
        # Hook for subclasses.
        pass

    def get_context_data(self, **kwargs):
        data = super(StoriesMixin, self).get_context_data(**kwargs)
        data['stories'] = self.stories.all()
        # back_url points at the listing page of the resolved holder.
        if self.object_type == "project":
            data['back_url'] = reverse("project_stories", args=(
                self.object.pk,))
        elif self.object_type == "org":
            data['back_url'] = reverse("org_stories", args=(
                self.object.pk,))
        elif self.object_type == "backlog":
            if self.object.project_id:
                data['back_url'] = reverse("project_backlogs", args=(
                    self.object.project_id,))
            else:
                data['back_url'] = reverse("org_sprint_planning", args=(
                    self.object.org_id,))
        return data
class PrintStories(StoriesMixin, generic.TemplateView):
    """Render the stories checked in the submitted form as a printable PDF."""

    template_name = "backlog/print_stories.html"

    def post(self, request, *args, **kwargs):
        """Collect the checked ``story-<pk>`` fields and generate the PDF.

        ``print-format`` and ``print-side`` POST fields are passed through to
        the PDF generator.
        """
        # IDIOM: startswith + comprehension instead of .find(...) == 0 loop.
        ids = [key.split("-")[1] for key in request.POST
               if key.startswith("story-")]
        # SECURITY NOTE (pre-existing): stories are fetched by pk only, with
        # no per-story permission check -- a crafted POST could print stories
        # the user cannot read.  TODO: restrict to readable stories.
        stories = UserStory.objects.filter(pk__in=ids)
        print_format = request.POST.get("print-format")
        print_side = request.POST.get("print-side")
        name = "Backlogman-user-stories"
        return generate_pdf(stories, name, print_side=print_side,
                            print_format=print_format)


print_stories = login_required(PrintStories.as_view())
class ExportStories(StoriesMixin, FilteredStoriesMixin, generic.TemplateView):
    """Export the (filtered) stories of a backlog/project/org as Excel."""

    template_name = "backlog/export_stories.html"

    def dispatch(self, request, *args, **kwargs):
        # Parse filter parameters before StoriesMixin resolves the holder.
        self.setup_filter(request)
        return super(ExportStories, self).dispatch(request, *args, **kwargs)

    def get_stories(self):
        # Source of stories for FilteredStoriesMixin.
        return self.stories

    def get(self, request, *args, **kwargs):
        # BUG FIX: the original format string was u"...{0}[{0}]", which
        # repeated the object type and silently ignored the pk argument.
        name = u"Backlogman-user-stories-{0}[{1}]".format(
            self.object_type, self.object.pk
        )
        title = u"Backlogman: {0} - {1}".format(
            self.object_type,
            self.object
        )
        return export_excel(self.get_queryset(), name, title)


export_stories = login_required(ExportStories.as_view())
class ProjectInviteUser(ProjectMixin, generic.FormView):
    """Invite a user (by email) to a project, mailing them a signed link."""
    admin_only = True
    # Salt for the signed invitation token; InvitationActivate verifies the
    # token with the same salt.
    salt = 'facile_user_invitation'
    template_name = "users/invite_user.html"
    email_template_name = "users/invitation_email.txt"
    email_subject_template_name = "users/invitation_email_subject.txt"
    form_class = InviteUserForm

    def get_context_data(self, **kwargs):
        data = super(ProjectInviteUser, self).get_context_data(**kwargs)
        data['project'] = self.project
        return data

    def send_notification(self, user, is_admin):
        # Build and send the invitation email with a signed activation key.
        context = {
            'site': RequestSite(self.request),
            'user': user,
            'activation_key': signing.dumps({
                't': AUTH_TYPE_PROJECT,
                'id': self.project.pk
            }, salt=self.salt),
            'secure': self.request.is_secure(),
            'project': self.project,
            'object': self.project,
            'is_admin': is_admin,
        }
        body = loader.render_to_string(self.email_template_name,
                                       context).strip()
        subject = loader.render_to_string(self.email_subject_template_name,
                                          context).strip()
        send_mail(subject, body, settings.DEFAULT_FROM_EMAIL,
                  [user.email])

    def form_valid(self, form):
        super(ProjectInviteUser, self).form_valid(form)
        email = form.cleaned_data['email'].lower()
        admin = form.cleaned_data['admin']
        try:
            user = User.objects.get(email=email)
        except User.DoesNotExist:
            # Unknown address: create an account for the invitee on the fly.
            user = User.objects.create_user(email)
        try:
            auth = AuthorizationAssociation.objects.get(
                project=self.project,
                user=user,
            )
            # Can upgrade to admin only (no downgrade)
            if not auth.is_admin and admin:
                auth.is_admin = True
                auth.save()
        except AuthorizationAssociation.DoesNotExist:
            # Pending (inactive) authorization until the user accepts.
            AuthorizationAssociation.objects.create(
                project=self.project,
                user=user,
                is_active=False,
                is_admin=admin,
            )
        self.send_notification(user, admin)
        # NOTE(review): _() wraps an already-formatted string; translation
        # lookup cannot match -- should be _("...").format(email).
        messages.success(self.request,
                         _('Invitation has been sent to {0}.'.format(email)))
        return redirect(self.get_success_url())

    def get_success_url(self):
        return reverse("project_users", args=(self.project.pk,))
project_invite_user = login_required(ProjectInviteUser.as_view())
class InvitationActivate(generic.TemplateView):
    """Landing page for invitation links: verifies the token and activates.

    The token is signed with ProjectInviteUser.salt, expires after 7 days,
    and encodes the target type (project/org) plus its id.
    """
    template_name = "users/invitation_completed.html"

    def dispatch(self, request, *args, **kwargs):
        token = kwargs['token']
        # signing.loads raises on tampered or expired tokens.
        src = signing.loads(token, salt=ProjectInviteUser.salt,
                            max_age=60*60*24*7)
        invitation_type = src['t']
        object_id = src['id']
        if object_id != int(kwargs['object_id']):
            # URL and token must agree on the target object.
            raise Http404()
        auth_kwargs = {
            'user': request.user
        }
        if invitation_type == AUTH_TYPE_PROJECT:
            auth_kwargs['project_id'] = object_id
            self.project = get_object_or_404(Project, pk=object_id)
            self.organization = None
        elif invitation_type == AUTH_TYPE_ORG:
            auth_kwargs['org_id'] = object_id
            self.organization = get_object_or_404(Organization, pk=object_id)
            self.project = None
        else:
            raise Http404
        try:
            auth = AuthorizationAssociation.objects.get(**auth_kwargs)
        except AuthorizationAssociation.DoesNotExist:
            # Logged in as a different user than the invited one.
            return render_to_response('error_page.html', {
                'error': _("This invitation does not match your current user "
                           "(%s). Check that you're logged in with the same "
                           "user as the email you "
                           "received.") % request.user.email
            }, context_instance=RequestContext(request))
        if auth.is_active:
            return render_to_response('error_page.html', {
                'error': _("You already accepted this invitation.")
            }, context_instance=RequestContext(request))
        auth.activate(request.user)
        return super(InvitationActivate, self).dispatch(
            request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        data = super(InvitationActivate, self).get_context_data(**kwargs)
        data['project'] = self.project
        data['organization'] = self.organization
        return data
invitation_activate = login_required(InvitationActivate.as_view())
class ProjectRevokeAuthorization(ProjectMixin, generic.DeleteView):
    """Remove a user's access to a project and notify them by email."""
    admin_only = True
    template_name = "users/auth_confirm_delete.html"
    email_template_name = "users/revoke_email.txt"
    email_subject_template_name = "users/revoke_email_subject.txt"

    def dispatch(self, request, *args, **kwargs):
        # Resolve the authorization from the URL before normal dispatch.
        self.auth = get_object_or_404(AuthorizationAssociation,
                                      pk=kwargs['auth_id'])
        return super(ProjectRevokeAuthorization, self).dispatch(request, *args,
                                                                **kwargs)

    def get_object(self, queryset=None):
        return self.auth

    def get_context_data(self, **kwargs):
        data = super(ProjectRevokeAuthorization,
                     self).get_context_data(**kwargs)
        data['project'] = self.project
        return data

    def send_notification(self, user):
        # Render and send the "access revoked" email.
        context = {
            'site': RequestSite(self.request),
            'user': user,
            'secure': self.request.is_secure(),
            'project': self.project,
        }
        body = loader.render_to_string(self.email_template_name,
                                       context).strip()
        subject = loader.render_to_string(self.email_subject_template_name,
                                          context).strip()
        send_mail(subject, body, settings.DEFAULT_FROM_EMAIL,
                  [user.email])

    def delete(self, request, *args, **kwargs):
        user = self.auth.user
        # Only active accounts are notified; pending invitees are not.
        if user.is_active:
            self.send_notification(user)
        self.auth.delete()
        messages.success(self.request,
                         _('User {0} has been revoked.'.format(user.email)))
        return redirect(reverse('project_users', args=(self.project.pk,)))
project_auth_delete = login_required(ProjectRevokeAuthorization.as_view())
class NotificationView(generic.TemplateView):
    """Show the logged-in user's pending (not yet accepted) invitations."""

    template_name = "users/notifications.html"

    def get_context_data(self, **kwargs):
        ctx = super(NotificationView, self).get_context_data(**kwargs)
        pending = AuthorizationAssociation.objects.filter(
            user=self.request.user,
            is_active=False,
        )
        ctx['invitations'] = pending
        return ctx


notification_view = login_required(NotificationView.as_view())
@require_POST
@login_required
@csrf_protect
def invitation_accept(request, auth_id):
    """Accept a pending project/organization invitation.

    Raises Http404 when the invitation does not exist or does not belong to
    the current user.
    """
    # ROBUSTNESS FIX: .get() raised an unhandled DoesNotExist (HTTP 500) for
    # unknown ids; get_object_or_404 turns that into a 404, consistent with
    # the other views in this module.
    auth = get_object_or_404(AuthorizationAssociation, pk=auth_id)
    if auth.user != request.user:
        raise Http404
    if not auth.is_active:
        auth.activate(request.user)
    if auth.project_id:
        messages.success(request, _("You are now a member of this project"))
        return redirect(reverse("project_backlogs", args=(auth.project_id,)))
    else:
        messages.success(request, _("You are now a member of this "
                                    "organization"))
        return redirect(reverse("org_detail", args=(auth.org_id,)))
@require_POST
@login_required
@csrf_protect
def invitation_decline(request, auth_id):
    """Decline a pending invitation addressed to the current user.

    Active (already accepted) authorizations are left untouched; only the
    pending invitation record is removed.
    """
    # ROBUSTNESS FIX: .get() raised an unhandled DoesNotExist (HTTP 500) for
    # unknown ids; get_object_or_404 makes it a 404 like the rest of the file.
    auth = get_object_or_404(AuthorizationAssociation, pk=auth_id)
    if auth.user != request.user:
        raise Http404
    if not auth.is_active:
        auth.delete()
    messages.info(request, _("Invitation has been declined"))
    return redirect(reverse("my_notifications"))
def encoded_dict(in_dict):
    """Return a copy of *in_dict* with unicode values encoded as UTF-8 bytes.

    Python 2 helper (uses ``iteritems``/``unicode``): prepares a dict for
    ``urllib.urlencode``, which chokes on non-ASCII unicode values.
    """
    out_dict = {}
    for k, v in in_dict.iteritems():
        if isinstance(v, unicode):
            v = v.encode('utf8')
        elif isinstance(v, str):
            # Must be encoded in UTF-8
            # NOTE(review): the decoded result is discarded -- this line only
            # validates the bytes, raising UnicodeDecodeError on bad input.
            v.decode('utf8')
        out_dict[k] = v
    return out_dict
| [
"david.saradini@me.com"
] | david.saradini@me.com |
16174f6a0ceaacfd5739e6f757c7da92e64ce151 | ca8d183f5d6f1f260483a3555efd05870fe1d891 | /com_blacktensor/cop/cov/status/model/status_tf.py | 5e77659c4866db16ffcc2845eb4984f7d7c1aec3 | [
"MIT"
] | permissive | Jelly6489/Stock-Proj | b559304f10614122ddaa00e39c821a65faa9f91d | 3e7b1ad5cddc5b142f0069e024199fe969c7c7e8 | refs/heads/main | 2023-01-13T17:18:33.729747 | 2020-11-13T08:19:33 | 2020-11-13T08:19:33 | 312,512,688 | 0 | 0 | MIT | 2020-11-13T08:11:04 | 2020-11-13T08:11:04 | null | UTF-8 | Python | false | false | 37 | py |
class CovidStatusTf(object):
    """Placeholder class for COVID status transfer; no behavior defined yet."""
    ...
"rlaalsrlzld@naver.com"
] | rlaalsrlzld@naver.com |
44f0243c3ed35006b5532825bb5bd1f5234dbfe8 | 272fa0b70f45f7820ff9fb64afcc519799688086 | /core/migrations/0001_initial.py | 4b7a41e0f12381073ef2068eb8115736abef6ac6 | [] | no_license | felipepanegalli/mini-agenda | 3a112f20d91213dc80d93ae80db2a0d82d46241d | 6a0e75f41c3c60ca65f5a689de21d977fb2cec11 | refs/heads/master | 2020-07-11T05:26:52.761467 | 2019-08-27T12:38:45 | 2019-08-27T12:38:45 | 204,455,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 763 | py | # Generated by Django 2.2.4 on 2019-08-24 15:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the ``Evento`` (calendar event) table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Evento',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('titulo', models.CharField(max_length=100)),
                ('descricao', models.TextField(blank=True, null=True)),
                ('data_evento', models.DateTimeField()),
                # auto_now=True: refreshed on every save, not only creation.
                ('data_criacao', models.DateTimeField(auto_now=True)),
            ],
            options={
                'db_table': 'evento',
            },
        ),
    ]
| [
"felipe.panegalli@gmail.com"
] | felipe.panegalli@gmail.com |
64a0c196fd8148b2a984ade744da8ceb40baf34d | fc342986f310d764e738796086f14b76dc2aedca | /functional_tests/tests.py | e36140d21b0846e1aff6cabd6b5ae65d39f150b0 | [] | no_license | yasminaraujo/tdd-project | 2a005f83bffd3883fb43d12cf02503b37b6c943d | 8f790e3177b82743d61a7360980770d9466ec7b5 | refs/heads/master | 2023-07-03T00:56:55.990249 | 2021-08-09T17:06:02 | 2021-08-09T17:06:02 | 392,688,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,635 | py | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import WebDriverException
import time
import unittest
MAX_WAIT = 5
class NewVisitorTest(LiveServerTestCase):
def setUp(self):
self.browser = webdriver.Firefox()
def tearDown(self):
self.browser.quit()
def test_can_start_a_list_for_one_user(self):
# Maria decidiu utilizar o novo app TODO. Ela entra em sua página principal:
self.browser.get(self.live_server_url)
# Ela nota que o título da página menciona TODO
self.assertIn('To-Do', self.browser.title)
header_text = self.browser.find_element_by_tag_name('h1').text
self.assertIn('To-Do', header_text)
# Ela é convidada a entrar com um item TODO imediatamente
inputbox = self.browser.find_element_by_id('id_new_item')
self.assertEqual(inputbox.get_attribute('placeholder'), 'Enter a to-do item')
# Ela digita "Estudar testes funcionais" em uma caixa de texto
inputbox.send_keys('Estudar testes funcionais')
# Quando ela aperta enter, a página atualiza, e mostra a lista
# "1: Estudar testes funcionais" como um item da lista TODO
inputbox.send_keys(Keys.ENTER)
time.sleep(1)
table = self.browser.find_element_by_id('id_list_table')
rows = table.find_elements_by_tag_name('tr')
self.assertIn('1: Estudar testes funcionais', [row.text for row in rows])
# Ainda existe uma caixa de texto convidando para adicionar outro item
# Ela digita: "Estudar testes de unidade"
inputbox = self.browser.find_element_by_id('id_new_item')
inputbox.send_keys('Estudar testes de unidade')
inputbox.send_keys(Keys.ENTER)
time.sleep(1)
# A página atualiza novamente, e agora mostra ambos os itens na sua lista
table = self.browser.find_element_by_id('id_list_table')
rows = table.find_elements_by_tag_name('tr')
self.assertIn('1: Estudar testes funcionais', [row.text for row in rows])
self.assertIn('2: Estudar testes de unidade', [row.text for row in rows])
# Maria se pergunta se o site vai lembrar da sua lista. Então, ela verifica que
# o site gerou uma URL única para ela -- existe uma explicação sobre essa feature
# Ela visita a URL: a sua lista TODO ainda está armazenada
# Quando ela aperta enter, a página atualiza, e mostra a lista
# "1: Estudar testes funcionais" como um item da lista TODO
inputbox.send_keys(Keys.ENTER)
time.sleep(1)
self.check_for_row_in_list_table('1: Estudar testes funcionais')
# Ainda existe uma caixa de texto convidando para adicionar outro item
# Ela digita: "Estudar testes de unidade"
inputbox = self.browser.find_element_by_id('id_new_item')
inputbox.send_keys('Estudar testes de unidade')
inputbox.send_keys(Keys.ENTER)
time.sleep(1)
# A página atualiza novamente, e agora mostra ambos os itens na sua lista
self.check_for_row_in_list_table('1: Estudar testes funcionais')
self.check_for_row_in_list_table('2: Estudar testes de unidade')
# Satisfeita, ela vai dormir
def wait_for_row_in_list_table(self, row_text):
    """Poll the to-do table until *row_text* shows up as a row.

    Retries every half second while the page is still loading; once
    MAX_WAIT seconds have elapsed, the last assertion/driver error is
    allowed to propagate so the test fails with a useful message.
    """
    deadline = time.time() + MAX_WAIT
    while True:
        try:
            table = self.browser.find_element_by_id('id_list_table')
            row_texts = [row.text for row in table.find_elements_by_tag_name('tr')]
            self.assertIn(row_text, row_texts)
            return
        except (AssertionError, WebDriverException):
            # Either the row is not there yet or the page/table has not
            # rendered; keep retrying until the deadline passes.
            if time.time() > deadline:
                raise
            time.sleep(0.5)
def test_multiple_users_can_start_lists_at_different_urls(self):
    """Two users get isolated lists, each at its own unique URL."""
    # Maria starts a new list.
    self.browser.get(self.live_server_url)
    inputbox = self.browser.find_element_by_id('id_new_item')
    inputbox.send_keys('Estudar testes funcionais')
    inputbox.send_keys(Keys.ENTER)
    self.wait_for_row_in_list_table('1: Estudar testes funcionais')
    # She notices her list has its own unique URL.
    maria_list_url = self.browser.current_url
    self.assertRegex(maria_list_url, '/lists/.+')
    # Now a new user, Joao, visits the site.  Quitting and restarting the
    # browser guarantees none of Maria's session state leaks into his visit.
    self.browser.quit()
    self.browser = webdriver.Firefox()
    # Joao opens the home page: there is no sign of Maria's list.
    self.browser.get(self.live_server_url)
    page_text = self.browser.find_element_by_tag_name('body').text
    self.assertNotIn('1: Estudar testes funcionais', page_text)
    self.assertNotIn('2: Estudar testes de unidade', page_text)
    # Joao starts a list of his own.
    inputbox = self.browser.find_element_by_id('id_new_item')
    inputbox.send_keys('Comprar leite')
    inputbox.send_keys(Keys.ENTER)
    self.wait_for_row_in_list_table('1: Comprar leite')
    # Joao gets his own unique URL, distinct from Maria's.
    joao_list_url = self.browser.current_url
    self.assertRegex(joao_list_url, '/lists/.+')
    self.assertNotEqual(joao_list_url, maria_list_url)
    # Again, no trace of Maria's list on his page.
    page_text = self.browser.find_element_by_tag_name('body').text
    self.assertNotIn('Estudar testes funcionais', page_text)
    self.assertIn('Comprar leite', page_text)
    # Satisfied, they both go to sleep.
unittest.main()
| [
"yasmin.araujo@take.net"
] | yasmin.araujo@take.net |
dfcbd965b15747b838c49c47c157b335cf339ed8 | 9f0b31287f5be908da6b6b0d06bc1ca55ded9a93 | /financeLinearRegressionImportsLibs.py | 310d2672e733e797e9f034d608d2ad5f868bac88 | [] | no_license | SalehRazian/Past-Project-6-Simple-Machine-Learning-Neural-Network | 9b14bae15b6d60eec986250a290fd0324f68aa00 | 5fc0830f6bc7914acb829b8fd918705736af5ab6 | refs/heads/main | 2023-05-27T07:57:09.190808 | 2021-06-08T06:24:37 | 2021-06-08T06:24:37 | 374,904,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,329 | py | import pandas as pd
import quandl, math, datetime
import numpy as np
from sklearn import preprocessing, svm
from sklearn.model_selection import cross_validate
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
from matplotlib import style
import pickle

style.use('ggplot')

# Fetch Alphabet (GOOGL) daily price data from Quandl's WIKI dataset.
df = quandl.get('WIKI/GOOGL')
df = df[['Adj. Open','Adj. High','Adj. Low','Adj. Close','Adj. Volume']]
# High-to-close spread as a percentage of the close (intraday volatility proxy).
df['HL_PCT'] = (df['Adj. High'] - df['Adj. Close'])/df['Adj. Close'] *100.0
# Open-to-close move as a percentage of the open (daily percent change).
df['PCT_change'] = (df['Adj. Close'] - df['Adj. Open'])/df['Adj. Open'] *100.0
df = df[['Adj. Close','HL_PCT','PCT_change','Adj. Volume']]
forecast_col = 'Adj. Close'
# Replace missing values with an extreme sentinel so the rows are treated
# as outliers rather than dropped.
df.fillna(-9999, inplace=True)
# Forecast horizon: predict 10% of the dataset's length into the future
# (e.g. use data from N days ago to predict today).
forecast_out = int(math.ceil(0.1*len(df)))
# Shift the target column up by the horizon so each row's 'label' is the
# close price `forecast_out` days later -- the value we try to predict.
df['label'] = df[forecast_col].shift(-(forecast_out))
#df.dropna(inplace=True)
X = np.array(df.drop(['label'],1)) # every column except the target 'label'
# Standardise features (zero mean / unit variance); helps training and
# testing, at the cost of some time.
X = preprocessing.scale(X)
X_lately = X[-forecast_out:] # the newest rows that have no label yet -> what we forecast
X = X[:-forecast_out]
# The shifted label column left NaNs at the bottom; drop them so X and y align.
#X = X[:-forcast_out+1] ##We Droped the labeles after the shift so ////Ignore things changed
df.dropna(inplace=True)
y = np.array(df['label'])
# Shuffle and split to avoid ordering bias; 20% of the data is held out
# for testing.
X_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.2)
# Classifier choice -- Support Vector Regression kept as an alternative:
#clf = svm.SVR(kernel='linear') #Support Vector Machine
# n_jobs=-1 uses all available CPU cores.
clf = LinearRegression(n_jobs=-1) #not needed after save
clf.fit(X_train, y_train) #not needed after save
# Persist the trained model so later runs can skip training entirely.
with open('linearregression.pickle','wb') as f: #not needed after save
    pickle.dump(clf,f) #not needed after save
# Reload the model from disk (demonstrates using the saved artifact).
pickle_in = open('linearregression.pickle','rb')
clf = pickle.load(pickle_in)
# R^2 score on the held-out test split.
accuracy = clf.score(X_test, y_test)
print(accuracy)
forecast_set = clf.predict(X_lately)
print(forecast_set)
df['Forecast'] = np.nan # new empty column: historical rows have no forecast value
last_date = df.iloc[-1].name # last date currently present in the frame
last_unix = last_date.timestamp()
one_day = 86400 #seconds
next_unix = last_unix + one_day
# Append one row per forecasted day: NaN for every feature column, with the
# predicted close in the final 'Forecast' column.
for i in forecast_set:
    next_date = datetime.datetime.fromtimestamp(next_unix)
    next_unix += one_day
    df.loc[next_date] = [np.nan for _ in range(len(df.columns)-1)]+[i]

# Plot history and forecast on the same axes.
df['Adj. Close'].plot()
df['Forecast'].plot()
plt.legend(loc=4)
plt.xlabel('Date')
plt.ylabel('Price')
plt.show()
| [
"cipher2633@gmail.com"
] | cipher2633@gmail.com |
6681a031c181c0df237e7640a5209cedf397fbb5 | 12d71cd47a2167d550bab93fa3c11b4da1c5d6a7 | /mpp-backend/api/models/quarters/product_quarter_date_model.py | 59a954f69355b05a10319b7c0b7f30ce75f25b33 | [] | no_license | dieptran43/Medicine-Data-Portal__Data-Reporting-Tool | 69596c3a6606111debcc070227f3eadb27c45c7a | 2d32bb4a05adaca1235d1bb3d1e6c0fa74cef6b3 | refs/heads/master | 2023-02-14T01:26:54.494022 | 2020-12-06T07:14:29 | 2020-12-06T07:14:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,892 | py | from django.db import models
from rest_framework import serializers
from django import forms
class ProductQuarterDate(models.Model):
    """Start/end dates for one stage of a product's quarter."""

    class Meta:
        db_table = "product_quarter_date"

    product_quarter_date_id = models.AutoField(primary_key=True)
    # FK columns keep their raw *_id column names via db_column.
    product_quarter_id = models.ForeignKey("api.ProductQuarter",on_delete=models.CASCADE,db_column='product_quarter_id')
    stage_id = models.ForeignKey("api.Stage",on_delete=models.CASCADE,db_column='stage_id')
    # Date window for the stage; both ends optional.
    start_date = models.DateField(null=True)
    end_date = models.DateField(null=True)
    # Audit columns; created_by/updated_by hold user ids stamped by the
    # serializer rather than FK relations.
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    created_by = models.IntegerField(null=True)
    updated_by = models.IntegerField(null=True)
class ProductQuarterDateSerializer(serializers.ModelSerializer):
    """Serializer for ProductQuarterDate that stamps the audit columns
    (created_by/updated_by) with the id of the requesting user."""

    class Meta:
        model = ProductQuarterDate
        fields = ('product_quarter_date_id','product_quarter_id','stage_id','start_date','end_date')

    def create(self,validated_data):
        """Create and save a new row, recording the request user's id on
        both created_by and updated_by."""
        curr_user = self.context['request'].user.id
        validated_data['created_by'] = curr_user
        validated_data['updated_by'] = curr_user
        product_quarter_date = ProductQuarterDate(**validated_data)
        product_quarter_date.save()
        return product_quarter_date

    def update(self,instance,validated_data):
        """Apply a (possibly partial) update: absent keys fall back to the
        instance's current values; updated_by is always refreshed."""
        curr_user = self.context['request'].user.id
        instance.product_quarter_id = validated_data.get('product_quarter_id', instance.product_quarter_id)
        instance.stage_id = validated_data.get('stage_id', instance.stage_id)
        instance.start_date = validated_data.get('start_date', instance.start_date)
        instance.end_date = validated_data.get('end_date', instance.end_date)
        instance.updated_by = curr_user
        instance.save()
        return instance
"kaushikjadhav01@gmail.com"
] | kaushikjadhav01@gmail.com |
134e783822e9a879615d35d2b58613af231d7864 | 53be839ec30082e9e49e7593ddc5f508466ea413 | /ripozo_html/__init__.py | d9e2b84c579175040b813d4681c259d1ba05d50d | [] | no_license | timmartin19/ripozo-html | c0f62fad333f1a25b351eb6f9e4e817f8ebd0542 | 1455723ac1074c8b8081542df46c1797d0169fc4 | refs/heads/master | 2021-01-10T02:36:56.984431 | 2016-01-19T06:09:42 | 2016-01-19T06:09:42 | 48,511,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,190 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
from jinja2 import Environment, FileSystemLoader
from ripozo import adapters
from ripozo.adapters.base import AdapterBase
from ripozo.utilities import titlize_endpoint
from ripozo.resources.resource_base import create_url
from ripozo.resources.constants import input_categories
from ripozo import fields
import six
_TEMPLATE_DIR = os.path.join(os.path.dirname(__file__), 'templates')
_TEMPLATE_LOADER = FileSystemLoader(searchpath=_TEMPLATE_DIR)
_JINJA_ENV = Environment(loader=_TEMPLATE_LOADER)
_FIELD_TYPES = {
fields.BaseField: 'text',
fields.BooleanField: 'checkbox',
fields.DateTimeField: 'datetime',
fields.FloatField: 'number',
fields.IntegerField: 'integer',
fields.StringField: 'text'
}
class _HTMLField(object):
"""
A holder in place of a namedtuple due to performance
"""
def __init__(self, name, type_, default):
"""
A data holder to construct an HTMl field.
:param unicode name: The name of the field
:param unicode type_: The html type of the input
:param unicde default: The default value for the field
"""
self.name = name
self.type_ = type_
self.default = default
class HTMLAdapter(AdapterBase):
    """
    An adapter that displays browser readable content. This content
    is still 100% ReSTful, simply displaying the resource in an easy to
    read manner. Perfect for development or getting users comfortable
    with using the API.

    :param list[unicode] formats: The formats that this adapter exposes
    """
    formats = ['text/html']
    # Adapters whose raw serialized output is shown on the page when the
    # caller does not supply an explicit list.
    _default_adapters = (adapters.BasicJSONAdapter, adapters.JSONAPIAdapter,
                         adapters.SirenAdapter, adapters.HalAdapter)

    def __init__(self, resource=None, base_url='', adapters=None):
        """
        Initialize an adapter capable of showing the resources available
        on the API.

        :param resource: The resource that is being formatted.
        :type resource: ripozo.resource.resource_base.ResourceBase
        :param str|unicode base_url: The url to prepend to
            all of the urls.
        :param adapters: A list of adapter classes whose formats will be
            displayed to the user; defaults to ``_default_adapters``.
        :type adapters: list[ripozo.adapters.AdapterBase]|tuple(ripozo.adapters.AdapterBase)
        """
        super(HTMLAdapter, self).__init__(resource, base_url=base_url)
        self._adapter_types = adapters if adapters is not None else self._default_adapters

    def __call__(self, resource, **kwargs):
        """
        Creates a new instance of the HTMLAdapter from this
        adapter, passing along the adapter types that were used to
        instantiate this instance.

        :return: A new HTMLAdapter instance
        :rtype: HTMLAdapter
        """
        return self.__class__(resource, adapters=self._adapter_types, **kwargs)

    def extra_headers(self):
        """
        :return: Returns a dictionary with the Content-Type header
            set to this adapter's primary format (``text/html``).
        :rtype: dict
        """
        return {'Content-Type': self.formats[0]}

    @property
    def formatted_body(self):
        """
        Returns an HTML string that can be rendered by a browser.
        The page also embeds the serialized formats of the configured
        adapter types.

        :return: An HTML string
        :rtype: unicode
        """
        template_vars = dict(resources=[self.resource], adapter=self,
                             actions=self._actions, all_adapters=self._adapters)
        # 'full.jinja' must exist in the package's templates directory.
        template = _JINJA_ENV.get_template('full.jinja')
        return template.render(template_vars)

    @property
    def _adapters(self):
        """
        Instantiate each configured adapter type against the current
        resource and collect its name, content type and serialized body.

        :return: A list of adapter dicts for Jinja
        :rtype: list[dict]
        """
        adapter_dicts = []
        for adapter_type in self._adapter_types:
            adapter = adapter_type(self.resource, base_url=self.base_url)
            adapter_dict = dict(name=adapter_type.__name__,
                                content_type=adapter_type.formats[0],
                                body=adapter.formatted_body)
            adapter_dicts.append(adapter_dict)
        return adapter_dicts

    @property
    def _actions(self):
        """
        Gets the list of actions in an appropriate format for Jinja2
        to read: one dict per endpoint with title, method, url and fields.

        :return: The list of actions
        :rtype: list
        """
        actions = []
        for endpoint, options in six.iteritems(self.resource.endpoint_dictionary()):
            # Only the first options entry per endpoint is rendered.
            options = options[0]
            all_methods = options.get('methods', ('GET',))
            meth = all_methods[0] if all_methods else 'GET'
            base_route = options.get('route', self.resource.base_url)
            route = create_url(base_route, **self.resource.properties)
            route = self.combine_base_url_with_resource_url(route)
            action_fields = self.generate_fields_for_endpoint_funct(options.get('endpoint_func'))
            actn = dict(title=titlize_endpoint(endpoint),
                        method=meth, url=route, fields=action_fields)
            # id() of the dict gives each action a unique token --
            # presumably used as a DOM id in the template; confirm there.
            actn['id'] = id(actn)
            actions.append(actn)
        return actions

    def generate_fields_for_endpoint_funct(self, endpoint_func):
        """
        Returns the action's fields for the inputs on the form,
        skipping URL parameters (those are part of the route itself).

        :param apimethod endpoint_func: the endpoint function whose
            ``fields`` attribute (if any) describes the form inputs.
        :return: A list of _HTMLField objects (empty if the endpoint
            declares no fields).
        :rtype: list
        """
        return_fields = []
        fields_method = getattr(endpoint_func, 'fields', None)
        if not fields_method:
            return []
        action_fields = fields_method(self.resource.manager)
        for field in action_fields:
            if field.arg_type is input_categories.URL_PARAMS:
                continue
            # Pre-fill the input with the resource's current value when known.
            field_obj = _HTMLField(field.name, _FIELD_TYPES[type(field)],
                                   self.resource.properties.get(field.name))
            return_fields.append(field_obj)
        return return_fields
"tim.martin@vertical-knowledge.com"
] | tim.martin@vertical-knowledge.com |
cf9a5277fdc840bee95e3f08258157f74b0eb148 | 9d31366d2f0df3f89841b2f1759aa4fb6bc06780 | /setup.py | 189eb6bd193fe29d8e64473523b599c1cba03d03 | [
"MIT"
] | permissive | EmilStenstrom/emojizones | 96e6f958295a764e8cd06eed59ab2315989bca16 | c8a5ef7c5f58ce9beb448ab01c00c5c9d5454e5f | refs/heads/master | 2023-08-01T10:36:11.543973 | 2022-03-07T20:06:16 | 2022-03-07T20:06:16 | 245,609,219 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 875 | py | # -*- coding: utf-8 -*-
import os

from setuptools import setup

VERSION = '0.3.2'

# Read the README with an explicit encoding and close the handle promptly.
# The previous bare ``open(...).read()`` leaked the file object and used the
# locale's default encoding, which can fail on the non-ASCII author name.
with open(os.path.join(os.path.dirname(__file__), 'README.md'),
          encoding='utf-8') as _readme:
    _LONG_DESCRIPTION = _readme.read()

setup(
    name='emojizones',
    packages=["emojizones"],
    version=VERSION,
    description='A helper library to convert dates between timezone using emojiis',
    long_description=_LONG_DESCRIPTION,
    long_description_content_type="text/markdown",
    author=u'Emil Stenström',
    author_email='em@kth.se',
    url='https://github.com/EmilStenstrom/emojizones/',
    install_requires=["pytz", "grapheme"],
    # NOTE(review): 'conll', 'conll-u', 'parser', 'nlp' look copy-pasted
    # from another project -- confirm the intended keywords.
    keywords=['emojizones', 'conll', 'conll-u', 'parser', 'nlp'],
    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Operating System :: OS Independent",
    ],
)
| [
"em@kth.se"
] | em@kth.se |
4a9cd2050ce1ad1ddda5ed230b8ca4bad878934d | 9183379a07d1d8936d8205d99ecd0e40269e667a | /sphinx/source/exercises/solution/05_encapsulation/printer.py | 414590fa8dc069be2a003ab1ed68e1baaddb3428 | [] | no_license | boegeskov/fall2020 | 477983eb97568e274d3cef9ee22706de172b6046 | 9e50030e3fa99cc5ddb95ff46f93c1a530d256b1 | refs/heads/master | 2023-01-23T18:30:19.893424 | 2020-12-09T07:16:20 | 2020-12-09T07:16:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,540 | py | # printer.py (solution)
"""
3. Machine -> printer
Create a Machine class that takes care of powering on and off a the machine.
Create a printer class that is a subclass of the Machine super class.
The printer should be able to print to console.
The printer should have a papertray, which should be in its own class. The papertray class should keep track of the paper, it should have the abillity to use paper and and load new paper in the tray if empty.
"""
class Machine:
    """Base appliance whose power state can be toggled on and off."""

    def __init__(self):
        # Machines start powered off; state is private, exposed read-only
        # through the ``is_on`` property.
        self.__powered = False

    @property
    def is_on(self):
        """bool: True while the machine is powered on."""
        return self.__powered

    def power(self):
        """Toggle the power state (off -> on, on -> off)."""
        self.__powered = not self.__powered
class Printer(Machine):
    """A machine that prints text to the console, consuming paper from
    its own paper tray (composition with Papertray)."""

    def __init__(self):
        # 1. Initialise the power state on the Machine base class.
        super().__init__()
        # 2. Alternative (old-style) call, kept for reference:
        # Machine.__init__(self)
        self.__pt = Papertray()

    def print(self, text):
        """Print *text* if the printer is on and paper is available;
        otherwise report why printing is not possible."""
        if self.__pt.paper == 0:
            print('Papertray is empty')
        elif self.is_on:
            print(text)
            self.__pt.paper -= 1
        else:
            print('Printer is off')

    @property
    def load(self):
        """int: number of sheets currently in the tray."""
        return self.__pt.paper

    # BUG FIX: the original wrote ``load.setter`` without the '@', so the
    # bare expression did nothing and the following ``def load`` silently
    # replaced the property with a plain method.
    @load.setter
    def load(self, no):
        self.__pt.paper = no
class Papertray:
    """Tracks the paper supply available to a printer."""

    def __init__(self, paper=2):
        """Create a tray.

        :param int paper: number of sheets initially loaded.  Defaults to 2,
            matching the previously hard-coded tray size, so existing
            no-argument callers are unaffected.
        """
        self.paper = paper

    @property
    def paper(self):
        """int: sheets currently in the tray."""
        return self.__paper

    @paper.setter
    def paper(self, paper):
        # Loading or using paper is a plain assignment; the original did
        # no validation, so none is added here.
        self.__paper = paper
| [
"clbo@kea.dk"
] | clbo@kea.dk |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.