text stringlengths 38 1.54M |
|---|
# -*- coding: utf-8 -*-
from django.db import models
class Function(models.Model):
    """A UI feature ("功能") that is unlocked by reaching a building level."""

    # Explicit integer primary key (assigned externally, not auto-incremented).
    id = models.IntegerField(primary_key=True)
    name = models.CharField(max_length=255)
    icon = models.CharField(max_length=255)
    # Building that hosts this function; nullable for functions not tied to one.
    # (verbose_name "所属建筑" = "owning building")
    belong_to_building = models.ForeignKey('building.Building', null=True, blank=True, verbose_name="所属建筑")
    # Display order within the building's UI ("在界面中显示顺序").
    order_in_building = models.IntegerField(default=1, verbose_name="在界面中显示顺序")
    # Building level required to unlock this function ("所需建筑等级").
    need_building_level = models.IntegerField(default=1, verbose_name="所需建筑等级")
    # Free-text unlock description shown to the player ("解锁描述").
    unlock_des = models.TextField(blank=True, verbose_name="解锁描述")

    def __unicode__(self):
        # Python 2 style display name for the Django admin.
        return self.name

    class Meta:
        db_table = 'function'
        verbose_name = "功能"
        verbose_name_plural = "功能"
|
from django.conf.urls import url
from . import views
# Route table for the login/signup flows; names are used by {% url %} lookups.
urlpatterns = [
    url(r'^$', views.userLogin, name='index'),
    url(r'^login_app/$', views.authenticateUser, name='login_app'),
    url(r'^privacy/$', views.privacy_view, name='privacy'),
    url(r'^signup/$', views.signup, name='signup'),
    url(r'^registeruser/$', views.registeruser, name='registeruser'),
    url(r'^logout/$', views.user_logout, name='logout'),
]
#-*- coding:utf-8 -*-
# Augment the data for digit "4": for every existing "4" sample, generate one new sample.
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
import Image
import methods
import random
import os
def gen_single_txt(data_path, txt_name, is_random=True, label='4',
                   txt_dir='/home/nikoong/Algorithm_test/handwritting/data/txt'):
    """Write a label-list file covering every file found under ``data_path``.

    Each output line has the form ``<file path> <label>``.

    Args:
        data_path: Directory walked recursively for sample files.
        txt_name: Base name of the output file; ``.txt`` is appended.
        is_random: When True (default), shuffle the file order before writing.
        label: Class label written after each path. The default keeps the
            original hard-coded digit "4" behaviour.
        txt_dir: Directory that receives the generated ``.txt`` file. The
            default keeps the original hard-coded location.
    """
    txt = os.path.join(txt_dir, txt_name + '.txt')
    all_files = []
    for root, dirs, files in os.walk(data_path, topdown=False):
        for name in files:
            all_files.append(os.path.join(root, name))
    if is_random:
        random.shuffle(all_files)
    # Context manager guarantees the handle is closed even on write errors.
    with open(txt, 'w') as f:
        for filename in all_files:
            f.write(filename + ' ' + label + '\n')
if __name__ == '__main__':
    # Directory holding the augmented "4" images (produced by the now-disabled
    # augmentation step below); also reused as the walk root for labeling.
    output_path = '/home/nikoong/Algorithm_test/handwritting/data/new_four/new_four_train'
    new_list = []  # NOTE(review): unused — kept as-is.
    # Original training list of "<path> <label>" lines.
    sourse_list = methods.txt2list('/home/nikoong/Algorithm_test/handwritting/data/txt/finish/train.txt')
    data_path = output_path
    # The augmentation pass was disabled after its first run; this bare string
    # literal is a no-op at runtime and is kept verbatim.
    '''
    for i in range(len(sourse_list)):
        if int(sourse_list[i].split(' ')[1])==4:
            imagepath = sourse_list[i].split(' ')[0]
            imagename = imagepath.split('/')[-1].split('.jpg')[0]
            methods.DataAugmentation(imagepath,1,output_path,imagename)
    '''
    txt_name = 'new_four_train'
    # Write "<path> 4" lines for every augmented image found under data_path.
    gen_single_txt(data_path, txt_name, True)
    four_list = methods.txt2list('/home/nikoong/Algorithm_test/handwritting/data/txt/new_four_train.txt')
    # Merge the augmented samples into the training list, shuffle, and save.
    sourse_list.extend(four_list)
    random.shuffle(sourse_list)
    methods.list2txt(sourse_list, '/home/nikoong/Algorithm_test/handwritting/data/txt/finish/train_withnewfour.txt')
|
from django.shortcuts import render, redirect
from .models import Category, photos
# Create your views here.
def gallery(request):
    """Render the photo gallery, optionally filtered by ``?category=<name>``.

    Fixes: ``category == None`` replaced with the idiomatic ``is None``;
    leftover debug ``print`` removed.
    """
    category = request.GET.get('category')
    if category is None:
        images = photos.objects.all()
    else:
        images = photos.objects.filter(category__name=category)
    categories = Category.objects.all()
    context = {'categories': categories, 'images': images}
    return render(request, 'photos/gallery.html', context)
def viewphoto(request, pk):
    """Render the detail page for the photo with primary key ``pk``."""
    photo = photos.objects.get(id=pk)
    context = {'img': photo}
    return render(request, 'photos/photo.html', context)
def addphoto(request):
    """Handle the photo-upload form: GET renders it, POST creates a photo.

    Category resolution on POST: an existing category id from the select box
    wins; otherwise a non-empty 'category_new' creates (or reuses) a category
    by name; otherwise the photo is stored uncategorized.

    Fix: leftover debug print removed.
    """
    categories = Category.objects.all()
    if request.method == 'POST':
        data = request.POST
        image = request.FILES.get('img')
        if data['category'] != 'none':
            category = Category.objects.get(id=data['category'])
        elif data['category_new'] != '':
            category, created = Category.objects.get_or_create(
                name=data['category_new'])
        else:
            category = None
        photos.objects.create(
            category=category,
            description=data['description'],
            img=image,
        )
        return redirect('gallery')
    context = {'categories': categories}
    return render(request, 'photos/add.html', context)
import matplotlib.pyplot as plt
import torch
import torchvision.io
from mmseg.models import VisionTransformer
from mpl_toolkits.axes_grid1 import make_axes_locatable
from torch import nn
from torch.nn.functional import interpolate
from src_lib.models_hub.trans_unet import get_r50_b16_config, VisionTransformer as ViT
from src.position_maps.utils import generate_position_map
def extract_patches_2d(img, patch_shape, step=(1.0, 1.0), batch_first=False):
    """Split a batched image tensor into (possibly overlapping) 2D patches.

    Args:
        img: Tensor of shape (N, C, H, W).
        patch_shape: (patch_H, patch_W).
        step: Per-dimension stride; a float is interpreted as a fraction of
            the patch size, an int as an absolute pixel stride. (Default is
            now a tuple rather than a list to avoid the shared mutable
            default-argument pitfall; indexing behaviour is unchanged.)
        batch_first: If True, return (N, n_patches, C, pH, pW); otherwise
            (n_patches, N, C, pH, pW).

    Returns:
        Tensor of patches. Images smaller than the patch are zero-padded
        (centred), and an extra edge-aligned patch row/column is added when
        the stride does not tile the image exactly.
    """
    patch_H, patch_W = patch_shape[0], patch_shape[1]
    # Zero-pad (centred) when the image is smaller than one patch.
    if img.size(2) < patch_H:
        num_padded_H_Top = (patch_H - img.size(2)) // 2
        num_padded_H_Bottom = patch_H - img.size(2) - num_padded_H_Top
        padding_H = nn.ConstantPad2d((0, 0, num_padded_H_Top, num_padded_H_Bottom), 0)
        img = padding_H(img)
    if img.size(3) < patch_W:
        num_padded_W_Left = (patch_W - img.size(3)) // 2
        num_padded_W_Right = patch_W - img.size(3) - num_padded_W_Left
        padding_W = nn.ConstantPad2d((num_padded_W_Left, num_padded_W_Right, 0, 0), 0)
        img = padding_W(img)
    # Float steps are fractions of the patch size; ints are absolute strides.
    step_int = [0, 0]
    step_int[0] = int(patch_H * step[0]) if isinstance(step[0], float) else step[0]
    step_int[1] = int(patch_W * step[1]) if isinstance(step[1], float) else step[1]
    patches_fold_H = img.unfold(2, patch_H, step_int[0])
    # If the stride does not reach the bottom edge exactly, append one more
    # patch row flush with that edge.
    if (img.size(2) - patch_H) % step_int[0] != 0:
        patches_fold_H = torch.cat((patches_fold_H, img[:, :, -patch_H:, ].permute(0, 1, 3, 2).unsqueeze(2)), dim=2)
    patches_fold_HW = patches_fold_H.unfold(3, patch_W, step_int[1])
    # Same treatment for the right edge.
    if (img.size(3) - patch_W) % step_int[1] != 0:
        patches_fold_HW = torch.cat(
            (patches_fold_HW, patches_fold_H[:, :, :, -patch_W:, :].permute(0, 1, 2, 4, 3).unsqueeze(3)), dim=3)
    patches = patches_fold_HW.permute(2, 3, 0, 1, 4, 5)
    patches = patches.reshape(-1, img.size(0), img.size(1), patch_H, patch_W)
    if batch_first:
        patches = patches.permute(1, 0, 2, 3, 4)
    return patches
def reconstruct_from_patches_2d(patches, img_shape, step=(1.0, 1.0), batch_first=False):
    """Inverse of ``extract_patches_2d``: stitch patches back into an image.

    Overlapping regions are averaged; the extra edge-aligned patches added by
    the extractor (when the stride does not tile the image exactly) are folded
    back in, and any padding applied for images smaller than one patch is
    cropped off.

    Args:
        patches: (n_patches, N, C, pH, pW), or (N, n_patches, C, pH, pW) when
            ``batch_first`` is True.
        img_shape: (H, W) of the target image.
        step: Same stride convention as ``extract_patches_2d`` (float =
            fraction of patch size, int = pixels). Default changed from a
            list to a tuple to avoid the mutable-default pitfall; behaviour
            is identical.
        batch_first: Layout flag, see above.

    Returns:
        Tensor of shape (N, C, H, W).
    """
    if batch_first:
        patches = patches.permute(1, 0, 2, 3, 4)
    patch_H, patch_W = patches.size(3), patches.size(4)
    img_size = (patches.size(1), patches.size(2), max(img_shape[0], patch_H), max(img_shape[1], patch_W))
    step_int = [0, 0]
    step_int[0] = int(patch_H * step[0]) if isinstance(step[0], float) else step[0]
    step_int[1] = int(patch_W * step[1]) if isinstance(step[1], float) else step[1]
    # Regular grid size, plus one extra row/col when the extractor appended
    # edge-aligned patches.
    nrow, ncol = 1 + (img_size[-2] - patch_H) // step_int[0], 1 + (img_size[-1] - patch_W) // step_int[1]
    r_nrow = nrow + 1 if ((img_size[2] - patch_H) % step_int[0] != 0) else nrow
    r_ncol = ncol + 1 if ((img_size[3] - patch_W) % step_int[1] != 0) else ncol
    patches = patches.reshape(r_nrow, r_ncol, img_size[0], img_size[1], patch_H, patch_W)
    img = torch.zeros(img_size, device=patches.device)
    # Count how many patches cover each pixel so overlaps can be averaged.
    overlap_counter = torch.zeros(img_size, device=patches.device)
    for i in range(nrow):
        for j in range(ncol):
            img[:, :, i * step_int[0]:i * step_int[0] + patch_H, j * step_int[1]:j * step_int[1] + patch_W] += patches[
                i, j, ]
            overlap_counter[:, :, i * step_int[0]:i * step_int[0] + patch_H,
                            j * step_int[1]:j * step_int[1] + patch_W] += 1
    # Fold the edge-aligned extra row/column/corner back in, mirroring the
    # three corresponding branches in the extractor.
    if (img_size[2] - patch_H) % step_int[0] != 0:
        for j in range(ncol):
            img[:, :, -patch_H:, j * step_int[1]:j * step_int[1] + patch_W] += patches[-1, j, ]
            overlap_counter[:, :, -patch_H:, j * step_int[1]:j * step_int[1] + patch_W] += 1
    if (img_size[3] - patch_W) % step_int[1] != 0:
        for i in range(nrow):
            img[:, :, i * step_int[0]:i * step_int[0] + patch_H, -patch_W:] += patches[i, -1, ]
            overlap_counter[:, :, i * step_int[0]:i * step_int[0] + patch_H, -patch_W:] += 1
    if (img_size[2] - patch_H) % step_int[0] != 0 and (img_size[3] - patch_W) % step_int[1] != 0:
        img[:, :, -patch_H:, -patch_W:] += patches[-1, -1, ]
        overlap_counter[:, :, -patch_H:, -patch_W:] += 1
    img /= overlap_counter
    # Crop off the centred zero-padding added for images smaller than a patch.
    if img_shape[0] < patch_H:
        num_padded_H_Top = (patch_H - img_shape[0]) // 2
        num_padded_H_Bottom = patch_H - img_shape[0] - num_padded_H_Top
        img = img[:, :, num_padded_H_Top:-num_padded_H_Bottom, ]
    if img_shape[1] < patch_W:
        num_padded_W_Left = (patch_W - img_shape[1]) // 2
        num_padded_W_Right = patch_W - img_shape[1] - num_padded_W_Left
        img = img[:, :, :, num_padded_W_Left:-num_padded_W_Right]
    return img
def quick_viz(im1, im2=None, img_idx=0, c_map=None):
    """Show ``im1`` (and optionally a stitched ``im2``) side by side with colorbars."""
    im1 = preprocess_image(im1, img_idx)
    if im2 is None:
        fig, image_axis = plt.subplots(1, 1, sharex='none', sharey='none', figsize=(14, 12))
    else:
        im2 = preprocess_image(im2, img_idx)
        fig, (image_axis, stitched_image_axis) = plt.subplots(
            1, 2, sharex='none', sharey='none', figsize=(14, 12))
        stitched = stitched_image_axis.imshow(im2, cmap=c_map)
        stitched_image_axis.set_title('Stitched')
        cax = make_axes_locatable(stitched_image_axis).append_axes('right', size='5%', pad=0.10)
        fig.colorbar(stitched, cax=cax, orientation='vertical')
    original = image_axis.imshow(im1, cmap=c_map)
    image_axis.set_title('Original')
    cax = make_axes_locatable(image_axis).append_axes('right', size='5%', pad=0.10)
    fig.colorbar(original, cax=cax, orientation='vertical')
    plt.tight_layout()
    plt.show()
def preprocess_image(im, img_idx):
    """Normalize an image for plotting: detach tensors, pick one batch item,
    and move a leading channel axis (1-3 channels) to the end (HWC)."""
    if isinstance(im, torch.Tensor):
        im = im.detach().cpu()
    if im.ndim == 4:
        im = im[img_idx]
    if im.shape[0] in (1, 2, 3):
        im = im.permute(1, 2, 0)
    return im
if __name__ == '__main__':
    # Demo: patchify a scene image, run each patch through a ViT, and stitch
    # the per-patch outputs back into a full-resolution map.
    im_path = "/home/rishabh/Thesis/TrajectoryPredictionMastersThesis/Datasets/SDD/annotations/" \
              "deathCircle/video1/reference.jpg"
    random_idx = 0
    patch_size = (256, 256)
    batch_size = 2
    num_locs = 40
    img = torchvision.io.read_image(im_path).unsqueeze(0)
    # Halve the resolution and fake a batch by repeating the single frame.
    img = interpolate(img, scale_factor=0.5).repeat(batch_size, 1, 1, 1)
    patches = extract_patches_2d(img, patch_size, batch_first=True)
    stitched_img = reconstruct_from_patches_2d(patches, img.shape[-2:], batch_first=True).to(dtype=torch.uint8)
    p = torchvision.utils.make_grid(patches[random_idx].squeeze(0), nrow=img.shape[2] // patch_size[0])
    # quick_viz(p)
    # quick_viz(img, stitched_img)
    # Random target locations -> position map (1-channel), patched the same way.
    loc_x = torch.randint(0, img.shape[-2], (num_locs,))
    loc_y = torch.randint(0, img.shape[-1], (num_locs,))
    locs = torch.stack((loc_x, loc_y)).t()
    p_img = torch.from_numpy(
        generate_position_map(
            list(img.shape[-2:]), locs, sigma=2, return_combined=True))[(None,) * 2].repeat(batch_size, 1, 1, 1)
    patches_target = extract_patches_2d(p_img, patch_size, batch_first=True)
    stitched_img_target = reconstruct_from_patches_2d(patches_target, p_img.shape[-2:], batch_first=True)
    p_target = torchvision.utils.make_grid(patches_target[random_idx].squeeze(0), nrow=p_img.shape[2] // patch_size[0])
    # quick_viz(p_target)
    # quick_viz(p_img, stitched_img_target)
    conf = get_r50_b16_config()
    conf.n_classes = 1
    # # from mm-seg -> reshape to input shape -> put Seg head to get desired classes
    # m = VisionTransformer(img_size=patch_size)
    m = ViT(conf, img_size=patch_size, num_classes=1, use_attn_decoder=True)
    # Flatten (batch, n_patches, C, H, W) -> (batch * n_patches, C, H, W) and
    # scale pixel values to [0, 1] for the model.
    inp = (patches.contiguous().view(-1, *patches.shape[2:]).float() / 255.0)
    o = m(inp)
    o = o.view(batch_size, -1, 1, *patches.shape[3:])
    stitched_output = reconstruct_from_patches_2d(o, img.shape[-2:], batch_first=True)
    quick_viz(img, stitched_output)
    print()
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Time-to-string and time-from-string routines."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import calendar
import datetime
import time
class Error(Exception):
  """Base exception for problems raised by the time routines in this module."""
# strftime format for HTTP-style timestamps, e.g. "Thu, 01 Jan 1970 00:00:00 GMT".
HTML_TIME_FMT = '%a, %d %b %Y %H:%M:%S GMT'
# Value format used by HTML <input type="date"> widgets.
HTML_DATE_WIDGET_FORMAT = '%Y-%m-%d'
MONTH_YEAR_FMT = '%b %Y'
MONTH_DAY_FMT = '%b %d'
MONTH_DAY_YEAR_FMT = '%b %d %Y'

# We assume that all server clocks are synchronized within this amount.
MAX_CLOCK_SKEW_SEC = 30


def TimeForHTMLHeader(when=None):
  """Return the given UTC timestamp (or now) in HTML header format."""
  when = int(time.time()) if when is None else when
  return time.strftime(HTML_TIME_FMT, time.gmtime(when))


def TimestampToDateWidgetStr(when):
  """Format a timestamp int for use by HTML <input type="date">."""
  return time.strftime(HTML_DATE_WIDGET_FORMAT, time.gmtime(when))


def DateWidgetStrToTimestamp(val_str):
  """Parse the HTML <input type="date"> string into a timestamp int."""
  parsed = time.strptime(val_str, HTML_DATE_WIDGET_FORMAT)
  return int(calendar.timegm(parsed))
def FormatAbsoluteDate(
    timestamp, clock=datetime.datetime.utcnow,
    recent_format=MONTH_DAY_FMT, old_format=MONTH_YEAR_FMT):
  """Format timestamp like 'Sep 5', or 'Yesterday', or 'Today'.

  Args:
    timestamp: Seconds since the epoch in UTC.
    clock: callable that returns a datetime.datetime object when called with no
      arguments, giving the current time to use when computing what to display.
    recent_format: Format string to pass to strftime to present dates between
      six months ago and yesterday.
    old_format: Format string to pass to strftime to present dates older than
      six months or more than skew_tolerance in the future.

  Returns:
    If timestamp's date is today, "Today". If timestamp's date is yesterday,
    "Yesterday". If timestamp is within six months before today, return the
    time as formatted by recent_format. Otherwise, return the time as formatted
    by old_format.
  """
  ts = datetime.datetime.utcfromtimestamp(timestamp)
  now = clock()
  # Whole calendar months between now and ts, used for the six-month cutoff.
  month_delta = 12 * now.year + now.month - (12 * ts.year + ts.month)
  delta = now - ts
  if ts > now:
    # If the time is slightly in the future due to clock skew, treat as today.
    skew_tolerance = datetime.timedelta(seconds=MAX_CLOCK_SKEW_SEC)
    if -delta <= skew_tolerance:
      return 'Today'
    # Otherwise treat it like an old date.
    else:
      fmt = old_format
  elif month_delta > 6 or delta.days >= 365:
    fmt = old_format
  elif delta.days == 1:
    return 'Yesterday'
  elif delta.days == 0:
    return 'Today'
  else:
    fmt = recent_format
  # strftime zero-pads day numbers; strip the pad so "Sep 05" reads "Sep 5".
  return time.strftime(fmt, time.gmtime(timestamp)).replace(' 0', ' ')
def FormatRelativeDate(timestamp, days_only=False, clock=None):
  """Return a short string that makes timestamp more meaningful to the user.

  Describe the timestamp relative to the current time, e.g., '4
  hours ago'. In cases where the timestamp is more than 6 days ago,
  we return '' so that an alternative display can be used instead.

  Args:
    timestamp: Seconds since the epoch in UTC.
    days_only: If True, return 'N days ago' even for more than 6 days.
    clock: optional function to return an int time, like int(time.time()).

  Returns:
    String describing relative time.
  """
  # TODO(jrobbins): i18n of date strings
  now = clock() if clock else int(time.time())
  delta = int(now - timestamp)
  d_minutes = delta // 60
  d_hours = d_minutes // 60
  d_days = d_hours // 24
  if days_only:
    return '%s days ago' % d_days if d_days > 1 else ''
  if d_days > 6:
    return ''
  if d_days > 1:
    return '%s days ago' % d_days      # starts at 2 days
  if d_hours > 1:
    return '%s hours ago' % d_hours    # starts at 2 hours
  if d_minutes > 1:
    return '%s minutes ago' % d_minutes
  if d_minutes > 0:
    return '1 minute ago'
  if delta > -MAX_CLOCK_SKEW_SEC:
    return 'moments ago'
  return ''
def GetHumanScaleDate(timestamp, now=None):
  """Formats a timestamp to a coarse-grained and fine-grained time phrase.

  Args:
    timestamp: Seconds since the epoch in UTC.
    now: Current time in seconds since the epoch in UTC.

  Returns:
    A pair (coarse_grain, fine_grain) where coarse_grain is a string
    such as 'Today', 'Yesterday', etc.; and fine_grain is a string describing
    relative hours for Today and Yesterday, or an exact date for longer ago.
  """
  if now is None:
    now = int(time.time())
  now_year = datetime.datetime.fromtimestamp(now).year
  then_year = datetime.datetime.fromtimestamp(timestamp).year
  delta = int(now - timestamp)
  delta_minutes = delta // 60
  delta_hours = delta_minutes // 60
  delta_days = delta_hours // 24
  if 0 <= delta_hours < 24:
    if delta_hours > 1:
      return 'Today', '%s hours ago' % delta_hours
    if delta_minutes > 1:
      return 'Today', '%s min ago' % delta_minutes
    if delta_minutes > 0:
      return 'Today', '1 min ago'
    # BUGFIX: use >= so a timestamp exactly equal to |now| (delta == 0)
    # reports 'Today' instead of falling through to the 'Yesterday' branch
    # below and returning ('Yesterday', '0 hours ago').
    if delta >= 0:
      return 'Today', 'moments ago'
  if 0 <= delta_hours < 48:
    return 'Yesterday', '%s hours ago' % delta_hours
  if 0 <= delta_days < 7:
    return 'Last 7 days', time.strftime(
        '%b %d, %Y', (time.localtime(timestamp)))
  if 0 <= delta_days < 30:
    return 'Last 30 days', time.strftime(
        '%b %d, %Y', (time.localtime(timestamp)))
  if delta > 0:
    if now_year == then_year:
      return 'Earlier this year', time.strftime(
          '%b %d, %Y', (time.localtime(timestamp)))
    return ('Before this year',
            time.strftime('%b %d, %Y', (time.localtime(timestamp))))
  if delta > -MAX_CLOCK_SKEW_SEC:
    return 'Today', 'moments ago'
  # Only say something is in the future if it is more than just clock skew.
  return 'Future', 'Later'
|
import os
from processing.tile_image import TileSlide
from queue import Queue
from threading import Thread
from time import time
import logging
logging.basicConfig(filename='run_global_001.log', level=logging.DEBUG)


def preprocessing():
    """Queue every .svs slide under |datasetpath| for tiling by worker threads.

    Starts 60 daemon TileSlide workers, walks the dataset directory, enqueues
    one job per matching slide, and blocks until the queue drains.

    Fix: the final print passed the format string and elapsed time as two
    separate arguments ("took %s 12.3"); it now interpolates the value.
    """
    print("preprocessing_001 started")
    ts = time()
    slideextension1 = "mrxs"  # NOTE(review): unused — only .svs files are queued.
    slideextension2 = "svs"
    datasetpath = "/opt/storage/testImageTiling/"
    outputpath = "/opt/storage/testImageTilingOutput_001/"
    os.mkdir(outputpath)
    # Preprocessing:
    queue = Queue()
    for _ in range(60):
        worker = TileSlide(queue)
        # Setting daemon to True will let the main thread exit even though
        # the workers are blocking.
        worker.daemon = True
        worker.start()
    for root, dirs, files in os.walk(datasetpath, topdown=False):
        for name in files:
            if name.endswith(slideextension2):
                logging.info(name)
                print(name)
                queue.put((root, outputpath, name, 0, 4))
    queue.join()
    elapsed = time() - ts
    logging.info('Preprocessing took %s', elapsed)
    print('Preprocessing took %s' % elapsed)


if __name__ == '__main__':
    preprocessing()
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-09-02 23:11
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
from dataentry.models.form_migration import FormMigration
def migrate_forms(apps, schema_editor):
    """Data-migration hook: reload form definitions from the given JSON file."""
    # Invoke form migration with a specific file containing the latest form data.
    FormMigration.migrate(apps, schema_editor, 'form_data_20180903.json')
class Migration(migrations.Migration):
    """Restructures export/import configuration (generated 2018-09-02).

    Replaces the question-level Export_Import_Question mapping with
    field-level ExportImportField records, links export/import configs and
    cards to Form/Category, renames GoogleSheet to GoogleSheetConfig, and
    finally re-runs the form data migration from 'form_data_20180903.json'.
    """

    dependencies = [
        ('dataentry', '0063_auto_20180830_1002'),
    ]

    operations = [
        # New per-field export mapping table.
        migrations.CreateModel(
            name='ExportImportField',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('field_name', models.CharField(max_length=126)),
                ('export_name', models.CharField(max_length=126)),
                ('arguments_json', django.contrib.postgres.fields.jsonb.JSONField(null=True)),
                ('answer_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='field_answer_type', to='dataentry.AnswerType')),
            ],
        ),
        # Drop the old mapping model's FKs before deleting the model below.
        migrations.RemoveField(
            model_name='export_import_question',
            name='export_import',
        ),
        migrations.RemoveField(
            model_name='export_import_question',
            name='question',
        ),
        migrations.RenameField(
            model_name='googlesheet',
            old_name='key_column',
            new_name='key_field_name',
        ),
        migrations.AddField(
            model_name='googlesheet',
            name='suppress_column_warnings',
            field=models.BooleanField(default=True),
        ),
        migrations.RemoveField(
            model_name='exportimportcard',
            name='card_export_import',
        ),
        migrations.RemoveField(
            model_name='exportimportcard',
            name='start_position',
        ),
        # default=1 backfills existing rows; preserve_default=False keeps the
        # default out of the model definition afterwards.
        migrations.AddField(
            model_name='exportimport',
            name='form',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='dataentry.Form'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='exportimport',
            name='implement_module',
            field=models.CharField(max_length=126, null=True),
        ),
        migrations.AddField(
            model_name='exportimportcard',
            name='category',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='export_import_card', to='dataentry.Category'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='question',
            name='export_name',
            field=models.CharField(max_length=126, null=True),
        ),
        migrations.AddField(
            model_name='question',
            name='export_params',
            field=django.contrib.postgres.fields.jsonb.JSONField(null=True),
        ),
        migrations.AlterField(
            model_name='exportimportcard',
            name='export_import',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='export_import_base', to='dataentry.ExportImport'),
            preserve_default=False,
        ),
        migrations.DeleteModel(
            name='Export_Import_Question',
        ),
        migrations.AddField(
            model_name='exportimportfield',
            name='card',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='dataentry.ExportImportCard'),
        ),
        migrations.AddField(
            model_name='exportimportfield',
            name='export_import',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dataentry.ExportImport'),
        ),
        migrations.RenameModel(
            old_name='GoogleSheet',
            new_name='GoogleSheetConfig',
        ),
        # Reload form definitions so they match the new schema.
        migrations.RunPython(migrate_forms),
    ]
|
"""
Initialize Flask app
"""
from flask import Flask
import os
import application.settings
from flask_restful import Api
# BUGFIX: the original passed the literal string '__name__' to Flask, which
# makes Flask resolve templates/static files relative to a module literally
# named "__name__" instead of this package. Pass the module's real __name__.
app = Flask(__name__)
api = Api(app)

# Select configuration based on the FLASK_CONF environment variable.
if os.getenv('FLASK_CONF') == 'TEST':
    app.config.from_object('application.settings.Testing')
else:
    app.config.from_object('application.settings.Production')

# Enable jinja2 loop controls extension
app.jinja_env.add_extension('jinja2.ext.loopcontrols')

# Pull in URL dispatch routes (imported last so routes can refer to `app`).
import application.urls
|
# 2. Determine if the sum of two integers is equal to the given value
# Given an array of integers and a value,
# determine if there are any two integers in the array whose sum is equal to the given value.
# Return true if the sum exists and return false if it does not.
def solution_by_educative(A, val):
    """Return True iff two elements of A sum to val (single O(n) pass).

    Keeps a set of values seen so far; for each new number, its required
    complement (val - number) is already in the set exactly when a pair exists.
    """
    seen = set()
    for number in A:
        complement = val - number
        if complement in seen:
            return True
        seen.add(number)
    return False
def find_sum_of_two(A, val):
    """Return True iff two elements at distinct indices of A sum to val.

    Rewritten from the original O(n^2) double loop to a single O(n) pass with
    a set of previously seen values. The blanket try/except — which swallowed
    every error, printed a message, and silently returned None — is removed;
    nothing in this loop is expected to raise for list input.
    """
    seen = set()
    for number in A:
        if val - number in seen:
            return True
        seen.add(number)
    return False
if __name__ == '__main__':
    # Smoke test both implementations on the same sample; each prints True.
    sample = [5, 7, 1, 2, 8, 4, 3]
    for solver in (find_sum_of_two, solution_by_educative):
        print(solver(sample, 10))
    # passed
|
from .astnode import AstNode
class ArrayDeclaration(AstNode):
    """AST node for an array declarator, e.g. the ``[10]`` part of ``int a[10];``."""

    def __init__(self):
        super(ArrayDeclaration, self).__init__(parent=None)
        # Element type, dimension expression, and dimension qualifiers are
        # populated by the parser after construction.
        self.type = None
        self.dim = None
        self.dim_quals = None

    def prepare_to_print(self):
        """Prefix this node's display name for pretty-printing."""
        self.name += "ArrayDecl: "

    def c_visitor(self):
        """Render the dimension back to C-like source, e.g. ``[10]``."""
        dimension = self.dim.to_string()
        return "[" + dimension + "]"
|
import random
import os
# Number of sentences to generate (the historical name says "words", but each
# line is a short sentence).
wordsCount = 1000000

linkers = ["So", "Also", "By the way", "Moreover"]
personal = ["I", "personaly I"]
like = ["like", "love", "prefer"]
code = ["to write programms", "to program", "to code", "to write code"]

# Fixes: use a context manager instead of a bare open()/close() pair (the
# original leaked the handle if any write raised) and stop shadowing the
# builtin name `file`.
with open("text.txt", "w") as out:
    for _ in range(wordsCount):
        sentence = (random.choice(linkers) + ", " + random.choice(personal) + " "
                    + random.choice(like) + " " + random.choice(code) + ".\n")
        out.write(sentence)

# Commit and push the generated file.
os.system("git add .")
os.system("git commit -m 'new file was generated!'")
os.system("git push")
from PyQt5.QtWidgets import QWidget,QPushButton,QListWidget,QHBoxLayout,QVBoxLayout
from PyQt5.QtCore import QObject,pyqtSignal
from PyQt5.QtGui import QIcon
from view.AuxiliaryElements.BlinkingText import BlinkingText
from view.AuxiliaryElements.ListWidgetCustomScroll import ListWidgetCustomScroll
class ChosePostWindow(QWidget, QObject):
    """Window listing posts with add/delete/back actions.

    Emits ``choose_post_closed`` when the window is closed so the owning view
    can react (e.g. re-show itself).
    """
    # Fired from closeEvent just before the window closes.
    choose_post_closed = pyqtSignal()

    def __init__(self):
        super(ChosePostWindow, self).__init__()
        self.setting_window()
        self.create_buttons()
        self.create_list_widget_and_setting()
        # Start the hint animation but keep the label hidden until needed.
        self.__blinking_label.start_blinking()
        self.__blinking_label.info_label.hide()
        self.set_layout()
        self.setWindowIcon(QIcon('../../model/AuxElements/icon.png'))

    @property
    def blinking_label(self):
        # Read-only access to the private blinking hint label.
        return self.__blinking_label

    def setting_window(self):
        # Basic title/geometry setup.
        self.setWindowTitle("Edit Post")
        self.setGeometry(100, 100, 500, 500)

    def create_buttons(self):
        # Action buttons plus the blinking usage hint.
        self.btn_delete_post = QPushButton("Delete Post")
        self.btn_delete_post.setMinimumHeight(30)
        self.btn_add_new_post = QPushButton("Add New Post")
        self.btn_add_new_post.setMinimumHeight(30)
        self.btn_backed = QPushButton("Back")
        self.btn_backed.setMinimumHeight(30)
        self.btn_backed.setStyleSheet("background-color: rgb(255,167,167)")
        self.__blinking_label = BlinkingText('Hint: Double click on post to start edit')

    def create_list_widget_and_setting(self):
        # Post list; single selection only.
        self.list_post_widget = ListWidgetCustomScroll()
        self.list_post_widget.setSelectionMode(QListWidget.SingleSelection)

    def creating_layouts(self, obj_for_qv=None, obj_for_qh=None):
        # Build a vertical layout from obj_for_qv widgets and nest a
        # horizontal row of obj_for_qh widgets underneath it.
        qv_b = QVBoxLayout()
        qh_b = QHBoxLayout()
        if None is not obj_for_qv:
            for obj in obj_for_qv:
                qv_b.addWidget(obj)
        if None is not obj_for_qh:
            for obj in obj_for_qh:
                qh_b.addWidget(obj)
        qv_b.addLayout(qh_b)
        return qv_b

    def set_layout(self):
        # Hint + list stacked vertically, action buttons in a row below.
        self.setLayout(self.creating_layouts([self.__blinking_label.info_label, self.list_post_widget],
                                             [self.btn_backed, self.btn_add_new_post, self.btn_delete_post]))

    def closeEvent(self, QCloseEvent):
        # Notify listeners before letting Qt complete the close.
        self.choose_post_closed.emit()
        QCloseEvent.accept()
|
import switch
import time
# Demo thermostat values: current reading and setpoint (Python 2 script).
temp = 1
setTemp = 5

#while True:
def controlPower(temp, setTemp):
    # Bang-bang control: switch the relay on while below the setpoint, off at
    # or above it, sleeping 2s before re-reading the sensor each time.
    # NOTE(review): `tempRead` is never imported in this file, so these
    # read_temp() calls would raise NameError at runtime — confirm the
    # intended sensor module and add its import.
    if temp < setTemp:
        print "low, "+str(temp)
        switch.setPower(1)
        time.sleep(2)
        temp = tempRead.read_temp()
    if temp >= setTemp:
        print "high, "+str(temp)
        switch.setPower(0)
        time.sleep(2)
        temp = tempRead.read_temp()

#controlPower(temp,setTemp)
|
# Greedy activity selection; input must already be sorted by end time.
# Time Complexity : O(n) single pass; Auxiliary Space : O(1)
def activityselection(start, end, n):
    """Return the maximum number of non-overlapping activities.

    Args:
        start: Start times, parallel to `end`.
        end: End times, assumed sorted in non-decreasing order (the greedy
            correctness condition).
        n: Number of activities to consider.

    Returns:
        Count of activities in a maximum compatible set (0 for empty input).

    Fix: the original initialised the counter to 0 and started comparing at
    j == 0, so the always-selected first activity was never counted (e.g. a
    single activity returned 0). The counter now starts at 1 for activity 0.
    """
    if not (start and end) or n <= 0:
        return 0
    num_of_activity = 1  # activity 0 is always selected by the greedy
    i = 0
    for j in range(1, n):
        # Select the next activity that starts after the last selected one ends.
        if start[j] >= end[i]:
            num_of_activity += 1
            i = j
    return num_of_activity
|
from enum import Enum
class linkType(Enum):
    """Relationship type of an AS-to-AS link."""
    P2P = 'p2p'  # peer to peer
    P2C = 'p2c'  # provider to customer


# holds values for each AS link entry
class ASEntry():

    def __init__(self, as1, as2, type):
        """Store both AS numbers; a `type` of -1 means provider-to-customer,
        any other value is treated as peer-to-peer."""
        self.as1 = as1
        self.as2 = as2
        self.type = linkType.P2C if type == -1 else linkType.P2P

    def __str__(self):
        parts = ["AS1: ", str(self.as1), " AS2: ", str(self.as2), " Type: ", self.type.value]
        return "".join(parts)
"""
SOLR module
Generates the required Solr cores
"""
from typing import Any
import luigi
from luigi.contrib.spark import PySparkTask
from pyspark import SparkContext
from pyspark.sql import SparkSession
from pyspark.sql.functions import (
col,
concat_ws,
collect_set,
when,
flatten,
explode_outer,
concat,
lit,
)
from impc_etl.jobs.load import ExperimentToObservationMapper
from impc_etl.jobs.load.solr.pipeline_mapper import ImpressToParameterMapper
from impc_etl.workflow.config import ImpcConfig
class ImpcImagesLoader(PySparkTask):
    """Luigi/PySpark task that builds the IMPC images Solr core parquet.

    Joins image-type observations with Omero image IDs and IMPReSS pipeline
    ontology terms, aggregates anatomy/MP annotations per observation, and
    adds Omero download/jpeg/thumbnail URLs before writing parquet output.
    """
    #: Task name shown in the scheduler.
    name = "IMPC_Images_Core_Loader"
    #: CSV mapping observation identity columns to Omero image IDs.
    omero_ids_csv_path = luigi.Parameter()
    #: Base output directory for the generated parquet.
    output_path = luigi.Parameter()

    def requires(self):
        # Upstream tasks supplying observations and the pipeline (IMPReSS) core.
        return [
            ExperimentToObservationMapper(),
            ImpressToParameterMapper(),
        ]

    def app_options(self):
        # Positional CLI args handed to main() as *args.
        return [
            self.input()[0].path,
            self.input()[1].path,
            self.omero_ids_csv_path,
            self.output().path,
        ]

    def output(self):
        # Normalize the base path to end with '/' and target a fixed parquet name.
        self.output_path = (
            self.output_path + "/"
            if not self.output_path.endswith("/")
            else self.output_path
        )
        return ImpcConfig().get_target(f"{self.output_path}impc_images_core_parquet")

    def main(self, sc: SparkContext, *args: Any):
        """
        Solr Core loader.

        Args (positional, via app_options):
            [0]: observations parquet path
            [1]: pipeline core parquet path
            [2]: Omero IDs CSV path
            [3]: output parquet path
        """
        observations_parquet_path = args[0]
        pipeline_core_parquet_path = args[1]
        omero_ids_csv_path = args[2]
        output_path = args[3]
        spark = SparkSession.builder.getOrCreate()
        observations_df = spark.read.parquet(observations_parquet_path)
        pipeline_core_df = spark.read.parquet(pipeline_core_parquet_path)
        # Keep only the ontology columns needed for annotation, renaming the
        # MP columns so they don't clash with observation columns later.
        pipeline_core_df = pipeline_core_df.select(
            "fully_qualified_name",
            "mouse_anatomy_id",
            "mouse_anatomy_term",
            "embryo_anatomy_id",
            "embryo_anatomy_term",
            col("mp_id").alias("impress_mp_id"),
            col("mp_term").alias("impress_mp_term"),
            "top_level_mouse_anatomy_id",
            "top_level_mouse_anatomy_term",
            "top_level_embryo_anatomy_id",
            "top_level_embryo_anatomy_term",
            col("top_level_mp_id").alias("impress_top_level_mp_id"),
            col("top_level_mp_term").alias("impress_top_level_mp_term"),
            col("intermediate_mp_id").alias("impress_intermediate_mp_id"),
            col("intermediate_mp_term").alias("impress_intermediate_mp_term"),
        ).distinct()
        omero_ids_df = spark.read.csv(omero_ids_csv_path, header=True).dropDuplicates()
        omero_ids_df = omero_ids_df.alias("omero")
        # Restrict to image observations and attach their Omero IDs via an
        # inner join on the observation identity columns.
        image_observations_df = observations_df.where(
            col("observation_type") == "image_record"
        )
        image_observations_df = image_observations_df.alias("obs")
        image_observations_df = image_observations_df.join(
            omero_ids_df,
            [
                "observation_id",
                "download_file_path",
                "phenotyping_center",
                "pipeline_stable_id",
                "procedure_stable_id",
                "parameter_stable_id",
                "datasource_name",
            ],
        )
        image_observations_df = image_observations_df.select("obs.*", "omero.omero_id")
        # Explode the per-image parameter association arrays so each associated
        # parameter can be matched against the pipeline core individually.
        parameter_association_fields = [
            "parameter_association_stable_id",
            "parameter_association_sequence_id",
            "parameter_association_name",
            "parameter_association_value",
        ]
        image_observations_exp_df = image_observations_df
        for parameter_association_field in parameter_association_fields:
            image_observations_exp_df = image_observations_exp_df.withColumn(
                f"{parameter_association_field}_exp",
                explode_outer(parameter_association_field),
            )
        # Key used to look up ontology terms: pipeline_procedure_parameter.
        image_observations_x_impress_df = image_observations_exp_df.withColumn(
            "fully_qualified_name",
            concat_ws(
                "_",
                "pipeline_stable_id",
                "procedure_stable_id",
                "parameter_association_stable_id_exp",
            ),
        )
        image_observations_x_impress_df = image_observations_x_impress_df.join(
            pipeline_core_df,
            (
                image_observations_x_impress_df["fully_qualified_name"]
                == pipeline_core_df["fully_qualified_name"]
            ),
            "left_outer",
        )
        # Aggregations collapsing the exploded rows back to one row per
        # observation. Mouse anatomy wins over embryo anatomy when present.
        # NOTE(review): the first two aliases ("embryo_anatomy_*_set") apply
        # the same mouse-over-embryo coalescing as "anatomy_id"/"anatomy_term"
        # below — confirm whether they were meant to be embryo-only.
        group_by_expressions = [
            collect_set(
                when(
                    col("mouse_anatomy_id").isNotNull(), col("mouse_anatomy_id")
                ).otherwise(col("embryo_anatomy_id"))
            ).alias("embryo_anatomy_id_set"),
            collect_set(
                when(
                    col("mouse_anatomy_term").isNotNull(), col("mouse_anatomy_term")
                ).otherwise(col("embryo_anatomy_term"))
            ).alias("embryo_anatomy_term_set"),
            collect_set(
                when(
                    col("mouse_anatomy_id").isNotNull(), col("mouse_anatomy_id")
                ).otherwise(col("embryo_anatomy_id"))
            ).alias("anatomy_id"),
            collect_set(
                when(
                    col("mouse_anatomy_term").isNotNull(), col("mouse_anatomy_term")
                ).otherwise(col("embryo_anatomy_term"))
            ).alias("anatomy_term"),
            flatten(
                collect_set(
                    when(
                        col("mouse_anatomy_id").isNotNull(),
                        col("top_level_mouse_anatomy_id"),
                    ).otherwise(col("top_level_embryo_anatomy_id"))
                )
            ).alias("selected_top_level_anatomy_id"),
            flatten(
                collect_set(
                    when(
                        col("mouse_anatomy_id").isNotNull(),
                        col("top_level_mouse_anatomy_term"),
                    ).otherwise(col("top_level_embryo_anatomy_term"))
                )
            ).alias("selected_top_level_anatomy_term"),
            collect_set("impress_mp_id").alias("mp_id"),
            collect_set("impress_mp_term").alias("mp_term"),
            flatten(collect_set("impress_top_level_mp_id")).alias(
                "top_level_mp_id_set"
            ),
            flatten(collect_set("impress_top_level_mp_term")).alias(
                "top_level_mp_term_set"
            ),
            flatten(collect_set("impress_intermediate_mp_id")).alias(
                "intermediate_mp_id_set"
            ),
            flatten(collect_set("impress_intermediate_mp_term")).alias(
                "intermediate_mp_term_set"
            ),
        ]
        # Keep only the columns consumed by the aggregation before grouping.
        image_observations_x_impress_df = image_observations_x_impress_df.select(
            [
                "observation_id",
                "mouse_anatomy_id",
                "embryo_anatomy_id",
                "mouse_anatomy_term",
                "embryo_anatomy_term",
                "top_level_mouse_anatomy_id",
                "top_level_embryo_anatomy_id",
                "top_level_mouse_anatomy_term",
                "top_level_embryo_anatomy_term",
                "impress_mp_id",
                "impress_mp_term",
                "impress_top_level_mp_id",
                "impress_top_level_mp_term",
                "impress_intermediate_mp_id",
                "impress_intermediate_mp_term",
            ]
        )
        image_observations_x_impress_df = image_observations_x_impress_df.groupBy(
            "observation_id"
        ).agg(*group_by_expressions)
        # Re-attach the aggregated ontology columns to the image observations.
        image_observations_df = image_observations_df.join(
            image_observations_x_impress_df, "observation_id"
        )
        # Derive the three Omero media URLs from the omero_id.
        image_observations_df = image_observations_df.withColumn(
            "download_url",
            concat(
                lit(
                    "//www.ebi.ac.uk/mi/media/omero/webgateway/archived_files/download/"
                ),
                col("omero_id"),
            ),
        )
        image_observations_df = image_observations_df.withColumn(
            "jpeg_url",
            concat(
                lit("//www.ebi.ac.uk/mi/media/omero/webgateway/render_image/"),
                col("omero_id"),
            ),
        )
        image_observations_df = image_observations_df.withColumn(
            "thumbnail_url",
            concat(
                lit("//www.ebi.ac.uk/mi/media/omero/webgateway/render_birds_eye_view/"),
                col("omero_id"),
            ),
        )
        image_observations_df.write.parquet(output_path)
|
# Copyright (C) 2019 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
import asyncio
import base64
from PyQt5.QtCore import Qt, QThread, pyqtSignal
from PyQt5.QtWidgets import (QWidget, QVBoxLayout, QLabel, QProgressBar,
QHBoxLayout, QPushButton, QDialog)
from electrum import version
from electrum import constants
from electrum import ecc
from electrum.i18n import _
from electrum.util import make_aiohttp_session
from electrum.logging import Logger
from electrum.network import Network
from electrum._vendor.distutils.version import StrictVersion
class UpdateCheck(QDialog, Logger):
    """Dialog that checks electrum.org for a newer release and shows the result."""

    url = "https://electrum.org/version"
    download_url = "https://electrum.org/#download"

    # Bitcoin addresses whose signature over the version string is trusted.
    VERSION_ANNOUNCEMENT_SIGNING_KEYS = (
        "13xjmVAB1EATPP8RshTE8S8sNwwSUM9p1P",  # ThomasV (since 3.3.4)
        "1Nxgk6NTooV4qZsX5fdqQwrLjYcsQZAfTg",  # ghost43 (since 4.1.2)
    )

    def __init__(self, *, latest_version=None):
        """Build the dialog and start the background version check.

        latest_version: optional pre-fetched version to display immediately.
        """
        QDialog.__init__(self)
        self.setWindowTitle('Electrum - ' + _('Update Check'))
        self.content = QVBoxLayout()
        self.content.setContentsMargins(*[10]*4)
        # heading + detail labels are (re)filled by update_view()
        self.heading_label = QLabel()
        self.content.addWidget(self.heading_label)
        self.detail_label = QLabel()
        self.detail_label.setTextInteractionFlags(Qt.LinksAccessibleByMouse)
        self.detail_label.setOpenExternalLinks(True)
        self.content.addWidget(self.detail_label)
        # max == min == 0 makes the progress bar indeterminate while checking
        self.pb = QProgressBar()
        self.pb.setMaximum(0)
        self.pb.setMinimum(0)
        self.content.addWidget(self.pb)
        versions = QHBoxLayout()
        versions.addWidget(QLabel(_("Current version: {}".format(version.ELECTRUM_VERSION))))
        self.latest_version_label = QLabel(_("Latest version: {}".format(" ")))
        versions.addWidget(self.latest_version_label)
        self.content.addLayout(versions)
        self.update_view(latest_version)
        # network fetch runs in a QThread so the UI stays responsive
        self.update_check_thread = UpdateCheckThread()
        self.update_check_thread.checked.connect(self.on_version_retrieved)
        self.update_check_thread.failed.connect(self.on_retrieval_failed)
        self.update_check_thread.start()
        close_button = QPushButton(_("Close"))
        close_button.clicked.connect(self.close)
        self.content.addWidget(close_button)
        self.setLayout(self.content)
        self.show()

    def on_version_retrieved(self, version):
        # Slot for UpdateCheckThread.checked (parameter shadows the
        # `version` module locally — intentional in the original).
        self.update_view(version)

    def on_retrieval_failed(self):
        # Slot for UpdateCheckThread.failed: show error state, stop the spinner.
        self.heading_label.setText('<h2>' + _("Update check failed") + '</h2>')
        self.detail_label.setText(_("Sorry, but we were unable to check for updates. Please try again later."))
        self.pb.hide()

    @staticmethod
    def is_newer(latest_version):
        # latest_version is a StrictVersion built in UpdateCheckThread.get_update_info
        return latest_version > StrictVersion(version.ELECTRUM_VERSION)

    def update_view(self, latest_version=None):
        """Render one of three states: checking / up to date / update available."""
        if latest_version:
            self.pb.hide()
            self.latest_version_label.setText(_("Latest version: {}".format(latest_version)))
            if self.is_newer(latest_version):
                self.heading_label.setText('<h2>' + _("There is a new update available") + '</h2>')
                url = "<a href='{u}'>{u}</a>".format(u=UpdateCheck.download_url)
                self.detail_label.setText(_("You can download the new version from {}.").format(url))
            else:
                self.heading_label.setText('<h2>' + _("Already up to date") + '</h2>')
                self.detail_label.setText(_("You are already on the latest version of Electrum."))
        else:
            # no version yet: keep the indeterminate progress bar visible
            self.heading_label.setText('<h2>' + _("Checking for updates...") + '</h2>')
            self.detail_label.setText(_("Please wait while Electrum checks for available updates."))
class UpdateCheckThread(QThread, Logger):
    """Background thread that fetches and signature-verifies the latest version."""

    checked = pyqtSignal(object)  # emits the StrictVersion on success
    failed = pyqtSignal()         # emitted when there is no network or fetch/verify fails

    def __init__(self):
        QThread.__init__(self)
        Logger.__init__(self)
        self.network = Network.get_instance()

    async def get_update_info(self):
        """Fetch the signed version announcement and return it as a StrictVersion.

        Raises if no trusted key produced a valid signature.
        """
        # note: Use long timeout here as it is not critical that we get a response fast,
        # and it's bad not to get an update notification just because we did not wait enough.
        async with make_aiohttp_session(proxy=self.network.proxy, timeout=120) as session:
            async with session.get(UpdateCheck.url) as result:
                signed_version_dict = await result.json(content_type=None)
                # example signed_version_dict:
                # {
                #     "version": "3.9.9",
                #     "signatures": {
                #         "1Lqm1HphuhxKZQEawzPse8gJtgjm9kUKT4": "IA+2QG3xPRn4HAIFdpu9eeaCYC7S5wS/sDxn54LJx6BdUTBpse3ibtfq8C43M7M1VfpGkD5tsdwl5C6IfpZD/gQ="
                #     }
                # }
                version_num = signed_version_dict['version']
                sigs = signed_version_dict['signatures']
                # accept the announcement only if at least one trusted key signed it
                for address, sig in sigs.items():
                    if address not in UpdateCheck.VERSION_ANNOUNCEMENT_SIGNING_KEYS:
                        continue
                    sig = base64.b64decode(sig)
                    msg = version_num.encode('utf-8')
                    if ecc.verify_message_with_address(address=address, sig65=sig, message=msg,
                                                      net=constants.BitcoinMainnet):
                        self.logger.info(f"valid sig for version announcement '{version_num}' from address '{address}'")
                        break
                else:
                    # for/else: no break means no valid signature was found
                    raise Exception('no valid signature for version announcement')
                return StrictVersion(version_num.strip())

    def run(self):
        """QThread entry point: run the async fetch on the network's event loop."""
        if not self.network:
            self.failed.emit()
            return
        try:
            # schedule on the network loop and block this worker thread for the result
            update_info = asyncio.run_coroutine_threadsafe(self.get_update_info(), self.network.asyncio_loop).result()
        except Exception as e:
            self.logger.info(f"got exception: '{repr(e)}'")
            self.failed.emit()
        else:
            self.checked.emit(update_info)
|
from tkinter import *
import tkinter.messagebox
import adv_backend
from tkinter import ttk
from ttkthemes import themed_tk as tk
import backend
class infrch:
    """Influencer search window: view, search and select influencer records.

    Bug fixes vs. the original: the nested helpers were declared with a
    `self` parameter but used as plain closures — tkinter calls the
    WM_DELETE_WINDOW callback with no arguments, `listing.bind` passes only
    the event, and `self.clear()` raised AttributeError because `clear` was
    a local function, never an instance attribute. The helpers are now
    closures over __init__'s locals (no `self` parameter) and are called
    directly.
    """

    def __init__(self):
        root = tk.ThemedTk()
        root.get_themes()
        root.set_theme('radiance')
        root.title('Advertiser System')

        def callback():
            # confirm-before-quit handler for the window close button
            if tkinter.messagebox.askokcancel("quit", "Do You really want to quit?"):
                root.destroy()

        def clear():
            # empty all four entry widgets
            e1.delete(0, END)
            e2.delete(0, END)
            e3.delete(0, END)
            e4.delete(0, END)

        def view_all():
            # replace the listbox contents with every influencer row
            self.listing.delete(0, END)
            for row in backend.view():
                self.listing.insert(END, row)
            clear()

        def get_selected_row(event):
            # tkinter passes a single Event argument to bound handlers
            global selected_tuple
            clear()
            index = self.listing.curselection()[0]
            selected_tuple = self.listing.get(index)
            e1.insert(END, selected_tuple[1])
            e2.insert(END, selected_tuple[3])
            e3.insert(END, selected_tuple[2])
            e4.insert(END, selected_tuple[4])

        def search():
            # filter by follower range and (optionally) name
            print(self.start_foll_txt.get(), self.end_foll_txt.get())
            self.listing.delete(0, END)
            search_data = backend.inf_search(self.start_foll_txt.get(), self.end_foll_txt.get(), self.inf_name_txt.get())
            print(search_data)
            if len(search_data) != 0:
                for row in search_data:
                    self.listing.insert(END, row)
            else:
                tkinter.messagebox.showinfo('Message', 'NO RESULT FOUND')
            clear()

        selected_tuple = tuple()
        self.inf_name_txt = StringVar(root)
        self.inf_cat_txt = StringVar(root)
        self.adv_phone_txt = StringVar(root)
        self.comp_name_txt = StringVar(root)
        self.start_foll_txt = IntVar(root)
        self.end_foll_txt = IntVar(root)
        l = ttk.Label(root, text='Influencer Name', relief=RAISED)
        l.grid(row=0, column=0, padx=5, pady=5, sticky='nswe')
        l = ttk.Label(root, text='Genre/Category', relief=RAISED)
        l.grid(row=1, column=0, padx=5, pady=5, sticky='nswe')
        e1 = Entry(root, textvariable=self.inf_name_txt)
        e1.grid(row=0, column=1, padx=5, pady=5, sticky='nswe')
        e2 = ttk.Combobox(root, textvariable=self.inf_cat_txt)
        e2['values'] = ('Select Category',
                        'Vlogger',
                        'Photographer',
                        'StandUp Comedian',
                        'Reality Show',
                        'Activist',
                        'Journalist',
                        'Sports',
                        'Food Blogger',
                        'Videographer',
                        'Beauty',
                        'Gamer')
        e2.grid(row=1, column=1, padx=5, pady=5, sticky='nswe')
        e2.current(0)
        l = ttk.Label(root, text='Start Range', relief=RAISED)
        l.grid(row=0, column=2, padx=5, pady=5, sticky='nswe')
        l = ttk.Label(root, text='End Range', relief=RAISED)
        l.grid(row=1, column=2, padx=5, pady=5, sticky='nswe')
        e3 = Entry(root, textvariable=self.start_foll_txt)
        e3.grid(row=0, column=3, padx=5, pady=5, sticky='nswe')
        e4 = Entry(root, textvariable=self.end_foll_txt)
        e4.grid(row=1, column=3, padx=5, pady=5, sticky='nswe')
        b1 = ttk.Button(root, text='View All Influencers', command=lambda: view_all())
        b1.grid(row=2, column=3, padx=5, pady=5, rowspan=2, sticky='nswe')
        b2 = ttk.Button(root, text='Search Influencers', command=lambda: search())
        b2.grid(row=4, column=3, padx=5, pady=5, rowspan=2, sticky='nswe')
        b6 = ttk.Button(root, text='Exit', command=root.destroy)
        b6.grid(row=6, column=3, padx=5, pady=5, rowspan=2, sticky='nswe')
        self.listing = Listbox(root)
        self.listing.grid(row=2, column=0, rowspan=6, columnspan=3, padx=5, pady=5, sticky='nswe')
        self.listing.bind('<<ListboxSelect>>', get_selected_row)
        # make the grid stretch with the window
        for i in range(4):
            root.grid_columnconfigure(i, weight=1)
        for i in range(8):
            root.grid_rowconfigure(i, weight=1)
        root.protocol("WM_DELETE_WINDOW", callback)
        root.mainloop()
|
class SumTree:
    """Array-backed sum tree for prioritized experience replay.

    Leaves hold per-sample priorities; every internal node holds the sum of
    its children, so the root is the total priority mass. Capacity items are
    stored in a ring buffer (`data`), overwriting the oldest when full.
    """

    def __init__(self, capacity):
        self.capacity = capacity
        self.tree = [0] * (2 * capacity - 1)   # internal nodes + leaves
        self.data = [None] * capacity          # payloads, parallel to leaves
        self.size = 0                          # number of occupied slots
        self.curr_point = 0                    # next ring-buffer write position

    def add(self, data):
        """Store a sample with priority = current maximum priority + 1."""
        leaves = self.tree[self.capacity - 1:self.capacity + self.size]
        self.data[self.curr_point] = data
        self.update(self.curr_point, max(leaves) + 1)
        self.curr_point = (self.curr_point + 1) % self.capacity
        self.size = min(self.size + 1, self.capacity)

    def update(self, point, weight):
        """Set the priority of slot *point* and propagate the delta to the root."""
        idx = point + self.capacity - 1
        delta = weight - self.tree[idx]
        self.tree[idx] = weight
        while idx > 0:
            idx = (idx - 1) // 2
            self.tree[idx] += delta

    def get_total(self):
        """Total priority mass (root of the tree)."""
        return self.tree[0]

    def get_min(self):
        """Smallest stored priority (used for importance-sampling ratios)."""
        return min(self.tree[self.capacity - 1:self.capacity - 1 + self.size])

    def sample(self, v):
        """Descend the tree with mass *v*.

        Returns (slot index, stored payload, sampling probability).
        """
        idx = 0
        while idx < self.capacity - 1:  # stop once idx is a leaf
            left = 2 * idx + 1
            if v <= self.tree[left]:
                idx = left
            else:
                v -= self.tree[left]
                idx = left + 1
        leaf = idx - (self.capacity - 1)
        return leaf, self.data[leaf], self.tree[idx] / self.get_total()
|
from abc import ABC, abstractmethod
class Car(ABC):
    """Product: abstract base for every car the factories create."""

    @abstractmethod
    def show(self):
        # concrete cars print a description of themselves
        pass
class FerrariCar(Car):
    """Abstract Ferrari product; subclasses set the place of origin."""

    def __init__(self):
        # "unknown origin" placeholder, overwritten by subclasses
        self.origin_place = "未知产地的"

    def show(self):
        print(f"这是一辆{self.origin_place}法拉利")
class DomesticFerrariCar(FerrariCar):
    """Domestically produced Ferrari."""

    def __init__(self):
        super().__init__()
        self.origin_place = "国产"
class ImportFerrariCar(FerrariCar):
    """Imported Ferrari."""

    def __init__(self):
        super().__init__()
        self.origin_place = "进口"
class BenzCar(Car):
    """Abstract Benz product; subclasses set the place of origin."""

    def __init__(self):
        # "unknown origin" placeholder, overwritten by subclasses
        self.origin_place = "未知产地的"

    def show(self):
        print(f"这是一辆{self.origin_place}奔驰")
class DomesticBenzCar(BenzCar):
    """Domestically produced Benz.

    Bug fix: previously subclassed FerrariCar, so show() described this
    Benz product as a Ferrari.
    """

    def __init__(self):
        super().__init__()
        self.origin_place = "国产"
class ImportBenzCar(BenzCar):
    """Imported Benz.

    Bug fix: previously subclassed FerrariCar, so show() described this
    Benz product as a Ferrari.
    """

    def __init__(self):
        super().__init__()
        self.origin_place = "进口"
class CarFactory(ABC):
    """Creator: abstract factory producing one consistent family of cars."""

    @abstractmethod
    def create_ferrari_car(self):
        pass

    @abstractmethod
    def create_benz_car(self):
        pass
class DomesticCarFactory(CarFactory):
    """Factory for the domestically-produced car family."""

    def create_benz_car(self):
        return DomesticBenzCar()

    def create_ferrari_car(self):
        return DomesticFerrariCar()
class ImportCarFactory(CarFactory):
    """Factory for the imported (original-manufacturer) car family."""

    def create_ferrari_car(self):
        return ImportFerrariCar()

    def create_benz_car(self):
        return ImportBenzCar()
if __name__ == "__main__":
    # Demo: each concrete factory yields a consistent family of cars.
    domestic_car_factory = DomesticCarFactory()
    benz_car = domestic_car_factory.create_benz_car()
    benz_car.show()
    ferrari_car = domestic_car_factory.create_ferrari_car()
    ferrari_car.show()
    import_car_factory = ImportCarFactory()
    benz_car = import_car_factory.create_benz_car()
    benz_car.show()
    ferrari_car = import_car_factory.create_ferrari_car()
    ferrari_car.show()
|
#!/usr/bin/python
import qlib
import numpy as np
def isClose(x, y, eps=1e-10):
    # Approximate float equality within eps.
    # NOTE: Python 2 file (print statement below, xrange elsewhere).
    t = np.abs(x - y) < eps
    if not t:
        print x, y, eps
    return t
def test_getNextAction():
    """Epsilon-greedy sanity check.

    After rewarding replayMemory, the greedy action for state1 should be
    action1; it is picked with probability (1 - eps) + eps/2 over 2 actions.
    """
    epsilon = 0.1
    learnRate = 0.3
    discountRate = 0.5
    value = qlib.ValueFunction(epsilon=epsilon,
                               learnRate=learnRate,
                               discountRate=discountRate)
    stateActionPair1 = ([0, 0, 0, 0], (1, 1))
    stateActionPair2 = ([0, 1, 0, 0], (0, 1))
    state1, action1 = stateActionPair1
    state2, action2 = stateActionPair2
    actions = [(1, 1), (0, 1)]
    replayMemory = [stateActionPair1, stateActionPair2]
    reward = 100
    value.updateValueByDelayedReward(replayMemory, reward)
    # Monte-Carlo estimate of how often the greedy action is chosen.
    n = 10000
    nTrue = 0
    for i in xrange(n):  # Python 2 (xrange)
        nextAction = value.getEpsilonGreedyAction(state1, actions)
        if nextAction == action1:
            nTrue += 1
    p_est = 1.0 * nTrue / n
    # expected hit rate: 1 - eps + eps/2 = 1 - eps/2 with two actions
    p_true = 1.0 - epsilon / 2
    assert isClose(p_est, p_true, eps=1e-2)
def test_updateValuefunction():
    """Delayed-reward propagation check.

    Rewarding the final pair should credit earlier pairs with the reward
    discounted geometrically: value_k = (learnRate*discountRate)**d * learnRate * reward,
    where d is the distance from the end of the replay memory.
    """
    epsilon = 0.1
    learnRate = 0.3
    discountRate = 0.5
    value = qlib.ValueFunction(epsilon=epsilon,
                               learnRate=learnRate,
                               discountRate=discountRate)
    state0, action0 = [0, 0, 0, 0], [0, 1]
    state1, action1 = [1, 0, 0, 0], [1, 1]
    state2, action2 = [1, 1, 0, 0], [2, 1]
    state3, action3 = [1, 1, 1, 0], [3, 1]
    state4, action4 = [1, 1, 1, 1], [0, 1]
    replayMemory = [(state0, action0),
                    (state1, action1),
                    (state2, action2),
                    (state3, action3),
                    (state4, action4)]
    reward = 1.0
    value.updateValueByDelayedReward(replayMemory, reward)
    # gamma is the per-step decay applied walking backwards through memory
    gamma = learnRate * discountRate
    assert isClose(value.getValue(state4, action4),
                   gamma**0 * learnRate * reward)
    assert isClose(value.getValue(state3, action3),
                   gamma**1 * learnRate * reward)
    assert isClose(value.getValue(state2, action2),
                   gamma**2 * learnRate * reward)
    assert isClose(value.getValue(state1, action1),
                   gamma**3 * learnRate * reward)
    assert isClose(value.getValue(state0, action0),
                   gamma**4 * learnRate * reward)
|
# cv2.cvtColor takes a numpy ndarray as an argument
import numpy as nm
import pytesseract
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files (x86)\Tesseract-OCR\tesseract.exe"
# importing OpenCV
import cv2
from PIL import ImageGrab, Image, ImageEnhance
import datetime
import time
import os
import sys
from tinydb import TinyDB, Query
dbLocation = os.getcwd() + "/SRdb.json"
print("saving data to", dbLocation)
db = TinyDB(dbLocation)
# general dimensions
SRTextWidth = 62
SRTextHeight = 40
topOffset = 555
def getSR(img, leftOffsetPx):
cropSize = (leftOffsetPx, topOffset, leftOffsetPx+SRTextWidth, topOffset+SRTextHeight)
cropped = img.crop(cropSize)
processed = ImageEnhance.Contrast(cropped).enhance(2)
return makeInt(pytesseract.image_to_string(cv2.cvtColor(nm.array(processed), cv2.COLOR_BGR2GRAY), lang="eng"))
def makeInt(someString):
    """Parse OCR output to an int; return the -1 sentinel when unreadable.

    Narrowed from a bare `except:` — only parse failures (ValueError) and
    non-string inputs (AttributeError/TypeError) map to the sentinel.
    """
    try:
        return int(someString.strip())
    except (ValueError, AttributeError, TypeError):
        return -1
def difference(numA, numB):
    """Absolute SR delta; 0 when either reading is the -1 OCR-failure sentinel."""
    if -1 in (numA, numB):
        return 0
    return abs(numA - numB)
# max change allowed in SR between two games, used to reduce number of incorrect readings
SR_DIFF_TOLERANCE = 60
SR_MIN = 500
SR_MAX = 4800


def shouldSaveStats(tankSR, damageSR, supportSR):
    """Decide whether a (tank, damage, support) SR reading is worth storing.

    Skips readings where every role failed OCR (-1) and readings identical
    to the most recent entry in the module-level TinyDB `db`.
    """
    if (tankSR == -1 and damageSR == -1 and supportSR == -1):
        return False
    allEntries = db.all()
    if (len(allEntries) > 0):
        lastEntry = allEntries[-1]
        # unchanged since the last capture -> nothing new to record
        if (tankSR == lastEntry["tankSR"] and damageSR == lastEntry["damageSR"] and supportSR == lastEntry["supportSR"]):
            return False
    return True
# manual tracking: interactive prompt loop for entering SRs by hand.
# NOTE(review): this loop only exits via "quit" (sys.exit), so the
# automated-tracking code below it is unreachable while it is present.
while True:
    print("Update SR for Tank, DPS, Support or all? Type quit to quit. Other input = cancel")
    roleChoice = input()
    allEntries = db.all()
    newEntry = None
    if (len(allEntries) > 0):
        # start from the latest entry so untouched roles keep their value
        newEntry = allEntries[-1]
    else:
        newEntry = {"captureTime" : "", "tankSR" : "", "damageSR" : "", "supportSR" : ""}
    newEntry["captureTime"] = str(datetime.datetime.now())
    if (roleChoice == "tank"):
        newTankSR = int(input("Enter new Tank SR: "))
        newEntry["tankSR"] = newTankSR
    elif (roleChoice == "dps"):
        newDamageSR = int(input("Enter new Damage SR: "))
        newEntry["damageSR"] = newDamageSR
    elif (roleChoice == "support"):
        newSupportSR = int(input("Enter new Support SR: "))
        newEntry["supportSR"] = newSupportSR
    elif (roleChoice == "all"):
        newTankSR = int(input("Enter new Tank SR: "))
        newEntry["tankSR"] = newTankSR
        newDamageSR = int(input("Enter new Damage SR: "))
        newEntry["damageSR"] = newDamageSR
        newSupportSR = int(input("Enter new Support SR: "))
        newEntry["supportSR"] = newSupportSR
    elif (roleChoice == "quit"):
        sys.exit()
    else:
        # unrecognised input = cancel this round
        continue
    print("Saving:", newEntry)
    db.insert(newEntry)
# automated tracking: periodically grab the screen, OCR the three SRs and
# store them. NOTE(review): unreachable while the manual loop above runs.
img = Image.open(os.getcwd() + "\SRimage_hover.png")
tankLeftOffset = 860  # not right
damageLeftOffset = 940
supportLeftOffset = 1220
starttime = time.time()
secondsBetweenUpdate = 5
while True:
    print("Analyzing screen...")
    img = ImageGrab.grab()
    tankSR = getSR(img, tankLeftOffset)
    damageSR = getSR(img, damageLeftOffset)
    supportSR = getSR(img, supportLeftOffset)
    captureTime = datetime.datetime.now()
    if shouldSaveStats(tankSR, damageSR, supportSR):
        allEntries = db.all()
        if len(allEntries) > 0:
            newEntry = allEntries[-1]
            # clamp implausible jumps (misreads) back to the previous value
            if difference(tankSR, newEntry["tankSR"]) > SR_DIFF_TOLERANCE:
                tankSR = newEntry["tankSR"]
            if difference(damageSR, newEntry["damageSR"]) > SR_DIFF_TOLERANCE:
                damageSR = newEntry["damageSR"]
            if difference(supportSR, newEntry["supportSR"]) > SR_DIFF_TOLERANCE:
                supportSR = newEntry["supportSR"]
            # BUG FIX: the original wrote `if (not shouldSaveStats): continue`,
            # testing the function object itself (always truthy, so the re-check
            # never ran). Re-evaluate with the clamped values instead.
            if not shouldSaveStats(tankSR, damageSR, supportSR):
                continue
        print("saving stats")
        print("time:", str(captureTime))
        print("Tank SR:", tankSR)
        print("Damage SR:", damageSR)
        print("Support SR:", supportSR)
        db.insert({"captureTime" : str(captureTime), "tankSR" : tankSR, "damageSR" : damageSR, "supportSR" : supportSR})
    else:
        print("not saving stats")
    # align wake-ups to the secondsBetweenUpdate grid
    time.sleep(secondsBetweenUpdate - ((time.time() - starttime) % secondsBetweenUpdate))
class Solution:
    def maxProfit(self, prices: "list[int]") -> int:
        """Best profit from one buy followed by one sell; 0 if never profitable.

        Fixes vs. the original: the `List[int]` annotation raised NameError
        (typing was never imported) — replaced with a string annotation —
        and `prices[0]` crashed on an empty list.
        """
        if not prices:
            return 0
        best = 0
        lowest = prices[0]  # cheapest price seen so far
        for price in prices:
            if price < lowest:
                lowest = price
            else:
                best = max(best, price - lowest)
        return best
|
# Generated by Django 2.0.4 on 2018-04-08 15:27
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the user-interaction app: UserAsk, UserCourse,
    UserFavorite, UserMessage, plus UserComments as a multi-table child of
    UserCourse (note its `bases`/parent-link OneToOneField)."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='UserAsk',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20, verbose_name='name')),
                ('mobile', models.CharField(max_length=11, verbose_name='mobile')),
                ('course', models.CharField(max_length=50, verbose_name='course_name')),
                # datetime.datetime.now (callable) -> evaluated per row insert
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='add_time')),
            ],
            options={
                'verbose_name': 'user_ask',
                'verbose_name_plural': 'user_ask',
            },
        ),
        migrations.CreateModel(
            name='UserCourse',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='add_time')),
            ],
            options={
                'verbose_name': 'user_course',
                'verbose_name_plural': 'user_course',
            },
        ),
        migrations.CreateModel(
            name='UserFavorite',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # generic favourite: fav_id + fav_type identify the target object
                ('fav_id', models.IntegerField(default=0, verbose_name='data_id')),
                ('fav_type', models.IntegerField(choices=[(1, '课程'), (2, '课程机构'), (3, '讲师')], default=1, verbose_name='favorite_type')),
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='add_time')),
            ],
            options={
                'verbose_name': 'user_favorite',
                'verbose_name_plural': 'user_favorite',
            },
        ),
        migrations.CreateModel(
            name='UserMessage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # plain integer user id, not a ForeignKey (0 = broadcast, presumably — confirm in app code)
                ('user', models.IntegerField(default=0, verbose_name='receive_user')),
                ('message', models.CharField(max_length=500, verbose_name='msg_content')),
                ('has_read', models.BooleanField(default=False, verbose_name='has_read_msg')),
                ('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='add_time')),
            ],
            options={
                'verbose_name': 'user_msg',
                'verbose_name_plural': 'user_msg',
            },
        ),
        migrations.CreateModel(
            name='UserComments',
            fields=[
                # multi-table inheritance link back to UserCourse
                ('usercourse_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='operation.UserCourse')),
                ('comments', models.CharField(max_length=200, verbose_name='comment')),
            ],
            options={
                'verbose_name': 'course_comments',
                'verbose_name_plural': 'course_comments',
            },
            bases=('operation.usercourse',),
        ),
    ]
|
# Generated by Django 2.1.2 on 2019-11-26 08:11
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import utils.upload
import utils.validators
class Migration(migrations.Migration):
    """Initial schema for the marketplace app: Article, ArticleImage
    (validated uploads) and Favourite (user <-> article join table)."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('description', models.CharField(max_length=1000)),
                ('price', models.PositiveIntegerField()),
                ('city', models.CharField(max_length=255)),
                ('category', models.CharField(max_length=255)),
                ('color', models.PositiveSmallIntegerField(choices=[(1, 'RED'), (2, 'WHITE'), (3, 'GREEN'), (4, 'BLUE')], default=2)),
                ('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='articles', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='ArticleImage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # upload path and size/extension validators come from utils
                ('image', models.FileField(blank=True, null=True, upload_to=utils.upload.article_image_path, validators=[utils.validators.article_image_size, utils.validators.article_image_extension])),
                ('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='images', to='main.Article')),
            ],
        ),
        migrations.CreateModel(
            name='Favourite',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='favs', to='main.Article')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='favs', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import time, os, shutil, sys
sys.path.append("c:/users/yamen/appdata/local/programs/python/python38-32/lib/site-packages")
from colorama import init, Fore, Back, Style
class Handler(FileSystemEventHandler):
    """Watchdog handler: when any file appears, re-sort the whole watched folder."""

    def on_created(self, event):
        # small grace period so the new file is fully written/released first
        time.sleep(2)
        print (f"{Fore.GREEN}-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\nGot event for file {event.src_path} \n{Fore.BLUE}Processing... {Fore.RESET}")
        # process every file currently in the folder, not just the new one
        for fileName in os.listdir(folderToTrack):
            print(f"\t{fileName}")
            ProcessFile(folderToTrack + "/" + fileName)
        print(Fore.GREEN + "Done\n-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=" + Fore.RESET)
def ProcessFile(filePath):
    """Move a single file to the folder mapped to its extension, if any."""
    fileName, fileExtension = os.path.splitext(filePath)
    fileExtension = fileExtension.replace(".", "")
    # extensionsToPathDict keys are comma-joined extension groups ("jpg,png")
    destination = next(
        (dest for exts, dest in extensionsToPathDict.items()
         if fileExtension in exts.split(",")),
        None,
    )
    if destination is not None:
        MoveFile(filePath, destination)
        return
    print(f"{Fore.YELLOW}\tThere is no folder attached to the file type \".{fileExtension}\" {Fore.RESET}\n")
def MoveFile(filePath, destination):
    """Move filePath into destination, reporting success or failure.

    Fixes vs. the original: removed a stray raw "[32m" ANSI fragment from
    the success message (colorama already emits the colour codes), narrowed
    the bare `except:` and surfaced the actual error.
    """
    try:
        shutil.move(filePath, destination)
        print(f"{Fore.GREEN}\tMoved \"{os.path.basename(filePath)}\" to \"{destination}\"\n {Fore.RESET}")
    except (shutil.Error, OSError) as err:
        print(Fore.RED + f" █████[[ERROR]] an ERROR happened while moving the file to the new destination█████ ({err})" + Fore.RESET)
def PrintAnimation(text):
    """Type *text* to stdout one character at a time (spaces appear instantly)."""
    for ch in text:
        sys.stdout.write(ch)
        sys.stdout.flush()
        if ch != " ":
            time.sleep(0.05)
def StopApp():
    """Wait for Enter, then terminate the process."""
    input(Fore.MAGENTA + "Press Enter to Exit" + Fore.RESET)
    sys.exit(0)
extensionsToPathDict = {}  # Stores the extensions as keys and destination as values (to set the new path for each file)
dataLines = []             # Stores the lines that are in data.txt file
folderToTrack = ""
isLoading = False
init()  # enable colorama colour handling (required on Windows)

# Load data (bootstrap a template data.txt on first run)
try:
    dataFile = open("data.txt", "r")
except IOError:
    dataFile = open("data.txt", "w+")
    dataFile.write("<folder to watch>\n<extensions> :: <destination>")
    dataFile.seek(0)
finally:
    dataLines = dataFile.readlines()
    dataFile.close()

# first line: folder to watch; must exist or we bail out
pathLine = dataLines[0].strip()
if os.path.exists(pathLine):
    folderToTrack = pathLine
else:
    PrintAnimation(f"{Fore.RED}The folder {pathLine} doesn't exist{Fore.RESET}")
    StopApp()

# remaining lines: "<ext1,ext2> :: <destination>" mappings
for line in range(1, len(dataLines)):
    extensions, destination = dataLines[line].split("::")
    destination = destination.strip()
    if not os.path.exists(destination):
        print(f"{Fore.CYAN}The folder {destination} doesn't exist and for that the following line will be ignored: \"{dataLines[line]}\"{Fore.RESET}")
        # BUG FIX: the original used `break`, silently dropping every later
        # mapping too; the message promises only this line is skipped.
        continue
    extensionsToPathDict.update({extensions.replace(" ", "").replace(".", ""): destination})

observer = Observer()
event_handler = Handler()  # create event handler
observer.schedule(event_handler, path=folderToTrack)
observer.start()
PrintAnimation("<=======Ready=======>".center(os.get_terminal_size().columns))
try:
    # idle until Ctrl+C; the observer thread does the real work
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    observer.stop()
observer.join()
# sys.stdout.write("\r/")
# sys.stdout.flush()
# time.sleep(0.5)
|
# -*- coding: utf-8 -*-
# Advent of Code 2019 - Day 13
# Care package
from os import system
import sys
sys.path.append("../python_modules/custom")
from intcomputer import Intcomputer
# Boot the Intcode arcade with the (free-play) game program.
arcade = Intcomputer(list(map(int, open("game_free.txt", "r").read().split(","))))
# tile id -> character used by the commented-out renderer below
sprites = { 0: ' ', 1: '#', 2: '█', 3: '_', 4: 'O'}
empty = []
wall = []
block = []
paddle = []
ball = []
score = 0
# tile id -> list of (x, y) positions currently holding that tile
blocks = { 0: empty, 1: wall, 2: block, 3: paddle, 4: ball }
next_cycle = False
plays = 0
# Run one extra iteration after the computer first reports finished ('F')
# so the final output batch (carrying the last score) is still processed.
while arcade.run() != 'F' or not next_cycle:
    next_cycle = arcade.state() == 'F'
    out = arcade.output()
    # outputs arrive as (x, y, tile-id) triples; (-1, 0) carries the score
    screen = list(zip(out[0::3], out[1::3], out[2::3]))
    #print(out)
    for x, y, b in screen:
        if x == -1 and y == 0:
            score = b
        else:
            # a tile update replaces whatever was at (x, y) before
            for k in range(5):
                if k != b:
                    if (x, y) in blocks[k]:
                        blocks[k].remove((x, y))
            blocks[b].append((x, y))
    # max_x = max([max(empty), max(wall), max(block), max(paddle), max(ball)])[0]
    # max_y = max([max([e[::-1] for e in empty]), max([e[::-1] for e in wall]), max([e[::-1] for e in block]), max([e[::-1] for e in paddle]), max([e[::-1] for e in ball]), ])[0]
    # for j in range(max_y):
    #     line = ""
    #     for i in range(max_x):
    #         for k in range(5):
    #             if (i, j) in blocks[k]:
    #                 line += sprites[k]
    #     print(line)
    # print()
    # print("Score:", score)
    #print(ball)
    #print(paddle)
    # joystick AI: steer toward the ball's x position (sign of the delta —
    # presumably Intcomputer.input clamps/interprets it; confirm in module)
    arcade.input([ball[0][0] - paddle[0][0]])
    plays += 1
    # system("clear")
print("Final score:", score)
print("Number of plays:", plays)
|
from rest_framework import serializers
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from .models import Personal
from rest_framework.response import Response
from rest_framework import pagination
class ImageSerializer(serializers.ModelSerializer):
    """Minimal serializer exposing just a Personal record's id and photo."""

    class Meta:
        model = Personal
        fields = ('id', 'imagen',)
class PersonalSerializer(serializers.ModelSerializer):
    """Full Personal (staff) record serializer with dd/mm/YYYY date handling."""

    # optional so the server can assign the employee number on create
    matricula = serializers.IntegerField(required=False)
    # accept/emit dd/mm/YYYY instead of DRF's ISO default, which otherwise
    # rejects these inputs with "Datetime has wrong format ..."
    fec_nacimiento = serializers.DateTimeField(format='%d/%m/%Y', input_formats=['%d/%m/%Y'])
    fec_alta = serializers.DateTimeField(format='%d/%m/%Y', input_formats=['%d/%m/%Y'])

    def get_validation_exclusions(self, *args, **kwargs):
        # skip automatic model validation for the server-generated matricula
        exclusions = super(PersonalSerializer, self).get_validation_exclusions(*args, **kwargs)
        return exclusions + ['matricula']

    class Meta:
        model = Personal
        fields = ('id','paterno','matricula','materno','nombre','rfc','curp','cuip','fec_nacimiento',
                  'cdu_estado_nac','cdu_municipio_nac','cdu_genero','cdu_estado_civil','cdu_escolaridad',
                  'cdu_seguridad_social','id_seguridad_social','telefono','portacion','cdu_tipo_alta','fec_alta','condicionada','condiciones_alta','cdu_tipo_empleado','calle_dom',
                  'numero_dom','numero_int_dom','colonia_dom','cp_dom','cdu_estado_dom','cdu_municipio_dom','imagen','user',)
        # image uploads are handled by the dedicated ImageSerializer endpoint
        read_only_fields = ('imagen',)
class PaginatedPersonalSerializer(pagination.PaginationSerializer):
    """Paginated wrapper around PersonalSerializer.

    NOTE(review): pagination.PaginationSerializer only exists in DRF < 3.0;
    modern DRF replaces it with pagination classes.
    """

    class Meta:
        object_serializer_class = PersonalSerializer
# Command Pattern: Controlling the sequence of operations
from abc import abstractmethod,ABC
class Command(ABC):
    """Command interface: concrete commands implement execute()."""

    @abstractmethod
    def execute(self):
        pass
class Copy(Command):
    """Concrete command simulating a copy operation."""

    def execute(self):
        print('Copying...')
class Paste(Command):
    """Concrete command simulating a paste operation."""

    def execute(self):
        print('Pasting...')
class Print(Command):
    """Concrete command simulating a print operation."""

    def execute(self):
        print('Printing...')
class Macro:
    """Invoker: records commands and replays them in insertion order."""

    def __init__(self):
        self._seq_commands = []

    def add(self, command):
        """Append *command* to the playback sequence."""
        self._seq_commands.append(command)

    def run(self):
        """Execute every recorded command, oldest first."""
        for command in self._seq_commands:
            command.execute()
def main():
    """Demo: queue copy, paste and print commands, then run them in order."""
    macro = Macro()
    for command in (Copy(), Paste(), Print()):
        macro.add(command)
    macro.run()
if __name__ == '__main__':
    # run the demo only when executed as a script
    main()
|
# Read four integers into a tuple, then report: how many 9s, the position
# of the first 3 (if any), and each even value. (Prompts are Portuguese.)
num = (int(input('Digite o 1º numero: ')), int(input('Digite o 2º numero: ')),
       int(input('Digite o 3º numero: ')), int(input('Digite o 4º numero: ')))
print(f'O numero 9 apareceu {num.count(9)} vezes')
if 3 in num:
    # index() is 0-based, hence the +1 for a human-friendly position
    print(f'O numero 3 esta na posição {num.index(3)+1}')
else:
    print('O valor 3 não foi digitado')
for n in num:
    if n % 2 == 0:
        # prints one line per even value
        print(f'Os numeros pares são {n}')
|
import random
import time
class Node:
    """Binary-search-tree node storing key *k* and payload *v*."""

    def __init__(self, k, v):
        self.key, self.value = k, v
        self.left = self.right = None   # child links
        self.count = 1                  # subtree node count
        self.dups = 1                   # Duplicates collapsed into this node

    def __str__(self):
        return f"Key: {self.key}, Value: {self.value}"
def generate_random_Intlist(n, range_L, range_R):
    """Return *n* random ints drawn uniformly from [range_L, range_R] (inclusive)."""
    return [random.randint(range_L, range_R) for _ in range(n)]
def generate_nearly_odered(n, swap_time):
    """Return range(n) as a list after *swap_time* random pair swaps."""
    array = list(range(n))
    for _ in range(swap_time):
        i = random.randint(0, n - 1)
        j = random.randint(0, n - 1)
        array[i], array[j] = array[j], array[i]
    return array
def is_sorted(arr):
    """True iff *arr* is in non-decreasing order (trivially true for 0/1 elements)."""
    return all(a <= b for a, b in zip(arr, arr[1:]))
def test_sort(name, sort_func, arr):
    """Run sort_func(arr) in place, time it, verify the order and print the cost."""
    start = time.time()
    sort_func(arr)
    cost_time = time.time() - start
    # is_sorted is defined alongside this helper; print(arr) dumps the
    # offending array when the assertion fires
    assert is_sorted(arr), print(arr)
    print(f"{name}: {cost_time} s")
def print_list(array):
    """Print the elements space-separated on a single line."""
    print(" ".join(map(str, array)))
def print_tree(arr):
    """Print a 1-indexed binary-heap array one level per line.

    Bug fix: the original advanced with ``i = i**2``, which leaves ``i``
    stuck at 1 forever (1**2 == 1) — an infinite loop — and also made the
    slice bound wrong for deeper levels. The heap level starting at index
    ``i`` spans ``[i, 2*i)``, so we slice that range and double ``i``.
    """
    n = len(arr)
    i = 1
    while i < n:
        print(arr[i:2 * i])
        i *= 2
# Simple interest calculator: principal * time * rate / 100 (rate in % p.a.).
p = float(input("Enter the Principle Amount(P) : "))
t = float(input("Enter the Time Period(T) : "))
r = float(input("Enter the rate of interest per annum: "))
print("The simple interest is {:.6f}".format(p*t*r/100))
|
def matrix_mul(s, j):
    """One reduction step of a fully parenthesised matrix-chain expression.

    s: expression string where each matrix occupies alternating positions
       (matrix index for string position i is i // 2);
    j: list of [rows, cols] per matrix.
    Finds an innermost "(XY)" pair, replaces it by the placeholder 'Z',
    merges the two dimension entries, and returns the updated (s, j) plus
    the scalar-multiplication count k for that product.

    NOTE(review): k stays unbound if no reducible "(..)" pair of width 4
    exists, and the loop range is computed on the original s while s
    shrinks inside the loop — the calling script relies on its bare
    `except` to absorb any resulting errors. Handle with care.
    """
    for i in range(len(s)):
        if s[i] == '(' and s[i + 3] == ')':  # 最先计算的两个矩阵位置,i/2 (innermost pair; matrix index i/2)
            k = j[int(i / 2)][0] * j[int(i / 2)][1] * j[int(i / 2) + 1][1]  # 每次计算量 (cost of this product)
            s = s[0:i] + 'Z' + s[i + 4:]  # 更新计算法则 (collapse the pair in the expression)
            j = j[:int(i / 2)] + [[j[int(i / 2)][0], j[int(i / 2) + 1][1]]] + j[int(i / 2) + 1:]  # 得到新的矩阵的行列数 (merged dims)
    return s, j, k
# Read n matrices' dimensions and a fully parenthesised multiplication
# order, then accumulate the total scalar-multiplication count one
# matrix_mul reduction at a time.
while True:
    try:
        n = int(input())
        ij = [[int(i) for i in input().split()] for k in range(n)]
        cf = str(input())
        m = 0
        for i in range(n - 1):
            cf, ij, mn = matrix_mul(cf, ij)
            m += mn
        print(m)
    except:
        # bare except doubles as the EOF/parse-error exit for the stdin loop
        break
|
import numpy as np
import matplotlib.pyplot as pt
import scipy.linalg  # BUG FIX: `import scipy` alone does not guarantee scipy.linalg is importable

# NOTE(review): A, d_9 and plot_graph must already be defined (e.g. by the
# surrounding notebook); they are not created in this block.

# A = P L U, so each back-step A d_{k-1} = d_k reduces to one permutation
# plus two triangular solves: L y = P^T d_k, then U d_{k-1} = y.
P, L, U = scipy.linalg.lu(A)
np.set_printoptions(precision=1)
#print(A)


def _back_step(d_next):
    """Recover d_{k-1} from d_k using the cached P, L, U factors."""
    rhs = P.T.dot(d_next)
    y = scipy.linalg.solve_triangular(L, rhs, lower=True)
    return scipy.linalg.solve_triangular(U, y)


# Walk back month by month from d_9 to d_0 (replaces nine copy-pasted
# solve blocks in the original).
d = [None] * 10
d[9] = d_9
for k in range(9, 0, -1):
    d[k - 1] = _back_step(d[k])
d_8, d_7, d_6, d_5, d_4, d_3, d_2, d_1, d_0 = (d[i] for i in range(8, -1, -1))

# origin_index: the city everyone started in (largest entry of d_0)
origin_index = np.argmax(d_0)

# Plot the population distribution at four checkpoints.
for months, dist in ((9, d_9), (6, d_6), (3, d_3), (0, d_0)):
    pt.figure()
    pt.title(f"{months} months")
    plot_graph(A, dist)

# Inference
print("The graph infers the distribution of people in different city. The darker the color, more people in that city. For example, in graph 4, all people are in one city and that city has the darkest color, all other city has no color. The graph also shows the tendency of how people move from one city to another, the arrow and the number indicates the trend.")
|
# Fragment of a plotting/update loop (the start of this file appears truncated:
# iterdata, item, data, lines, xdata and count are all defined elsewhere).
# Presumably iterdata yields (columnName, columnData) pairs and the first
# next() skips a header/index column -- TODO confirm against the missing context.
next(iterdata)
for (columnName, columnData) in iterdata:
    key = "ln" + str(item)  # assumes item numbers the current data series
    data[key].append(float(columnData[count]))
    lines[key].set_data(xdata, data[key])  # update the matplotlib line in place
    item+=1
    count+=1
#String methods
loro= "Azul Noruego"
print(len(loro))#equivalent to .length()
#.lower() converts a whole string to lowercase,
#note that these are used differently from len()
print (loro.lower())
#.upper() converts everything to UPPERCASE
print (loro.upper())
"""las funciones .lower() y .upper() solo funcionan con strings,
por eso su sintaxis es distinta
a diferencia de srt() y len() estas funcionaan con variso tipos de objetos
por ello necesitan la variable como parametro"""
pi= 3.14159
print (str(pi))
#String formatting with %, it is very interesting
camelot = 'Camelot'
lugar = 'lugar'
print ("No vayamos a %s. Es un %s tonto." % (camelot, lugar))
#raw_input changed to input (Python 3); careful with print's parentheses
nombre = input("Cuál es tu nombre")
mision = input("Cuál es tu mision")
color = input("Cuál es tu color favorito")
print ("Ah, asi que tu nombre es %s, tu mision es %s, \
y tu color favorito es %s." % (nombre, mision, color))
|
"""
This is the main file. This contains code generated from the game_main_window.ui
file and modifications. Run this file to launch the Fantasy Cricket app.
"""
import sqlite3
from PyQt5 import QtCore, QtGui, QtWidgets
from final_dialog_box import Ui_dialog
from final_evaluate_teams import Ui_evaluate_team_dialog
from final_new_team_dialog import Ui_Dialog_new_team
from final_open_team import Ui_open_team_dialog
class Ui_Fantasy_Cricket_Game(object):
    """Main-window UI and controller for the Fantasy Cricket game.

    Generated from game_main_window.ui and then hand-modified: builds the
    widget tree, wires up signals, and implements team creation, selection,
    saving, loading and evaluation backed by the SQLite database
    player_database.db.
    """
    def setupUi(self, Fantasy_Cricket_Game):
        """Build the widget hierarchy, apply styles and connect signals.

        Parameters
        ----------
        Fantasy_Cricket_Game : QtWidgets.QMainWindow
            The main window instance to populate.
        """
        Fantasy_Cricket_Game.setObjectName("Fantasy_Cricket_Game")
        Fantasy_Cricket_Game.resize(700, 604)
        Fantasy_Cricket_Game.setMinimumSize(QtCore.QSize(700, 604))
        # Shared bold font applied to nearly every widget below.
        font = QtGui.QFont()
        font.setFamily("Segoe UI")
        font.setPointSize(10)
        font.setBold(True)
        font.setItalic(False)
        font.setUnderline(False)
        font.setWeight(75)
        font.setStrikeOut(False)
        font.setKerning(True)
        Fantasy_Cricket_Game.setFont(font)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap("ball.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        Fantasy_Cricket_Game.setWindowIcon(icon)
        Fantasy_Cricket_Game.setAutoFillBackground(False)
        self.centralwidget = QtWidgets.QWidget(Fantasy_Cricket_Game)
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.centralwidget)
        self.verticalLayout_7.setObjectName("verticalLayout_7")
        # "Your Selections" box: one label pair per category (BAT/BWL/AR/WK)
        # showing how many players of that category are currently selected.
        self.selection_box = QtWidgets.QGroupBox(self.centralwidget)
        self.selection_box.setFont(font)
        self.selection_box.setObjectName("selection_box")
        self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.selection_box)
        self.horizontalLayout_5.setObjectName("horizontalLayout_5")
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout()
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.label = QtWidgets.QLabel(self.selection_box)
        self.label.setFont(font)
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.label.setObjectName("label")
        self.verticalLayout_2.addWidget(self.label)
        self.bat_num = QtWidgets.QLabel(self.selection_box)
        self.bat_num.setFont(font)
        self.bat_num.setAlignment(QtCore.Qt.AlignCenter)
        self.bat_num.setObjectName("bat_num")
        self.verticalLayout_2.addWidget(self.bat_num)
        self.horizontalLayout.addLayout(self.verticalLayout_2)
        self.verticalLayout_3 = QtWidgets.QVBoxLayout()
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        self.label_3 = QtWidgets.QLabel(self.selection_box)
        self.label_3.setFont(font)
        self.label_3.setAlignment(QtCore.Qt.AlignCenter)
        self.label_3.setObjectName("label_3")
        self.verticalLayout_3.addWidget(self.label_3)
        self.bwl_num = QtWidgets.QLabel(self.selection_box)
        self.bwl_num.setFont(font)
        self.bwl_num.setAlignment(QtCore.Qt.AlignCenter)
        self.bwl_num.setObjectName("bwl_num")
        self.verticalLayout_3.addWidget(self.bwl_num)
        self.horizontalLayout.addLayout(self.verticalLayout_3)
        self.verticalLayout_4 = QtWidgets.QVBoxLayout()
        self.verticalLayout_4.setObjectName("verticalLayout_4")
        self.label_5 = QtWidgets.QLabel(self.selection_box)
        self.label_5.setFont(font)
        self.label_5.setAlignment(QtCore.Qt.AlignCenter)
        self.label_5.setObjectName("label_5")
        self.verticalLayout_4.addWidget(self.label_5)
        self.ar_num = QtWidgets.QLabel(self.selection_box)
        self.ar_num.setFont(font)
        self.ar_num.setAlignment(QtCore.Qt.AlignCenter)
        self.ar_num.setObjectName("ar_num")
        self.verticalLayout_4.addWidget(self.ar_num)
        self.horizontalLayout.addLayout(self.verticalLayout_4)
        self.verticalLayout_5 = QtWidgets.QVBoxLayout()
        self.verticalLayout_5.setObjectName("verticalLayout_5")
        self.label_7 = QtWidgets.QLabel(self.selection_box)
        self.label_7.setFont(font)
        self.label_7.setAlignment(QtCore.Qt.AlignCenter)
        self.label_7.setObjectName("label_7")
        self.verticalLayout_5.addWidget(self.label_7)
        self.wk_num = QtWidgets.QLabel(self.selection_box)
        self.wk_num.setFont(font)
        self.wk_num.setAlignment(QtCore.Qt.AlignCenter)
        self.wk_num.setObjectName("wk_num")
        self.verticalLayout_5.addWidget(self.wk_num)
        self.horizontalLayout.addLayout(self.verticalLayout_5)
        self.horizontalLayout_5.addLayout(self.horizontalLayout)
        self.verticalLayout_7.addWidget(self.selection_box)
        # "Make Team" box: points display, category radio buttons, the
        # candidate list (list1) and the selected-team list (list2).
        self.make_team_box = QtWidgets.QGroupBox(self.centralwidget)
        self.make_team_box.setFont(font)
        self.make_team_box.setObjectName("make_team_box")
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.make_team_box)
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.verticalLayout_6 = QtWidgets.QVBoxLayout()
        self.verticalLayout_6.setSizeConstraint(QtWidgets.QLayout.SetMinimumSize)
        self.verticalLayout_6.setObjectName("verticalLayout_6")
        self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_6.setObjectName("horizontalLayout_6")
        self.label_11 = QtWidgets.QLabel(self.make_team_box)
        self.label_11.setFont(font)
        self.label_11.setObjectName("label_11")
        self.horizontalLayout_6.addWidget(self.label_11)
        self.points_available_label = QtWidgets.QLabel(self.make_team_box)
        self.points_available_label.setFont(font)
        self.points_available_label.setObjectName("points_available_label")
        self.horizontalLayout_6.addWidget(self.points_available_label)
        self.verticalLayout_6.addLayout(self.horizontalLayout_6)
        self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_4.setObjectName("horizontalLayout_4")
        self.radioButton_bat = QtWidgets.QRadioButton(self.make_team_box)
        self.radioButton_bat.setFont(font)
        self.radioButton_bat.setObjectName("radioButton_bat")
        self.horizontalLayout_4.addWidget(self.radioButton_bat)
        self.radioButton_bwl = QtWidgets.QRadioButton(self.make_team_box)
        self.radioButton_bwl.setFont(font)
        self.radioButton_bwl.setObjectName("radioButton_bwl")
        self.horizontalLayout_4.addWidget(self.radioButton_bwl)
        self.radioButton_ar = QtWidgets.QRadioButton(self.make_team_box)
        self.radioButton_ar.setFont(font)
        self.radioButton_ar.setObjectName("radioButton_ar")
        self.horizontalLayout_4.addWidget(self.radioButton_ar)
        self.radioButton_wk = QtWidgets.QRadioButton(self.make_team_box)
        self.radioButton_wk.setMinimumSize(QtCore.QSize(0, 27))
        self.radioButton_wk.setFont(font)
        self.radioButton_wk.setObjectName("radioButton_wk")
        self.horizontalLayout_4.addWidget(self.radioButton_wk)
        self.verticalLayout_6.addLayout(self.horizontalLayout_4)
        self.list1 = QtWidgets.QListWidget(self.make_team_box)
        self.list1.setMinimumSize(QtCore.QSize(260, 300))
        self.list1.setObjectName("list1")
        self.verticalLayout_6.addWidget(self.list1)
        self.horizontalLayout_2.addLayout(self.verticalLayout_6)
        self.verticalLayout_10 = QtWidgets.QVBoxLayout()
        self.verticalLayout_10.setObjectName("verticalLayout_10")
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.verticalLayout_10.addItem(spacerItem)
        self.label_15 = QtWidgets.QLabel(self.make_team_box)
        self.label_15.setFont(font)
        self.label_15.setAlignment(QtCore.Qt.AlignCenter)
        self.label_15.setObjectName("label_15")
        self.verticalLayout_10.addWidget(self.label_15)
        spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.verticalLayout_10.addItem(spacerItem1)
        self.horizontalLayout_2.addLayout(self.verticalLayout_10)
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setSizeConstraint(QtWidgets.QLayout.SetMinimumSize)
        self.verticalLayout.setObjectName("verticalLayout")
        self.horizontalLayout_7 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_7.setObjectName("horizontalLayout_7")
        self.label_14 = QtWidgets.QLabel(self.make_team_box)
        self.label_14.setFont(font)
        self.label_14.setObjectName("label_14")
        self.horizontalLayout_7.addWidget(self.label_14)
        self.points_used_label = QtWidgets.QLabel(self.make_team_box)
        self.points_used_label.setFont(font)
        self.points_used_label.setObjectName("points_used_label")
        self.horizontalLayout_7.addWidget(self.points_used_label)
        self.verticalLayout.addLayout(self.horizontalLayout_7)
        self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        self.label_9 = QtWidgets.QLabel(self.make_team_box)
        self.label_9.setMinimumSize(QtCore.QSize(0, 27))
        self.label_9.setFont(font)
        self.label_9.setAlignment(QtCore.Qt.AlignCenter)
        self.label_9.setObjectName("label_9")
        self.horizontalLayout_3.addWidget(self.label_9)
        self.team_name_label = QtWidgets.QLabel(self.make_team_box)
        self.team_name_label.setFont(font)
        self.team_name_label.setAlignment(QtCore.Qt.AlignCenter)
        self.team_name_label.setObjectName("team_name_label")
        self.horizontalLayout_3.addWidget(self.team_name_label)
        self.verticalLayout.addLayout(self.horizontalLayout_3)
        self.list2 = QtWidgets.QListWidget(self.make_team_box)
        self.list2.setMinimumSize(QtCore.QSize(260, 300))
        self.list2.setObjectName("list2")
        self.verticalLayout.addWidget(self.list2)
        self.horizontalLayout_2.addLayout(self.verticalLayout)
        self.horizontalLayout_2.setStretch(0, 1)
        self.horizontalLayout_2.setStretch(2, 1)
        self.verticalLayout_7.addWidget(self.make_team_box)
        Fantasy_Cricket_Game.setCentralWidget(self.centralwidget)
        # Menu bar with the "Manage Teams" menu and its four actions.
        self.menubar = QtWidgets.QMenuBar(Fantasy_Cricket_Game)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 700, 29))
        self.menubar.setFont(font)
        self.menubar.setObjectName("menubar")
        self.menuManage_Teams = QtWidgets.QMenu(self.menubar)
        self.menuManage_Teams.setFont(font)
        self.menuManage_Teams.setObjectName("menuManage_Teams")
        Fantasy_Cricket_Game.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(Fantasy_Cricket_Game)
        self.statusbar.setObjectName("statusbar")
        Fantasy_Cricket_Game.setStatusBar(self.statusbar)
        self.actionAdd_Team = QtWidgets.QAction(Fantasy_Cricket_Game)
        self.actionAdd_Team.setFont(font)
        self.actionAdd_Team.setObjectName("actionAdd_Team")
        self.actionOpen_Team = QtWidgets.QAction(Fantasy_Cricket_Game)
        self.actionOpen_Team.setFont(font)
        self.actionOpen_Team.setObjectName("actionOpen_Team")
        self.actionSave_Team = QtWidgets.QAction(Fantasy_Cricket_Game)
        self.actionSave_Team.setFont(font)
        self.actionSave_Team.setObjectName("actionSave_Team")
        self.actionEvaluate_Team = QtWidgets.QAction(Fantasy_Cricket_Game)
        self.actionEvaluate_Team.setFont(font)
        self.actionEvaluate_Team.setObjectName("actionEvaluate_Team")
        self.menuManage_Teams.addAction(self.actionAdd_Team)
        self.menuManage_Teams.addAction(self.actionOpen_Team)
        self.menuManage_Teams.addAction(self.actionSave_Team)
        self.menuManage_Teams.addAction(self.actionEvaluate_Team)
        self.menubar.addAction(self.menuManage_Teams.menuAction())
        # Background images and white text so labels stay readable on them.
        stylesheet = """
        QMainWindow {
        border-image: url("b1.png");
        }
        """
        self.list1.setStyleSheet("border-image : url(b2.png); ")
        self.list2.setStyleSheet("border-image : url(b3.png); ")
        Fantasy_Cricket_Game.setStyleSheet(stylesheet)
        self.selection_box.setStyleSheet('QGroupBox:title {color: white;}')
        self.make_team_box.setStyleSheet('QGroupBox:title {color: white;}')
        self.label_3.setStyleSheet('color:white')
        self.label_5.setStyleSheet('color:white')
        self.label_7.setStyleSheet('color:white')
        self.label.setStyleSheet('color:white')
        self.retranslateUi(Fantasy_Cricket_Game)
        # Controller state: team_name_change is 1 once a team has been created
        # or opened; bat/bwl/ar/wk count the selected players per category.
        self.team_name_change = 0
        self.bat = 0
        self.bwl = 0
        self.ar = 0
        self.wk = 0
        # Signal wiring: radio buttons filter the candidate list, menu actions
        # dispatch through menufunction, double-clicks move players between lists.
        self.radioButton_bat.clicked.connect(lambda: self.show_players('BAT'))
        self.radioButton_bwl.clicked.connect(lambda: self.show_players('BWL'))
        self.radioButton_ar.clicked.connect(lambda: self.show_players('AR'))
        self.radioButton_wk.clicked.connect(lambda: self.show_players('WK'))
        self.menuManage_Teams.triggered[QtWidgets.QAction].connect(self.menufunction)
        self.list1.itemDoubleClicked.connect(self.removelist1)
        self.list2.itemDoubleClicked.connect(self.removelist2)
        QtCore.QMetaObject.connectSlotsByName(Fantasy_Cricket_Game)
    def retranslateUi(self, Fantasy_Cricket_Game):
        """Set all user-visible strings (auto-generated translation hook)."""
        _translate = QtCore.QCoreApplication.translate
        Fantasy_Cricket_Game.setWindowTitle(_translate("Fantasy_Cricket_Game", "Fantasy Cricket Game"))
        self.selection_box.setTitle(_translate("Fantasy_Cricket_Game", "Your Selections"))
        self.label.setText(_translate("Fantasy_Cricket_Game", "Batsmen (BAT)"))
        self.bat_num.setText(_translate("Fantasy_Cricket_Game", "0"))
        self.label_3.setText(_translate("Fantasy_Cricket_Game", "Bowlers (BWL)"))
        self.bwl_num.setText(_translate("Fantasy_Cricket_Game", "0"))
        self.label_5.setText(_translate("Fantasy_Cricket_Game", "Allrounders (AR)"))
        self.ar_num.setText(_translate("Fantasy_Cricket_Game", "0"))
        self.label_7.setText(_translate("Fantasy_Cricket_Game", "Wicket-Keeper (WK)"))
        self.wk_num.setText(_translate("Fantasy_Cricket_Game", "0"))
        self.make_team_box.setTitle(_translate("Fantasy_Cricket_Game", "Make Team"))
        self.label_11.setText(_translate("Fantasy_Cricket_Game", "Points Available:"))
        self.points_available_label.setText(_translate("Fantasy_Cricket_Game", "1000"))
        self.radioButton_bat.setText(_translate("Fantasy_Cricket_Game", "BAT"))
        self.radioButton_bwl.setText(_translate("Fantasy_Cricket_Game", "BWL"))
        self.radioButton_ar.setText(_translate("Fantasy_Cricket_Game", "AR"))
        self.radioButton_wk.setText(_translate("Fantasy_Cricket_Game", "WK"))
        self.label_15.setText(_translate("Fantasy_Cricket_Game", ">"))
        self.label_14.setText(_translate("Fantasy_Cricket_Game", "Points Used:"))
        self.points_used_label.setText(_translate("Fantasy_Cricket_Game", "0"))
        self.label_9.setText(_translate("Fantasy_Cricket_Game", "Team Name:"))
        self.team_name_label.setText(_translate("Fantasy_Cricket_Game", "Team"))
        self.menuManage_Teams.setTitle(_translate("Fantasy_Cricket_Game", "Manage Teams"))
        self.actionAdd_Team.setText(_translate("Fantasy_Cricket_Game", "New Team"))
        self.actionOpen_Team.setText(_translate("Fantasy_Cricket_Game", "Open Team"))
        self.actionSave_Team.setText(_translate("Fantasy_Cricket_Game", "Save Team"))
        self.actionEvaluate_Team.setText(_translate("Fantasy_Cricket_Game", "Evaluate Team"))
    def value_change(self):
        """
        This function calculates the current value of the selected team and
        displays the points spent and points available to the user for team
        selection.
        Returns
        -------
        total : int
            Current spent points of the selected team.
        """
        player_data = sqlite3.connect('player_database.db')
        curplayers = player_data.cursor()
        total = 0
        for i in range(self.list2.count()):
            player = self.list2.item(i).text()
            # NOTE(review): the player name is interpolated straight into SQL
            # here (and in the other queries below); fine for trusted DB data,
            # but a parameterized query ("... WHERE player = ?") would be safer.
            command = "SELECT value FROM stats WHERE player = '{}';".format(player)
            curplayers.execute(command)
            record = curplayers.fetchall()[0][0]
            total += record
        player_data.close()
        _translate = QtCore.QCoreApplication.translate
        if self.list2.count() > 0:
            self.points_available_label.setText(_translate("Fantasy_Cricket_Game", "{}".format(1000 - total)))
            self.points_used_label.setText(_translate("Fantasy_Cricket_Game", "{}".format(total)))
        else:
            # Empty team: reset to the full 1000-point budget.
            self.points_available_label.setText(_translate("Fantasy_Cricket_Game", "1000"))
            self.points_used_label.setText(_translate("Fantasy_Cricket_Game", "0"))
        return total
    def change_ctg_count(self):
        """
        This function displays the current composition of the team, i.e. it
        displays how many players of each category are there in the team
        currently.
        Returns
        -------
        None.
        """
        _translate = QtCore.QCoreApplication.translate
        self.bat_num.setText(_translate("Fantasy_Cricket_Game", "{}".format(self.bat)))
        self.bwl_num.setText(_translate("Fantasy_Cricket_Game", "{}".format(self.bwl)))
        self.ar_num.setText(_translate("Fantasy_Cricket_Game", "{}".format(self.ar)))
        self.wk_num.setText(_translate("Fantasy_Cricket_Game", "{}".format(self.wk)))
    def show_players(self, ctg):
        """
        This function is run when any of the category radio button is selected.
        If a team has been selected, it displays players of the selected category
        and disables the ones which are already selected. If no team is selected,
        it shows a message to the user.
        Parameters
        ----------
        ctg : str
            string representing the category selected
        Returns
        -------
        None.
        """
        if self.team_name_change == 1:
            self.ctg = ctg  # remembered so removelist1 knows the active category
            self.list1.clear()
            _translate = QtCore.QCoreApplication.translate
            player_data = sqlite3.connect('player_database.db')
            curplayers = player_data.cursor()
            command = "SELECT player FROM stats WHERE ctg = '{}';".format(ctg)
            curplayers.execute(command)
            record = curplayers.fetchall()
            __sortingEnabled = self.list1.isSortingEnabled()
            self.list1.setSortingEnabled(False)
            for i in range(len(record)):
                if self.list1.count() < len(record):
                    item = QtWidgets.QListWidgetItem()
                    self.list1.addItem(item)
                item = self.list1.item(i)
                item.setText(_translate("Fantasy_Cricket_Game", "{}".format(record[i][0])))
                # Grey out players already picked into the team list.
                temp = self.list2.findItems('{}'.format(record[i][0]), QtCore.Qt.MatchExactly)
                if temp != []:
                    item.setFlags(QtCore.Qt.NoItemFlags)
            self.list1.setSortingEnabled(__sortingEnabled)
            player_data.close()
        else:
            self.dialog_box(1)
    def menufunction(self, action):
        """
        This function handles the menu of the app.
        Depending on which option is selected, different steps are taken.
        Parameters
        ----------
        action :
            The menu option selected
        Returns
        -------
        None.
        """
        txt = action.text()  # string giving the selected menu option
        if txt == 'New Team':
            """
            If New Team is selected, a New Team Wizard from the final_new_team_dialog.py
            file is run.
            """
            dialog = QtWidgets.QDialog()
            dialog.ui = Ui_Dialog_new_team()
            dialog.ui.setupUi(dialog)
            dialog.exec_()
            try:
                name = dialog.ui.name
                _translate = QtCore.QCoreApplication.translate
                self.team_name_label.setText(_translate("Fantasy_Cricket_Game", name))
                self.team_name_change = 1
                self.list2.clear()
                self.radioButton_bat.setChecked(True)
                self.show_players('BAT')
                self.list2_player_details()
                self.change_ctg_count()
                self.value_change()
            except:  # NOTE(review): bare except - presumably covers the wizard being cancelled (no .name); it also hides real errors
                pass
        if txt == 'Save Team':
            # Teams are stored as a single '///'-joined string of player names.
            if self.list2.count() == 11:
                player_data = sqlite3.connect('player_database.db')
                curplayers = player_data.cursor()
                try:
                    team_name = self.team_name_label.text()
                    curplayers.execute(('SELECT name FROM teams;'))
                    record = curplayers.fetchall()
                    team_list = []
                    for i in record:
                        team_list.append(i[0])
                    player_list = self.list2_player_details()
                    player_list = '///'.join(player_list)
                    if team_name in team_list:
                        curplayers.execute('UPDATE teams SET players = ? WHERE name = ?;', (player_list, team_name))
                    else:
                        curplayers.execute('INSERT INTO teams (name, players) VALUES (?,?);', (team_name, player_list))
                    player_data.commit()
                    player_data.close()
                except:  # NOTE(review): bare except; rolls back but silently discards the failure reason
                    player_data.rollback()
                    player_data.close()
            else:
                if self.team_name_change == 1:
                    self.dialog_box(3)
                else:
                    self.dialog_box(1)
        if txt == 'Open Team':
            dialog = QtWidgets.QDialog()
            dialog.ui = Ui_open_team_dialog()
            dialog.ui.setupUi(dialog, )
            dialog.exec_()
            if dialog.ui.team_change_toggle:
                try:
                    selected_team = dialog.ui.selected_team
                    self.list2.clear()
                    _translate = QtCore.QCoreApplication.translate
                    player_data = sqlite3.connect('player_database.db')
                    curplayers = player_data.cursor()
                    command = "SELECT players FROM teams WHERE name = '{}';".format(selected_team)
                    curplayers.execute(command)
                    record = curplayers.fetchall()[0][0]
                    record = record.split('///')
                    # Slightly lighter font for the team-list entries.
                    font = QtGui.QFont()
                    font.setFamily("Segoe UI")
                    font.setPointSize(10)
                    font.setWeight(60)
                    font.setKerning(True)
                    __sortingEnabled = self.list2.isSortingEnabled()
                    self.list2.setSortingEnabled(False)
                    for i in range(len(record)):
                        if self.list2.count() < len(record):
                            item = QtWidgets.QListWidgetItem()
                            self.list2.addItem(item)
                        item = self.list2.item(i)
                        # NOTE(review): "evaluate_tean_dialog" looks like a typo'd
                        # translation context; harmless unless translations are added.
                        item.setText(_translate("evaluate_tean_dialog", "{}".format(record[i])))
                        item.setForeground(QtGui.QColor("white"))
                        item.setFont(font)
                    self.list2.setSortingEnabled(__sortingEnabled)
                    player_data.close()
                    self.team_name_label.setText(_translate("Fantasy_Cricket_Game", selected_team))
                    self.team_name_change = 1
                    self.radioButton_bat.setChecked(True)
                    self.show_players('BAT')
                    self.list2_player_details()
                    self.change_ctg_count()
                    self.value_change()
                except:  # NOTE(review): bare except silently ignores a failed load
                    pass
        if txt == 'Evaluate Team':
            dialog = QtWidgets.QDialog()
            dialog.ui = Ui_evaluate_team_dialog()
            dialog.ui.setupUi(dialog)
            dialog.exec_()
    def removelist1(self, item):
        """Double-click handler for the candidate list: try to add the player
        to the team, enforcing the squad size (11), the per-category limits
        (4 BAT / 3 BWL / 3 AR / 1 WK) and the 1000-point budget."""
        if self.list2.count() < 11:
            ctg = self.ctg
            total = self.value_change()
            player_data = sqlite3.connect('player_database.db')
            curplayers = player_data.cursor()
            player = item.text()
            command = "SELECT value FROM stats WHERE player = '{}';".format(player)
            curplayers.execute(command)
            record = curplayers.fetchall()[0][0]
            try:
                total += record
            except:  # NOTE(review): presumably guards a NULL value; resetting total to 0 looks questionable - TODO confirm
                total = 0
            player_data.close()
            if total <= 1000:
                if ctg == 'BAT' and self.bat < 4:
                    self.list2.addItem(item.text())
                    item.setFlags(QtCore.Qt.NoItemFlags)
                elif ctg == 'BWL' and self.bwl < 3:
                    self.list2.addItem(item.text())
                    item.setFlags(QtCore.Qt.NoItemFlags)
                elif ctg == 'AR' and self.ar < 3:
                    self.list2.addItem(item.text())
                    item.setFlags(QtCore.Qt.NoItemFlags)
                elif ctg == 'WK' and self.wk < 1:
                    self.list2.addItem(item.text())
                    item.setFlags(QtCore.Qt.NoItemFlags)
                else:
                    self.dialog_box(5, ctg)
            else:
                self.dialog_box(6)
            self.list2_player_details()
            self.change_ctg_count()
            self.value_change()
            font = QtGui.QFont()
            font.setFamily("Segoe UI")
            font.setPointSize(10)
            font.setWeight(60)
            font.setKerning(True)
            try:
                # Restyle the newly added entry in the team list (if it was added).
                item = self.list2.findItems(item.text(), QtCore.Qt.MatchExactly)
                item[0].setForeground(QtGui.QColor("white"))
                item[0].setFont(font)
            except:
                pass
        else:
            self.dialog_box(4)
    def removelist2(self, item):
        """Double-click handler for the team list: remove the player and
        re-enable their entry in the candidate list."""
        self.list2.takeItem(self.list2.row(item))
        item = self.list1.findItems(item.text(), QtCore.Qt.MatchExactly)
        self.list2_player_details()
        self.change_ctg_count()
        self.value_change()
        try:
            item[0].setFlags(QtCore.Qt.ItemIsEnabled)
        except:  # the player may not be in the currently displayed category
            pass
    def dialog_box(self, dialog_number, ctg='none'):
        """Show a modal help/error message box for the given code (1-6).

        NOTE(review): an unknown dialog_number would leave ``label`` unbound
        and raise UnboundLocalError at the setupUi call below.
        """
        dialog = QtWidgets.QDialog()
        dialog.ui = Ui_dialog()
        title = 'Error'
        if dialog_number == 1:
            label = 'Make or open a team to start.'
            title = 'Help'
        elif dialog_number == 2:
            label = 'Team name can only contain alphanumeric characters!'
        elif dialog_number == 3:
            label = 'Your team has {} players. Select {} more to continue.'.format(self.list2.count(),
                                                                                   11 - self.list2.count())
        elif dialog_number == 4:
            label = 'Team can not have more than 11 players!'
        elif dialog_number == 5:
            if ctg == 'BAT':
                label = 'You can not have more than 4 batsmen in the team!'
            elif ctg == 'BWL':
                label = 'You can not have more than 3 bowlers in the team!'
            elif ctg == 'AR':
                label = 'You can not have more than 3 allrounders in the team!'
            elif ctg == 'WK':
                label = 'You can not have more than 1 wicket-keeper in the team!'
        elif dialog_number == 6:
            label = 'Points used can not be more than 1000!'
        dialog.ui.setupUi(dialog, title, label)
        dialog.exec_()
    def list2_player_details(self):
        """Recount the team's category composition and return the player names.

        Resets self.bat/bwl/ar/wk and recomputes them from the match and stats
        tables for every player currently in list2.
        """
        self.bat = 0
        self.bwl = 0
        self.ar = 0
        self.wk = 0
        player_data = sqlite3.connect('player_database.db')
        curplayers = player_data.cursor()
        player_list = []
        for i in range(self.list2.count()):
            stats = []
            player = self.list2.item(i).text()
            player_list.append(player)
            command = "SELECT * FROM match WHERE player = '{}';".format(player)
            curplayers.execute(command)
            record = curplayers.fetchall()
            for i in record[0]:
                stats.append(i)
            command = "SELECT * FROM stats WHERE player = '{}';".format(player)
            curplayers.execute(command)
            record = curplayers.fetchall()
            for i in record[0]:
                stats.append(i)
            # assumes the concatenated match+stats row puts the category at
            # index 18 -- TODO confirm against the database schema
            ctg = stats[18]
            if ctg == 'BAT':
                self.bat += 1
            elif ctg == 'BWL':
                self.bwl += 1
            elif ctg == 'AR':
                self.ar += 1
            else:
                self.wk += 1
        player_data.close()
        return player_list
if __name__ == "__main__":
    # Launch the Fantasy Cricket main window.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Fantasy_Cricket_Game = QtWidgets.QMainWindow()
    ui = Ui_Fantasy_Cricket_Game()
    ui.setupUi(Fantasy_Cricket_Game)
    Fantasy_Cricket_Game.show()
    sys.exit(app.exec_())
|
# Generated by Django 2.1 on 2019-07-29 11:22
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated: changes the Herb model's default ordering to newest-first.
    dependencies = [
        ('nijaherbs', '0009_auto_20190729_1508'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='herb',
            options={'ordering': ('-created_on',)},
        ),
    ]
|
from opendr.perception.fall_detection.fall_detector_learner import FallDetectorLearner
__all__ = ['FallDetectorLearner']
|
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import render_to_response
from django.template.context_processors import csrf
from django.views import generic
import datamanager.services.config as cfg
from datamanager.models import Configuration
class SettingsView(LoginRequiredMixin, generic.TemplateView):
    """Display and update the per-user :class:`Configuration` settings."""
    redirect_field_name = None
    template_name = 'settings.html'
    def get_context_data(self, **kwargs):
        """Add the logged-in user's Configuration to the template context."""
        data = super().get_context_data(**kwargs)
        user = self.request.user
        data['settings'] = Configuration.objects.get(owner=user)
        return data
    @staticmethod
    def post(request):
        """Persist posted settings and re-render the page with a status message.

        Reads the threshold/NN/polynomial bounds from POST, delegates to
        cfg.update_config, and reports success or failure (messages are
        user-facing and intentionally in Russian).
        """
        args = {}
        args.update(csrf(request))
        unique_values_threshold = request.POST.get('unique_values_threshold')
        nn_hidden_min = request.POST.get('nn_hidden_min')
        nn_hidden_max = request.POST.get('nn_hidden_max')
        poly_min = request.POST.get('poly_min')
        poly_max = request.POST.get('poly_max')
        try:
            cfg.update_config(request.user, unique_values_threshold=unique_values_threshold,
                              nn_hidden_min=nn_hidden_min,
                              nn_hidden_max=nn_hidden_max,
                              poly_min=poly_min,
                              poly_max=poly_max)
            args['message'] = 'Настройки сохранены'
            args['message_class'] = 'success'
        except Exception as exc:
            # Was a bare ``except:`` that also caught SystemExit/KeyboardInterrupt
            # and gave no hint of the cause; narrowed and made the failure visible.
            print('error during saving settings', exc)
            args['message'] = 'Ошибка при сохранении настроек'
            args['message_class'] = 'danger'
        user = request.user
        args['settings'] = Configuration.objects.get(owner=user)
        return render_to_response('settings.html', args)
|
#!/usr/bin/python
# this script is used for video cut :one picture/per second
import os
import cv2
# Configuration: where to read videos / write frames, the saved frame size,
# and how many frames to skip between saved images.
videos_src_path = "./"
video_formats = [".MP4", ".MOV"]  # NOTE(review): compared case-sensitively below
frames_save_path = "./"
width = 320
height = 240
time_interval = 29
def video2frame(video_src_path, formats, frame_save_path, frame_width, frame_height, interval):
    """
    Read every matching video and save one resized frame every *interval* frames.

    :param video_src_path: directory containing the videos
    :param formats: accepted 4-character file extensions, e.g. [".mp4"]
    :param frame_save_path: directory to create the per-video frame folders in
    :param frame_width: width of the saved frames
    :param frame_height: height of the saved frames
    :param interval: number of frames between two saved images
    :return: None (frames are written to disk as <video name>/<n>.jpg)
    """
    videos = os.listdir(video_src_path)

    def filter_format(x, all_formats):
        # Keep only files whose extension is in the accepted list.
        return x[-4:] in all_formats

    videos = filter(lambda x: filter_format(x, formats), videos)
    for each_video in videos:
        print("正在读取视频:", each_video)
        each_video_name = each_video[:-4]
        # exist_ok avoids the FileExistsError os.mkdir raised on a re-run.
        os.makedirs(frame_save_path + each_video_name, exist_ok=True)
        each_video_save_full_path = os.path.join(
            frame_save_path, each_video_name) + "/"
        each_video_full_path = os.path.join(video_src_path, each_video)
        cap = cv2.VideoCapture(each_video_full_path)
        if not cap.isOpened():
            print("读取失败!")
            cap.release()
            continue
        frame_index = 0
        frame_count = 0
        while True:
            success, frame = cap.read()
            print("---> 正在读取第%d帧:" % frame_index, success)
            if not success:
                # Bug fix: the original kept processing after a failed read, so
                # cv2.resize(None, ...) crashed at end-of-file whenever the last
                # frame_index happened to be a multiple of interval.
                break
            if frame_index % interval == 0:
                resize_frame = cv2.resize(
                    frame, (frame_width, frame_height), interpolation=cv2.INTER_AREA)
                cv2.imwrite(each_video_save_full_path + "%d.jpg" %
                            frame_count, resize_frame)
                frame_count += 1
            frame_index += 1
        cap.release()
# NOTE(review): called with lowercase [".mp4"] while the extension check is
# case-sensitive, so ".MP4"/".MOV" files (the video_formats constant) won't match.
video2frame(videos_src_path, [".mp4"], frames_save_path, 320, 240, 29)
|
class Solution:
    def wiggleMaxLength(self, nums) -> int:
        """Return the length of the longest wiggle subsequence of nums.

        O(n) greedy scan: track which direction the next move must go,
        and count one subsequence element each time the direction flips
        (plus one pending move left over at the end).
        """
        if len(nums) <= 1:
            return len(nums)
        length = 1
        expecting_up = True   # direction the next difference should have
        pending = False       # a move in the expected direction has been seen
        idx = 1
        while idx < len(nums):
            prev, cur = nums[idx - 1], nums[idx]
            moved = cur > prev if expecting_up else cur < prev
            if moved or cur == prev:
                # Either the expected move or a flat step: consume the element.
                pending = pending or moved
                idx += 1
            else:
                # Wrong direction: flip expectations and bank the pending move.
                expecting_up = not expecting_up
                if pending:
                    length += 1
                    pending = False
        if pending:
            length += 1
        return length
import re, requests, json
from slackbot.bot import respond_to
from slackbot.bot import listen_to
# Zaif public API endpoint and the currency codes this bot understands.
API_URL = "https://api.zaif.jp/api/1/"
SUPPORTED_COIN = ["zaif","sjcx","btc","ncxc","cicc","xcp","xem","pepecash","jpyz","bitcrystals","bch","eth","fscc","mona"]
# Subset reported by the "price all" command.
MAIN_SUPPORTED_COIN = ["btc","xem","bch","eth","mona","zaif","pepecash"]
def get_price_func(currency_pair, action):
    """GET {API_URL}{action}{currency_pair} from the Zaif public API.

    :param currency_pair: e.g. "btc_jpy"
    :param action: API method path ending in "/", e.g. "last_price/" or "ticker/"
    :return: the raw requests.Response (callers call .json() on it)
    """
    request_data = requests.get(
        API_URL + action + currency_pair
        , headers = {
        })
    return request_data
@respond_to(r'^price\s+\S.*')
def price_coin_func(message):
    """Reply to "price <coin>" Slack messages.

    "price all"    -> last price of every coin in MAIN_SUPPORTED_COIN
    "price <coin>" -> bid/ask/high/low for the coin (vs JPY)
    Unsupported coins get a help message listing SUPPORTED_COIN.
    """
    slack_message = ""
    text = message.body['text'].lower()
    method, coin_name = text.split(" ")
    if coin_name == 'all':
        for main_coin in MAIN_SUPPORTED_COIN:
            # Bug fix: this called the undefined get_last_price_func();
            # the helper defined above is get_price_func(pair, action).
            data = get_price_func(main_coin + "_jpy", 'last_price/').json()
            slack_message += main_coin + " - 現在の価格 : " + str(data['last_price']) + "\n"
    elif coin_name not in SUPPORTED_COIN:
        slack_message = "対象の通貨 : " + coin_name + " は、サポートされていません。\n サポート対象通貨は:\n"
        slack_message += "\n".join(SUPPORTED_COIN)
    else:
        # Bug fix: this called the undefined get_trade_detail_func(); same helper.
        data = get_price_func(coin_name + "_jpy", 'ticker/').json()
        slack_message = coin_name + 'の価格 : \n 買気配 : ' + str(data['bid']) + '\n 売気配 : ' + str(data['ask']) + '\n最高額 : ' + str(data['high']) + '\n最低額 : ' + str(data['low'])
    message.reply(slack_message)
|
#!/usr/bin/env python
# encoding=utf-8
# Demo of Python's numeric literal types.
# int (Python 3 ints are arbitrary precision; there is no separate long type)
print(42)
print(100000000000000000000000000000000000000000000000000000)
# float (including scientific notation)
print(3.1415)
print(1.2e10)
print(3e-3)
print(-3e-3)
# complex (the j suffix marks the imaginary part)
print(2 + 3j)
print(1.5 + 2.8j)
print(1e10 + 3e-5j)
|
# Converting our SNAKE_GAME.py script into a executable file:
# Uses cx_Freeze; run "python <this file> build" to produce the bundle.
import cx_Freeze
executables = [cx_Freeze.Executable("Snake_Game.py")]
cx_Freeze.setup(
    name = "Snake Game",
    # pygame must be bundled explicitly, together with the two sprite images.
    options = {"build_exe":{"packages":["pygame"],"include_files":["apple.png","SnakeHead.png"]}},
    description = "Snake Game Tutorial",
    executables = executables
    )
|
# Read two integers and report their ordering (prompts are in Chinese).
a = int(input("請輸入數字 a:"))
b = int(input("請輸入數字 b:"))
if a > b:
    print("a > b")
elif a < b:
    print("a < b")
else:
    print("a = b")
import os
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import *
from tensorflow.keras.activations import *
from tensorflow.keras.models import *
from tensorflow.keras.optimizers import *
from tensorflow.keras.initializers import *
np.random.seed(42)  # reproducible random kernel
# Load MNIST dataset
(x_train, y_train), (x_test, y_test) = mnist.load_data()
image = x_train[0]  # first training digit
image = image.reshape((28, 28))
# Random 2x2 convolution kernel with weights in [0, 1).
kernel = np.random.uniform(low=0.0, high=1.0, size=(2,2))
#kernel = np.array([[0, 0.5], [0.1, 0.0]])
# Stride (1,1)
def conv2D(image, kernel):
    """Naive 2-D convolution (cross-correlation) with a 2x2 kernel, stride 1.

    :param image: 2-D array of shape (H, W)
    :param kernel: 2x2 weight array
    :return: float array of shape (H-2, W-2) of windowed weighted sums
    """
    # The output stays (H-2, W-2) to match the original loop bounds, although
    # a 2x2 kernel at stride 1 could in principle produce (H-1, W-1).
    shape = ((image.shape[0]-2), image.shape[1]-2)
    img = np.ones(shape)
    for i in range(image.shape[0]-2):
        for j in range(image.shape[1]-2):
            # Bug fix: the window previously read image[i+1, j] twice and never
            # read image[i, j+1]; take the true 2x2 neighbourhood via slicing.
            box = image[i:i+2, j:j+2].astype(np.float64)
            # Bug fix: the intermediate products were cast to uint8, which
            # truncated the fractional kernel weights to 0; keep floats.
            img[i, j] = np.sum(box * kernel)
    return img
conv_image = conv2D(image, kernel)
# Show the input image and the convolved output with imshow()
plt.imshow(image, cmap="gray")
plt.show()
plt.imshow(conv_image, cmap="gray")
plt.show()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Changing the shape of an array """
import numpy as np
# Demo data: 12 random ints in [0, 100) arranged as a 4x3 matrix.
a = np.random.randint(0, 100, 12).reshape(4, 3)
# =================== #
# Most common methods #
# =================== #
# To convert a multidimensional array in a one-dimensional array
# We can not use "a.flat" because that is an iterator, not a function.
# So we use the method "ravel" instead:
b = a.ravel()  # returns the array, flattened
print(b)
print()
# The method "reshape" returns a new array with a modified shape:
c = a.reshape(6, 2)
print(c)
print()
# If a dimension is given as -1 in a reshaping operation,
# the other dimensions are automatically calculated:
d = a.reshape(3, -1)  # this will result in a (3, 4) array
print(d)
print()
# ===================================================== #
# Methods to avoid (or, at least, be very careful with) #
# ===================================================== #
# The method "resize" modifies the array itself:
a.resize(6, 2)
print(a)
print()
# So be very very careful when using the method "resize",
# because an expression like "e = a.resize(1, 12)" does not work the way we
# might expect (it changes the shape of "a" and returns "None").
# The next print therefore shows "None", and the one after it shows the
# now-(1, 12)-shaped "a".
e = a.resize(1, 12)
print(e)
print()
print(a)
print()
|
# encoding: utf-8
"""Unit test suite for the docx.text.paragraph module"""
from __future__ import absolute_import, division, print_function, unicode_literals
from docx.enum.style import WD_STYLE_TYPE
from docx.enum.text import WD_ALIGN_PARAGRAPH
from docx.oxml.text.paragraph import CT_P
from docx.oxml.text.run import CT_R
from docx.parts.document import DocumentPart
from docx.text.paragraph import Paragraph
from docx.text.parfmt import ParagraphFormat
from docx.text.run import Run
import pytest
from ..unitutil.cxml import element, xml
from ..unitutil.mock import (
call, class_mock, instance_mock, method_mock, property_mock
)
class DescribeParagraph(object):
    """Behavior specs for docx.text.paragraph.Paragraph.

    Each it_* method states one behavior; the @pytest.fixture methods below
    build (paragraph, expected) tuples from cxml snippets, and the
    "fixture components" section provides the mocks they depend on.
    """
    def it_knows_its_paragraph_style(self, style_get_fixture):
        paragraph, style_id_, style_ = style_get_fixture
        style = paragraph.style
        paragraph.part.get_style.assert_called_once_with(
            style_id_, WD_STYLE_TYPE.PARAGRAPH
        )
        assert style is style_
    def it_can_change_its_paragraph_style(self, style_set_fixture):
        paragraph, value, expected_xml = style_set_fixture
        paragraph.style = value
        paragraph.part.get_style_id.assert_called_once_with(
            value, WD_STYLE_TYPE.PARAGRAPH
        )
        assert paragraph._p.xml == expected_xml
    def it_knows_the_text_it_contains(self, text_get_fixture):
        paragraph, expected_text = text_get_fixture
        assert paragraph.text == expected_text
    def it_can_replace_the_text_it_contains(self, text_set_fixture):
        paragraph, text, expected_text = text_set_fixture
        paragraph.text = text
        assert paragraph.text == expected_text
    def it_knows_its_alignment_value(self, alignment_get_fixture):
        paragraph, expected_value = alignment_get_fixture
        assert paragraph.alignment == expected_value
    def it_can_change_its_alignment_value(self, alignment_set_fixture):
        paragraph, value, expected_xml = alignment_set_fixture
        paragraph.alignment = value
        assert paragraph._p.xml == expected_xml
    def it_provides_access_to_its_paragraph_format(self, parfmt_fixture):
        paragraph, ParagraphFormat_, paragraph_format_ = parfmt_fixture
        paragraph_format = paragraph.paragraph_format
        ParagraphFormat_.assert_called_once_with(paragraph._element)
        assert paragraph_format is paragraph_format_
    def it_provides_access_to_the_runs_it_contains(self, runs_fixture):
        paragraph, Run_, r_, r_2_, run_, run_2_ = runs_fixture
        runs = paragraph.runs
        assert Run_.mock_calls == [
            call(r_, paragraph), call(r_2_, paragraph)
        ]
        assert runs == [run_, run_2_]
    def it_can_add_a_run_to_itself(self, add_run_fixture):
        paragraph, text, style, style_prop_, expected_xml = add_run_fixture
        run = paragraph.add_run(text, style)
        assert paragraph._p.xml == expected_xml
        assert isinstance(run, Run)
        assert run._r is paragraph._p.r_lst[0]
        if style:
            style_prop_.assert_called_once_with(style)
    def it_can_insert_a_paragraph_before_itself(self, insert_before_fixture):
        text, style, paragraph_, add_run_calls = insert_before_fixture
        paragraph = Paragraph(None, None)
        new_paragraph = paragraph.insert_paragraph_before(text, style)
        paragraph._insert_paragraph_before.assert_called_once_with(paragraph)
        assert new_paragraph.add_run.call_args_list == add_run_calls
        assert new_paragraph.style == style
        assert new_paragraph is paragraph_
    def it_can_remove_its_content_while_preserving_formatting(
            self, clear_fixture):
        paragraph, expected_xml = clear_fixture
        _paragraph = paragraph.clear()
        assert paragraph._p.xml == expected_xml
        assert _paragraph is paragraph
    def it_inserts_a_paragraph_before_to_help(self, _insert_before_fixture):
        paragraph, body, expected_xml = _insert_before_fixture
        new_paragraph = paragraph._insert_paragraph_before()
        assert isinstance(new_paragraph, Paragraph)
        assert body.xml == expected_xml
    # fixtures -------------------------------------------------------
    # Each param tuple below is (initial cxml, inputs..., expected cxml/value).
    @pytest.fixture(params=[
        ('w:p', None, None, 'w:p/w:r'),
        ('w:p', 'foobar', None, 'w:p/w:r/w:t"foobar"'),
        ('w:p', None, 'Strong', 'w:p/w:r'),
        ('w:p', 'foobar', 'Strong', 'w:p/w:r/w:t"foobar"'),
    ])
    def add_run_fixture(self, request, run_style_prop_):
        before_cxml, text, style, after_cxml = request.param
        paragraph = Paragraph(element(before_cxml), None)
        expected_xml = xml(after_cxml)
        return paragraph, text, style, run_style_prop_, expected_xml
    @pytest.fixture(params=[
        ('w:p/w:pPr/w:jc{w:val=center}', WD_ALIGN_PARAGRAPH.CENTER),
        ('w:p', None),
    ])
    def alignment_get_fixture(self, request):
        cxml, expected_alignment_value = request.param
        paragraph = Paragraph(element(cxml), None)
        return paragraph, expected_alignment_value
    @pytest.fixture(params=[
        ('w:p', WD_ALIGN_PARAGRAPH.LEFT,
         'w:p/w:pPr/w:jc{w:val=left}'),
        ('w:p/w:pPr/w:jc{w:val=left}', WD_ALIGN_PARAGRAPH.CENTER,
         'w:p/w:pPr/w:jc{w:val=center}'),
        ('w:p/w:pPr/w:jc{w:val=left}', None,
         'w:p/w:pPr'),
        ('w:p', None, 'w:p/w:pPr'),
    ])
    def alignment_set_fixture(self, request):
        initial_cxml, new_alignment_value, expected_cxml = request.param
        paragraph = Paragraph(element(initial_cxml), None)
        expected_xml = xml(expected_cxml)
        return paragraph, new_alignment_value, expected_xml
    @pytest.fixture(params=[
        ('w:p', 'w:p'),
        ('w:p/w:pPr', 'w:p/w:pPr'),
        ('w:p/w:r/w:t"foobar"', 'w:p'),
        ('w:p/(w:pPr, w:r/w:t"foobar")', 'w:p/w:pPr'),
    ])
    def clear_fixture(self, request):
        initial_cxml, expected_cxml = request.param
        paragraph = Paragraph(element(initial_cxml), None)
        expected_xml = xml(expected_cxml)
        return paragraph, expected_xml
    @pytest.fixture(params=[
        (None, None),
        ('Foo', None),
        (None, 'Bar'),
        ('Foo', 'Bar'),
    ])
    def insert_before_fixture(self, request, _insert_paragraph_before_, add_run_):
        text, style = request.param
        paragraph_ = _insert_paragraph_before_.return_value
        add_run_calls = [] if text is None else [call(text)]
        paragraph_.style = None
        return text, style, paragraph_, add_run_calls
    @pytest.fixture(params=[
        ('w:body/w:p{id=42}', 'w:body/(w:p,w:p{id=42})')
    ])
    def _insert_before_fixture(self, request):
        body_cxml, expected_cxml = request.param
        body = element(body_cxml)
        paragraph = Paragraph(body[0], None)
        expected_xml = xml(expected_cxml)
        return paragraph, body, expected_xml
    @pytest.fixture
    def parfmt_fixture(self, ParagraphFormat_, paragraph_format_):
        paragraph = Paragraph(element('w:p'), None)
        return paragraph, ParagraphFormat_, paragraph_format_
    @pytest.fixture
    def runs_fixture(self, p_, Run_, r_, r_2_, runs_):
        paragraph = Paragraph(p_, None)
        run_, run_2_ = runs_
        return paragraph, Run_, r_, r_2_, run_, run_2_
    @pytest.fixture
    def style_get_fixture(self, part_prop_):
        style_id = 'Foobar'
        p_cxml = 'w:p/w:pPr/w:pStyle{w:val=%s}' % style_id
        paragraph = Paragraph(element(p_cxml), None)
        style_ = part_prop_.return_value.get_style.return_value
        return paragraph, style_id, style_
    @pytest.fixture(params=[
        ('w:p', 'Heading 1', 'Heading1',
         'w:p/w:pPr/w:pStyle{w:val=Heading1}'),
        ('w:p/w:pPr', 'Heading 1', 'Heading1',
         'w:p/w:pPr/w:pStyle{w:val=Heading1}'),
        ('w:p/w:pPr/w:pStyle{w:val=Heading1}', 'Heading 2', 'Heading2',
         'w:p/w:pPr/w:pStyle{w:val=Heading2}'),
        ('w:p/w:pPr/w:pStyle{w:val=Heading1}', 'Normal', None,
         'w:p/w:pPr'),
        ('w:p', None, None,
         'w:p/w:pPr'),
    ])
    def style_set_fixture(self, request, part_prop_):
        p_cxml, value, style_id, expected_cxml = request.param
        paragraph = Paragraph(element(p_cxml), None)
        part_prop_.return_value.get_style_id.return_value = style_id
        expected_xml = xml(expected_cxml)
        return paragraph, value, expected_xml
    @pytest.fixture(params=[
        ('w:p', ''),
        ('w:p/w:r', ''),
        ('w:p/w:r/w:t', ''),
        ('w:p/w:r/w:t"foo"', 'foo'),
        ('w:p/w:r/(w:t"foo", w:t"bar")', 'foobar'),
        ('w:p/w:r/(w:t"fo ", w:t"bar")', 'fo bar'),
        ('w:p/w:r/(w:t"foo", w:tab, w:t"bar")', 'foo\tbar'),
        ('w:p/w:r/(w:t"foo", w:br, w:t"bar")', 'foo\nbar'),
        ('w:p/w:r/(w:t"foo", w:cr, w:t"bar")', 'foo\nbar'),
    ])
    def text_get_fixture(self, request):
        p_cxml, expected_text_value = request.param
        paragraph = Paragraph(element(p_cxml), None)
        return paragraph, expected_text_value
    @pytest.fixture
    def text_set_fixture(self):
        paragraph = Paragraph(element('w:p'), None)
        paragraph.add_run('must not appear in result')
        new_text_value = 'foo\tbar\rbaz\n'
        expected_text_value = 'foo\tbar\nbaz\n'
        return paragraph, new_text_value, expected_text_value
    # fixture components ---------------------------------------------
    # Mocks for collaborating classes/properties used by the fixtures above.
    @pytest.fixture
    def add_run_(self, request):
        return method_mock(request, Paragraph, 'add_run')
    @pytest.fixture
    def document_part_(self, request):
        return instance_mock(request, DocumentPart)
    @pytest.fixture
    def _insert_paragraph_before_(self, request):
        return method_mock(request, Paragraph, '_insert_paragraph_before')
    @pytest.fixture
    def p_(self, request, r_, r_2_):
        return instance_mock(request, CT_P, r_lst=(r_, r_2_))
    @pytest.fixture
    def ParagraphFormat_(self, request, paragraph_format_):
        return class_mock(
            request, 'docx.text.paragraph.ParagraphFormat',
            return_value=paragraph_format_
        )
    @pytest.fixture
    def paragraph_format_(self, request):
        return instance_mock(request, ParagraphFormat)
    @pytest.fixture
    def part_prop_(self, request, document_part_):
        return property_mock(
            request, Paragraph, 'part', return_value=document_part_
        )
    @pytest.fixture
    def Run_(self, request, runs_):
        run_, run_2_ = runs_
        return class_mock(
            request, 'docx.text.paragraph.Run', side_effect=[run_, run_2_]
        )
    @pytest.fixture
    def r_(self, request):
        return instance_mock(request, CT_R)
    @pytest.fixture
    def r_2_(self, request):
        return instance_mock(request, CT_R)
    @pytest.fixture
    def run_style_prop_(self, request):
        return property_mock(request, Run, 'style')
    @pytest.fixture
    def runs_(self, request):
        run_ = instance_mock(request, Run, name='run_')
        run_2_ = instance_mock(request, Run, name='run_2_')
        return run_, run_2_
|
from rest_framework import viewsets
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.authtoken.models import Token
from ..models import SSUser
from rest_framework.response import Response
from ..API.serializers import SSUserSerializer, SSUserShortSerializer
class GetAuthToken(ObtainAuthToken):
    """Token-auth endpoint that also returns the authenticated user.

    POST credentials -> {'token': <key>, 'user': <serialized SSUser>}.
    """
    def post(self, request, *args, **kwargs):
        # Bug fix: the original called super(self, GetAuthToken) -- the
        # arguments were swapped (super() takes the class first, then the
        # instance) and `self` was passed to post() a second time, which
        # raises a TypeError on every request.
        response = super(GetAuthToken, self).post(request, *args, **kwargs)
        token = Token.objects.get(key=response.data['token'])
        user = SSUser.objects.get(id=token.user_id)
        # Serialize the model instance: DRF's Response cannot render a raw
        # Django model object.
        return Response({'token': token.key, 'user': SSUserSerializer(user).data})
class SSUserViewSet(viewsets.ModelViewSet):
    """Full CRUD API endpoint for SSUser using the complete serializer."""
    queryset = SSUser.objects.all()
    serializer_class = SSUserSerializer
class SSUserShortViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint for SSUser exposing only the short serializer fields."""
    queryset = SSUser.objects.all()
    serializer_class = SSUserShortSerializer
|
#!/usr/bin/env python3
import os
import sys
import argparse
from .usage import usage
from .smdimerge import smdimerge
from .timerge import timerge
from .maganim import maganim
def parse_args(args):
    """Dispatch command-line arguments to a subcommand.

    args: argv without the program name (e.g. sys.argv[1:]). The first
    token selects a subcommand; the remaining tokens are split into
    positional arguments and '-'-prefixed options. A negative return code
    from the subcommand falls back to the usage screen; otherwise the
    process exits with the subcommand's return code.
    """
    funcs = {
        "smdimerge": smdimerge,
        "timerge": timerge,
        "maganim": maganim
    }
    # No arguments, or an explicit --help: show usage.
    if "--help" in args or not args:
        return usage(args)
    if args[0] in funcs:
        rest = args[1:]
        # Robustness fix: the original tested x[0], which raises
        # IndexError on an empty-string argument; startswith is safe.
        pargs = [x for x in rest if not x.startswith("-")]
        oargs = [x for x in rest if x.startswith("-")]
        rcode = funcs[args[0]](pargs, oargs)
        if rcode < 0:
            return usage(args)
        else:
            sys.exit(rcode)
    # Unknown subcommand: show usage.
    return usage(args)
|
'''
Test for grabbing a list of manifests, and then working through
retrieving and extracting data using the IIIF_Manifest class
in iiif_collections.
'''
import json
from iiif_collections import IIIF_Manifest
# Collect the first manifest of each non-empty top-level collection, then
# retrieve and extract metadata for each one.
harvest_list = []
# Fix: the original leaked the file handle from a bare open().read();
# a with-block closes it deterministically, and json.load reads the
# stream directly.
with open('master.json') as master_file:
    top_level_manifests = json.load(master_file)
for top_level_collection in top_level_manifests:
    item = top_level_collection['manifests']
    # Only the first manifest of each non-empty collection is harvested.
    if item:
        harvest_list.append(item[0])
for harvest_manifest in harvest_list:
    manifest_item = IIIF_Manifest(harvest_manifest)
    manifest_item.get_manifest_metadata()
# print json.dumps(json.loads(manifest_item.source_data), sort_keys=True,indent=4)
|
#!/usr/bin/python
import pyrebase
import cgi
from time import gmtime, strftime
from calamities import calamities_list
# Python 2 CGI script: emit the HTTP header before any other output.
print "Content-type: text/html"
print ""
# Firebase project configuration (public web-API credentials).
config = {
    "apiKey": "AIzaSyCMoO7CX52RaO5CqSBWTZ67PiLiigAh4jM",
    "authDomain": "calamity-control-1478121312942.firebaseapp.com",
    "databaseURL": "https://calamity-control-1478121312942.firebaseio.com",
    "storageBucket": "calamity-control-1478121312942.appspot.com"
}
firebase = pyrebase.initialize_app(config)
db = firebase.database()
# Query-string parameters of the incoming request.
form = cgi.FieldStorage()
import csv
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
# NOTE(review): lat/lon arrive as strings (or None if missing) -- they are
# used in knn.predict below without conversion; confirm upstream guarantees.
lat = form.getvalue('lat')
lon = form.getvalue('lon')
db = firebase.database()
db = db.child("reports")
# Dump all existing reports to a CSV so the classifier can be trained on
# every request (retrained from scratch each time).
with open('coords.csv', 'w') as csvfile:
    fieldnames = ['lat', 'lng','calamity']
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
    json_content = db.get()
    for child in json_content.each():
        item = {}
        item['lat'] = child.val()['lat']
        item['lng'] = child.val()['lng']
        cal = child.val()['calamity'].encode('ascii')
        # Encode the calamity name as its index in the shared list.
        item['calamity'] = calamities_list.index(cal.lower())
        writer.writerow(item)
# Train KNN on (lat, lng) -> calamity-index and predict for this request.
ds = np.loadtxt("coords.csv", delimiter=",")
features = ds[:,:2]
labels = ds[:,2]
knn.fit(features, labels)
# NOTE(review): knn.predict returns an array; indexing a Python list with
# it likely raises a TypeError -- confirm, probably needs [0] first.
calamity = calamities_list[knn.predict([lat,lon]).astype(int)]
# Store the new prediction back as a report with a UTC timestamp.
db.child("reports").push({"calamity":calamity,"lat":lat,"lng":lon,"time": strftime("%Y-%m-%d %H:%M:%S", gmtime())})
|
#
# @lc app=leetcode.cn id=454 lang=python3
#
# [454] 四数相加 II
#
# @lc code=start
from typing import List
class Solution:
    def fourSumCount(self, A: List[int], B: List[int], C: List[int], D: List[int]) -> int:
        """Count index tuples (i, j, k, l) with A[i]+B[j]+C[k]+D[l] == 0.

        Meet-in-the-middle: tally every pairwise sum a+b, then for each
        pairwise sum c+d look up how many a+b equal its negation.
        O(n^2) time, O(n^2) space.
        """
        pair_sums = {}
        for a in A:
            for b in B:
                pair_sums[a + b] = pair_sums.get(a + b, 0) + 1
        total = 0
        for c in C:
            for d in D:
                total += pair_sums.get(-(c + d), 0)
        return total
# @lc code=end
|
#!/usr/bin/env python3
# Most of this code was not mine but was from
# https://www.programcreek.com/python/example/136/logging.basicConfig
import logging
###################################
# Initialize the logging.
# logFile is the name of the logfile
# logLevel is the logging level (INFO,DEBUG,ERROR)
# loggingMode is the logging mode(a for append, w for overwrite)
###################################
def initLogging(log_file, log_dir, log_level, logging_mode):
    """Configure the root logger and record the start of a new run.

    log_file     -- name of the log file to write to
    log_dir      -- directory for the log file; currently unused (an
                    earlier commented-out variant joined log_dir+log_file)
                    -- TODO confirm intent
    log_level    -- logging level (logging.INFO, logging.DEBUG, ...)
    logging_mode -- file mode: 'a' to append, 'w' to overwrite
    """
    settings = {
        'filename': log_file,
        'filemode': logging_mode,
        'format': '%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
        'datefmt': '%H:%M:%S',
        'level': log_level,
    }
    logging.basicConfig(**settings)
    # Marker lines so separate runs are easy to find in an appended log.
    logging.info("_______________________NEW RUN_______________________")
    logging.info("Logger Initalized at level %s", log_level)
|
# coding=utf-8
'''
3D 数据图
'''
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
if __name__ == '__main__':
    fig = plt.figure(figsize = (12,8))
    ax = Axes3D(fig)
    # Generate the X, Y grid
    X = np.arange(-4,4,0.25)
    Y = np.arange(-4,4,0.25)
    X,Y = np.meshgrid(X,Y)
    # Radial distance from the origin at each grid point
    R = np.sqrt(X ** 2 + Y ** 2)
    # height value
    Z = np.sin(R)
    # Plot the surface
    # rstride (row) and cstride (column) set the row/column sampling stride
    ax.plot_surface(X,Y,Z,
                    rstride = 1, # row stride
                    cstride = 1, # column stride
                    cmap = plt.get_cmap('rainbow') # colormap style
                    )
    # offset is the distance from the zdir axis at which to draw the contours
    ax.contourf(X,Y,Z,zdir = 'z',offset = -2,cmap = 'rainbow')
    ax.set_zlim(-2,2)
    plt.show()
from ..helper_scrapping import Scrapping_helper
import unittest
class TestConvertingStringToInt(unittest.TestCase):
    """Tests for Scrapping_helper.string2float_of_square_meters parsing of ' NN м²' strings."""
    def test_square_meters_int(self):
        # An integer square-metre string parses to the plain number.
        self.assertEqual(Scrapping_helper.string2float_of_square_meters(' 26 м²'), 26)
    def test_square_meters_float(self):
        # A fractional value keeps its decimal part.
        self.assertEqual(Scrapping_helper.string2float_of_square_meters(' 26.54 м²'), 26.54)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
from tkinter import *
from tkinter import Canvas
import pandas as pd
import random
# UI color palette (hex RGB).
PINK = "#e2979c"
RED = "#e7305b"
GREEN = "#9bdeac"
YELLOW = "#f7f5dd"
HONEYDEW = "#F0FFF0"
# -------------------------------------------------
# Lists
# Full 100-word deck; always loaded for its pinyin/meaning columns.
new_data = pd.read_csv(
    "/Users/kevinwong/Python Projects/Small Python Games + Programs/Flash Card Capstone Project - Day 31/csv files/chinesewords100.csv"
)
chinese_pinyin = new_data["Chinese Pronunciation"]
english_meanings = new_data["English Definition"]
# Resume from the saved progress file if it exists; otherwise start over
# with the full deck.
try:
    data = pd.read_csv(
        "/Users/kevinwong/Python Projects/Small Python Games + Programs/Flash Card Capstone Project - Day 31/csv files/words_to_learn.csv"
    )
except FileNotFoundError:
    data = pd.read_csv(
        "/Users/kevinwong/Python Projects/Small Python Games + Programs/Flash Card Capstone Project - Day 31/csv files/chinesewords100.csv"
    )
# Working deck: list of row-dicts, one per flashcard.
to_learn = data.to_dict(orient="records")
# ---------------------------------------------------
# Implementation of Pandas Data
# data = pd.read_csv('1000chinesewords.csv')
# Most common Chinese Words
# chinese_words = data['Chinese']
# Pinyin that relate to those words
# chinese_pinyin = data["Pinyin"]
# The meaning of those words in English
# english_meanings = data['Definition']
"""Created simpler data(.csv) to interpret into real data"""
# simple_data = {
#     "Chinese Words": chinese_words,
#     "Chinese Pronunication": chinese_pinyin,
#     "English Definition": english_meanings
# }
# frame = pd.DataFrame(simple_data)
# frame.to_csv('chinesewords.csv')
# words_known = {
#     "Words": []
# }
# frame = pd.DataFrame(words_known)
# frame.to_csv('/Users/kevinwong/Python Projects/Small Python Games + Programs/Flash Card Capstone Project - Day 31/csv files/known_words.csv')
# words_unknown = {
#     "Words": chinese_words
# }
# frame2 = pd.DataFrame(words_unknown)
# frame2.to_csv('/Users/kevinwong/Python Projects/Small Python Games + Programs/Flash Card Capstone Project - Day 31/csv files/unknown_words.csv')
def change_words_right():
    """Handle the 'known' (check) button: retire the current card, save
    progress, and deal the next card.

    Bug fix: the original picked the next card first and then removed that
    *new* card from to_learn -- so the card the user actually marked as
    known was never retired, and an unseen card was silently dropped
    instead. The answered card is now removed (and words_to_learn.csv
    rewritten) before the next card is chosen.
    """
    global current_card, flip_timer
    window.after_cancel(flip_timer)
    # Retire the card the user just answered correctly.
    if current_card in to_learn:
        to_learn.remove(current_card)
    pd.DataFrame(to_learn).to_csv(
        "/Users/kevinwong/Python Projects/Small Python Games + Programs/Flash Card Capstone Project - Day 31/csv files/words_to_learn.csv",
        index=False,
    )
    # Robustness: random.choice crashes on an empty deck.
    if not to_learn:
        return
    # Deal the next card, front (Chinese) side up.
    current_card = random.choice(to_learn)
    canvas.itemconfig(canvas_image, image=front_img)
    canvas.itemconfig(
        card_word,
        text=current_card["Traditional"],
        fill="black",
        font=("Arial", 60, "bold"),
    )
    canvas.itemconfig(card_title, text="Chinese", fill="black")
    canvas.itemconfig(card_pinyin, text="", fill="black")
    # Flip to the English side after three seconds.
    flip_timer = window.after(3000, func=switch)
def change_words_wrong():
    """Handle the 'unknown' (X) button: keep the card in the deck and deal
    a new random card, front (Chinese) side up."""
    global current_card, flip_timer
    window.after_cancel(flip_timer)
    current_card = random.choice(to_learn)
    # Reset the canvas to the card front.
    front_face = {
        "text": current_card["Traditional"],
        "fill": "black",
        "font": ("Arial", 60, "bold"),
    }
    canvas.itemconfig(canvas_image, image=front_img)
    canvas.itemconfig(card_word, **front_face)
    canvas.itemconfig(card_title, text="Chinese", fill="black")
    canvas.itemconfig(card_pinyin, text="", fill="black")
    # Flip to the English side after three seconds.
    flip_timer = window.after(3000, func=switch)
def switch():
    """Flip the current card to its back: English meaning plus pinyin."""
    canvas.itemconfig(canvas_image, image=back_img)
    meaning = current_card["English Definition"]
    # Long definitions get a smaller font first so they fit on the card;
    # the follow-up itemconfig only overrides text/fill, not the font.
    if len(meaning) > 15:
        canvas.itemconfig(
            card_word, text=meaning, fill="white", font=("Arial", 35, "bold")
        )
    canvas.itemconfig(card_word, text=meaning, fill="white")
    canvas.itemconfig(card_title, text="English", fill="white")
    canvas.itemconfig(
        card_pinyin, text=current_card["Chinese Pronunciation"], fill="white"
    )
# --------------------------------------------------
# Implementation of Buttons + Boxes
window = Tk()
window.title("Flashcard Project")
window.config(padx=50, pady=50, bg=HONEYDEW)
# Schedule the first card flip; each deal reschedules this timer.
flip_timer = window.after(3000, switch)
# flashcard = Label(fg = HONEYDEW)
front_img = PhotoImage(
    file=r"/Users/kevinwong/Python Projects/Small Python Games + Programs/Flash Card Capstone Project - Day 31/images.png/card_front.png"
)
back_img = PhotoImage(
    file=r"/Users/kevinwong/Python Projects/Small Python Games + Programs/Flash Card Capstone Project - Day 31/images.png/card_back.png"
)
canvas = Canvas(width=800, height=526, bg=HONEYDEW, highlightbackground=HONEYDEW)
canvas_image = canvas.create_image(400, 263, image=front_img)
# NOTE(review): font family "Ariel" is presumably a typo for "Arial";
# Tk falls back to a default if unknown -- confirm intended.
card_title = canvas.create_text(
    400, 150, fill="black", text="", font=("Ariel", 40, "italic")
)
card_word = canvas.create_text(
    400, 263, fill="black", text="", font=("Ariel", 60, "bold")
)
card_pinyin = canvas.create_text(
    400, 350, fill="black", text="", font=("Ariel", 40, "bold")
)
canvas.grid(row=0, column=0, columnspan=2)
# language = Label(text = "Chinese", font = ("Arial", 40, 'italic'))
check_mark = PhotoImage(
    file="/Users/kevinwong/Python Projects/Small Python Games + Programs/Flash Card Capstone Project - Day 31/images.png/right_one.png"
)
# "I know this word" button.
check_button = Button(
    image=check_mark,
    bd=0,
    highlightthickness=0,
    highlightbackground=HONEYDEW,
    command=change_words_right,
)
check_button.grid(row=1, column=1)
x_mark = PhotoImage(
    file="/Users/kevinwong/Python Projects/Small Python Games + Programs/Flash Card Capstone Project - Day 31/images.png/wrong_one.png"
)
# "I don't know this word" button.
x_button = Button(
    image=x_mark,
    highlightthickness=0,
    bd=0,
    highlightbackground=HONEYDEW,
    command=change_words_wrong,
)
x_button.grid(row=1, column=0)
# Deal the first card, then enter the Tk event loop.
change_words_wrong()
window.mainloop()
|
import unittest
import pytest
import PIL
from erika.image_converter import *
root_path = 'tests/test_resources/'
# noinspection SpellCheckingInspection
class WrappedImageUnitTest(unittest.TestCase):
    """Tests for erika.image_converter.WrappedImage: grayscale/RGB detection,
    pixel thresholding, error handling, and image dimensions.

    Bug fix: testWidth/testHeight had their assertions swapped -- testWidth
    asserted on height() and testHeight on width(). The test image is
    20 pixels wide by 30 pixels tall; each test now checks the dimension
    its name promises.
    """
    def testPillowIsProperlyInstalled(self):
        # NOTE(review): pinning an exact Pillow version is brittle, but it
        # documents the tested environment; left as-is.
        self.assertIsNotNone(PIL.PILLOW_VERSION)
        self.assertNotEqual(PIL.PILLOW_VERSION, "1.1.7")
        self.assertEqual(PIL.PILLOW_VERSION, "6.2.0")
    def testBmpImageIsRecognizedAsGrayScale(self):
        """simple test that grayscale images are recognized as such"""
        image1 = WrappedImage(root_path + 'test_image_grayscale_1.bmp')
        self.assertTrue(image1.is_grayscale())
        self.assertFalse(image1.is_rgb())
        image2 = WrappedImage(root_path + 'test_image_grayscale_2.bmp')
        self.assertTrue(image2.is_grayscale())
        self.assertFalse(image2.is_rgb())
        image3 = WrappedImage(root_path + 'test_image_monochrome_1.bmp')
        self.assertTrue(image3.is_grayscale())
        self.assertFalse(image3.is_rgb())
        image4 = WrappedImage(root_path + 'test_image_monochrome_2.bmp')
        self.assertTrue(image4.is_grayscale())
        self.assertFalse(image4.is_rgb())
    def testBmpImageIsRecognizedAsRgb(self):
        """simple test that RGB color images are recognized as such"""
        image = WrappedImage(root_path + 'test_image_color.bmp')
        self.assertTrue(image.is_rgb())
        self.assertFalse(image.is_grayscale())
    def testPngImageIsRecognizedAsRgb(self):
        """simple test that RGB color images are recognized as such"""
        image = WrappedImage(root_path + 'ubuntu-logo32.png')
        self.assertTrue(image.is_rgb())
        self.assertFalse(image.is_grayscale())
    def testIsPixelSetWorksForGrayScale(self):
        """simple test that the wrapper can correctly determine if a pixel is colored-in (grayscale image)"""
        image_white_first_pixel = WrappedImage(root_path + 'test_image_monochrome_1.bmp')
        self.assertFalse(image_white_first_pixel.is_pixel_set(0, 0))
        imageBlackFirstPixel = WrappedImage(root_path + 'test_image_grayscale_2.bmp')
        self.assertTrue(imageBlackFirstPixel.is_pixel_set(0, 0))
    def testAdjustableThresholdWorksForGrayScale(self):
        """simple test that the wrapper can correctly determine if a pixel is colored-in when the threshold is
        adjusted """
        image_gray_first_pixel = WrappedImage(root_path + 'test_image_grayscale_1.bmp')
        self.assertTrue(image_gray_first_pixel.is_pixel_set(0, 0))
        image_gray_first_pixel_higher_threshold = WrappedImage(root_path + 'test_image_grayscale_1.bmp', threshold=127)
        self.assertFalse(image_gray_first_pixel_higher_threshold.is_pixel_set(0, 0))
    def testIsPixelSetWorksForRgb(self):
        """simple test that the wrapper can correctly determine if a pixel is colored-in (RGB image)"""
        color_image = WrappedImage(root_path + 'test_image_color.bmp')
        self.assertFalse(color_image.is_pixel_set(0, 0))
        image_black_first_pixel = WrappedImage(root_path + 'ubuntu-logo32.png')
        self.assertTrue(image_black_first_pixel.is_pixel_set(0, 0))
    def testAdjustableThresholdWorksForRgb(self):
        """simple test that the wrapper can correctly determine if a pixel is colored-in when the threshold is
        adjusted """
        color_image = WrappedImage(root_path + 'test_image_color.bmp')
        self.assertFalse(color_image.is_pixel_set(0, 0))
        color_image_higher_threshold = WrappedImage(root_path + 'test_image_color.bmp', threshold=153)
        self.assertTrue(color_image_higher_threshold.is_pixel_set(0, 0))
    def testTextFileInput(self):
        """simple test how the wrapper behaves when given a text file (ASCII art)"""
        self.assertRaisesRegex(NotAnImageException, "Exception when opening the file .* - maybe not an image[?]",
                               load_text_file_as_wrapped_image)
    def testNonExistentFileInput(self):
        """simple test how the wrapper behaves when the file is not found"""
        self.assertRaisesRegex(FileNotFoundError, "Exception when opening the file .* - file not found",
                               load_non_existent_file)
    def testWidth(self):
        """width() reports the horizontal pixel count (20) of the test image"""
        grayscale_image = WrappedImage(root_path + 'test_image_grayscale_1.bmp')
        self.assertEqual(grayscale_image.width(), 20)
    def testHeight(self):
        """height() reports the vertical pixel count (30) of the test image"""
        grayscale_image = WrappedImage(root_path + 'test_image_grayscale_1.bmp')
        self.assertEqual(grayscale_image.height(), 30)
def load_renamed_png_file_as_wrapped_image():
    # Helper for exception tests: open a PNG renamed to a .txt extension.
    # NOTE(review): not referenced by the suite above -- confirm intent.
    WrappedImage(root_path + 'ubuntu-logo32.png.renamedwithextension.txt')
def load_text_file_as_wrapped_image():
    # Helper for testTextFileInput: opening ASCII art must raise NotAnImageException.
    WrappedImage(root_path + 'test_ascii_art.txt')
def load_non_existent_file():
    # Helper for testNonExistentFileInput: a missing path must raise FileNotFoundError.
    WrappedImage(root_path + 'nonexistent_file_for_test.xyz')
def main():
    # Entry point: discover and run the whole unittest suite.
    unittest.main()
# Allow running this test module directly.
if __name__ == '__main__':
    main()
|
# Copyright (c) 2013, Pullenti. All rights reserved. Non-Commercial Freeware.
# This class is generated using the converter UniSharping (www.unisharping.ru) from Pullenti C#.NET project (www.pullenti.ru).
# See www.pullenti.ru/downloadpage.aspx.
from pullenti.unisharp.Utils import Utils
from pullenti.ner.ReferentClass import ReferentClass
from pullenti.ner.business.FundsKind import FundsKind
class FundsMeta(ReferentClass):
    """Metadata descriptor for FundsReferent (securities: stocks, capital).

    Generated code (UniSharping converter from the C# project); style quirks
    such as trailing semicolons are preserved. Russian string literals are
    runtime display captions and must not be translated.
    """
    def __init__(self) -> None:
        super().__init__()
        # Feature object for the "kind" attribute; filled in by initialize().
        self.kind_feature = None;
    @staticmethod
    def initialize() -> None:
        """Build the singleton GLOBAL_META and register all funds features."""
        from pullenti.ner.business.FundsReferent import FundsReferent
        FundsMeta.GLOBAL_META = FundsMeta()
        f = FundsMeta.GLOBAL_META.addFeature(FundsReferent.ATTR_KIND, "Класс", 0, 1)
        FundsMeta.GLOBAL_META.kind_feature = f
        # Allowed values for the "kind" feature (stock / authorized capital).
        f.addValue(Utils.enumToString(FundsKind.STOCK), "Акция", None, None)
        f.addValue(Utils.enumToString(FundsKind.CAPITAL), "Уставной капитал", None, None)
        FundsMeta.GLOBAL_META.addFeature(FundsReferent.ATTR_TYPE, "Тип", 0, 1)
        FundsMeta.GLOBAL_META.addFeature(FundsReferent.ATTR_SOURCE, "Эмитент", 0, 1)
        FundsMeta.GLOBAL_META.addFeature(FundsReferent.ATTR_PERCENT, "Процент", 0, 1)
        FundsMeta.GLOBAL_META.addFeature(FundsReferent.ATTR_COUNT, "Количество", 0, 1)
        FundsMeta.GLOBAL_META.addFeature(FundsReferent.ATTR_PRICE, "Номинал", 0, 1)
        FundsMeta.GLOBAL_META.addFeature(FundsReferent.ATTR_SUM, "Денежная сумма", 0, 1)
    @property
    def name(self) -> str:
        # Object type name, delegated to FundsReferent (imported lazily to
        # avoid a circular import).
        from pullenti.ner.business.FundsReferent import FundsReferent
        return FundsReferent.OBJ_TYPENAME
    @property
    def caption(self) -> str:
        # Human-readable caption ("Security").
        return "Ценная бумага"
    # Icon identifier for UI display.
    IMAGE_ID = "funds"
    def getImageId(self, obj : 'Referent'=None) -> str:
        return FundsMeta.IMAGE_ID
    # Singleton instance, set by initialize().
    GLOBAL_META = None
#!/usr/bin/env python
# coding: utf-8
# # Projected Gradient-based algorithms
#
# In this notebook, we code our Projected gradient-based optimization algorithms.
# We consider here
# * Positivity constraints
# * Interval constraints
# # 1. Projected Gradient algorithms (for positivity or interval constraints)
#
# For minimizing a differentiable function $f:\mathbb{R}^n \to \mathbb{R}$, given:
# * the function to minimize `f`
# * a 1st order oracle `f_grad` (see `problem1.ipynb` for instance)
# * an initialization point `x0`
# * the sought precision `PREC`
# * a maximal number of iterations `ITE_MAX`
#
#
# these algorithms perform iterations of the form
# $$ x_{k+1} = P\left(x_k - \gamma_k \nabla f(x_k)\right) $$
# where $\gamma_k$ is a stepsize to choose and $P$ is the projector onto the convex constraint set. We only consider positivity and interval constraints.
# ### 1.a. Constant stepsize projected gradient algorithm for positivity constraints
#
# First, we consider the case where the stepsize is fixed over iterations and passed an argument `step` to the algorithm.
# Q. Fill the function below accordingly.
import numpy as np
import timeit
def positivity_gradient_algorithm(f , f_grad , x0 , step , PREC , ITE_MAX ):
    """Constant-stepsize projected gradient for min f(x) s.t. x >= 0.

    f        -- objective function
    f_grad   -- gradient oracle of f
    x0       -- starting point (array of length >= 2, so the final print works)
    step     -- constant stepsize gamma
    PREC     -- sought precision (stopping tolerance)
    ITE_MAX  -- maximal number of iterations

    Returns (x, x_tab): the final iterate and the stacked iterate history.

    This fills in the course template: the placeholder `x = x` step now
    performs the actual projected gradient iteration, and the always-true
    placeholder `if ...:` stopping test is replaced by a valid rule.
    """
    x = np.copy(x0)
    g = f_grad(x)
    x_tab = np.copy(x)
    print("------------------------------------\n Constant Stepsize gradient\n------------------------------------\nSTART -- stepsize = {:0}".format(step))
    t_s = timeit.default_timer()
    for k in range(ITE_MAX):
        # Gradient step followed by the coordinate-wise projection onto
        # the positive orthant {x : x >= 0}.
        g = f_grad(x)
        x_prev = x
        x = np.maximum(x - step*g, 0.0)
        x_tab = np.vstack((x_tab,x))
        # The unconstrained test ||grad f(x)|| < stop is wrong here: at a
        # constrained minimum the gradient need not vanish (the constraint
        # can be active). Stop when the iterates no longer move instead.
        if np.linalg.norm(x - x_prev) < PREC:
            break
    t_e = timeit.default_timer()
    print("FINISHED -- {:d} iterations / {:.6f}s -- final value: {:f} at point ({:.2f},{:.2f})\n\n".format(k,t_e-t_s,f(x),x[0],x[1]))
    return x,x_tab
# ### 1.b. Constant stepsize projected gradient algorithm for interval constraints
#
# First, we consider the case where the stepsize is fixed over iterations and passed an argument `step` to the algorithm.
# Q. Fill the function below accordingly. Then, test you algorithm in `2_Optimization100.ipynb [Sec. 1a]` for Problem 1.
import numpy as np
import timeit
def interval_gradient_algorithm(f , f_grad , x0 , infbound , supbound , step , PREC , ITE_MAX ):
    """Constant-stepsize projected gradient for min f(x) s.t. infbound <= x <= supbound.

    f        -- objective function
    f_grad   -- gradient oracle of f
    x0       -- starting point (array of length >= 2, so the final print works)
    infbound -- lower bound(s) of the box (scalar or array)
    supbound -- upper bound(s) of the box (scalar or array)
    step     -- constant stepsize gamma
    PREC     -- sought precision (stopping tolerance)
    ITE_MAX  -- maximal number of iterations

    Returns (x, x_tab): the final iterate and the stacked iterate history.

    This fills in the course template: the placeholder `x = x` step now
    performs the actual projected gradient iteration, and the always-true
    placeholder `if ...:` stopping test is replaced by a valid rule.
    """
    x = np.copy(x0)
    g = f_grad(x)
    x_tab = np.copy(x)
    print("------------------------------------\n Constant Stepsize gradient\n------------------------------------\nSTART -- stepsize = {:0}".format(step))
    t_s = timeit.default_timer()
    for k in range(ITE_MAX):
        # Gradient step followed by the projection onto the box
        # [infbound, supbound] (np.clip is exactly that projection).
        g = f_grad(x)
        x_prev = x
        x = np.clip(x - step*g, infbound, supbound)
        x_tab = np.vstack((x_tab,x))
        # ||grad f(x)|| does not vanish at an active constraint, so the
        # unconstrained gradient-norm test is invalid; stop when the
        # iterates no longer move instead.
        if np.linalg.norm(x - x_prev) < PREC:
            break
    t_e = timeit.default_timer()
    print("FINISHED -- {:d} iterations / {:.6f}s -- final value: {:f} at point ({:.2f},{:.2f})\n\n".format(k,t_e-t_s,f(x),x[0],x[1]))
    return x,x_tab
|
import os

# Path of the CSV routes file; overridable through the FILE environment
# variable (handy for tests).
FILE = os.environ.get('FILE', 'tests/input-file-test.csv')

# Skeleton of a raw HTTP/1.1 response, filled in with str.format()
# (status_code, status, body placeholders).
DATA_STRUCTURE = (
    "HTTP/1.1 {status_code} {status}\r\n"
    "Content-Type: application/json; charset=utf-8"
    "\r\n\r\n{body}\r\n\r\n"
)

# Seed rows (origin,destination,price) used to initialize the routes file;
# note the last row carries no trailing newline.
INITIAL_DATA = [
    'GRU,BRC,10\n',
    'BRC,SCL,5\n',
    'GRU,CDG,75\n',
    'GRU,SCL,20\n',
    'GRU,ORL,56\n',
    'ORL,CDG,5\n',
    'SCL,ORL,20',
]
|
import yarp as y
from time import sleep
## Client class for hand data reciving
class HandClient:
    """YARP client for the handData service: sends 'getdata'/'getall'
    commands on an output port; opens an input port (currently unused --
    the server's output is routed to /listener/in instead)."""
    ## Define the finger and articulation ids, then set up the YARP ports.
    def __init__(self):
        ## Default finger id mapping
        self.fingers={
            "Thumb": 0,
            "Index": 1,
            "Middle": 2,
            "Ring": 3,
            "Pinky": 4
        }
        ## Default articulation id mapping
        self.arts={
            "Outer": 0,
            "Inner": 1,
            "Abductor": 2
        }
        # init YARP ports
        y.Network.init()
        self.port_out = y.BufferedPortBottle()
        self.portname_out="/data_client/out"
        self.port_out.open(self.portname_out)
        self.style = y.ContactStyle()
        # BUG FIX: the ContactStyle attribute is spelled 'persistent'; the old
        # code assigned a nonexistent 'persisten' attribute, so the
        # connections below were never marked persistent.
        self.style.persistent = 1
        self.serverportname_in = "/data_server/in"
        y.Network.connect(self.portname_out, self.serverportname_in, self.style)
        self.port_in = y.BufferedPortBottle()
        self.portname_in = "/data_client/in"
        self.port_in.open(self.portname_in)
        self.serverportname_out = "/data_server/out"
        #y.Network.connect(self.serverportname_out, self.portname_in, self.style) #connect the server's output to the client's input
        self.listenerportname_in = "/listener/in"
        y.Network.connect(self.serverportname_out, self.listenerportname_in, self.style)
    # Close the YARP connections made in __init__ and shut the network down.
    def __del__(self):
        # BUG FIX: disconnect the connection that __init__ actually made
        # (server out -> listener in); the old code targeted the unused
        # client input port.
        y.Network.disconnect(self.serverportname_out, self.listenerportname_in, self.style)
        y.Network.disconnect(self.portname_out, self.serverportname_in, self.style)
        y.Network.fini()
    ## Request the data of a single finger from the server.
    # @param finger int finger id (see self.fingers).
    def get_data(self, finger):
        bottle = self.port_out.prepare()
        bottle.clear()
        bottle.addString("getdata")
        bottle.addInt(finger)
        self.port_out.write()
    ## Request the data of all fingers from the server.
    def get_all(self):
        bottle = self.port_out.prepare()
        bottle.clear()
        bottle.addString("getall")
        self.port_out.write()
|
# -*- coding: utf-8 -*-
# Author:liyu
# 导入库位库存
from UIAutomation.Utils import close_oracle, basic_cit_01
def release_import_inventory():
    """Re-arm the location-inventory import fixtures: reset the operation and
    card status, drop the inventory log rows and restore the participant
    relations.

    Prints the success message only when every statement committed; the old
    version printed it even after the except branch had run, and closed the
    cursor twice on failure.
    """
    con, curs = basic_cit_01()
    try:
        sql = [
            ''' UPDATE XDW_APP.TS_OPERATION SET STATUS = 0, ACT_START_TIME = NULL, ACT_END_TIME = NULL WHERE OPERATION_UKID = 51766200780012166''',
            ''' UPDATE XDW_APP.MS_CARD SET STATUS = 0 WHERE CARD_UKID = 51761200980144011''',
            ''' DELETE FROM XDW_APP.RU_INVENTORY_LOG WHERE OPERATION_UKID = 51766200780012166 ''',
            ''' UPDATE XDW_APP.CM_PARTICIPANT_RELATION SET RELATION_STATUS = 20 WHERE RELATION_SN IN (51761700980144023, 51766200989012167)'''
        ]
        for statement in sql:
            curs.execute(statement)
        con.commit()
    except Exception:
        print("导入库位库存数据失败")
    else:
        print("导入库位库存数据成功")
    finally:
        # Close exactly once, whether or not the statements succeeded.
        close_oracle(con, curs)
def select_import_inventory():
    """Verify the fixture reset: both status columns must be back to 10.

    Prints the verification-success message only when the check holds; the
    old version printed it even after an exception, and compared fetchone()
    row *tuples* against plain ints so the assertion could never pass.
    """
    con, curs = basic_cit_01()
    try:
        actual = []
        expected = [10, 10]
        sql = [
            '''select m.status from xdw_app.ts_operation m where m.operation_ukid=51766200780012166''',
            ''' select n.status from xdw_app.ms_card n where n. CARD_UKID = 51761200980144011''']
        for statement in sql:
            curs.execute(statement)
            row = curs.fetchone()
            # fetchone() yields a row tuple -- keep only the status column.
            actual.append(row[0] if row else None)
        assert actual == expected
    except Exception as e:
        print (e)
    else:
        print ("导入库位库存数据验证成功!!!")
    finally:
        close_oracle(con, curs)
|
import requests
from config import *
from gdax_auth import GdaxAuth
import pprint
from market_maker import get_position, get_bid_ask, get_usd_ex, get_total_balance
if __name__ == "__main__":
    # Authenticated GDAX (Coinbase Pro) request: dump every account's raw
    # balances, then the aggregate computed by market_maker.get_total_balance.
    # (Python 2 script: uses print statements.)
    auth = GdaxAuth(key, secret, passphrase)
    resp = requests.get(base_url + '/accounts', auth=auth)
    print resp.json()
    for acct in resp.json():
        print 'type: %s, balance: %s, hold: %s, available: %s' % (acct.get('currency'), acct.get('balance'), acct.get('hold'), acct.get('available'))
    total_value, balances = get_total_balance(auth=auth)
    print 'Total value is: %s' % total_value
    print 'Balances: %s' % balances
|
from item.static import Item
from block_group.inode import Inode
from directory.file import File
from factory.filesystem import filesystem_factory
from factory.superblock import superblock_factory
class Directory:
    """An ext2-style directory: the block at `address` holds a chain of
    variable-length records (inode, rec_len, name_len, file_type, name),
    parsed into linked File objects via the filesystem/superblock factories."""
    # Byte size of the four fixed-width fields that precede the file name.
    STATIC_FILE_FIELDS_LENGTH = 8
    def __init__(self, address):
        # Layout of the fixed part of one directory record.
        self.file_structure = [
            Item('inode', Item.TYPE_NUMERIC, 0, 4),
            Item('rec_len', Item.TYPE_NUMERIC, 4, 2),
            Item('name_len', Item.TYPE_NUMERIC, 6, 1),
            Item('file_type', Item.TYPE_NUMERIC, 7, 1),
        ]
        self.file = None  # head of the parsed file chain (set by get_files)
        self._filesystem = filesystem_factory.get()
        self.superblock = superblock_factory.get()
        self.address = address  # block number of this directory's data
        self.get_files()
    def get_file(self, record_offset=0):
        """Parse and return the single File record that starts
        `record_offset` bytes into this directory's block."""
        _file = File()
        # NOTE(review): reads 10 bytes beyond the fixed fields -- presumably
        # slack; the name itself is re-read precisely below once name_len is
        # known. Confirm against the filesystem reader's contract.
        block = self._filesystem.read(self.calculate_offset(
            record_offset), Directory.STATIC_FILE_FIELDS_LENGTH + 10)
        for element in self.file_structure:
            setattr(
                _file,
                element.name,
                element.get_value(block)
            )
        # The name follows the fixed fields and is name_len bytes long.
        setattr(
            _file,
            'file_name',
            Item(
                'file_name',
                Item.TYPE_STRING,
                0,
                _file.name_len
            ).get_value(self._filesystem.read(
                self.calculate_offset(Directory.STATIC_FILE_FIELDS_LENGTH + record_offset), _file.name_len))
        )
        return _file
    def get_files(self):
        """Walk the record chain (each record's rec_len gives the offset of
        the next) and link the parsed Files through next_file."""
        self.file = self.get_file(0)
        current = self.file
        offset = 0
        while not current.is_last_file():
            offset += current.rec_len
            current.next_file = self.get_file(offset)
            current = current.next_file
    def calculate_offset(self, offset):
        """Absolute byte position of `offset` within this directory's block."""
        start = self.superblock.get_block_size() * self.address
        return start + offset
    def __str__(self):
        """ls-style listing: name, type, ctime, size and inode per file."""
        files = 'name type created_at size inode\n'
        files += '--- --- --- --- ---'
        current = self.file
        while current is not None:
            files += "\n{} {} {} {} {}".format(
                current.file_name,
                current.file_type,
                current.get_inode().i_ctime,
                current.get_inode().i_size,
                current.inode
            )
            current = current.next_file
        return self.padding(files)
    @staticmethod
    def padding(text):
        """Align the space-separated columns produced by __str__: pad the
        first column to the widest token plus five spaces."""
        formatted_text = ''
        arr = text.split('\n')
        length = 0
        for ell in arr:
            for txt in ell.split(' '):
                if len(txt) > length:
                    length = len(txt)
        spacing = length + 5
        for ell in arr:
            txt = ell.split(' ')
            formatted_text += "{name: <{spaces}}{type: <5}{created_at: <20}{size: <10}{inode: <10}\n".format(
                name=txt[0], type=txt[1], created_at=txt[2], size=txt[3], inode=txt[4], spaces=spacing)
        return formatted_text
def get_root_directory():
    """Build the Directory for the filesystem root (inode 2 in ext2)."""
    return Directory(Inode(2).get_direct_blocks())
def change_directory(directory, name):
    """Return the Directory for the entry called `name` inside `directory`.

    Raises when the entry does not exist (the old code fell off the loop with
    current=None and crashed with AttributeError) or when it is not a
    directory.
    """
    current = directory.file
    while current is not None:
        if current.file_name == name:
            break
        current = current.next_file
    if current is None:
        raise Exception('no such directory: {}'.format(name))
    if current.file_type == File.EXT2_FT_DIR:
        return Directory(current._inode.get_direct_blocks())
    raise Exception('cannot cd into file')
|
from flask import Flask, render_template, request, redirect, session, url_for, flash
from tools.GetRequests import GetRequest
from tools.Likes import FindLikes
from tools.Matches import FindMatches
from tools.UserInfo import GetInfo
from tools.login_query import ValidateLogIn
from tools.register_query import ValidateSignUp
app = Flask(__name__)
wsgi_app = app.wsgi_app
# NOTE(review): secret key is hard-coded in source; move it to config/env.
app.secret_key = 'e471e07eb0c2977afd4f398907cb78f8'
# Module-level state shared across requests -- only safe for a single
# process serving one user at a time; consider per-session storage.
logged_user = []
potential_matches = []
likes = []
matched = []
@app.route('/')
def index():
    """Landing page; already-authenticated users go straight to the dashboard."""
    if "id" not in session:
        return render_template('one/index.html')
    return redirect(url_for('dashboard'))
@app.route('/about')
def about():
    """Render the static About page."""
    return render_template('one/about.html')
@app.route('/contact')
def contact():
    """Render the Contact page with the list of team members."""
    members = [
        "Dylan Barker: dylanbarker59@gmail.com | King's College London ",
        "Eyuael Berhe: eyuael.berhe@gmail.com | Warwick University",
        "Isaac Addo: isaacaddo1714@gmail.com | King's College London",
        "Muhriz Tauseef: muhriztauseef82@gmail.com | King's College London",
    ]
    return render_template('one/contact.html', team=members)
@app.route('/signup', methods=["GET", "POST"])
def signup():
    """Registration page: validate the submitted form, create the account
    via ValidateSignUp, then send the user to the login page."""
    if request.method == "POST":
        name = request.form.get("name")
        age = request.form.get("age")
        number = request.form.get("number")
        gender = request.form.get("gender")
        email = request.form.get("email")
        password = request.form.get("password")
        postcode = request.form.get("postcode")
        # The backend stores the role as an "is carer" boolean string.
        role = "true" if request.form.get("role") == "carer" else "false"
        s_d = request.form.get("speciality_or_disabled")
        ranges = request.form.get("range")
        # Every field is mandatory; "Select" is the drop-down placeholder.
        for i in (name, age, number, gender, email, password, postcode, role, s_d, ranges):
            if not i or i == "Select":
                flash("Please fill in all fields", "danger")
                return render_template('one/signup.html', name=name, number=number, email=email, postcode=postcode,
                                       ranges=ranges)
        if not ValidateSignUp(email, name, age, s_d, ranges, postcode, role, gender, number, password).sign_up():
            # BUG FIX: the category was missing here, so this message rendered
            # without the "danger" styling every other validation flash uses.
            flash("Email has already been used, try another", "danger")
            return redirect(url_for('signup'))
        flash("Sign up has succeeded, please login", "success")
        return redirect(url_for('login'))
    return render_template('one/signup.html')
@app.route('/login', methods=["GET", "POST"])
def login():
    """Log a user in: validate credentials, cache their profile row in the
    module-level logged_user list, and store their id in the session."""
    global logged_user
    if request.method == "POST":
        email = request.form.get("email")
        password = request.form.get("password")
        for i in (email, password):
            if not i:
                flash("Please fill in all fields", "danger")
                return render_template('one/login.html', email=email)
        identity = ValidateLogIn(email, password).log_in()
        if identity:
            logged_user = GetInfo(identity).Info()
            # Row layout assumed: index 0 is the user id -- confirm GetInfo.
            session['id'] = logged_user[0]
            # Honor a ?dashboard=<url> redirect target when present.
            return redirect(request.args.get("dashboard") or url_for("dashboard"))
        else:
            flash("Email and password combination does not exist", "danger")
            return render_template('one/login.html', email=email)
    return render_template('one/login.html')
@app.route('/logout')
def logout():
    """Drop the user's session id (no-op when absent) and return home."""
    session.pop('id', None)
    return redirect(url_for('index'))
def get_information():
    """Return the display fields for the candidate at the current session
    page; potential_matches rows carry the id at index 0, which is skipped."""
    return potential_matches[session["page"]][1:]
@app.route('/dashboard', methods=["GET", "POST"])
def dashboard():
    """Main swipe page: show one potential match at a time and record the
    logged-in user's Match / pass decision."""
    global potential_matches
    if "id" not in session:
        return redirect(url_for('login'))
    if request.method == "POST":
        decision = request.form.get("decision")
        if decision == "Match":
            # NOTE(review): elsewhere FindLikes is invoked as
            # FindLikes(id, other_id).Like(); here it gets one argument and
            # .Like() is never called -- confirm this records the like.
            FindLikes(session['id'])
        else:
            ... # Add entry to like table saying not interested
    # "page" indexes into potential_matches; advance on every POST, otherwise
    # (fresh GET) restart and re-query the candidate list.
    if "page" in session and request.method == "POST":
        session["page"] += 1
    else:
        session["page"] = 0
        potential_matches = FindMatches(session["id"]).PotentialMatches()
    if len(potential_matches) > session["page"]:
        name, gender, age, distance = get_information()
        return render_template('two/dashboard.html', name=name, gender=gender, age=age, distance=distance)
    else:
        # Ran out of candidates: reset paging state and show the end card.
        potential_matches.clear()
        # ----------------------------------------------------------------------
        session.pop("page", None)
        return render_template('two/dashboard.html', end=True)
@app.route('/requests', methods=["GET", "POST"])
def requests():
    """Show incoming 'like' requests and process the user's decision."""
    global likes
    if "id" not in session:
        return redirect(url_for('login'))
    likes = GetRequest(session['id']).FindRequests()
    if request.method == "POST":
        # Find which request's decision drop-down was actually used.
        selected = None
        for i in range(len(likes)):
            decision_user_id = likes[i][0]
            decision = request.form.get("decision" + decision_user_id)
            if not decision or decision == "Select":
                continue
            else:
                selected = i
        if selected is not None:
            # BUG FIX: the old code popped the entry first and then indexed
            # likes[selected], addressing the *next* entry (or raising
            # IndexError for the last one). Take the entry before removing it.
            chosen = likes.pop(selected)
            FindLikes(session['id'], chosen[0]).Like()
        else:
            flash("Choose a decision before pressing submit", "danger")
        # Do query to insert here
    end = not likes
    return render_template('two/requests.html', users=likes, end=end)
@app.route('/matches', methods=["GET"])
def matches():
    """List the logged-in user's confirmed matches."""
    global matched
    if "id" in session:
        return render_template('two/matches.html', matches=matched)
    return redirect(url_for('login'))
@app.route('/profile', methods=["GET"])
def profile():
    """Show the logged-in user's own details (cached in logged_user at login)."""
    global logged_user
    if "id" not in session:
        return redirect(url_for('login'))
    # Row layout after the id appears to be: name, age, speciality/disability,
    # postcode, role, email, gender, number, password -- confirm with GetInfo.
    name, age, s_d, location, role, email, gender, number, password = logged_user[1:]
    if role == "Disabled":
        s_d = "Disability: " + s_d
    else:
        s_d = "Speciality: " + s_d
    return render_template('two/profile.html', name=name, age=age, s_d=s_d, location=location, role=role, email=email,
                           gender=gender, number=number, password=password)
# Start Flask's development server when run directly.
if __name__ == "__main__":
    app.run()
|
import requests
import json
# Google Maps Geocoding endpoint. (Python 2 script: print statements below.)
url = 'https://maps.googleapis.com/maps/api/geocode/json'
with open('campusbuildings_refined.json') as data_file:
    data = json.load(data_file)
keys_list = data.keys()
length = len(keys_list)
print length
lat_long = {}
kind = {}
# Single-address smoke test before running the (commented-out) batch loop.
building = "1 E 26-1/2th St, Austin TX, 78705"
params = {'sensor': 'false', 'address': building}
r = requests.get(url, params=params)
results = r.json()['results']
location = results[0]['geometry']['location']
print location['lat'], location['lng']
print results[0]['geometry']['location_type']
# Batch version kept for reference: geocode every building and persist the
# coordinates (result1.json) and location types (result2.json).
# for key in keys_list:
#     building = str(data[key])+", UT Austin, Texas"
#     params = {'sensor': 'false', 'address': building}
#     print key
#     print building
#     r = requests.get(url, params=params)
#     results = r.json()['results']
#     #print results
#     location = results[0]['geometry']['location']
#     print location['lat'], location['lng']
#     print results[0]['geometry']['location_type']
#     lat_long[str(key)] = (location['lat'], location['lng'])
#     kind[str(key)] = results[0]['geometry']['location_type']
# with open('result1.json', 'w') as fp:
#     json.dump(lat_long, fp)
# with open('result2.json', 'w') as fp:
#     json.dump(kind, fp)
__author__ = 'thorwhalen'
import datetime
from . import reporting as rp
import pandas
# settings
save_folder = '/D/Dropbox/dev/py/data/query_data/'  # pickle output directory
account_list = rp.get_account_id('dict')
account_list = list(account_list.keys())
numOfDays = 60  # report look-back window, in days
report_query_str = rp.mk_report_query_str(varList='q_iipic', start_date=numOfDays)
def save_file(account):
    """Pickle path for *account*: <save_folder><account>-<YYYYMMDD>-<numOfDays>days.p"""
    stamp = datetime.date.today().strftime('%Y%m%d')
    return '{}{}-{}-{}days.p'.format(save_folder, account, stamp, numOfDays)
def getting_a_bunch_of_queries():
    """Download the query report for each account and pickle one DataFrame
    per account into save_folder."""
    # running that shit
    i = 0
    # NOTE(review): this rebinding shadows the module-level account_list and
    # restricts the run to the 'test' account -- looks like leftover
    # debugging; remove it to process every account.
    account_list = ['test']
    for account in account_list:
        i = i + 1
        print('({}/{}) downloading {}'.format(i, len(account_list), account))
        report_downloader = rp.get_report_downloader(account)
        df = rp.download_report(
            report_downloader=report_downloader,
            report_query_str=report_query_str,
            download_format='df',
        )
        saveFile = save_file(account)
        print(' Saving to {}'.format(saveFile))
        # NOTE(review): DataFrame.save was removed from pandas long ago;
        # df.to_pickle(saveFile) is the modern equivalent -- confirm the
        # pandas version this project pins.
        df.save(saveFile)
# Kick off the batch download when the module is executed.
getting_a_bunch_of_queries()
|
# Collect every line of MyFile.txt (stripped of surrounding whitespace).
# BUG FIX: the loop previously rebound `word` to a fresh one-element list on
# each iteration, so only the last line survived despite the [] accumulator;
# append instead.
word = []
with open('MyFile.txt') as f:
    for line in f:
        word.append(line.strip())
print(word)
|
import os
# Project base directory (two levels above this settings file).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# NOTE(review): placeholder secret key -- fine for the test suite, never for
# production use.
SECRET_KEY = 'migrations-git-conflicts'
ALLOWED_HOSTS = ['*']
INSTALLED_APPS = [
    'migrations_git_conflicts',
    'tests.app_bar',
    'tests.app_foo',
]
# In-memory SQLite keeps the test runs self-contained and fast.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': ':memory:',
    },
}
#!/usr/bin/env
# coding:utf-8
"""
Created on 17/7/5 下午2:58
base Info
"""
__author__ = 'xiaochenwang94'
__version__ = '1.0'
import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.cross_validation import train_test_split
import time
start_time = time.time()
# Load the prepared feature table; 'shopPower' is the label column.
data = pd.read_csv('./tmp_data/new.csv', header=0)
# Map the raw score to an integer class id for multi-class training.
# BUG FIX: Series.astype() returns a *new* Series -- the original discarded
# the result, leaving the labels as floats.
data['shopPower'] = (data['shopPower'] / 5).astype(int)
print(data['shopPower'])
train_xy, test = train_test_split(data, test_size=0.3, random_state=1)
y = train_xy.shopPower
X = train_xy.drop(['shopPower'], axis=1)
test_y = test.shopPower
test_X = test.drop(['shopPower'], axis=1)
xgb_train = xgb.DMatrix(X, label=y)
# BUG FIX: predict on the feature columns only; the original wrapped the full
# `test` frame, which still contained the shopPower label, so the prediction
# feature set disagreed with the one used for training.
xgb_test = xgb.DMatrix(test_X)
params = {
    'booster': 'gbtree',
    'objective': 'multi:softmax',  # hard class predictions
    'num_class': 11,
    'gamma': 0.1,
    'max_depth': 12,
    'lambda': 2,  # L2 regularisation
    'subsample': 0.7,
    'colsample_bytree': 0.7,
    'min_child_weight': 3,
    'silent': 0,
    'eta': 0.007,
    'seed': 1000,
    'nthread': 7,
}
plst = list(params.items())
num_rounds = 50
watchlist = [(xgb_train, 'train')]
# NOTE(review): early stopping watches only the training set here, and
# num_rounds (50) < early_stopping_rounds (100), so it can never trigger;
# add a validation DMatrix to the watchlist to make it meaningful.
model = xgb.train(plst, xgb_train, num_rounds, watchlist, early_stopping_rounds=100)
model.save_model('./model/xgbEst.model')
print('best best_ntree_limit', model.best_ntree_limit)
preds = model.predict(xgb_test, ntree_limit=model.best_ntree_limit)
np.savetxt('xgb_est.csv', np.c_[range(1, len(test) + 1), preds], delimiter=',', header='Id, shopPower', comments='', fmt='%d')
cost_time = time.time() - start_time
print('xgboost success!', '\n', 'cost time:', cost_time, '(s)')
|
import sys
from django.db import migrations, models
import django.db.models.deletion
def cache_cable_devices(apps, schema_editor):
    """Data migration: backfill the new _termination_a_device /
    _termination_b_device caches on every existing Cable."""
    Cable = apps.get_model('dcim', 'Cable')
    if 'test' not in sys.argv:
        print("\nUpdating cable device terminations...")
    cable_count = Cable.objects.count()
    # Cache A/B termination devices on all existing Cables. Note that the custom save() method on Cable is not
    # available during a migration, so we replicate its logic here.
    for i, cable in enumerate(Cable.objects.all(), start=1):
        # Progress marker every 1000 cables (suppressed under the test runner).
        if not i % 1000 and 'test' not in sys.argv:
            print("[{}/{}]".format(i, cable_count))
        termination_a_model = apps.get_model(cable.termination_a_type.app_label, cable.termination_a_type.model)
        termination_a_device = None
        if hasattr(termination_a_model, 'device'):
            termination_a = termination_a_model.objects.get(pk=cable.termination_a_id)
            termination_a_device = termination_a.device
        termination_b_model = apps.get_model(cable.termination_b_type.app_label, cable.termination_b_type.model)
        termination_b_device = None
        if hasattr(termination_b_model, 'device'):
            termination_b = termination_b_model.objects.get(pk=cable.termination_b_id)
            termination_b_device = termination_b.device
        # update() bypasses save()/signals -- intentional inside a migration.
        Cable.objects.filter(pk=cable.pk).update(
            _termination_a_device=termination_a_device,
            _termination_b_device=termination_b_device
        )
class Migration(migrations.Migration):
    # Schema + data migration: add cached endpoint-device FKs to Cable and
    # backfill them; the backfill is a no-op on reverse.
    dependencies = [
        ('dcim', '0074_increase_field_length_platform_name_slug'),
    ]
    operations = [
        migrations.AddField(
            model_name='cable',
            name='_termination_a_device',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='dcim.Device'),
        ),
        migrations.AddField(
            model_name='cable',
            name='_termination_b_device',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='dcim.Device'),
        ),
        migrations.RunPython(
            code=cache_cable_devices,
            reverse_code=migrations.RunPython.noop
        ),
    ]
|
#!/usr/bin/env/ python
# -*- coding:utf-8 -*-
# Created by: Vanish
# Created on: 2019/3/21
# 23ms
# 输入n个整数,找出其中最小的K个数。
# -*- coding:utf-8 -*-
class Solution:
    def GetLeastNumbers_Solution(self, tinput, k):
        """Return the k smallest values of tinput in ascending order.

        Matches the original contract: [] when k exceeds the input length
        (and for k == 0). Replaces the hand-rolled max-heap with the stdlib
        equivalent heapq.nsmallest, which also no longer reorders tinput
        in place.
        """
        if len(tinput) < k:
            return []
        import heapq  # local import keeps the module dependency-free at top level
        return heapq.nsmallest(k, tinput)
if __name__ == '__main__':
    # Quick manual check: the four smallest of the sample list.
    solver = Solution()
    sample = [4, 5, 1, 6, 2, 7, 3, 8]
    print(solver.GetLeastNumbers_Solution(sample, 4))
|
from django.urls import path
from django.contrib.auth.decorators import login_required
from .views import *
app_name='lide'
urlpatterns = [
    # LISTs
    path('',
        clovek_list, name='lide_list'),
    # DETAILs
    path('detail/<str:slug>/',
        ClovekDetailView.as_view(), name='clovek_detail'),
    path('editace/<str:slug>/',
        login_required(clovek_update), name='clovek_update'),
    # IMPORTs
    path('import_csv/',
        LideImportCSV.as_view(), name='import_csv'),
    # AUTOCOMPLETEs
    # NOTE(review): both autocomplete routes share the name
    # 'clovek_autocomplete', so reverse() by name can only address one of
    # them -- confirm this is intentional.
    path('clovek_autocomplete/',
        clovek_autocomplete, name='clovek_autocomplete'),
    path('rocnik/<int:rocnik_pk>/clovek_autocomplete/',
        clovek_autocomplete, name='clovek_autocomplete'),
    # UTILITY
    path('duplicity/',
        duplicity, name='duplicity'),
]
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 25 15:25:51 2017
@author: Work
"""
# glass identification dataset
# (IPython-exported Python 2 lesson script: uses print statements and the
# %matplotlib magic, so it only runs inside an IPython/Jupyter session.)
import pandas as pd
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/glass/glass.data'
col_names = ['id','ri','na','mg','al','si','k','ca','ba','fe','glass_type']
glass = pd.read_csv(url, names=col_names, index_col='id')
# binary target: 1 for glass types 5-7, 0 for types 1-4
glass['assorted'] = glass.glass_type.map({1:0, 2:0, 3:0, 4:0, 5:1, 6:1, 7:1})
glass.head()
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
sns.lmplot(x='al', y='ri', data=glass, ci=None)
# TODO - scatter plot using Pandas
fig, axs = plt.subplots(1, len(col_names), sharey=True)
for index, feature in enumerate(col_names):
    glass.plot(kind='scatter', x='al', y='ri', ax=axs[index], figsize=(10, 6))
# scatter plot using Matplotlib
plt.scatter(glass.al, glass.ri)
# TODO - fit a linear regression model of 'ri' on 'al'
X = glass['al']
X = X.values.reshape(-1,1)
y = glass['ri']
from sklearn.linear_model import LinearRegression
linreg = LinearRegression()
linreg.fit(X, y)
# look at the coefficients to get the equation for the line, but then how do you plot the line?
print linreg.intercept_
print linreg.coef_
# you could make predictions for arbitrary points, and then plot a line connecting them
# NOTE(review): passing a scalar to predict() relies on a very old
# scikit-learn; modern versions require a 2-D array like [[1]].
print linreg.predict(1)
print linreg.predict(2)
print linreg.predict(3)
# or you could make predictions for all values of X, and then plot those predictions connected by a line
ri_pred = linreg.predict(X)
plt.plot(glass.al, ri_pred, color='red')
# put the plots together
plt.scatter(glass.al, glass.ri)
plt.plot(glass.al, ri_pred, color='red')
# compute prediction for al=2 using the equation
linreg.intercept_ + linreg.coef_ * 2
# compute prediction for al=2 using the predict method
linreg.predict(2)
# examine coefficient for al
zip('al', linreg.coef_)
# increasing al by 1 (so that al=3) decreases ri by 0.0025
1.51699012 - 0.0024776063874696243
# increasing al by 1 (so that al=3) decreases ri by 0.0025
1.51699012 - 0.0024776063874696243
# TODO - scatter assorted' on 'al'
plt.scatter(glass.al, glass.assorted)
# TODO - fit a linear regression model and store the predictions
# NOTE(review): feature_cols is assigned but never used below.
feature_cols = col_names[4]
X = glass['al']
X = X.values.reshape(-1,1)
y = glass['assorted']
linreg.fit(X, y)
assorted_pred = linreg.predict(X)
# scatter plot that includes the regression line
plt.scatter(glass.al, glass.assorted)
plt.plot(glass.al, assorted_pred, color='red')
# understanding np.where
import numpy as np
nums = np.array([5, 15, 8])
# np.where returns the first value if the condition is True, and the second value if the condition is False
np.where(nums > 10, 'big', 'small')
# examine the predictions
assorted_pred[:10]
# transform predictions to 1 or 0
assorted_pred_class = np.where(assorted_pred >= 0.5, 1, 0)
assorted_pred_class
# plot the class predictions
plt.scatter(glass.al, glass.assorted)
plt.plot(glass.al, assorted_pred_class, color='red')
# add predicted class to DataFrame
glass['assorted_pred_class'] = assorted_pred_class
# sort DataFrame by al
# NOTE(review): DataFrame.sort() was removed in pandas 0.20; on modern
# pandas use glass.sort_values('al', inplace=True).
glass.sort('al', inplace=True)
# plot the class predictions again
plt.scatter(glass.al, glass.assorted)
plt.plot(glass.al, glass.assorted_pred_class, color='red')
# fit a linear regression model and store the class predictions
from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression(C=1e9)
# TODO - define X, y, fit it
X = glass.al
X = X.values.reshape(-1,1)
y = glass.assorted
logreg.fit(X,y)
# THEN make predictions on X
assorted_pred_class = logreg.predict(X)
# print the class predictions
assorted_pred_class
# TODO - plot the class predictions (scatter then plot red line as above)
plt.scatter(glass.al, glass.assorted)
plt.plot(glass.al, assorted_pred_class, color='red')
# TODO - store the predicted probabilites of class 1
# hint: use logreg.predict_proba then index the right values
assorted_pred_prob = logreg.predict_proba(X)[:, -1]
# plot the predicted probabilities
plt.scatter(glass.al, glass.assorted)
plt.plot(glass.al, assorted_pred_prob, color='red')
# examine some example predictions
print logreg.predict_proba(1)
print logreg.predict_proba(2)
print logreg.predict_proba(3)
# create a table of probability versus odds
table = pd.DataFrame({'probability':[0.1, 0.2, 0.25, 0.5, 0.6, 0.8, 0.9]})
table['odds'] = table.probability/(1 - table.probability)
table
# exponential function: e^1
np.exp(1)
# time needed to grow 1 unit to 2.718 units
np.log(2.718)
# inverse of log function
np.log(np.exp(5))
# add log-odds to the table
table['logodds'] = np.log(table.odds)
table
# plot the predicted probabilities again
plt.scatter(glass.al, glass.assorted)
plt.plot(glass.al, assorted_pred_prob, color='red')
# TODO - compute predicted log-odds for al=2 using the equation
logodds = logreg.intercept_ + logreg.coef_ * 2
print logodds
# TODO - convert log-odds to odds
# hint: what numpy math function to use?
odds = np.exp(logodds)
odds
# convert odds to probability
prob = odds/(1 + odds)
prob
# compute predicted probability for al=2 using the predict_proba method
logreg.predict_proba(2)[:, 1]
# examine the coefficient for al
zip('al', logreg.coef_[0])
# increasing al by 1 (so that al=3) increases the log-odds by 4.18
logodds = 0.64722323 + 4.1804038614510901
odds = np.exp(logodds)
prob = odds/(1 + odds)
prob
# compute predicted probability for al=3 using the predict_proba method
logreg.predict_proba(3)[:, 1]
# examine the intercept
logreg.intercept_
# convert log-odds to probability
logodds = logreg.intercept_
odds = np.exp(logodds)
prob = odds/(1 + odds)
prob
# final sanity check: confusion matrix of the in-sample predictions
from sklearn import metrics
preds = logreg.predict(X)
print metrics.confusion_matrix(y, preds)
|
import time, pyodbc, csv, numpy as np, pandas as pd, requests, smtplib, matplotlib.pyplot as plt
from tkinter import *
import tkinter as tk
from PIL import ImageTk, Image
from io import BytesIO
# Root tkinter window for the whole application.
master = tk.Tk()
# Window title shown in the title bar.
master.title("Game of Thrones!")
# All window attributes are configured at the top of the script, then the
# functions are defined, and finally the buttons/labels are laid out so every
# callback exists before it is referenced.
# column/row configure with weight=1 lets tkinter auto-adjust to window size.
master.grid_columnconfigure(0,weight=1)
master.grid_columnconfigure(1,weight=1)
master.grid_columnconfigure(2,weight=1)
master.grid_rowconfigure(0,weight=1)
master.grid_rowconfigure(1,weight=1)
#The following function is to be used as a pop up window in case of an error
def Error1():
    """Dialog telling the user the supplied email address looks wrong.

    NOTE(review): this creates a *second* tk.Tk root and re-enters
    tk.mainloop(); tkinter convention is one root plus Toplevel dialogs --
    confirm before changing, since the app relies on this flow today.
    """
    master = tk.Tk()
    master.title("Oops, something went wrong!")
    tk.Label(master,
             text="Oops, something went wrong! Please confirm the email is correct.").grid(row=0, column=0,columnspan=4,pady=0)
    tk.mainloop()
#The following function is to be used as a pop up window in case of an error
def Error2():
    """Dialog shown when the source data/connection is unavailable.

    NOTE(review): not called anywhere in this script -- confirm whether it
    should be wired to the data-loading paths. Same second-Tk-root/nested
    mainloop pattern as Error1.
    """
    master = tk.Tk()
    master.title("Oops, something went wrong!")
    tk.Label(master,
             text="Oops, something went wrong! The source data appears to have changed or there is no internet connection. Please try again later").grid(row=0, column=0,columnspan=4,pady=0)
    tk.mainloop()
#Defining the function of the button
def WinPercentage():
    """Pie chart of the selected king's attacking win percentage, with an
    optional confirmation email."""
    #Data is gathered from a Github CSV file - normally a SQL query would be in place of this
    url = 'https://raw.githubusercontent.com/chrisalbon/war_of_the_five_kings_dataset/master/5kings_battles_v1.csv'
    df = pd.read_csv(url, error_bad_lines=False)
    #Changing strings into values for ease of calculations
    df = df.replace(to_replace ="win",
                    value =1)
    df = df.replace(to_replace ="loss",
                    value =0)
    #getting the input from the selection made on the master window
    king = variable.get()
    #quering for the selection
    KingInfo = df.query(f'attacker_king == "{king}"')
    Tab1 = pd.DataFrame(KingInfo, columns=['attacker_king','attacker_outcome'])
    #calculating win percentage and visualing the result
    winpercentage = Tab1['attacker_outcome'].mean()
    losspercentage = 1 - winpercentage
    breakdown = [winpercentage, losspercentage]
    labels = ['Win','Loss']
    plt.pie(breakdown,
            startangle=90,
            shadow= True,
            autopct='%1.1f%%')
    plt.legend(labels, loc='best')
    plt.title(f'Win Percentage for {king}')
    # Confirmation email: only attempt the send when the checkbox is ticked
    # AND an address was supplied.
    # BUG FIX: the old flow showed the bad-email dialog but then still tried
    # to send to the empty address, producing a second error dialog.
    SendEmail = var1.get()
    Email = e1.get()
    if SendEmail == 1 and Email == "":
        Error1()
    elif SendEmail == 1:
        # SECURITY: credentials hard-coded in source -- move to env/config.
        gmail_user = 'ericsebringtestcode@gmail.com'
        gmail_password = 'Mac12345^'
        sent_from = gmail_user
        to = [f'{Email}']
        subject = 'Confirmation'
        body = 'Hello, this is your confirmation email.'
        email_text = """\
        From: %s
        To: %s
        Subject: %s
        %s
        """ % (sent_from, ", ".join(to), subject, body)
        try:
            server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
            server.ehlo()
            server.login(gmail_user, gmail_password)
            server.sendmail(sent_from, to, email_text)
            server.close()
        except:
            Error1()
    plt.show()
#Defining the function of the button
def WarFreq():
    """Histogram of battles per year, with an optional confirmation email."""
    url = 'https://raw.githubusercontent.com/chrisalbon/war_of_the_five_kings_dataset/master/5kings_battles_v1.csv'
    df = pd.read_csv(url, error_bad_lines=False)
    Freq = df['year']
    #configuing the chart
    plt.hist(Freq, bins=5, color='g', linewidth = 1, label='Number of Wars per Year')
    plt.title('Number of wars per year')
    plt.xticks(np.arange(298, 301, 1))
    plt.xlabel('Year')
    plt.ylabel('# of Wars')
    # Confirmation email: only attempt the send when the checkbox is ticked
    # AND an address was supplied.
    # BUG FIX: the old flow showed the bad-email dialog but then still tried
    # to send to the empty address, producing a second error dialog.
    SendEmail = var1.get()
    Email = e1.get()
    if SendEmail == 1 and Email == "":
        Error1()
    elif SendEmail == 1:
        # SECURITY: credentials hard-coded in source -- move to env/config.
        gmail_user = 'ericsebringtestcode@gmail.com'
        gmail_password = 'Mac12345^'
        sent_from = gmail_user
        to = [f'{Email}']
        subject = 'Confirmation'
        body = 'Hello, this is your confirmation email.'
        email_text = """\
        From: %s
        To: %s
        Subject: %s
        %s
        """ % (sent_from, ", ".join(to), subject, body)
        try:
            server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
            server.ehlo()
            server.login(gmail_user, gmail_password)
            server.sendmail(sent_from, to, email_text)
            server.close()
        except:
            Error1()
    plt.show()
#Defining the function of the button
def MostWars():
    """Pie chart of battle counts per region, with an optional confirmation
    email."""
    url = 'https://raw.githubusercontent.com/chrisalbon/war_of_the_five_kings_dataset/master/5kings_battles_v1.csv'
    df = pd.read_csv(url, error_bad_lines=False)
    df1 = df.groupby('region')['battle_number'].nunique()
    df1.plot(kind='pie',autopct='%.2f')
    plt.ylabel('')
    plt.legend(loc="best")
    plt.title(f'Breakdown of most war affected areas')
    # Confirmation email: only attempt the send when the checkbox is ticked
    # AND an address was supplied.
    # BUG FIX: the old flow showed the bad-email dialog but then still tried
    # to send to the empty address, producing a second error dialog.
    SendEmail = var1.get()
    Email = e1.get()
    if SendEmail == 1 and Email == "":
        Error1()
    elif SendEmail == 1:
        # SECURITY: credentials hard-coded in source -- move to env/config.
        gmail_user = 'ericsebringtestcode@gmail.com'
        gmail_password = 'Mac12345^'
        sent_from = gmail_user
        to = [f'{Email}']
        subject = 'Confirmation'
        body = 'Hello, this is your confirmation email.'
        email_text = """\
        From: %s
        To: %s
        Subject: %s
        %s
        """ % (sent_from, ", ".join(to), subject, body)
        try:
            server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
            server.ehlo()
            server.login(gmail_user, gmail_password)
            server.sendmail(sent_from, to, email_text)
            server.close()
        except:
            Error1()
    plt.show()
#Now we will format the window so it is visually appealing.
tk.Label(master,
         text="Hello! And welcome to Eric Sebring's sample project.\n Please enter your email in the box below and check the box \n to ensure a confirmation email is sent. There are three reports \n that can be generated by the buttons below. \n **The drop down list is only required to be selected for the report 'View Win Percentages by King' ").grid(row=1, column=0, columnspan=5, rowspan=2,pady=0)
tk.Label(master,
         text="Email:").grid(row=8, column=1,pady=0)
var1 = IntVar()
Checkbutton(master, text="Click here to be emailed a confirmation of the report!", variable=var1,borderwidth=0,highlightthickness=0).grid(row=9,column=0,sticky=W,pady=0) #var1.get() = 1 if checked
e1 = tk.Entry(master)
e1.grid(row=9, column=1,pady=0)
# options for the king drop-down
OPTIONS = ['Joffrey/Tommen Baratheon','Robb Stark','Balon/Euron Greyjoy','Stannis Baratheon']
# drop-down (OptionMenu) holding the selected king
variable = StringVar(master)
variable.set(OPTIONS[0]) # default value
# NOTE(review): .grid() returns None, so w (and panel below) are always None.
w = OptionMenu(master, variable, *OPTIONS).grid(row=7,
                                                column=0,
                                                sticky=E,
                                                pady=0)
tk.Button(master,
          text='View Win Percentages by King', command=WinPercentage).grid(row=7,
                                                                           column=1,
                                                                           sticky=tk.E,
                                                                           pady=0)
tk.Button(master,
          text='Frequency of Wars per Year', command=WarFreq).grid(row=6,
                                                                   column=1,
                                                                   sticky=tk.E,
                                                                   pady=0)
tk.Button(master,
          text='Highest War Prone Areas', command=MostWars).grid(row=5,
                                                                 column=1,
                                                                 sticky=tk.E,
                                                                 pady=0)
#An image is used from the internet
img_url = "http://pluspng.com/img-png/game-of-thrones-logo-png-download-game-of-thrones-logo-png-images-transparent-gallery-advertisement-advertisement-400.png"
response = requests.get(img_url)
img_data = response.content
img = ImageTk.PhotoImage(Image.open(BytesIO(img_data)))
panel = tk.Label(master, image=img).grid(row=0,
                                         column=0,
                                         pady=0, columnspan=5)
tk.mainloop()
|
import random
C = 50          # learning-rate constant: step size is C / (C + N(s, a))
GAMMA = 0.7     # discount factor for future rewards
EPSILON = 0.01  # probability of taking a random exploratory action
# https://github.com/MorvanZhou/Reinforcement-learning-with-tensorflow/blob/master/contents/3_Sarsa_maze/RL_brain.py
# https://github.com/studywolf/blog/blob/master/RL/Cat%20vs%20Mouse%20exploration/qlearn.py
class RL(object):
    """Tabular value-learning agent with an epsilon-greedy policy.

    Q-values are stored sparsely in a dict keyed by (state, action) and
    updated with a count-decayed learning rate C / (C + N(s, a)), where
    N counts how many times that pair has been updated.  Supports both
    Q-learning (off-policy) and SARSA (on-policy) backups.
    """

    def __init__(self, actions=None, c=C, gamma=GAMMA, epsilon=EPSILON):
        # Defaults reproduce the original hard-coded behaviour; the
        # parameters allow reuse with other action sets or constants.
        self.actions = [-1, 0, 1] if actions is None else list(actions)
        self.c = c
        self.gamma = gamma
        self.epsilon = epsilon
        self.q_table = {}  # (state, action) -> estimated value
        self.N = {}        # (state, action) -> number of updates so far

    def get_q(self, state, action):
        """Return the current estimate for (state, action), 0.0 if unseen."""
        return self.q_table.get((state, action), 0.0)

    def learn_q(self, state, action, reward, value):
        """Move Q(state, action) toward `value`.

        The first update simply stores `reward`; subsequent updates blend
        in `value` with weight C / (C + N(s, a)), so the step size decays
        as the pair is revisited.
        """
        if (state, action) not in self.N:
            self.N[(state, action)] = 0
        self.N[(state, action)] += 1
        old_value = self.q_table.get((state, action))
        if old_value is None:
            self.q_table[(state, action)] = reward
        else:
            # C / (C + N(s, a))
            self.q_table[(state, action)] = old_value + float(self.c) / float(
                self.c + self.N[(state, action)]) * (value - old_value)

    def choose_action(self, state):
        """Epsilon-greedy selection; ties between best actions break uniformly."""
        if random.random() < self.epsilon:
            return random.choice(self.actions)
        q = [self.get_q(state, a) for a in self.actions]
        maxQ = max(q)
        if q.count(maxQ) > 1:
            # BUG FIX: was `range(3)`, which silently broke for any action
            # set whose length differs from 3.
            best = [i for i in range(len(self.actions)) if q[i] == maxQ]
            return self.actions[random.choice(best)]
        return self.actions[q.index(maxQ)]

    def learn_qlearning(self, state1, action1, reward, state2):
        """Off-policy (Q-learning) backup using the best next-state value."""
        max_q_new = max([self.get_q(state2, a) for a in self.actions])
        self.learn_q(state1, action1, reward, reward + self.gamma * max_q_new)

    def learn_sarsa(self, state1, action1, reward, state2, action2):
        """On-policy (SARSA) backup using the actually-chosen next action."""
        q_next = self.get_q(state2, action2)
        self.learn_q(state1, action1, reward, reward + self.gamma * q_next)
|
#!/usr/bin/python
import socket, struct, sys, getopt
def main(argv):
    # Convert a numeric IP address (decimal, or hexadecimal with -x) to
    # dotted-quad notation and print it.  (Python 2 script.)
    try:
        opts, args = getopt.getopt(argv, "x")
    except getopt.GetoptError:
        print 'ipconverter.py [-x] <ipaddress as number>'
        sys.exit(1)
    if (len(args) == 0):
        print 'ipconverter.py [-x] <ipaddress as number>'
        sys.exit(1)
    if (len(opts) > 0):
        # -x present: interpret the argument as base-16.
        ip = int(args[0], 16)
    else:
        ip = (int)(args[0])
    """
    Convert an IP string to long
    """
    # NOTE(review): '<L' packs the integer little-endian, so the printed
    # octets are reversed relative to the conventional network-order
    # ('!L') mapping — confirm this is intentional for these inputs.
    print socket.inet_ntoa(struct.pack('<L', ip))
if __name__ == "__main__":
    main(sys.argv[1:])
|
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Load the Adult census data; the CSV ships without a header row, so the
# standard column names are assigned explicitly.
census = pd.read_csv("adult.csv", header=None)
census.columns = ['age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital_status',
                  'occupation', 'relationship', 'race', 'gender', 'capital_gain',
                  'capital_loss', 'hours_per_week', 'native_country', 'income_bracket']
def label_fix(label):
    """Map the raw income bracket string to a binary class.

    Returns 0 for the dataset's ' <=50K' label (note the leading space in
    the raw data) and 1 for everything else.
    """
    return 0 if label == ' <=50K' else 1
# Binarise the label column: 0 for ' <=50K', 1 otherwise.
census['income_bracket'] = census['income_bracket'].apply(label_fix)
census['income_bracket'].unique()
x_data = census.drop('income_bracket', axis=1)
y_labels = census['income_bracket']
# 70/30 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(x_data, y_labels, test_size=0.3, random_state=101)
# Vocabulary list
gender = tf.feature_column.categorical_column_with_vocabulary_list("gender", ["Female", "Male"])
# Hash bucket columns for the remaining high-cardinality categoricals.
occupation = tf.feature_column.categorical_column_with_hash_bucket("occupation", hash_bucket_size=1000)
marital_status = tf.feature_column.categorical_column_with_hash_bucket("marital_status", hash_bucket_size=1000)
relationship = tf.feature_column.categorical_column_with_hash_bucket("relationship", hash_bucket_size=1000)
education = tf.feature_column.categorical_column_with_hash_bucket("education", hash_bucket_size=1000)
workclass = tf.feature_column.categorical_column_with_hash_bucket("workclass", hash_bucket_size=1000)
native_country = tf.feature_column.categorical_column_with_hash_bucket("native_country", hash_bucket_size=1000)
# Numeric
age = tf.feature_column.numeric_column("age")
education_num = tf.feature_column.numeric_column("education_num")
capital_gain = tf.feature_column.numeric_column("capital_gain")
capital_loss = tf.feature_column.numeric_column("capital_loss")
hours_per_week = tf.feature_column.numeric_column("hours_per_week")
feat_cols = [gender, occupation, marital_status, relationship, education, workclass, native_country, age, education_num,
             capital_gain, capital_loss, hours_per_week]
# Train a linear classifier on the training split (v1 estimator API).
input_func = tf.estimator.inputs.pandas_input_fn(x=X_train, y=y_train, batch_size=100, num_epochs=None, shuffle=True)
model = tf.estimator.LinearClassifier(feature_columns=feat_cols)
model.train(input_fn=input_func, steps=5000)
pred_fn = tf.estimator.inputs.pandas_input_fn(x=X_test, batch_size=len(X_test), shuffle=False)
# NOTE(review): test1.csv is loaded and test_pred_fn is built here, but the
# predictions below use pred_fn (the held-out X_test) — confirm whether the
# external test set was meant to be scored instead.
test_census = pd.read_csv("test1.csv", header=None)
test_census.columns = ['age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital_status',
                       'occupation', 'relationship', 'race', 'gender', 'capital_gain',
                       'capital_loss', 'hours_per_week', 'native_country', 'income_bracket']
test_data = test_census.drop('income_bracket', axis=1)
test_pred_fn = tf.estimator.inputs.pandas_input_fn(x=test_data, batch_size=len(test_data), shuffle=False)
predictions = list(model.predict(input_fn=pred_fn))
final_preds = []
for pred in predictions:
    # class_ids holds the predicted class index; [0] unwraps the 1-element array.
    final_preds.append(pred['class_ids'][0])
# print(final_preds[0])
print(classification_report(y_test, final_preds))
######################################################################################################
# more-complex-files.py by Dave Ames
# david.john.ames@gmail.com
# @davidames
#
# Problem: More Advanced File Handling
#
# Examples of more advanced file handling
#######################################################################################################
# Read the poem file into a list of stripped lines, then echo them.
# A context manager guarantees the file handle is closed even if reading
# raises (the original open()/close() pair leaked the handle on error).
with open("pi-poems.txt", "r") as file_handle:
    # strip() removes the trailing newline and surrounding whitespace
    the_text = [line.strip() for line in file_handle]

# Pull each item out of the list and print it on its own line
for item in the_text:
    print(item)
import builtins
class Hand:
    """A player's hand of cards with blackjack-style valuation."""

    def __init__(self):
        self.player_hand = []

    def __str__(self):
        # Space-terminated concatenation of each card's string form.
        return ''.join(str(card) + ' ' for card in self.player_hand)

    def add_card(self, card):
        """Append a card to the hand and return the updated card list."""
        self.player_hand.append(card)
        return self.player_hand

    def get_value(self):
        """Total the hand using builtins.FACE_VALUES, promoting aces.

        Each 'A' adds an extra 10 as long as the running total is still
        at or below 11 before the bump (i.e. the promotion cannot bust).
        """
        total = sum(builtins.FACE_VALUES[card.get_face()]
                    for card in self.player_hand)
        for card in self.player_hand:
            if card.get_face() == 'A' and total <= 11:
                total += 10
        return total
|
# -*- coding: utf-8 -*-
__author__ = 'Alexandr'
import webapp2
from views import GroupSelectionPage, DoTheMagic, ItsAlive, decorator
# WSGI routing table: each tuple maps a URL path to its webapp2 handler.
application = webapp2.WSGIApplication([
    ('/', GroupSelectionPage),
    ('/dothemagic', DoTheMagic),
    ('/its-alive', ItsAlive),
    # OAuth callback route supplied by the oauth2 decorator from views.
    (decorator.callback_path, decorator.callback_handler()),
], debug=True)
|
from ctapipe.reco.hillas_intersection import HillasIntersection
import copy
from tqdm import tqdm
from ctapipe.image import tailcuts_clean, dilate
from ctapipe.image.hillas import hillas_parameters, HillasParameterizationError
import numpy as np
import astropy.units as u
__all__ = ["hillas_parameterisation", "reconstruct_event", "perform_reconstruction"]
# First up is our function to perform the image cleaning and Hillas parameterisation
def hillas_parameterisation(image_event, geometry, tel_x, tel_y,
                            picture_thresh=10, boundary_thresh=5,
                            intensity_cut=80, local_distance=3):
    """Clean each camera image of one event and compute Hillas parameters.

    Images that fail cleaning or the quality cuts are zeroed IN PLACE in
    `image_event`.  Returns (hillas_parameter_dict, tel_x_dict, tel_y_dict)
    keyed by telescope index; only telescopes passing the cuts appear.
    Telescope positions are stored with the axes swapped and one sign
    flipped, matching the degree-based geometry built below.
    """
    tel_x_dict, tel_y_dict, hillas_parameter_dict = {}, {}, {}
    # Make a copy of the geometry class (we need to change it a bit)
    geometryh = copy.deepcopy(geometry)
    # Re-express pixel coordinates in degrees with the y-axis flipped
    # (mirrored by the x/y swap applied to tel positions further down).
    geometryh.pix_x = 1 * geometry.pix_x.value * u.deg
    geometryh.pix_y = -1 * geometry.pix_y.value * u.deg
    t = 0
    # Loop over all our images in this event
    for image in image_event:
        image_shape = image.shape
        if np.sum(image) == 0:
            # NOTE(review): this `continue` skips the `t += 1` at the loop
            # end, so later telescopes may map to the wrong tel_x/tel_y
            # entries — confirm whether t should still be incremented here.
            image[:, :] = 0
            continue
        # Clean the images using split-level cleaning
        mask = tailcuts_clean(geometry, image.ravel(),
                              picture_thresh=picture_thresh,
                              boundary_thresh=boundary_thresh).reshape(image_shape)
        image_clean = np.zeros(image_shape)
        image_clean[mask] = image[mask]
        if np.sum(image_clean) == 0:
            image[:, :] = 0
        else:
            # Grow the cleaned region 4 times, then zero everything outside it.
            for i in range(4):
                mask = dilate(geometry, mask.ravel()).reshape(image_shape)
            image[np.invert(mask)] = 0
        # Make Hillas parameters and make some simple cuts on them
        try:
            hill = hillas_parameters(geometryh, image_clean.ravel())
            centroid_dist = np.sqrt(hill.x ** 2 + hill.y ** 2)
            # Cut on intensity on distance from camera centre
            if hill.intensity > intensity_cut and centroid_dist < local_distance * u.deg and hill.width > 0 *u.deg:
                # Store positions with the same axis swap/flip as geometryh.
                tel_x_dict[t] = tel_y[t] * -1
                tel_y_dict[t] = tel_x[t]
                hillas_parameter_dict[t] = hill
            else:
                image[:, :] = 0
        # Skip if we can't make our Hillas parameters
        except HillasParameterizationError:
            t = t
            image[:, :] = 0
        t += 1
    return hillas_parameter_dict, tel_x_dict, tel_y_dict
# This is a more general function to perform the event reconstruction
# This is a more general function to perform the event reconstruction
def reconstruct_event(image_event, geometry, tel_x, tel_y, hillas_intersector,
                      min_tels=2, intensity_cut=80, local_distance=3,
                      picture_thresh=10, boundary_thresh=5):
    """Parameterise one event and reconstruct its direction and core.

    Returns (nominal_x, nominal_y, core_x, core_y, hillas_parameters_event)
    — nominal coordinates in radians and the per-telescope summary list —
    or a tuple of five Nones when fewer than `min_tels` telescopes survive
    the cuts or the nominal reconstruction fails.
    """
    # Run our last function to perform Hillas parameterisation
    hillas_parameter_dict, tel_x_dict, tel_y_dict = hillas_parameterisation(image_event, geometry, tel_x, tel_y,
                                                                            intensity_cut=intensity_cut,
                                                                            local_distance=local_distance,
                                                                            picture_thresh=picture_thresh,
                                                                            boundary_thresh=boundary_thresh)
    # If we have enough telescopes perform reconstruction
    if len(hillas_parameter_dict) > min_tels - 1:
        # Perform reconstruction in both ground and nominal system
        nominal_x, nominal_y, _, _ = hillas_intersector.reconstruct_nominal(hillas_parameter_dict)
        ground_x, ground_y, _, _ = hillas_intersector.reconstruct_tilted(hillas_parameter_dict,
                                                                         tel_x_dict, tel_y_dict)
        if not np.isnan(nominal_x):
            hillas_parameters_event = []
            # Loop over all good telescopes and fill up the Hillas parameter output
            for tel in range(len(tel_x)):
                try:
                    hill = hillas_parameter_dict[tel]
                    # Impact distance (note the axis swap/flip used throughout)
                    r = np.sqrt((tel_x[tel].value - (ground_y * -1)) ** 2 + (tel_y[tel].value - ground_x) ** 2)
                    x_cent, y_cent = hill.x.to(u.rad).value, hill.y.to(u.rad).value
                    # Displacement of CoG from reconstructed position
                    disp = np.sqrt((nominal_x - x_cent) ** 2 +
                                   (nominal_y - y_cent) ** 2)
                    # Fill our output that we can use for rejection
                    hillas_parameters_event.append([np.log10(hill.intensity),
                                                    hill.width.to(u.deg).value,
                                                    hill.length.to(u.deg).value,
                                                    np.rad2deg(disp),
                                                    np.rad2deg(np.sqrt(x_cent ** 2 + y_cent ** 2)),
                                                    np.log10(r)])
                # BUG FIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit.  Exception keeps the
                # intended "pad with zeros" behaviour for telescopes
                # missing from the dict (KeyError) or failing the maths.
                except Exception:
                    hillas_parameters_event.append([0., 0., 0., 0., 0., 0.])
            return nominal_x, nominal_y, ground_y * -1, ground_x, hillas_parameters_event
    # Don't return anything if not enough telescopes
    return None, None, None, None, None
# Finally package everything up to perform reconstruction
# Finally package everything up to perform reconstruction
def perform_reconstruction(images, geometry, tel_x, tel_y,
                           min_tels=2, intensity_cut=80, local_distance=3,
                           picture_thresh=10, boundary_thresh=5):
    """Reconstruct every event in `images` and stack the results.

    Returns (reconstructed_parameters, hillas_parameters, selected): per
    selected event [nominal_x, nominal_y, core_x, core_y], the matching
    per-telescope Hillas summaries, and a boolean mask over ALL input
    events marking which were kept.
    """
    # Hillas intersection object
    hillas_intersector = HillasIntersection()
    selected = []
    reconstructed_parameters, hillas_parameters = [], []
    # Loop over all of our events an perform reconstruction
    for event in images:
        try:
            nominal_x, nominal_y, core_x, core_y, hillas = reconstruct_event(event, geometry,
                                                                             tel_x, tel_y,
                                                                             hillas_intersector,
                                                                             min_tels=min_tels,
                                                                             intensity_cut=intensity_cut,
                                                                             local_distance=local_distance,
                                                                             picture_thresh=picture_thresh,
                                                                             boundary_thresh=boundary_thresh)
            # Keep events reconstructed within 3 degrees of the camera centre.
            if nominal_x is not None and np.rad2deg(np.sqrt(nominal_x ** 2 + nominal_y ** 2)) < 3:
                selected.append(True)
                reconstructed_parameters.append([nominal_x, nominal_y, core_x, core_y])
                hillas_parameters.append(hillas)
            else:
                selected.append(False)
        except ZeroDivisionError:
            # Degenerate geometry in the intersection — reject the event.
            selected.append(False)
    return np.array(reconstructed_parameters), np.array(hillas_parameters), np.array(selected)
|
import path_utils
import Statistics
"""
For getting statistics with various combos of parameters.
"""
# ############################## Basic runs
# Sweep env_name x N_hidden_layers for a small fixed-architecture FFNN:
# the first dict holds parameters common to every run, the second the
# parameter grid to vary; each combination is sampled N_samples times and
# scored over N_episodes episodes.
Statistics.run_vary_params(
    {
        "NN": "FFNN",
        "N_hidden_units": 4,
        "use_bias": False,
        "max_episode_steps": 200,
    },
    {
        "env_name": ["CartPole-v0", "Pendulum-v0", "MountainCar-v0"],
        "N_hidden_layers": [1, 2],
    },
    N_samples=1000,
    N_episodes=10,
)
|
#Real-time detection of microphone signaling
import pyaudio
import numpy as np
import time
from datetime import datetime
import sys
import wave
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from pymatbridge import Matlab
from time import sleep
import serial
from tkinter import *
from threading import Lock
# Debug aid: dump the module search path (useful when pymatbridge or
# sounddevice fails to import in this environment).
for name in sys.path:
    print(name)
#Serial communication problems:
#1.Send and read conflict
#2. Baud rate:9600 => 960 bytes per second
# signal1 = True
# signal2 = True
# def LedOn():
#
# while True:
# ser.write('S'.encode('utf-8'))
# print(ser.readline().decode('utf-8'))
# if ser.readline().decode('utf-8').strip(" ") == "Spray!":
# pass
# break
#
#
#
#
# def LedOff():
#
# for i in range(0):
#
# while True:
# ser.write('U'.encode('utf-8'))
# print(ser.readline().decode('utf-8'))
# if ser.readline().decode('utf-8') == "Unspray":
# pass
# break
#
# while True:
# root = tk.Tk()
#
# frame = tk.Frame(root)
#
# frame.pack(side = tk.BOTTOM) #put the btn into the bottom of frame
#
# btn1 = tk.Button(frame,text="LedOn",fg="red",command=LedOn)
# btn1.config(height=10, width=10) #adjust the dimension of btn
# btn1.pack(side=tk.LEFT)
#
# btn2 = tk.Button(frame,text = "LedOff",command =LedOff)
# btn2.config(height=10, width=10)
# btn2.pack(side = tk.RIGHT)
# root.mainloop() #To make the btn box static
#Matlab source: https://ww2.mathworks.cn/help/matlab/matlab_external/matlab-arrays-as-python-variables.html
import sounddevice as sd
def main():
    """Show the startup panel that collects CHUNK/RATE/microphone settings
    and the plot option, then hand off to Run() when the user confirms."""

    def set_value():
        """Parse the entry fields, close the panel and start capture."""
        plotgraph = states()  # whether the waveform-plot checkbox is ticked
        try:
            CHUNK = int(CHUNK_var.get())
            RATE = int(RATE_var.get())
            mic = int(mic_var.get())
            display_var.set("Chunk:{0}\n Rate:{1}\n Microphone:{2}".format(CHUNK, RATE, mic))
            my_window.destroy()
            Run(CHUNK, RATE, mic, plotgraph)
        except Exception:
            # BUG FIX: was a bare `except:` — Exception keeps the same
            # "show ERROR" fallback without also swallowing
            # KeyboardInterrupt/SystemExit.
            display_var.set("ERROR")
            return

    def soundinfo():
        """Pop up a window listing the audio devices sounddevice can see."""
        tk = Tk()
        micinfo = str(sd.query_devices())
        tk.title("DevicesInfo")
        soundlabel = Label(tk, text=micinfo)
        soundlabel.grid(row=0, column=0)
        print(sd.query_devices())
        tk.mainloop()

    my_window = Tk()
    window_title = my_window.title("Initiation Panel")
    display_var = StringVar(value='Please select the microphone \n and sampling rate for audio.')
    # Defaults match the constants Run() previously hard-coded.
    CHUNK_var = StringVar(my_window, value='44100')
    RATE_var = StringVar(my_window, value='44100')
    mic_var = StringVar(my_window, value='2')
    img = PhotoImage(file="GiddyFarawayGalapagossealion-max-1mb.gif")
    label_1 = Label(my_window, text="CHUNK:")  # Label can use text or textvariable
    entry_1 = Entry(my_window, textvariable=CHUNK_var)
    label_4 = Label(my_window, text='RATE:')
    entry_3 = Entry(my_window, textvariable=RATE_var)
    label_3 = Label(my_window, text='Microphone:')
    entry_2 = Entry(my_window, textvariable=mic_var)
    button_1 = Button(my_window, text="Enter values:", command=set_value)
    button_1.config(image=img)
    label_2 = Label(my_window, textvariable=display_var)
    # Form layout: labels in column 0, entries in column 1.
    label_1.grid(row=0, column=0)
    entry_1.grid(row=0, column=1)
    label_4.grid(row=1, column=0)
    entry_3.grid(row=1, column=1)
    label_3.grid(row=2, column=0)
    entry_2.grid(row=2, column=1)
    button_1.grid(row=5, column=0)
    label_2.grid(row=5, column=1)
    # add_on button for showing sound devices
    button_2 = Button(my_window, text="Show Available Devices", command=soundinfo)
    button_2.grid(row=3)
    # add_on for audio graph plot
    plot = IntVar()

    def states():
        """Return True when the 'Plotting Graph' checkbox is ticked."""
        if plot.get() == 1:
            graph = True
        else:
            graph = False
        return graph  # the returned variable name not same as the allocated var

    checkbox1 = Checkbutton(my_window, text="Plotting Graph", variable=plot)
    checkbox1.grid(row=4, column=1)
    my_window.mainloop()
def Run(C, R, mic, Plot):
    """Capture audio in CHUNK-sized reads, hand each buffer to a MATLAB
    classifier, and send a spray command to the serial device on detection.

    Args:
        C: frames per stream read (CHUNK).
        R: sampling rate in Hz (RATE).
        mic: input device index chosen in the GUI.
        Plot: if True, plot the waveform captured on the second read.
    """
    CHUNK = C  # number of frames read from the stream at a time
    RATE = R   # sampling rate, frames per second
    serSignal = 'S'    # serial command: trigger the spray
    KnockSignal = 'K'  # serial command: announce the knock window
    Input_Device_Index = mic
    plot = Plot
    # Define the serial port (Windows COM naming).
    ser_port = "COM8"
    baud_rate = 9600
    count = 0
    flag = False
    signal = False
    # Bridge to MATLAB for the signal-classification script.
    mlab = Matlab(executable=r"D:\MATLAB\bin\matlab.exe")
    mlab.start()
    p = pyaudio.PyAudio()
    # NOTE(review): input_device_index is None, so the GUI-selected `mic`
    # is ignored and the system default input is used — confirm intent.
    stream = p.open(format=pyaudio.paInt16, channels=1, rate=RATE, input=True, input_device_index=None
                    , frames_per_buffer=CHUNK)
    ser = serial.Serial(ser_port, baud_rate)
    print(ser.readline().decode("utf-8"))  # Arduino banner / ready message
    print("Input delay is %f" % stream.get_input_latency())
    while (True):
        for i in range(int(3)):  # three counter ticks per knock cycle
            if (count == 1):
                # Tell the Arduino a knock window is starting, then give
                # the user a beat to knock before capturing the buffer.
                ser.write(KnockSignal.encode("utf-8"))
                sleep(.32)  # **change here (0.1s per 5000samples)
                flag = True
                print("Must Knock Here")
            if flag == True:
                np.set_printoptions(threshold=sys.maxsize)
                # BUG FIX: np.fromstring is deprecated for binary input;
                # np.frombuffer is the supported equivalent (returns a
                # read-only view, which is fine — `data` is never mutated).
                data = np.frombuffer(stream.read(CHUNK), dtype=np.short)
                time = np.arange(0, CHUNK)
                if count == 1:
                    # Dump the raw samples for offline inspection.
                    print("Write")
                    with open("SignalTest.txt", "wt") as out_file:
                        out_file.writelines(str(data))  # it can only write string
                if plot == True and count == 2:
                    # Capture a second buffer and show it, reporting the
                    # wall-clock delay between the two reads.
                    past = stream.get_time()
                    np.set_printoptions(threshold=sys.maxsize)
                    data = np.frombuffer(stream.read(CHUNK), dtype=np.short)
                    present = stream.get_time()
                    delay = present - past
                    print("The delay is %f" % delay)
                    plt.title('AudioSample')
                    plt.plot(time, data)
                    plt.ylim(-40000, 40000)
                    plt.ylabel('Amplitude')
                    plt.xlabel('Sample Size')
                # Ask MATLAB whether this buffer contains a knock.
                dataprocess = mlab.run_func('final_judge.m', {"arg1": data})
                print(np.amax(data))
                print(dataprocess['result'])
                d1 = dataprocess['result']
                if d1 == 1:
                    # Knock detected: trigger the spray over serial.
                    ser.write(serSignal.encode("utf-8"))  # encode is used for string.encode()
                plt.show()
                flag = False
                count = 0
            count += 1
    # NOTE(review): unreachable — the `while (True)` loop above never
    # breaks, so this cleanup never runs.
    mlab.stop()
    out_file.close()
    stream.stop_stream()
    stream.close()
    p.terminate()
    sys.exit(0)
# Launch the configuration panel (the script has no __main__ guard).
main()
|
#!/usr/bin/env python3
import logging
logger = logging.getLogger(__name__)
from api.models.BriefingManager import BRIEFING_MANAGER
def trigger(userName, body):
    """This function is triggered by the api.

    Logs the request body, starts a briefing run for `userName` via the
    shared BRIEFING_MANAGER singleton, and returns an empty 204 response.
    """
    logger.info(body)
    BRIEFING_MANAGER.run(userName)
    return '', 204
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple call to a print function preceding other computations.
The call may be wrapped inside a py_func, but tf.Print should be used if
possible. The subsequent computations will be gated by the print function
execution.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import reference_test_base
import numpy as np
import tensorflow.compat.v1 as tf
# TODO(mdan): Allow functions that do not have a return value.
# (either that or raise an error if a return value is missing)
# Test fixtures: each function exercises one print pattern whose exact form
# the autograph conversion must preserve (lone call, multiple values,
# non-tensor values, print under a cond, tf.print).  Do not restructure —
# the specific construct is what the tests below verify.
def lone_print(x):
  print(x)
  return x + 1
def print_multiple_values(x):
  print('x is', x)
  return x + 1
def multiple_prints(x, y):
  print('x is', x)
  print('y is', y)
  return x + 1
def print_with_nontf_values(x):
  print('x is', x, {'foo': 'bar'})
  return x + 1
def print_in_cond(x):
  # The print runs only on the x == 0 branch, exercising gated conversion.
  if x == 0:
    print(x)
  return x
def tf_print(x):
  tf.print(x)
  return x + 1
class ReferenceTest(reference_test_base.TestCase):
  """Checks that the compiled (autograph) fixtures match native execution."""

  def test_lone_print(self):
    self.assertNativeMatchesCompiled(lone_print, 1)
    self.assertNativeMatchesCompiled(lone_print, np.array([1, 2, 3]))
  def test_print_multiple_values(self):
    self.assertNativeMatchesCompiled(print_multiple_values, 1)
    self.assertNativeMatchesCompiled(print_multiple_values, np.array([1, 2, 3]))
  def test_multiple_prints(self):
    self.assertNativeMatchesCompiled(multiple_prints, 1, 2)
    self.assertNativeMatchesCompiled(multiple_prints, np.array([1, 2, 3]), 4)
  def test_print_with_nontf_values(self):
    self.assertNativeMatchesCompiled(print_with_nontf_values, 1)
    self.assertNativeMatchesCompiled(print_with_nontf_values,
                                     np.array([1, 2, 3]))
  def test_print_in_cond(self):
    # Cover both branches: the one that prints (0) and the one that doesn't.
    self.assertNativeMatchesCompiled(print_in_cond, 0)
    self.assertNativeMatchesCompiled(print_in_cond, 1)
  def test_tf_print(self):
    self.assertTfMatchesCompiled(tf_print, 0)
if __name__ == '__main__':
  tf.test.main()
|
"""CSC148 Lab 5: Linked Lists
=== CSC148 Fall 2020 ===
Department of Mathematical and Computational Sciences,
University of Toronto Mississauga
=== Module description ===
This module runs timing experiments to determine how the time taken
to call `len` on a Python list vs. a LinkedList grows as the list size grows.
"""
from timeit import timeit
from linked_list import LinkedList
from goated_list import GoatedList
NUM_TRIALS = 3000  # The number of timed len() calls per measurement.
SIZES = [1000, 2000, 4000, 8000, 16000]  # The list sizes to try.


def profile_len(list_class: type, size: int) -> float:
    """Return the time taken for NUM_TRIALS calls of len() on a list of the
    given class and size.

    Precondition: list_class is list, LinkedList or GoatedList.
    """
    # Create an instance of list_class containing <size> 0's.
    my_list = [0] * size
    # If it is a LinkedList
    if list_class == LinkedList:
        my_list = LinkedList(my_list)
    # If it is a GoatedList
    elif list_class == GoatedList:
        my_list = GoatedList(my_list)
    # BUG FIX: the trial count was hard-coded to 1000, silently ignoring the
    # NUM_TRIALS constant declared above; use the constant so they agree.
    return timeit('len(my_list)', number=NUM_TRIALS, globals=locals())
if __name__ == '__main__':
    import matplotlib.pyplot as plt

    # Try both Python's list and our LinkedList
    all_times = {}
    for list_class in [list, LinkedList, GoatedList]:
        times = []
        # One figure per list class so growth curves can be compared visually.
        plt.figure()
        # Try each list size
        for s in SIZES:
            time = profile_len(list_class, s)
            times.append(time)
            print(f'[{list_class.__name__}] Size {s:>6}: {time}')
        # Plot time against list size for this class.
        plt.plot(SIZES, times)
        plt.xlabel('Sizes of the Lists')
        plt.ylabel('Time')
        plt.suptitle(f'{list_class.__name__}')
        plt.show()
|
# -*- coding: utf-8 -*-
# Create your views here.
from __future__ import unicode_literals
from django.shortcuts import render
from django.shortcuts import render,get_object_or_404,redirect
from django.template import loader
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
from .models import Sensor
from .serializers import SensorSerializer
from django.http import Http404,HttpResponse,JsonResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
def get_context(request):
    """Report whether any of the s0..s3 query parameters differ from the
    stored noise level of the first four sensors.

    Each missing query parameter defaults to the sensor's current value, so
    an absent parameter can never register as different.  Returns a
    JsonResponse of the form {'is_different': 0 or 1}.
    """
    # Fetch the sensors once instead of issuing four separate queries, and
    # replace the four copy-pasted comparison branches with one loop.
    current_noise = [sensor.noise for sensor in Sensor.objects.all()[:4]]
    data = {
        'is_different': 0
    }
    for index, noise in enumerate(current_noise):
        requested = int(request.GET.get('s%d' % index, noise))
        if requested != noise:
            data['is_different'] = 1
    return JsonResponse(data)
# Create your views here.
def index(request):
    # Overview page listing every sensor.
    all_sensors = Sensor.objects.all()
    context = {'all_sensors':all_sensors}
    return render(request,'noise/index.html',context)
def detail(request):
    # Detail page, also rendered from the full sensor list.
    all_sensors = Sensor.objects.all()
    context = {'all_sensors':all_sensors}
    return render(request,'noise/detail.html',context)
def home(request):
    # Static landing page.
    return render(request,'noise/home.html')
def future(request):
    # Static "future work" page.
    return render(request,'noise/future.html')
def team(request):
    # NOTE(review): renders 'noise/base.html' rather than a team-specific
    # template — confirm this is intentional.
    return render(request,'noise/base.html')
# List all Sensors or create a new one
# Sensors/
class SensorList(APIView):
    """List all sensors or create a new one (endpoint: Sensors/)."""
    def get(self,request):
        # Serialise every sensor row.
        Sensors = Sensor.objects.all()
        serializer = SensorSerializer(Sensors,many=True)
        return Response(serializer.data)
    def post(self,request):
        # Validate and persist a new sensor; 400 with field errors otherwise.
        serializer = SensorSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class SensorDetail(APIView):
    """Retrieve, update or delete one sensor addressed by primary key."""
    def get_object(self, pk):
        # Translate a missing row into an HTTP 404.
        try:
            return Sensor.objects.get(pk=pk)
        except Sensor.DoesNotExist:
            raise Http404
    def get(self, request, pk):
        # NOTE(review): this call renders the `detail` template view and
        # discards the result — it looks like leftover debug code; confirm
        # whether it can be removed.
        detail(request)
        snippet = self.get_object(pk)
        serializer = SensorSerializer(snippet)
        return Response(serializer.data)
    def put(self, request, pk):
        # Full update; 400 with field errors when validation fails.
        snippet = self.get_object(pk)
        serializer = SensorSerializer(snippet, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    def delete(self, request, pk):
        snippet = self.get_object(pk)
        snippet.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Script for mass-commenting Jenkins test triggers on a Beam PR."""
import itertools
import os
import socket
import sys
import time
import traceback
import re
import requests
from datetime import datetime
# This list can be found by querying the Jenkins API, see BEAM-13951
COMMENTS_TO_ADD = [
"Run CommunityMetrics PreCommit",
"Run Dataflow Runner Nexmark Tests",
"Run Dataflow Runner V2 Java 11 Nexmark Tests",
"Run Dataflow Runner V2 Java 17 Nexmark Tests",
"Run Dataflow Runner V2 Nexmark Tests",
"Run Dataflow Streaming ValidatesRunner",
"Run Dataflow ValidatesRunner Java 11",
"Run Dataflow ValidatesRunner Java 17",
"Run Dataflow ValidatesRunner",
"Run Direct Runner Nexmark Tests",
"Run Direct ValidatesRunner Java 11",
"Run Direct ValidatesRunner Java 17",
"Run Direct ValidatesRunner in Java 11",
"Run Direct ValidatesRunner",
"Run Flink Runner Nexmark Tests",
"Run Flink ValidatesRunner Java 11",
"Run Flink ValidatesRunner",
"Run Go Flink ValidatesRunner",
"Run Go PostCommit",
"Run Go PreCommit",
"Run Go Samza ValidatesRunner",
"Run Go Spark ValidatesRunner",
"Run GoPortable PreCommit",
"Run Java 11 Examples on Dataflow Runner V2",
"Run Java 17 Examples on Dataflow Runner V2",
"Run Java Dataflow V2 ValidatesRunner Streaming",
"Run Java Dataflow V2 ValidatesRunner",
"Run Java Examples on Dataflow Runner V2",
"Run Java Examples_Direct",
"Run Java Examples_Flink",
"Run Java Examples_Spark",
"Run Java Flink PortableValidatesRunner Streaming",
"Run Java Portability examples on Dataflow with Java 11",
"Run Java PostCommit",
"Run Java PreCommit",
"Run Java Samza PortableValidatesRunner",
"Run Java Spark PortableValidatesRunner Batch",
"Run Java Spark v2 PortableValidatesRunner Streaming",
"Run Java Spark v3 PortableValidatesRunner Streaming",
"Run Java examples on Dataflow Java 11",
"Run Java examples on Dataflow Java 17",
"Run Java examples on Dataflow with Java 11",
"Run Java_Examples_Dataflow PreCommit",
"Run Java_Examples_Dataflow_Java11 PreCommit",
"Run Java_Examples_Dataflow_Java17 PreCommit",
"Run Java_PVR_Flink_Batch PreCommit",
"Run Java_PVR_Flink_Docker PreCommit",
"Run Javadoc PostCommit",
"Run Jpms Dataflow Java 11 PostCommit",
"Run Jpms Dataflow Java 17 PostCommit",
"Run Jpms Direct Java 11 PostCommit",
"Run Jpms Direct Java 17 PostCommit",
"Run Jpms Flink Java 11 PostCommit",
"Run Jpms Spark Java 11 PostCommit",
"Run PortableJar_Flink PostCommit",
"Run PortableJar_Spark PostCommit",
"Run Portable_Python PreCommit",
"Run PostCommit_Java_Dataflow",
"Run PostCommit_Java_DataflowV2",
"Run PostCommit_Java_Hadoop_Versions",
"Run Python 3.7 PostCommit",
"Run Python 3.8 PostCommit",
"Run Python 3.9 PostCommit",
"Run Python 3.10 PostCommit",
"Run Python Dataflow V2 ValidatesRunner",
"Run Python Dataflow ValidatesContainer",
"Run Python Dataflow ValidatesRunner",
"Run Python Examples_Dataflow",
"Run Python Examples_Direct",
"Run Python Examples_Flink",
"Run Python Examples_Spark",
"Run Python Flink ValidatesRunner",
"Run Python PreCommit",
"Run Python Samza ValidatesRunner",
"Run Python Spark ValidatesRunner",
"Run PythonDocker PreCommit",
"Run PythonDocs PreCommit",
"Run PythonFormatter PreCommit",
"Run PythonLint PreCommit",
"Run Python_PVR_Flink PreCommit",
"Run RAT PreCommit",
"Run Release Gradle Build",
"Run SQL PostCommit",
"Run SQL PreCommit",
"Run SQL_Java11 PreCommit",
"Run SQL_Java17 PreCommit",
"Run Samza ValidatesRunner",
"Run Spark Runner Nexmark Tests",
"Run Spark StructuredStreaming ValidatesRunner",
"Run Spark ValidatesRunner Java 11",
"Run Spark ValidatesRunner",
"Run Spotless PreCommit",
"Run Twister2 ValidatesRunner",
"Run Typescript PreCommit",
"Run ULR Loopback ValidatesRunner",
"Run Whitespace PreCommit",
"Run XVR_Direct PostCommit",
"Run XVR_Flink PostCommit",
"Run XVR_JavaUsingPython_Dataflow PostCommit",
"Run XVR_PythonUsingJavaSQL_Dataflow PostCommit",
"Run XVR_PythonUsingJava_Dataflow PostCommit",
"Run XVR_Samza PostCommit",
"Run XVR_Spark PostCommit",
"Run XVR_Spark3 PostCommit",
]
def executeGHGraphqlQuery(accessToken, query, timeout=60):
    '''Runs graphql query on GitHub.

    :param accessToken: GitHub token, sent as a Bearer credential.
    :param query: GraphQL query/mutation string.
    :param timeout: seconds before the HTTP request is aborted.
        FIX: the original had no timeout, so a stalled connection
        would hang this script forever.
    :return: decoded JSON response body.
    '''
    url = 'https://api.github.com/graphql'
    headers = {'Authorization': 'Bearer %s' % accessToken}
    r = requests.post(url=url, json={'query': query}, headers=headers, timeout=timeout)
    return r.json()
def getSubjectId(accessToken, prNumber):
    '''Return the GraphQL node id of apache/beam pull request ``prNumber``.'''
    query = '''
query FindPullRequestID {
  repository(owner:"apache", name:"beam") {
    pullRequest(number:%s) {
      id
    }
  }
}
''' % prNumber
    response = executeGHGraphqlQuery(accessToken, query)
    pull_request = response['data']['repository']['pullRequest']
    return pull_request['id']
def fetchGHData(accessToken, subjectId, commentBody):
    '''Post ``commentBody`` on the PR node ``subjectId`` via a GraphQL
    mutation and return GitHub's JSON response.'''
    mutation = '''
mutation AddPullRequestComment {
  addComment(input:{subjectId:"%s",body: "%s"}) {
    commentEdge {
        node {
        createdAt
        body
      }
    }
    subject {
      id
    }
  }
}
''' % (subjectId, commentBody)
    return executeGHGraphqlQuery(accessToken, mutation)
def postComments(accessToken, subjectId):
    '''
    Main workhorse method. Posts every trigger phrase in COMMENTS_TO_ADD
    as a comment on the pull request identified by ``subjectId`` and
    echoes each API response.
    '''
    for commentBody in COMMENTS_TO_ADD:
        jsonData = fetchGHData(accessToken, subjectId, commentBody)
        print(jsonData)
def probeGitHubIsUp():
    '''
    Returns True if github.com accepts TCP connections on port 443.
    Else returns False.
    '''
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # FIX: bound the probe; the original could block for the OS default
        # connect timeout (minutes on some platforms).
        sock.settimeout(10)
        return sock.connect_ex(('github.com', 443)) == 0
    finally:
        # FIX: the original leaked the socket.
        sock.close()
################################################################################
if __name__ == '__main__':
    '''
    This script is supposed to be invoked directly.
    However for testing purposes and to allow importing,
    wrap work code in module check.
    '''
    print("Started.")
    # Bail out early when GitHub cannot be reached at all.
    if not probeGitHubIsUp():
        print("GitHub is unavailable, skipping fetching data.")
        exit()
    print("GitHub is available start fetching data.")
    # Token and PR number are read interactively so the credential is
    # never stored on disk or in shell history.
    accessToken = input("Enter your Github access token: ")
    pr = input("Enter the Beam PR number to test (e.g. 11403): ")
    subjectId = getSubjectId(accessToken, pr)
    postComments(accessToken, subjectId)
    print("Fetched data.")
    print('Done.')
|
# coding: utf-8
# Sum of all multiples of 3 or 5 below 1000 (Project Euler #1 style),
# counted by inclusion-exclusion: multiples of 15 are added twice above,
# so they are subtracted once.


def sum_multiples_of_3_or_5(limit):
    """Return the sum of integers in [0, limit) divisible by 3 or by 5."""
    total = 0
    for i in range(limit):
        if i % 3 == 0:
            total += i
        if i % 5 == 0:
            total += i
        if i % 15 == 0:
            total -= i
    return total


s = sum_multiples_of_3_or_5(1000)
# FIX: Python 2-only `print s` statement replaced with a print() call.
print(s)
|
import cv2
import pickle
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from pathlib import Path
from torch.utils.data import Dataset
from utils.data_manipulation import resize, normalize_mean_variance, generate_affinity, generate_target
class DatasetSYNTH(Dataset):
    """Character-detection training dataset backed by a pickled annotation list.

    The pickle at ``cfg.data_path`` holds dicts with keys ``'fn'`` (image
    path relative to the pickle's directory), ``'charBB'`` (character
    bounding boxes) and ``'txt'`` (text annotations). Each item yields the
    normalized CHW image plus character/affinity heatmap targets and an
    annotated uint8 copy of the image for visualization.
    """

    def __init__(self, cfg):
        self.cfg = cfg
        self.dataPath = Path(cfg.data_path)
        # Image filenames in the pickle are relative to the pickle's folder.
        self.basePath = self.dataPath.parent
        with self.dataPath.open('rb') as f:
            dsets = pickle.load(f)
        self.imnames, self.charBB, self.txt = [], [], []
        for d in tqdm(dsets, total=len(dsets), desc="loading dataset"):
            self.imnames.append(d['fn'])
            self.charBB.append(d['charBB'])
            self.txt.append(d['txt'])

    def __len__(self):
        return len(self.imnames)

    def __getitem__(self, item):
        # Wrap the index so oversampling beyond the dataset length is safe.
        item = item % len(self.imnames)
        image = plt.imread(self.basePath / self.imnames[item], 'PNG')  # Read the image
        # Coerce to 3 channels: grayscale (2-D or single-channel) is
        # repeated, anything wider (e.g. RGBA) is truncated to RGB.
        if len(image.shape) == 2:
            image = np.repeat(image[:, :, None], repeats=3, axis=2)
        elif image.shape[2] == 1:
            image = np.repeat(image, repeats=3, axis=2)
        else:
            image = image[:, :, 0: 3]
        image, character = resize(image, self.charBB[item].copy())  # Resize the image to (768, 768)
        # Un-normalized copy kept for drawing the affinity boxes below.
        normal_image = image.astype(np.uint8).copy()
        image = normalize_mean_variance(image).transpose(2, 0, 1)  # HWC -> CHW
        # Generate character heatmap
        weight_character = generate_target(image.shape, character.copy())
        # Generate affinity heatmap
        weight_affinity, affinity_bbox = generate_affinity(image.shape, character.copy(), self.txt[item])
        # Draw the raw affinity boxes in green on the visualization image.
        cv2.drawContours(
            normal_image,
            np.array(affinity_bbox).reshape([len(affinity_bbox), 4, 1, 2]).astype(np.int64), -1, (0, 255, 0), 2)
        # Scale each affinity box by 60/25 about its own center and draw the
        # enlarged version in red.
        enlarged_affinity_bbox = []
        for i in affinity_bbox:
            center = np.mean(i, axis=0)
            i = i - center[None, :]
            i = i*60/25
            i = i + center[None, :]
            enlarged_affinity_bbox.append(i)
        cv2.drawContours(
            normal_image,
            np.array(enlarged_affinity_bbox).reshape([len(affinity_bbox), 4, 1, 2]).astype(np.int64),
            -1, (0, 0, 255), 2
        )
        return image.astype(np.float32), \
            weight_character.astype(np.float32), \
            weight_affinity.astype(np.float32), \
            normal_image
|
import json
from unittest import TestCase
from companies.models import Company
#from coronavstech.companies.models import Company
from django.test import Client
from django.urls import reverse
import pytest
@pytest.mark.django_db
class BasicCompanyAPITestCase(TestCase):
    """Base case wiring a Django test client and the companies list URL."""

    def setUp(self) -> None:
        # Fresh client per test; URL resolved by route name so path changes
        # in urls.py don't break the suite.
        self.client = Client()
        self.companies_url = reverse("companies-list")

    def tearDown(self) -> None:
        # Nothing to clean up; TestCase rolls the DB back after each test.
        pass
class TestGetCompanies(BasicCompanyAPITestCase):
    """GET behaviour of the companies list endpoint."""

    def test_zero_companies_should_return_empty_list(self) -> None:
        """An empty table yields HTTP 200 and an empty JSON list."""
        response = self.client.get(self.companies_url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(json.loads(response.content), [])

    def test_one_company_exists_should_succeed(self) -> None:
        """A single company is serialized with its default field values.

        FIX: corrected the misspelled method name (was ``..._should_succedd``);
        pytest/unittest still discover it via the ``test_`` prefix, so nothing
        else references the old name.
        """
        test_company = Company.objects.create(name="amazon")
        response = self.client.get(self.companies_url)
        response_content = json.loads(response.content)[0]
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response_content.get("name"), test_company.name)
        self.assertEqual(response_content.get("status"), "Hiring")
        self.assertEqual(response_content.get("application_link"), "")
        self.assertEqual(response_content.get("notes"), "")
        test_company.delete()
class TestPostCompanies(BasicCompanyAPITestCase):
    """POST behaviour of the companies list endpoint."""

    def test_create_company_without_arguments_should_fail(self) -> None:
        # Serializer requires "name"; an empty POST must 400 with that error.
        response = self.client.post(path=self.companies_url)
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            json.loads(response.content), {"name": ["This field is required."]}
        )

    def test_create_existing_company_should_fail(self) -> None:
        # "name" is unique; creating a duplicate must 400.
        Company.objects.create(name="Google")
        response = self.client.post(path=self.companies_url, data={"name":"Google"})
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            json.loads(response.content),
            {"name": ["company with this name already exists."]}
        )

    def test_create_company_with_only_name_all_fields_should_be_default(self) -> None:
        # Only "name" supplied: the other serialized fields fall back to
        # their model defaults ("Hiring", empty link/notes).
        response = self.client.post(path=self.companies_url, data={"name": "teste name"})
        self.assertEqual(response.status_code, 201)
        response_content = json.loads(response.content)
        self.assertEqual(response_content.get("name"), "teste name")
        self.assertEqual(response_content.get("status"), "Hiring")
        self.assertEqual(response_content.get("application_link"), "")
        self.assertEqual(response_content.get("notes"), "")

    def test_create_company_with_layoffs_status_should_succeed(self) -> None:
        # "layoffs" is an accepted status choice and is echoed back verbatim.
        response = self.client.post(path=self.companies_url, data={"name": "teste name", "status":"layoffs"})
        self.assertEqual(response.status_code, 201)
        response_content = json.loads(response.content)
        self.assertEqual(response_content.get("name"), "teste name")
        self.assertEqual(response_content.get("status"), "layoffs")
        self.assertEqual(response_content.get("application_link"), "")
        self.assertEqual(response_content.get("notes"), "")

    def test_create_company_with_wrong_status_should_fail(self) -> None:
        # An unknown status must be rejected by the choices validation.
        response = self.client.post(path=self.companies_url, data={"name": "teste name", "status":"Wrong"})
        self.assertEqual(response.status_code, 400)
        self.assertIn("Wrong", str(response.content))
        self.assertIn("is not a valid choice", str(response.content))

    @pytest.mark.xfail
    def test_should_be_ok_if_fails(self) -> None:
        # Deliberately failing assertion to demonstrate pytest.mark.xfail.
        self.assertEqual(1,2)
def raise_covid19_exception() -> None:
    """Always raise a ``ValueError`` carrying the Corona message."""
    message = "CoronaVirus Exception"
    raise ValueError(message)
def test_raise_covid19_exception() -> None:
    """The helper must raise ValueError with the exact Corona message."""
    with pytest.raises(ValueError) as excinfo:
        raise_covid19_exception()
    assert str(excinfo.value) == "CoronaVirus Exception"
import logging
logger = logging.getLogger("Corona_LOGS")
def funciton_that_logs_something() -> None:
    """Trigger a ValueError and log it at WARNING level.

    NOTE: the misspelled name ("funciton") is kept on purpose — the log
    tests below call it by this exact name.
    """
    try:
        raise ValueError("CoronaVirus Exception")
    except ValueError as err:
        logger.warning(f"I am logging {str(err)}")
def test_logged_warning_level(caplog) -> None:
    """The helper's warning message must appear in the captured log text."""
    funciton_that_logs_something()
    expected = "I am logging CoronaVirus Exception"
    assert expected in caplog.text
def test_logged_info_level(caplog) -> None:
    """INFO records are captured once the capture level is lowered."""
    with caplog.at_level(logging.INFO):
        logger.info("I am logging info level")
        assert "I am logging info level" in caplog.text
import pandas as pd
import nltk
import pdb
def combine_premises(row):
    """Concatenate the four tokenized premises of an MPE row into one string.

    Each ``premiseN`` cell looks like ``<id>/<text>``; the piece after the
    first ``/`` is tokenized with nltk and the four premises are joined by
    single spaces.

    Expected columns:
        ID, premise1..premise4, hypothesis, entailment_judgments,
        neutral_judgments, contradiction_judgments, gold_label
    """
    tokenized = []
    for idx in range(1, 5):
        text = row["premise%d" % (idx)].split('/')[1]
        tokenized.append(" ".join(nltk.word_tokenize(text)))
    return " ".join(tokenized)
# Build label / premise|||hypothesis files for each MPE split.
for f in ["train", "dev", "test"]:
    line_count = -1
    lbls, hypoths, premises = [], [], []
    df = pd.read_csv("mpe/mpe_%s.txt" % (f), sep="\t")
    df['combined_premises'] = df.apply(combine_premises, axis=1)
    # FIX: the original re-opened "mpe/mpe_%st.txt" (stray "t"), which does
    # not match the file read into the DataFrame above.
    for line in open("mpe/mpe_%s.txt" % (f)):
        line_count += 1
        if line_count == 0:
            continue  # skip the header row
        line = line.split("\t")
        # FIX: the original wrapped condition and message in parentheses,
        # making the assert a 2-tuple that is always truthy — it never fired.
        assert len(line) == 10, "MPE %s file has a bad line at line numbers %d" % (f, line_count)
        for i in range(4):
            # there are four premises per hypothesis
            lbls.append(line[-1].strip().split()[-1])
            hypoths.append(" ".join(nltk.word_tokenize(line[5].strip())))
            premises.append(" ".join(nltk.word_tokenize(line[i + 1].split('/', 1)[1].strip())))
    if f == "dev":
        f = "val"
    # NOTE(review): the original also "asserted" hypothesis uniqueness with a
    # tuple-assert, but each hypothesis is appended once per premise (4x), so
    # enabling that check verbatim would always fail; it is dropped here.
    assert len(lbls) == len(hypoths), "Number of labels and hypothesis for MPE %s do not match" % (f)
    # FIX: text is written, so open in text mode (was "wb"); use `with` so
    # the files are closed even on error.
    with open("mpe/cl_mpe_%s_lbl_concat_file" % (f), "w") as lbl_out, \
            open("mpe/cl_mpe_%s_source_concat_file" % (f), "w") as source_out:
        for lbl, premise, hypoth in zip(lbls, premises, hypoths):
            lbl_out.write(lbl.strip() + "\n")
            source_out.write(premise + "|||" + hypoth + "\n")
|
from typing import List, Optional, Tuple, Union
from ray.data._internal.planner.exchange.interfaces import ExchangeTaskSpec
from ray.data._internal.sort import SortKey
from ray.data._internal.table_block import TableBlockAccessor
from ray.data.aggregate import AggregateFn, Count
from ray.data.aggregate._aggregate import _AggregateOnKeyBase
from ray.data.block import Block, BlockAccessor, BlockExecStats, BlockMetadata, KeyType
class SortAggregateTaskSpec(ExchangeTaskSpec):
    """
    The implementation for sort-based aggregate tasks.
    Aggregate is done in 2 steps: partial aggregate of individual blocks, and
    final aggregate of sorted blocks.
    Partial aggregate (`map`): each block is sorted locally, then partitioned into
    smaller blocks according to the boundaries. Each partitioned block is aggregated
    separately, then passed to a final aggregate task.
    Final aggregate (`reduce`): each task would receive a block from every worker that
    consists of items in a certain range. It then merges the sorted blocks and
    aggregates on-the-fly.
    """

    def __init__(
        self,
        boundaries: List[KeyType],
        key: Optional[str],
        aggs: List[AggregateFn],
    ):
        # boundaries/key/aggs are forwarded to the map tasks; the reduce
        # tasks only need the key and the aggregations.
        super().__init__(
            map_args=[boundaries, key, aggs],
            reduce_args=[key, aggs],
        )

    @staticmethod
    def map(
        idx: int,
        block: Block,
        output_num_blocks: int,
        boundaries: List[KeyType],
        key: Optional[str],
        aggs: List[AggregateFn],
    ) -> List[Union[BlockMetadata, Block]]:
        """Partially aggregate one input block.

        Returns the partially combined partition blocks, with the input
        block's metadata appended as the final list element.
        """
        stats = BlockExecStats.builder()
        block = SortAggregateTaskSpec._prune_unused_columns(block, key, aggs)
        if key is None:
            # Global (ungrouped) aggregation: no partitioning needed.
            partitions = [block]
        else:
            partitions = BlockAccessor.for_block(block).sort_and_partition(
                boundaries,
                SortKey(key),
            )
        parts = [BlockAccessor.for_block(p).combine(key, aggs) for p in partitions]
        meta = BlockAccessor.for_block(block).get_metadata(
            input_files=None, exec_stats=stats.build()
        )
        return parts + [meta]

    @staticmethod
    def reduce(
        key: Optional[str],
        aggs: List[AggregateFn],
        *mapper_outputs: List[Block],
        partial_reduce: bool = False,
    ) -> Tuple[Block, BlockMetadata]:
        """Merge pre-combined mapper blocks; finalize unless ``partial_reduce``."""
        return BlockAccessor.for_block(mapper_outputs[0]).aggregate_combined_blocks(
            list(mapper_outputs), key, aggs, finalize=not partial_reduce
        )

    @staticmethod
    def _prune_unused_columns(
        block: Block,
        key: str,
        aggs: Tuple[AggregateFn],
    ) -> Block:
        """Prune unused columns from block before aggregate."""
        prune_columns = True
        columns = set()
        if isinstance(key, str):
            columns.add(key)
        elif callable(key):
            # A key function may read any column; pruning would be unsafe.
            prune_columns = False
        for agg in aggs:
            if isinstance(agg, _AggregateOnKeyBase) and isinstance(agg._key_fn, str):
                columns.add(agg._key_fn)
            elif not isinstance(agg, Count):
                # Don't prune columns if any aggregate key is not string.
                prune_columns = False
        block_accessor = BlockAccessor.for_block(block)
        if (
            prune_columns
            and isinstance(block_accessor, TableBlockAccessor)
            and block_accessor.num_rows() > 0
        ):
            return block_accessor.select(list(columns))
        else:
            return block
|
import sys
import time
import threading
import queue
from hashlib import sha256
from secrets import token_bytes
import grpc
from lnd_grpc.protos import invoices_pb2 as invoices_pb2, rpc_pb2
from loop_rpc.protos import loop_client_pb2
from test_utils.fixtures import *
from test_utils.lnd import LndNode
# Node implementations exercised by this test suite.
impls = [LndNode]

# TEST_DEBUG / TEST_DIR (and `logging`, `os`, `pytest`, `TRAVIS` used below)
# come in via the star imports above — presumably from test_utils.fixtures;
# TODO confirm.
if TEST_DEBUG:
    logging.basicConfig(
        level=logging.DEBUG, format="%(name)-12s %(message)s", stream=sys.stdout
    )
logging.info("Tests running in '%s'", TEST_DIR)

# Channel funding amount (sat) and default payment amount (sat) for tests.
FUND_AMT = 10 ** 7
SEND_AMT = 10 ** 3
def get_updates(_queue):
    """
    Drain ``_queue`` (a queue.Queue instance) and return its items, in
    queue order, as a list.
    """
    drained = []
    while True:
        try:
            drained.append(_queue.get_nowait())
        except queue.Empty:
            break
    return drained
def transact_and_mine(btc):
    """
    Generate some transactions and blocks.
    To make bitcoind's `estimatesmartfee` succeed.
    """
    # One fresh wallet address receives both the sends and the mined blocks.
    addr = btc.rpc.getnewaddress("", "bech32")
    for i in range(10):
        for j in range(10):
            # txid is unused; the call is made purely for its side effect.
            txid = btc.rpc.sendtoaddress(addr, 0.5)
        # Mine one block per batch of ten sends so the fee estimator sees
        # confirmed transactions.
        btc.rpc.generatetoaddress(1, addr)
def wait_for(success, timeout=30, interval=0.25):
    """
    Poll ``success`` until it returns truthy or ``timeout`` seconds elapse.

    :param success: zero-argument callable polled every ``interval`` seconds
    :param timeout: maximum seconds to wait
    :param interval: sleep between polls
    :raises ValueError: if ``success()`` is still falsy after the timeout
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        if success():
            return
        time.sleep(interval)
    # FIX: the original re-checked only the clock here, so a success() that
    # turned true on the final poll could still raise spuriously; it also
    # passed the message as a second ValueError argument, leaving the "{}"
    # placeholder unformatted.
    if not success():
        raise ValueError("Error waiting for {}".format(success))
def wait_for_bool(success, timeout=30, interval=0.25):
    # NOTE(review): unlike wait_for(), ``success`` here is a plain value, not
    # a callable, so it cannot change inside the loop: a falsy value sleeps
    # out the whole timeout and then raises, a truthy value returns at once.
    # Looks intentional as a crude guard/delay — TODO confirm with callers.
    start_time = time.time()
    while not success and time.time() < start_time + timeout:
        time.sleep(interval)
    if time.time() > start_time + timeout:
        # NOTE(review): ValueError receives two arguments; the "{}" is never
        # formatted into the message.
        raise ValueError("Error waiting for {}", success)
def sync_blockheight(btc, nodes):
    """
    Block until every node in ``nodes`` reports bitcoind's current height.
    """
    blocks = btc.rpc.getblockchaininfo()["blocks"]
    for lnd_node in nodes:
        wait_for(lambda: lnd_node.get_info().block_height == blocks, interval=1)
        time.sleep(0.25)
def generate_until(btc, success, blocks=30, interval=1):
    """
    Generate new blocks until ``success()`` returns true.

    Mainly used to wait for transactions to confirm since they might
    be delayed and we don't want to add a long waiting time to all
    tests just because some are slow.

    :param btc: bitcoind wrapper used to mine the blocks
    :param success: zero-argument callable polled after each block
    :param blocks: maximum number of blocks to mine before giving up
    :param interval: seconds slept between attempts
    :raises ValueError: if ``success()`` is still falsy after ``blocks`` blocks
    """
    for _ in range(blocks):
        time.sleep(interval)
        if success():
            return
        # FIX: the original called generate(bitcoind, 1), but ``bitcoind`` is
        # not a parameter of this function — the ``btc`` argument was meant.
        # (The unused getnewaddress() call was dropped; generate() fetches
        # its own address.)
        generate(btc, 1)
    time.sleep(interval)
    if not success():
        # FIX: format the message (the original passed ``blocks`` as a second
        # ValueError argument, leaving "%d" unformatted).
        raise ValueError("Generated %d blocks, but still no success" % blocks)
def gen_and_sync_lnd(bitcoind, nodes):
    """
    Mine a few blocks, then wait until every lnd node has caught up to the
    new chain tip and reports itself synced.
    """
    generate(bitcoind, 3)
    sync_blockheight(bitcoind, nodes=nodes)
    for lnd_node in nodes:
        wait_for(lambda: lnd_node.get_info().synced_to_chain, interval=0.25)
        time.sleep(0.25)
def generate(bitcoind, blocks):
    """Mine ``blocks`` new blocks paying a fresh bech32 wallet address."""
    mine_to = bitcoind.rpc.getnewaddress("", "bech32")
    bitcoind.rpc.generatetoaddress(blocks, mine_to)
def close_all_channels(bitcoind, nodes):
    """
    Recursively close each channel for each node in the list of nodes passed in and assert
    """
    gen_and_sync_lnd(bitcoind, nodes)
    for node in nodes:
        for channel in node.list_channels():
            channel_point = channel.channel_point
            # close_channel returns a generator; advancing it once kicks off
            # the close and blocks until the first update arrives.
            node.close_channel(channel_point=channel_point).__next__()
        # Mine/sync so the close transactions confirm, then verify the node
        # has no channels left.
        gen_and_sync_lnd(bitcoind, nodes)
        assert not node.list_channels()
    gen_and_sync_lnd(bitcoind, nodes)
def disconnect_all_peers(bitcoind, nodes):
    """
    Recursively disconnect each peer from each node in the list of nodes passed in and assert
    """
    gen_and_sync_lnd(bitcoind, nodes)
    for node in nodes:
        # Snapshot the pub keys first; we mutate the peer list below.
        peers = [p.pub_key for p in node.list_peers()]
        for peer in peers:
            node.disconnect_peer(pub_key=peer)
            # NOTE(review): this lambda compares a pub_key string against
            # peer *objects*, so it is trivially true immediately; the real
            # check is the assert below. TODO confirm and tighten.
            wait_for(lambda: peer not in node.list_peers(), timeout=5)
            assert peer not in [p.pub_key for p in node.list_peers()]
    gen_and_sync_lnd(bitcoind, nodes)
def get_addresses(node, response="str"):
    """
    Fetch one p2wkh and one np2wkh address from ``node``.

    With ``response == "str"`` (the default) the bare address strings are
    returned; any other value returns the raw address responses.
    """
    p2wkh = node.new_address(address_type="p2wkh")
    np2wkh = node.new_address(address_type="np2wkh")
    if response != "str":
        return p2wkh, np2wkh
    return p2wkh.address, np2wkh.address
def setup_nodes(bitcoind, nodes, delay=0):
    """
    Tear down any existing channels/peers between ``nodes``, then connect
    and open a fresh channel from each node to the next with half of the
    balance pushed to the remote side.

    :return: the setup nodes
    """
    # lnd needs at least one block mined within the last 2 hours.
    generate(bitcoind, 1)
    # Tear down first so a previously failed test can't leak state in.
    break_down_nodes(bitcoind, nodes, delay)
    # Open a single channel from each node to its successor; capacity flows
    # in one direction only (alphabetical).
    setup_channels(bitcoind, nodes, delay)
    return nodes
def setup_channels(bitcoind, nodes, delay):
    """Connect each node to its successor and open a funded channel to it."""
    # ``node`` itself is unused; the loop works through nodes[i]/nodes[i+1].
    for i, node in enumerate(nodes):
        if i + 1 == len(nodes):
            break
        nodes[i].connect(
            str(nodes[i + 1].id() + "@localhost:" + str(nodes[i + 1].daemon.port)),
            perm=1,
        )
        wait_for(lambda: nodes[i].list_peers(), interval=0.25)
        wait_for(lambda: nodes[i + 1].list_peers(), interval=0.25)
        time.sleep(delay)
        nodes[i].add_funds(bitcoind, 1)
        gen_and_sync_lnd(bitcoind, [nodes[i], nodes[i + 1]])
        # Half of the capacity is pushed to the remote side on open.
        nodes[i].open_channel_sync(
            node_pubkey_string=nodes[i + 1].id(),
            local_funding_amount=FUND_AMT,
            push_sat=int(FUND_AMT / 2),
            spend_unconfirmed=True,
        )
        time.sleep(delay)
        # Confirm the funding transaction and verify both sides see the channel.
        generate(bitcoind, 3)
        gen_and_sync_lnd(bitcoind, [nodes[i], nodes[i + 1]])
        assert confirm_channel(bitcoind, nodes[i], nodes[i + 1])
def break_down_nodes(bitcoind, nodes, delay=0):
    """Close every channel, then drop every peer connection, of ``nodes``."""
    close_all_channels(bitcoind, nodes)
    time.sleep(delay)
    disconnect_all_peers(bitcoind, nodes)
    time.sleep(delay)
def confirm_channel(bitcoind, n1, n2):
    """
    Confirm that a channel is open between two nodes.

    Both nodes must already be peers; up to ten blocks are mined while
    polling both sides for the channel.
    """
    def peer_keys(node):
        return [p.pub_key for p in node.list_peers()]

    assert n1.id() in peer_keys(n2)
    assert n2.id() in peer_keys(n1)
    for _ in range(10):
        time.sleep(0.5)
        if n1.check_channel(n2) and n2.check_channel(n1):
            return True
        mine_to = bitcoind.rpc.getnewaddress("", "bech32")
        bhash = bitcoind.rpc.generatetoaddress(1, mine_to)[0]
        n1.block_sync(bhash)
        n2.block_sync(bhash)
    # Last ditch attempt
    return n1.check_channel(n2) and n2.check_channel(n1)
# def idfn(impls):
# """
# Not used currently
# """
# return "_".join([i.displayName for i in impls])
def wipe_channels_from_disk(node, network="regtest"):
    """
    Delete the node's channel backup and channel DB files (used to test
    channel backups), asserting they exist before and are gone after.
    """
    _channel_backup = node.lnd_dir + f"chain/bitcoin/{network}/channel.backup"
    _channel_db = node.lnd_dir + f"graph/{network}/channel.db"
    targets = (_channel_backup, _channel_db)
    for path in targets:
        assert os.path.exists(path)
    for path in targets:
        os.remove(path)
    for path in targets:
        assert not os.path.exists(path)
def random_32_byte_hash():
    """
    Can generate an invoice preimage and corresponding payment hash
    :return: 32 byte sha256 hash digest, 32 byte preimage
    """
    preimage = token_bytes(32)
    return sha256(preimage).digest(), preimage
#########
# Tests #
#########
class TestNonInteractiveLightning:
    """
    Non-interactive tests will share a common lnd instance because test passes/failures will not
    impact future tests.
    """

    def test_start(self, bitcoind, alice):
        assert alice.get_info()
        sync_blockheight(bitcoind, [alice])

    def test_wallet_balance(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        # NOTE(review): asserts on get_info() although the test targets
        # wallet_balance — possibly copy/paste; TODO confirm.
        assert isinstance(alice.get_info(), rpc_pb2.GetInfoResponse)
        pytest.raises(TypeError, alice.wallet_balance, "please")

    def test_channel_balance(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        assert isinstance(alice.channel_balance(), rpc_pb2.ChannelBalanceResponse)
        pytest.raises(TypeError, alice.channel_balance, "please")

    def test_get_transactions(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        assert isinstance(alice.get_transactions(), rpc_pb2.TransactionDetails)
        pytest.raises(TypeError, alice.get_transactions, "please")

    def test_send_coins(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        alice.add_funds(alice.bitcoin, 1)
        p2wkh_address, np2wkh_address = get_addresses(alice)
        # test passes
        send1 = alice.send_coins(addr=p2wkh_address, amount=100000)
        generate(alice.bitcoin, 1)
        time.sleep(0.5)
        send2 = alice.send_coins(addr=np2wkh_address, amount=100000)
        assert isinstance(send1, rpc_pb2.SendCoinsResponse)
        assert isinstance(send2, rpc_pb2.SendCoinsResponse)
        # test failures: negative amount, then more than the wallet holds
        pytest.raises(
            grpc.RpcError,
            lambda: alice.send_coins(
                alice.new_address(address_type="p2wkh").address, amount=100000 * -1
            ),
        )
        pytest.raises(
            grpc.RpcError,
            lambda: alice.send_coins(
                alice.new_address(address_type="p2wkh").address, amount=1000000000000000
            ),
        )

    def test_send_many(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        alice.add_funds(alice.bitcoin, 1)
        p2wkh_address, np2wkh_address = get_addresses(alice)
        send_dict = {p2wkh_address: 100000, np2wkh_address: 100000}
        send = alice.send_many(addr_to_amount=send_dict)
        alice.bitcoin.rpc.generatetoaddress(1, p2wkh_address)
        time.sleep(0.5)
        assert isinstance(send, rpc_pb2.SendManyResponse)

    def test_list_unspent(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        alice.add_funds(alice.bitcoin, 1)
        assert isinstance(alice.list_unspent(0, 1000), rpc_pb2.ListUnspentResponse)

    def test_subscribe_transactions(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        subscription = alice.subscribe_transactions()
        alice.add_funds(alice.bitcoin, 1)
        assert isinstance(subscription, grpc._channel._Rendezvous)
        assert isinstance(subscription.__next__(), rpc_pb2.Transaction)

    # gen_and_sync_lnd(alice.bitcoin, [alice])
    # transaction_updates = queue.LifoQueue()
    #
    # def sub_transactions():
    #     try:
    #         for response in alice.subscribe_transactions():
    #             transaction_updates.put(response)
    #     except StopIteration:
    #         pass
    #
    # alice_sub = threading.Thread(target=sub_transactions(), daemon=True)
    # alice_sub.start()
    # time.sleep(1)
    # while not alice_sub.is_alive():
    #     time.sleep(0.1)
    # alice.add_funds(alice.bitcoin, 1)
    #
    # assert any(isinstance(update) == rpc_pb2.Transaction for update in get_updates(transaction_updates))

    def test_new_address(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        # Any non-"str" second argument makes get_addresses return the raw
        # gRPC responses instead of address strings.
        p2wkh_address, np2wkh_address = get_addresses(alice, "response")
        assert isinstance(p2wkh_address, rpc_pb2.NewAddressResponse)
        assert isinstance(np2wkh_address, rpc_pb2.NewAddressResponse)

    def test_sign_verify_message(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        message = "Test message to sign and verify."
        signature = alice.sign_message(message)
        assert isinstance(signature, rpc_pb2.SignMessageResponse)
        verified_message = alice.verify_message(message, signature.signature)
        assert isinstance(verified_message, rpc_pb2.VerifyMessageResponse)

    def test_get_info(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        assert isinstance(alice.get_info(), rpc_pb2.GetInfoResponse)

    def test_pending_channels(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        assert isinstance(alice.pending_channels(), rpc_pb2.PendingChannelsResponse)

    # Skipping list_channels and closed_channels as we don't return their responses directly

    def test_add_invoice(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        invoice = alice.add_invoice(value=SEND_AMT)
        assert isinstance(invoice, rpc_pb2.AddInvoiceResponse)

    def test_list_invoices(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        assert isinstance(alice.list_invoices(), rpc_pb2.ListInvoiceResponse)

    def test_lookup_invoice(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        payment_hash = alice.add_invoice(value=SEND_AMT).r_hash
        assert isinstance(alice.lookup_invoice(r_hash=payment_hash), rpc_pb2.Invoice)

    def test_subscribe_invoices(self, alice):
        """
        Invoice subscription run as a thread
        """
        gen_and_sync_lnd(alice.bitcoin, [alice])
        invoice_updates = queue.LifoQueue()

        def sub_invoices():
            try:
                for response in alice.subscribe_invoices():
                    invoice_updates.put(response)
            except grpc._channel._Rendezvous:
                # Stream teardown on test exit surfaces as a gRPC error.
                pass

        alice_sub = threading.Thread(target=sub_invoices, daemon=True)
        alice_sub.start()
        time.sleep(1)
        while not alice_sub.is_alive():
            time.sleep(0.1)
        alice.add_invoice(value=SEND_AMT)
        alice.daemon.wait_for_log("AddIndex")
        time.sleep(0.1)
        assert any(
            isinstance(update, rpc_pb2.Invoice)
            for update in get_updates(invoice_updates)
        )

    def test_decode_payment_request(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        pay_req = alice.add_invoice(value=SEND_AMT).payment_request
        decoded_req = alice.decode_pay_req(pay_req=pay_req)
        assert isinstance(decoded_req, rpc_pb2.PayReq)

    def test_list_payments(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        assert isinstance(alice.list_payments(), rpc_pb2.ListPaymentsResponse)

    def test_delete_all_payments(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        assert isinstance(
            alice.delete_all_payments(), rpc_pb2.DeleteAllPaymentsResponse
        )

    def test_describe_graph(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        assert isinstance(alice.describe_graph(), rpc_pb2.ChannelGraph)

    # Skipping get_chan_info, subscribe_chan_events, get_alice_info, query_routes

    def test_get_network_info(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        assert isinstance(alice.get_network_info(), rpc_pb2.NetworkInfo)

    @pytest.mark.skipif(
        TRAVIS is True,
        reason="Travis doesn't like this one. Possibly a race"
        "condition not worth debugging",
    )
    def test_stop_daemon(self, node_factory):
        node = node_factory.get_node(implementation=LndNode, node_id="test_stop_node")
        node.daemon.wait_for_log("Server listening on")
        node.stop_daemon()
        # use is_in_log instead of wait_for_log as node daemon should be shutdown
        node.daemon.is_in_log("Shutdown complete")
        time.sleep(1)
        with pytest.raises(grpc.RpcError):
            node.get_info()

    def test_debug_level(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        assert isinstance(
            alice.debug_level(level_spec="warn"), rpc_pb2.DebugLevelResponse
        )

    def test_fee_report(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        assert isinstance(alice.fee_report(), rpc_pb2.FeeReportResponse)

    def test_forwarding_history(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        assert isinstance(alice.forwarding_history(), rpc_pb2.ForwardingHistoryResponse)

    def test_lightning_stub(self, alice):
        gen_and_sync_lnd(alice.bitcoin, [alice])
        original_stub = alice.lightning_stub
        # not simulation of actual failure, but failure in the form that should be detected by
        # connectivity event logger
        alice.connection_status_change = True
        # make a call to stimulate stub regeneration
        alice.get_info()
        new_stub = alice.lightning_stub
        assert original_stub != new_stub
class TestInteractiveLightning:
def test_peer_connection(self, bob, carol, dave, bitcoind):
# Needed by lnd in order to have at least one block in the last 2 hours
generate(bitcoind, 1)
# connection tests
connection1 = bob.connect(
str(carol.id() + "@localhost:" + str(carol.daemon.port))
)
wait_for(lambda: bob.list_peers(), timeout=5)
wait_for(lambda: carol.list_peers(), timeout=5)
# check bob connected to carol using connect() and list_peers()
assert isinstance(connection1, rpc_pb2.ConnectPeerResponse)
assert bob.id() in [p.pub_key for p in carol.list_peers()]
assert carol.id() in [p.pub_key for p in bob.list_peers()]
dave_ln_addr = dave.lightning_address(
pubkey=dave.id(), host="localhost:" + str(dave.daemon.port)
)
carol.connect_peer(dave_ln_addr)
wait_for(lambda: carol.list_peers(), timeout=5)
wait_for(lambda: dave.list_peers(), timeout=5)
# check carol connected to dave using connect() and list_peers()
assert carol.id() in [p.pub_key for p in dave.list_peers()]
assert dave.id() in [p.pub_key for p in carol.list_peers()]
generate(bob.bitcoin, 1)
gen_and_sync_lnd(bitcoind, [bob, carol])
# Disconnection tests
bob.disconnect_peer(pub_key=str(carol.id()))
time.sleep(0.25)
# check bob not connected to carol using connect() and list_peers()
assert bob.id() not in [p.pub_key for p in carol.list_peers()]
assert carol.id() not in [p.pub_key for p in bob.list_peers()]
carol.disconnect_peer(dave.id())
wait_for(lambda: not carol.list_peers(), timeout=5)
wait_for(lambda: not dave.list_peers(), timeout=5)
# check carol not connected to dave using connect_peer() and list_peers()
assert carol.id() not in [p.pub_key for p in dave.list_peers()]
assert dave.id() not in [p.pub_key for p in carol.list_peers()]
def test_open_channel_sync(self, bob, carol, bitcoind):
# Needed by lnd in order to have at least one block in the last 2 hours
generate(bitcoind, 1)
disconnect_all_peers(bitcoind, [bob, carol])
bob.connect(str(carol.id() + "@localhost:" + str(carol.daemon.port)), perm=1)
wait_for(lambda: bob.list_peers(), interval=1)
wait_for(lambda: carol.list_peers(), interval=1)
bob.add_funds(bitcoind, 1)
gen_and_sync_lnd(bitcoind, [bob, carol])
bob.open_channel_sync(
node_pubkey_string=carol.id(), local_funding_amount=FUND_AMT
)
gen_and_sync_lnd(bitcoind, [bob, carol])
assert confirm_channel(bitcoind, bob, carol)
assert bob.check_channel(carol)
assert carol.check_channel(bob)
def test_open_channel(self, bob, carol, bitcoind):
# Needed by lnd in order to have at least one block in the last 2 hours
generate(bitcoind, 1)
break_down_nodes(bitcoind, nodes=[bob, carol])
bob.connect(str(carol.id() + "@localhost:" + str(carol.daemon.port)), perm=1)
wait_for(lambda: bob.list_peers(), interval=0.5)
wait_for(lambda: carol.list_peers(), interval=0.5)
bob.add_funds(bitcoind, 1)
gen_and_sync_lnd(bitcoind, [bob, carol])
bob.open_channel(
node_pubkey_string=carol.id(), local_funding_amount=FUND_AMT
).__next__()
generate(bitcoind, 3)
gen_and_sync_lnd(bitcoind, [bob, carol])
assert confirm_channel(bitcoind, bob, carol)
assert bob.check_channel(carol)
assert carol.check_channel(bob)
def test_close_channel(self, bob, carol, bitcoind):
bob, carol = setup_nodes(bitcoind, [bob, carol])
channel_point = bob.list_channels()[0].channel_point
bob.close_channel(channel_point=channel_point).__next__()
generate(bitcoind, 6)
gen_and_sync_lnd(bitcoind, [bob, carol])
assert bob.check_channel(carol) is False
assert carol.check_channel(bob) is False
def test_send_payment_sync(self, bitcoind, bob, carol):
bob, carol = setup_nodes(bitcoind, [bob, carol])
# test payment request method
invoice = carol.add_invoice(value=SEND_AMT)
bob.send_payment_sync(payment_request=invoice.payment_request)
generate(bitcoind, 3)
gen_and_sync_lnd(bitcoind, [bob, carol])
payment_hash = carol.decode_pay_req(invoice.payment_request).payment_hash
assert payment_hash in [p.payment_hash for p in bob.list_payments().payments]
assert carol.lookup_invoice(r_hash_str=payment_hash).settled is True
# test manually specified request
invoice2 = carol.add_invoice(value=SEND_AMT)
bob.send_payment_sync(
dest_string=carol.id(),
amt=SEND_AMT,
payment_hash=invoice2.r_hash,
final_cltv_delta=144,
)
generate(bitcoind, 3)
gen_and_sync_lnd(bitcoind, [bob, carol])
payment_hash2 = carol.decode_pay_req(invoice2.payment_request).payment_hash
assert payment_hash2 in [p.payment_hash for p in bob.list_payments().payments]
assert carol.lookup_invoice(r_hash_str=payment_hash).settled is True
# test sending any amount to an invoice which requested 0
invoice3 = carol.add_invoice(value=0)
bob.send_payment_sync(payment_request=invoice3.payment_request, amt=SEND_AMT)
generate(bitcoind, 3)
gen_and_sync_lnd(bitcoind, [bob, carol])
payment_hash = carol.decode_pay_req(invoice3.payment_request).payment_hash
assert payment_hash in [p.payment_hash for p in bob.list_payments().payments]
inv_paid = carol.lookup_invoice(r_hash_str=payment_hash)
assert inv_paid.settled is True
assert inv_paid.amt_paid_sat == SEND_AMT
def test_send_payment(self, bitcoind, bob, carol):
    """Exercise the streaming send_payment RPC three ways: by payment request,
    by manually specified fields, and against a zero-amount invoice.

    Bug fix: the second leg previously asserted settlement of the FIRST
    invoice (``payment_hash``) instead of the second (``payment_hash2``).
    """
    # TODO: remove try/except hack for curve generation
    bob, carol = setup_nodes(bitcoind, [bob, carol])
    # Leg 1: pay via the encoded payment request.
    invoice = carol.add_invoice(value=SEND_AMT)
    try:
        # Advance the response stream once to fire the payment.
        bob.send_payment(payment_request=invoice.payment_request).__next__()
    except StopIteration:
        pass
    bob.daemon.wait_for_log("Closed completed SETTLE circuit", timeout=60)
    generate(bitcoind, 3)
    gen_and_sync_lnd(bitcoind, [bob, carol])
    payment_hash = carol.decode_pay_req(invoice.payment_request).payment_hash
    assert payment_hash in [p.payment_hash for p in bob.list_payments().payments]
    assert carol.lookup_invoice(r_hash_str=payment_hash).settled is True
    # Leg 2: pay via manually specified destination/amount/hash.
    invoice2 = carol.add_invoice(value=SEND_AMT)
    try:
        bob.send_payment(
            dest_string=carol.id(),
            amt=SEND_AMT,
            payment_hash=invoice2.r_hash,
            final_cltv_delta=144,
        ).__next__()
    except StopIteration:
        pass
    bob.daemon.wait_for_log("Closed completed SETTLE circuit", timeout=60)
    generate(bitcoind, 3)
    gen_and_sync_lnd(bitcoind, [bob, carol])
    payment_hash2 = carol.decode_pay_req(invoice2.payment_request).payment_hash
    assert payment_hash2 in [p.payment_hash for p in bob.list_payments().payments]
    # FIX: verify the SECOND invoice settled (was payment_hash).
    assert carol.lookup_invoice(r_hash_str=payment_hash2).settled is True
    # Leg 3: a 0-value invoice accepts any amount the sender chooses.
    invoice = carol.add_invoice(value=0)
    try:
        bob.send_payment(
            payment_request=invoice.payment_request, amt=SEND_AMT
        ).__next__()
    except StopIteration:
        pass
    bob.daemon.wait_for_log("Closed completed SETTLE circuit", timeout=60)
    generate(bitcoind, 3)
    gen_and_sync_lnd(bitcoind, [bob, carol])
    payment_hash = carol.decode_pay_req(invoice.payment_request).payment_hash
    assert payment_hash in [p.payment_hash for p in bob.list_payments().payments]
    inv_paid = carol.lookup_invoice(r_hash_str=payment_hash)
    assert inv_paid.settled is True
    assert inv_paid.amt_paid_sat == SEND_AMT
def test_send_to_route_sync(self, bitcoind, bob, carol, dave):
    """Pay dave over an explicitly queried route using the blocking RPC."""
    bob, carol, dave = setup_nodes(bitcoind, [bob, carol, dave])
    gen_and_sync_lnd(bitcoind, [bob, carol, dave])
    invoice = dave.add_invoice(value=SEND_AMT)
    routes = bob.query_routes(pub_key=dave.id(), amt=SEND_AMT, final_cltv_delta=144)
    # Send along the first route the router returned.
    bob.send_to_route_sync(payment_hash=invoice.r_hash, route=routes[0])
    generate(bitcoind, 3)
    gen_and_sync_lnd(bitcoind, [bob, carol, dave])
    pay_hash = dave.decode_pay_req(invoice.payment_request).payment_hash
    sent_hashes = [p.payment_hash for p in bob.list_payments().payments]
    assert pay_hash in sent_hashes
    assert dave.lookup_invoice(r_hash_str=pay_hash).settled is True
def test_send_to_route(self, bitcoind, bob, carol, dave):
    """Pay dave over an explicitly queried route using the streaming RPC."""
    bob, carol, dave = setup_nodes(bitcoind, [bob, carol, dave])
    gen_and_sync_lnd(bitcoind, [bob, carol, dave])
    invoice = dave.add_invoice(value=SEND_AMT)
    routes = bob.query_routes(pub_key=dave.id(), amt=SEND_AMT, final_cltv_delta=144)
    try:
        # Advance the response stream once to fire the payment.
        next(bob.send_to_route(invoice=invoice, route=routes[0]))
    except StopIteration:
        pass
    bob.daemon.wait_for_log("Closed completed SETTLE circuit", timeout=60)
    generate(bitcoind, 3)
    gen_and_sync_lnd(bitcoind, [bob, carol, dave])
    pay_hash = dave.decode_pay_req(invoice.payment_request).payment_hash
    sent_hashes = [p.payment_hash for p in bob.list_payments().payments]
    assert pay_hash in sent_hashes
    assert dave.lookup_invoice(r_hash_str=pay_hash).settled is True
def test_subscribe_channel_events(self, bitcoind, bob, carol):
    """Closing a channel should produce a closed_channel update on bob's
    channel-events subscription stream."""
    bob, carol = setup_nodes(bitcoind, [bob, carol])
    gen_and_sync_lnd(bitcoind, [bob, carol])
    # LIFO queue: get_updates() presumably drains newest-first.
    chan_updates = queue.LifoQueue()

    def sub_channel_events():
        # Drain the server-streaming RPC into the queue; the stream teardown
        # surfaces as a grpc _Rendezvous, which we swallow deliberately.
        try:
            for response in bob.subscribe_channel_events():
                chan_updates.put(response)
        except grpc._channel._Rendezvous:
            pass

    # Daemon thread so a stuck stream cannot block test-run shutdown.
    bob_sub = threading.Thread(target=sub_channel_events, daemon=True)
    bob_sub.start()
    time.sleep(1)
    # Busy-wait until the subscriber thread is running before triggering events.
    while not bob_sub.is_alive():
        time.sleep(0.1)
    channel_point = bob.list_channels()[0].channel_point
    # Advance the close-channel generator once to broadcast the closing tx.
    bob.close_channel(channel_point=channel_point).__next__()
    generate(bitcoind, 3)
    gen_and_sync_lnd(bitcoind, [bob, carol])
    # NOTE(review): proto3 sub-message fields are never None, so
    # `closed_channel is not None` may be vacuously true for every update;
    # consider update.HasField("closed_channel") instead — confirm proto type.
    assert any(
        update.closed_channel is not None for update in get_updates(chan_updates)
    )
def test_subscribe_channel_graph(self, bitcoind, bob, carol, dave):
    """A policy update broadcast by carol should arrive on bob's graph stream."""
    bob, carol = setup_nodes(bitcoind, [bob, carol])
    updated_fee = 5555
    graph_sub = bob.subscribe_channel_graph()
    # Broadcast a global channel-policy change from carol's side.
    carol.update_channel_policy(
        chan_point=None,
        base_fee_msat=updated_fee,
        fee_rate=0.5555,
        time_lock_delta=9,
        is_global=True,
    )
    # The first streamed item should be a topology update.
    assert isinstance(next(graph_sub), rpc_pb2.GraphTopologyUpdate)
def test_update_channel_policy(self, bitcoind, bob, carol):
    """update_channel_policy should return a PolicyUpdateResponse on success."""
    bob, carol = setup_nodes(bitcoind, [bob, carol])
    response = bob.update_channel_policy(
        chan_point=None,
        base_fee_msat=5555,
        fee_rate=0.5555,
        time_lock_delta=9,
        is_global=True,
    )
    assert isinstance(response, rpc_pb2.PolicyUpdateResponse)
class TestChannelBackup:
    """Static-channel-backup (SCB) round trips: export a backup, wipe bob's
    channel DB, restore, and verify the peer force-closes and funds sweep.

    The wait_for_log calls encode a strict recovery sequence; statement
    order is load-bearing throughout.
    """

    def test_export_verify_restore_multi(self, bitcoind, bob, carol):
        """Round-trip the multi-channel backup blob."""
        bob, carol = setup_nodes(bitcoind, [bob, carol])
        funding_txid, output_index = bob.list_channels()[0].channel_point.split(":")
        channel_point = bob.channel_point_generator(
            funding_txid=funding_txid, output_index=output_index
        )
        all_backup = bob.export_all_channel_backups()
        assert isinstance(all_backup, rpc_pb2.ChanBackupSnapshot)
        # assert the multi_chan backup
        assert bob.verify_chan_backup(multi_chan_backup=all_backup.multi_chan_backup)
        # Simulate data loss: stop bob, delete his channel DB, restart.
        bob.stop()
        wipe_channels_from_disk(bob)
        bob.start()
        assert not bob.list_channels()
        assert bob.restore_chan_backup(
            multi_chan_backup=all_backup.multi_chan_backup.multi_chan_backup
        )
        # Recovery flow: shell insertion -> peer force-close -> sweep -> resolve.
        bob.daemon.wait_for_log("Inserting 1 SCB channel shells into DB")
        carol.daemon.wait_for_log("Broadcasting force close transaction")
        generate(bitcoind, 6)
        bob.daemon.wait_for_log("Publishing sweep tx", timeout=120)
        generate(bitcoind, 6)
        assert bob.daemon.wait_for_log(
            "a contract has been fully resolved!", timeout=120
        )

    def test_export_verify_restore_single(self, bitcoind, bob, carol):
        """Round-trip a single-channel backup packed into ChannelBackups."""
        bob, carol = setup_nodes(bitcoind, [bob, carol])
        funding_txid, output_index = bob.list_channels()[0].channel_point.split(":")
        channel_point = bob.channel_point_generator(
            funding_txid=funding_txid, output_index=output_index
        )
        single_backup = bob.export_chan_backup(chan_point=channel_point)
        assert isinstance(single_backup, rpc_pb2.ChannelBackup)
        packed_backup = bob.pack_into_channelbackups(single_backup=single_backup)
        # assert the single_chan_backup
        assert bob.verify_chan_backup(single_chan_backups=packed_backup)
        # Simulate data loss: stop bob, delete his channel DB, restart.
        bob.stop()
        wipe_channels_from_disk(bob)
        bob.start()
        assert not bob.list_channels()
        assert bob.restore_chan_backup(chan_backups=packed_backup)
        # Recovery flow mirrors the multi-backup test above.
        bob.daemon.wait_for_log("Inserting 1 SCB channel shells into DB")
        carol.daemon.wait_for_log("Broadcasting force close transaction")
        generate(bitcoind, 6)
        bob.daemon.wait_for_log("Publishing sweep tx", timeout=120)
        generate(bitcoind, 6)
        assert bob.daemon.wait_for_log(
            "a contract has been fully resolved!", timeout=120
        )
class TestInvoices:
    """Hold-invoice lifecycle test coordinating three worker threads:
    a subscriber on carol, a payer on bob, and a settler on carol.
    Thread start order and the sleeps between them are load-bearing.
    """

    def test_all_invoice(self, bitcoind, bob, carol):
        """Add a hold invoice, pay it, settle with the preimage, and assert
        the subscription stream eventually reports it settled."""
        bob, carol = setup_nodes(bitcoind, [bob, carol])
        _hash, preimage = random_32_byte_hash()
        # LIFO queue collecting streamed invoice states from the subscriber.
        invoice_queue = queue.LifoQueue()
        invoice = carol.add_hold_invoice(
            memo="pytest hold invoice", hash=_hash, value=SEND_AMT
        )
        decoded_invoice = carol.decode_pay_req(pay_req=invoice.payment_request)
        assert isinstance(invoice, invoices_pb2.AddHoldInvoiceResp)

        # thread functions — each swallows the grpc _Rendezvous raised when
        # its stream/call is torn down.
        def inv_sub_worker(_hash):
            try:
                for _response in carol.subscribe_single_invoice(_hash):
                    invoice_queue.put(_response)
            except grpc._channel._Rendezvous:
                pass

        def pay_hold_inv_worker(payment_request):
            # Blocks until the hold invoice is settled or cancelled.
            try:
                bob.pay_invoice(payment_request=payment_request)
            except grpc._channel._Rendezvous:
                pass

        def settle_inv_worker(_preimage):
            try:
                carol.settle_invoice(preimage=_preimage)
            except grpc._channel._Rendezvous:
                pass

        # setup the threads
        inv_sub = threading.Thread(
            target=inv_sub_worker, name="inv_sub", args=[_hash], daemon=True
        )
        pay_inv = threading.Thread(
            target=pay_hold_inv_worker, args=[invoice.payment_request]
        )
        settle_inv = threading.Thread(target=settle_inv_worker, args=[preimage])
        # start the threads
        inv_sub.start()
        # wait for subscription to start
        while not inv_sub.is_alive():
            time.sleep(0.1)
        pay_inv.start()
        # Give the payment time to reach the ACCEPTED state before settling.
        time.sleep(2)
        # carol.daemon.wait_for_log(regex=f'Invoice({decoded_invoice.payment_hash}): accepted,')
        settle_inv.start()
        while settle_inv.is_alive():
            time.sleep(0.1)
        inv_sub.join(timeout=1)
        # At least one streamed invoice state should show settled.
        assert any(invoice.settled is True for invoice in get_updates(invoice_queue))
class TestLoop:
    """Exercises the loopd client; currently skipped until a swap server
    is configured for the test harness."""

    @pytest.mark.skip(reason="waiting to configure loop swapserver")
    def test_loop_out_quote(self, bitcoind, alice, bob, loopd):
        """
        250000 satoshis is currently middle of range of allowed loop amounts
        """
        loop_amount = 250000
        alice, bob = setup_nodes(bitcoind, [alice, bob])
        # Guard clause: without the invoice RPC there is nothing to quote.
        if not alice.daemon.invoice_rpc_active:
            logging.info("test_loop_out() skipped as invoice RPC not detected")
            return
        quote = loopd.loop_out_quote(amt=loop_amount)
        assert quote is not None
        assert isinstance(quote, loop_client_pb2.QuoteResponse)

    @pytest.mark.skip(reason="waiting to configure loop swapserver")
    def test_loop_out_terms(self, bitcoind, alice, bob, loopd):
        """Fetching loop-out terms should yield a TermsResponse."""
        alice, bob = setup_nodes(bitcoind, [alice, bob])
        if not alice.daemon.invoice_rpc_active:
            logging.info("test_loop_out() skipped as invoice RPC not detected")
            return
        terms = loopd.loop_out_terms()
        assert terms is not None
        assert isinstance(terms, loop_client_pb2.TermsResponse)
|
import tensorflow as tf
from tensorflow.keras.layers import BatchNormalization, LeakyReLU
from tensorflow.keras.activations import relu, tanh, sigmoid
from tensorflow.keras.layers import Conv2D, Conv2DTranspose
from tensorflow.keras import Model
conv_initializer = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.02)
class Discriminator(Model):
    """DCGAN-style discriminator: four strided 4x4 convolutions (each followed
    by batch norm and LeakyReLU) and a final 1-channel convolution squashed
    through a sigmoid into a real/fake probability map.

    Fix: the four BatchNormalization layers were constructed twice; the second
    set of assignments silently overwrote the first, so four layers were built
    for nothing. The duplicates are removed — behavior is unchanged.
    """

    def __init__(self, feature_map_size):
        """feature_map_size: channel count of the first conv; later blocks
        use 2x/4x/8x multiples of it."""
        super(Discriminator, self).__init__()
        # Stride-2 convs halve spatial size each block (64 -> 32 -> 16 -> 8 -> 4).
        self.conv1 = Conv2D(feature_map_size, 4, 2, padding='same', use_bias=False,
                            kernel_initializer=conv_initializer)
        self.conv2 = Conv2D(feature_map_size * 2, 4, 2, padding='same', use_bias=False,
                            kernel_initializer=conv_initializer)
        self.conv3 = Conv2D(feature_map_size * 4, 4, 2, padding='same', use_bias=False,
                            kernel_initializer=conv_initializer)
        self.conv4 = Conv2D(feature_map_size * 8, 4, 2, padding='same', use_bias=False,
                            kernel_initializer=conv_initializer)
        # NOTE(review): head conv uses the default initializer and 'valid'
        # padding, unlike the other convs — presumably intentional; confirm.
        self.conv5 = Conv2D(1, 4, 1, use_bias=False)
        self.b_norm1 = BatchNormalization()
        self.b_norm2 = BatchNormalization()
        self.b_norm3 = BatchNormalization()
        self.b_norm4 = BatchNormalization()
        self.leaky_relu = LeakyReLU(alpha=0.2)

    def call(self, inputs, **kwargs):
        """Run the conv -> batchnorm -> LeakyReLU stack, then the sigmoid head."""
        x = self.conv1(inputs)
        x = self.b_norm1(x)
        x = self.leaky_relu(x)
        x = self.conv2(x)
        x = self.b_norm2(x)
        x = self.leaky_relu(x)
        x = self.conv3(x)
        x = self.b_norm3(x)
        x = self.leaky_relu(x)
        x = self.conv4(x)
        x = self.b_norm4(x)
        x = self.leaky_relu(x)
        x = self.conv5(x)
        x = sigmoid(x)
        return x
if __name__ == '__main__':
    import os

    # Hide all GPUs so the summary can be printed on a CPU-only machine.
    # NOTE(review): TensorFlow is already imported at module top; setting
    # CUDA_VISIBLE_DEVICES here relies on no GPU context having been
    # initialized yet — confirm this holds for the TF version in use.
    os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
    discriminator_model = Discriminator(64)
    # Build with a concrete NHWC shape (batch=1, 64x64 RGB) so summary() works.
    discriminator_model.build((1, 64, 64, 3))
    discriminator_model.summary()
|
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class Doctor(db.Model):
    """Login account for a doctor."""

    __tablename__ = 'doctor'

    DoctorId = db.Column(db.Integer, primary_key=True, autoincrement=True)
    username = db.Column(db.String, nullable=False, unique=True)
    email = db.Column(db.String, nullable=False, unique=True)
    # NOTE(review): stored as-is here; hashing presumably happens in the
    # view layer — confirm before relying on this for auth.
    password = db.Column(db.String, nullable=False)
class Admin(db.Model):
    """Login account for an administrator."""

    __tablename__ = 'admin'

    AdminId = db.Column(db.Integer, primary_key=True, autoincrement=True)
    username = db.Column(db.String, nullable=False, unique=True)
    email = db.Column(db.String, nullable=False, unique=True)
    password = db.Column(db.String, nullable=False)
class Patient(db.Model):
    """Patient account plus per-visit state (symptom, status, assigned doctor)."""

    __tablename__ = 'patient'

    PatientId = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # NOTE(review): unlike Doctor/Admin, username here is not unique — confirm
    # whether that is intentional.
    username = db.Column(db.String, nullable=False)
    name = db.Column(db.String, nullable=False)
    email = db.Column(db.String, nullable=False, unique=True)
    password = db.Column(db.String, nullable=False)
    address = db.Column(db.String, nullable=False)
    symptom = db.Column(db.String, nullable=True)
    status = db.Column(db.String, nullable=True)
    phone = db.Column(db.Integer)
    # Name of the assigned doctor and appointment date, stored as free-form
    # strings rather than foreign keys / date columns.
    docname = db.Column(db.String, nullable=True)
    date = db.Column(db.String, nullable=True)
class Appointment(db.Model):
    """An appointment linking a patient (appointed_by) to a doctor (appointed_to)."""

    __tablename__ = "appointment"

    appointmentId = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String, nullable=False)
    description = db.Column(db.String, nullable=False)
    # NOTE(review): date and both participants are free-form strings, not a
    # Date column / foreign keys — confirm this matches the callers.
    appointment_date = db.Column(db.String, nullable=False)
    appointed_by = db.Column(db.String, nullable=False)
    appointed_to = db.Column(db.String, nullable=False)
class Order(db.Model):
    """An order placed for a patient (pUserName), routed by orderFor."""

    # NOTE(review): "order" is an SQL reserved word; most dialects require
    # quoting — SQLAlchemy handles this, but raw SQL against the table won't.
    __tablename__ = "order"

    orderId = db.Column(db.Integer, primary_key=True, autoincrement=True)
    pUserName = db.Column(db.String, nullable=False)
    description = db.Column(db.String, nullable=False)
    orderFor = db.Column(db.String, nullable=False)
class Laboratorist(db.Model):
    """Login account for a laboratorist."""

    __tablename__ = 'laboratory'

    labId = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # NOTE(review): no unique constraint on username/email here, unlike
    # Doctor/Admin — confirm whether duplicates are acceptable.
    username = db.Column(db.String, nullable=False)
    email = db.Column(db.String, nullable=False)
    password = db.Column(db.String, nullable=False)
class Pharmasist(db.Model):
    """Login account for a pharmacist.

    NOTE(review): class name is misspelled ("Pharmasist"); renaming would
    break callers, so it is kept as-is.
    """

    __tablename__ = 'pharmacy'

    pharmaId = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # NOTE(review): no unique constraint on username/email, unlike Doctor/Admin.
    username = db.Column(db.String, nullable=False)
    email = db.Column(db.String, nullable=False)
    password = db.Column(db.String, nullable=False)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.