index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
21,000 | 72864d34cf7330d594b95e718f1dc6c4eee7c4c7 | import os
import setuptools
# Absolute path of the directory containing this setup script.
# NOTE(review): _here is unused below — presumably intended for building
# the README path; confirm before removing.
_here = os.path.abspath(os.path.dirname(__file__))

# Read the long description from the README.
# NOTE(review): no encoding given, so the platform default is used;
# consider encoding="utf-8" for reproducible builds.
with open("README.md", "r") as fh:
    long_description = fh.read()
# Package metadata and dependency declaration for the yelp_analysis project.
setuptools.setup(
    name="yelp_analysis",
    version='0.0.2',
    author="Ian Buttimer",
    author_email="author@example.com",
    description="Yelp Open Dataset Analysis",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/ib-da-ncirl/yelp_analysis",
    license='MIT',
    packages=setuptools.find_packages(),
    install_requires=[
        'pandas>=1.0.5',
        'dask>=2.19.0',
        'pillow>=7.2.0',
        # FIX: removed 'yaml>=0.2.5' — 'yaml' is not a PyPI distribution
        # (0.2.5 is the libyaml C library version); the importable module
        # is provided by pyyaml, already listed below.
        'pyyaml>=5.3.1',
        'setuptools>=47.3.1',
        # NOTE(review): tensorflow>=2.2 ships GPU support in the main
        # wheel, so tensorflow-gpu is redundant; kept for compatibility.
        'tensorflow>=2.2.0',
        'tensorflow-gpu>=2.2.0',
        'numpy>=1.18.5',
        # BUG FIX: a missing comma here previously concatenated the two
        # adjacent string literals into the single invalid requirement
        # 'matplotlib>=3.2.2keras>=2.4.3', dropping keras entirely.
        'matplotlib>=3.2.2',
        'keras>=2.4.3',
        'keras-tuner>=1.0.1',
        'scikit-learn>=0.23.1',
        'pydot>=1.3.0',
        'pygraphviz>=1.3',
    ],
    dependency_links=[
    ],
    classifiers=[
        'Development Status :: 3 - Alpha',
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
)
|
21,001 | 1f936fedaece0fdb1ca6d7028ab43691d9bc9a69 | #!/usr/bin/env python
"""
@package ion.services.mi.drivers.uw_trhph.test.test_trhph_driver
@file ion/services/mi/drivers/uw_trhph/test/test_trhph_driver.py
@author Carlos Rueda
@brief Direct tests to the TrhphInstrumentDriver class.
"""
__author__ = "Carlos Rueda"
__license__ = 'Apache 2.0'
from ion.services.mi.drivers.uw_trhph.trhph_driver import TrhphInstrumentDriver
from ion.services.mi.drivers.uw_trhph.test import TrhphTestCase
from ion.services.mi.drivers.uw_trhph.test.driver_test_mixin import DriverTestMixin
from nose.plugins.attrib import attr
from ion.services.mi.mi_logger import mi_logger
log = mi_logger
import unittest
import os
@unittest.skipIf(os.getenv('run_it') is None,
                 '''Not run by default because of mixed monkey-patching issues. \
Define environment variable run_it to force execution.''')
@attr('UNIT', group='mi')
class DriverTest(TrhphTestCase, DriverTestMixin):
    """Driver-level tests for TrhphInstrumentDriver.

    The concrete test methods come from DriverTestMixin; this class only
    prepares the fixture attributes the mixin expects.
    """

    def setUp(self):
        """Create the driver under test and the comms configuration."""
        TrhphTestCase.setUp(self)

        def evt_callback(event):
            log.info("CALLBACK: %s" % str(event))

        # Attributes required by DriverTestMixin.
        self.driver = TrhphInstrumentDriver(evt_callback)
        self.comms_config = {'addr': self.device_address,
                             'port': self.device_port}
|
21,002 | 6b0884f46c1988ddb638fdf6e6835e8f95f03dfb | import os, sys
import Tkinter
import tkFileDialog
import PIL
from PIL import ImageTk, Image
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.path as path
import cv2
from scipy.cluster.vq import kmeans
from skimage import data, img_as_float
#from skimage.measure import compare_ssim as ssim
from skimage.measure import structural_similarity as ssim
# Lower-case letters "a".."z", used as SGF board coordinates.
LETTERS = [chr(code) for code in range(ord("a"), ord("z") + 1)]
class Rectangle:
    """Axis-aligned rectangle: top-left corner (x, y) plus width/height."""

    def __init__(self, x_param=0, y_param=0, w_param=0, h_param=0):
        self.x = x_param
        self.y = y_param
        self.w = w_param
        self.h = h_param

    def __str__(self):
        # %s applies str() to each field, matching the original output.
        return "Width = %s, Height = %s" % (self.w, self.h)
class MainWindow:
    """Tk window that turns a video of a Go game into an SGF record.

    Workflow: File -> Load Image opens a video and displays its first
    frame; the user clicks the four board corners; when the fourth corner
    is clicked, main() runs the extraction pipeline and writes output.txt.
    """

    def __init__(self, master):
        # Video state (filled in by load()).
        self.video = None
        self.frame_rate = 0
        self.video_length = 0
        # The scaled image used for display. Needs to persist for display.
        self.display_image = None
        self.display_ratio = 0
        # Corner-collection state.
        self.awaiting_corners = False
        self.corners = []

        # Tkinter related fields.
        self.master = master
        self.master.title("Auto Kifu Test2")
        # NOTE(review): reads the module-level `root` rather than `master`;
        # works only because a single MainWindow is created at module scope.
        self.window_width = root.winfo_screenwidth()
        self.window_height = root.winfo_screenheight() - 100
        self.master.geometry("%dx%d+0+0" % (self.window_width, self.window_height))
        self.master.configure(background='grey')
        self.canvas = Tkinter.Canvas(self.master)
        self.canvas.place(x=0,
                          y=0,
                          width=self.window_width,
                          height=self.window_height)
        self.canvas.bind("<Button-1>", self.mouse_clicked)
        self.menubar = Tkinter.Menu(root)
        root.config(menu=self.menubar)
        self.fileMenu = Tkinter.Menu(self.menubar)
        # BUG FIX: was command=self.load(), which *called* load() during
        # construction (popping the file dialog at start-up) and bound the
        # menu entry to its return value (None). Bind the method itself.
        self.fileMenu.add_command(label="Load Image", command=self.load)
        self.menubar.add_cascade(label="File", menu=self.fileMenu)

    def mouse_clicked(self, event):
        """Collect board corners (scaled back to original-image coords)."""
        if self.awaiting_corners:
            self.draw_x(event.x, event.y)
            self.corners += [(event.x/self.display_ratio, event.y/self.display_ratio)]
            if len(self.corners) == 4:
                self.awaiting_corners = False
                self.main()

    def main(self):
        """Full pipeline: sample video, classify intersections, emit SGF."""
        board_positions, crop_window = self.find_grid(self.corners)
        frames = self.parse_video(crop_window)
        # Grayscale + heavy blur so stone/board intensity dominates texture.
        for x in range(len(frames)):
            frames[x] = cv2.cvtColor(frames[x], cv2.COLOR_BGR2GRAY)
            frames[x] = cv2.GaussianBlur(frames[x], (51, 51), 0)
        thresholds = self.determine_thresholds(frames[-1], board_positions)
        for x in range(len(frames)):
            cv2.imwrite('output/2/frames'+str(x)+'.png', frames[x])
        for x in range(len(frames)):
            frames[x] = self.parse_frames(frames[x], board_positions, thresholds)
        for x in range(1, len(frames)):
            print("Board: "+str(x))
            self.print_board(frames[x])
        # Build the SGF: one move node per newly occupied intersection.
        output = "(;GM[1]FF[4]CA[UTF-8]AP[CGoban:3]ST[2]SZ[19]"
        for i in range(1, len(frames)):
            moves = self.frame_difference(frames[i-1], frames[i])
            for move in moves:
                color = move["color"]
                x = LETTERS[move["position"][0]]
                y = LETTERS[move["position"][1]]
                output += ";"+color+"["+x+y+"]"
        output += ")"
        # FIX: context manager closes the handle even on error (was a
        # manual open/close that also shadowed the py2 builtin `file`).
        with open("output.txt", "w") as sgf_file:
            sgf_file.write(output)

    def find_grid(self, corners):
        """Map the clicked corners to the 19x19 intersection grid.

        Returns (board_positions, crop_window): board_positions[x][y] is a
        (row, col) pixel pair relative to the crop window, which adds one
        grid spacing of margin around the board.
        """
        top_left = corners[0]
        bottom_right = corners[2]
        board_width = bottom_right[0] - top_left[0]
        board_height = bottom_right[1] - top_left[1]
        # 19 lines -> 18 gaps.
        horizontal_spacing = board_width / 18
        vertical_spacing = board_height / 18
        crop_window = Rectangle()
        crop_window.x = int(top_left[0] - horizontal_spacing)
        crop_window.y = int(top_left[1] - vertical_spacing)
        crop_window.w = int(board_width + (2 * horizontal_spacing))
        crop_window.h = int(board_height + (2 * vertical_spacing))
        board_positions = []
        for x in range(0, 19):
            board_positions += [[]]
            for y in range(0, 19):
                x_coord = int(top_left[0] + horizontal_spacing * x)
                y_coord = int(top_left[1] + vertical_spacing * y)
                x_coord -= crop_window.x
                y_coord -= crop_window.y
                board_positions[x] += [(y_coord, x_coord)]
        return board_positions, crop_window

    def print_board(self, frame):
        """Print a 19x19 text diagram (B/W/+) of one parsed board."""
        print("-------------------")
        for y in range(19):
            string = ""
            for x in range(19):
                string += frame[x][y]
            print(string)
        print("-------------------")

    def parse_video(self, crop_window):
        """Scan the video and return the frames where the board changed.

        A frame is kept once the scene has been still for `still_duration`
        frames and it differs enough (MSE > 20) from the last kept frame.
        Also writes the cropped video to output.avi.
        """
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out1 = cv2.VideoWriter('output.avi', fourcc, 30.0, (crop_window.w, crop_window.h))
        success, current_frame = self.video.read()
        current_frame = current_frame[crop_window.y:crop_window.y + crop_window.h,
                                      crop_window.x:crop_window.x + crop_window.w]
        differences = []
        final_video = [current_frame]
        while (self.video.isOpened() and success):
            last_frame = current_frame
            success, current_frame = self.video.read()
            if not success: break
            current_frame = current_frame[crop_window.y:crop_window.y+crop_window.h,
                                          crop_window.x:crop_window.x+crop_window.w]
            out1.write(current_frame)
            s = self.mse_total(last_frame, current_frame)
            #s = ssim(last_frame, current_frame) # Doesn't Work
            differences += [s]
            # Still when none of the last `still_duration` diffs exceed 4.
            recently_still = True
            still_duration = 15
            for j in range(still_duration):
                # BUG FIX: was differences[-j]; for j == 0 that reads
                # differences[0] (the *oldest* diff). Inspect the most
                # recent `still_duration` entries instead.
                if j < len(differences) and differences[-1 - j] > 4:
                    recently_still = False
            if recently_still:
                #out1.write(current_frame)
                s = self.mse_total(current_frame, final_video[-1])
                if s > 20:
                    final_video += [current_frame]
        #plt.hist(differences, bins=400)
        plt.title("Frame Difference Historgram")
        plt.xlabel("Difference (mean squared error)")
        plt.ylabel("Number of Frames")
        #plt.show()
        time = np.arange(0, self.video_length/self.frame_rate, 1.0/self.frame_rate)
        time = time[:len(differences)]
        #plt.plot(time, differences)
        plt.xlabel('time (s)')
        plt.ylabel('Difference')
        plt.title('MSE over Time')
        plt.grid(True)
        #plt.show()
        out1.release()
        '''
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out2 = cv2.VideoWriter('output2.avi', fourcc, 30.0,
                               (self.crop_w, self.crop_h))
        for x in final_video:
            for y in range(30):
                out2.write(x)
        out2.release()
        '''
        return final_video

    def mse_total(self, imageA, imageB):
        """Mean squared error between two equally-sized images
        (normalised by pixel count, summed over channels)."""
        err = np.sum((imageA.astype("float") - imageB.astype("float")) ** 2)
        err /= float(imageA.shape[0] * imageA.shape[1])
        return err

    def mse_image(self, imageA, imageB):
        """Per-pixel squared difference image (no reduction)."""
        return (imageA - imageB) ** 2

    def determine_thresholds(self, image, board_positions):
        """Pick stone-classification thresholds via 3-means clustering of
        intersection intensities (black stones / board / white stones)."""
        samples = []
        for x in range(0, 19):
            for y in range(0, 19):
                position = board_positions[x][y]
                samples += [float(image[position[0]][position[1]])]
        plt.hist(samples, bins=255)
        plt.title("Intersection Intensity Historgram")
        plt.xlabel("Intensity (Greyscale)")
        plt.ylabel("Number of Intersections")
        # plt.show()
        centroids, _ = kmeans(samples, 3)
        plt.axvline(x=centroids[0], color="red")
        plt.axvline(x=centroids[1], color="red")
        plt.axvline(x=centroids[2], color="red")
        plt.show()
        # FIX: locals renamed — the originals shadowed the min/max builtins.
        lo = 0
        mid = 0
        hi = 0
        for k in range(0, 3):
            if centroids[k] < centroids[lo]:
                lo = k
            if centroids[k] > centroids[hi]:
                hi = k
        for k in range(0, 3):
            if k != lo and k != hi:
                mid = k
        lo = centroids[lo]
        mid = centroids[mid]
        hi = centroids[hi]
        threshold1 = (lo + mid) / 2
        threshold2 = (hi + mid) / 2
        print("threshold 1 = "+str(threshold1))
        print("threshold 2 = "+str(threshold2))
        # NOTE(review): the computed thresholds are discarded in favour of
        # hard-coded values — presumably a debugging leftover; confirm.
        #return [threshold1, threshold2]
        return [120,185]

    def parse_frames(self, image, board_positions, thresholds):
        """Classify every intersection of a blurred grayscale frame as
        "B" (below thresholds[0]), "W" (above thresholds[1]) or "+"."""
        return_array = []
        for x in range(0, 19):
            return_array += [[]]
            for y in range(0, 19):
                position = board_positions[x][y]
                intensity = image[position[0]][position[1]]
                if intensity < thresholds[0]:
                    return_array[x] += ["B"]
                elif intensity > thresholds[1]:
                    return_array[x] += ["W"]
                else:
                    return_array[x] += ["+"]
        return return_array

    def frame_difference(self, former_frame, later_frame):
        """List the moves (previously empty, now occupied points) between
        two parsed boards."""
        moves = []
        for x in range(19):
            for y in range(19):
                if (later_frame[x][y] != former_frame[x][y]
                        and former_frame[x][y] == "+"):
                    moves += [{"color": later_frame[x][y],
                               "position": (x, y)}]
        return moves

    def display_grid(self, board_positions):
        """Debug helper: mark every computed intersection on the canvas."""
        for x in range(0, 19):
            for y in range(0, 19):
                self.draw_x(board_positions[x][y][1],
                            board_positions[x][y][0],
                            transform=self.display_ratio)

    def draw_x(self, x, y, radius=10, width=3, color="red", transform=1):
        """Draw an X centred on (x, y); coordinates are multiplied by
        `transform` to map image space onto the scaled display."""
        self.canvas.create_line((x-radius)*transform,
                                (y-radius)*transform,
                                (x+radius)*transform,
                                (y+radius)*transform,
                                width=width,
                                fill=color)
        self.canvas.create_line((x-radius)*transform,
                                (y+radius)*transform,
                                (x+radius)*transform,
                                (y-radius)*transform,
                                width=width,
                                fill=color)

    def load(self):
        """Ask for a video file, show its first frame scaled to fit, then
        start corner collection."""
        # Load Video
        dir_path = os.path.dirname(os.path.realpath(__file__))
        path = tkFileDialog.askopenfilename(initialdir=dir_path,
                                            title="Select file",
                                            filetypes=(
                                                ("mp4 files", "*.mp4"),
                                                ("jpeg files", "*.jpg"),
                                                ("png files", "*.png")))
        self.video = cv2.VideoCapture(path)
        self.frame_rate = self.video.get(cv2.CAP_PROP_FPS)
        self.video_length = int(self.video.get(cv2.CAP_PROP_FRAME_COUNT))
        success, first_frame = self.video.read()
        image_height, image_width = first_frame.shape[:2]
        # Display Image
        self.display_ratio = float(self.window_height - 200)/image_height
        resize_dimentions = (int(image_width*self.display_ratio), int(image_height*self.display_ratio))
        resized_image = cv2.resize(first_frame, resize_dimentions, interpolation=cv2.INTER_CUBIC)
        tk_image = self.convert_cv2_to_PIL(resized_image)
        self.display_image = PIL.ImageTk.PhotoImage(tk_image)
        self.canvas.create_image(0, 0, anchor="nw", image=self.display_image)
        # cue corner collection
        self.awaiting_corners = True

    def convert_cv2_to_PIL(self, cv2image):
        """Convert a BGR cv2 image into an RGB PIL Image."""
        cv2_im = cv2.cvtColor(cv2image, cv2.COLOR_BGR2RGB)
        return PIL.Image.fromarray(cv2_im)
# Script entry point: build the Tk root window, attach the application
# window, and enter the Tk event loop (blocks until the window closes).
root = Tkinter.Tk()
main_window = MainWindow(root)
root.mainloop()
|
21,003 | 7b9184bb18b0f5a997f661b15ef510aef30e0332 | """
CCT modelling / optimisation code
A19: straight-line dipole magnet (LocalUniformMagnet)
Author: Zhao Runxiao
Date: 2021-05-02
"""
from os import error, path
import sys
sys.path.append(path.dirname(path.abspath(path.dirname(__file__))))
from cctpy import *
# Local frame for the magnet: origin at global (1, 2, 3); the local x axis
# points along global y and the local z axis along global x.
lcs = LocalCoordinateSystem(
    location=P3(1,2,3),
    x_direction=P3.y_direct(),
    z_direction=P3.x_direct()
)
# Uniform-field magnet: 0.5 long, 0.05 aperture radius, field strength 50
# (units per cctpy convention — presumably metres/tesla; confirm against
# LocalUniformMagnet's documentation).
lum = LocalUniformMagnet(
    local_coordinate_system=lcs,
    length=0.5,
    aperture_radius=0.05,
    magnetic_field=50
)
# Probe the field just either side of the origin, inside the aperture.
print(lum.magnetic_field_at(P3(1,2,3)+P3.x_direct(0.001)))
print(lum.magnetic_field_at(P3(1,2,3)-P3.x_direct(0.001)))
print(lum.magnetic_field_at(P3(1,2,3)+P3.x_direct(0.001)+P3.y_direct(0.001)))
# 0.5 along y: at/beyond the magnet end faces.
print(lum.magnetic_field_at(P3(1,2,3)+P3.x_direct(0.001)+P3.y_direct(0.5)))
print(lum.magnetic_field_at(P3(1,2,3)+P3.x_direct(0.001)-P3.y_direct(0.5)))
21,004 | 4ac4090841d0295467ea1d9360398c48924056e5 | import time
from tkinter import *
import sys
from ADDCLASS import addclass
from ADDTRADEMARK import addtrademark
from LISTBYCLASS import trademarklistbyclass
from TRADEMARKLIST import trademarkdetails
from UPDATE import updatetrademark
class main_pageemp:
    """Employee main window: a menu bar exposing trademark CRUD screens."""

    def __init__(self, mywindow):
        # NOTE(review): a brand-new Tk root is created here and the
        # `mywindow` argument is only used for the key bindings below —
        # confirm whether the commented-out `self.mywindow = mywindow`
        # was the real intent (two Tk roots can coexist confusingly).
        self.mywindow = Tk()
        # self.mywindow=mywindow
        self.mywindow.wm_title("EMPLOYEE PAGE")
        menubar = Menu(self.mywindow)
        self.mywindow.option_add("*tearOff", False)
        self.mywindow.config(menu=menubar)
        # w = mywindow.winfo_screenwidth()
        # h = mywindow.winfo_screenheight()
        # mywindow.geometry("%dx%d+%d+%d" % (w, h, 0, -9))
        self.mywindow.wm_minsize(1350,1200)
        # self.img = PhotoImage(file="C:\\Users\\rajat\\PycharmProjects\\Images\\hello.png")
        # self.img2 = Label(mywindow,height=35,width=80, bg="blue")
        # self.img2.place(x=0, y=98)
        # self.img2.config(image=self.img)

        # Top-level menus.
        File = Menu(menubar)
        Class = Menu(menubar)
        Details = Menu(menubar)
        menubar.add_cascade(menu=File, label="FILE")
        menubar.add_cascade(menu=Class, label="CLASS")
        menubar.add_cascade(menu=Details, label="DETAILS")
        File.add_command(label="Add Trademark", accelerator="Ctrl+n", command=self.addtrademarkform)
        # NOTE(review): the Ctrl+n callback opens the form against the
        # *parameter* window, while the menu entry uses self.mywindow.
        self.mywindow.bind("<Control-n>", lambda e: addtrademark(mywindow))
        File.add_command(label="Search / Update / Delete Student", command=self.updateframe)
        Class.add_command(label="Add Class", command=self.addclassform)
        # NOTE(review): the next two entries have no command= callback yet.
        Class.add_command(label="List Class")
        Class.add_command(label="Delete Class")
        Details.add_command(label="List of trademark", command=self.trademarklistform)
        Details.add_command(label="Trademark list by class", command=self.trademarkbyclass)
        # Bound on the passed-in window, not the Tk root created above.
        mywindow.bind("<Control-q>", self.quitwindow)

    def quitwindow(self, e):
        """<Control-q> handler: terminate the whole process."""
        sys.exit()

    def addtrademarkform(self):
        """Open the 'add trademark' form."""
        addtrademark(self.mywindow)

    def addclassform(self):
        """Open the 'add class' form."""
        addclass(self.mywindow)

    def trademarklistform(self):
        """Open the trademark details listing."""
        trademarkdetails(self.mywindow)

    def trademarkbyclass(self):
        """Open the trademark-by-class listing."""
        trademarklistbyclass(self.mywindow)

    def updateframe(self):
        """Open the search/update/delete screen."""
        updatetrademark(self.mywindow)
# my_frame = Tk()
# obj = main_pageemp(my_frame)
# my_frame.mainloop() |
21,005 | 229b37dfe544605b477ab3dfb3e3aee68e7a3198 | from django.shortcuts import render, redirect
from .models import Manager, Employee
from django.views.generic import CreateView
from .decorators import manager_required
from django.utils.decorators import method_decorator
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from medicine.models import Company, MedicineProduct, Sale
@method_decorator([login_required, manager_required], name='dispatch')
class Profile(CreateView):
    """Create a Manager profile owned by the logged-in user."""

    model = Manager
    template_name = 'manager/profile.html'
    fields = ('pharmacy_name', 'name', 'contact', 'address')

    def form_valid(self, form):
        """Attach the current user before saving, then go to manager home."""
        profile = form.save(commit=False)
        profile.user = self.request.user
        profile.save()
        messages.success(self.request, 'The profile was created with success! ')
        return redirect('App_Login:manager_main_home')
def index(request):
    """Render the manager landing page."""
    return render(request, 'manager/manager_home_page.html')
@login_required
def manager_dashboard(request):
    """Render the manager dashboard with stock and profit summary figures.

    Context: total medicine products, number of companies, and the summed
    profit over all recorded sales.
    """
    # COUNT(*) executed database-side instead of fetching whole tables
    # only to call .count() on them (and fixes the 'meadicine' typo).
    medicine_count = MedicineProduct.objects.count()
    company_count = Company.objects.count()
    # Profit needs per-sale model logic, so the sales are iterated;
    # a generator expression avoids building an intermediate list.
    profit = sum(sale.get_profit() for sale in Sale.objects.all())
    diction = {
        'medicine_count': medicine_count,
        'profit': profit,
        'company_count': company_count,
    }
    return render(request, 'manager/manager_main_homepage.html', context=diction)
|
21,006 | 1da127b9a24dbee20cf0c4236f9bbad12dcdc28b |
from django.urls import path
from . import views
# URL namespace, e.g. reverse('posts:detail', ...).
app_name = 'posts'

urlpatterns = [
    # Post collection.
    path('list/', views.p_list, name='list'),
    path('create/', views.p_create, name='create'),
    # Single-post actions, keyed by the post's integer primary key.
    path('<int:post_id>/delete/', views.p_delete, name='delete'),
    path('<int:post_id>/update/', views.p_update, name='update'),
    path('<int:post_id>/detail/', views.p_detail, name='detail'),
    # Comment actions nested under the post detail page.
    path('<int:post_id>/detail/sendcomment/', views.p_comment, name='send_comment'),
    path('<int:post_id>/detail/delcomment/<int:comment_id>/', views.c_delete, name='del_comment_detail'),
]
|
21,007 | b2fb2bd739e08ea6e4c4c2adbbbf33b81c0ec558 | from enum import Enum
class APIErrorStausCode(Enum):
    """API error catalogue (application codes start at 1000).

    Each member's value bundles the HTTP status, an application-specific
    error code and a human-readable message. The properties below give
    typed access without reaching into the raw dict.

    NOTE(review): the class name has a typo ("Staus" for "Status"); it is
    kept because external callers import it under this name.
    """

    # start with 1000
    DATABASE_ERR = {
        "status": 400,
        "code": 1001,
        "message": "database error"
    }
    EXCLUDED_DOMAIN = {
        "status": 400,
        "code": 1002,
        "message": "excluded domain"
    }
    NOT_URL = {
        "status": 400,
        "code": 1003,
        "message": "not url"
    }

    @property
    def status(self):
        """HTTP status for this error."""
        return self.value["status"]

    @property
    def code(self):
        """Application-specific error code."""
        return self.value["code"]

    @property
    def message(self):
        """Human-readable error message."""
        return self.value["message"]
|
21,008 | 98386b844089c867d854911692806cd995e504fe | from pythonping import ping
# Send ICMP echo requests to Google's public DNS (8.8.8.8) using
# pythonping's default ping count, printing nothing itself (the result
# object is discarded).
ping('8.8.8.8')
21,009 | 9abd4ebc46577ac49887349f71e8b412aab2fc48 | import json
import logging
import os
import sys
from dotenv import load_dotenv,find_dotenv
from telebot import TeleBot
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from src.yandex_api import yandex_diccionary
from src.telegram_object import TelegramMessageObject
# Load variables from a .env file into the process environment.
load_dotenv(find_dotenv())

logger = logging.getLogger()
logger.setLevel(logging.INFO)

# Telegram bot and Yandex dictionary/translate client, configured from
# environment variables.
bot = TeleBot(os.getenv("telegram_token"))
yandex_object = yandex_diccionary(yandex_api_key=os.getenv("yandex_api_key"),
                                  yandex_diccionary_key=os.getenv("yandex_diccionary"))

# Cached at import time: language code -> name (English UI) and the list
# of supported translation direction pairs.
yandex_languages = yandex_object.get_all_available_languages(language_id='en')
yandex_translations = yandex_object.get_all_available_translations()

# Canned AWS Lambda proxy-integration responses.
OK_RESPONSE = {"statusCode": 200, "headers": {}, "body": "Succesfull Requests"}
FAILED_RESPONSE = {"statusCode": 404, "headers": {},
                   "body": "Something goes wrong"}
def proccess_lambda_headers(headers, headers_key):
    """Fetch *headers_key* from a Lambda event dict and JSON-decode it.

    Single quotes are stripped before decoding; returns None when the key
    is absent or its value is empty. (Function name typo preserved —
    callers use `proccess_...`.)
    """
    raw = headers.get(headers_key)
    if not raw:
        return None
    return json.loads(raw.replace("'", ""))
def parse_translator(text):
    """Parse '<lang> <message>' and return the Yandex translation, or None
    when the text has no message part or translation fails."""
    parts = text.split(' ')
    if len(parts) < 2:
        return None
    target_language = parts[0]
    message = ' '.join(parts[1:])
    source_language = yandex_object.detect_language(message)
    translated = yandex_object.translate_message(
        message=message,
        language_to_translate=target_language,
        message_language=source_language)
    return translated or None
def send_welcome(chat_id, message_id, message):
    """Handle /start: greet the user.

    NOTE(review): parameter order (chat_id first) differs from the other
    handlers; safe only because telegram_execute_method calls by keyword.
    """
    bot.send_message(chat_id, "Welcome to translator")
def send_languages(message_id, chat_id, message):
    """Handle /languages: reply with every 'code––>name' pair, one per line."""
    lines = [f'{code}––>{name}\n' for code, name in yandex_languages.items()]
    bot.send_message(reply_to_message_id=message_id, chat_id=chat_id,
                     text=''.join(lines))
def send_help(message_id, chat_id, message):
    """Handle /help: reply with the bot's usage instructions."""
    # Runtime text kept verbatim (including its typos) — changing it would
    # alter user-visible behaviour.
    help_message = f'Translate a message to the language you want, follow this steps:\n' \
                   f'\t\t 1.Choose your target translate language in the two digits code.\n' \
                   f'\t\t\t\t To see all the differents languages codes use /languages command.\n' \
                   f'\t\t\t\t To see all the available translations use /translations command.\n'\
                   f'\t\t 2.Write your message.\n' \
                   f'The message should follow the following structure LANGUAGECODE message.\n' \
                   f'just as an example: en eres tonto si estas utilizando este bot.'
    bot.send_message(reply_to_message_id=message_id, chat_id=chat_id, text=help_message)
def send_translation(message_id, chat_id, message):
    """Handle /translations: reply with the comma-separated direction pairs."""
    pairs = [pair for pair in yandex_translations]
    bot.send_message(reply_to_message_id=message_id, chat_id=chat_id,
                     text=','.join(pairs))
def send_synonyms(message_id, chat_id, message):
    """Handle /synonyms [lang] <word...>: reply with dictionary synonyms.

    If the token after /synonyms is a known language code it selects the
    lookup language; otherwise the whole remainder is treated as the word
    and the language is left as None.
    """
    synonyms_input = message.split(' ')
    target_language = None
    # NOTE(review): split() here vs split(' ') above — they differ on
    # consecutive spaces; presumably unintentional, confirm before unifying.
    target_message = message.split()[1:]
    if len(synonyms_input)>=3 and synonyms_input[1] in yandex_languages:
        target_language = synonyms_input[1]
        target_message = synonyms_input[2:]
    input_message = ' '.join(word for word in target_message)
    synonyms = yandex_object.get_word_diccionary(text=input_message,
                                                 language=target_language)
    # Silent on lookup failure (no reply sent).
    if synonyms:
        reply_message = '\n'.join(synonym for synonym in synonyms)
        bot.send_message(reply_to_message_id=message_id, chat_id=chat_id,
                         text=reply_message)
def translate_message(message_id, chat_id, message):
    """Default handler: translate the message and reply (silent on failure)."""
    # Local renamed — the original shadowed this function's own name.
    result = parse_translator(message)
    if not result:
        return
    bot.send_message(reply_to_message_id=message_id, chat_id=chat_id,
                     text=result)
def telegram_execute_method(message_id, chat_id, message):
    """Dispatch on the leading token; unknown commands go to the translator."""
    command = message.split(' ')[0]
    handlers = {
        '/start': send_welcome,
        '/languages': send_languages,
        '/translations': send_translation,
        '/help': send_help,
        '/synonyms': send_synonyms,
    }
    handler = handlers.get(command, translate_message)
    handler(message_id=message_id, chat_id=chat_id, message=message)
def telegram_trigger(event, context):
    """AWS Lambda entry point for the Telegram webhook.

    Decodes the proxy-integration body, dispatches the message to a
    handler, and returns a canned 200/404 response dict.
    """
    logger.info("lambda event triggered")
    logger.info(f"{context}")
    logger.info(f"{event}")
    requests_body = proccess_lambda_headers(event, 'body')
    if not requests_body:
        logger.error("No body key")
    else:
        telegram_object = TelegramMessageObject(requests_body)
        # NOTE(review): a freshly constructed object is presumably always
        # truthy unless TelegramMessageObject defines __bool__ — confirm.
        if telegram_object:
            telegram_execute_method(message=telegram_object.text,
                                    chat_id=telegram_object.chat_id,
                                    message_id=telegram_object.message_id)
            return OK_RESPONSE
    return FAILED_RESPONSE
21,010 | bec51e33489f3de9f5fde69c8e846d946641274b | # Filter with Convolution
from tensorflow import keras
from tensorflow.keras import layers
# --- Filter with convolution only (no activation) ---
model = keras.Sequential([
    layers.Conv2D(filters=64, kernel_size=3),  # activation is None
    # More layers follow
])

# Detect with ReLU (Module: tf.keras.layers)
model = keras.Sequential([
    layers.Conv2D(filters=64, kernel_size=3, activation='relu')
    # More layers follow
])

# Apply Convolution and ReLU (Modules: tf.constant and tf.nn)
import tensorflow as tf

# 3x3 edge-detection kernel: strong centre, negative surround.
kernel = tf.constant([
    [-1, -1, -1],
    [-1, 8, -1],
    [-1, -1, -1],
])
# NOTE(review): `plt`, `show_kernel` and `image` are not defined in this
# snippet — it presumably runs in a notebook where matplotlib.pyplot is
# imported as plt and image/show_kernel are defined earlier; confirm.
plt.figure(figsize=(3, 3))
show_kernel(kernel)

# Convolve the image with the kernel, preserving spatial size ('SAME').
image_filter = tf.nn.conv2d(
    input=image,
    filters=kernel,
    strides=1,
    padding='SAME',
)
plt.figure(figsize=(6, 6))
plt.imshow(tf.squeeze(image_filter))
plt.axis('off')
plt.show();

# ReLU zeroes out negative responses, keeping only "detected" features.
image_detect = tf.nn.relu(image_filter)
plt.figure(figsize=(6, 6))
plt.imshow(tf.squeeze(image_detect))
plt.axis('off')
plt.show();
21,011 | 0ef0f86fc27b244cfb4b223488aa414f5a860375 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from seamicroclient.tests import utils
from seamicroclient.tests.v2 import fakes
from seamicroclient.v2 import system
# Shared fake client: records requests so tests can assert on them.
cs = fakes.FakeClient()
class Systemstest(utils.TestCase):
    """Tests for the chassis-level system API of the v2 client."""

    def test_list_system(self):
        """list() issues GET /chassis/systems and yields System objects."""
        pl = cs.system.list()
        cs.assert_called('GET', '/chassis/systems')
        # FIX: was a list comprehension used purely for its side effects;
        # a plain loop with assertIsInstance states the intent (and gives
        # a clearer failure message).
        for s in pl:
            self.assertIsInstance(s, system.System)

    def test_switchover_system(self):
        """switchover() issues PUT /chassis/system/switchover."""
        cs.system.switchover(1)
        cs.assert_called('PUT', '/chassis/system/switchover')

    def test_writemem_system(self):
        """writemem() issues PUT /chassis/system/writeMem."""
        cs.system.writemem(1)
        cs.assert_called('PUT', '/chassis/system/writeMem')

    def test_reload_system(self):
        """reload() issues PUT /chassis/system/reload."""
        cs.system.reload(1)
        cs.assert_called('PUT', '/chassis/system/reload')
21,012 | 2a86e473f8cef97d0e7ef4d27f31c32652969f6f | # By submitting this assignment, I agree to the following:
# "Aggies do not lie, cheat, or steal, or tolerate those who do"
# "I have not given or received any unauthorized aid on this assignment"
#
# Name: Rushil Udani
# Section: 219
# Assignment: 04b Program 4
# Date: 15 09 2020
from sympy import symbols
from sympy.solvers import solve
print('This program will solve a quadratic equation, given the coefficients.')
print('For an equation written as Ax**2 + Bx + C = 0, please input the coefficients.')

# Read the three real coefficients from the user.
# NOTE(review): float() raises ValueError on non-numeric input — no validation.
coeff_a = float(input('A: '))
coeff_b = float(input('B: '))
coeff_c = float(input('C: '))

# Build the symbolic polynomial and let sympy find its roots
# (real or complex; a single root when the discriminant is zero).
x = symbols('x')
quadratic = coeff_a * x ** 2 + coeff_b * x + coeff_c
print('Solving:', quadratic)
solutions = solve(quadratic)
print('The solution(s) are:', ', '.join(f'{sol}' for sol in solutions))
|
21,013 | e07b7c361ad8fd6e183a65045ba46283d34aa85c | from pagarme.resources import handler_request
from pagarme.resources.routes import customer_routes
def create(dictionary):
    """POST *dictionary* to the customers base URL; returns the API response."""
    return handler_request.post(customer_routes.BASE_URL, dictionary)
def find_all():
    """GET the full customer list; returns the API response."""
    return handler_request.get(customer_routes.GET_ALL_CUSTOMERS)
def find_by(search_params):
    """GET customers filtered by *search_params*; returns the API response."""
    return handler_request.get(customer_routes.GET_CUSTOMER_BY, search_params)
21,014 | 94156d0b6263aaddcf3e60f8b916f55d70346056 | import cv2
# Class to store the per-call state of the motion detector.
class MotionDetection:
    """Container for motion-detection intermediates.

    Fields start as None and are filled in as the pipeline runs.
    """

    def __init__(self, status):
        self.status = status          # 1 once motion is confirmed, else 0
        self.frame = None             # original colour frame (unused here)
        self.gray = None              # blurred grayscale frame
        self.delta_frame = None       # abs diff against the baseline
        self.thresh_frame = None      # binarised, dilated delta
# Function to perform motion detection
def motionDetector(frame, first_frame):
    """
    Performs object detection by comparison between the current frame and the previous frame.

    Returns (baseline_ready, status): (False, 0) on the first call, which
    only records the baseline; afterwards status is 1 iff a sufficiently
    large moving region was found in *frame*.
    """
    detector = MotionDetection(status=0)
    # convert the color frame to gray frame as an extra layer of color
    # is not required
    detector.gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # convert gray scale image to GaussianBlur (suppresses pixel noise)
    detector.gray = cv2.GaussianBlur(detector.gray, (21, 21), 0)
    # set first frame as the baseline frame
    # (first_frame is a one-element list so the baseline survives across calls)
    if first_frame[0] is None:
        first_frame[0] = detector.gray
        return False, detector.status
    # calculate difference between static background and current frame
    detector.delta_frame = cv2.absdiff(first_frame[0], detector.gray)
    # apply the threshold (binarise: differences > 30 become 255)
    detector.thresh_frame = cv2.threshold(detector.delta_frame, 30, 255, cv2.THRESH_BINARY)[1]
    # dilate the Threshold Frame to merge nearby moving regions
    detector.thresh_frame = cv2.dilate(detector.thresh_frame, None, iterations=3)
    # find contours in the frame
    contours, _ = cv2.findContours(
        detector.thresh_frame.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        # ignore small contours (< 10000 px area) as noise
        if cv2.contourArea(contour) < 10000:
            continue
        detector.status = 1
    return True, detector.status
|
21,015 | 83b00a428ca83dc3ee1693cd3c36326408bb1b37 | import re
# Regex matching one or more consecutive ASCII digits.
pattern = r"[0-9]+"

# Read lines until an empty line; print every digit run found,
# space-separated, on a single output line.
while True:
    text = input()
    if text == "":
        break
    res = re.findall(pattern, text)
    for i in res:
        print(i, end = " ")
# Number of test cases (first input line).
T = int(input().strip())
def check(s, ans):
    """Classify one 4-cell line of the board against the running answer.

    Returns 0 for an X win on this line, 1 for an O win, 3 ("not
    completed") when the line has an empty cell and no win has been seen
    yet, otherwise *ans* unchanged.
    """
    if '.' in s:
        # Empty cell: this line decides nothing. Keep an already-found
        # win (ans < 2), otherwise mark the game as unfinished.
        return ans if ans < 2 else 3
    if 'O' not in s:
        return 0  # every cell is X (or the T wildcard)
    if 'X' not in s:
        return 1  # every cell is O (or the T wildcard)
    return ans    # mixed line: no information
# For each case: read a 4x4 board, check every row, column and both
# diagonals, then print the verdict.
for case in range(1,T+1):
    # 2 == "Draw" until check() proves otherwise.
    ans = 2
    answers = ["X won", "O won", "Draw", "Game has not completed"]
    board = [0]*4
    for r in range(4):
        board[r] = input().strip()
    input()  # consume the blank separator line between cases
    # Rows.
    for row in board:
        ans = check(row,ans)
    # Columns.
    for c in range(4):
        col = ''.join(board[r][c] for r in range(4))
        ans = check(col,ans)
    # Main diagonal, then anti-diagonal.
    diag = ''.join(board[r][r] for r in range(4))
    ans = check(diag,ans)
    diag = ''.join(board[r][3-r] for r in range(4))
    ans = check(diag,ans)
    print("Case #",case,": ",answers[ans],sep = '')
|
# Architecture / hyper-parameter configuration for the "concatenated LSTM"
# model — presumably consumed by a training harness that imports
# model_module/model_class dynamically; confirm against the harness.
lstm_concatenated = {
    "model_module": 'lstm_concatenated',
    "model_class": 'LSTMConcatenated',
    # Input sizing: vocabulary size and token lengths of article/headline.
    "vocabulary_dim": 3000,
    "article_length": 800,
    "headline_length": 100,
    "embedding_dim": 20,
    # Dense head: one (units, activation, dropout) tuple per layer.
    "hidden_layers": [
        (50, 'relu', 0.5),
        (50, 'relu', 0.5),
    ],
    # Keyword arguments forwarded to Model.compile(...).
    "compile": {
        'optimizer': 'nadam',
        'loss': {
            'related_prediction': 'binary_crossentropy',
            'stance_prediction': 'categorical_crossentropy'
        },
        # Stance output weighted 3x the relatedness output.
        'loss_weights': {
            'related_prediction': 0.25,
            'stance_prediction': 0.75
        },
        'metrics': ['accuracy']
    },
    # Keyword arguments forwarded to Model.fit(...).
    "fit" : {
        'epochs': 4,
        'batch_size': 64,
        'verbose': 1
    },
}
|
21,018 | 893aeea6eddd441f7424d68c5260e5a1e1dbc96e | import os
import shutil
import unittest
import numpy as np
from utils import common_functions
from wassr import wassr_corrector
from wassr import algorithm
from wassr import mscf_algorithm
class MockAlgorithm(algorithm.Algorithm):
    """Test double: records its construction parameters and returns a
    fixed (hStep, maxOffset) pair from calculate()."""

    def __init__(self, hStep, maxOffset, maxShift):
        self.hStep = hStep
        self.maxOffset = maxOffset
        self.maxShift = maxShift

    def calculate(self, mppmValue, minimalValue):
        # BUG FIX: previously returned the module-level globals `hStep`
        # and `maxOffset` — which only worked by accident because this
        # module defines globals with the same names — instead of the
        # values this instance was constructed with.
        return (self.hStep, self.maxOffset)
# Shared fixture data: path/name of the sample WASSR DICOM series and the
# default corrector parameters. Kept as strings — WassrCorrector parses
# them itself (see test_constructor).
testFilesPath = '../DICOM_TEST/WASSR_99677/'
filename = 'WASSR_99677_sl_1_dyn_'
sSlide = '1'
hStep = '0.01'
maxOffset = '1.0'
alternating = 'False'
nDynamics = '22'
lmo = 'B'
gauss = '3.0'
zFilter = 'False'
# Module-level mock built from the parameters above (note the name typo
# "algoritm" is used consistently below).
algoritm = MockAlgorithm(hStep, maxOffset, maxOffset)
def createWassrCorrector():
    """Build a WassrCorrector wired with the module-level test parameters."""
    return wassr_corrector.WassrCorrector(sSlide, hStep, maxOffset, alternating, nDynamics, lmo, gauss, zFilter, algoritm)
class WassrCorrectorTest(unittest.TestCase):
    """Integration tests for WassrCorrector against the sample DICOM data.

    The golden values below were presumably produced by a reference
    implementation; comparisons use a 0.01% relative tolerance.
    """

    def test_constructor(self):
        """Constructor parses the string parameters into typed fields."""
        corrector = createWassrCorrector()
        self.assertEqual(int(sSlide), corrector.sSlide)
        self.assertEqual(float(hStep), corrector.hStep)
        self.assertEqual(float(maxOffset), corrector.maxOffset)
        self.assertEqual(common_functions.str2bool(alternating), corrector.alternating)
        self.assertEqual(int(nDynamics), corrector.nDynamics)
        self.assertEqual(lmo, corrector.lmo)
        self.assertEqual(float(gauss), corrector.gauss)
        self.assertEqual(common_functions.str2bool(zFilter), corrector.zFilter)

    def test_calculate_wassr_aml_correction(self):
        """AML correction with the real MSCF algorithm: spot-check the
        offset map (OF) and residual map (R) against golden values."""
        corrector = createWassrCorrector()
        # Replace the mock with the real algorithm for this test.
        corrector.algoritm = mscf_algorithm.MscfAlgorithm(hStep, maxOffset, maxOffset)
        sName = '../DICOM_TEST/A_WASSR_99677'
        Mask = common_functions.createTestMask(192, 192, 5)
        (OF, R) = corrector.calculateWassrAmlCorrection(testFilesPath, sName, filename, Mask)
        self.assertEqual((192, 192), OF.shape)
        self.assertTrue(abs( 0.380000000000000 - OF[0, 0]) < abs(OF[0, 0]) * 0.0001)
        self.assertTrue(abs(-0.190000000000000 - OF[0, 1]) < abs(OF[1, 1]) * 0.0001)
        self.assertTrue(abs( 0.360000000000000 - OF[1, 0]) < abs(OF[1, 0]) * 0.0001)
        self.assertTrue(abs( 0.350000000000000 - OF[1, 1]) < abs(OF[1, 1]) * 0.0001)
        self.assertTrue(abs(-0.600000000000000 - OF[3, 4]) < abs(OF[3, 4]) * 0.0001)
        self.assertTrue(abs( 0.510000000000000 - OF[4, 3]) < abs(OF[4, 3]) * 0.0001)
        # Outside the 5x5 test mask everything stays zero.
        self.assertEqual(0.0, OF[5, 5])
        self.assertEqual(0.0, OF[191, 191])
        self.assertEqual((192, 192), R.shape)
        self.assertTrue(abs(1.031706834523456e+02 - R[0, 0]) < abs(R[0, 0]) * 0.0001)
        self.assertTrue(abs(9.948575112426138 - R[0, 1]) < abs(R[1, 1]) * 0.0001)
        self.assertTrue(abs(13.260475788136212 - R[1, 0]) < abs(R[1, 0]) * 0.0001)
        self.assertTrue(abs(6.142879082109577 - R[1, 1]) < abs(R[1, 1]) * 0.0001)
        self.assertTrue(abs(0.786478608844326 - R[3, 4]) < abs(R[3, 4]) * 0.0001)
        self.assertTrue(abs(0.349443498538241 - R[4, 3]) < abs(R[4, 3]) * 0.0001)
        self.assertEqual(0.0, R[5, 5])
        self.assertEqual(0.0, R[191, 191])

    def test_calculate_offsets(self):
        """calculateOffsets on normalised images returns two 192x192 maps
        seeded from the MockAlgorithm's (hStep, maxOffset) pair."""
        corrector = createWassrCorrector()
        Mask = common_functions.createDefaultMask(os.path.join(testFilesPath, filename + '1'), 0)
        (Images, ZeroImage, sequence) = common_functions.loadImages(testFilesPath, filename, float(gauss), int(sSlide), int(nDynamics), Mask)
        NormalizedImages = common_functions.normalizeImages(Images, int(nDynamics), Mask)
        (result1, result4) = corrector.calculateOffsets(NormalizedImages)
        self.assertEqual((192, 192), result1.shape)
        self.assertEqual((192, 192), result4.shape)
        self.assertEqual(0.01, result1[0, 0])
        self.assertEqual(1.0, result4[0, 0])
|
21,019 | 7cecb4e8e6adf704f6f269dbaf9f8cf9d936ff02 | """
3 useful tests that cover ``ModelSerializer`` behavior are:
- ``test_deserialize_all_fields``
- ``test_deserialize_required_fields`` (optional)
- ``test_serialize_all_fields``
For code that implements create functionality, implement the
``deserialize_all_fields`` and ``deserialize_required_fields`` tests. For code
that only implements read functionality, implement ``serialize_all_fields``
tests.
"""
import pytest
from ..serializers import DogSerializer
from ..models import Dog
@pytest.mark.django_db
def test_deserialize_required_fields():
    """
    Test the required fields of the deserialization. Validate the
    ``serializer.save()`` deserializes and creates a new object in the
    datastore with the required fields.
    - Initialize ``serializer`` with empty data set
    - Assert ``serializer.is_valid()`` is ``False``
    - Assert ``len(serializer.errors)`` reflects the number of required fields
    - Initialize ``serializer`` with only required fields
    - Assert ``serializer.is_valid()`` is ``True``
    - Deserialize with ``serializer.save()``
    - Assert one new object is created in the datastore
    - Assert required fields are on new object in the datastore
    """
    # Empty payload: only required fields should appear as errors.
    data = {}
    serializer = DogSerializer(data=data)
    assert not serializer.is_valid()
    # DogSerializer declares exactly one required field ('name').
    assert len(serializer.errors) == 1

    # Minimal valid payload.
    data = {'name': 'bruce'}
    serializer = DogSerializer(data=data)
    assert serializer.is_valid()
    serializer.save()

    # One object persisted, carrying the required field.
    assert Dog.objects.count() == 1
    dog = Dog.objects.first()
    assert dog.name == 'bruce'
@pytest.mark.django_db
def test_deserialize_all_fields():
    """
    Deserializing a payload with every field must validate, and
    ``save()`` must persist one Dog whose attributes match the payload.
    """
    payload = {'name': 'bruce', 'breed': 'bulldog'}
    serializer = DogSerializer(data=payload)
    assert serializer.is_valid()
    serializer.save()

    assert Dog.objects.count() == 1
    created = Dog.objects.first()
    assert created.name == 'bruce'
    assert created.breed == 'bulldog'
@pytest.mark.django_db
def test_serialize_all_fields():
    """
    Serializing an existing Dog must expose every model field in
    ``serializer.data``.
    """
    dog = Dog.objects.create(name='bruce', breed='bulldog')
    data = DogSerializer(dog).data
    assert data['name'] == 'bruce'
    assert data['breed'] == 'bulldog'
|
21,020 | 3d63ee1c22a94fec1224be66a4c7b4b848e960d9 | import re
import operator
def parse_room(roomname):
    """Split 'aaa-bbb-123[abxyz]' into (letters, sector id, checksum)."""
    parts = roomname.split('-')
    # The final dash-separated piece is '<sector id>[<checksum>]'.
    tail = re.search(r"(.*)\[(.*)\]", parts[-1])
    number = tail.group(1)
    checksum = tail.group(2)
    encrypted_name = ''.join(parts[:-1])
    return encrypted_name, number, checksum
def get_checksum(encrypted_name):
    """Return the five most common letters, ties broken alphabetically.

    Counting is done in one pass with Counter (the old per-character
    ``str.count`` scan was O(unique * len)), and a single sort keyed on
    (-count, letter) replaces the previous two-pass stable sort -- the
    result is identical.
    """
    from collections import Counter  # local import keeps file-level deps unchanged
    counts = Counter(encrypted_name)
    ranked = sorted(counts.items(), key=lambda item: (-item[1], item[0]))
    return ''.join(letter for letter, _ in ranked[:5])
def main():
    """Sum the sector IDs of all rooms whose checksum is genuine.

    Fixes: the old version used the Python-2 ``print`` statement (a
    SyntaxError under Python 3) and never closed ``input.txt``.
    """
    with open('input.txt') as fh:
        lines = [line.strip() for line in fh]
    sectorIdSum = 0
    for l in lines:
        encrypted_name, number, checksum = parse_room(l)
        # Only real rooms (computed checksum matches the printed one) count.
        if get_checksum(encrypted_name) == checksum:
            sectorIdSum += int(number)
    print(sectorIdSum)
|
21,021 | 128df11ac886f401d5269f4c97b619778c467ca1 | from tkinter import*
cat = Tk()

# A button created with state=DISABLED is greyed out and ignores clicks.
Button1 = Button(cat, text = "Click", state = DISABLED)
Button1.pack()

mainloop()
21,022 | 255ce5b9674295596b37b5b6221804d8d98717cf | """Errors occurred while looking for file encoding."""
import enum
class EncodingError(enum.IntEnum):
    """Possible outcomes of a file-encoding lookup."""

    OK = 0
    NOT_FOUND = 1
    PERMISSION_ERROR = 2
    INVALID_ENCODING = 3


# Display colour per outcome; both hard-failure modes render red.
ERRORS_COLOUR = {
    EncodingError.OK: 'green',
    EncodingError.NOT_FOUND: 'yellow',
    EncodingError.PERMISSION_ERROR: 'red',
    EncodingError.INVALID_ENCODING: 'red',
}


def error_colour(error):
    """Map an :class:`EncodingError` to its display colour."""
    return ERRORS_COLOUR[error]
|
21,023 | 6d1d750bf154efd20c474c8659afc3b010520853 |
from socket import *

# UDP echo client: address of the server and the receive buffer size.
ip_addr = ('127.0.0.1', 8080)
accept_size = 1024

client = socket(AF_INET, SOCK_DGRAM)


# Read a string from stdin and send it to the server; 'q' quits.
def echo():
    while True:
        msg = input(">>>:").strip()
        if msg == 'q':
            client.close()
            break
        else:
            # Send the UTF-8 encoded message and print the echoed reply.
            client.sendto(msg.encode('utf8'), ip_addr)
            data, addr = client.recvfrom(accept_size)
            print(data.decode("utf8"))


if __name__ == "__main__":
    echo()
|
21,024 | a7355ca6e6083de7d3c07ca5a981c549553408f9 | # -*- coding: utf-8 -*-
"""
Created on Sat Nov 16 17:39:31 2019
@author: marti
"""
import os
import pandas as pd
import copy
#fuer das spiel wird ein neuer dataframe als dokument angelegt
def doc_anlegen():
    """Create the empty move-log DataFrame for a single game."""
    spalten = (['spielID', 'zugNummer', 'player1', 'playerTyp', 'zug']
               + ['spalte_{}'.format(i) for i in range(1, 8)]
               + ['sieger'])
    return pd.DataFrame(columns=spalten)
#zug wird an das Spiel-Dokument gehaengt
def zugDokumentieren(doc, spielID, zugNummer, zug, spielfeld, player1, playerTyp):
    """Append one move record to the game-log DataFrame and return it.

    ``spielfeld`` must be an indexable board with seven columns; it is
    shallow-copied so later board mutations do not alter the log.

    Fix: ``DataFrame.append`` was deprecated in pandas 1.4 and removed in
    pandas 2.0 -- the row is now added via :func:`pandas.concat`, which
    produces the same column alignment and re-indexing.
    """
    s = copy.copy(spielfeld)
    data = {'spielID': spielID, 'zugNummer': zugNummer, 'player1': player1, 'playerTyp': playerTyp,
            'zug': zug,
            'spalte_1': s[0],
            'spalte_2': s[1],
            'spalte_3': s[2],
            'spalte_4': s[3],
            'spalte_5': s[4],
            'spalte_6': s[5],
            'spalte_7': s[6]}
    row = pd.DataFrame([data])
    doc = pd.concat([doc, row], ignore_index=True)
    return doc
#wenn das spiel aus ist, werden an alle Datensaetze des Spiel-Dokuments die INfor, wer gewonnen hat, gehaengt
def siegerDokumentieren(doc, sieger):
    """Stamp the game's outcome onto every logged move and return the log.

    ``sieger``: 1 = player1 won, 2 = player2 won, 0 = draw.
    """
    doc['sieger'] = sieger
    return doc
#das Spiel-Dokument in eine csv schreiben
def spielDokumentieren(doc, pfad, dateiname):
    """Write the game log to ``<pfad>/<dateiname>`` as a ';'-separated CSV.

    The header is written only when the file is first created; further
    games are appended header-less so the file stays one long table.

    Fix: the target path was built with a hard-coded ``'\\\\'`` separator,
    which produced broken file names on non-Windows systems -- it is now
    assembled with :func:`os.path.join`.
    """
    ziel = os.path.join(pfad, dateiname)
    if os.path.exists(ziel):
        doc.to_csv(path_or_buf=ziel, index=False, sep=';', header=False, mode='a')
    else:
        doc.to_csv(path_or_buf=ziel, index=False, sep=';', header=True, mode='w')
def datenSammeln(pfad):
    """Concatenate every ';'-separated CSV in ``pfad`` into one DataFrame.

    Fixes: ``DataFrame.append`` (removed in pandas 2.0) is replaced by a
    single :func:`pandas.concat`, and the file path is built with
    :func:`os.path.join` instead of a hard-coded ``'\\\\'`` separator.
    """
    namen = os.listdir(pfad)
    if not namen:
        # Preserve the old behaviour for an empty directory.
        return pd.DataFrame()
    frames = [pd.read_csv(os.path.join(pfad, name), sep=';') for name in namen]
    return pd.concat(frames, ignore_index=True)
|
21,025 | 4e34784a1325f2640a367f6679721c876d756339 | f=open('data.txt','rb')
f.read(3)
print('当前读写位置',f.tell())
f.read(7)
print('当前读写位置',f.tell())
f.close() |
21,026 | 4eb617fa171cba69d4583df77f55214dd4b06e3b | from ConfigParser import SafeConfigParser
import os
class PidginIrcNotifyConfig(object):
    """
    Class for creating/reading/modifying config values for the app.

    Settings live in ``~/.pidgin-irc-notify/settings.conf``; a bare-bones
    file is created on first use.
    """

    def __init__(self):
        self.config_dir_path = os.path.expanduser('~/.pidgin-irc-notify/')
        self.config_file_path = os.path.join(self.config_dir_path,
                                             'settings.conf')
        self.parser = self.get_parser()

    def get_parser(self):
        """
        Returns the parser, and creates default config, if it doesn't exist.
        """
        # Fix: ConfigParser defaults must be strings. The old list default
        # ({'channels': []}) would have made parse() call .split on a list
        # whenever a config file lacked the 'channels' key.
        parser = SafeConfigParser({'channels': ''})
        exists = parser.read(self.config_file_path)
        if not exists:
            self.create_default_config(parser)
        return parser

    def create_default_config(self, parser):
        """
        Creates a default bare-bones config file.
        """
        parser.add_section('irc')
        parser.set('irc', 'channels', '')
        # Create the full path, and the file. 0o700 keeps the directory
        # private to the user (the old 0700 literal is Python-2-only syntax).
        try:
            os.makedirs(self.config_dir_path, mode=0o700)
        except OSError:
            # Directory already exists -- nothing to do.
            pass
        # Fix: the file handle was previously opened and never closed.
        with open(self.config_file_path, 'w') as file_resource:
            parser.write(file_resource)

    def parse(self):
        """
        Parses the config file and returns the result: a dict with a
        'channels' list (empty when no channels are configured).
        """
        config = {}
        channels = self.parser.get('irc', 'channels')
        if channels == '':
            channels = []
        else:
            channels = channels.split(',')
        config['channels'] = channels
        return config
|
21,027 | 232e8ce8fb829b54f70a52c0a00a18229bf659c5 | #! /usr/bin/env python3.6
#coding=utf-8
import os
import subprocess
import threading
import time
import traceback
from enum import Enum
import grpc
from . import perfdog_pb2, perfdog_pb2_grpc
class SaveFormat(Enum):
    """Export format for the collected performance data.

    Fix: the original members had trailing commas, which silently made
    every value a one-element tuple ((0,), (1,), ...). Values are now
    plain ints; member identity -- the only thing the rest of the code
    compares -- is unchanged.
    """
    NONE = 0
    JSON = 1
    PB = 2
    EXCEL = 3
    ALL = 4
class PerfdogService():
    """Thin wrapper around the PerfDog gRPC service.

    Drives one performance-test session: launch the service, bind the
    target USB device, collect data for the target package, then
    export/upload the results.
    """

    # Class-level defaults; __init__ overrides each of these per instance.
    packageName = ''             # package name of the app under test
    PerfdogPath = ''             # local path of the PerfDogService executable
    Token = ''                   # PerfDog service token
    stub = None                  # gRPC stub, created in initService()
    device = None                # matched USB device, set in initService()
    caseName = ''                # name of this test case
    deviceUuid = ''              # uid of the device to test
    saveformat = SaveFormat.ALL  # export format for the collected data
    uploadServer = True          # whether to upload results to the PerfDog site
    saveJsonPath = ''            # local directory for exported files

    def __init__(self,packageName,perfdogPath,token,deviceuuid,saveJsonPath,casename,saveFormat,UploadServer):
        """
        :param packageName: package name of the app under test
        :param perfdogPath: local path of the PerfDogService executable
        :param token: PerfDogService token
        :param deviceuuid: id of the device to test
        :param saveJsonPath: local directory where test data is saved
        :param casename: name of this test run
        :param saveFormat: format in which test data is saved
        :param UploadServer: whether to upload test data to the PerfDog site
        """
        self.packageName = packageName
        self.PerfdogPath = perfdogPath
        self.Token = token
        self.caseName = casename
        self.deviceUuid = deviceuuid
        self.saveJsonPath = saveJsonPath
        self.saveformat = saveFormat
        self.uploadServer = UploadServer

    def initService(self):
        """Launch PerfDogService, log in, and bind the target USB device."""
        try:
            print("0 启动PerfDogService")
            # Launch the PerfDogService executable at the configured path.
            perfDogService = subprocess.Popen(self.PerfdogPath)
            # Give PerfDogService time to finish starting up.
            time.sleep(5)
            print("1.通过ip和端口连接到PerfDog Service")
            options = [('grpc.max_receive_message_length', 100 * 1024 * 1024)]
            channel = grpc.insecure_channel('127.0.0.1:23456', options=options)
            print("2.新建一个stub,通过这个stub对象可以调用所有服务器提供的接口")
            self.stub = perfdog_pb2_grpc.PerfDogServiceStub(channel)
            print("3.通过令牌登录,令牌可以在官网申请")
            userInfo = self.stub.loginWithToken(
                perfdog_pb2.Token(token=self.Token))
            print("UserInfo:\n", userInfo)
            print("4.启动设备监听器监听设备,每当设备插入和移除时会收到一个DeviceEvent")
            deviceEventIterator = self.stub.startDeviceMonitor(perfdog_pb2.Empty())
            for deviceEvent in deviceEventIterator:
                # Take the device object from the event; later calls need it.
                self.device = deviceEvent.device
                if deviceEvent.eventType == perfdog_pb2.ADD:
                    print("设备[%s:%s]插入\n" % (self.device.uid, perfdog_pb2.DEVICE_CONTYPE.Name(self.device.conType)))
                    # Each phone yields two device objects (USB and WIFI);
                    # for wired testing, use the USB one.
                    if self.device.conType == perfdog_pb2.USB:
                        if self.device.uid == self.deviceUuid:
                            print("5.初始化设备[%s:%s]\n" % (self.device.uid, perfdog_pb2.DEVICE_CONTYPE.Name(self.device.conType)))
                            self.stub.initDevice(self.device)
                            print("5.初始化设备 完成\n" )
                            # Stop listening once the target device is bound.
                            break
                elif deviceEvent.eventType == perfdog_pb2.REMOVE:
                    print("设备[%s:%s]移除\n" % (self.device.uid, perfdog_pb2.DEVICE_CONTYPE.Name(self.device.conType)))
        except Exception as e:
            traceback.print_exc()

    def startPerf(self):
        """Enable the data types and start collecting for the target app."""
        try:
            print("6.获取app列表")
            appList = self.stub.getAppList(self.device)
            apps = appList.app
            app = self.selectApp(apps)
            if app == None:
                raise Exception("未获取 "+self.packageName+" 信息")
            print("7.获取设备的详细信息")
            deviceInfo = self.stub.getDeviceInfo(self.device)
            print("deviceInfo")
            print(deviceInfo)
            # self.stub.setGlobalDataUploadServer(perfdog_pb2.SetDataUploadServerReq(serverUrl="http://127.0.0.1:80/",dataUploadFormat=perfdog_pb2.JSON))
            print("8.开启性能数据项")
            self.stub.enablePerfDataType(
                perfdog_pb2.EnablePerfDataTypeReq(device=self.device, type=perfdog_pb2.NETWORK_USAGE))
            self.stub.enablePerfDataType(
                perfdog_pb2.EnablePerfDataTypeReq(device=self.device, type=perfdog_pb2.SCREEN_SHOT))
            print("9.开始收集[%s:%s]的性能数据\n" % (app.label, app.packageName))
            # self.stub.setScreenShotInterval(1)
            print(self.stub.startTestApp(perfdog_pb2.StartTestAppReq(device=self.device, app=app)))
            # req = perfdog_pb2.OpenPerfDataStreamReq(device=self.device)
            # perfDataIterator = self.stub.openPerfDataStream(req)
            # def perf_data_process():
            #     for perfData in perfDataIterator:
            #         print(perfData)
            #
            # threading.Thread(target=perf_data_process).start()
            # NOTE(review): this starts a thread with no target, so it exits
            # immediately -- looks like a leftover from the commented-out
            # streaming code above; confirm whether it can be removed.
            threading.Thread().start()
        except Exception as e:
            traceback.print_exc()

    def setlabel(self,label):
        """Attach a label marker to the running test."""
        try:
            print(" 添加label :" + label)
            self.stub.setLabel(perfdog_pb2.SetLabelReq(device=self.device, label=label))
        except Exception as e:
            traceback.print_exc()

    def setNote(self,note):
        """Attach an annotation (at the fixed 5000 ms mark) to the test."""
        try:
            print(" 添加批注 :"+note)
            self.stub.addNote(perfdog_pb2.AddNoteReq(device=self.device, time=5000, note=note))
        except Exception as e:
            traceback.print_exc()

    def SaveJSON(self):
        """Export the collected data in the configured format(s).

        For SaveFormat.ALL, only the first (JSON) export uploads to the
        server; uploadServer is cleared afterwards so PB/Excel are saved
        locally only. For single formats, uploadServer is cleared after
        the export.
        """
        try:
            # NOTE(review): `str` shadows the builtin for this method body.
            str = "导出所有数据"
            if self.uploadServer:
                str = '上传' + str
            if self.saveformat == SaveFormat.NONE:
                print("PrefDog 数据保存格式为 NONE 不保存,不上传")
            elif self.saveformat == SaveFormat.ALL:
                print("12.%s ----JSON" % str)
                saveResult = self.stub.saveData(perfdog_pb2.SaveDataReq(
                    device=self.device,
                    caseName=self.caseName,  # case name shown on the web UI and in the excel file
                    uploadToServer=self.uploadServer,  # upload to the PerfDog server
                    exportToFile=True,  # save to a local file
                    outputDirectory=self.saveJsonPath,
                    dataExportFormat=perfdog_pb2.EXPORT_TO_JSON
                ))
                print("保存结果 ----JSON :\n", saveResult)
                # Only the first export uploads; subsequent ones are local.
                self.uploadServer = False
                print("12.%s ----PB" % str)
                saveResult = self.stub.saveData(perfdog_pb2.SaveDataReq(
                    device=self.device,
                    caseName=self.caseName,  # case name shown on the web UI and in the excel file
                    uploadToServer=self.uploadServer,  # upload to the PerfDog server
                    exportToFile=True,  # save to a local file
                    outputDirectory=self.saveJsonPath,
                    dataExportFormat=perfdog_pb2.EXPORT_TO_PROTOBUF
                ))
                print("保存结果----PB:\n", saveResult)
                print("12.%s ----Excel" % str)
                saveResult = self.stub.saveData(perfdog_pb2.SaveDataReq(
                    device=self.device,
                    caseName=self.caseName,  # case name shown on the web UI and in the excel file
                    uploadToServer=self.uploadServer,  # upload to the PerfDog server
                    exportToFile=True,  # save to a local file
                    outputDirectory=self.saveJsonPath,
                    dataExportFormat=perfdog_pb2.EXPORT_TO_EXCEL
                ))
                # NOTE(review): label says JSON but this is the Excel
                # result -- copy-paste slip in the log string.
                print("保存结果 ----JSON :\n", saveResult)
            else:
                if self.saveformat == SaveFormat.JSON:
                    print("12.%s ----JSON" % str)
                    saveResult = self.stub.saveData(perfdog_pb2.SaveDataReq(
                        device=self.device,
                        caseName=self.caseName,  # case name shown on the web UI and in the excel file
                        uploadToServer=self.uploadServer,  # upload to the PerfDog server
                        exportToFile=True,  # save to a local file
                        outputDirectory=self.saveJsonPath,
                        dataExportFormat=perfdog_pb2.EXPORT_TO_JSON
                    ))
                    print("保存结果----JSON:\n", saveResult)
                if self.saveformat == SaveFormat.PB:
                    print("12.%s ----PB" % str)
                    saveResult = self.stub.saveData(perfdog_pb2.SaveDataReq(
                        device=self.device,
                        caseName=self.caseName,  # case name shown on the web UI and in the excel file
                        uploadToServer=self.uploadServer,  # upload to the PerfDog server
                        exportToFile=True,  # save to a local file
                        outputDirectory=self.saveJsonPath,
                        dataExportFormat=perfdog_pb2.EXPORT_TO_PROTOBUF
                    ))
                    print("保存结果----PB:\n", saveResult)
                if self.saveformat == SaveFormat.EXCEL:
                    print("12.%s ----Excel" % str)
                    saveResult = self.stub.saveData(perfdog_pb2.SaveDataReq(
                        device=self.device,
                        caseName=self.caseName,  # case name shown on the web UI and in the excel file
                        uploadToServer=self.uploadServer,  # upload to the PerfDog server
                        exportToFile=True,  # save to a local file
                        outputDirectory=self.saveJsonPath,
                        dataExportFormat=perfdog_pb2.EXPORT_TO_EXCEL
                    ))
                    # NOTE(review): label says JSON for the Excel result --
                    # copy-paste slip in the log string.
                    print("保存结果 ----JSON :\n", saveResult)
                self.uploadServer = False
        except Exception as e:
            traceback.print_exc()

    def StopPerf(self):
        """Export (unless format is NONE), stop the test, kill the service."""
        try:
            if self.saveformat != SaveFormat.NONE:
                self.SaveJSON()
            else:
                print("保存格式为NONE 不保存为文件")
            print("13.停止测试")
            self.stub.stopTest(perfdog_pb2.StopTestReq(device=self.device))
            self.stub.killServer()
            print("over")
        except Exception as e:
            traceback.print_exc()

    def selectApp(self,Apps):
        """Return the app whose packageName matches the target, else None."""
        for app in Apps:
            print("find :",self.packageName," With ",app.packageName)
            if app.packageName == self.packageName:
                return app;
        return None;
if __name__ == '__main__':
    # Manual smoke test when the module is run directly.
    package = "com.ztgame.fangzhidalu"
    path = "C:/Work/PerfDog/PerfDogService(v4.3.200927-Win)/PerfDogService.exe"
    token = "e8e5734ad2f74176b368c956173c9bfbb3a85bd1ec676cbef4b90435234786c1"
    uuid = ""
    # Fix: the constructor takes eight arguments but the old call passed
    # only five positionally, which raised TypeError (and bound "Test" to
    # deviceuuid). All parameters are now supplied explicitly; "Test" is
    # presumably the intended case name -- confirm the other defaults.
    pref = PerfdogService(package, path, token, uuid, "", "Test",
                          SaveFormat.NONE, False)
    print(pref)
    pref.StopPerf()
|
21,028 | 9ae09f8cb21a3f54963782b17f69553337a75a81 | import numpy as np
# def width(args = 3):
# try:
# args = int(input('Введите ширину поля, поле принимает только int'))
# except (TypeError, ValueError):
# print('Вы ввели не int')
# print('Научитесь нажимать на клавиши')
# quit()
# else:args
# return args
# def height(args = 3):
# try:
# args = int(input('Введите ширину поля, поле принимает только int'))
# except (TypeError, ValueError):
# print('Вы ввели не чило')
# print('Научитесь нажимать на клавиши')
# quit()
# return args
#
#
# s = range(width() * height())
# s_str = {i : '{}'.format("%02d" % i) for i in s}
# st = step()
# st_fr = '{}'.format("%02d" % int(st))
# if st_fr == s_str[st]:
# return s_str
#
#
# def field_cre(s_str):
# def fil():
# for i in range(1):
# print(f" [ {s_str.get((0))} ] " + f" [ {s_str.get(1)} ] " + f" [ {s_str.get(2)} ] \n")
# print(f" [ {s_str.get((3))} ] " + f" [ {s_str.get(4)} ] " + f" [ {s_str.get(5)} ] \n")
# print(f" [ {s_str.get((6))} ] " + f" [ {s_str.get(7)} ] " + f" [ {s_str.get(8)} ] \n")
# return fil()
# matrix = np.array(field_list()).reshape(width(), height())
# for row in matrix:
# print_field = print(f' {list(row)} \n')
# field_cre()
# print(type(field_list()[1]))
# #
# #
# # wi = width()
# # he = height()
# # wehe = wi * he
# ''' можно использовать для игры Go'''
#
# def field():
# j = 0
#
# field = { x: x for x in range(1, 9) for x in range(1, 10)}
#
#
# field = {x: y for x, y in zip(range(1, 10), range(15, 19)) for i in range(0, 8)}
#
#
# a = {'key1': 'word1', 'key2': 'word2', 'key3': 'word3'}
# b = {key.upper(): value[::-1] for key, value in a.items()}
#
# field = [[j for j in range(2)] for j in range(9)]
#
#
# for i in range(0, 9):
# field[i] = str('0') + field[i]
# return field
#
# #
#
# def field_cre():
#
# width = 3
# height = 3
# s = range(width * height)
# s_str = [' {} '.format("%02d" % i) for i in s]
# matrix = np.array(s_str).reshape(width, height)
# for row in matrix:
# print()
# print(list(row))
#
# print(f" [ {field().get(1)} ] " + f" [ {field().get(2)} ] " + f" [ {field().get(3)} ] ")
# print()
# print(f" [ {field().get(3)} ] " + f" [ {field().get(4)} ] " + f" [ {field().get(5)} ] ")
# print()
# print(f" [ {field().get(7)} ] " + f" [ {field().get(8)} ] " + f" [ {field().get(9)} ] ")
#
#
# # for i in range(he):
# # print(field()[wi * i:wi * i + wi:])
# # print()
# #
# #
# # return
#
# # print(field().get(1))
# # print(field().values()ues(6))
#
#
# import numpy as np
#
#
# class Field:
# def init(self, width=None, height=None):
# self.width = width or self.__insert_parameter(
# description='ширина') # если первое значение задано, то выводит его, а если первое=None, то выводит второе значение
# self.height = height or self.__insert_parameter(description='длина')
#
# def __insert_parameter(self, description):
# try:
# value = int(input('Введите {} поля, поле принимает только int'.format(description[:-1] + 'у')))
# except (TypeError, ValueError):
# raise ValueError('Вы ввели не int. Научитесь нажимать на клавиши!')
# return value
#
# def create_field(self):
# s = range(self.width * self.height)
# s_str = [' {} '.format("%02d" % i) for i in s]
# matrix = np.array(s_str).reshape(self.width, self.height)
# for row in matrix:
# print(list(row))
#
#
# field_fun = Field(5, 5)
# field_fun.create_field() |
21,029 | f9dfba328bad7b2551d3bab5c0299aab36f58f21 | koszykUzytkownika = []
czyDalejRobieZakupy = True
asortyment = ["Mleko","Chleb","Masło","Kurczak","Jajka","Ogorek","Pomidor","Cukierki","Szynka"]
def menu():
    """Print the shop menu and return the user's raw choice string."""
    options = (
        "Co chcesz zrobić?",
        "1. Dodaj do koszyka",
        "2. Usun z koszyka",
        "3. Sprawdz co masz w koszyku",
        "4. Wyjdz ze sklepu",
    )
    for line in options:
        print(line)
    return input()
def kup():
    """Ask for an item and add it to the basket if the shop stocks it."""
    print("Co chcesz dodac?")
    wybor = input()
    if wybor not in asortyment:
        print("Wpierdol. W asortymencie nie ma takiego itemku")
        return
    koszykUzytkownika.append(wybor)
def pokazKoszyk():
    """Display the current basket contents."""
    print("Teraz twoj koszyk zawiera:")
    print(koszykUzytkownika)
def usun():
    """Ask for an item and remove it from the basket if it is there."""
    print("Co chcesz usunąć?")
    wybor = input()
    if wybor not in koszykUzytkownika:
        print("Wpierdol. W koszyku nie ma takiego itemku")
        return
    koszykUzytkownika.remove(wybor)
# Main shop loop: dispatch on the menu choice until the user exits.
# Fixes: a duplicated `elif '3'` branch ("not implemented yet") was
# unreachable and has been removed, and the exit path printed the None
# returned by pokazKoszyk() -- the basket is now shown without the stray
# "None" line.
while czyDalejRobieZakupy:
    wyborUzytkownika = menu()
    if wyborUzytkownika == '1':
        kup()
        pokazKoszyk()
    elif wyborUzytkownika == '2':
        pokazKoszyk()
        usun()
    elif wyborUzytkownika == '3':
        pokazKoszyk()
    elif wyborUzytkownika == '4':
        czyDalejRobieZakupy = False
        print("Twój koszyk")
        pokazKoszyk()
        print("Dowidzenia")
#Słowa od Profesora M.Ł
#Pokrzepiające dusze oraz motywujące
#"a tak pozatym to bardzo dobrze CI idzie ziomeczku"
#TODO WYKONAC ASORTYMENT SKLEPU ; CENNIK i SUME ZAKUPÓW
#TODO XD
|
21,030 | 93c2d15b06902c3d3eb8c360ca9e6403513e03dc | ##PROBLEM: Mark is stuck on Mars and needs to figure out how long he can
## survive eating rations and potatoes.
##
##ALGORITHM:
## 1. Ask user about survival supplies
## 2. Calculate ration survival data
## 3. Find survival data on potatoes
## 4. Display survival data summary
# Interactive calculator: all survival figures flow from three inputs.
print('The Martian Survival\n')

ration = float(input('How many meal packs do you have? '))
consumption = float(input('How many calories per day will you consume? '))
farmland = float(input('How many square meters of soil will you plant? '))

print('\nCalculating Survival\n')

# Appears to assume 2000 calories per ration pack -- TODO confirm.
rationDays = ration * 2000 / consumption
print('You will survive', rationDays, 'days on NASA ration packs.\n')

# Square meters / 10 -> cubic meters (presumably a 10 cm soil depth).
soil = farmland / 10
print('You will need', soil, 'cubic meters of soil')

# 40 liters of water per cubic meter of soil.
water = soil * 40
print('The soil needs', water, 'liters of water')

# Yield model: 0.006 kg of potatoes per square meter per growing day.
potatoKg = 0.006 * rationDays * farmland
print('You can grow', potatoKg, 'kg of potatoes before rations run out')

# Appears to assume 700 calories per kg of potatoes -- TODO confirm.
potatoDays = 700 * potatoKg / consumption
print('Crops will extend your life by', potatoDays, 'days')

totalDays = rationDays + potatoDays
print('This plan gives you a total of', totalDays, 'days of food\n')

print('Good luck Mark')
|
21,031 | e23c5d0da91649c8f70970fa1a9a4b5e41462ecd | # %%
# The dict literal provides "title" but deliberately not "author".
my_dict = {
    "title": "Python is charming"
}
# Present key: plain subscription succeeds.
print(my_dict["title"])

# Missing key: EAFP -- attempt the lookup and fall back on KeyError.
try:
    print(my_dict["author"])
except KeyError:
    print("Anonymous Author")
# %%
21,032 | dd4b59cebe31599dd325ec89c4b9a1ce3c09e6fa | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-02-08 18:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations (1.11); avoid hand-editing
    # beyond comments -- applied migration history must stay consistent.
    # Summary: sets Ukrainian verbose names on Product/ProductImage and
    # adds a Product.price DecimalField defaulting to 0.

    dependencies = [
        ('products', '0003_auto_20180208_1943'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='product',
            options={'verbose_name': 'Товар', 'verbose_name_plural': 'Товари'},
        ),
        migrations.AlterModelOptions(
            name='productimage',
            options={'verbose_name': 'Фото', 'verbose_name_plural': 'Фотографії до товарів'},
        ),
        migrations.AddField(
            model_name='product',
            name='price',
            field=models.DecimalField(decimal_places=2, default=0, max_digits=10),
        ),
    ]
|
21,033 | 8a35c1182c4be5329a9d21d170840dd641e208f0 | import numpy as np
import math
import pylab as plt
from scipy import optimize
import time
import matplotlib.animation as manimation
import matplotlib
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from scipy import interpolate
class system:
    # NOTE(review): this class reads like an unfinished draft -- several
    # methods reference names that are never defined (see notes below).
    # Source indentation was lost, so the nesting shown here is a best
    # reconstruction; confirm against the original file.

    def __init__(a,b,Tmax,nx,nt,c,inital_pos,initial_vel,x0,v0):
        # NOTE(review): missing `self` parameter -- as written, `a` receives
        # the instance and `self` below is an undefined name (NameError).
        # `inital_pos` (sic), `initial_vel`, `x0`, `v0` are never stored.
        self.a=a
        self.b=b
        self.Tmax=Tmax
        self.nx=nx
        self.nt=nt
        self.c=c
        # Spatial grid over [a, b] and time grid over [0, Tmax].
        self.x=np.linspace(self.a,self.b,self.nx)
        self.t=np.linspace(0.0,self.Tmax,nt)

    def build_func(self):
        # NOTE(review): the lambda body references `wnx` and `givenfuncic`,
        # which do not exist -- presumably typos for `wn,x` and `givenfunc`
        # -- and calls buildSys with more arguments than it accepts.
        f=lambda unplus1,un,unminus1,phiplus1,phin,phiminus1,vn,wn,x,tn,first_step,velocity,c,givenfunc: self.buildSys(unplus1,un,unminus1,phiplus1,phin,phiminus1,vn,wnx,tn,first_step,velocity,c,givenfuncic)
        return f

    def buildSys(self):
        # NOTE(review): buildSys has no body of its own; solveprob is only
        # defined here, never called or returned (its `self` parameter
        # suggests it may have been intended as a sibling method).
        def solveprob(self):
            phi=[]
            A=[]
            v=[]
            w=[]
            t=0
            for i in range(0,len(self.t)):
                sol=None
                if(i==0):
                    # NOTE(review): the lists above are empty here, so these
                    # [-1] lookups raise IndexError; `u_sol`, `F`, `uk`, `c`
                    # and `self.initial_velocity` are never defined.
                    phik=u_sol[-1]
                    Ak=A[-1]
                    vk=v[-1]
                    wk=w[-1]
                    sol=optimize.root(F,uk,args=(uk,None,self.x,t,True,self.initial_velocity,c))
                    sol=sol.x
                else:
                    phikminus1=phik[-2]
                    phik=phik[-1]
                    Akminus1=Ak[-2]
                    Ak=Ak[-1]
                    vk=v[-1]
                    wk=w[-1]
                    sol=optimize.root(F,uk,args=(uk,ukminus1,self.x,t,False,self.initial_velocity,c))
                    sol=sol.x
                # NOTE(review): self.dt is never assigned anywhere visible.
                t+=self.dt
21,034 | 0caf2836cd9f21d33c217070d4a38364caa3dfb4 | from rest_framework.views import APIView
from .serializers import *
from ecom.models import *
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated, AllowAny,IsAuthenticatedOrReadOnly
from rest_framework.authentication import TokenAuthentication
from rest_framework.viewsets import ModelViewSet,ViewSet
from django.shortcuts import get_object_or_404,Http404
from rest_framework import status
class UserAPIView(APIView):
    """Read or update the authenticated user's own account record."""

    permission_classes = [IsAuthenticated, ]
    authentication_classes = [TokenAuthentication, ]

    def get(self, request):
        # Serialize the user attached to the request by token auth.
        return Response(User_Serializer(request.user).data)

    def put(self, request):
        serializer = User_Serializer(request.user, data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class CustomerAPIView(APIView):
    """Read or update the Customer profile of the authenticated user."""

    permission_classes = [IsAuthenticated, ]
    authentication_classes = [TokenAuthentication, ]

    def get(self, request):
        # Fix: look the profile up through the `user` foreign key, as put()
        # already does. The old `Customer.objects.get(id=user)` only worked
        # while Customer pks happened to coincide with User pks.
        user = request.user.id
        profile = Customer.objects.get(user=user)
        serializer = Profile_serializers(profile)
        return Response(serializer.data)

    def put(self, request):
        user = request.user.id
        profile = Customer.objects.get(user=user)
        serializer = Profile_serializers(profile, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class CatagoryAPIView(ModelViewSet):
    # Full CRUD for categories; anonymous users get read-only access.
    # NOTE(review): "Catagory" (sic) follows the model's spelling.
    permission_classes = [IsAuthenticatedOrReadOnly, ]
    serializer_class = Catagory_serializers
    queryset = Catagory.objects.all()
class ProductAPIView(ModelViewSet):
    # Full CRUD for products; anonymous users get read-only access.
    permission_classes = [IsAuthenticatedOrReadOnly, ]
    serializer_class = Product_serializers
    queryset = Product.objects.all()
class CartAPIView(ViewSet):
    """CRUD over the authenticated user's own carts only."""

    permission_classes = [IsAuthenticated, ]
    authentication_classes = [TokenAuthentication, ]

    def list(self, request):
        # Traverse customer -> user in one query instead of two lookups.
        queryset = Cart.objects.filter(customer__user=request.user)
        serializer = Cart_serializers(queryset, many=True)
        return Response(serializer.data)

    def create(self, request):
        customer = Customer.objects.get(user=request.user)
        serializer = Cart_serializers(data=request.data)
        if serializer.is_valid():
            # Fix: the old body had a no-op `customer = customer` and never
            # attached the owner; pass it to save() so the new cart belongs
            # to the requesting user.
            serializer.save(customer=customer)
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def retrieve(self, request, pk=None):
        queryset = Cart.objects.filter(customer=Customer.objects.get(user=request.user))
        artical = get_object_or_404(queryset, pk=pk)
        serializer = Cart_serializers(artical)
        return Response(serializer.data)

    def update(self, request, pk=None):
        queryset = Cart.objects.filter(customer=Customer.objects.get(user=request.user))
        artical = get_object_or_404(queryset, pk=pk)
        serializer = Cart_serializers(artical, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def destroy(self, request, pk=None, *args, **kwargs):
        queryset = Cart.objects.filter(customer=Customer.objects.get(user=request.user))
        try:
            get_object_or_404(queryset, pk=pk).delete()
            return Response({"Success": "Delate Successfully"})
        except Http404:
            # Fix: the old fall-through referenced an undefined `serializer`,
            # raising NameError instead of reporting the failure.
            return Response({"detail": "Not found."},
                            status=status.HTTP_400_BAD_REQUEST)
class CartProductAPIView(ViewSet):
    """CRUD over cart items belonging to the authenticated user's carts."""

    permission_classes = [IsAuthenticated, ]
    authentication_classes = [TokenAuthentication, ]

    def list(self, request):
        queryset = CartProduct.objects.filter(cart__customer__user=request.user)
        serializer = CartProduct_serializers(queryset, many=True)
        return Response(serializer.data)

    def retrieve(self, request, pk=None):
        queryset = CartProduct.objects.filter(cart__customer__user=request.user)
        article = get_object_or_404(queryset, pk=pk)
        serializer = CartProduct_serializers(article)
        return Response(serializer.data)

    def create(self, request):
        serializer = CartProduct_serializers(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def update(self, request, pk=None):
        queryset = CartProduct.objects.filter(cart__customer__user=request.user)
        # Leftover debug print(artical) removed.
        artical = get_object_or_404(queryset, pk=pk)
        serializer = CartProduct_serializers(artical, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def destroy(self, request, pk=None, *args, **kwargs):
        queryset = CartProduct.objects.filter(cart__customer__user=request.user)
        try:
            get_object_or_404(queryset, pk=pk).delete()
            return Response({"Success": "Delate Successfully"})
        except Http404:
            # Fix: the old fall-through referenced an undefined `serializer`,
            # raising NameError instead of reporting the failure.
            return Response({"detail": "Not found."},
                            status=status.HTTP_400_BAD_REQUEST)
class OrderAPIView(ViewSet):
    """Read-only-plus-delete access to the authenticated user's orders."""

    permission_classes = [IsAuthenticated, ]

    def list(self, request):
        queryset = Order.objects.filter(cart__customer__user=request.user)
        serializer = Order_serializers(queryset, many=True)
        return Response(serializer.data)

    def retrieve(self, request, pk=None):
        queryset = Order.objects.filter(cart__customer__user=request.user)
        article = get_object_or_404(queryset, pk=pk)
        serializer = Order_serializers(article)
        return Response(serializer.data)

    def destroy(self, request, pk=None, *args, **kwargs):
        queryset = Order.objects.filter(cart__customer__user=request.user)
        try:
            get_object_or_404(queryset, pk=pk).delete()
            return Response({"Success": "Delate Successfully"})
        except Http404:
            # Fix: the old fall-through referenced an undefined `serializer`,
            # raising NameError instead of reporting the failure.
            return Response({"detail": "Not found."},
                            status=status.HTTP_400_BAD_REQUEST)
class UserViewSet(APIView):
    """Open registration endpoint: anyone may create a user account."""

    queryset = User.objects.all()
    permission_classes = (AllowAny,)

    def post(self, request):
        serializer = UserSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
|
21,035 | 9faa14e10304d13b49d7f57a46d8fc29313aa1b4 | # This script makes plots of the coincident micorbursts
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime
import os
import sys
sys.path.insert(0, '/home/mike/research/mission-tools/ac6')
import read_ac_data
# Path containing the catalog file to validate
# Path containing the catalog file to validate.
catPath = os.path.join('/home/mike/research/ac6-microburst-scale-sizes/data/'
                       'coincident_microbursts_catalogues', 'flash_catalogue_v2_sorted.txt')
pltWidth = 5  # half-width of each plotted window, in seconds

# Directory for today's plots, created on demand.
# Fix: `datetime.date.now()` is not a valid call; use date.today().
plotPath = ('/home/mike/research/ac6-microburst-scale-sizes/'
            'data/plots/{}'.format(date.today()))
if not os.path.exists(plotPath):
    os.makedirs(plotPath)
    print('Made plot directory at', plotPath)

# Load and filter the catalog.
# NOTE(review): ExploreDependencies is not defined or imported anywhere in
# this file -- confirm which module provides the catalog loader.
c = ExploreDependencies(catPath)
c.filter()

# Set up the figure once and reuse it for every event.
# Fix: plt.subplots(1) returns a single Axes, so the old ax[0] indexing
# raised TypeError; use `ax` directly.
fig, ax = plt.subplots(1)
current_date = date.min  # sentinel so the first event always loads data

# Fix: `for i in len(...)` raised TypeError; iterate over range(len(...)).
for i in range(len(c.cat['burstType'])):
    event_date = c.cat['dateTimeA'][i].date()
    # Reload the 10 Hz data only when the event's day changes. The old code
    # compared against the unbound method `.isoformat` (always unequal) and
    # loaded data for the previous value of current_date.
    if event_date != current_date:
        current_date = event_date
        # NOTE(review): the old call used a bare `read_ac_data_wrapper`,
        # which is undefined -- presumably the function lives in the
        # imported read_ac_data module; confirm.
        dataA = read_ac_data.read_ac_data_wrapper('A', current_date, dType='10Hz')
        dataB = read_ac_data.read_ac_data_wrapper('B', current_date, dType='10Hz')
    # Pick out only the valid data (dos1rate <= 0 marks bad samples).
    validIdxA = np.where(dataA['dos1rate'] > 0)[0]
    validIdxB = np.where(dataB['dos1rate'] > 0)[0]
    # Plot the unshifted data; clear first so events do not overplot.
    ax.clear()
    ax.plot(dataA['dateTime'][validIdxA], dataA['dos1rate'][validIdxA],
            label='AC-6 A')
    ax.plot(dataB['dateTime'][validIdxB], dataB['dos1rate'][validIdxB],
            label='AC-6 B')
    # Set the time range around the coincident microburst event, then save.
    xlim = (c.cat['dateTimeA'][i] - timedelta(seconds=pltWidth),
            c.cat['dateTimeA'][i] + timedelta(seconds=pltWidth))
    # Calculate y limits from the x limits (unit A only, as before).
    idxA = np.where((dataA['dateTime'] > xlim[0]) &
                    (dataA['dateTime'] < xlim[1]) &
                    (dataA['dos1rate'] > 0))[0]
    ylim = (0.9 * np.min(dataA['dos1rate'][idxA]),
            1.1 * np.max(dataA['dos1rate'][idxA]))
    ax.set(title='AC-6 Coincident microburst validation', xlabel='UTC',
           ylabel='dos1 [counts/s]', xlim=xlim, ylim=ylim)
    ax.legend(loc=1)
    # Fix: the old savefig call passed no filename; name each plot after
    # the event timestamp.
    plt.savefig(os.path.join(
        plotPath,
        '{}_microburst.png'.format(c.cat['dateTimeA'][i].strftime('%Y%m%d_%H%M%S'))))
21,036 | 395b289e42c25b20723f86e70f031b36e26857d6 | from setuptools import setup
def readme():
    """Return the contents of the README file (used as long_description)."""
    # Explicit encoding avoids depending on the build machine's locale
    # (python_requires='>=3' below, so the keyword is always available).
    with open('README', encoding='utf-8') as f:
        return f.read()
# Distribution metadata and build configuration for the ppftps package.
setup(name='ppftps',
      version='0.2.1',
      description='Push directories over a secured FTP connection.',
      # The README contents double as the PyPI long description.
      long_description=readme(),
      long_description_content_type='text/markdown',
      classifiers=[
          'Development Status :: 3 - Alpha',
          'License :: OSI Approved :: BSD License',
          'Programming Language :: Python :: 3.6',
          'Topic :: Internet :: File Transfer Protocol (FTP)',
      ],
      keywords='cli ftp ftps keepass push pull sync util',
      url='https://github.com/studio-b12/ppftps',
      author='Christoph Polcin',
      author_email='c.polcin@studio-b12.de',
      license='BSD',
      packages=['ppftps'],
      install_requires=[
          'ftputil',
      ],
      # NOTE(review): dependency_links is deprecated and ignored by modern
      # pip; pykeepass is pinned here but absent from install_requires.
      dependency_links=['https://codeload.github.com/pschmitt/pykeepass/tar.gz/2.8.1'],
      # Both a bin/ script and a console_scripts entry point install `ppftps`.
      scripts=['bin/ppftps'],
      entry_points = {
          'console_scripts': ['ppftps=ppftps:cli'],
      },
      include_package_data=True,
      zip_safe=False,
      project_urls={
          'Bug Reports': 'https://github.com/studio-b12/ppftps/issues',
          'Source': 'https://github.com/studio-b12/ppftps',
      },
      python_requires='>=3',
      )
|
21,037 | a43af1ec6a0c7b770d19e834c6c8c79d7161a7cd | import json
import clipboard
class JsonToBean:
    """Interactive generator that converts a JSON object (read from the
    clipboard) into Java bean source code and copies the result back to
    the clipboard.
    """

    bean_name = None               # capitalized name of the generated Java class
    config = None                  # settings from config.json (or built-in defaults)
    json_response = None           # parsed JSON dict the bean is generated from
    class_properties = str()       # accumulated "private <type> <name>;" lines
    class_getters_setters = str()  # accumulated getter/setter method sources
    java_class = str()             # the fully assembled Java class source
    has_get_set = None             # whether getters/setters are generated

    def __init__(self):
        self.get_config()

    def start(self):
        """Run the full interactive flow: name, options, JSON, generate, copy."""
        self.get_bean_name()
        self.will_generate_get_set()
        self.get_json_response()
        self.generate_bean()
        self.copy_to_clipboard()

    # config.json holds the Java type used for JSON arrays,
    # e.g. {"collection": "List"} for java.util.List.
    def get_config(self):
        """Load config.json, falling back to defaults if absent or invalid."""
        try:
            with open('config.json') as json_file:
                self.config = json.load(json_file)
        except (OSError, ValueError):
            # OSError: file missing/unreadable; ValueError covers
            # json.JSONDecodeError. The original bare `except:` also
            # swallowed KeyboardInterrupt and programming errors.
            self.config = {
                "collection": "List"
            }

    def get_bean_name(self):
        """Prompt for the bean name until a non-empty one is entered."""
        self.bean_name = input("Bean name: ")
        # Bug fix: validate *before* indexing. The original capitalized
        # self.bean_name[0] first, so an empty answer raised IndexError
        # instead of re-prompting (its emptiness check was unreachable).
        if len(self.bean_name) == 0:
            print("Bean name cant be null")
            self.get_bean_name()
            return
        self.bean_name = self.bean_name[0].capitalize() + self.bean_name[1:]

    def will_generate_get_set(self):
        """Ask whether getters and setters should be generated."""
        answer = input("Generate getters and setters? (y/n) ")
        self.has_get_set = True if answer.lower() == 'y' else False

    def get_json_response(self):
        """Read and parse the JSON payload from the clipboard, retrying on failure."""
        input("Press enter if you already copied the json.")
        try:
            self.json_response = json.loads(clipboard.paste())
        except Exception:
            # Broad on purpose: either the clipboard read or the JSON parse
            # may fail; in both cases we re-prompt rather than crash.
            print("Your json might be invalid!")
            self.get_json_response()

    def create_properties_and_getters_setters(self):
        """Translate each top-level JSON key into a Java field (and accessors)."""
        for key, value in self.json_response.items():
            key_cap = key.capitalize()
            # Lists become the configured collection type with a placeholder
            # element type; other non-str/int values get a plain placeholder.
            not_string_type = f"{self.config['collection']}<Insert{key[0].capitalize() + key[1:]}TypeHere>" if type(
                value) is list else f"Insert{key_cap}TypeHere"
            # NOTE(review): ints are mapped to String as well -- presumably
            # intentional for this generator; confirm.
            data_type = 'String' if type(value) is str or type(
                value) is int else not_string_type
            self.__add_property(data_type, key)
            if self.has_get_set:
                self.__add_getter_setter(data_type, key)

    def __add_getter_setter(self, data_type, key):
        # Emit a standard Java getter/setter pair for the field.
        self.class_getters_setters += f"\tpublic {data_type} get{key[0].capitalize() + key[1:]}(){{\n\t\treturn {key};\n\t}}\n\n"
        self.class_getters_setters += f"\tpublic void set{key[0].capitalize() + key[1:]}({data_type} {key}){{\n\t\tthis.{key} = {key};\n\t}}\n\n"

    def __add_property(self, data_type, key):
        # Emit a "private <type> <name>;" field declaration.
        self.class_properties += f"\tprivate {data_type} {key};\n"

    def generate_bean(self):
        """Assemble the final Java class source from the accumulated pieces."""
        self.create_properties_and_getters_setters()
        self.java_class = f'public class {self.bean_name} {{\n{self.class_properties}\n{self.class_getters_setters}}}'

    def copy_to_clipboard(self):
        """Copy the generated Java class to the clipboard."""
        try:
            clipboard.copy(self.java_class)
        finally:
            print("The java class is copied to clipboard successfully.")
|
21,038 | 0869e437fe48043408691e691c3f5e2f93bfc587 | #!/usr/bin/python3
# filename: PythonVariety.py
# Small demo script exercising string methods, list operations and dicts,
# themed around checking a service banner.
port = 21
banner = "FreeFloat FTP Server"
# Bug fix: the original concatenation had no separating spaces and printed
# "[+] Checking forFreeFloat FTP Server on port21".
print("[+] Checking for " + banner + " on port " + str(port))
port_list = [21, 22, 80, 110]
port_open = True

# string methods: upper(), lower(), replace(), find().
print(banner.upper())
print(banner.lower())
print(banner.replace('FreeFloat', 'Ability'))
print(banner.find('FTP'))

# list methods
port_list.append(443)
port_list.append(25)
# lists may hold duplicate values
port_list.append(21)
port_list.sort()
print(port_list)
# index() returns the offset of the first match
position = port_list.index(21)
print(position)

# dictionaries
services = {'ftp': 21, 'ssh': 22, 'smtp': 25, 'http': 80}
print("ftp port is " + str(services['ftp']))
print("ftp port is", services['ftp'])
# Removed the bare `type(services)` statement: it evaluated the type and
# discarded it -- a no-op.
print(services)
21,039 | 7ce0db9919e79f7434c2d8595aab7e3be6e59fb2 | from flask import Flask,render_template
import urllib
import requests
import pandas as pd
from bs4 import BeautifulSoup
from urllib.request import urlopen
def table():
    """Scrape the infobox table from the Capgemini Wikipedia page.

    Side effect: writes rows 11-15 of the scraped infobox to a hard-coded
    CSV path. Returns the same rows rendered as an HTML table string.

    NOTE(review): depends on the live page's infobox layout; the
    data[11:16] row slice and data[2] header row look fragile -- confirm
    against the current page structure.
    """
    data=[]
    url = "https://en.wikipedia.org/wiki/Capgemini"
    page = requests.get(url)
    soup = BeautifulSoup(page.content, 'html.parser')
    # Local name shadows the function; holds the infobox <table> element.
    table=soup.find('table',{'class':'infobox vcard'})
    rows=table.find_all('tr')
    for row in rows:
        # Flatten each row's header/data cells into a list of cleaned strings.
        data.append([cell.text.replace('\n', ' ')for cell in row.find_all(['tr','th', 'td'])])
    df = pd.DataFrame(data[11:16],columns=data[2])
    # to_csv returns None; the assignment is unused but kept byte-identical.
    export_csv = df.to_csv (r'/home/dev732/newproj/venv/company 004/capgemini.csv', index = None, header=True)
    return df.to_html(header="true", table_id="table")
|
21,040 | f65b21a076170ee28311e524eff938894fb8d699 | #!/usr/bin/env python2.7
import math
import os
import pandas
import sys
import time
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from matplotlib.dates import MonthLocator, DateFormatter, DayLocator, epoch2num, num2date
def print_usage(cmd_name):
print "Usage:", cmd_name, "<csv-file> [<col_usec>] [<col_hum>]"
cmd_path = sys.argv[0]
cmd_name = os.path.basename(cmd_path)
if len(sys.argv) < 2:
print_usage(cmd_name)
sys.exit(0)
def humanizeMicroseconds(mus, precision = 2):
    """Format a microsecond count as a human-readable string.

    Successively converts through ms, secs, mins, hrs, days and years as
    long as the value reaches the next unit's threshold, then renders it
    with *precision* decimal places, e.g. 1500 -> "1.50 ms".
    """
    units = ["mus", "ms", "secs", "mins", "hrs", "days", "years"]
    # Divisor to move from units[i] to units[i + 1]; each threshold for
    # stepping up equals the divisor itself.
    steps = [1000.0, 1000.0, 60.0, 60.0, 24.0, 365.25]
    value = float(mus)
    level = 0
    while level < len(steps) and value >= steps[level]:
        value /= steps[level]
        level += 1
    return ("{:." + str(precision) + "f}").format(value) + " " + units[level]
del sys.argv[0]
data_files = [ sys.argv[0] ]
del sys.argv[0]
usec_col=None
hum_col=None
if len(sys.argv) >= 1:
usec_col = sys.argv[0]
del sys.argv[0]
if len(sys.argv) >= 1:
hum_col = sys.argv[0]
del sys.argv[0]
else:
print "ERROR: Must specify <col_hum> after <col_usec>"
print
print_usage(cmd_name)
sys.exit(1)
csv_data = pandas.concat((pandas.read_csv(f) for f in data_files))
cols = list(csv_data.columns.values)
print csv_data.to_string();
if usec_col is not None:
for idx, r in csv_data.iterrows():
if r[usec_col] != 'todo' and str(r[usec_col]) != 'nan':
csv_data.ix[idx, hum_col] = humanizeMicroseconds(int(r[usec_col]))
else:
for c_usec in cols:
if c_usec.endswith("_usec"):
c_hum = c_usec[:-5] + "_hum"
print c_usec
print c_hum
for idx, r in csv_data.iterrows():
if r[c_usec] != 'todo' and str(r[c_usec]) != 'nan':
csv_data.ix[idx, c_hum] = humanizeMicroseconds(int(r[c_usec]))
print csv_data.to_string();
csv_data.to_csv(data_files[0], float_format='%f', index=False);
|
21,041 | d40318072d21286f91a5d225daeab9d848cfb46f | from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING
from checkov.bicep.parser import Parser
from checkov.bicep.utils import get_scannable_file_paths
from checkov.common.graph.graph_builder.consts import GraphSource
from checkov.common.graph.graph_manager import GraphManager
from checkov.bicep.graph_builder.local_graph import BicepLocalGraph
if TYPE_CHECKING:
from checkov.common.typing import LibraryGraphConnector
from pycep.typing import BicepJson
class BicepGraphManager(GraphManager[BicepLocalGraph, "dict[Path, BicepJson]"]):
    """Checkov graph manager for Bicep templates.

    Parsing happens inline in build_graph_from_source_directory (the base
    class is given parser=None) and graphs are built as BicepLocalGraph.
    """

    def __init__(self, db_connector: LibraryGraphConnector, source: str = GraphSource.BICEP) -> None:
        super().__init__(db_connector=db_connector, parser=None, source=source)

    def build_graph_from_source_directory(
        self,
        source_dir: str,
        local_graph_class: type[BicepLocalGraph] = BicepLocalGraph,
        render_variables: bool = True,
        parsing_errors: dict[str, Exception] | None = None,
        download_external_modules: bool = False,
        excluded_paths: list[str] | None = None,
    ) -> tuple[BicepLocalGraph, dict[Path, BicepJson]]:
        """Parse every Bicep file under *source_dir* and build its local graph.

        NOTE(review): local_graph_class, render_variables,
        download_external_modules and excluded_paths are accepted for
        interface compatibility but not used here; the caller-supplied
        parsing_errors dict is shadowed by the parser's own result rather
        than filled in -- confirm callers don't rely on it.
        """
        file_paths = get_scannable_file_paths(root_folder=source_dir)
        # Parser returns (definitions, raw file texts, errors); the raw texts are unused here.
        definitions, definitions_raw, parsing_errors = Parser().get_files_definitions(file_paths)  # type:ignore[assignment]
        local_graph = self.build_graph_from_definitions(definitions)
        return local_graph, definitions

    def build_graph_from_definitions(
        self, definitions: dict[Path, BicepJson], render_variables: bool = True
    ) -> BicepLocalGraph:
        """Build and return a BicepLocalGraph from already-parsed definitions."""
        local_graph = BicepLocalGraph(definitions)
        local_graph.build_graph(render_variables)
        return local_graph
|
21,042 | 642012d8ae61e2412c1fa1c97b2c94ce683cda43 | import os
shell.executable("bash")
def get_mem_gb(wildcards, input):
    """Snakemake resource callable: size of the first input file in whole
    GiB, with a floor of 1 (also used when the file does not exist yet)."""
    gib = 0
    first_input = input[0]
    if os.path.exists(first_input):
        gib = int(os.path.getsize(first_input) / (1024 ** 3))
    return max(gib, 1)
rule a:
input:
"test1.in",
"test2.in"
output:
"test.out"
params:
a=lambda wildcards, input, resources: "+".join(input)
resources:
mem_gb=get_mem_gb
shell:
"echo {params.a} > {output}"
|
21,043 | 358b730d66b540c9afb7c9f1e2e35f142cd22384 | from openfisca_uk_data.datasets.spi.raw_spi import RawSPI
from openfisca_uk_data.datasets.spi.spi import SPI
|
21,044 | 56f508aff5b51f4550fe266313abb040fbf4be6b | class Solution(object):
def evalRPN(self, tokens):
"""
:type tokens: List[str]
:rtype: int
"""
stack = []
for token in tokens:
# print stack
if token in ['+','-','*','/']:
right = (stack.pop())
left = (stack.pop())
if token == '+':
stack.append(left + right)
if token == '-':
stack.append(left - right)
if token == '*':
stack.append(left * right)
if token == '/':
if left / right < 0:
stack.append( -1 * (abs(left)/abs(right)))
else:
stack.append(left /right)
else:
stack.append(int(token))
return stack[0] |
21,045 | 06ea674587d1d33d77b2c338357bc5591f986063 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#----------------------------------------------------------------------------
# Name: ekfsync.py
# Version: 1.1
# Purpose: Synchronisation dirs
#
# Author: Eugene Klepikov
# E-Mail: klek07@ya.ru
#
# Created: 19.02.2020
# Updated: 20.02.2020
# Copyright: (c) 2017 WildFox24
# Licence: GPL
#----------------------------------------------------------------------------
##
'''
Sync /home/oracle/disk2/car_photo/integration_file/ from arzamas to kissamos
'''
import sys, os, subprocess
import datetime
wd=0
cmd=""
fname="/home/oracle/ekfsync.dat"
#fname="ekfsync.dat"
curDate=datetime.date.today()
#---------------------------------------------------------------------------
def ekGetData(fname):
    """Read *fname* line by line and return the last line as an int.

    Used to persist/restore the current sync directory counter.
    """
    with open(fname, "rt") as source:
        for raw_line in source:
            value = int(raw_line.strip())
    return int(value)
#---------------------------------------------------------------------------
def ekWriteData(fname, data):
    """Overwrite *fname* with the text *data* (the persisted counter)."""
    with open(fname, "wt") as sink:
        sink.write(data)
#---------------------------------------------------------------------------
def main():
print "-----",curDate,"-----"
wd=ekGetData(fname)
cmd="rsync -auv /home/oracle/disk2/car_photo/integration_file/{0:04d}/ /mnt/cph/integration_file/{1:04d}/ | wc -l".format(wd, wd)
print cmd
retval=subprocess.check_output(cmd, shell=True)
if int(retval) == 4:
print "Change to next dir {0:04d}".format(wd+1)
ekWriteData(fname, str(wd+1))
print retval
return 0
if __name__ == '__main__' :
sys.exit( main() )
#----------------------------------------------------------------------------
|
21,046 | 501ef807c52dfe04a82a1a985a62d747db9149da | import json
class scenario:
    """Container for one simulation scenario configuration.

    Repaired definition: the original listed the attribute names as bare
    expressions at class level (a NameError at class-definition time),
    its __init__ took unrelated (name, age) parameters while reading
    undefined globals, and the getters were missing ``self``. Attribute
    and accessor names are kept; the class is now actually constructible.
    """

    def __init__(self, scenarioID=None, pedestrians_on_waypoint_reached=None,
                 NPC_waypoint_reached=None, NPC_stop_line=None,
                 NPC_lane_change=None, weather=None, time_of_day=None,
                 Ego_sensors=None, Ego_position=None):
        self.scenarioID = scenarioID
        self.pedestrians_on_waypoint_reached = pedestrians_on_waypoint_reached
        self.NPC_waypoint_reached = NPC_waypoint_reached
        self.NPC_stop_line = NPC_stop_line
        self.NPC_lane_change = NPC_lane_change
        self.weather = weather
        self.time_of_day = time_of_day
        self.Ego_sensors = Ego_sensors
        self.Ego_position = Ego_position

    def get_scenarioID(self):
        return self.scenarioID

    def get_pedestrians_on_waypoint_reached(self):
        return self.pedestrians_on_waypoint_reached

    def get_NPC_waypoint_reached(self):
        return self.NPC_waypoint_reached

    def get_NPC_stop_line(self):
        return self.NPC_stop_line

    # Added for completeness: the original had the attribute but no accessor.
    def get_NPC_lane_change(self):
        return self.NPC_lane_change

    def get_weather(self):
        return self.weather

    def get_time_of_day(self):
        return self.time_of_day

    def get_Ego_sensors(self):
        return self.Ego_sensors

    def get_Ego_position(self):
        return self.Ego_position
def serialization(scenario):
|
21,047 | df77f80a54567f4f9a5367a0a5da814d0baf92ed | from api.app import app
app.run()
|
21,048 | a192390a5a4b830d346380fecbe169844e49926e | #!/usr/bin/python3
def common_elements(set_1, set_2):
    """Return the set of elements present in both inputs.

    Uses C-level set intersection instead of the original O(n*m)
    comprehension with repeated membership scans; accepts any hashable
    iterables, same result as before.
    """
    return set(set_1) & set(set_2)
|
21,049 | 9d99308ca022681c65cc0dc46227d05256de373b | from Utils.Layout import Layout
class LayoutFactory:
    """Factory for Layout objects."""

    @staticmethod
    def createLayout(layoutFile):
        """Build a Layout from *layoutFile* via Layout.fromFile.

        Bug fix: the original defined this without ``self`` and without
        ``@staticmethod``, so calling it on an *instance* raised
        TypeError. Declared static so both ``LayoutFactory.createLayout(f)``
        (unchanged) and ``LayoutFactory().createLayout(f)`` work.
        """
        return Layout.fromFile(layoutFile)
|
21,050 | e6fd58a75ff43bf96267c085c5d486c359c89003 | #---------------------------------------------------------------
# 最長共通部分列問題 (Longest Common Subsequence problem: LCS)
# 最長共通部分列の文字数を返す
def lcs_len(x, y):
    """Return the length of the longest common subsequence of *x* and *y*.

    Hunt–Szymanski-style: thresholds[k] is the smallest end position (1-based,
    exclusive) in *x* of a common subsequence of length k.
    """
    thresholds = [0]
    for ch in y:
        search_from = 0
        level = 1
        while level <= len(thresholds):
            match_end = x.find(ch, search_from) + 1  # 0 when not found
            if match_end:
                if level < len(thresholds):
                    search_from = thresholds[level]
                    thresholds[level] = min(match_end, search_from)
                else:
                    thresholds.append(match_end)
                    break
            level += 1
    return len(thresholds) - 1
def lcs(X, Y):
    """Return the length of the longest common subsequence of *X* and *Y*.

    ends[k] holds the smallest 1-based end position in *X* of a common
    subsequence of length k; levels are updated from longest to shortest
    so each character of *Y* is used at most once per level.
    """
    ends = [0]
    for ch in Y:
        for level in reversed(range(len(ends))):
            hit = X.find(ch, ends[level])
            if hit == -1:
                continue
            if level + 1 == len(ends):
                ends.append(hit + 1)
            else:
                ends[level + 1] = min(ends[level + 1], hit + 1)
    return len(ends) - 1
|
21,051 | 1f35e6fc4af4c48ebe19f40f901d76d500debefb | #!/usr/bin/env python3
import json
import petname
import random
import sys
head_generator = ['bull', 'lion', 'raven', 'bunny']
arm_generator = [2,4,6,8,10]
leg_generator = [3,6,9,12]
animals = {}
for i in range (20):
animals[i] = {}
animals[i]['head'] = random.choice(head_generator)
body1 = petname.name()
body2 = petname.name()
animals[i]['body'] = ('{}-{}').format(body1, body2)
animals[i]['arms'] = random.choice(arm_generator)
animals[i]['legs'] = random.choice(leg_generator)
animals[i]['tail'] = animals[i]['arms']+animals[i]['legs']
with open(sys.argv[1], 'w') as out:
json.dump(animals, out, indent=2)
|
21,052 | 1c4ccbafddf535b44994ad511421d93ea396ca2f | # Sort number list in ascending & decending
# Read a comma-separated list of numbers and show it sorted ascending
# and descending.
usr_inp = input("Enter a list of numbers : ")
# Bug fix: the original sorted the raw *strings*, so ordering was
# lexicographic ("10" < "2"). Parse each item into a real number first.
mylist = [float(item) if '.' in item else int(item)
          for item in usr_inp.split(',')]

print("\nList :", mylist)

# sort ascending (in place)
mylist.sort()
print("In Ascending :", mylist)

# sort descending (in place). The original assigned list.sort()'s None
# return value to an unused variable; dropped.
mylist.sort(reverse = True)
print("In Decending :", mylist)
21,053 | 292379a721c767093f6cb64d48c0f13e706a97d3 | '''
Team Id: HC#145
Author List: Sujan Bag
Filename: task4.py
Theme: Homecoming (HC)
Functions: findhabit(image),findanimal(image),Hpredict_image(image_path,model),Apredict_image(image_path,model),Diff(li1,li2)
Global Variables: position=[],hposition=[],aposition=[],name=[],hname=[],aname=[],dicto={},animallist={},habitatlist={},Amodel,
Aclass_name,Amodel1,Hmodel,Hclass_name,Hmodel1,hab,data,habitatandanimalllist,handa,flag,habit,animal,habitatloc,animalloc,dictokey,
valid_habitat,invalid_habitat,fullstr,printstr,file,ser,file1,text,x,u,v,a,b,k,x,c,d,i,j,x,t,ap,df,animalmodelpath,habitmodelpath,excel_file_name,img
'''
import serial
import datetime
import torch
from PIL import Image
from torch.autograd import Variable
import torchvision
from torchvision import datasets, models, transforms
import cv2
import argparse
import torch
import pandas as pd
import warnings
#ignore the warnning
warnings.filterwarnings("ignore")
ap=argparse.ArgumentParser()
ap.add_argument("input",help="input a arena image") #input for taking arena image as a argument
ap.add_argument("-s","--save",help="save contoured image") #for saving "-s" argument
ap.add_argument("-amod","--animalmodel",help="path of animal model") #for providing animal model -amod and location
ap.add_argument("-homd","--habitatmodel",help="path of habitat model")#for providing habitat model -hmod and location
ap.add_argument("-excel","--mappingfile",help="path of mapping file")#for animal and habitat mapping -excel take a excel file only
args=ap.parse_args()
if args.animalmodel != None:
animalmodelpath=args.animalmodel
else:
animalmodelpath="divide2PerfectAnimalModel.pth" #by default it's take animal model file from it's current directory
if args.mappingfile != None:
excel_file_name=args.mappingfile
else:
excel_file_name="Animal_Habitat_Mapping.xlsx" #by default it's take animal habitat mapping file location from it's current directory
if args.habitatmodel != None:
habitatmodelpath=args.habitatmodel
else:
habitatmodelpath='dividePerfectHabitatModel.pth'#by default it's take habitat model location from it's current working directory
img=args.input
df=pd.read_excel(excel_file_name)#read the mapping excel file
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
position=[]
hposition=[]
aposition=[]
name=[]
hname=[]
aname=[]
dicto={}
animallist={}
habitatlist={}
image=cv2.imread(img)
Amodel=torch.load(animalmodelpath,map_location=device) #load animal model
Aclass_name=Amodel['class_name'] #copy all the class name of this model in Aclass_name variable
Amodel1=Amodel['arch']#copy entire model in Amodel1
Hmodel=torch.load(habitatmodelpath,map_location=device)#load habitat model
Hclass_name=Hmodel['class_name'] #copy All the class name of this model in Hclass_name variable
Hmodel1=Hmodel['arch'] #copy entire model in Hmodel1
'''
Function name : findhabit(image)
input : image
output : predicted class name
call example : a=findhabit(image)
'''
def findhabit(image):
    """Classify a habitat image and return the predicted class name.

    *image* is a numpy array interpreted as RGB; uses the module-level
    habitat model ``Hmodel1`` and its class list ``Hclass_name``.
    """
    image=Image.fromarray(image,'RGB')
    index=Hpredict_image(image,Hmodel1)
    prediction=Hclass_name[index]
    return prediction
'''
Function name : findanimal(image)
input : image
output : predicted class name
call example : a=findanimal(image)
'''
def findanimal(image):
    """Classify an animal image and return the predicted class name.

    *image* is a numpy array interpreted as RGB; uses the module-level
    animal model ``Amodel1`` and its class list ``Aclass_name``.
    """
    image=Image.fromarray(image,'RGB')
    index=Apredict_image(image,Amodel1)
    prediction=Aclass_name[index]
    return prediction
'''
Function name : Hpredict_image(image_path,model)
input : image path and model
output : predicted class name index of Habitat image
call example : a=Hpredict_image(image_path,model1)
'''
def Hpredict_image(image_path,model1):
    """Run the habitat classifier *model1* on a PIL image and return the
    index of the highest-scoring class.

    Despite the name, *image_path* is an already-loaded PIL image, not a
    filesystem path (see findhabit).
    """
    pil_image = image_path
    # Resize to 224 px, convert to tensor and normalise per channel.
    preprocess = transforms.Compose([
        transforms.Resize(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    # Single image -> batch of one.
    batch = preprocess(pil_image).float().unsqueeze_(0)
    if torch.cuda.is_available():
        # NOTE(review): kept from the original -- the .cuda() return value
        # is discarded; the tensor is only moved by .to(device) below.
        batch.cuda()
    model_input = Variable(batch).to(device)
    scores = model1(model_input)
    return scores.cpu().data.numpy().argmax()
'''
Function name : Apredict_image(image_path,model)
input : image path and model
output : predicted class name index of Animal image
call example : a=Apredict_image(image_path,model1)
'''
#this function will predict image
def Apredict_image(image_path,model1):
    """Run the animal classifier *model1* on a PIL image and return the
    index of the highest-scoring class.

    Despite the name, *image_path* is an already-loaded PIL image, not a
    filesystem path (see findanimal). Differs from Hpredict_image only by
    the CenterCrop(224) step.

    Cleanup: removed the dead commented-out alternative transformation
    block, the redundant ``model_ft = model1`` alias and the local that
    shadowed the builtin ``input``; behavior is unchanged.
    """
    image = image_path
    # Resize, centre-crop to 224 px, convert to tensor and normalise per channel.
    transformation = transforms.Compose([
        transforms.Resize(224),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    image_tensor = transformation(image).float()
    # Single image -> batch of one.
    image_tensor = image_tensor.unsqueeze_(0)
    if torch.cuda.is_available():
        # NOTE(review): kept from the original -- the .cuda() return value
        # is discarded; the tensor is only moved by .to(device) below.
        image_tensor.cuda()
    model_input = Variable(image_tensor).to(device)
    output = model1(model_input)
    return output.cpu().data.numpy().argmax()
#x is a variable which will count number of contour image
#This will draw contour and predict all the habitat image
x=1
for i in range(0,5):
for j in range(0,5):
image2=image[1629-i*310:1930-i*310,390+j*310:690+j*310,:] #habitat location of arena image
#cv2.imshow('image2',image2)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
imggray=cv2.cvtColor(image2,cv2.COLOR_BGR2GRAY)
_,thres=cv2.threshold(imggray,220,225,0)
_,contures,_=cv2.findContours(thres,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE) #find conture of habitat image
# print(len(contures))
if len(contures) != 1:
pred=findhabit(image[1639-i*310:1922-i*310,396+j*310:680+j*310,:])#predict class name of habitat image
# print(x,pred)
position.append(x)
hposition.append(x)
name.append(pred)
hname.append(pred)
dicto=dict(zip(position,name))
habitatlist=dict(zip(hposition,hname))
image[1629-i*310:1930-i*310,390+j*310:690+j*310,:]=cv2.drawContours(image2,contures,0,(0,255,0),4)
val=x
cv2.putText(image2,str(val),(80,150),cv2.FONT_HERSHEY_SIMPLEX,1.8,(0,0,255),2)
#cv2.imshow('con',image)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
x=x+1
#top corner
u=0
v=0
for i in range(0,2):
image3=image[120:265,120+u:264+v,:] #location of image
image11=image[90:265,120+u:264+v,:]
img10gray=cv2.cvtColor(image3,cv2.COLOR_BGR2GRAY)
_,thres=cv2.threshold(img10gray,220,225,0)
_,contures,_=cv2.findContours(thres,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)#find conture of image location
# print(len(contures))
if len(contures) !=3:
pred=findanimal(image[120:265,120+u:264+v,:])#prediction of animal image
image[120:265,120+u:264+v,:]=cv2.drawContours(image3,contures,1,(0,255,0),2)
if i==0:
value='A6'
else:
value='F6'
cv2.putText(image11,value,(50,30),cv2.FONT_HERSHEY_SIMPLEX,0.8,(0,0,0),2)
#cv2.imshow('track',image)
#cv2.imshow('im',image[120:265,120+u:264+v,:])
#cv2.waitKey(0)
#cv2.destroyAllWindows()
#print(value,pred)
position.append(value)
aposition.append(value)
name.append(pred)
aname.append(pred)
dicto=dict(zip(position,name))
animalliston=dict(zip(aposition,aname))
u=u+1936
v=v+1937
#bottom two corner contour find ,drawing and prediction
u=0
v=0
for i in range(0,2):
image7=image[2055:2200,120+u:265+v,:]#image location copy to image7
image8=image[2025:2200,120+u:265+v,:]
img7gray=cv2.cvtColor(image7,cv2.COLOR_BGR2GRAY)
_,thres=cv2.threshold(img7gray,220,225,0)
_,contures,_=cv2.findContours(thres,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)#find conture
#print(len(contures))
if len(contures) != 3:
pred=findanimal(image[2074:2181,138+u:249+v,:])#predict animal name
image[2055:2200,120+u:265+v,:]=cv2.drawContours(image7,contures,1,(0,255,0),2)
if i==0:
value='A1'
else:
value='F1'
cv2.putText(image8,value,(50,30),cv2.FONT_HERSHEY_SIMPLEX,0.8,(0,0,0),2)
#cv2.imshow('images',image)
#cv2.imshow('track',image[2055:2200,120+u:265+v,:])
#cv2.waitKey(0)
#cv2.destroyAllWindows()
# print(value,pred)
position.append(value)
aposition.append(value)
name.append(pred)
aname.append(pred)
dicto=dict(zip(position,name))
animalliston=dict(zip(aposition,aname))
u=u+1936
v=v+1937
#top to bottom contour find drawing and detection
a=0
b=0
k=0
x=0
for j in range(0,4):
c=0
d=0
for i in range(0,2):
image3=image[2055-c:2200-d,622+a:766+b,:] #location of arena image
image13=image[2025-c:2200-d,622+a:766+b,:]
img7gray=cv2.cvtColor(image3,cv2.COLOR_BGR2GRAY)
_,thres=cv2.threshold(img7gray,220,225,0)
_,contures,_=cv2.findContours(thres,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)#find all conture
#print(len(contures))
pred=findanimal(image[2075-c:2182-d,636+a:753+b,:]) #predict animal name
if len(contures) !=3:
image[2055-c:2200-d,622+a:766+b,:]=cv2.drawContours(image3,contures,1,(0,255,0),2)
if i==0:
value=chr(ord('B')+x)+'1'
else:
value=chr(ord('B')+x)+'6'
cv2.putText(image13,value,(50,30),cv2.FONT_HERSHEY_SIMPLEX,0.8,(0,0,0),2)
#cv2.imshow('track',image)
#cv2.imshow('image4',image[2055-c:2200-d,622+a:766+b,:])
#cv2.waitKey(0)
#cv2.destroyAllWindows()
#print(value,pred)
position.append(value)
aposition.append(value)
name.append(pred)
aname.append(pred)
dicto=dict(zip(position,name))
animalliston=dict(zip(aposition,aname))
c=c+1935
d=d+1935
x=x+1
a=a+311
b=b+309
#Two Side Left-Right contour detection drawing and prediction
a=0
b=0
k=0
for j in range(0,2):
x=2
for i in range(0,4):
image1=image[1552-i*310:1697-i*310,120+a:265+b,:]#location of arena image
image14=image[1522-i*310:1697-i*310,120+a:265+b,:]
img1gray=cv2.cvtColor(image1,cv2.COLOR_BGR2GRAY)
_,thres=cv2.threshold(img1gray,220,225,0)
_,contures,_=cv2.findContours(thres,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)#find conture of image location
#print(len(contures))
if len(contures) !=3:
pred=findanimal(image[1569-i*309:1676-i*311,140+a:244+b,:]) #predict animal name
image[1552-i*310:1697-i*310,120+a:265+b,:]=cv2.drawContours(image1,contures,1,(0,255,0),2)
if j==0:
val='A'+str(x)
else:
val='F'+str(x)
cv2.putText(image14,val,(50,30),cv2.FONT_HERSHEY_SIMPLEX,0.8,(0,0,0),2)
#cv2.imshow('track',image[1552-i*310:1697-i*310,120+a:265+b,:])
#cv2.imshow('ori',image)
#cv2.waitKey(0)
#cv2.destroyAllWindows()
#print(val,pred)
position.append(val)
aposition.append(value)
name.append(pred)
aname.append(pred)
dicto=dict(zip(position,name))
animalliston=dict(zip(aposition,aname))
x=x+1
else:
x=x+1
a=a+1933
b=b+1936
print('\n Animal And Habitat : ')
print("__________________________")
print(dicto) #this will print animal and habitat name with location
'''for i in dicto.keys():
print(dicto[i])'''
'''print('\nHabitat(Cell Numbers)')
print(habitatlist)'''
print("For Animal Dataset")
print("..................")
print('\nAnimal(Location)')
print('__________________\n')
print(animalliston)
a,b=df.shape #assign excel sheet column and row size in a and b variable
hab=[]
for i in range(0,a):
hab.append(df.iloc[i][0])#copy all habitat name of excell file in hab list
data={}
for i in range(0,a):
for j in range(0,b):
data.update({hab[i]:df.iloc[i][0:]})
#all the habitat and animal which are maching to excel file copy to habitatandanimal list
habitatandanimallist=[]
for x in hab:
for y in dicto.keys():
if(x==dicto[y]):
listOfhabitat = [key for (key, value) in dicto.items() if value == x]
# print(x,listOfhabitat)
habitatandanimallist.append(listOfhabitat)
for z in range(1,b):
for t in dicto.keys():
if(data[x][z]==dicto[t]):
#habitatandanimallist.append('\n')
listofanimal= [key for (key, value) in dicto.items() if value == data[x][z]]
# print(data[x][z],listofanimal)
#habitatandanimallist.append('\n')
habitatandanimallist.append(listofanimal)
#habitatandanimallist.append('\n')
break
#habitatandanimallist.append('\n')
break
handa=[]
flag=0
i=0
while(i<len(habitatandanimallist)):
j=i+1
while(j<len(habitatandanimallist)):
if(habitatandanimallist[i]==habitatandanimallist[j]):
print(habitatandanimallist[i],i)
flag=1
i=i+1
else:
flag=0
j=j+1
if(flag==0):
handa.append(habitatandanimallist[i])
i=i+1
habitatandanimallist=handa
#separate habitat and animal
i=0
habit=[]
animal=[]
while(i <len(habitatandanimallist)):
if(type(habitatandanimallist[i][0])==str):
habit.append(habitatandanimallist[i-1])
animal.append(habitatandanimallist[i])
#while j in range(i+1,len(habitatandanimallist)):
j=i+1
while(j<len(habitatandanimallist)):
if(type(habitatandanimallist[j][0])==str):
animal.append(habitatandanimallist[j])
habit.append(habitatandanimallist[i-1])
i=i+1
j=j+1
else:
break
i=i+1
#according to mapping rearrange habitat and animal
i=0
habitatloc=[]
animalloc=[]
while(i<len(animal)):
if(len(animal[i])==len(habit[i])):
l=0
while(l<len(habit[i])):
habitatloc.append(habit[i][l])
l=l+1
#print('animal=habit')
i=i+1
elif(len(animal[i])>len(habit[i])):
j=0
# print('animal greater')
while(j<len(habit[i])):
habitatloc.append(habit[i][j])
j=j+1
k=0
while(k<(len(animal[i])-len(habit[i]))):
habitatloc.append(habit[i][0])
k=k+1
i=i+1
else:
j=0
while(j<len(animal[i])):
habitatloc.append(habit[i][j])
j=j+1
i=i+1
t=0
while(t<len(animal)):
for j in range(0,len(animal[t])):
animalloc.append(animal[t][j])
t=t+1
dictokey=[]
for key in habitatlist:
dictokey.append(key)
def Diff(li1, li2):
    """Return the elements of *li1* that do not appear in *li2*.

    The original used ``list(set(li1) - set(li2))``, whose ordering is
    arbitrary hash order. This version drops duplicates the same way but
    preserves the first-seen order of *li1*, making the result
    deterministic for callers that print or index it.
    """
    excluded = set(li2)
    seen = set()
    ordered = []
    for item in li1:
        if item not in excluded and item not in seen:
            seen.add(item)
            ordered.append(item)
    return ordered
habitat_loc=Diff(dictokey,habitatloc)
invalid_habitat=[]
for i in range(0,len(habitat_loc)):
invalid_habitat.append([habitat_loc[i],habitatlist[habitat_loc[i]]])
valid_habitat=[]
for i in range(0,len(habitatloc)):
valid_habitat.append([habitatloc[i],habitatlist[habitatloc[i]]])
print("For Habitat Dataset")
print("....................")
print("\nValid habitat set :")
print("___________________\n")
print(valid_habitat)
print("\nInvalid habitat set :")
print("______________________\n")
print(invalid_habitat)
#Only two animal are associated with one habitat acording to Theme Rule
animal=[]
habitat=[]
i=0
while(i<len(habitatloc)):
animal.append(animalloc[i])
habitat.append(habitatloc[i])
j=i+1
count=1
while(j<len(habitatloc)):
if(habitatloc[i]==habitatloc[j]):
count=count+1
j=j+1
if(count>2):
print(dicto[animalloc[i]])
i=i+1
i=i+1
fullstr=(str(habitat)+'\n'+str(animal))#all animal and habitat convert to string and store it in fullstr variable
printstr=('Animals = '+str(animal)+'\n'+'Habitats = '+str(habitat)) #This string will print in output screen
fullstr=fullstr.replace("'",'')#remove '
fullstr=fullstr.replace("[",'')#remove [
fullstr=fullstr.replace("]",'')#remove ]
printstr=printstr.replace("'",'')#remove '
'''printstr=printstr.replace("[",'')#remove [
printstr=printstr.replace("]",'')#remove ]
'''
#create a text file for this fullstr text file
file=open("textfileofanimalandhabitat.txt","w")
file.writelines(fullstr)
file.close()
print('\n After Mapping of animal and habitat this is only locations of animal and habitat :')
print("_______________________________________________________________________________________\n")
print(printstr)
#if save argument passed then it will save the drawing contour image
if args.save != None:
cv2.imwrite(args.save,image)
print('successful to save ......')
ser=serial.Serial()
ser.port="com3"
ser.baudrate=9600
print(ser.portstr)
file1=open("textfileofanimalandhabitat.txt","r")
text=file1.read()
text=text+' #'
print(text)
print(datetime.datetime.now().time().__format__('%H:%M:%S'))
ser.open()
ser.write(text.encode())
ser.close()
print(datetime.datetime.now().time().__format__('%H:%M:%S'))
cv2.namedWindow("arena image",cv2.WINDOW_NORMAL)
cv2.imshow("arena image",image)
cv2.waitKey(0)
cv2.destroyAllWindows() |
21,054 | c04220fd2e5f3727e8805e19420c799594b0db46 |
from faker import Faker
from src.domain.usecases import CreateUserFromGitHubContract, CreateUserFromGitHubParams, CreateUserParams
from typing import Dict
faker = Faker()
class CreateUserFromGithubMock(CreateUserFromGitHubContract):
    """Test double for CreateUserFromGitHubContract.

    Records the params passed to execute() and returns either a
    pre-seeded ``return_value`` or a default success payload built from
    fake data (keeping the caller's username and gender).
    """

    def __init__(self):
        self.execute_params = {}
        self.return_value = {}

    def execute(self, params: CreateUserFromGitHubParams) -> Dict:
        self.execute_params = params
        if not self.return_value:
            # Lazily build a default "ok" payload the first time only.
            user = CreateUserParams(
                params.username,
                faker.name(),
                faker.name(),
                faker.word(),
                faker.word(),
                faker.email(),
                params.gender
            )
            self.return_value = {"success": True, "message": "ok", "data": user}
        return self.return_value
21,055 | 84091f9dbdca6ba18e9f10f2c3ee91b217403449 | from selenium import webdriver
from urllib import urlopen
from bs4 import BeautifulSoup
import re
import time
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "TrafficAnalyzer.settings")
from traffic.models import camera_info
from django.utils import timezone
import random
def wait_for(condition_function):
    """Poll *condition_function* every 0.1 s for up to 2 seconds.

    Returns True as soon as the condition holds; raises Exception on
    timeout.
    """
    deadline = time.time() + 2
    while time.time() < deadline:
        if condition_function():
            return True
        time.sleep(0.1)
    raise Exception(
        'Timeout waiting for {}'.format(condition_function.__name__)
    )
def click_through_to_new_page(link_text):
    """Click the link with *link_text* and wait for the page to change.

    Detects navigation by polling the old link element until it goes
    stale (the old DOM has been replaced).
    """
    # Bug fix: the original referenced StaleElementReferenceException
    # without ever importing it, raising NameError the first time a
    # click actually navigated. Imported lazily here so the fix stays
    # local to this function; selenium is already a file dependency.
    from selenium.common.exceptions import StaleElementReferenceException

    link = driver.find_element_by_link_text(link_text)
    link.click()

    def link_has_gone_stale():
        try:
            # Poll the stored element with an arbitrary call; a stale
            # reference means the old page is gone.
            link.find_elements_by_id('doesnt-matter')
            return False
        except StaleElementReferenceException:
            return True

    wait_for(link_has_gone_stale)
##driver = webdriver.Firefox()
driver = webdriver.PhantomJS()
driver.get("https://www.i-traffic.co.za/traffic/cameras.aspx")
##page=urlopen("https://www.i-traffic.co.za/traffic/cameras.aspx").read()
i=0
while i<66:
print "**************************Page:%d*****************************" % i
i+=1
page=driver.page_source
soup = BeautifulSoup(page, 'html.parser')
for img in soup.findAll('img'):
img_source=img.get('src')
print (img_source)
camIDregex=re.compile('&deviceID=(.*)')
camID=re.findall(camIDregex,img_source)
if len(camID) == 1:
print camID[0]
camStr=str(camID[0])
camStr=camStr.replace("/","-")
imgFile=open("traffic_images/"+camStr+".jpg","wb")
try:
imgFile.write(urlopen(img_source).read())
imgFile.close()
record = camera_info(camera=camStr, traffic=random.randint(1, 100),timestamp=timezone.now())
record.save()
except:
pass
try:
click_through_to_new_page('Next')
except:
pass
|
21,056 | f1677842bd1e0b5998c3babd72972d277f92bfc3 | from marshmallow import Schema, fields, ValidationError, \
validate, post_load
import json
from datetime import datetime
def validate_mobile(n):
    """Validate that *n* is an all-digit string of at most 20 characters.

    Raises marshmallow.ValidationError on failure; returns None on success.
    """
    message = None
    if len(n) > 20:
        message = 'length of mobile number must be less than 20'
    elif not n.isdigit():
        message = 'mobile number must be numeric'
    if message is not None:
        raise ValidationError(message)
def validate_date(date_str):
    """Validate that *date_str* parses as an ISO ``YYYY-MM-DD`` date.

    Raises marshmallow.ValidationError on failure; returns None on success.
    """
    parsed_ok = True
    try:
        datetime.strptime(date_str, '%Y-%m-%d')
    except Exception:
        parsed_ok = False
    if not parsed_ok:
        raise ValidationError('date must be valid')
class AddressSchema(Schema):
    """Postal/contact address.

    Only the core location fields (address_line_1, city, district, state,
    country, pincode) are required; contact details are optional.
    """
    name = fields.Str(validate=validate.Length(min=1))
    country_code = fields.Str(validate=validate.Length(min=1))
    mobile_number = fields.Str(validate=validate_mobile)
    email = fields.Email()
    address_line_1 = fields.Str(required=True, validate=validate.Length(min=1))
    address_line_2 = fields.Str(validate=validate.Length(min=1))
    landmark = fields.Str(validate=validate.Length(min=1))
    city = fields.Str(required=True, validate=validate.Length(min=1))
    district = fields.Str(required=True, validate=validate.Length(min=1))
    state = fields.Str(required=True, validate=validate.Length(min=1))
    country = fields.Str(required=True, validate=validate.Length(min=1))
    pincode = fields.Str(required=True, validate=validate.Length(min=1))
class PaymentInfoSchema(Schema):
    """Payment method metadata; both fields are optional free-form strings."""
    method = fields.Str(validate=validate.Length(max=100))
    vendor = fields.Str(validate=validate.Length(max=100))
class UserDetailSchema(Schema):
    """Incoming user record.

    After loading, the container-valued fields are JSON-encoded strings
    (see dict_to_json), presumably so the record can be stored in flat
    text database columns -- TODO confirm against the storage layer.
    """
    user_id = fields.Str(required=True, validate=validate.Length(max=100))
    name = fields.Str(validate=validate.Length(max=100), missing=None)
    email = fields.Email(required=True)
    sex = fields.Str(validate=validate.OneOf(["male", "female", "other"]), missing=None)
    dob = fields.Str(validate=validate_date, missing=None)
    country_code = fields.Str(validate=validate.Length(max=20), missing=None)
    mobile = fields.Str(validate=validate_mobile, missing=None)
    billing_address = fields.Nested(AddressSchema, missing={})
    delivery_address = fields.Nested(AddressSchema, missing={})
    payment_info = fields.Nested(PaymentInfoSchema, missing={})
    latest_order_id = fields.Str(validate=validate.Length(max=100), missing=None)
    tags = fields.List(fields.Str(), missing=[])
    @post_load
    def dict_to_json(self, data, **kwargs):
        # Serialize the list/dict fields to JSON strings for storage.
        # NOTE(review): latest_order_id is a plain string but is also
        # json.dumps'd (stored with surrounding quotes) -- confirm intended.
        for column in ['tags', 'billing_address', 'delivery_address', 'payment_info', 'latest_order_id']:
            data[column] = json.dumps(data[column])
        return data
    class Meta:
        # NOTE(review): marshmallow's EXCLUDE constant is the lowercase
        # string "exclude"; confirm the installed version accepts 'EXCLUDE'.
        unknown = 'EXCLUDE'
class DOBSchema(Schema):
    """Date-of-birth range filter; both bounds are YYYY-MM-DD strings."""
    start_date = fields.Str(required=True, validate=validate_date)
    end_date = fields.Str(required=True, validate=validate_date)
class FilterSchema(Schema):
    """Optional query filters: explicit user ids and/or a DOB range."""
    user_ids = fields.List(fields.Str(validate=validate.Length(max=100)))
    dob = fields.Nested(DOBSchema)
class UserGetDataSchema(Schema):
    """Query payload: which user properties to fetch, optional filters,
    and a batch size capped at 100."""
    properties = fields.List(fields.Str(), required=True)
    filters = fields.Nested(FilterSchema)
    batch_size = fields.Int(validate=validate.Range(min=1, max=100))
    class Meta:
        # NOTE(review): see UserDetailSchema -- 'EXCLUDE' may need to be the
        # lowercase marshmallow constant; confirm.
        unknown = 'EXCLUDE'
|
21,057 | 43e272f7a01f8757b449dc855da70edee3d2ff10 | # coding=UTF-8
'''
@Author: httermin
@Date: 2020-01-01 17:38:37
'''
import sys
import os
sys.path.append("..")
from typing import List
from copy import deepcopy
class Solution:
    """LeetCode 39 "Combination Sum": all unique combinations of candidates
    (each reusable any number of times) summing to target."""
    def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:
        result = []
        tmp = []
        self.getSum(result, candidates, 0, target, tmp)
        return result
    def getSum(self, result, candidates, start, target, tmp):
        # Record a non-empty combination that hits the target exactly.
        # There is no early return here: the loop below still runs, but its
        # recursive calls bottom out on the `target < 0` guard.
        if target == 0 and tmp: result.append(tmp[:])
        if target < 0: return
        i = start
        while i < len(candidates):
        # for i in range(start, len(candidates)):
            tmp.append(candidates[i])
            # Special-case a zero candidate: skip ahead one slot so the
            # recursion cannot loop forever on target - 0 == target.
            # NOTE(review): after this skip the recursion subtracts the
            # *next* candidate while tmp holds the zero -- confirm this is
            # the intended handling of zeros.
            if candidates[i] == 0:
                i += 1
            if i < len(candidates):
                # start=i (not i+1) allows re-using the same candidate.
                self.getSum(result, candidates, i, target-candidates[i], tmp)
            else:
                result.append(tmp[:])
            tmp.pop()
            i += 1
if __name__ == "__main__":
solu = Solution()
examples = [([2,3,6,7], 7), ([2,3,5], 8), ([], 1), ([0], 0)]
for ex, t in examples[:]:
print(solu.combinationSum(ex, t)) |
21,058 | 1d2f0c41b4e44884d58a687efdb169440b768c70 | #!/usr/bin/env python3
# Code name: opencv_tools.py
# Brief description: Provides a set of image analysis tools mostly derived from OpenCV
# that can be applied to an input image (i.e. numpy array)
#
# Requirements: Python (3.5+?), plus the packages listed below
#
# Start Date: 9/28/21
# Last Revision:
# Current Version:
# Notes:
#
# Copyright (C) 2021, Frederick D. Pearce, opencv_tools.py
# 0. Import modules
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np
import os
import random
## 1. Define functions
# 1.1 Frame manipulation and display functions
# ToDo: Change these to methods of new class so they can be imported together
def load_frame_gray(img_path, gray_flag=False):
    """Load the image at *img_path*; optionally also return a grayscale copy.

    img_path: path to an image file readable by cv.imread
    gray_flag: when True, return a (img, gray) tuple instead of just img

    Returns the BGR image, or (img, gray) when gray_flag is True; entries
    are None when the file could not be read.
    """
    # Bug fix: cv.imread does NOT raise on a missing/unreadable file -- it
    # returns None -- so that case is now reported explicitly instead of
    # relying solely on the exception handler.
    try:
        img = cv.imread(img_path)
    except Exception as err:
        print(f"The following error occurred when reading the image file at {img_path}: \n{err}")
        img = None
    if img is None:
        print(f"cv.imread could not read the image file at {img_path}")
    if gray_flag and isinstance(img, np.ndarray):
        gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    else:
        gray = None
    return (img, gray) if gray_flag else img
def resize_frame(frame, scale=0.5, interp_method = cv.INTER_AREA):
    """Return *frame* rescaled by *scale* using *interp_method*."""
    new_size = (int(frame.shape[1] * scale), int(frame.shape[0] * scale))
    return cv.resize(frame, new_size, interpolation=interp_method)
def translate_frame(frame, x, y):
    """Shift *frame* by (x, y) pixels.

    Negative x moves left and negative y moves up; positive values move
    right/down respectively.
    """
    rows, cols = frame.shape[0], frame.shape[1]
    shift = np.float32([[1, 0, x], [0, 1, y]])
    return cv.warpAffine(frame, shift, (cols, rows))
def rotate_frame(frame, rotation_angle, rotation_point=None):
    """Rotate *frame* by *rotation_angle* degrees (negative values rotate
    clockwise) about *rotation_point*, defaulting to the image centre."""
    height, width = frame.shape[:2]
    centre = rotation_point if rotation_point is not None else (width // 2, height // 2)
    matrix = cv.getRotationMatrix2D(centre, rotation_angle, 1.0)
    return cv.warpAffine(frame, matrix, (width, height))
def flip_frame(frame, flip_code):
    """Mirror *frame* according to *flip_code*:
    0 flips vertically, 1 horizontally, -1 both."""
    return cv.flip(frame, flip_code)
def print_frame_info(frame, frame_desc=""):
print(f"{frame_desc} Image Shape: Height={frame.shape[0]}, Width={frame.shape[1]}, Channels={frame.shape[2]}")
def show_frame(frame, frame_title):
    """Display *frame* in a window titled *frame_title*, block until any
    key is pressed, then close all OpenCV windows."""
    cv.imshow(frame_title, frame)
    cv.waitKey(0)
    cv.destroyAllWindows()
# 1.2 Histogram-related functions
def get_hist_params(hist_params, plot_params=None):
    """Return merged histogram-calculation and plotting parameters.

    hist_params: dict of cv.calcHist keyword values; MUST contain 'images'
        (a list of images), the only required entry.
    plot_params: optional dict of plotting overrides (figsize, title,
        xlabel, ylabel, channel_colors).

    Returns {'hist': ..., 'plot': ...} with defaults filled in.
    Raises KeyError when 'images' is missing and ValueError when the number
    of channels does not match the number of channel colors.
    """
    # Default values for all parameters, except 'images', which MUST be
    # provided by the caller.
    params = {
        'hist': {
            'images': None,
            'channels': [0],
            'mask': None,
            'histSize': [256],
            'ranges': [0, 256]
        },
        'plot': {
            'figsize': (10, 8),
            'title': "Image Histogram",
            'xlabel': "Bins",
            'ylabel': "# of Pixels",
            # Color code per channel: MUST match the length of 'channels'
            'channel_colors': ["k"]
        }
    }
    if 'images' not in hist_params:
        raise KeyError("Missing 'images' key containing a list of images, the only required key/value pair in hist_params")
    # The former print-and-continue exception handlers around dict.update
    # are gone: a malformed (non-mapping) input now surfaces as the
    # underlying TypeError instead of being silently swallowed. The
    # redundant `if hist_params:` guard is also removed -- a falsy
    # hist_params already fails the 'images' check above.
    params['hist'].update(hist_params)
    if plot_params:
        params['plot'].update(plot_params)
    num_channels = len(params['hist']['channels'])
    num_chancols = len(params['plot']['channel_colors'])
    if num_chancols != num_channels:
        raise ValueError(f"# of input channels ({num_channels}) MUST equal # of input channel_colors ({num_chancols})")
    return params
def create_figure_axis(**params):
    """Open a new matplotlib figure and label it from *params*
    (expects the figsize/title/xlabel/ylabel keys)."""
    plt.figure(figsize=params['figsize'])
    for setter, key in ((plt.title, 'title'), (plt.xlabel, 'xlabel'), (plt.ylabel, 'ylabel')):
        setter(params[key])
def calc_plot_histogram(hist, plot):
    """Compute one histogram per configured channel with cv.calcHist and
    plot each curve in its paired color; x-axis spans the bin range."""
    for channel, color in zip(hist['channels'], plot['channel_colors']):
        channel_hist = cv.calcHist(hist['images'], [channel], hist['mask'], hist['histSize'], hist['ranges'])
        plt.plot(channel_hist, color=color)
    plt.xlim(hist['ranges'])
def plot_frame_histogram(hist_params, plot_params=None):
    """Validate/merge the histogram parameters, build the labelled figure,
    then draw the per-channel histogram curves."""
    merged = get_hist_params(hist_params, plot_params)
    create_figure_axis(**merged['plot'])
    calc_plot_histogram(**merged)
# 1.3 Edge detection functions
def edges_canny_auto(frame, median_ratio=0.33):
    """Automatic Canny edge detection following https://www.pyimagesearch.com/2015/04/06/zero-parameter-automatic-canny-edge-detection-with-python-and-opencv/

    Thresholds are derived from the image median m:
    lower = (1 - median_ratio) * m, upper = (1 + median_ratio) * m,
    clamped to [0, 255].
    """
    m = np.median(frame)
    lower = int(max(0, (1.0 - median_ratio) * m))
    # Bug fix: the upper threshold must use (1 + median_ratio); the original
    # used (1 - median_ratio), collapsing both thresholds to the same value.
    upper = int(min(255, (1.0 + median_ratio) * m))
    return cv.Canny(frame, lower, upper)
# 1.4 Object detection functions
def detect_all_objects(gray, haar_file, params, verbose=False):
    """Run the Haar cascade defined in *haar_file* over grayscale image
    *gray* and return every detection; *params* are forwarded to
    detectMultiScale.

    Note: the classifier is re-loaded from disk on every call, which is
    wasteful when the same cascade is reused across many images.
    """
    detected_objects = cv.CascadeClassifier(haar_file).detectMultiScale(gray, **params)
    if verbose:
        print(f"# of Objects Detected = {len(detected_objects)}")
    return detected_objects
def detect_primary_objects(gray, haar_file, params, num_primary_obj=1, max_iter=10, boost_flag=True, verbose=False):
    """Identify the "primary", or least likely to be a false positive, object detected within the input grayscale image, gray.
    The type of object detected by haar_cascade is determined by the haar cascade class xml file that was provided in
    the haar_file parameter.

    Iteratively re-runs detectMultiScale, raising minNeighbors when too many
    objects are found and lowering it when too few, until exactly
    num_primary_obj objects are detected or max_iter iterations elapse.
    When boost_flag is True the step size grows while the detection count
    stalls between consecutive iterations.
    """
    haar_cascade = cv.CascadeClassifier(haar_file)
    detected_objects = haar_cascade.detectMultiScale(gray, **params)
    num_detected = len(detected_objects)
    num_detected_prev = num_detected
    if verbose:
        print(f"Initial # of Objects Detected = {num_detected}")
    num_iter = 0
    boost_factor = 1
    while num_detected != num_primary_obj and num_iter != max_iter:
        num_iter += 1
        if verbose:
            print(f"Iteration # = {num_iter}")
        # Update minNeighbors value in a copy of the params dict so the
        # caller's dict is never mutated
        if num_iter == 1:
            params_new = params.copy()
        elif num_iter == max_iter:
            print(f"Maximum # of iterations ({max_iter}) reached!")
        # Change minNeighbors up/down depending on whether num detected is too high/low
        # Steps up are twice as big as steps down, and boost_factor determines step size
        # NOTE(review): the boosted decrement below can push minNeighbors
        # from >4 to below 1 in a single step -- confirm this is intended.
        if num_detected < num_primary_obj and params_new['minNeighbors'] > 4:
            params_new['minNeighbors'] -= boost_factor
        elif num_detected < num_primary_obj and params_new['minNeighbors'] > 1:
            params_new['minNeighbors'] -= 1
        elif num_detected > num_primary_obj:
            params_new['minNeighbors'] += 2 * boost_factor
        else:
            print(f"Unable to detect {num_primary_obj} primary object(s) in input image")
            print(f"Verify that either 1) num_detected is zero ({num_detected==0}) and minNeighbors is one ({params_new['minNeighbors']==1})")
            print(f"OR 2) the maximum # of iterations has been reached ({num_iter==max_iter})")
            print("If either of these scenarios occurs, consider changing the input scaleFactor and/or initial minNeighbors value. If neither 1) or 2) applies, then there is an unknown bug somewhere that should be investigated!!!")
        if verbose:
            print(f"minNeighbors = {params_new['minNeighbors']}")
        detected_objects = haar_cascade.detectMultiScale(gray, **params_new)
        num_detected = len(detected_objects)
        # Grow the step size while the detection count is stuck; reset it
        # as soon as the count changes.
        if num_detected == num_detected_prev and boost_flag:
            boost_factor += 1
        else:
            boost_factor = 1
        num_detected_prev = num_detected
    if verbose:
        print(f"Final # of Objects Detected = {num_detected}")
    return detected_objects
def get_detected_features_labels(img, detected_rects, label=-1, verbose=False):
    """Extract the region of interest (ROI) from *img* for every detected
    rectangle.

    img: source image (numpy array)
    detected_rects: iterable of (x, y, w, h) rectangles
    label: when > -1, also return a list with one *label* per ROI
    verbose: print each rectangle as it is processed

    Returns the list of ROIs, or (rois, labels) when label > -1. A
    rectangle that cannot be unpacked contributes None in place of an ROI.
    """
    obj_rois = []
    for rect in detected_rects:
        try:
            (x, y, w, h) = rect
        except Exception as e:
            # Bug fix: the original message referenced the undefined name
            # `img_path` here, raising a NameError inside the handler.
            print("The following error occurred when unpacking a detected rectangle:")
            print(e)
            obj_rois.append(None)
            continue
        if verbose:
            print(*rect, sep=", ")
        # Bug fix: the original gated this on isinstance(x, int), which is
        # False for the numpy integers detectMultiScale actually returns,
        # so every valid detection was replaced by None. Any successfully
        # unpacked rectangle now yields its ROI.
        obj_rois.append(img[y:y+h, x:x+w])
    if label > -1:
        return obj_rois, [label] * len(obj_rois)
    else:
        return obj_rois
def detect_image_objects(gray, detect_params, detect_type="all", label=-1, verbose=False):
    """Detect object(s) in the image located at img_path, using the haar object defined in
    the xml file located at haar_path where
    gray = a grayscale image as an numpy array of type uint8
    detect_params = a dictionary containing two sets of parameters: 1) 'haar_file', a string specifying the full path to the haar cascade
    xml file to load and 2) 'params' dict to pass to the detectMultiScale method of the haar cascade class. Valid values
    include scaleFactor (default=1.1), minNeighbors (default=3), and minSize
    detect_type = an optional string specifying the type of detection to perform:
    "all": runs detect_all_objects, which returns all objects detected from one execution of the haar class detectMultiScale
    method with the input parameters specified in detect_params. The number of objects detected may vary greatly from image to
    image for a fixed set of input parameters
    "primary": runs detect_primary_objects, which performs an iterative process to return a user-specified number of primary objects
    detected in the input image. Essentially, the minNeighbors parameter is adjusted until the desired number of objects are detected
    label = an optional integer specifying the index to a specific person in the people list that is the primary person in the image at img_path
    When the default value of -1 is provided, then no label is returned (i.e. default is non-training mode)
    verbose = an optional boolean-like value that, when truthy, prints additional details during execution for validation/debugging purposes
    """
    if detect_type == "all":
        detected_rects = detect_all_objects(gray, verbose=verbose, **detect_params)
    elif detect_type == "primary":
        detected_rects = detect_primary_objects(gray, verbose=verbose, **detect_params)
    else:
        print(f"Unrecongized input value for detect_type, {detect_type}, so no objects were detected!")
        print("Please provide a string value for detect_type of either 1) 'all' or 2) 'primary'")
        detected_rects = None
    # NOTE(review): when detect_type is invalid, or when the detection
    # result is not an ndarray (e.g. presumably the empty tuple OpenCV
    # returns when nothing is found -- confirm), this function falls off
    # the end and implicitly returns None.
    if isinstance(detected_rects, np.ndarray):
        features_labels = get_detected_features_labels(gray, detected_rects, label=label, verbose=verbose)
        return features_labels
def draw_detected_objects(detected_frame, detected_rect, frame_to_show=None, print_detected=False, rect_color=(255, 255, 255), rect_thickness=2):
    """Display source image with detected object(s) outlined, and optionally display an image focused around each detected object based on a
    different input image, frame_to_show. This functionality allow one to show the outline of the detected objects on the grayscale image used
    for detection, and also show the bgr image zoomed in on each detected object.
    detected_frame = numpy array of uint8 specifying the image used for detection
    detected_rect = a list containing zero or more lists, each specifying a rectangle that bounds a detected object in detected_frame
    frame_to_show = an alternate image used to display the image contained within each detected_rect. Input MUST be a numpy array in order
    to turn this feature on
    print_detected = boolean-like flag when truthy prints the x, y, w, and h values specifying the rectangle bounding each detected object
    rect_color = tuple with three values specifying the (b, g, r) color value for displaying the detected objects
    rect_thickness = integer specifying the thickness of the lines defining the rectangle bounding each detected object

    Returns detected_frame with the rectangles drawn onto it. Note that
    cv.rectangle draws in place, so the caller's detected_frame is mutated.
    """
    for i, (x, y, w, h) in enumerate(detected_rect):
        if print_detected:
            print(f"Object {i} Location: x={x}, y={y}, w={w}, h={h}")
        detected_frame = cv.rectangle(detected_frame, (x, y), (x+w, y+h), rect_color, thickness=rect_thickness)
        if isinstance(frame_to_show, np.ndarray):
            # Blocks on a key press for every detected object (show_frame
            # calls cv.waitKey(0)).
            show_frame(frame_to_show[y:y+h, x:x+w], "Objects Detected in Image")
    return detected_frame
## 2. If run from command line, execute script below here
if __name__ == "__main__":
print("ToDo: Implement example that runs as a script!") |
21,059 | 337a6098af37d804ffe04b1802633f8a2d2d31d1 | import time
# def producer():
# ret = []
# for i in range(100):
# time.sleep(0.1)
# ret.append(i)
# return ret
#
# def consumer(res):
# for index,baozi in enumerate(res):
# time.sleep(0.1)
# print('第%s个人,吃了%s' %(index,baozi))
#
# res = producer()
# consumer(res)
'''
#yield 3相当于return 控制的是函数的返回值
#x = yield 的另外一个特性,接收send传过来的值,赋值给x
def test():
print('开始啦')
first = yield #return 1 first=None
print('第一次',first)
yield 2
print('第二次')
t =test()
res = t.__next__() #next(t)
print(res)
#t.__next__()
# res = t.send(None)
res = t.send('函数停留在first那个位置,我就是给first赋值的')
print(res)
'''
def consumer(name):
    """Generator-based consumer: print an introduction, then wait forever
    for buns delivered via .send(), 'eating' each one after a 1 s pause."""
    print('我是[%s],我准备吃包子了' % name)
    while True:
        bun = yield
        time.sleep(1)
        print('%s 很开心的把[%s]吃掉了' % (name, bun))
def producer():
    """Drive two consumer coroutines: prime each one, then feed both a bun
    per second for ten rounds."""
    eaters = [consumer('weipeiqi'), consumer('yuanyao')]
    for eater in eaters:
        eater.__next__()
    for i in range(10):
        time.sleep(1)
        for eater in eaters:
            eater.send('肉包 %s' % i)
producer() |
21,060 | b11a20be0d71fddcff6b8665684cdb89d490f16e | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 25 13:15:51 2016
@author: naylor
"""
import numpy as np
from debug_mode import debug_mess
import cPickle as pickle
import random
import smilPython as sm
import useful_functions as uf
import os
import openslide
from BasicOperations import ROI
from image_classification import GetImage
class SegmDataBaseCommon(object):
    """Segmentation data base server common class.

    Maps a sub-base code (e.g. "train"/"val"/"test") onto an image
    directory, a ground-truth segmentation directory and a results
    directory, and provides generators pairing images with their
    segmentations. NOTE: this module is Python 2 (print statement below,
    cPickle import at the top of the file).
    """
    def __init__(self, dir_in):
        # Per-code directory maps; subclasses register the concrete codes.
        self._im_dir = {}
        self._im_dir["general"] = dir_in
        self._segm_dir = {}
        self._res_dir = {}
    def get_input_dir(self):
        """Return the root input directory."""
        return self._im_dir["general"]
    def iter(self, code, first=None, last=None):
        """Yield _im_and_segm's (image, GT list, file name) plus the file's
        base name, for files [first:last] of sub-base *code*."""
        for im_file in os.listdir(self._im_dir[code])[first:last]:
            #debug_mess("Processing file %s" % os.path.basename(im_file))
            outputs = self._im_and_segm(os.path.join(self._im_dir[code], im_file), self._segm_dir[code])
            yield outputs[0], outputs[1], outputs[2], os.path.basename(im_file)
    def iter2(self, code):
        """Like iter(), but yields (GT list, stored prediction, file name)
        via _im_and_segm2, reading predictions from the results dir."""
        for im_file in os.listdir(self._im_dir[code]):
            #debug_mess("Processing file %s" % os.path.basename(im_file))
            outputs = self._im_and_segm2(os.path.join(self._im_dir[code], im_file), self._segm_dir[code], self._res_dir[code])
            yield outputs[0], outputs[1], outputs[2], os.path.basename(im_file)
    def train(self):
        raise NameError("Train set not available for this database")
    def val(self):
        raise NameError("Validation set not available for this database")
    def test(self):
        raise NameError("Test set not available for this database")
    def _im_and_segm(self, im_file, segm_dir):
        """Return image and the corresponding segmentations.
        In this version, we suppose that there is a single segmentation for the input file,
        which has the same name and is found in the segmentation folder.
        Args:
            im_file: complete file name.
            segm_dir: folder containing the image segmentations.
        Returns:
            im: original image
            segm_list: list containing the corresponding GT segmentations
            file_name: original file name
        """
        #pdb.set_trace()
        im = sm.Image(im_file)
        segm_list = []
        file_name = os.path.basename(im_file)
        # Leftover debug output (Python 2 print statement).
        print file_name
        im_segm = sm.Image(os.path.join(segm_dir, file_name))
        self.segm_post_process(im_segm)
        segm_list.append(im_segm)
        return im, segm_list, file_name
    def _im_and_segm2(self, im_file, segm_dir, res_dir):
        """Return the GT segmentation, the stored prediction and file name.
        In this version, we suppose that there is a single segmentation for the input file,
        which has the same name and is found in the segmentation folder.
        Args:
            im_file: complete file name.
            segm_dir: folder containing the image segmentations.
            res_dir: folder containing the stored prediction images.
        Returns:
            segm_list: list containing the corresponding GT segmentations
            im_pred: the stored prediction image
            file_name: original file name
        """
        segm_list = []
        file_name = os.path.basename(im_file)
        im_segm = sm.Image(os.path.join(segm_dir, file_name))
        self.segm_post_process(im_segm)
        segm_list.append(im_segm)
        im_pred = sm.Image(os.path.join(res_dir, file_name))
        return segm_list, im_pred, file_name
    def nb_im(self, code):
        """Number of images in subbase given by *code*"""
        return len(os.listdir(self._im_dir[code]))
    def segm_post_process(self, im_segm):
        # Hook for subclasses (e.g. binarisation); the default is a no-op.
        pass
#__init__ get_input_dir iter iter2 train val test _im_and_segm _im_and_segm2 nb_im segm_post_process
class SegmChallengeCamelyon16(SegmDataBaseCommon):
    """
    Database for the CAMELYON16 challenge (ISBI 2016).

    Whole-slide images are read with openslide; iter() tiles each slide
    into regions of interest and yields one (tile, GT list, file name)
    triple per ROI.
    """
    def __init__(self, dir_in):
        SegmDataBaseCommon.__init__(self, dir_in)
        # Image / GT / result directories per sub-base code, plus a
        # parallel set of "otsu_*" codes for the Otsu-thresholded results.
        self._im_dir["train"] = os.path.join(dir_in, "Normal")
        self._im_dir["val"] = os.path.join(dir_in, "images/val")
        self._im_dir["test"] = os.path.join(dir_in, "images/test")
        self._segm_dir["train"] = os.path.join(dir_in, "GT/train")
        self._segm_dir["val"] = os.path.join(dir_in, "GT/val")
        self._segm_dir["test"] = os.path.join(dir_in, "GT/test")
        self._res_dir["train"] = os.path.join(dir_in, "resultats/train")
        self._res_dir["test"] = os.path.join(dir_in, "resultats/test")
        self._res_dir["val"] = os.path.join(dir_in, "resultats/val")
        self._im_dir["otsu_train"] = os.path.join(dir_in, "images/train")
        self._im_dir["otsu_test"] = os.path.join(dir_in, "images/test")
        self._im_dir["otsu_val"] = os.path.join(dir_in, "images/val")
        self._segm_dir["otsu_train"] = os.path.join(dir_in, "GT/train")
        self._segm_dir["otsu_test"] = os.path.join(dir_in, "GT/test")
        self._segm_dir["otsu_val"] = os.path.join(dir_in, "GT/val")
        self._res_dir['otsu_train'] = os.path.join(dir_in, "resultats/otsu/train")
        self._res_dir['otsu_test'] = os.path.join(dir_in, "resultats/otsu/test")
        self._res_dir['otsu_val'] = os.path.join(dir_in, "resultats/otsu/val")
    def iter(self, code, first=None, last=None):
        """Yield one (tile image, GT list, file name, base name) per ROI of
        every slide in sub-base *code*. Tumor slides get their "_Mask" GT
        tile; normal slides get an empty image of matching size."""
        for im_file in os.listdir(self._im_dir[code])[first:last]:
            #debug_mess("Processing file %s" % os.path.basename(im_file))
            slide , ROI_pos = self._im_and_segm(os.path.join(self._im_dir[code], im_file), self._segm_dir[code])
            file_name = os.path.basename(im_file)
            [base_name, ext] = file_name.rsplit(".", 1)
            for para in ROI_pos:
                # NOTE(review): np.zeros(3) is a float array -- assigning
                # sm.Image objects into it should raise; a plain 3-element
                # list was presumably intended. Confirm this code path runs.
                outputs=np.zeros(3)
                outputs[0]=sm.Image(GetImage(os.path.join(self._im_dir[code], im_file),para))
                if 'Tumor' in im_file:
                    outputs[1] = [sm.Image(GetImage(os.path.join(self._segm_dir[code], base_name + "_Mask" + "." + ext),para))]
                else:
                    w=outputs[0].getSize()[0]
                    h=outputs[0].getSize()[1]
                    d=outputs[0].getSize()[2]
                    outputs[1] = [sm.Image(w,h,d)]
                outputs[2] = file_name
                yield outputs[0], outputs[1], outputs[2], os.path.basename(im_file)
    def segm_post_process(self, im_segm):
        # Binarise the GT: any positive pixel becomes 1.
        sm.compare(im_segm, ">", 0, 1, 0, im_segm)
        # pdb.set_trace()
        # image_slices = sm.Image()
        # sm.splitChannels(im_segm, image_slices)
        # sm.compare(image_slices.getSlice(0), ">", 0, 1, 0, image_slices.getSlice(0))
        # return image_slices.getSlice(0)
    def _im_and_segm(self, im_file, segm_dir):
        """Return the open slide and its list of ROI tile positions.
        Args:
            im_file: complete file name.
            segm_dir: folder containing the image segmentations (unused here).
        Returns:
            slide: the opened whole-slide image
            ROI_pos: the ROI tile positions found on the slide
        """
        # NOTE(review): openslide exposes the OpenSlide class (capitalised);
        # openslide.openslide is the submodule, so this call likely fails --
        # confirm against the installed openslide-python version.
        slide = openslide.openslide(im_file)
        ROI_pos=ROI(im_file,ref_level=2,disk_size=4,thresh=220,black_spots=20,number_of_pixels_max=700000,verbose=False) ### TODO: expose these as optional arguments
        return(slide,ROI_pos)
        #im = sm.Image(im_file)
        #segm_list = []
        #file_name = os.path.basename(im_file)
        #[base_name, ext] = file_name.rsplit(".", 1)
        #im_segm = sm.Image(os.path.join(segm_dir, base_name + "_Mask" + "." + ext))
        #self.segm_post_process(im_segm)
        #segm_list.append(im_segm)
        #return im, segm_list, file_name
    def _im_and_segm2(self, im_file, segm_dir, res_dir):
        """Return the GT segmentation, the stored prediction and file name.
        The GT is looked up under the "<base>_Mask.<ext>" naming convention.
        Args:
            im_file: complete file name.
            segm_dir: folder containing the image segmentations.
            res_dir: folder containing the stored prediction images.
        Returns:
            segm_list: list containing the corresponding GT segmentations
            im_pred: the stored prediction image
            file_name: original file name
        """
        segm_list = []
        file_name = os.path.basename(im_file)
        [base_name, ext] = file_name.rsplit(".", 1)
        im_segm = sm.Image(os.path.join(segm_dir, base_name + "_Mask" + "." + ext))
        self.segm_post_process(im_segm)
        segm_list.append(im_segm)
        im_pred = sm.Image(os.path.join(res_dir, file_name))
        return segm_list, im_pred, file_name
|
21,061 | 00210173fd5c613fc3db25b7d36b8134956b5901 | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 19 11:12:14 2020
@author: Jasper Dijkstra + 2 functions from Bram Maasakkers
This script contains functions to:
1. Check if directory exists, if not create it
2. List all files in directory
3. Export data to csv file
4. Get current time (year, month, day, hour, minute, second)
5. Convert UTC to modified Julian date 2010 ((C) Bram Maasakkers)
6. Convert modified Julian date 2010 to UTC ((C) Bram Maasakkers)
"""
import os
import csv
from datetime import datetime
import calendar
import numpy as np
# ==============================================
# OS FUNCTIONS
# ==============================================
def DefineAndCreateDirectory(targetDirectory):
    """Ensure *targetDirectory* exists and return it with a trailing
    path separator appended (if not already present).
    """
    # exist_ok removes the check-then-create race of the original
    # isdir()/makedirs() pair (another process could create the directory
    # between the two calls).
    os.makedirs(targetDirectory, exist_ok=True)
    # Make sure path ends with separator (//)
    if not targetDirectory.endswith(os.path.sep):
        targetDirectory += os.path.sep
    return targetDirectory
def ListFilesInDirectory(inputDirectory, extension='.csv', maxfiles=None):
    """List files in *inputDirectory* whose names end with *extension*.

    inputDirectory: directory to scan (AssertionError if it does not exist;
        kept as assert for backward compatibility with existing callers)
    extension: required filename suffix (default '.csv')
    maxfiles: optional cap on the number of paths returned

    Returns a list of full paths, in os.listdir (arbitrary) order.
    """
    assert os.path.isdir(inputDirectory), 'This directory does not exist!'
    files = []
    for name in os.listdir(inputDirectory):
        # `is not None` replaces the original `!= None`; >= makes the cap
        # robust even if maxfiles is somehow overshot.
        if maxfiles is not None and len(files) >= maxfiles:
            break  # limit the amount of returned files
        if name.endswith(extension):
            files.append(os.path.join(inputDirectory, name))
    return files
def ExportAsCSV(csv_out_path, data):
    """Write *data* to *csv_out_path* as a CSV file.

    Parameters
    ----------
    csv_out_path : string
        Path to output csv file.
    data : list
        List of lists; each inner list becomes one output row.

    Returns
    -------
    None.
    """
    # The with-statement already closes the file; the original's explicit
    # f.close() inside the block was redundant and has been dropped.
    with open(csv_out_path, "w", newline="") as f:
        csv.writer(f, delimiter=',').writerows(data)
# ==============================================
# TIME CONVERSION FUNCTIONS
# ==============================================
def GetCurrentTime():
    """Return the current local time as a dict of zero-padded strings keyed
    by 'year', 'month', 'day', 'hour', 'minute' and 'second'."""
    now = datetime.now()
    formats = {
        'year': "%Y",
        'month': "%m",
        'day': "%d",
        'hour': "%H",
        'minute': "%M",
        'second': "%S",
    }
    return {unit: now.strftime(fmt) for unit, fmt in formats.items()}
def UTCtoModifiedJulianDate(year, month, day, hour, minute, second, millisecond=0):
    """
    FUNCTION BY BRAM MAASAKKERS (C)
    Convert UTC (year, month, day, hour, minute, second[, millisecond]) to
    modified Julian date 2010: fractional days since 2010-01-01 00:00 UTC.
    This function is vector-safe.
    Parameters:
        year/month/day/hour/minute/second: the UTC date/time components
        millisecond: the millisecond (default: 0)
    Return value:
        result: the time/date converted to modified Julian date 2010
    """
    t2010 = calendar.timegm((2010, 1, 1, 0, 0, 0)) # seconds in epoch 1970 corresponding to 1 Jan 2010
    if isinstance(year, np.ndarray): # if input is vectors
        is_scalar = False
        if isinstance(millisecond, int): # optional millisecond argument presumably not set
            millisecond = np.zeros(year.shape, dtype=int) # set millisecond to an array of matching size
        elif any(millisecond != 0) and any(second % 1 != 0): # both millisecond and fractional second given
            print("Warning: both milliseconds and fractional seconds given! Ignoring fractional seconds.")
            # np.int was removed in NumPy 1.24 -- the builtin int is used instead
            second = np.floor(second).astype(int)
    else: # input is scalars
        is_scalar = True
        if millisecond != 0 and second % 1 != 0: # both millisecond and fractional second given
            print("Warning: both milliseconds and fractional seconds given! Ignoring fractional seconds.")
            second = int(np.floor(second)) # cut off fractional seconds
        year = np.array([year]) # promote scalars to length-1 arrays
        month = np.array([month])
        day = np.array([day])
        hour = np.array([hour])
        minute = np.array([minute])
        second = np.array([second])
        millisecond = np.array([millisecond])
    result = np.zeros(year.shape, dtype=np.float64) # initialise field for result
    for ind in range(len(year)): # loop over entries of vectors (one entry for scalars)
        t = calendar.timegm((year[ind], month[ind], day[ind], hour[ind], minute[ind], second[ind])) # seconds in epoch 1970
        result[ind] = (np.float64(t - t2010) + np.float64(millisecond[ind]) / 1000.0) / 86400.0 # (fractional) days since 1 Jan 2010
    if is_scalar: # if input has been scalars
        result = result.item() # convert result back to scalar
    return result
def ModifiedJulianDatetoUTC(mjd):
    """
    FUNCTION BY BRAM MAASAKKERS (C)
    Convert modified Julian date 2010 (fractional days since
    2010-01-01 00:00 UTC) back to UTC components.
    This function is vector-safe.
    Parameters:
        mjd: time/date as modified Julian date 2010, in days
    Return value:
        result: dict with "year", "month", "day", "hour", "minute",
        "second", "millisecond", "fractional_year", "day_of_year"
    """
    import calendar
    import time
    # The globals are kept only to preserve the original module-level side
    # effects; nothing in this function requires them.
    global t
    global gmt
    global t2010
    t2010 = calendar.timegm((2010, 1, 1, 0, 0, 0)) # seconds in epoch 1970 corresponding to 1 Jan 2010
    if isinstance(mjd, (np.ndarray, list)): # if input is a vector
        is_scalar = False
        if isinstance(mjd, list):
            mjd = np.array(mjd)
    else: # input is a scalar
        is_scalar = True
        mjd = np.array([mjd]) # convert to array
    # Bug fix: mjd is measured in *days* (UTCtoModifiedJulianDate divides by
    # 86400), so it must be scaled back to seconds before being added to the
    # epoch offset. The original added days directly to seconds, and its
    # millisecond field was consequently a fraction of a day, not a second.
    t = mjd * 86400.0 + t2010 # seconds since epoch 1970
    gmt = np.zeros((len(mjd), 9), dtype=int) # np.int alias removed in NumPy 1.24
    fractional_year = np.zeros(len(mjd), dtype=np.double)
    day_of_year = np.zeros(len(mjd), dtype=np.double)
    for ind in range(len(t)): # loop over entries of vector (may be one entry)
        gmt[ind, :] = np.array(time.gmtime(t[ind]))
        mjd_year_begin = UTCtoModifiedJulianDate(gmt[ind, 0], 1, 1, 0, 0, 0)
        day_of_year[ind] = (mjd[ind] - mjd_year_begin)
        fractional_year[ind] = float(gmt[ind, 0]) + day_of_year[ind] / (366.0 if calendar.isleap(gmt[ind, 0]) else 365.0)
    if is_scalar:
        result = {"year": gmt[0, 0], "month": gmt[0, 1], "day": gmt[0, 2], "hour": gmt[0, 3], "minute": gmt[0, 4], "second": gmt[0, 5], "millisecond": int(np.round(t[0] % 1 * 1000.0)), "fractional_year": fractional_year[0], "day_of_year": day_of_year[0]}
    else:
        result = {"year": gmt[:, 0], "month": gmt[:, 1], "day": gmt[:, 2], "hour": gmt[:, 3], "minute": gmt[:, 4], "second": gmt[:, 5], "millisecond": np.round(np.array(t % 1) * 1000.0).astype(int), "fractional_year": fractional_year, "day_of_year": day_of_year}
    return result
|
21,062 | 36aca0f75ce3a24f7edaf5d33d29edb9d283d2b4 | # Tuples ============================================================================
# (you are not able to change a tuple once it has been created)
# Round-trip demo: a tuple is immutable, but its contents can be copied
# into a list (mutable) and converted back into a new tuple.
pi_tuple = (1, 2, 3, 4)
new_list = list(pi_tuple)
new_tuple = tuple(new_list)
|
21,063 | 20a79cc3039b95bc0634d2bc2638dc90f75477d4 | # -*- coding: utf-8 -*-
import pickle
import numpy as np
import tensorflow as tf
from .DataPreprocessor import DataPreprocessor
from .ModelConfiguration import ModelConfiguration
from .WordEmbedding import WordEmbedding
from classifier.Classifier import Classifier
class Model:
    """
    Creates and uses the sentence classification model.
    Before a classification model can be created, a word-embedding model
    must first be created, or loaded if one already exists.
    Attributes:
        word2int: a dictionary, which stores for each word a unique id as an integer
        int2word: the reversed word2int dictionary
        vectors: an ordered set of words embedded in a vector with the size of embedding_dim
        max_sentence_length: the maximum amount of words in a sentence
        PADDING: the constant with which a sentence is padded if it has fewer than
            max_sentence_length words; also used for unknown words that have no embedding
        x_training: the extracted sentences from raw-data
        y_training: the extracted labels from raw-data
        cl_model: the classification model
        vocab_size: the vocabulary size of the model
    """
    # NOTE(review): these are class-level (shared) attributes; instances
    # overwrite most of them in __init__ / the load methods, but anything
    # reading them before that sees the shared defaults — confirm nothing
    # relies on per-class state here.
    word2int = {}
    int2word = {}
    vectors = []
    max_sentence_length = None
    PADDING = None
    x_training = None
    y_training = None
    cl_model = None
    vocab_size = None
    classifier = None
    def __init__(self):
        self.config = ModelConfiguration()
        # Fix: reuse the configuration loaded above instead of building a
        # second, identical ModelConfiguration instance.
        config = self.config
        self.max_sentence_length = config.parameters["networks"][1]["max_sentence_length"]
        # (sic) attribute name "empedding_dim" kept for compatibility with callers
        self.empedding_dim = config.parameters["networks"][0]["embedding_dim"]
        self.PADDING = np.zeros(self.empedding_dim, dtype=np.dtype(np.float32))
    def load_training_data(self):
        """Load raw sentences/labels from the configured training-data source."""
        preprocessor = DataPreprocessor(self.config.parameters["networks"][1]["number_of_classes"])
        self.x_training, self.y_training = preprocessor.get_data(self.config.parameters["training_data_src"])
    def create_word_embedding(self):
        """Train a fresh word embedding on the training sentences and persist
        its vocabulary size into the configuration."""
        self.load_training_data()
        word_embedding = WordEmbedding(sentences=self.x_training, config=self.config.get_word_embedding_config())
        self.vectors = word_embedding.vectors
        self.vocab_size = word_embedding.vocab_size
        self.config.parameters['networks'][0]['vocab_size'] = word_embedding.vocab_size
        self.config.save()
        # The embedding step is expected to have pickled these dictionaries.
        self.word2int = self.load_obj("word2int")
        self.int2word = self.load_obj("int2word")
    def load_word_embeddings(self):
        """Restore previously trained embedding weights from a TF checkpoint.

        The variable names and shapes must match those used when the
        embedding network was trained.
        """
        tf.reset_default_graph()
        w1 = tf.get_variable("weights_first_layer", shape=[self.config.parameters["networks"][0]["vocab_size"], self.empedding_dim])
        b1 = tf.get_variable("biases_first_layer", shape=[self.empedding_dim])  # bias
        saver = tf.train.Saver()
        self.word2int = self.load_obj("word2int")
        self.int2word = self.load_obj("int2word")
        with tf.Session() as sess:
            # Restore variables from disk.
            saver.restore(sess, "./classifier/models/w2v.ckpt")
            # Embedding of a word = hidden-layer weights plus bias.
            self.vectors = sess.run(w1 + b1)
    def create_model(self):
        """Build, train, save, then reload the sentence classifier."""
        self.load_training_data()
        classifier = Classifier(self.config)
        classifier.create_network()
        classifier.train_model(self.sentences_2_tensor(self.x_training), self.y_training)
        classifier.save_model()
        # NOTE(review): assumes Classifier.load_model() returns the loaded
        # model object — confirm against the Classifier implementation.
        self.classifier = classifier.load_model()
    def predict(self, sentence):
        """Classify a raw sentence string.

        Returns the predicted class index, or -1 when the classifier is not
        confident enough (max probability below 0.8).
        """
        if (self.classifier is None):
            self.classifier = Classifier(self.config)
            self.classifier.load_model()
        sentence_as_tensor = self.sentence_2_tensor(sentence)
        sentence_as_tensor = np.array(sentence_as_tensor)
        # Shape expected by the conv net: (batch, length, embedding, channels).
        sentence_as_tensor = sentence_as_tensor.reshape(1, self.max_sentence_length, self.empedding_dim, 1)
        classification = self.classifier.predict(sentence_as_tensor)[0]
        if (np.amax(classification) < 0.8):
            return -1
        return np.argmax(classification)
    def sentences_2_tensor(self, sentences):
        """Embed a list of tokenised sentences, padding each to
        max_sentence_length.  All words must already be in word2int."""
        tensors = []
        for sentence in sentences:
            tensor = []
            for word in sentence:
                tensor.append(self.vectors[self.word2int[word]])
            while len(tensor) < self.max_sentence_length:
                tensor.append(self.PADDING)
            tensors.append(tensor)
        return tensors
    def sentence_2_tensor(self, sentence):
        """Embed a single raw sentence; unknown words map to PADDING."""
        tensor = []
        sentence = sentence.split()
        for word in sentence:
            try:
                tensor.append(self.vectors[self.word2int[word]])
            except Exception:
                # Unknown word (no embedding): substitute the padding vector.
                tensor.append(self.PADDING)
        while len(tensor) < self.max_sentence_length:
            tensor.append(self.PADDING)
        return tensor
    @staticmethod
    def load_obj(name):
        """Unpickle a dictionary previously saved under ./classifier/dictionary/."""
        with open('./classifier/dictionary/' + name + '.pkl', 'rb') as f:
            return pickle.load(f)
21,064 | 833676b6923708da6c296506e42ccf8bb45cff72 | #!/usr/bin/python
from argparse import ArgumentParser
from bs4 import BeautifulSoup
import sys
class EmailOptions:
    """Holds the options required to send an e-mail.

    Options can be loaded from an XML file (LoadOptionsFromXml) and/or
    supplied/overridden on the command line (ParseOptionsFromCommandLine).
    """
    # Class-level defaults kept for backward compatibility with any code
    # reading them off the class itself.
    FromAddress = ''
    FromAddressPassword = ''
    ToAddresses = []
    Subject = ''
    Body = ''
    Attatchments = []
    def __init__(self):
        # Bug fix: the mutable class-level lists (ToAddresses, Attatchments)
        # were shared by every instance; give each instance its own storage.
        self.FromAddress = ''
        self.FromAddressPassword = ''
        self.ToAddresses = []
        self.Subject = ''
        self.Body = ''
        self.Attatchments = []
    def LoadOptionsFromXml(self, OptionsXmlFilename):
        """Populate options from the attributes of an <EmailOptions> tag
        in the given XML file.  Missing attributes leave defaults intact."""
        # Bug fix: close the file handle instead of leaking it.
        with open(OptionsXmlFilename) as XmlFile:
            FileHandler = XmlFile.read()
        SoupXmlParser = BeautifulSoup(FileHandler, 'xml')
        EmailOptionsTag = SoupXmlParser.find('EmailOptions')
        if (EmailOptionsTag):
            if (EmailOptionsTag.has_attr('FromAddress')):
                self.FromAddress = EmailOptionsTag['FromAddress']
            if (EmailOptionsTag.has_attr('FromAddressPassword')):
                self.FromAddressPassword = \
                    EmailOptionsTag['FromAddressPassword']
            if (EmailOptionsTag.has_attr('ToAddresses')):
                # Multiple recipients are space-separated in the XML.
                self.ToAddresses = EmailOptionsTag['ToAddresses'].split(' ')
            if (EmailOptionsTag.has_attr('Subject')):
                self.Subject = EmailOptionsTag['Subject']
            if (EmailOptionsTag.has_attr('Body')):
                self.Body = EmailOptionsTag['Body']
            if (EmailOptionsTag.has_attr('Attatchments')):
                self.Attatchments = EmailOptionsTag['Attatchments'].split(' ')
    def ParseOptionsFromCommandLine(self):
        """Overlay command-line arguments onto the current options.

        Exits with the argparse help text when neither the command line nor
        previously loaded defaults supply a sender and recipient.
        """
        Parser = ArgumentParser(
            description="Send an e-mail.")
        Parser.add_argument(
            '-f', '--from',
            action='store',
            metavar='FromAddress',
            dest='FromAddress',
            help='The Sender (Required if no default found)')
        Parser.add_argument(
            '-t', '--to',
            nargs='+',
            action='store',
            metavar='ToAddresses',
            dest='ToAddresses',
            help='The Recipient (At least one required if no default found)')
        Parser.add_argument(
            '-s', '--subject',
            action='store',
            metavar='Subject',
            dest='Subject',
            help='The value of the Subject: header')
        Parser.add_argument(
            '-b', '--body',
            action='store',
            metavar='Body',
            dest='Body',
            help='The content of the message body')
        Parser.add_argument(
            '-a', '--attatchments',
            nargs='+',
            action='store',
            metavar='Attatchments',
            dest='Attatchments',
            help='Files that you would like to attatch to the email')
        Options = Parser.parse_args()
        # Sender and at least one recipient are mandatory unless defaults
        # were loaded earlier (e.g. from the XML file).
        if (not Options.ToAddresses and self.ToAddresses == []):
            Parser.print_help()
            sys.exit(1)
        if (not Options.FromAddress and self.FromAddress == ''):
            Parser.print_help()
            sys.exit(1)
        if (Options.FromAddress):
            self.FromAddress = Options.FromAddress
        if (Options.ToAddresses):
            self.ToAddresses = Options.ToAddresses
        if (Options.Subject):
            self.Subject = Options.Subject
        if (Options.Body):
            self.Body = Options.Body
        if (Options.Attatchments):
            self.Attatchments = Options.Attatchments
|
21,065 | 34cb851ef95680262cb8f683b9568972f38aafc9 | """
# Definition for Employee.
class Employee:
def __init__(self, id: int, importance: int, subordinates: List[int]):
self.id = id
self.importance = importance
self.subordinates = subordinates
"""
class Solution:
    def getImportance(self, employees: List['Employee'], id: int) -> int:
        """Return the total importance of employee *id* plus all direct and
        indirect subordinates.

        Builds an id -> employee map once so each lookup is O(1); the
        original re-scanned the whole list on every recursive call
        (O(n) per lookup, O(n^2) overall).  Raises KeyError for an unknown
        id instead of the original's UnboundLocalError.
        """
        by_id = {e.id: e for e in employees}
        total = 0
        stack = [id]
        # Iterative DFS over the subordinate tree.
        while stack:
            emp = by_id[stack.pop()]
            total += emp.importance
            stack.extend(emp.subordinates)
        return total
21,066 | ca1f0f2c9a43b41c09bc72f9af8364ede78c2b21 | #-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: admin
#
# Created: 18/01/2019
# Copyright: (c) admin 2019
# Licence: <your licence>
#-------------------------------------------------------------------------------
def main(values=None):
    """Partition integers into even and odd lists and report counts and sums.

    Args:
        values: optional iterable of ints.  When None (the default, and the
            original behaviour), the element count and elements are read
            interactively from standard input.

    Returns:
        (even_lst, odd_lst): the two partitions, so callers and tests can
        inspect the result (the original returned None, so returning a
        value is backward compatible).
    """
    if values is None:
        n = int(input("Enter no.of elements:"))  # read number of elements
        a = [int(input("Enter element:")) for _ in range(n)]
    else:
        a = list(values)
    even_lst = [j for j in a if j % 2 == 0]
    odd_lst = [j for j in a if j % 2 != 0]
    print("FullList: ", a)
    print("Even numbers list \t", even_lst)
    print("Number of even num:", len(even_lst))
    print("sum of even no:", sum(even_lst))
    print("Odd numbers list \t", odd_lst)
    print("Number of odd num:", len(odd_lst))
    print("sum of odd no:", sum(odd_lst))
    return even_lst, odd_lst
if __name__ == '__main__':
    main()
|
21,067 | 6049790ecb1c6e2f10b825c3cca4825004143075 | from advsearch import SearchBackendException
from advsearch import AdvancedSearchPlugin
from backend import PySolrSearchBackEnd
from interface import IAdvSearchBackend
|
21,068 | fcaf2f454e2358a54d412e72f4de0bae2fd04142 | import os
import glob
import sys
from vtk import *
from ReadPoints import *
# to run in frankie
test_path = "/mnt/storage/home/mthanaj/cardiac/xyz"
dir_list = os.listdir(test_path)
def txt2vtk(pathin,pathout):
    # Convert a text file of point coordinates into a legacy-format VTK
    # unstructured-grid file.
    # pathin:  path of the input .txt point file (parsed by readPoints)
    # pathout: path of the .vtk file to write
    data=vtk.vtkUnstructuredGrid()
    data.SetPoints(readPoints(pathin))
    Data=vtk.vtkUnstructuredGridWriter()
    Data.SetInputData(data)
    Data.SetFileName(pathout)
    # NOTE(review): Update() before Write() forces the writer pipeline to
    # execute; presumably required by this VTK version — confirm.
    Data.Update()
    Data.Write()
# Convert the first .txt point file found in each subject directory into a
# VTK file named "LVed_<dir>.vtk" inside that same directory.
for dir_pat in dir_list:
    p_folder = os.path.join(test_path,dir_pat)
    path_in = glob.glob(p_folder+'/*.txt')
    # NOTE(review): assumes every directory contains at least one .txt file;
    # an empty directory would raise IndexError here — confirm the data layout.
    txt2vtk(path_in[0],os.path.join(p_folder,"LVed_"+str(dir_pat)+".vtk"))
|
21,069 | 39580e04dbaa8885cb56b4904309307e31f5acab | """OldBoyPython URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.urls import path, re_path, include
from blog import views
# URL routing table: maps request paths to view callables / class-based views.
urlpatterns = [
    path('admin/', admin.site.urls),
    path("login", views.login),
    path("home", views.home),
    path("template", views.template),
    re_path(r"^blog/", include("blog.urls")),
    # path("userinfo", views.user_info),
    # unnamed (positional) group matching
    # re_path('^articles/([0-9]{4})/$', views.year_archive),
    # re_path('^articles/([0-9]{4})/([0-9]{2})', views.year_mouth_archive),
    # name group (?P<name>pattern)
    # re_path("^articles/(?P<year>[0-9]{4})/(?P<mouth>[0-9]{2})", views.year_mouth_archive),
    # named variables: extra kwargs dict passed to the view
    # re_path("^articles/(?P<year>[0-9]{4})/(?P<mouth>[0-9]{2})", views.year_mouth_archive_variable,
    # {"year": 2022, "mouth": 20}),
    # URL alias (named pattern, reversible via its name)
    # url(r'^func_alias', views.func_alias, name="alias"),
    path("ordered", views.ordered),
    path("shopping_car", views.shopping_car),
    path("students", views.Students.as_view()),
    path("teachers", views.Teachers.as_view()),
    path("orders", views.OrdersView.as_view()),
    path("api/v1/auth", views.AuthView.as_view()),
    path("api/v1/user", views.UserView.as_view()),
]
|
21,070 | 948b358c0244becdf013c0015004953e1d4dab28 | assertions = [
{'name': 'City Council',
'num_members': '50',
'sources': [{'note': 'organizations search table',
'url': 'https://chicago.legistar.com/Departments.aspx'}]},
{'name': 'Committee on Aviation',
'num_members': '16',
'sources': [{'note': 'organizations search table',
'url': 'https://chicago.legistar.com/Departments.aspx'}]},
{'name': 'Committee on Budget and Government Operations',
'num_members': '35',
'sources': [{'note': 'organizations search table',
'url': 'https://chicago.legistar.com/Departments.aspx'}]}
] |
21,071 | c30c60837e7fc27e819cb4a66c364c5a1a1aed72 | # -*- coding: utf-8 -*-
from .mixins import JSONResponseView
class BaseDatatableView(JSONResponseView):
    """Server-side JSON data source for jQuery DataTables.

    Subclasses must override get_initial_queryset() and usually
    prepare_results(); filter_queryset() is optional.

    NOTE(review): this reads request.REQUEST, which existed only in
    Django < 1.9 — confirm the project's Django version.
    """
    order_columns = []
    def initialize(self, *args, **kwargs):
        # Bug fix: the original signature was `def initialize(*args, **kwargs)`
        # with no `self`; it only worked because the bound call stuffed the
        # instance into *args.  Hook for subclass setup; default is a no-op.
        pass
    def get_order_columns(self):
        """ Return list of columns used for ordering
        """
        return self.order_columns
    def ordering(self, qs):
        """Order *qs* according to the DataTables sorting parameters
        (iSortingCols, iSortCol_N, sSortDir_N) found in the request."""
        request = self.request
        # Number of columns that are used in sorting
        try:
            i_sorting_cols = int(request.REQUEST.get('iSortingCols', 0))
        except ValueError:
            i_sorting_cols = 0
        order = []
        order_columns = self.get_order_columns()
        for i in range(i_sorting_cols):
            # sorting column index for this sort slot
            # Bug fix: int(None) raises TypeError (when the parameter is
            # missing), which `except ValueError` did not catch.
            try:
                i_sort_col = int(request.REQUEST.get('iSortCol_%s' % i))
            except (TypeError, ValueError):
                i_sort_col = 0
            # sorting direction: 'desc' -> descending, anything else ascending
            s_sort_dir = request.REQUEST.get('sSortDir_%s' % i)
            sdir = '-' if s_sort_dir == 'desc' else ''
            sortcol = order_columns[i_sort_col]
            if isinstance(sortcol, list):
                # One display column may map to several model fields.
                for sc in sortcol:
                    order.append('%s%s' % (sdir, sc))
            else:
                order.append('%s%s' % (sdir, sortcol))
        if order:
            return qs.order_by(*order)
        return qs
    def paging(self, qs):
        """Slice *qs* to the requested page; page size is capped at 100."""
        limit = min(int(self.request.REQUEST.get('iDisplayLength', 10)), 100)
        start = int(self.request.REQUEST.get('iDisplayStart', 0))
        offset = start + limit
        return qs[start:offset]
    # TO BE OVERRIDEN
    def get_initial_queryset(self):
        raise Exception("Method get_initial_queryset not defined!")
    def filter_queryset(self, qs):
        return qs
    def prepare_results(self, qs):
        return []
    # /TO BE OVERRIDEN
    def get_context_data(self, *args, **kwargs):
        """Assemble the DataTables response payload: total counts, the
        filtered/ordered/paged rows, and the echoed sEcho token."""
        request = self.request
        self.initialize(*args, **kwargs)
        qs = self.get_initial_queryset()
        # number of records before filtering
        total_records = qs.count()
        qs = self.filter_queryset(qs)
        # number of records after filtering
        total_display_records = qs.count()
        qs = self.ordering(qs)
        qs = self.paging(qs)
        # prepare output data
        aaData = self.prepare_results(qs)
        ret = {'sEcho': int(request.REQUEST.get('sEcho', 0)),
               'iTotalRecords': total_records,
               'iTotalDisplayRecords': total_display_records,
               'aaData': aaData
               }
        return ret
|
21,072 | 1e235dd678bd2de4b03db27529bbcef49e35abd1 | """
(C) Copyright 2016 Nuxeo SA (http://nuxeo.com/) and contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
you may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contributors:
Pierre-Gildas MILLON <pgmillon@nuxeo.com>
"""
from mock.mock import patch
from nxtools import services
from nxtools.hooks.endpoints.webhook.github_handlers.jenkins_trigger import GithubJenkinsTriggerHandler
from nxtools.hooks.endpoints.webhook.github_hook import GithubHook
from nxtools.hooks.services.config import Config
from nxtools.hooks.services.jenkins import JenkinsService
from nxtools.hooks.tests.webhooks.github_handlers import GithubHookHandlerTest
class GithubJenkinsTriggerHandlerTest(GithubHookHandlerTest):
    """Tests that GithubJenkinsTriggerHandler forwards GitHub pushes to
    the configured Jenkins instances."""
    def test_jenkins_trigger(self):
        # Register a fake Jenkins instance named "qatest" through the
        # environment-backed Config service.
        base_url = 'http://void.null'
        services.get(Config).set_request_environ({
            Config.ENV_PREFIX + 'JENKINS_DEFAULT_USERNAME': 'some_username',
            Config.ENV_PREFIX + 'JENKINS_DEFAULT_TOKEN': 'some_token',
            Config.ENV_PREFIX + 'JENKINS_INSTANCE_QATEST_URL': base_url,
            Config.ENV_PREFIX + 'GITHUBJENKINSTRIGGERHANDLER_INSTANCES': 'qatest'
        })
        # Replay a recorded GitHub "push" payload with the Jenkins client
        # patched out, then verify the handler answered 200 OK and posted
        # the payload (with the push event header) to the instance's
        # GitHub webhook URL exactly once.
        with GithubHookHandlerTest.payload_file('github_push') as payload, patch('nxtools.hooks.services.jenkins.Jenkins') as mock:
            mock.return_value.base_server_url.return_value = base_url
            handler = GithubJenkinsTriggerHandler()
            self.assertTupleEqual((200, 'OK'), handler.handle(payload))
            mock.return_value.requester.post_and_confirm_status.\
                assert_called_once_with(base_url + handler.DEFAULT_GITHUB_URL,
                                        data=payload,
                                        headers={'Content-Type': 'application/json', GithubHook.payloadHeader: 'push'})
|
21,073 | d1bbcc7ea867c60a467914b491bf09432a7e4815 | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import marl
from marl.agent import MATrainable, QAgent, TrainableAgent
from marl.policy import DeterministicPolicy, StochasticPolicy
from marl.tools import super_cat
class MAPGAgent(TrainableAgent, MATrainable):
    """
    The class of trainable agent using multi-agent policy gradient methods.
    :param critic_model: (Model or torch.nn.Module) The critic model
    :param actor_policy: (Policy) actor policy
    :param actor_model: (Model or torch.nn.Module) The actor model
    :param observation_space: (gym.Spaces) The observation space
    :param action_space: (gym.Spaces) The action space
    :param index: (int) The index of the agent in the multi-agent system
    :param mas: (MARL) The multi-agent system in which the agent is included
    :param experience: (Experience) The experience memory data structure
    :param exploration: (Exploration) The exploration process
    :param lr_actor: (float) The learning rate for each actor
    :param lr_critic: (float) The learning rate for each critic
    :param gamma: (float) The discount factor
    :param batch_size: (int) The batch size
    :param tau: (float) The update rate
    :param name: (str) The name of the agent
    """
    def __init__(
        self,
        critic_model,
        actor_policy,
        observation_space,
        action_space,
        actor_model=None,
        index=None,
        mas=None,
        experience="ReplayMemory-1000",
        exploration="EpsGreedy",
        lr_actor=0.001,
        lr_critic=0.001,
        gamma=0.95,
        batch_size=32,
        tau=0.01,
        use_target_net=False,
        name="MAACAgent",
    ):
        TrainableAgent.__init__(
            self,
            policy=actor_policy,
            model=actor_model,
            observation_space=observation_space,
            action_space=action_space,
            experience=experience,
            exploration=exploration,
            lr=lr_actor,
            gamma=gamma,
            batch_size=batch_size,
            name=name,
        )
        MATrainable.__init__(self, mas, index)
        self.tau = tau
        # Actor model: optimizer over the policy network's parameters.
        self.actor_optimizer = optim.Adam(self.policy.model.parameters(), lr=self.lr)
        # Critic model: centralized Q-network over joint observations/actions.
        self.critic_model = marl.model.make(critic_model)
        self.critic_criterion = nn.SmoothL1Loss()  # Huber loss criterion (or nn.MSELoss())
        self.lr_critic = lr_critic
        self.critic_optimizer = optim.Adam(
            self.critic_model.parameters(), lr=self.lr_critic
        )
        # Init target networks (frozen copies updated softly via tau).
        self.use_target_net = use_target_net
        if self.use_target_net:
            self.target_critic = copy.deepcopy(self.critic_model)
            self.target_critic.eval()
            self.target_policy = copy.deepcopy(self.policy)
            self.target_policy.model.eval()
    def soft_update(self, local_model, target_model, tau):
        # Polyak averaging: target <- tau*local + (1-tau)*target.
        for target_param, local_param in zip(
            target_model.parameters(), local_model.parameters()
        ):
            target_param.data.copy_(
                tau * local_param.data + (1.0 - tau) * target_param.data
            )
    def update_model(self, t):
        # One training step: sample a joint batch, update critic then actor,
        # then soft-update the target networks if enabled.
        if len(self.experience) < self.batch_size:
            return
        # Get batches: the same sampled indices are used for the shared
        # (multi-agent) memory and this agent's local memory so the
        # transitions stay aligned.
        ind = self.experience.sample_index(self.batch_size)
        global_batch = self.mas.experience.get_transition(
            len(self.mas.experience) - np.array(ind) - 1
        )
        local_batch = self.experience.get_transition(
            len(self.experience) - np.array(ind) - 1
        )
        # Choose which critic evaluates the bootstrap target.
        self.curr_critic = (
            self.target_critic if self.use_target_net else self.critic_model
        )
        self.update_critic(local_batch, global_batch)
        self.update_actor(local_batch, global_batch)
        # Soft-update target networks toward the online networks.
        if self.use_target_net:
            self.soft_update(self.policy.model, self.target_policy.model, self.tau)
            self.soft_update(self.critic_model, self.target_critic, self.tau)
    def update_critic(self, local_batch, global_batch):
        # Calculate target r_i + gamma * Q_i(x,a1',a2',...,aN')
        target_value = self.target(local_batch, global_batch)
        # Calculate value Q_i(x,a1,a2,...,aN)
        inputs_critic = self._critic_inputs(
            global_batch.observation, global_batch.action
        )
        curr_value = self.critic_model(inputs_critic)
        ### = self.critic_ag.update_q(curr_value, target_value)
        self.critic_optimizer.zero_grad()
        # Calculate critic loss
        loss = self.critic_criterion(curr_value, target_value)
        # Update params
        loss.backward()
        self.critic_optimizer.step()
    def target(self, local_batch, global_batch):
        # Build the TD target: r_i + gamma * Q_i(x', a1',...,aN') masked by done.
        # join_by_agent: regroup a (batch, agents) array into per-agent lists.
        join_by_agent = lambda observation_batch, num_ag: [
            list(observation_batch[:, i]) for i in range(num_ag)
        ]
        # tensor_forme: stack one agent's batch entries into a tensor.
        tensor_forme = lambda observation, i_ag: torch.tensor(
            [list(i) for i in observation[i_ag]]
        )
        next_observ = [
            tensor_forme(join_by_agent(global_batch.next_observation, len(self.mas)), i)
            for i in range(len(self.mas))
        ]
        # Greedy joint next action a' chosen by all agents' current policies.
        next_actions = self.mas.greedy_action(next_observ)
        # Re-interleave per-agent actions back into per-sample joint actions.
        nextact = []
        for bat in range(self.batch_size):
            nextact.append([next_actions[ag][bat] for ag in range(len(self.mas))])
        inputs_critic = self._critic_inputs(global_batch.observation, nextact)
        next_action_value = self.curr_critic(inputs_critic)
        my_reward = torch.tensor(local_batch.reward).view(-1, 1)
        # 1 where the episode ended (no bootstrap), 0 otherwise.
        my_dones = torch.tensor(np.where(local_batch.done_flag, 1, 0)).view(-1, 1)
        return (
            (my_reward + self.gamma * next_action_value * (1 - my_dones))
            .detach()
            .float()
        )
    def _critic_inputs(self, batch_obs, batch_act):
        # Concatenate each sample's joint observation and joint action into
        # one flat critic-input vector per batch entry.
        return torch.tensor(
            [super_cat(batch_obs[b], batch_act[b]) for b in range(self.batch_size)]
        ).float()
class MAACAgent(MAPGAgent):
    """
    The class of trainable agent using multi-agent actor-critic methods.
    :param critic_model: (Model or torch.nn.Module) The critic model
    :param actor_model: (Model or torch.nn.Module) The actor model
    :param observation_space: (gym.Spaces) The observation space
    :param action_space: (gym.Spaces) The action space
    :param index: (int) The index of the agent in the multi-agent system
    :param mas: (MARL) The multi-agent system in which the agent is included
    :param experience: (Experience) The experience memory data structure
    :param exploration: (Exploration) The exploration process
    :param lr_actor: (float) The learning rate for each actor
    :param lr_critic: (float) The learning rate for each critic
    :param gamma: (float) The discount factor
    :param batch_size: (int) The batch size
    :param tau: (float) The update rate
    :param use_target_net: (bool) If true use a target model
    :param name: (str) The name of the agent
    """
    def __init__(
        self,
        critic_model,
        actor_model,
        observation_space,
        action_space,
        index=None,
        experience="ReplayMemory-1000",
        exploration="EpsGreedy",
        lr_actor=0.001,
        lr_critic=0.001,
        gamma=0.95,
        batch_size=32,
        tau=0.01,
        use_target_net=False,
        name="MAACAgent",
    ):
        super(MAACAgent, self).__init__(
            critic_model=critic_model,
            actor_policy="StochasticPolicy",
            actor_model=actor_model,
            observation_space=observation_space,
            action_space=action_space,
            index=index,
            experience=experience,
            exploration=exploration,
            lr_actor=lr_actor,
            lr_critic=lr_critic,
            gamma=gamma,
            batch_size=batch_size,
            # Bug fix: tau and use_target_net were accepted by this
            # constructor but never forwarded, so callers' values were
            # silently ignored in favour of MAPGAgent's defaults.
            tau=tau,
            use_target_net=use_target_net,
            name=name,
        )
    def update_actor(self, local_batch, global_batch):
        """Policy-gradient step: maximize log pi(a|o) weighted by the
        (detached) centralized critic value."""
        self.actor_optimizer.zero_grad()
        # Compute the actor loss.
        pd = self.policy.forward(local_batch.observation)
        log_prob = pd.log_prob(local_batch.action)  # .unsqueeze(0)
        critic_in = self._critic_inputs(global_batch.observation, global_batch.action)
        # (debug print statements removed)
        gae = self.critic_model(critic_in).detach()
        actor_loss = -(log_prob * gae).mean()
        actor_loss.backward()
        self.actor_optimizer.step()
class MADDPGAgent(MAPGAgent):
    """
    The class of trainable agent using multi-agent deep deterministic policy gradient methods.
    :param critic_model: (Model or torch.nn.Module) The critic model
    :param actor_model: (Model or torch.nn.Module) The actor model
    :param observation_space: (gym.Spaces) The observation space
    :param action_space: (gym.Spaces) The action space
    :param index: (int) The index of the agent in the multi-agent system
    :param mas: (MARL) The multi-agent system in which the agent is included
    :param experience: (Experience) The experience memory data structure
    :param exploration: (Exploration) The exploration process
    :param lr_actor: (float) The learning rate for each actor
    :param lr_critic: (float) The learning rate for each critic
    :param gamma: (float) The discount factor
    :param batch_size: (int) The batch size
    :param tau: (float) The update rate
    :param use_target_net: (bool) If true use a target model
    :param name: (str) The name of the agent
    """
    def __init__(
        self,
        critic_model,
        actor_model,
        observation_space,
        action_space,
        index=None,
        experience="ReplayMemory-1000",
        exploration="OUNoise",
        lr_actor=0.01,
        lr_critic=0.01,
        gamma=0.95,
        batch_size=32,
        tau=0.01,
        # Bug fix: the default was the integer 100 for what is documented
        # and used as a boolean flag; both are truthy so behaviour is
        # unchanged, but True states the intent.
        use_target_net=True,
        name="MADDPGAgent",
    ):
        super(MADDPGAgent, self).__init__(
            critic_model=critic_model,
            actor_policy="DeterministicPolicy",
            actor_model=actor_model,
            observation_space=observation_space,
            action_space=action_space,
            index=index,
            experience=experience,
            exploration=exploration,
            lr_actor=lr_actor,
            lr_critic=lr_critic,
            gamma=gamma,
            tau=tau,
            use_target_net=use_target_net,
            batch_size=batch_size,
            name=name,
        )
    def update_actor(self, local_batch, global_batch):
        """DDPG actor step: ascend the centralized critic's value of the
        joint action where this agent's action is replaced by its current
        deterministic policy output (so gradients flow into the actor)."""
        self.actor_optimizer.zero_grad()
        # Compute this agent's differentiable predicted actions.
        obs = torch.tensor(local_batch.observation).float()
        my_action_pred = self.policy.model(obs)
        # join_by_agent: regroup a (batch, agents) array into per-agent lists.
        join_by_agent = lambda batch, num_ag: [list(batch[:, i]) for i in range(num_ag)]
        # tensor_forme: stack one agent's batch entries into a tensor.
        tensor_forme = lambda observation, i_ag: torch.tensor(
            [list(i) for i in observation[i_ag]]
        )
        # Rearrange batches into per-agent tensors.
        action_batch = [
            tensor_forme(join_by_agent(global_batch.action, len(self.mas)), i)
            for i in range(len(self.mas))
        ]
        observation_batch = [
            tensor_forme(join_by_agent(global_batch.observation, len(self.mas)), i)
            for i in range(len(self.mas))
        ]
        # Substitute this agent's stored actions with the policy's output.
        action_batch[self.index] = my_action_pred
        # Build the critic input: per sample, concat all observations then
        # all actions.
        inp_critic = []
        for b in range(self.batch_size):
            b_o = [observation_batch[ind_ag][b] for ind_ag in range(len(self.mas))]
            b_a = [action_batch[ind_ag][b] for ind_ag in range(len(self.mas))]
            inp_critic.append(torch.cat([torch.cat(b_o), torch.cat(b_a)]).unsqueeze(0))
        inputs_critic = torch.cat(inp_critic)
        # Maximize the critic's value -> minimize its negation.
        actor_loss = -self.critic_model(inputs_critic).mean()
        actor_loss.backward(retain_graph=True)
        self.actor_optimizer.step()
|
21,074 | cd7d2b67e7c2728a7c463b75a3d7732196f104be |
# Interactive salary tool: the user picks one of three operations, then the
# relevant branch below prompts for a salary and prints the result.
# (All user-facing strings are in Portuguese and left untouched.)
print("=============================================")
print ("Faça sua escolha Digitando a opção desejada:")
print (" 1 - Novo Salario retirando o Imposto")
print (" 2 - Novo Salario com aumento")
print (" 3 - Classificação salarial")
print("=============================================")
opcao = int(input( "Digite a oção desejada: " ))
# Re-prompt until a valid option (1-3) is entered.
while opcao < 1 or opcao > 3:
    print ("[WARNING] Opção inválida. Digite novamente")
    print ("---------------------------------------------")
    opcao = int(input("Digite a opção desejada: "))
# Option 1: net salary after a bracketed tax (5% / 10% / 15%).
if opcao ==1:
    salario = int(input("Digite seu salario: "))
    imposto = 0.0
    if salario < 500:
        imposto = salario*0.05
        # %d truncates the fractional part of the float result.
        print("Seu novo salario livre de imposto é R$ %d" %(salario - imposto))
    elif salario >= 500 and salario <= 850:
        imposto = salario*0.10
        print("Seu novo salario livre de imposto é R$ %d" %(salario - imposto))
    else:
        imposto = salario*0.15
        print("Seu novo salario livre de imposto é R$ %d" %(salario - imposto))
# Option 2: fixed raise, larger for lower salaries (25 / 50 / 75 / 100).
if opcao==2:
    salario = int(input("Digite seu salario: "))
    if salario > 1500:
        aumento = 25
        print("Seu novo salario após o aumento é R$ %d" %(salario + aumento))
    elif salario >= 750 and salario <= 1500:
        aumento = 50
        print("Seu novo salario após o aumento é R$ %d" %(salario + aumento))
    elif salario >= 450 and salario < 750:
        aumento = 75
        print("Seu novo salario após o aumento é R$ %d" %(salario + aumento))
    else:
        aumento = 100
        print("Seu novo salario após o aumento é R$ %d" %(salario + aumento))
# Option 3: classify the salary (threshold 700).
if opcao==3:
    salario = int(input("Digite seu salario: "))
    if salario <= 700:
        print("Funcionario mal remunerado")
    else:
        print("Funcionario bem remunerado")
|
21,075 | 22d7f4fa9f3ca0ea42f2a1c1d6e81785cdf102f1 | # **************************************
# This file contains validation functions for use in various Views.
# Any data validation should be done by functions in this file.
# **************************************
from django.contrib.auth.models import User
from .models import *
# Name: check_post_fields
# Description: check that post contains all fields
# Arguments: post a request.POST
# fields a dict of fields to be in post
# Returns: An error string for the first missing field
# None if no field is missing
def check_post_fields(post, fields):
    """Return an error string for the first field in *fields* that is
    absent from *post* or present but empty; return None when every
    required field is supplied."""
    return next(
        ("The %s field is required" % name
         for name in fields
         if name not in post or post[name] == ''),
        None,
    )
# Name: validate_insurance
# Description: Check that id_num is exactly 12 characters, starting with an
# alphabetic character, and containing only alphanumeric
# characters.
# Arguments: id_num, a potential insurance number
# Returns: True if id_num is a valid insurance ID number; False otherwise.
def validate_insurance(id_num):
    """An insurance ID is valid iff it is exactly 12 alphanumeric
    characters and its first character is a letter."""
    return (
        len(id_num) == 12
        and id_num[0].isalpha()
        and id_num.isalnum()
    )
# Name: unique_insurance
# Description: Check that an insurance number is not already in use
# Arguments: id_num, a potential insurance number
# Returns: True if id_num has NOT been registered already; False otherwise
def unique_insurance(id_num):
    """Return True when no Patient already uses *id_num* as an insurance ID."""
    return not Patient.objects.filter(insurance_id__exact=id_num).exists()
# Name: validate_name
# Description: Check that the string is only alphabetic characters
# Arguments: name_string is a potential name
# Returns: True if name_string exists and is not empty and contains only aphabetic characters
# False otherwise
def validate_name(name_string):
    """Return True when *name_string* is a non-empty string containing only
    alphabetic characters; False otherwise (including None/empty input).

    Bug fix: the original used ``name_string is ''`` — an identity
    comparison, which is unreliable for dynamically built empty strings
    (they would fall through the loop and validate as True) and is a
    SyntaxWarning on modern Python.
    """
    if not name_string:
        # None or empty string.
        return False
    return name_string.isalpha()
# Name: validate_phone
# Description: Ignore any non-digit character, check if there are 10 digits
# Arguments: num is a potential phone number
# Returns: True if num contains exactly ten digits, ignoring other chars
# False otherwise
def validate_phone(num):
    """Return True when *num* contains exactly ten digits, ignoring every
    non-digit character; False otherwise (including None/empty input).

    Bug fixes: the original used the identity comparison ``num is ''``,
    and returned True as soon as the tenth digit was seen — so numbers
    with MORE than ten digits were incorrectly accepted, contrary to the
    documented "exactly ten digits" contract.
    """
    if not num:
        return False
    digit_count = sum(1 for ch in num if ch.isdigit())
    return digit_count == 10
# Name: unique_username
# Description: Check if the provided name already is owned by another user
# Arguments: name the username to check against
# Returns: True if this name is acceptable
# False otherwise
def unique_username(name):
    """Return True when no existing User already owns *name*."""
    return not User.objects.filter(username__exact=name).exists()
# Name: validate_prescription_dates
# Description: Check if the prescription start date is before the prescription end date
# Arguments: start and end date of the prescription
# Returns: True if the start date is before the end date
# False otherwise
def validate_prescription_dates(start, end):
    """Return True when the prescription starts strictly before it ends."""
    return start < end
# Name: validate_prescription_dates
# Description: Check if the prescription start date is on or after today's date
# Arguments: start date of the prescription
# Returns: True if the start date is on or after today's date
# False otherwise
def validate_presciption_start(start):
    """Return True when the prescription start date (a "%Y-%m-%d" string)
    is on or after today's date.

    Bug fix: the original compared the parsed midnight datetime with ``>``
    against datetime.today() (which carries the current wall-clock time),
    so a prescription starting *today* was always rejected — contradicting
    the documented "on or after today's date" contract.  Comparing dates
    with ``>=`` fixes both issues.
    """
    return datetime.strptime(start, "%Y-%m-%d").date() >= datetime.today().date()
# Name: validate_dob
# Description: Check that a provided date is not in the future
# Arguments: date the date object to be checked
# Returns: True if date is in the past, False if date is in the future
def validate_dob(date):
    """Return True when *date* (a "%Y-%m-%d" string) parses successfully
    and is not in the future; False otherwise.

    Bug fix: the original's bare ``except`` swallowed every exception
    (even KeyboardInterrupt/SystemExit); only the parse errors that
    strptime can raise are caught now.
    """
    try:
        dob = datetime.strptime(date, "%Y-%m-%d")
    except (TypeError, ValueError):
        # None / non-string input, or a malformed date string.
        return False
    return dob <= datetime.today()
# Name: validate_weight
# Description: check that a provided weight is not less than 0 or greater than 600
# Arguments: weight the weight to be checked
# Returns: True if meets reqs, False otherwise
def validate_weight(weight):
    """Return True when *weight* converts to an int in the inclusive
    range 0-600 pounds."""
    return 0 <= int(weight) <= 600
# Name: validate_height
# Description: check that a height is not less than 0 or greater than 9 ft
# Arguments: height the height to be checked
# Returns: True if desc parameters are met, false otherwise
def validate_height(height):
    """Return True when *height* (in inches) converts to an int in the
    inclusive range 0-108 (i.e. up to 9 feet)."""
    return 0 <= int(height) <= 108
21,076 | d27404c4dd82aa6071d4b95f08dfe299920b89d0 | from django.contrib.auth.models import User
from django.db import models
from django.utils import timezone
class Article(models.Model):
    """A blog article written by a Django auth User."""
    # NOTE(review): ForeignKey without on_delete implies Django < 2.0
    # (where CASCADE was the implicit default) — confirm the project's
    # Django version before upgrading.
    user = models.ForeignKey(User, verbose_name="Автор статьи")
    # Defaults to the creation time; timezone.now keeps it timezone-aware.
    publish_date = models.DateTimeField("Дата публикации", default=timezone.now)
    text = models.TextField("Текст статьи")
    title = models.CharField("Заголовок", max_length=255)
    class Meta:
        verbose_name='Статья'
verbose_name_plural='Статьи' |
21,077 | e68fc1ecb4e922c6e6d3379e619138d75c304e7f | import pytest
import json
def test_app(client):
    # Smoke test: the accounts listing endpoint should respond 200 OK.
    assert client.get('accounts/accounts').status_code == 200
def test_post_account(client):
    """POSTing a well-formed JSON account payload should create the
    account and return HTTP 201."""
    mimetype = 'application/json'
    headers = {
        'Content-Type': mimetype,
        'Accept': mimetype
    }
    # Representative account payload; account_id 9999 is presumably unused
    # in the test database — TODO confirm the fixtures guarantee that.
    data = {
        "account_id": 9999,
        "type": "checking",
        "name": "testing",
        "birthdate": "1985-06-06",
        "phone": "55511123111",
        "email": "email@test.com",
        "balance": 0
    }
    url = 'accounts/accounts'
    response = client.post(url, data=json.dumps(data), headers=headers)
    assert response.status_code == 201
    #assert response.content_type == mimetype
    #assert response.json['Result'] == 39
21,078 | 791ec64ce0fc2d09206661c70db602a9b9cd4b9f | #!C:/python27/python.exe
'''
cs1114
Submission: hw04
Programmer: Kenneth Huynh
Username: kh1983
Purpose of program, assumptions, constraints:
This module contains the function for hw04.
'''
def getCommonElements(firstList, secondList):
    '''Return the elements present in both lists, without duplicates, in
    order of first appearance in firstList.

    Assumes elements are hashable (numbers/strings); membership tests use
    sets, replacing the original O(n*m) double list scan with O(n+m).'''
    inSecond = set(secondList)
    commonElements = []
    seen = set()
    for element in firstList:
        if element in inSecond and element not in seen:
            seen.add(element)
            commonElements.append(element)
    return commonElements
|
21,079 | 906b13f013c6c830d4b9b187fc8291e14a84f85e | mylist=[1,2,3,4,5]
# Report the parity of each number in mylist, one per line.
for obj in mylist:
    label = "is even" if obj % 2 == 0 else "is odd"
    print(obj, label)
|
21,080 | b3b50492d22d0ea33048e791a8ec38e35645baae | #!/usr/bin/env python
#from numpy import *
#from string import *
#from time import clock
from math import sqrt
#from itertools import permutations
from collections import OrderedDict
listofprimes = [2, 3, 5]
def prime(i):
    """Return the i-th prime (0-indexed), extending the module-level
    memoized list ``listofprimes`` on demand.

    ``listofprimes`` must stay sorted and gap-free; ``isprime`` below calls
    back into this function, so the two are mutually recursive.
    """
    if len(listofprimes) > i:
        return listofprimes[i]
    else:
        # Candidates advance by 2: every prime after 2 is odd, and the
        # seed list already contains 2, 3, 5.
        p = listofprimes[-1] + 2
        while not isprime(p):
            p += 2
        listofprimes.append(p)
        # At most one prime was appended, so recurse until index i exists.
        return prime(i)
def isprime(n):
    """Trial division by cached primes up to sqrt(n). Assumes n >= 2."""
    i = 0
    # Only divisors up to sqrt(n) need checking; prime(i) lazily extends
    # the shared prime cache as needed.
    while prime(i)**2 <= n:
        if n % prime(i) == 0:
            return False
        else:
            i += 1
    return True
def factors(n):
    """Return the prime factorization of n (with multiplicity), smallest
    factor first.

    NOTE: uses '/'; under Python 2 (this file uses print statements) that
    is floor division for ints — under Python 3 it would produce floats.
    """
    lst = []
    nbr = n
    while not nbr == 1:
        i = 0
        # Find the smallest prime dividing the remaining value.
        while not nbr % prime(i) == 0:
            i += 1
        lst.append(prime(i))
        nbr = nbr / prime(i)
    return lst
if __name__ == '__main__':
    # Project Euler 47: find the first of n consecutive integers that each
    # have exactly n distinct prime factors (here n = 4).
    nprimefactors = []
    n = 4
    inarow = 0
    i = 1
    while inarow < n:
        i += 1
        # OrderedDict.fromkeys dedupes while preserving order, giving the
        # number of *distinct* prime factors of i.
        if len(list(OrderedDict.fromkeys(factors(i)))) == n:
            inarow += 1
        else:
            inarow = 0
    # Python 2 print statement; i is the last of the n consecutive numbers,
    # so i-n+1 is the first.
    print i-n+1
    # FUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUULT |
21,081 | 88fdde3f35a4af4c0d6afe0870123dbc9f2dca05 | import numpy as np
import matplotlib.pyplot as plt
class Controlador:
    """Computes net present values for investment alternatives.

    Fix: the original relied on ``np.pv``, which was deprecated in NumPy
    1.18 and removed in 1.20; the present-value math is now done with the
    closed-form formula (payments at end of period), producing the same
    values. The debug print in ``vp_ii_aa_tt_unidad`` was also removed.
    """
    def __init__(self):
        pass
    @staticmethod
    def _pv(rate, nper, pmt=0.0, fv=0.0):
        """Closed-form equivalent of the removed ``np.pv(rate, nper, pmt, fv)``."""
        if rate == 0:
            # Degenerate case: no discounting.
            return -(fv + pmt * nper)
        factor = (1 + rate) ** nper
        return -(fv + pmt * (factor - 1) / rate) / factor
    def get_valor_presente(self, listas_transformadas):
        """Return the list of present values when all required input lists
        (investment, annuity, rate, periods, salvage) are non-empty;
        otherwise a not-supported message."""
        required = (listas_transformadas[0], listas_transformadas[3],
                    listas_transformadas[4], listas_transformadas[5],
                    listas_transformadas[6])
        if all(required):
            return self.vp_ii_aa_tt_listas(*required)
        return "Caso no contemplado"
    def get_valor_futuro(self):
        """Future value — not implemented yet."""
        pass
    def vp_ii_aa_tt_listas(self, l_inv, l_a, l_i, l_n, l_s):
        """Element-wise version of vp_ii_aa_tt_unidad over parallel lists."""
        return [self.vp_ii_aa_tt_unidad(l_inv[i], l_a[i], l_i[i], l_n[i], l_s[i])
                for i in range(len(l_n))]
    def vp_ii_aa_tt_unidad(self, inv, aa, tt, nn, ss):
        """Net PV of one alternative: PV of the annuity *aa* plus PV of the
        salvage value *ss* over *nn* periods at rate *tt*, minus the
        initial investment *inv*."""
        vp_inversion = self._pv(tt, nn, -1 * aa)
        vp_salvataje = self._pv(tt, nn, 0, -1 * ss)
        return vp_inversion + vp_salvataje - inv
21,082 | 13608d8f168ac89ed2dd995dce7cfbf082c784df | from flask import Blueprint, request, current_app
from flask_jwt_extended import jwt_required, get_jwt_identity
from http import HTTPStatus
from src.models.ticket_model import TicketModel
from src.models.user_model import UserModel
from src.services.ticket_numbers import ticket_numbers_creator
megasenas_bp = Blueprint("megasenas", __name__, url_prefix="/api/megasenas")
@megasenas_bp.route("", methods=["GET"])
@jwt_required()
def list_tickets():
    """Return every Mega-Sena ticket belonging to the authenticated user."""
    user_id = get_jwt_identity()
    user: UserModel = UserModel.query.get(user_id)
    user_tickets = user.ticket_list
    # Local import — presumably to avoid a circular import; verify.
    from src.serializers.ticket_model_serializer import TicketsSerializer
    serialized_tickets = TicketsSerializer(user_tickets)
    return serialized_tickets, HTTPStatus.OK
@megasenas_bp.route("", methods=["POST"])
@jwt_required()
def create_ticket():
    """Validate the request body, create a ticket with generated numbers
    for the authenticated user, and return the serialized ticket."""
    body = request.get_json()
    # Local import — presumably to avoid a circular import; verify.
    from src.serializers.create_ticket_schema import create_ticket_schema
    request_errors = create_ticket_schema.validate(body)
    if request_errors:
        return {
            "msg": "Invalid or missing Megasena request fields."
        }, HTTPStatus.UNPROCESSABLE_ENTITY
    user_id = get_jwt_identity()
    ticket: TicketModel = TicketModel(user_id=user_id)
    # Fill in / complete the chosen numbers via the ticket-number service.
    request_numbers = body.get("numbers")
    numbers = ticket_numbers_creator(request_numbers)
    ticket.ticket_numbers = numbers
    session = current_app.db.session
    session.add(ticket)
    session.commit()
    from src.serializers.ticket_model_serializer import TicketSerializer
    serialized_ticket = TicketSerializer(ticket)
    return serialized_ticket, HTTPStatus.OK
@megasenas_bp.route("/draws", methods=["GET"])
@jwt_required()
def read_draw():
    """Return the numbers of the latest Mega-Sena draw."""
    # Local import — presumably to avoid a circular import; verify.
    from src.services.megasena_draw import draw_numbers_supplier
    draw_numbers = draw_numbers_supplier()
    return {"latest_draw": draw_numbers}, HTTPStatus.OK
@megasenas_bp.route("/results", methods=["GET"])
@jwt_required()
def read_results():
    """Compare the user's most recent ticket against the latest draw and
    report the matching numbers.

    NOTE(review): raises IndexError when the user has no tickets
    (``ticket_list[-1]``) — consider returning 404/422 in that case.
    """
    user_id = get_jwt_identity()
    user: UserModel = UserModel.query.get(user_id)
    last_ticket: TicketModel = user.ticket_list[-1]
    from src.services.megasena_draw import get_correct_ticket_numbers
    correct_ticket_numbers = get_correct_ticket_numbers(last_ticket.ticket_numbers)
    from src.services.megasena_draw import draw_numbers_supplier
    last_draw = draw_numbers_supplier()
    return {
        "user_id": user_id,
        "last_ticket": {
            "ticket_id": last_ticket.id,
            "ticket_numbers": last_ticket.ticket_numbers,
        },
        "last_megasena_draw": last_draw,
        "correct_count": len(correct_ticket_numbers),
        "correct_numbers": correct_ticket_numbers,
    }, HTTPStatus.OK
|
21,083 | 822b2672f8e00cdb55616f709a572df40cae6833 | def comp(array1, array2):
"""
A function that checks whether the two arrays have the "same" elements, with the same multiplicities.
"Same" means, here, that the elements in b are the elements in a squared, regardless of the order.
"""
return sorted([i * i for i in array1]) == sorted(array2)
def sum_array(arr):
    """
    Sum all the numbers of the array except one instance each of the
    highest and the lowest value (the value, not the index!).

    None, and arrays with fewer than two elements, yield 0 (the slice
    below is empty in those cases).
    """
    if arr is None:  # idiom fix: identity check, not `== None`
        return 0
    # Sorting puts one smallest and one largest value at the ends, so
    # stripping them with a slice handles duplicated extremes correctly.
    return sum(sorted(arr)[1:-1])
def points(games):
    """
    Our football team finished the championship. The result of each match
    looks like "x:y", e.g. ["3:1", "2:2", "0:1", ...].
    Count the team's points: x>y -> 3, x<y -> 0, x==y -> 1.

    Bug fix: the original compared the score *strings*, so "10:9" counted
    as a loss ("10" < "9" lexicographically); scores are now converted to
    ints. The accumulator was also renamed so it no longer shadows the
    function name.
    """
    total = 0
    for game in games:
        x, y = (int(score) for score in game.split(':'))
        if x > y:
            total += 3
        elif x == y:
            total += 1
    return total
def reverse(string):
    """Return *string* with its characters in reverse order."""
    return "".join(reversed(string))
|
21,084 | a0bd2c8e6f566e1f96e2a108576bbaec36c79105 | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 8 19:52:02 2019
@author: Bill Smith
"""
#rock paper scissors
# for num in range (2, 1000, 3):
# for x in range (2, num):
# if num % x == 0:
# print(num, 'equals', x, '*', num//x)
# break
# else:
# # print(num, 'is a prime number')
# def fib(n):
# a, b = 0, 1
# while a < n:
# print(a, end = ' ')
# a,b = b, a+b
# print()
# # fib(10000)
# bob = ['bob smith', 42, 39999, 'software']
# sue = ['Sue smith', 49, 23444, 'hardware']
# people = [bob, sue]
# for person in people:
# # print(person)
# # print(person[0].split()[-1])
# # person[2] *= 2
# print(person[2])
# while True:
# print('Type UserName')
# name = input()
# if name != 'grovemonkey':
# continue
# print('hello, sam. what is the password? (it is a fish)')
# password = input()
# if password == 'bean':
# break
# print('access granted')
# name = ''
# while not name:
# print('enter your name: ')
# name = input()
# print('how many will youhave?')
# numofguest = int(input())
# if numofguest:
# open the file
# search all the words line by line
# find words with len >= 20
# print those words
# line = []
# linenum = 0 #starting point for the search
# with open('C:/Users/Bill Smith/Documents/Python Scripts/words.txt') as myfile:
# for line in myfile:
# linenum += 1
# if len(line) >= 20:
# print(line)
# else:
# continue
def avoids(word, forbidden):
    """Return True if *word* contains none of the letters in *forbidden*.

    Bug fix: the original iterated ``word[2]`` — the single third
    character — so forbidden letters anywhere else in the word were
    missed (and words shorter than 3 chars raised IndexError). The stray
    debug ``print(letter)`` was removed as well.
    """
    for letter in word:
        if letter in forbidden:
            return False
    return True
def uses_only(word, available):
    """Return True when every character of *word* appears in *available*."""
    return all(ch in available for ch in word)
def uses_all(word, required):
    """Return True when *word* contains every character of *required*."""
    return all(ch in word for ch in required)
def is_abecedarian(word):
    """Return True if the letters of *word* are in non-decreasing order.

    Prints the word before returning False (debugging aid, as before);
    still raises IndexError for an empty string.
    """
    previous = word[0]
    for ch in word[1:]:
        if ch < previous:
            print(word)
            return False
        previous = ch
    return True
# Split an email address into user name and domain at the '@'.
addr = 'monty@python.org'
uname, domain = addr.split('@')
def min_max(t):
    """Return the smallest and largest element of *t* as a (min, max) pair."""
    smallest = min(t)
    largest = max(t)
    return smallest, largest
# Scratch values used by the zip/has_match experiments below.
dope = 5000
s = [0, 1, 2]
t = [0, 1, 2]
def has_match(t1, t2):
    """Return True if any position holds equal elements in both sequences."""
    return any(a == b for a, b in zip(t1, t2))
# zip() stops at the shorter input, so with range(5) only 'a'..'e' get a value.
d = dict(zip('abcdef', range(5)))
for key, val in d.items():
    print (val, key)
|
21,085 | 28c666685973771cc581b872598f2f6a91bfa7bf | """A permutation is an ordered arrangement of objects. For example, 3124 is one possible permutation of the digits 1, 2, 3 and 4. If all of the permutations are listed numerically or alphabetically, we call it lexicographic order. The lexicographic permutations of 0, 1 and 2 are:
012 021 102 120 201 210
What is the millionth lexicographic permutation of the digits 0, 1, 2, 3, 4, 5, 6, 7, 8 and 9?"""
import itertools
solution = 0
count = 9
# itertools.permutations yields tuples in lexicographic order, so the
# millionth permutation sits at index 999999; islice reaches it lazily
# instead of materializing all 10! = 3,628,800 tuples in a list.
permutations = itertools.islice(itertools.permutations(range(10)), 999999, None)
for i in next(permutations):
    # Assemble the digits into one integer, most significant digit first.
    solution += i*10**count
    count -= 1
print(solution) |
21,086 | 7d0c9298a92cde04f2ed75439ea4868426d8ef08 | # coding=utf-8
import socket
# Python 2 TCP client: connect and print the server's greeting.
s = socket.socket()
#host = socket.gethostname() # single machine: connect to this host itself
host = "192.168.1.64" # multiple machines: connect to a peer's LAN address
port = 1234
s.connect((host,port))
print s.recv(1024) |
21,087 | e0b59688e7180ea51e9ed84e68f9c95f9e625a05 | from kivy.app import App
from kivy.uix.widget import Widget
from kivy.graphics.instructions import RenderContext
from kivy.graphics import Rectangle, Color, Mesh, ClearBuffers
from kivy.clock import Clock
from numpy.random import rand
# Mesh(vertices=(0,0,0,.5,.5,0,.5,.5),indices=(0,1,2,3),
# mode="triangle_strip",
# fmt=[(b'pos', 2, "float")])
vs = """
attribute vec2 vPosition;
void main()
{
gl_Position = vec4(vPosition, 0.0, 1.0);
}
"""
fs = """
void main()
{
gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0);
}
"""
class GLWidget(Widget):
    """Widget whose canvas is a RenderContext running the module-level
    vertex/fragment shader sources (vs/fs)."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.prog = RenderContext()
        self.prog.shader.vs = vs
        self.prog.shader.fs = fs
        # Replace the default canvas so all drawing goes through our shader.
        self.canvas = self.prog
    def draw(self):
        # Force an immediate redraw of the shader canvas.
        self.canvas.draw()
class MainApp(App):
    """App that draws a tiny rectangle at a random position every frame."""
    def build(self):
        self.glw = GLWidget()
        return self.glw
    def draw(self,dt):
        # Clear, then draw one 0.01x0.01 rectangle at a random clip-space
        # position in [-1, 1) on each axis.
        self.glw.canvas.clear()
        with self.glw.canvas:
            ClearBuffers(clear_color=False)
            Rectangle(pos=(rand()*2-1,rand()*2-1),size=(0.01,0.01))
    def on_start(self):
        # Interval 0 schedules draw() on every frame tick.
        Clock.schedule_interval(self.draw,0)
MainApp().run()
|
21,088 | 48668e5babd07686773cc73175de72222924d3b7 | from itertools import combinations, product, chain
from random import sample
def get_sample_of_cousins(population, distance, percent_ancestors = 0.1,
                          percent_descendants = 0.1):
    """
    Return a sample of pairs of individuals whose most recent common
    ancestor is exactly *distance* generations back.

    Samples percent_ancestors of the candidate ancestors, then for each
    one samples percent_descendants of its qualifying descendant pairs
    in the last generation.
    """
    assert 0 < distance < len(population.generations)
    assert 0 < percent_descendants <= 1
    assert 0 < percent_ancestors <= 1
    # generations[-(distance+1)] is `distance` generations above the last.
    common_ancestors = population.generations[-(distance + 1)].members
    last_generation = set(population.generations[-1].members)
    ancestors_sample = sample(common_ancestors,
                              int(len(common_ancestors) * percent_ancestors))
    pairs = []
    for ancestor in ancestors_sample:
        temp_pairs = descendants_with_common_ancestor(ancestor, last_generation)
        # Materialize the iterator so it can be sampled by length.
        temp_pairs = list(temp_pairs)
        pairs.extend(sample(temp_pairs,
                            int(len(temp_pairs) * percent_descendants)))
    return pairs
def descendants_of(node):
descendants = set()
to_visit = list(node.children)
while len(to_visit) > 0:
ancestor = to_visit.pop()
descendants.add(ancestor)
to_visit.extend(ancestor.children)
return descendants
def descendants_with_common_ancestor(ancestor, generation_members):
    """
    Returns pairs of individuals descendent from ancestor in the given
    generation who have ancestor as their *most recent* common ancestor.

    Returns an iterable of 2-tuples (may be lazy); generation_members must
    be a set for the issuperset/intersection operations below.
    """
    # Find the descendents of the children, remove the pairwise
    # intersection, and return pairs from different sets.
    ancestor_children = ancestor.children
    if len(ancestor_children) < 2:
        # A common ancestor of a pair needs at least two child lines.
        return []
    if generation_members.issuperset(ancestor_children):
        # Depth is only 1 generation, so return all combinations of children.
        return combinations(ancestor_children, 2)
    # One descendant set per child line, restricted to the target generation.
    descendant_sets = [descendants_of(child).intersection(generation_members)
                       for child in ancestor_children]
    pair_iterables = []
    for descendants_a , descendants_b in combinations(descendant_sets, 2):
        intersection = descendants_a.intersection(descendants_b)
        if len(intersection) > 0:
            # Remove individuals who have a more recent common ancestor
            # (they descend from both child lines).
            descendants_a = descendants_a - intersection
            descendants_b = descendants_b - intersection
        pair_iterables.append(product(descendants_a, descendants_b))
    return chain.from_iterable(pair_iterables)
|
21,089 | 6e9e2b9d92a09583c1f6842a2ef58493b0ce3cff | from face_engine import FaceEngine
from flask import Flask
from config import Config
engine = FaceEngine()
def create_app():
    """Flask application factory: configure the shared FaceEngine models
    from Config and mount the API blueprint under /api."""
    app = Flask(__name__)
    app.config.from_object(Config)
    # set custom FaceEngine models on the module-level `engine` singleton
    engine.detector = app.config['DETECTOR']
    engine.embedder = app.config['EMBEDDER']
    engine.estimator = app.config['ESTIMATOR']
    # Local import — presumably to avoid a circular import; verify.
    from .api import api
    app.register_blueprint(api, url_prefix='/api')
    return app
|
21,090 | 3d3231a0bf55d29328643191afd6e1155c9db472 | # -*- coding: utf-8 -*-
import os
from core.script_base import ScriptBase
from . import load_img as li,TEMPLATEPATH,RESAULTPATH
from core.general import util
#image save at scripts/template
#use li to load scriptimage--> image=li(r"t112521512.png")
class SampleScript(ScriptBase):
    """Demo script showing the ScriptBase lifecycle: start / run / finish."""
    # Display name and description shown to the user when picking a script.
    ScriptName='sample1'
    Description='This is Sample1 Description'
    # Recording of runs is disabled for this sample.
    ENABLERECORD=False
    def __init__(self):
        super().__init__(RESAULTPATH,__name__)
    def start(self):
        # Called once before the run loop: prepare preconditions/environment.
        print(f'Sample1 脚本准备启动...')
        print(f'准备脚本真实内容要求的条件和环境...')
        #raise NotImplementedError('BaseScript-start error-No Function Defined')
    def run(self):
        # Called repeatedly: the script's real per-iteration logic goes here.
        print(f'Sample1脚本真实循环执行的逻辑...')
        #self.Logger.info('sample1 run')
    def finish(self):
        # Called once after the loop: return to the main screen so other
        # scripts can run.
        print(f'Sample1脚本循环结束,返回主界面,方便其他脚本运行')
        #raise NotImplementedError('BaseScript-finish error-No Function Defined') |
21,091 | 0b0427741d6be04cc8d5404dc1bceb2ad0b62a55 | from django.test import TestCase
from django.core.exceptions import PermissionDenied
from django.contrib.auth.models import Group
from hs_access_control.models import PrivilegeCodes
from hs_core.hydroshare.users import create_account
from hs_core.hydroshare.resource import create_resource
from hs_core.testing import MockIRODSTestCaseMixin
from hs_access_control.tests.utilities import global_reset, \
assertResourceUserState, assertUserResourceState, \
assertUserGroupState, assertGroupUserState, \
assertGroupResourceState, assertResourceGroupState
class BasicFunction(MockIRODSTestCaseMixin, TestCase):
    """ test basic functions """
    def setUp(self):
        """Create three regular users, one superuser, one resource ('bikes')
        owned by george, and two groups ('bikers', 'harpers') he owns."""
        super(BasicFunction, self).setUp()
        global_reset()
        self.group, _ = Group.objects.get_or_create(name='Hydroshare Author')
        self.alva = create_account(
            'alva@gmail.com',
            username='alva',
            first_name='alva',
            last_name='couch',
            superuser=False,
            groups=[]
        )
        self.george = create_account(
            'george@gmail.com',
            username='george',
            first_name='george',
            last_name='miller',
            superuser=False,
            groups=[]
        )
        self.john = create_account(
            'john@gmail.com',
            username='john',
            first_name='john',
            last_name='miller',
            superuser=False,
            groups=[]
        )
        self.admin = create_account(
            'admin@gmail.com',
            username='admin',
            first_name='first_name_admin',
            last_name='last_name_admin',
            superuser=True,
            groups=[]
        )
        # george creates a resource 'bikes'
        self.bikes = create_resource(
            resource_type='CompositeResource',
            owner=self.george,
            title='Bikes',
            metadata=[],
        )
        # george creates a group 'bikers'
        self.bikers = self.george.uaccess\
            .create_group(title='Bikers', description="We are the bikers")
        # george creates a group 'harpers'
        self.harpers = self.george.uaccess\
            .create_group(title='Harpers', description="We are the harpers")
    def test_matrix_testing(self):
        """ Test that matrix testing routines function as believed """
        george = self.george
        alva = self.alva
        john = self.john
        bikes = self.bikes
        bikers = self.bikers
        harpers = self.harpers
        assertResourceUserState(self, bikes, [george], [], [])
        assertUserResourceState(self, george, [bikes], [], [])
        assertUserResourceState(self, alva, [], [], [])
        assertUserResourceState(self, john, [], [], [])
        assertUserGroupState(self, george, [harpers, bikers], [], [])
        assertUserGroupState(self, alva, [], [], [])
        assertUserGroupState(self, john, [], [], [])
        george.uaccess.share_resource_with_user(
            bikes, alva, PrivilegeCodes.CHANGE)
        assertResourceUserState(self, bikes, [george], [alva], [])
        assertUserResourceState(self, george, [bikes], [], [])
        assertUserResourceState(self, alva, [], [bikes], [])
        assertUserResourceState(self, john, [], [], [])
        george.uaccess.share_resource_with_user(
            bikes, john, PrivilegeCodes.VIEW)
        assertResourceUserState(self, bikes, [george], [alva], [john])
        assertUserResourceState(self, george, [bikes], [], [])
        assertUserResourceState(self, alva, [], [bikes], [])
        assertUserResourceState(self, john, [], [], [bikes])
        # Making the resource immutable downgrades CHANGE to VIEW.
        bikes.raccess.immutable = True
        bikes.raccess.save()
        assertResourceUserState(
            self, bikes, [george], [], [
                alva, john])  # squashes CHANGE
        assertUserResourceState(self, george, [bikes], [], [])
        # immutable squashes CHANGE
        assertUserResourceState(self, alva, [], [], [bikes])
        assertUserResourceState(self, john, [], [], [bikes])
        assertGroupUserState(self, bikers, [george], [], [])
        assertGroupUserState(self, harpers, [george], [], [])
        assertUserGroupState(self, george, [bikers, harpers], [], [])
        assertUserGroupState(self, alva, [], [], [])
        assertUserGroupState(self, john, [], [], [])
        george.uaccess.share_group_with_user(
            bikers, alva, PrivilegeCodes.CHANGE)
        assertGroupUserState(self, bikers, [george], [alva], [])
        assertGroupUserState(self, harpers, [george], [], [])
        assertUserGroupState(self, george, [bikers, harpers], [], [])
        assertUserGroupState(self, alva, [], [bikers], [])
        assertUserGroupState(self, john, [], [], [])
        george.uaccess.share_group_with_user(bikers, john, PrivilegeCodes.VIEW)
        assertGroupUserState(self, bikers, [george], [alva], [john])
        assertGroupUserState(self, harpers, [george], [], [])
        assertUserGroupState(self, george, [bikers, harpers], [], [])
        assertUserGroupState(self, alva, [], [bikers], [])
        assertUserGroupState(self, john, [], [], [bikers])
        assertResourceGroupState(self, bikes, [], [])
        assertGroupResourceState(self, bikers, [], [])
        george.uaccess.share_resource_with_group(
            bikes, bikers, PrivilegeCodes.CHANGE)
        # immutable squashes state
        assertResourceGroupState(self, bikes, [], [bikers])
        # immutable squashes state
        assertGroupResourceState(self, bikers, [], [bikes])
        bikes.raccess.immutable = False
        bikes.raccess.save()
        # without immutable, CHANGE returns
        assertResourceGroupState(self, bikes, [bikers], [])
        # without immutable, CHANGE returns
        assertGroupResourceState(self, bikers, [bikes], [])
    def test_share(self):
        """Exercise user/group sharing, privilege downgrade/upgrade, and
        the django admin's implicit powers over resources and groups."""
        bikes = self.bikes
        harpers = self.harpers
        bikers = self.bikers
        george = self.george
        alva = self.alva
        admin = self.admin
        john = self.john
        assertResourceUserState(self, bikes, [george], [], [])
        assertUserResourceState(self, george, [bikes], [], [])
        assertUserResourceState(self, alva, [], [], [])
        george.uaccess.share_resource_with_user(
            bikes, alva, PrivilegeCodes.OWNER)
        assertResourceUserState(self, bikes, [george, alva], [], [])
        assertUserResourceState(self, george, [bikes], [], [])
        assertUserResourceState(self, alva, [bikes], [], [])
        # test a user can downgrade (e.g., from OWNER to CHANGE) his/her access
        # privilege
        alva.uaccess.share_resource_with_user(
            bikes, alva, PrivilegeCodes.CHANGE)
        assertResourceUserState(self, bikes, [george], [alva], [])
        assertUserResourceState(self, george, [bikes], [], [])
        assertUserResourceState(self, alva, [], [bikes], [])
        # unshare bikes
        george.uaccess.unshare_resource_with_user(bikes, alva)
        assertResourceUserState(self, bikes, [george], [], [])
        assertUserResourceState(self, george, [bikes], [], [])
        assertUserResourceState(self, alva, [], [], [])
        assertGroupResourceState(self, bikers, [], [])
        george.uaccess.share_resource_with_group(
            bikes, bikers, PrivilegeCodes.VIEW)
        assertGroupResourceState(self, bikers, [], [bikes])
        george.uaccess.share_resource_with_group(
            bikes, harpers, PrivilegeCodes.CHANGE)
        assertGroupResourceState(self, harpers, [bikes], [])
        george.uaccess.share_group_with_user(
            harpers, alva, PrivilegeCodes.CHANGE)
        assertUserGroupState(self, alva, [], [harpers], [])
        # isolated from group privilege CHANGE
        assertUserResourceState(self, alva, [], [], [])
        assertGroupResourceState(self, harpers, [bikes], [])
        george.uaccess.unshare_group_with_user(harpers, alva)
        # isolated from group privilege CHANGE
        assertUserResourceState(self, alva, [], [], [])
        george.uaccess.unshare_resource_with_group(bikes, harpers)
        assertGroupResourceState(self, harpers, [], [])
        # test upgrade privilege by non owners
        # let george (owner) grant change privilege to alva (non owner)
        george.uaccess.share_resource_with_user(
            bikes, alva, PrivilegeCodes.CHANGE)
        assertUserResourceState(self, alva, [], [bikes], [])
        # let alva (non owner) grant view privilege to john (non owner)
        alva.uaccess.share_resource_with_user(
            bikes, self.john, PrivilegeCodes.VIEW)
        assertUserResourceState(self, john, [], [], [bikes])
        assertResourceUserState(self, bikes, [george], [alva], [john])
        # let alva (non owner) grant change privilege (upgrade) to john (non
        # owner)
        alva.uaccess.share_resource_with_user(
            bikes, self.john, PrivilegeCodes.CHANGE)
        assertUserResourceState(self, john, [], [bikes], [])
        assertResourceUserState(self, bikes, [george], [alva, john], [])
        # test django admin has ownership permission over any resource when not
        # owning a resource
        self.assertFalse(admin.uaccess.owns_resource(bikes))
        self.assertEqual(
            bikes.raccess.get_effective_privilege(admin),
            PrivilegeCodes.OWNER)
        # test django admin can always view/change or delete any resource
        self.assertTrue(admin.uaccess.can_view_resource(bikes))
        self.assertTrue(admin.uaccess.can_change_resource(bikes))
        self.assertTrue(admin.uaccess.can_delete_resource(bikes))
        # test django admin can change resource flags
        self.assertTrue(admin.uaccess.can_change_resource_flags(bikes))
        # test django admin can share any resource with all possible permission
        # types
        self.assertTrue(
            admin.uaccess.can_share_resource(
                bikes, PrivilegeCodes.OWNER))
        self.assertTrue(
            admin.uaccess.can_share_resource(
                bikes, PrivilegeCodes.CHANGE))
        self.assertTrue(
            admin.uaccess.can_share_resource(
                bikes, PrivilegeCodes.VIEW))
        # test django admin can share a resource with a specific user
        admin.uaccess.share_resource_with_user(
            bikes, alva, PrivilegeCodes.OWNER)
        assertResourceUserState(self, bikes, [george, alva], [john], [])
        admin.uaccess.share_resource_with_user(
            bikes, alva, PrivilegeCodes.CHANGE)
        assertResourceUserState(self, bikes, [george], [john, alva], [])
        admin.uaccess.share_resource_with_user(
            bikes, alva, PrivilegeCodes.VIEW)
        assertResourceUserState(self, bikes, [george], [john], [alva])
        # test django admin can unshare a resource with a specific user
        admin.uaccess.unshare_resource_with_user(bikes, alva)
        assertResourceUserState(self, bikes, [george], [john], [])
        # test django admin can share a group with a user
        self.assertEqual(bikers.gaccess.members.count(), 1)
        self.assertFalse(admin.uaccess.owns_group(bikers))
        admin.uaccess.share_group_with_user(bikers, alva, PrivilegeCodes.OWNER)
        self.assertEqual(alva.uaccess.owned_groups.count(), 1)
        self.assertEqual(bikers.gaccess.members.count(), 2)
        # test django admin can share resource with a group
        self.assertFalse(
            admin.uaccess .can_share_resource_with_group(
                bikes, harpers, PrivilegeCodes.OWNER))
        self.assertTrue(
            admin.uaccess .can_share_resource_with_group(
                bikes, harpers, PrivilegeCodes.CHANGE))
        admin.uaccess.share_resource_with_group(
            bikes, harpers, PrivilegeCodes.CHANGE)
        self.assertTrue(bikes in harpers.gaccess.edit_resources)
        self.assertTrue(
            admin.uaccess .can_share_resource_with_group(
                bikes, harpers, PrivilegeCodes.VIEW))
        admin.uaccess.share_resource_with_group(
            bikes, harpers, PrivilegeCodes.VIEW)
        self.assertTrue(bikes in harpers.gaccess.view_resources)
        # test django admin can unshare a user with a group
        self.assertTrue(
            admin.uaccess.can_unshare_group_with_user(
                bikers, alva))
        admin.uaccess.unshare_group_with_user(bikers, alva)
        # NOTE(review): assertTrue(x, 1) treats 1 as the failure message and
        # passes whenever count() is truthy — probably meant assertEqual.
        self.assertTrue(bikers.gaccess.members.count(), 1)
        self.assertEqual(alva.uaccess.owned_groups.count(), 0)
    def test_share_inactive_user(self):
        """
        Inactive grantor can't grant permission
        Inactive grantee can't be granted permission
        """
        george = self.george
        alva = self.alva
        john = self.john
        bikes = self.bikes
        self.assertEqual(
            bikes.raccess.get_effective_privilege(alva),
            PrivilegeCodes.NONE)
        # inactive users can't be granted access
        # set john to an inactive user
        john.is_active = False
        john.save()
        with self.assertRaises(PermissionDenied):
            george.uaccess.share_resource_with_user(
                bikes, john, PrivilegeCodes.CHANGE)
        john.is_active = True
        john.save()
        # inactive grantor can't grant access
        # let's first grant John access privilege
        george.uaccess.share_resource_with_user(
            bikes, john, PrivilegeCodes.CHANGE)
        self.assertEqual(
            bikes.raccess.get_effective_privilege(john),
            PrivilegeCodes.CHANGE)
        john.is_active = False
        john.save()
        with self.assertRaises(PermissionDenied):
            john.uaccess.share_resource_with_user(
                bikes, alva, PrivilegeCodes.VIEW)
|
21,092 | 701ff2b1c765c4d8cf7c4649cd83a86ec252e17a | #!/home/wandapi/virtualenv/bin/python
import sys
from wandapi.command import api
if __name__ == '__main__':
sys.exit(api.main())
|
21,093 | 762dfad617e4e9f7ee2c32c2deab08a6d93b010b | # for M269 TMA01 Q4, The Open University, 2016
# version: 25 June 2016
# Jack Pincombe E5244151
from TMA01_Q4_MString import MString
def concatenate (first, second):
    """Append each character of *second* onto *first* (mutating it in
    place) and return the mutated *first* MString."""
    total = second.length()
    position = 0
    while position < total:
        first.addChar(second.getChar(position))
        position += 1
    return first
assert concatenate(MString("good"), MString("morning!")).toString() == "goodmorning!"
assert concatenate(MString("Open"), MString("University")).toString() == "OpenUniversity"
assert concatenate(MString("United"), MString("Kingdom")).toString() == "UnitedKingdom"
# add 2 tests here, in the same format
print("concatenate passed all tests")
|
21,094 | 4e9c9e1a33410b37452d4656aa011009be6844df |
# standard
from sys import argv
from socket import error as socketerror
# Project Hydra
from MySQLSetup import Hydra_rendertask, transaction, KILLED, READY, STARTED
from Connections import TCPConnection
from Questions import KillCurrentJobQuestion
from LoggingSetup import logger
def sendKillQuestion(renderhost, newStatus=KILLED):
    """Tries to kill the current task running on the renderhost. Returns True
    if successful, otherwise False.

    newStatus is the status code the remote node should record for the
    killed task (defaults to KILLED)."""
    logger.debug ('kill job on %s' % renderhost)
    connection = TCPConnection(hostname=renderhost)
    # Blocking request/response over TCP; socket errors propagate to callers.
    answer = connection.getAnswer(
        KillCurrentJobQuestion(newStatus))
    logger.debug("child killed: %s" % answer.childKilled)
    if not answer.childKilled:
        logger.debug("%r tried to kill its job but failed for some reason."
                     % renderhost)
    return answer.childKilled
def killJob(job_id):
    """Kills every task associated with job_id. Killed tasks have status code
    'K'. If a task was already started, an a kill request is sent to the host
    running it.
    @return: False if no errors while killing started tasks, else True"""
    # NOTE(review): SQL is built with %-interpolation; job_id is an int here
    # so injection risk is limited, but parameterized queries are preferred.
    # mark all of the Ready tasks as Killed
    with transaction() as t:
        t.cur.execute("""update Hydra_rendertask set status = 'K'
                        where job_id = '%d' and status = 'R'""" % job_id)
    # get hostnames for tasks that were already started
    tuples = None # @UnusedVariable
    with transaction() as t:
        t.cur.execute("""select host from Hydra_rendertask
                        where job_id = '%d' and status = 'S'""" % job_id)
        tuples = t.cur.fetchall()
    # make flat list out of single-element tuples fetched from db
    hosts = [t for (t,) in tuples]
    # send a kill request to each host, note if any failures occurred
    error = False
    for host in hosts:
        try:
            error = error or not sendKillQuestion(host)
        except socketerror:
            logger.debug("There was a problem communicating with {:s}"
                         .format(host))
            error = True
    return error
def resurrectJob(job_id):
    """Resurrects job with the given id. Tasks marked 'K' (killed) or 'F'
    (failed) have their statuses set back to 'R' (ready).

    Bug fix: SQL's AND binds tighter than OR, so the original WHERE clause
    parsed as ``(job_id = X AND status = 'K') OR status = 'F'`` and
    resurrected the failed tasks of *every* job. The OR term is now
    parenthesized so only this job's tasks are touched.
    """
    with transaction() as t:
        t.cur.execute("""update Hydra_rendertask
                        set status = 'R'
                        where job_id = '%d' and
                        (status = 'K' or status = 'F')""" % job_id)
def killTask(task_id):
    """Kills the task with the specified id. If the task has been started, a
    kill request is sent to the node running it.
    @return: True if there were no errors killing the task, else False."""
    # List-unpacking asserts exactly one row matches the id.
    [task] = Hydra_rendertask.fetch("where id = '%d'" % task_id)
    if task.status == READY:
        task.status = KILLED
        with transaction() as t:
            task.update(t)
        # if we reach this point: transaction successful, no exception raised
        return True
    elif task.status == STARTED:
        killed = sendKillQuestion(renderhost = task.host, newStatus = KILLED)
        # if we reach this point: TCPconnection successful, no exception raised
        return killed
    # Any other status (already killed, finished, ...) is reported as failure.
    return False
def resurrectTask(task_id, ignoreStarted = False):
    """Resurrects the task with the specified id.

    Tasks marked Killed ('K') or Failed ('F') — and Started ('S') tasks
    when ignoreStarted is True — have their host/time data cleared and
    their status set back to Ready ('R').
    @return: True if there was an error, such as when the user tries to
             resurrect a task that is marked as Started, else False."""
    [task] = Hydra_rendertask.fetch("where id = '%d'" % task_id)
    # Membership test replaces the chained or-comparison; `ignoreStarted`
    # is used directly instead of the non-idiomatic `== True`.
    if task.status in ('K', 'F') or (task.status == 'S' and ignoreStarted):
        task.status = 'R'
        task.host = None
        task.startTime = None
        task.endTime = None
    else:
        return True
    with transaction() as t:
        task.update(t)
    return False
if __name__ == '__main__':
    # Command-line entry point (Python 2 — note the print statement below):
    #   script.py kill <job_id>       -> killJob
    #   script.py resurrect <job_id>  -> resurrectJob
    if len(argv) == 3:
        cmd, job_id = argv[1], int(argv[2])
        if cmd == 'kill':
            killJob(job_id)
        elif cmd == 'resurrect':
            resurrectJob(job_id)
    else:
        print "Command line args: ['kill' or 'resurrect'] [job_id]"
|
21,095 | 9871cee1c4b250914b2dadf84e0d924341fa00f3 | """Fetching Data from database."""
from tornado.escape import recursive_unicode
import gramex
import pandas as pd
import json as js
import numpy as np
import refresh_data as rd
cache = gramex.service.cache.memory
def get_swing_data(handler):
    """Build the swing-chart payload for one state.

    Reads ``state`` and ``refresh`` from the request, pulls (optionally
    refreshed) results, keeps the top-2 candidates per constituency, and
    attaches per-constituency valid-vote totals plus the English->Hindi
    name map.
    """
    args = recursive_unicode(handler.request.arguments)
    # NOTE(review): this raises IndexError when no ?state= argument is
    # supplied. The previous ``args.get('state', ...)`` default was dead
    # code because this line always overwrote it, so it has been removed.
    state = handler.get_arguments('state')[0].lower()
    refresh = args.get('refresh', ['0'])[0]
    if refresh == '1':
        data = rd.refresh_data(state)
    else:
        data = rd.get_live_data(state)
    # Attach per-constituency total ("valid") votes and rename the
    # alliance column to Party.
    data = data.merge(data.groupby('ConstId')['Votes'].sum().reset_index().rename(
        columns={'Votes': 'ValidVotes'}), on='ConstId').rename(
        columns={'AllianceName': 'Party'})
    parties = list(data['Party'].unique())
    # Keep only the winner (rank 1) and runner-up (rank 2).
    data = data[data['Rank'] < 3]
    data['CandiStatus'] = data['Rank'].replace({1: 'WON', 2: 'LOST'})
    # Context manager guarantees the mapping file is closed even on error
    # (the original leaked the handle if js.loads raised).
    with open('data/eng_hindi_map.json') as map_file:
        hindi_mapping = js.load(map_file)
    output = {
        'data': data[['ConstId', 'ConstName', 'Party', 'CandiStatus', 'Votes',
                      'ValidVotes', 'Rank']].to_json(orient='records'),
        'parties': parties,
        'hindi_mapping': hindi_mapping
    }
    return output
|
21,096 | 1ba34b1a71327d2a1739bff9cef94c6df57284ba | # -*- coding: utf-8 -*-
# Copyright: (c) 2020, Garfield Lee Freeman (@shinmog)
class ModuleDocFragment(object):
    """Reusable Ansible documentation fragments, merged into module docs
    via `extends_documentation_fragment`."""
    # Standard files documentation fragment
    DOCUMENTATION = r'''
'''
    # Extra options for facts modules; check mode is unsupported by design.
    FACTS = r'''
notes:
    - As this is a facts module, check mode is not supported.
options:
    details:
        description:
            - Whether to retrieve full detailed results or not.
        type: bool
        default: false
'''
    # Common present/absent state option shared by stateful modules.
    STATE = r'''
options:
    state:
        description:
            - The state.
        type: str
        default: 'present'
        choices:
            - present
            - absent
'''
|
21,097 | 93a187445c68c7f0f55bc676c4ded8544a7b0eed | import numpy as np
def get_random_agent_reward(env, steps=50000):
    """
    Get the expected (per-step average) reward of following a random policy.

    ARGS
    ----
    env : gym.Env
        Environment considered.
    steps : int
        Number of steps for sampling.
    """
    step = 0
    total_rew = 0
    env.reset()
    while step < steps:
        # Interact with a uniformly random action.
        act = env.action_space.sample()
        # Bug fix: gym's step() returns (obs, reward, done, info). The
        # original unpacked `info` into `done`, so `if done:` tested the
        # info dict — falsy when empty — and episodes were never reset.
        _, rew, done, _ = env.step(act)
        # Update counters.
        total_rew += rew
        step += 1
        if done:
            env.reset()
    return total_rew / steps
def get_batch(env, agent, batch_len):
    """Roll out `agent` in `env` for `batch_len` steps.

    Returns
    -------
    obss : ndarray, shape (batch_len, *obs_shape)
        Observation seen at each step.
    acts : ndarray of int32, shape (batch_len,)
        Action the agent chose at each step.
    rets : ndarray, shape (batch_len,)
        For the non-terminal steps of each episode, that episode's length
        (number of non-terminal steps). Entries the slice assignments skip
        (each episode's terminal index, and the final index) are left at 0.
    """
    obss = np.empty((batch_len,) + env.observation_space.shape)
    acts = np.empty((batch_len,), dtype=np.int32)
    # zeros, not empty: the back-fill slices below exclude the terminal step
    # of each episode and the very last index, so np.empty would return
    # uninitialized (nondeterministic) memory in those slots.
    rets = np.zeros((batch_len,))
    t = 0
    obs = env.reset()
    for step in range(batch_len):
        obss[step] = obs.copy()
        acts[step] = agent.act(obs)
        obs, _, done, _ = env.step(acts[step])
        if done:
            # Back-fill the finished episode's length over its steps.
            rets[step - t: step] = t
            t = 0
            obs = env.reset()
        else:
            t += 1
    # Back-fill the trailing (possibly incomplete) episode; `step` is the
    # last loop index here (batch_len - 1).
    rets[step - t: step] = t
    return obss, acts, rets
if __name__ == '__main__':
import gym
env = gym.make('MountainCarContinuous-v0')
print(get_random_agent_reward(env)) |
21,098 | f9cf6904c8514a40d8245d3fcfef82a1648c74e7 | #N students take K apples and distribute them among each other evenly. The remaining (the undivisible)
# part remains in the basket. How many apples will each single student get? How many apples will remain in the basket?
#The program reads the numbers N and K. It should print the two answers for the questions above.
n = int(input(" number of student, n = "))
k = int(input("number of apples, k = "))
print("Numbers of apple evenly distributed among n number of students = ", k//n) #example of integer division
print("Number of undivisible apples = ", k % n) #example of getting remainder
|
21,099 | f61bd8b18304b96b125ba74a3f98712cf5d6b915 | from __future__ import absolute_import, print_function
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
from pymongo import MongoClient
import logging as lg
import pymongo
import json
#logging server settings
lg.basicConfig(filename="twiiter_debug_log.log",level=lg.DEBUG)
#MonogDB client connection
client = pymongo.MongoClient("mongodb://127.0.0.1:27017")
db = client.twitter
# Go to http://apps.twitter.com and create an app.
# The consumer key and secret will be generated for you after
consumer_key="sRfCM1D5RUetDCp3abBzQcS4P"
consumer_secret="z5uACcOznq0sV3rZmowzOe7oibwbRZijM5jWjBY0FQGBk2VhXA"
# After the step above, you will be redirected to your app's page.
# Create an access token under the the "Your access token" section
access_token="765052127873945600-NcssTtt0FkyUcAICXdJQ79quypsONsD"
access_token_secret="ZkgPgWUfv7x0Zx6jrF4llZUjHqzGXd4ZABeXjDsiulMOK"
class StdOutListener(StreamListener):
    """Stream listener that persists each incoming tweet to MongoDB.

    Despite the name, nothing is printed per tweet: every JSON payload
    received on the stream is decoded and handed to ``writemongo``.
    """

    def on_data(self, data):
        # Decode the raw JSON payload into a dict before storing it.
        tweet = json.loads(data)
        lg.debug("get a tweet")
        writemongo(tweet)
        # Returning True keeps the stream connection alive.
        return True

    def on_error(self, status):
        # Log the HTTP status from Twitter and surface a short console note.
        lg.debug("Error from twitter: " + str(status))
        print("Twitter Error!")
def writemongo(pass_data):
    """Insert one tweet document into the ``set4`` collection.

    On any insertion failure the error is logged and the process exits,
    matching the original fail-fast behaviour.
    """
    try:
        # BUG FIX: the original called the non-existent method `insertad`;
        # the pymongo API is `insert_one`, so every call raised and the
        # bare `except:` silently hid the typo.
        db.set4.insert_one(pass_data)
        lg.info("Tweet inserted to database")
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt still
        # propagate; log the traceback instead of swallowing it.
        lg.exception("Error while MonogDB insertion")
        print("Program End- Bcz mongoDB error!")
        exit()
if __name__ == '__main__':
    print("Program started")
    # Listener that forwards every matching tweet into MongoDB.
    l = StdOutListener()
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    # '-'-separated list of food/drink keywords used as the stream filter.
    # NOTE(review): the literal below appears to contain a raw line break
    # after "French " — a syntax error in a plain string; verify against the
    # original file (likely a lost trailing backslash or a paste artifact).
    word_string="bacon-beef-chicken-cooked-meat-duck-ham-kidneys-lamb-liver-mince-minced-pate-salami-sausages-pork-pork-pie-sausage-roll-turkey-veal-butter-cream-cheese-blue-cottage-goats-creme-fraiche-eggs-free-range-eggs-margarine-milk-full-fat-milk-semi-skimmed-milk-skimmed-milk-sour-cream-yoghurt-apple-apricot-banana-blackberry-blackcurrant-blueberry-cherry-coconut-fig-gooseberry-grape-grape-fruit-kiwi-lemon-lime-mango-melon-orange-peach-pear-pineapple-plum-pomegranate-raspberry-redcurrant-rhubarb-strawberry-bunch-bananas-bunch-grapes-grap-baguette-bread-rolls-brown-white-garlic-pitta-loaf-bread-sliced-cake-Danish-pastry-quiche-sponge-baking-powder-plain-flour-self-raising-cornflour-sugar-brown-icing-pastry-yeast-dried-apricots-prunes-dates-raisins-sultanas-anchovy-cod-haddock-herring-kipper-smoked-fish-usually-herring-mackerel-pilchard-plaice-salmon-sardine-smoked-salmon-sole-trout-tuna-artichoke-asparagus-aubergine-avocado-beansprouts-beetroot-broad-beans-broccoli-Brussels-sprouts-cabbage-carrot-cauliflower-celery-chilli-pepper-courgette-cucumber-French 
beans-garlic-ginger-leek-lettuce-mushroom-onion-peas-pepper-potato-potatoes-pumpkin-radish-rocket-runner-beans-swede-sweet-potato-sweetcorn-tomato-tomatoes-turnip-spinach-spring-onion-squash-clove-garlic-stick-celery-baked-beans-corned-beef-kidney-beans-soup-tinned-tomatoes-chips-fish-fingers-frozen-peas-frozen-pizza-ice-cream-cooking-oil-olive-stock-cubes-tomato-puree-breakfast-cereal-cornflakes-honey-jam-marmalade-muesli-porridge-toast-noodles-pasta-sauce-pizza-rice-spaghetti-ketchup-mayonnaise-mustard-pepper-salad-dressing-salt-vinaigrette-vinegar-biscuits-chocolate-crisps-hummus-nuts-olives-peanuts-sweets-walnuts-basil-chives-coriander-dill-parsley-rosemary-sage-thyme-chilli-powder-cinnamon-cumin-curry-nutmeg-paprika-saffron-organic-ready-meal-bar-bottle-milk-bear-wine-wisky-takila-shot-alcohol-pepsi-cocola-soda-juce-fruits-vegetables-fruite-veggi-veg-vegetable-rum-carton-eggs-carbohydrate-chanterelle-chow-comestibles-comfort-food-concoction-convenience-food-cordon-bleu-delicacy-diabetic-dietary-fibre-doggy-bag-dry-goods-eats-fare-fast-food-fayre-fibre-food-foodstuff-fruit-fruits-greengrocer-grocer-grocery-grub-health-food-junk-food-morel-morsel-mouthful-munch-non-dairy-nosh-superfood-taste-tidbit-titbit-tuck-tucker-victuals-whole-teasty-yummy-yum-yummm-love-it-loved-fabulous"
    stream = Stream(auth, l)
    # Blocks forever, delivering tweets matching any keyword to the listener.
    stream.filter(track=word_string.split("-"))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.