# text stringlengths 38 1.54M  (dataset export header — not code)
# ---
from django.contrib import admin
from Pingme.models import FollowUser, MyPost, MyProfile, PostComment, PostLike
from django.contrib.admin.options import ModelAdmin


class FollowUserAdmin(ModelAdmin):
    """Admin configuration for follower/followed relations."""
    list_display = ["profile", "followed_by"]
    search_fields = ["profile", "followed_by"]
    list_filter = ["profile", "followed_by"]


class MyPostAdmin(ModelAdmin):
    """Admin configuration for user posts."""
    list_display = ["subject", "cr_date", "uploaded_by"]
    search_fields = ["subject", "msg", "uploaded_by"]
    list_filter = ["cr_date", "uploaded_by"]


class MyProfileAdmin(ModelAdmin):
    """Admin configuration for user profiles."""
    list_display = ["name"]
    search_fields = ["name", "status", "phone_no"]
    list_filter = ["status", "gender"]


class PostCommentAdmin(ModelAdmin):
    """Admin configuration for post comments."""
    list_display = ["post", "msg"]
    search_fields = ["msg", "post", "commented_by"]
    list_filter = ["cr_date", "flag"]


class PostLikeAdmin(ModelAdmin):
    """Admin configuration for post likes."""
    list_display = ["post", "liked_by"]
    search_fields = ["post", "liked_by"]
    list_filter = ["cr_date"]


# Register each model with its customized admin, in the original order.
admin.site.register(FollowUser, FollowUserAdmin)
admin.site.register(MyPost, MyPostAdmin)
admin.site.register(MyProfile, MyProfileAdmin)
admin.site.register(PostComment, PostCommentAdmin)
admin.site.register(PostLike, PostLikeAdmin)
|
# Seed the question and answer banks (appends on every run, as before).
QUESTIONS = [
    "How many vowels are there?\n",
    "How many prime numbers are there within 100?\n",
    "How many alphabets are there?\n",
    "How many sense organs are there?\n",
    "How many consonants are there?\n",
]
ANSWERS = ["5\n", "29\n", "26\n", "5\n", "21\n"]

with open("Questions.txt", "a") as question_bank:
    question_bank.writelines(QUESTIONS)
with open("Answers.txt", "a") as answer_bank:
    answer_bank.writelines(ANSWERS)

# Interactive quiz: record the student's answers and score them.
score = 0
print("Enter name")
name = input()
student_file = open("Student.txt", "a")
marks_file = open("Marks.txt", "a")
answer_file = open("Answers.txt", "r")
key = []
question_index = 0
with open("Questions.txt", "r") as question_file:
    for line in question_file:
        print(line)
        response = input()
        student_file.write(response)
        student_file.write("\n")
        if question_index == 0:
            # Load the whole answer key on the first question.
            key = list(answer_file.read().split())
        if key[question_index] == response:
            score += 1
        else:
            print("Oops,Wrong answer!!!!")
            print("Correct answer is: ", key[question_index])
        question_index += 1
marks_file.write(name + ": ")
marks_file.write(str(score))
marks_file.write("\n")
print("Your marks are:", score)
marks_file.close()
student_file.close()
answer_file.close()
# 2). Solution :-
import time

# Menu/banner for the quiz. The blanks are filled via the options shown
# per question; 6 prints the score, 7 quits.
print("Welcome To English Tutor")
print("=========================================")
print("Here is your English test paper")
print("1. I ___ watching TV when Paul and Simon arrived.")
print("2. Do you think he ___ what I said?")
print("3.She ___ to learn English in Malta next summer.")
print("4.I don't think I've ever ___ on that sofa.")
print("5. Tom ___ tired. ")
print("6.To get score")
print("7.To quit")
time.sleep(2)

c = 0       # number of correct answers
count = 0   # number of questions attempted

# question number -> (options prompt, correct answer).
# FIX: the original compared ints with `is` (identity), which is an
# implementation detail and unreliable; use `==`/membership instead.
# Also removed a dead tuple assignment that was immediately overwritten.
QUESTION_BANK = {
    1: ("options:isam,was,were", "was"),
    2: ("options:understanding,understood,understand", "understood"),
    3: ("options:hopes,hope,hoping", "hopes"),
    4: ("options:sat,sit,sitting", "sat"),
    5: ("options:looks,looking,look ", "looks"),
}

while True:
    question = int(input("Question no: "))
    if question in QUESTION_BANK:
        options, correct = QUESTION_BANK[question]
        print(options)
        answer = input("enter answer: ")
        count += 1
        if answer == correct:
            print("correct")
            c = c + 1
        else:
            print("wrong")
    elif question == 6:
        print(c, "out of {}".format(count))
    elif question == 7:
        break
    else:
        print("English tutor: I don't understand what you said")
|
import pyrealsense2 as rs
import numpy as np
import cv2

# Configure all four streams at 15 fps.
# (10, 15 or 30 fps work; 20 or 25 raise errors; other rates untested.)
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 15)
config.enable_stream(rs.stream.infrared, 1, 640, 480, rs.format.y8, 15)
config.enable_stream(rs.stream.infrared, 2, 640, 480, rs.format.y8, 15)
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 15)
profile = pipeline.start(config)

# Get the depth sensor's depth scale (see rs-align example for explanation).
depth_sensor = profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()
print("Depth Scale is: ", depth_scale)

clipping_distance_in_meters = 1  # 1 meter
clipping_distance = clipping_distance_in_meters / depth_scale

# rs.align performs alignment of depth frames to another stream;
# "align_to" is the stream type to which depth frames are aligned.
align_to = rs.stream.color
align = rs.align(align_to)

try:
    while True:
        frames = pipeline.wait_for_frames()
        # Align the depth frame to the color frame.
        aligned_frames = align.process(frames)

        aligned_depth_frame = aligned_frames.get_depth_frame()  # 640x480 depth image
        if not aligned_depth_frame:
            continue
        depth_frame = np.asanyarray(aligned_depth_frame.get_data())
        # Convert depth to a pseudo-color map for easier viewing.
        depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_frame, alpha=0.03), cv2.COLORMAP_JET)
        cv2.imshow('1 depth', depth_colormap)

        # Color frame.
        color_frame = aligned_frames.get_color_frame()
        if not color_frame:
            continue
        color_frame = np.asanyarray(color_frame.get_data())
        cv2.imshow('2 color', color_frame)

        # Left infrared frame.
        left_frame = frames.get_infrared_frame(1)
        if not left_frame:
            continue
        left_frame = np.asanyarray(left_frame.get_data())
        cv2.imshow('3 left_frame', left_frame)

        # Right infrared frame.
        right_frame = frames.get_infrared_frame(2)
        if not right_frame:
            continue
        right_frame = np.asanyarray(right_frame.get_data())
        cv2.imshow('4 right_frame', right_frame)

        # ESC (ascii 27) closes the windows and leaves the loop.
        c = cv2.waitKey(1)
        if c == 27:
            cv2.destroyAllWindows()
            break
finally:
    # Stop streaming.
    pipeline.stop()

# Depth colorization: https://github.com/IntelRealSense/librealsense/blob/jupyter/notebooks/distance_to_object.ipynb
# Alignment: https://github.com/IntelRealSense/librealsense/blob/master/wrappers/python/examples/align-depth2color.py
# Left/right frames: https://blog.csdn.net/Hanghang_/article/details/102489762
# Other references: https://blog.csdn.net/Dontla/article/details/102701680
|
import typer
from typing import Optional
from .. import config

# Program
program = typer.Typer()


def _version_callback(value: bool):
    """`--version` option callback: print the version and exit.

    FIX: this function was originally also named `version` and was shadowed
    by the CLI callback below; renamed to remove the name collision.
    """
    if value:
        typer.echo(config.VERSION)
        raise typer.Exit()


@program.callback()
def version(
    version: Optional[bool] = typer.Option(None, "--version", callback=_version_callback),
):
    """Interact with dfour from the command line."""
    pass
# @program.command(
# name="login",
# help="Store login credentials in a config file. Alternatively use --username/-u, --password/-p and --endpoint/-e options.",
# short_help="Store login credentials",
# )
# def program_login():
# """
# Login to dfour via the command line.
# """
# home_dir = os.path.expanduser("~")
# conf_dir = ".config/dfour"
# conf_file = "config.json"
# # Check for existing config.json file
# if os.path.exists(f"{home_dir}/{conf_dir}/{conf_file}"):
# typer.secho("Login successful.", fg=typer.colors.GREEN, bold=True)
# else:
# is_stdin = True
# typer.secho("No login credentials found.", err=True, fg=typer.colors.YELLOW, bold=True)
# typer.secho(f"Creating empty credentials in {home_dir}/{conf_dir}/{conf_file}")
# typer.secho("")
# endpoint = "https://sandbox.dfour.space"
# endpoint_input = input(f"Enter dfour endpoint [{endpoint}]: ")
# if endpoint_input != "":
# endpoint = endpoint_input
# username = ""
# password = ""
# while username == "":
# username = input("Enter your dfour username: ")
# while password == "":
# password = getpass.getpass("Enter your dfour password: ")
# credentials = {
# "endpoint": endpoint,
# "username": username,
# "password": password,
# }
# if not os.path.exists(f"{home_dir}/{conf_dir}"):
# os.mkdir(f"{home_dir}/{conf_dir}")
# with open(f"{home_dir}/{conf_dir}/{conf_file}","w") as f:
# json.dump(credentials, f)
# typer.secho("Login successful.", fg=typer.colors.GREEN, bold=True)
# raise typer.Exit()
|
# -*- encoding: UTF-8 -*-
# Localized UI strings for the French spelling dictionary chooser.
# FIX: corrected English typos in the "en" strings ("enable to retrieve" ->
# "unable to retrieve", "erronous" -> "erroneous", "who doesn't" -> "who don't").
# French strings are unchanged.
dStrings = {
    "fr": {
        "title": u"Orthographe française",
        "choose": u"Choisissez un dictionnaire",
        "select": u"Utiliser ce dictionnaire",
        "moderne": u"“Moderne”",
        "classique": u"“Classique” (recommandé)",
        "reforme1990": u"“Réforme 1990”",
        "toutesvariantes": u"“Toutes variantes”",
        "descModern": u"Ce dictionnaire propose l’orthographe telle qu’elle est écrite aujourd’hui le plus couramment. C’est le dictionnaire recommandé si le français n’est pas votre langue maternelle ou si vous ne désirez qu’une seule graphie correcte par mot.",
        "descClassic": u"Il s’agit du dictionnaire “Moderne”, avec des graphies classiques en sus, certaines encore communément utilisées, d’autres désuètes. C’est le dictionnaire recommandé si le français est votre langue maternelle.",
        "descReform": u"Avec ce dictionnaire, seule l’orthographe réformée est reconnue. Attendu que bon nombre de graphies réformées sont considérées comme erronées par beaucoup, ce dictionnaire est déconseillé. Les graphies passées dans l’usage sont déjà incluses dans le dictionnaire “Moderne”.",
        "descAllvar": u"Ce dictionnaire contient les variantes graphiques, classiques, réformées, ainsi que d’autres plus rares encore. Ce dictionnaire est déconseillé à ceux qui ne connaissent pas très bien la langue française.",
        "restart": u"Le changement ne sera effectif qu’après le redémarrage du logiciel.",
        "error": u"Erreur : impossible de savoir quel dictionnaire est actif."
    },
    "en": {
        "title": u"French spelling",
        "choose": u"Choose a dictionary",
        "select": u"Use this dictionary",
        "moderne": u"“Modern”",
        "classique": u"“Classic” (recommended)",
        "reforme1990": u"“Reform 1990”",
        "toutesvariantes": u"“All variants”",
        "descModern": u"This dictionary offers the French spelling as it is written nowadays most often. This is the recommended dictionary if French is not your mother tongue or if you want only one correct spelling per word.",
        "descClassic": u"This is the “Modern” dictionary plus classical spellings, some of them still widely used, others obsolete. This is the recommended dictionary if French is your native language.",
        "descReform": u"With this dictionary, only the reformed spelling is recognized. As many reformed spellings are considered erroneous by many people, this dictionary is unadvised. Reformed spellings commonly used are already included in the “Modern” dictionary.",
        "descAllvar": u"This dictionary contains all spelling variants, classical and reformed, and some others even rarer. This dictionary is unadvised for those who don’t know the French language very well.",
        "restart": u"The modification will be effective only after restarting the software.",
        "error": u"Error: unable to retrieve information about the current dictionary."
    }
}
|
class CreditCard:
    """A credit-card number with brand detection and Luhn validation."""

    def __init__(self, number=""):
        # Store as string so indexing/slicing works for any input type.
        self.number = str(number)

    def checkLength(self):
        """Return True if the number has 15 (Amex) or 16 digits."""
        return len(self.number) in (15, 16)

    def determineCardType(self):
        """Return the card brand, or "Not Valid" for unknown prefix/length.

        FIX: the original tested `self.checkLength` (the bound method, always
        truthy) instead of calling it, so the length check never ran.
        """
        if self.checkLength():
            if self.number[0] == '4':
                return "Visa"
            elif self.number[0:2] in ['51', '52', '53', '54', '55']:
                return "MasterCard"
            elif self.number[0:2] in ["34", "37"]:
                return "Amex"
            elif self.number[0:4] == "6011":
                return "Discover Card"
        return "Not Valid"

    def validate(self):
        """Return True when the number passes brand detection and the Luhn check.

        FIX: the original compared the bound method `self.determineCardType`
        to a string (always False), so invalid brands were never rejected.
        """
        if self.determineCardType() == "Not Valid":
            return False
        # Luhn: double every second digit from the right, fold >9 back to a digit.
        doubles = [int(d) * 2 for d in self.number[-2::-2]]
        digit_sum = [x % 10 + 1 if x > 9 else x for x in doubles]
        total = sum(int(d) for d in self.number[-1::-2]) + sum(digit_sum)
        return total % 10 == 0
def main():
    """Validate a few sample card numbers and print the results.

    FIX: the first check originally called myCard2.validate() while printing
    myCard's details; it now validates the card it reports on.
    """
    myCard = CreditCard('5515460934365316')
    myCard2 = CreditCard('379179199857686')
    myCard3 = CreditCard('4929896355493470')
    fake = CreditCard('5215460934865316')
    if myCard.validate():
        print("The", myCard.determineCardType(), ", ", myCard.number, " is a valid credit card.")
    else:
        print(myCard.number, " is NOT a valid credit card.")
    if myCard2.validate():
        print("The", myCard2.determineCardType(), ", ", myCard2.number, ", is a valid credit card.")
    else:
        print(myCard2.number, ", is NOT a valid credit card.")
    if fake.validate():
        print("The", fake.determineCardType(), ", ", fake.number, ", is a valid credit card.")
    else:
        print(fake.number, ", is NOT a valid credit card.")
    if myCard3.validate():
        print("The", myCard3.determineCardType(), ", ", myCard3.number, ", is a valid credit card.")
    else:
        print(myCard3.number, ", is NOT a valid credit card.")


main()
|
# Generated by Django 3.2 on 2021-05-11 17:45
from django.db import migrations, models


class Migration(migrations.Migration):
    """Drop the legacy name/password columns and add task fields on `user`."""

    dependencies = [
        ('backend', '0003_alter_user_tasktype'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='user',
            name='name',
        ),
        migrations.RemoveField(
            model_name='user',
            name='password',
        ),
        migrations.AddField(
            model_name='user',
            name='taskDescription',
            field=models.CharField(default='', max_length=500, verbose_name='TaskDescription'),
        ),
        migrations.AddField(
            model_name='user',
            name='taskName',
            field=models.CharField(default='', max_length=100, verbose_name='TaskName'),
        ),
    ]
|
class Userinfo:
    """Plain data holder for one user's training-profile inputs."""

    def __init__(self, name, gender, age, weight, height, objective, preferred_time, use_DNA_data, use_wearable_data, term):
        # Identity and body measurements.
        self.name = name
        self.gender = gender
        self.age = age
        self.weight = weight
        self.height = height
        # Training preferences.
        self.objective = objective
        self.preferred_time = preferred_time
        self.term = term
        # Opt-in data-source flags.
        self.use_DNA_data = use_DNA_data
        self.use_wearable_data = use_wearable_data
|
from rest_framework import serializers


class EmailValidSerializer(serializers.Serializer):
    """Serialize/validate a single email address."""

    email = serializers.EmailField()
|
from django.contrib import admin
from portfolio.models import ImageGallery

# Expose ImageGallery in the admin with the default ModelAdmin options.
admin.site.register(ImageGallery)
from hackman_notifier import api as notification_api
from django.core.management.base import BaseCommand
from django_redis import get_redis_connection
from hackman_rfid import api as rfid_api
from hackman import api as hackman_api
import json


class Command(BaseCommand):
    """Consume RFID card reads and open the door for valid, paid-up users."""

    def handle(self, *args, **kwargs):
        redis_conn = get_redis_connection('default')
        for card_hash, rawdata in rfid_api.cards_read():
            card = rfid_api.card_validate(card_hash)
            if not card:
                # Unknown card hash — ignore the read.
                continue
            if not card.user_id:
                # Card is known but not paired to a user: remember it in
                # redis and broadcast a CARD_UNPAIRED door event.
                redis_conn.set('rfid_last_unpaired', card.id)
                notification_api.notify_subject(b'door_event', json.dumps({
                    'event': 'CARD_UNPAIRED',
                    'card_id': card.id,
                    'rawdata': rawdata.hex(),
                }))
                continue
            # TODO:
            # - lookup user_name and send it to the door open
            hackman_api.door_open_if_paid(
                card.user_id,
                source="CARD",
                rawdata=rawdata,
            )
|
from django.contrib import admin
from django.urls import include, path
from django.views.generic.base import RedirectView

# Site root redirects to the calculator app; admin lives under /admin/.
urlpatterns = [
    path('basic_calc/', include('basic_calc.urls')),
    path('', RedirectView.as_view(url='/basic_calc/')),
    path('admin/', admin.site.urls),
]
|
import json
import math

# Load the k-NN "model": JSON mapping each digit "0".."9" to a list of
# training images, each image being a flat list of pixel values (0-255 assumed).
with open("ml_model.json", "r") as json_file:
    json_data = json.load(json_file)
def predict_num(image_data):
    """Predict the digit in *image_data* with a rank-weighted 7-nearest-neighbor vote.

    Parameters: image_data — flat pixel list comparable to the training images.
    Returns: (digit_string, confidence_percent) where confidence is the share
    of the 7 neighbors that voted for the winning digit.

    FIX: removed an unreachable second `return` that followed the first one,
    and the unused enumerate index in the training-image loop.
    """
    # 7 best (distance, digit) pairs, kept sorted ascending by distance.
    closest = [(math.inf, None) for _ in range(7)]
    for num in range(10):
        num = str(num)
        for pixels in json_data[num]:
            distance = find_distance(pixels, image_data)
            for index, obj in enumerate(closest):
                if distance < obj[0]:
                    closest.insert(index, (distance, num))
                    closest.pop(-1)
                    break
    # Rank-weighted vote: nearer neighbors contribute more.
    counters = {str(i): 0 for i in range(10)}
    for index, i in enumerate(closest):
        counters[i[1]] += 1 / ((index + 1) * 0.8)
    largest = (-math.inf, None)
    for key in counters.keys():
        if counters[key] > largest[0]:
            largest = (counters[key], key)
    # Raw neighbor counts per digit, used as the confidence estimate.
    each_prob = {str(i): 0 for i in range(10)}
    for num in range(10):
        for obj in closest:
            if obj[1] == str(num):
                each_prob[str(num)] += 1
    print(each_prob)
    return (largest[1], round(each_prob[largest[1]] / 7 * 100, 2))
def find_distance(image1, image2):
    """Euclidean distance between two images after scaling pixels to [0, 1]."""
    scaled_first = [pixel / 255 for pixel in image1]
    scaled_second = [pixel / 255 for pixel in image2]
    return math.dist(scaled_first, scaled_second)
from django.conf.urls import url
from profiles import views

# FIX: regex patterns were plain strings containing "\w"/"\." escapes, which
# Python flags as invalid escape sequences (SyntaxWarning, a future error);
# raw strings keep the regex text byte-identical.
urlpatterns = [
    url(
        regex=r"^edit/$",
        view=views.ProfileEditUpdateView.as_view(),
        name="profile_edit"
    ),
    url(
        regex=r"^confirm_role/(?P<membership_id>[-\w]+)/(?P<action>verify|deny)/$",
        view=views.profile_confirm_role,
        name="profile_confirm_role",
    ),
    url(
        regex=r"^deny_account/(?P<type_name>[\w]+)/(?P<account_name>[-\.\w]+)/$",
        view=views.profile_deny_account,
        name="profile_deny_account",
    ),
    url(
        regex=r"^confirm/$",
        view=views.profile_confirm,
        name="profile_confirm",
    ),
    url(r"^$", views.profile_list, name="profile_list"),
    url(r"^(?P<github_account>[-\w]+)/$", views.profile_detail, name="profile_detail"),
    url(r"^github/(?P<github_account>[-\w]+)/$", views.profile_detail, name="github_profile_detail"),
    url(r"^steem/(?P<steem_account>[-\.\w]+)/$", views.profile_detail, name="steem_profile_detail"),
    url(r"^id/(?P<id>[-\w]+)/$", views.profile_detail, name="id_profile_detail"),
]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
#~ __NAME = '..\opengl\samples.txt'
#~ __NAME = '..\\opengl\\test.txt'
__NAME = 'haired2.dat'
__DIVK = 1.


def main():
    """Plot 9 space-separated channels from __NAME (header line skipped).

    FIX: replaced Python-2-only `xrange` with `range` (works on both 2 and 3)
    and renamed locals that shadowed the builtins `min`, `max` and `file`.
    """
    app = QtGui.QApplication([])  # keep a reference so Qt stays alive
    legends = ['gx', 'gy', 'gz', 'ax', 'ay', 'az', 'yaw', 'pitch', 'roll']
    colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 0, 255), (255, 255, 0),
              (0, 255, 255), (200, 200, 200), (50, 100, 150), (200, 10, 50)]
    y = [[] for _ in range(9)]
    points = 0
    vmin = 0
    vmax = 0
    # Accelerometer channels (indices 3-5) are divided by __DIVK.
    divisors = (1., 1., 1., __DIVK, __DIVK, __DIVK, 1., 1., 1.)
    data_file = open(__NAME, "r")
    data_file.readline()  # skip the header line
    while 1:
        line = data_file.readline()[:-1]
        if not line:
            break
        fields = line.split(' ')
        # Track the global min/max over the raw (undivided) values.
        for i in range(len(y)):
            value = float(fields[i])
            if value > vmax:
                vmax = value
            if value < vmin:
                vmin = value
        for i in range(len(y)):
            y[i].append(float(fields[i]) / divisors[i])
        points += 1
    data_file.close()

    p = pg.plot()
    p.setTitle('loaded' + __NAME)
    p.setRange(QtCore.QRectF(0, vmin - 1, points, vmax - vmin + 1))
    p.setLabel('bottom', 'Index', units='samples')
    p.addLegend()
    plt_grid = pg.GridItem()
    p.addItem(plt_grid)
    curves = []
    for i in range(len(y)):
        curves.append(p.plot(pen=colors[i], name=legends[i]))
        curves[i].setData(y[i])

    ## Start Qt event loop unless running in interactive mode.
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()


if __name__ == '__main__':
    main()
|
# Nested-list indexing demo: each inner list is [modifier, food] for one dish.
breakfast = [['French', 'toast'], ['blueberry', 'pancakes'], ['scrambled', 'eggs']]
print(breakfast)
# Negative indices count from the end: [-2] is the middle dish,
# and [-2] again is that dish's first word ('blueberry').
answer = breakfast[-2][-2]
print(answer)
# Bare expression: evaluates the same element and discards it (no effect).
breakfast[-2][-2]
|
from TwitterAPI import TwitterAPI

# give required details
CONSUMER_KEY = ''
CONSUMER_SECRET = ''
ACCESS_TOKEN_KEY = ''
ACCESS_TOKEN_SECRET = ''

# FIX: build the API client once instead of on every loop iteration, and
# close each image file via a context manager (the original leaked handles).
api = TwitterAPI(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN_KEY, ACCESS_TOKEN_SECRET)

b = 1
while True:
    b = b + 1  # first uploaded image is 2.jpg, matching the original numbering
    with open('/home/pi/cam/imgs/' + str(b) + '.jpg', 'rb') as image_file:
        data = image_file.read()
    r = api.request('statuses/update_with_media', {'status': '#pyTweetCMR'}, {'media[]': data})
    print(r.status_code)
|
#!/usr/bin/env python
# coding: utf-8
"""Display LIDAR data from specific node.
Attributes
----------
ANGLE_MAX : int
maximum angle
ANGLE_MIN : int
minimum angle
angles : list
list of available angles
ctr : int
counter of displayed data sets
intensities : list
intensities from scan data
INTENSITY_MAX : int
maximum intensity
INTENSITY_MIN : int
minimum intensity
phist : plot
histogram plot
pintensities : plot
intensity plot
pranges : plot
range plot
preflectivities : plot
reflectivity plot
RANGE_MAX : int
maximum range
RANGE_MIN : int
minimum range
ranges : list
ranges from scan data
reflectivities : list
reflectivities from scan data
SETS : int
number of displayed scans
TOPIC : str
which topic should be listened to
"""
import rospy
from sensor_msgs.msg import LaserScan
import pyqtgraph as pg
import numpy as np
from turtlebot3_charge import calcReflectivities
from turtlebot3_charge import reorder
from turtlebot3_charge import getThreshold
from find_spots import THRESHOLD
# ---- view settings (tune what is displayed) ----
TOPIC = "scan_filtered" # which topic should be listened to
SETS = 1 # number of displayed scans
ANGLE_MIN = -180 # min displayed angle
ANGLE_MAX = 180 # max displayed angle
RANGE_MIN = 0 # min displayed range
RANGE_MAX = 4 # max displayed range
INTENSITY_MIN = 0 # min displayed intensity
INTENSITY_MAX = 15000 # max displayed intensity
# ---- global scan state, overwritten by callback() on each LaserScan ----
ctr = 0 # counter for scans
angles = list(range(-180, 180))
ranges = list(np.zeros(360))
intensities = list(np.zeros(360))
reflectivities = list(np.zeros(360))
# setup windows (white background, black foreground)
pg.setConfigOption("background", "w")
pg.setConfigOption("foreground", "k")
# setup ranges plot
# NOTE(review): setRange is passed range objects; pyqtgraph's setRange
# normally expects xRange=[min, max] pairs — confirm this renders as intended.
pranges = pg.plot()
pranges.setTitle("Ranges")
pranges.setRange(xRange=range(ANGLE_MIN, ANGLE_MAX),
yRange=range(RANGE_MIN, RANGE_MAX))
# setup intensities plot
pintensities = pg.plot()
pintensities.setTitle("Intensities")
pintensities.setRange(xRange=range(ANGLE_MIN, ANGLE_MAX),
yRange=range(INTENSITY_MIN, INTENSITY_MAX))
# setup reflectivities plot
preflectivities = pg.plot()
preflectivities.setTitle("Reflectivities")
preflectivities.setRange(xRange=range(ANGLE_MIN, ANGLE_MAX),
yRange=range(10000))
# setup histogram plot
phist = pg.plot()
phist.setTitle("Reflectivity Histogram")
phist.setRange(xRange=range(10000), yRange=range(360))
def callback(data):
    """Store the newest LaserScan in the module-level buffers.

    Parameters
    ----------
    data : LaserScan
        laser data from LIDAR
    """
    global angles, ranges, intensities, reflectivities
    # Convert the message tuples to mutable lists.
    intensities = list(data.intensities)
    ranges = list(data.ranges)
    # Mark invalid readings: NaN or non-positive ranges become -1.0 with
    # zero intensity so they are obvious in the plots.
    for idx in range(len(intensities)):
        if str(ranges[idx]) == "nan" or ranges[idx] <= 0.0:
            ranges[idx] = -1.0
            intensities[idx] = 0
    reflectivities = list(calcReflectivities(ranges, intensities))
    # Reorder all three series to match the displayed angle axis.
    intensities = reorder(intensities)
    ranges = reorder(ranges)
    reflectivities = reorder(reflectivities)
def update():
    """Redraw all four plots from the most recent scan buffers.

    Accumulates up to SETS scans per plot, then clears and starts over.
    Also draws the adaptive intensity/reflectivity threshold lines, in red
    when the adaptive value drops below the constant THRESHOLD.

    FIX: the clear-branch preflectivities.plot call omitted symbolSize=5,
    unlike every other scatter call; the shared marker kwargs make all
    eight calls consistent.
    """
    global ctr
    # calc reflectivity histogram data
    rhist, redges = np.histogram(reflectivities)
    # Shared scatter style for every data plot.
    marker = dict(pen=None, symbol="o", symbolSize=5,
                  symbolBrush=(255, 0, 0, 255))
    if ctr <= SETS:  # add another scan on top of the existing ones
        pintensities.plot(angles, intensities, **marker)
        pranges.plot(angles, ranges, **marker)
        preflectivities.plot(angles, reflectivities, **marker)
        phist.plot(redges[1:], rhist, **marker)
    else:  # clear each graph, then add the current scan
        pintensities.plot(angles, intensities, clear=True, **marker)
        pranges.plot(angles, ranges, clear=True, **marker)
        preflectivities.plot(angles, reflectivities, clear=True, **marker)
        phist.plot(redges[1:], rhist, clear=True, **marker)
        ctr = 0
    # calc adaptive thresholds
    intensities_threshold = getThreshold(intensities)
    reflectivities_threshold = getThreshold(reflectivities)
    rospy.logdebug("display_sensordata: "
                   "\n\tintensity threshold is {} "
                   "\n\treflectivity threshold is {}".format(
                       intensities_threshold, reflectivities_threshold))
    # Constant-valued lines across the whole angle axis.
    intensities_threshold_list = [intensities_threshold for _ in angles]
    reflectivities_threshold_list = [reflectivities_threshold for _ in angles]
    # draw red line if adaptive threshold is below const threshold
    if intensities_threshold >= THRESHOLD:
        pintensities.plot(angles, intensities_threshold_list, symbol=None)
    else:
        pintensities.plot(angles, intensities_threshold_list, symbol=None,
                          pen="r")
    if reflectivities_threshold >= THRESHOLD:
        preflectivities.plot(angles, reflectivities_threshold_list,
                             symbol=None)
    else:
        preflectivities.plot(angles, reflectivities_threshold_list,
                             symbol=None, pen="r")
    ctr += 1
    pg.QtGui.QApplication.processEvents()
def listener():
    """Subscribe to TOPIC and refresh the plots at 10 Hz until shutdown."""
    rospy.init_node("display_sensordata", anonymous=True,
                    log_level=rospy.DEBUG)
    rospy.Subscriber(TOPIC, LaserScan, callback)
    refresh = rospy.Rate(10.0)
    while not rospy.is_shutdown():
        update()
        refresh.sleep()


def main():
    """Run listener."""
    listener()


if __name__ == "__main__":
    main()
|
"""
Before running the program, enter your specifications into
SETUP_focalMechMap.txt
"""
#########################################################################
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import netCDF4
import numpy as np
import obspy
from obspy import read
from obspy.clients.fdsn.client import Client
from obspy.core import UTCDateTime
from obspy.core.event import Event
from obspy.core.event import FocalMechanism
from obspy.core.event import Origin
from obspy.core.stream import Stream
from obspy.core.trace import Trace
from obspy.imaging.beachball import beach
from obspy.geodetics.base import gps2dist_azimuth as separation
from obspy.signal.trigger import ar_pick
import os
#### Read program specs from the setup file
with open('SETUP/SETUP_calcSingleFocalMech.txt', 'r') as f:
setup=f.read().splitlines()
PROJECTION=setup[0][11:]
MIN_LATITUDE=float(setup[1][13:])
MAX_LATITUDE=float(setup[2][13:])
MIN_LONGITUDE=float(setup[3][14:])
MAX_LONGITUDE=float(setup[4][14:])
QUAKEML_FILE=setup[5][13:]
NETCDF4_FILE=setup[6][13:]
IMAGE_PATH=setup[7][11:]
WAVE_PATH=setup[8][10:]
start=UTCDateTime() #time the program
#### Create basemap
def polar_stere(lon_w, lon_e, lat_s, lat_n, **kwargs):
    """Return a polar stereographic Basemap of a lon/lat box not centered at a pole."""
    center_lon = lon_w + (lon_e - lon_w) / 2.
    # Pick the hemisphere from whichever latitude bound is farther from the equator.
    extreme_lat = lat_s if abs(lat_s) > abs(lat_n) else lat_n
    pole_lat = np.copysign(90., extreme_lat)
    # Project the box corners with a pole-centered helper map first...
    helper = Basemap(projection=PROJECTION, lon_0=center_lon, lat_0=pole_lat,
                     boundinglat=0, resolution='l')
    corner_lons = [lon_w, lon_e, lon_w, lon_e, center_lon, center_lon]
    corner_lats = [lat_s, lat_s, lat_n, lat_n, lat_s, lat_n]
    xs, ys = helper(corner_lons, corner_lats)
    # ...then invert the bounding box back to lon/lat for the final map corners.
    ll_lon, ll_lat = helper(min(xs), min(ys), inverse=True)
    ur_lon, ur_lat = helper(max(xs), max(ys), inverse=True)
    return Basemap(projection='stere', lat_0=pole_lat, lon_0=center_lon,
                   llcrnrlon=ll_lon, llcrnrlat=ll_lat,
                   urcrnrlon=ur_lon, urcrnrlat=ur_lat, **kwargs)
if (PROJECTION == 'spstere' or PROJECTION == 'npstere'):
my_map = polar_stere(MIN_LONGITUDE, MAX_LONGITUDE, MIN_LATITUDE, MAX_LATITUDE, area_thresh=0.1)
else:
my_map = Basemap(projection=PROJECTION, lat_0=MIN_LATITUDE,
lon_0=MIN_LONGITUDE, resolution='l', area_thresh=0.1,
llcrnrlon=MIN_LONGITUDE, llcrnrlat=MIN_LATITUDE,
urcrnrlon=MAX_LONGITUDE, urcrnrlat=MAX_LATITUDE)
my_map.drawcoastlines()
my_map.drawcountries()
my_map.fillcontinents(color=[1,1,1])
my_map.drawmapboundary()
my_map.drawmeridians(range(120,129,8), labels=[0,0,0,1], fontsize=3)
#### Read events from saved file from 'fetchdata.py'
events = obspy.read_events(QUAKEML_FILE, 'QUAKEML')
lons, lats, deps, mags, fmcs = [], [], [], [], []
for event in events:
if (len(event.focal_mechanisms) > 0):
lons.append(event.preferred_origin().longitude)
lats.append(event.preferred_origin().latitude)
deps.append(event.preferred_origin().depth)
mags.append(event.magnitudes[0].mag)
fmcs.append(event.focal_mechanisms[0])
print('\n___________________\nFirst event: '+str(events[0].resource_id)+
'\ntime: '+str(events[0].preferred_origin().time)+'\nlat,lon: '+
str(lats[0])+','+str(lons[0])+'\nmagnitude: '+str(mags[0])+
'\ndepth: '+str(deps[0])+'\nfocal mec: '+
str(fmcs[0].resource_id)+'\n___________________\n')
#### Plot the focal mechanisms
x,y = my_map(lons, lats)
ax = plt.gca()
maxDepth = max(deps)
skips=0
for xxx, yyy, dep, mag, fmec in zip(x, y, deps, mags, fmcs):
try:
t = fmec.moment_tensor.tensor
print(t)
b = beach([t.m_rr,t.m_tt,t.m_pp,t.m_rt,t.m_rp,t.m_tp], xy=(xxx, yyy),
width=100000*mag, linewidth=0.1, facecolor=[dep/maxDepth,0,0])
b.set_zorder(10)
ax.add_collection(b)
except: skips+=1
print('\n...'+str(skips)+' events skipped...\n')
#### Read topobathymetry data from saved file
nc = netCDF4.Dataset(NETCDF4_FILE,'r',format='NETCDF4')
bathLat = nc.variables['latitude'][::10]
bathLon = nc.variables['longitude'][::10]
bathLon,bathLat = np.meshgrid(bathLon,bathLat)
bathX, bathY = my_map(bathLon, bathLat)
#### Add bathymetry to map
#interval=range(np.amin(nc.variables['ROSE'][::10,::10]),0,25)
interval=range(-6000,25,25)
cs1 = my_map.contourf(bathX,bathY,nc.variables['ROSE'][::10,::10], interval)
cbar1 = my_map.colorbar(cs1,pad="5%")
cbar1.set_label('Ocean Depth (m)')
#### save the plot and display runtime
plt.savefig(IMAGE_PATH+'initial.png', dpi=2400)
print('\nfirst plot complete\n')
#### organize the wave data
client=Client('IRIS')
traces=[]
for mseed in os.listdir(WAVE_PATH): # reads all mseed files in path
if '.mseed' in mseed:
stream=read(WAVE_PATH+mseed)
for trace in stream: traces.append(trace)
traceIDdict={}
for trace in traces:
idList=trace.id.split('.')
try: traceIDdict[idList[0]+idList[1]+idList[2]].append(trace)
except: traceIDdict[idList[0]+idList[1]+idList[2]]=[trace]
print('\ndictionary filled\n')
firstArrivals=[] ## (Radial first arrival amplitude with polarity, distance^3, azimuth)
groupedStreams=[] ## (Z,N,E,R,T)
posZandAzm=[] ## (+stZ[0].max(),distance^3,azimuth)
ZvR=[] ## (|Z/R| first-arrival amplitude ratio, 1, azimuth)
# FIX: ZvR was appended to and sorted later but never initialized,
# causing a NameError on the first append.
for key in traceIDdict:
if len(traceIDdict[key])==5:
for trace in traceIDdict[key]:
traceInfo=trace.id.split('.')
if traceInfo[3][2] == 'Z':
station=client.get_stations(network=traceInfo[0],
station=traceInfo[1],level='response')
coords=station.get_coordinates(seed_id=trace.id)
sep=separation(lats[0],lons[0],coords['latitude'],coords['longitude'])
stZ=Stream(traces=[trace])
elif traceInfo[3][2] == 'E': stE=Stream(traces=[trace])
elif traceInfo[3][2] == 'N': stN=Stream(traces=[trace])
elif traceInfo[3][2] == 'R': stR=Stream(traces=[trace])
elif traceInfo[3][2] == 'T': stT=Stream(traces=[trace])
arrivals=ar_pick(a=stZ[0].data,b=stN[0].data,c=stE[0].data,
samp_rate=stE[0].stats.sampling_rate,
f1=0.02,f2=1.0,
lta_p=60.0,sta_p=2.0,
lta_s=60.0,sta_s=2.0,
m_p=4,m_s=4,
l_p=4,l_s=4,
s_pick=True)
firstPindex=int(np.round(arrivals[0]*stE[0].stats.sampling_rate))
firstPamp=stR[0].data[firstPindex]
firstArrivals.append((firstPamp,sep[0]**3,sep[1]))
ZvR.append((abs(stZ[0].data[firstPindex]/firstPamp),1,sep[1]))
#groupedStreams.append((stZ,stN,stE,stR,stT))
zMax=stZ[0].max()
if zMax>0: posZandAzm.append((zMax,sep[0]**3,sep[1]))
else:
for trace in traceIDdict[key]:
traceInfo=trace.id.split('.')
if traceInfo[3][2] == 'Z':
station=client.get_stations(network=traceInfo[0],
station=traceInfo[1],level='response')
coords=station.get_coordinates(seed_id=trace.id)
sep=separation(lats[0],lons[0],coords['latitude'],coords['longitude'])
stZ=Stream(traces=[trace])
zMax=stZ[0].max()
if zMax>0: posZandAzm.append((zMax,sep[0]**3,sep[1]))
print(firstArrivals[0])
#### write the first arrival amplitudes and locations into a text file
posRad,negRad=[],[] # separate the entries by direction of first motion
with open(IMAGE_PATH+'info.txt','w') as f:
f.write('Radial first arrival amplitude, distance^3, azimuth\n\n')
for entry in firstArrivals:
f.write(str(entry)+'\n')
if entry[0]>=0: posRad.append(entry)
else: negRad.append(entry)
#### merge sort functions for tuples
def merge(a=(), b=(), index=0):
    """Stably merge two lists of tuples already sorted by element *index*.

    On equal keys the element from *a* is emitted first, matching the
    original's tie handling.

    FIX: the original pre-filled the output with placeholder obspy Trace
    objects and used three separately-guarded comparisons per iteration;
    this standard two-pointer merge produces the same output without the
    obspy dependency or the mutable default arguments.
    """
    merged = []
    pos_a = pos_b = 0
    while pos_a < len(a) and pos_b < len(b):
        if a[pos_a][index] <= b[pos_b][index]:
            merged.append(a[pos_a])
            pos_a += 1
        else:
            merged.append(b[pos_b])
            pos_b += 1
    # One side is exhausted; the remainder of the other is already sorted.
    merged.extend(a[pos_a:])
    merged.extend(b[pos_b:])
    return merged
def split_list(to_be_split=[]):
    """Split a list into two halves and return them as a (first, second)
    pair; the first half gets ``len // 2`` elements.

    Fix: the original returned the bare list (not a two-tuple) for
    single-element input, which broke ``a, b = split_list(...)`` tuple
    unpacking at every call site. Now always returns two lists.
    """
    mid = len(to_be_split) // 2
    return to_be_split[:mid], to_be_split[mid:]
def merge_sort(to_be_merged=[], index=0):
    """Return a copy of ``to_be_merged`` sorted ascending on tuple
    position ``index``, via recursive merge sort."""
    if len(to_be_merged) < 2:
        return to_be_merged
    first_half, second_half = split_list(to_be_merged)
    return merge(merge_sort(first_half, index),
                 merge_sort(second_half, index),
                 index)
#### recalculate the focal mechanism from the wave data
incAzmPosRad=merge_sort(posRad,2) ## 2 is the index of azimuth
incAzmNegRad=merge_sort(negRad,2)
incZ=merge_sort(posZandAzm,2)
# Fix: this was bound to the misspelled name `inZvR`, so the later loop
# over `incZvR` always raised NameError and the ZvR-based dip estimate
# was silently skipped by the bare `except`. The old name is kept as an
# alias in case anything else referenced it.
incZvR=merge_sort(ZvR,2)
inZvR=incZvR  # legacy alias for the misspelled name
## returns the average amplitude of an index of tuples of a list between
## previously set range of azimuths, also dividing by the distance^3
def local_average(values=[], index=0):
    """Mean of ``abs(entry[index]) / entry[1]`` over ``values``.

    ``entry[1]`` is the distance^3 weight. Raises ZeroDivisionError for
    empty input — callers deliberately rely on that via try/except.
    """
    return sum(abs(entry[index]) / entry[1] for entry in values) / len(values)
## returns an azimuth specifying orientation of focal mechanism
def search_azm(values=[], index=0):
    """Binary-search ``values`` (tuples sorted by azimuth, entry[2]) for
    the azimuth window with the largest weighted mean of ``entry[index]``.

    Recurses while both halves hold more than 7 entries; otherwise
    returns the midpoint azimuth of the stronger half's range.
    """
    lower, upper = split_list(values)
    lower_avg = local_average(lower, index)
    upper_avg = local_average(upper, index)
    stronger = lower if lower_avg > upper_avg else upper
    if len(lower) > 7 and len(upper) > 7:
        return search_azm(stronger, index)
    # midpoint of the winning half's azimuth span
    return stronger[0][2] + (stronger[-1][2] - stronger[0][2]) / 2
# azimuth of strongest positive radial first motions -> strike estimate
strikeAngle=search_azm(incAzmPosRad,0)
#negRazm=search_azm(incAzmNegRad,0)
# azimuth of strongest positive vertical maxima -> slip-direction estimate
slipAngle=search_azm(incZ,0)
# back-azimuth (opposite direction) of the slip azimuth
if slipAngle>180: zBazm=slipAngle-180
else: zBazm=slipAngle+180
print('\nstrike angle: '+str(strikeAngle)+'\nslip angle: '+str(slipAngle))
## returns boolean for if within width number of degrees of an azimuth
def within(to_fit=0.0, azm=0.0, width=5):
    """True when ``to_fit`` lies within ``width`` degrees of ``azm`` on a
    0-360 compass, handling wrap-around at north."""
    if width <= azm <= 360 - width:
        # no wrap-around: plain interval test
        return azm - width <= to_fit <= azm + width
    if azm > 360 - width:
        # window wraps past 360
        # NOTE(review): the `to_fit > azm` term is redundant (implied by
        # the first term); kept for exact behavioural parity.
        return to_fit >= azm - width or to_fit > azm or to_fit <= width + azm - 360
    # azm < width: window wraps below 0
    # NOTE(review): the upper bound looks like it should be azm + width;
    # kept as-is pending confirmation against the original intent.
    return to_fit <= azm or to_fit >= 360 + azm - width
## calculating the dip angle
# Compare average amplitudes near the slip azimuth vs. its back-azimuth;
# their ratio (scaled to 90 degrees) gives the dip estimate.
ZatAzm,ZatBazm=[],[]
try:
    # NOTE(review): `incZvR` is never defined above — the sorted Z/R list
    # was bound to the (likely misspelled) name `inZvR` — so this branch
    # raises NameError and the bare `except` always runs the fallback.
    for entry in incZvR:
        if within(entry[2],slipAngle,80): ZatAzm.append(entry)
        elif within(entry[2],zBazm,80): ZatBazm.append(entry)
    dipAngle=90*abs(local_average(ZatBazm,0)/local_average(ZatAzm,0))
except:
    # fallback: use raw vertical maxima instead of Z/R ratios.
    # NOTE(review): ZatAzm/ZatBazm are not reset here, so entries appended
    # before a mid-loop failure in the try branch would be double-counted.
    for entry in incZ:
        if within(entry[2],slipAngle,80): ZatAzm.append(entry)
        elif within(entry[2],zBazm,80): ZatBazm.append(entry)
    dipAngle=90*abs(local_average(ZatBazm,0)/local_average(ZatAzm,0))
## calculating the moment tensor
# (strike, slip, dip) converted to radians in one call
angles=np.radians((strikeAngle,slipAngle,dipAngle))
# fault-normal and slip-direction unit vectors.
# NOTE(review): formulas appear to follow the standard strike/dip/rake
# decomposition — verify index order (0=strike, 1=slip, 2=dip).
normal=(-1*np.sin(angles[2])*np.sin(angles[0]),-1*np.sin(angles[2])*np.cos(angles[0]),np.cos(angles[2]))
slip=(np.cos(angles[1])*np.cos(angles[0])+np.sin(angles[1])*np.cos(angles[2])*np.sin(angles[0]),
-1*np.cos(angles[1])*np.sin(angles[0])+np.sin(angles[1])*np.cos(angles[2])*np.cos(angles[0]),
np.sin(angles[1])*np.sin(angles[2]))
# symmetric moment tensor M = n s^T + s n^T (unit scalar moment):
# diagonal terms doubled, off-diagonal terms are the symmetric sums
fmec.moment_tensor.tensor.m_rr=2*normal[0]*slip[0]
fmec.moment_tensor.tensor.m_tt=2*normal[1]*slip[1]
fmec.moment_tensor.tensor.m_pp=2*normal[2]*slip[2]
fmec.moment_tensor.tensor.m_rt=normal[0]*slip[1]+slip[0]*normal[1]
fmec.moment_tensor.tensor.m_rp=normal[0]*slip[2]+slip[0]*normal[2]
fmec.moment_tensor.tensor.m_tp=normal[1]*slip[2]+slip[1]*normal[2]
t = fmec.moment_tensor.tensor
print(t)
# draw the recalculated beachball on the existing map axes, scaled by
# magnitude and shaded red by relative depth
b = beach([t.m_rr,t.m_tt,t.m_pp,t.m_rt,t.m_rp,t.m_tp], xy=(xxx, yyy),
width=100000*mag, linewidth=0.1, facecolor=[dep/maxDepth,0,0])
b.set_zorder(10)
ax.add_collection(b)
plt.savefig(IMAGE_PATH+'_recalc.png', dpi=2400)
print('\n\nRuntime: '+str(UTCDateTime()-start)+' seconds\n\n')
# macOS-only audible completion notification
os.system('say "Focal mechanism mapping complete"')
##########################################################################
"""
cite smith and sandwell (bathymetry)
cite polar_stere
""" |
from itertools import combinations

# Read a string and an integer k; print the lexicographically smallest
# string obtainable by deleting exactly k characters.
x1, y1 = input().split()
x = str(x1)
y = int(y1)
# Fix: the original iterated over the (empty) list `z` instead of the
# combinations, and then tried to append to the combinations object —
# it always crashed (min() of empty sequence / AttributeError).
z = []
for combo in combinations(x, len(x) - y):
    z.append(''.join(combo))
print(min(z))
|
"""Test combination of all sources."""
from textwrap import dedent
import pytest
from docoptcfg import docoptcfg
from tests import DOCSTRING_FAM, EXPECTED_FAM
def test_config_file_in_env(monkeypatch, tmpdir):
    """Test specifying a config file using only env variables.

    :param monkeypatch: pytest fixture.
    :param tmpdir: pytest fixture.
    """
    ini = tmpdir.join('config.ini')
    ini.write(dedent("""\
        [FlashAirMusic]
        mac-addr = AA:BB:CC:DD:EE:FF
        """))
    monkeypatch.setenv('FAM_CONFIG', str(ini))

    expected = dict(EXPECTED_FAM)
    expected.update({'--config': str(ini), '--mac-addr': 'AA:BB:CC:DD:EE:FF'})
    actual = docoptcfg(DOCSTRING_FAM, ['run'], config_option='-c', env_prefix='FAM_')
    assert actual == expected
@pytest.mark.parametrize('set_arg', [True, False])
@pytest.mark.parametrize('set_env', [True, False])
@pytest.mark.parametrize('set_file', [True, False])
def test_override(monkeypatch, tmpdir, set_arg, set_env, set_file):
    """Test source overrides.

    :param monkeypatch: pytest fixture.
    :param tmpdir: pytest fixture.
    :param bool set_arg: Set value in command line arguments.
    :param bool set_env: Set value in environment variables.
    :param bool set_file: Set value in config file.
    """
    extra_line = 'ffmpeg-bin = ffmpeg_file' if set_file else ''
    ini = tmpdir.join('config.ini')
    ini.write(dedent("""\
        [FlashAirMusic]
        quiet = true
        {0}
        """).format(extra_line))
    monkeypatch.setenv('FAM_CONFIG', str(ini))
    monkeypatch.setenv('FAM_VERBOSE', 'true')
    if set_env:
        monkeypatch.setenv('FAM_FFMPEG_BIN', 'ffmpeg_env')

    argv = ['run', '-m', '00:11:22:33:44:55']
    if set_arg:
        argv += ['--ffmpeg-bin', 'ffmpeg_arg']

    expected = EXPECTED_FAM.copy()
    expected.update({
        '--config': str(ini),
        '--mac-addr': '00:11:22:33:44:55',
        '--quiet': True,
        '--verbose': True,
    })
    # precedence: command-line args > environment > config file
    if set_arg:
        expected['--ffmpeg-bin'] = 'ffmpeg_arg'
    elif set_env:
        expected['--ffmpeg-bin'] = 'ffmpeg_env'
    elif set_file:
        expected['--ffmpeg-bin'] = 'ffmpeg_file'

    actual = docoptcfg(DOCSTRING_FAM, argv, config_option='-c', env_prefix='FAM_')
    assert actual == expected
@pytest.mark.parametrize('data_type', ['str', 'int', 'float'])
@pytest.mark.parametrize('source', ['arg', 'env', 'file'])
def test_data_types(monkeypatch, tmpdir, source, data_type):
    """Ensure all sources produce the exact same non-boolean data types and values.

    :param monkeypatch: pytest fixture.
    :param tmpdir: pytest fixture.
    :param source: Config source to test.
    :param data_type: Data type to test.
    """
    # one representative literal per data type; all sources feed it as text
    value = {'str': 'one', 'int': '1', 'float': '2.3'}[data_type]
    argv = ['run']
    expected = EXPECTED_FAM.copy()

    if source == 'file':
        ini = tmpdir.join('config.ini')
        ini.write('[FlashAirMusic]\nmac-addr = ' + value)
        monkeypatch.setenv('FAM_CONFIG', str(ini))
        expected['--config'] = str(ini)
    elif source == 'env':
        monkeypatch.setenv('FAM_MAC_ADDR', value)
    else:
        argv.extend(['--mac-addr', value])

    # Set expected.
    expected['--mac-addr'] = value
    actual = docoptcfg(DOCSTRING_FAM, argv, config_option='-c', env_prefix='FAM_')
    assert actual == expected
|
import tflearn
import numpy as np
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
import os
#print('abuuuuuuuuuuuuuuuuuu')
import matplotlib.pyplot as plt
#print('babuuuuuuuuuuuuuuu')
import tensorflow as tf
# clear any graph left over from a previous run (TF1-style global graph)
tf.reset_default_graph()
# each element is (image_array, label); latin1 encoding is presumably
# needed to load arrays pickled under Python 2 — confirm
train_data=np.load('train_data.npy', encoding='latin1')
# hold out the last 500 samples for validation
train=train_data[:-500]
test=train_data[-500:]
# 50x50 grayscale images, channels-last
x=np.array([i[0] for i in train]).reshape(-1,50,50,1)
y=[i[1] for i in train]
test_x=np.array([i[0] for i in test]).reshape(-1,50,50,1)
test_y=[i[1] for i in test]
# six conv/max-pool stages, then a 1024-unit dense layer and 2-way softmax
convnet = input_data(shape=[None, 50, 50, 1], name='input')
convnet = conv_2d(convnet, 32, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)
convnet = conv_2d(convnet, 64, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)
convnet = conv_2d(convnet, 32, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)
convnet = conv_2d(convnet, 64, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)
convnet = conv_2d(convnet, 32, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)
convnet = conv_2d(convnet, 64, 2, activation='relu')
convnet = max_pool_2d(convnet, 2)
convnet = fully_connected(convnet, 1024, activation='relu')
# 0.8 is the keep probability per tflearn's dropout convention — confirm
convnet = dropout(convnet, 0.8)
convnet = fully_connected(convnet, 2, activation='softmax')
convnet = regression(convnet, optimizer='adam', learning_rate=1e-3, loss='categorical_crossentropy', name='targets')
model = tflearn.DNN(convnet,tensorboard_dir='log')
# resume from an existing checkpoint if one is present
if os.path.exists('catdog.meta'):
    model.load('catdog')
    print('loaded')
#model.fit({'input': x}, {'targets': y}, n_epoch=5, validation_set=({'input': test_x}, {'targets': test_y}),
#    snapshot_step=500, show_metric=True, run_id='catdog')
model.save('catdog')
# visual sanity check: predict the first 12 test images and plot them
test_data=np.load('test_data.npy', encoding='latin1')
fig=plt.figure()
for num,data in enumerate(test_data[:12]):
    img_num=data[1]   # image id
    img_data=data[0]  # raw 50x50 pixel array
    # NOTE(review): `y` shadows the training-label list defined above
    y=fig.add_subplot(3,4,num+1)
    orig=img_data
    data=img_data.reshape(50,50,1)
    model_out=model.predict([data])[0]
    # class index 1 is labelled 'Dog', class index 0 'cat'
    if np.argmax(model_out)==1: label='Dog'
    else: label='cat'
    y.imshow(orig,cmap='gray')
    plt.title(label)
    y.axes.get_xaxis().set_visible(False)
    y.axes.get_yaxis().set_visible(False)
#plt.show()
# Kaggle-style submission: header, then one row per test image with the
# softmax score of class index 1 (the 'Dog' class above)
with open('submission.csv','w') as f:
    f.write('id,label\n')
with open('submission.csv','a') as f:
    for data in test_data:
        img_num=data[1]
        img_data=data[0]
        data=img_data.reshape(50,50,1)
        model_out=model.predict([data])[0]
        f.write('{},{}\n'.format(img_num,model_out[1]))
|
import qrcode
def create():
    """Generate a QR code encoding a short personal text block and save
    it as MyQRcode.png in the working directory."""
    qr = qrcode.QRCode(
        version = 5,   # 37x37 module symbol
        box_size = 3,  # pixels per module
        border = 2     # quiet-zone width in modules (spec minimum is 4)
    )
    qr.add_data("Name: Shikhar\nAge: 16\nGender: Male")
    qr.make(fit=True)
    # Fix: make_image() takes `fill_color`, not `fill`; the old keyword
    # was silently ignored and only looked right because black is the
    # default foreground anyway.
    img = qr.make_image(fill_color = "black", back_color = "white")
    img.save("MyQRcode.png")


create()
# Difficulty: trivial
# https://www.hackerrank.com/challenges/ctci-is-binary-search-tree/problem?h_l=interview&playlist_slugs%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D=trees
def check_BST(root):
    """Return True iff the tree rooted at `root` is a strict BST.

    Walks the lazy in-order traversal and checks it is strictly
    increasing; `>=` also rejects duplicate keys, per the problem
    statement. An empty tree (root=None) is a valid BST.
    """
    previous = None
    for value in in_order_traversal(root):
        # idiom fix: identity comparison with None (`is not` / `is`)
        if previous is not None and previous >= value:
            return False
        previous = value
    return True


def in_order_traversal(root):
    """Lazily yield node values of `root` in order (left, node, right).

    Being a generator, `return` here yields an empty sequence — no need
    for the original `return iter(())`.
    """
    if root is None:
        return
    yield from in_order_traversal(root.left)
    yield root.data
    yield from in_order_traversal(root.right)
|
# DRF Imports
from django.shortcuts import render
from rest_framework.viewsets import ViewSet, ModelViewSet
from rest_framework.response import Response
from rest_framework.views import APIView
from django.db.models import Q
from rest_framework.generics import ListAPIView
from rest_framework.status import ( HTTP_201_CREATED, HTTP_400_BAD_REQUEST, HTTP_200_OK, HTTP_500_INTERNAL_SERVER_ERROR)
# Misc Imports
import json
from pizza_app.pagination import PostLimitOffsetPagination
# Model Imports
from pizza_app.models import Pizza
# Serializer Import
from pizza_app.serializer import PizzaListSerializer
# Create your views here.
class PizzaView(ModelViewSet):
    """CRUD endpoints for Pizza rows.

    Every handler returns a DRF ``Response``; unexpected errors are
    reported as HTTP 500 with the exception message.
    """
    pagination_class = PostLimitOffsetPagination

    def list(self, request, *args, **kwargs):
        """Return every pizza as a list of plain dicts."""
        try:
            pizza_list = Pizza.objects.filter().values()
            return Response({"pizza_list": pizza_list})
        except Exception as e:
            return Response({"error": str(e)}, status=HTTP_500_INTERNAL_SERVER_ERROR)

    def create(self, request, *args, **kwargs):
        """Create a pizza from ``size``, ``types`` and ``toppings`` form fields."""
        try:
            size = request.POST.get("size")
            types = request.POST.get("types")
            toppings = request.POST.get("toppings")
            # Fix: `not size and types` parsed as `(not size) and types`,
            # so a request with `size` set but `types` missing slipped
            # through the validation. Both fields are required.
            if not size or not types:
                return Response({"message": "Parameters Missing."}, status=HTTP_400_BAD_REQUEST)
            toppings = json.dumps(toppings)
            pizza_obj = Pizza.objects.create(size=size, types=types, toppings=toppings)
            return Response({"message":"Created"},HTTP_201_CREATED)
        except Exception as e:
            return Response({"error": str(e)}, status=HTTP_500_INTERNAL_SERVER_ERROR)

    def update(self, request, pk=None, *args, **kwargs):
        """Partially update pizza ``pk``; absent fields keep their old values."""
        try:
            query = Pizza.objects.filter(id=pk).values().first()
            if not query:
                return Response({"message": "Invalid ID."}, status=HTTP_400_BAD_REQUEST)
            size = request.data.get("size") or query["size"]
            types = request.data.get("types") or query["types"]
            toppings = request.data.get("toppings") or query["toppings"]
            Pizza.objects.filter(id=pk).update(size=size, types=types, toppings=toppings)
            return Response({"message":"Updated"})
        except Exception as e:
            return Response({"error": str(e)}, status=HTTP_500_INTERNAL_SERVER_ERROR)

    def destroy(self, request, pk=None, *args, **kwargs):
        """Delete pizza ``pk`` if it exists."""
        try:
            obj = Pizza.objects.filter(id=pk)
            if not obj:
                return Response({"message": "Invalid ID."}, status=HTTP_400_BAD_REQUEST)
            obj.delete()
            return Response({"message": "Deleted."})
        except Exception as e:
            return Response({"error": str(e)}, status=HTTP_500_INTERNAL_SERVER_ERROR)
class PizzaList(ListAPIView):
    """Paginated pizza listing with optional ``?search=`` filtering on
    size or types (case-insensitive substring match)."""
    serializer_class = PizzaListSerializer
    pagination_class = PostLimitOffsetPagination

    def get_queryset(self, *args, **kwargs):
        try:
            search = self.request.GET.get("search")
            if search:
                # Fix: was `Pizza.filter(...)` — the `.objects` manager
                # lookup was missing, so every search request raised
                # AttributeError.
                return Pizza.objects.filter(Q(size__icontains=search) | Q(types__icontains=search))
            return Pizza.objects.filter().values()
        except Exception as e:
            # NOTE(review): a Response is not a valid queryset; kept to
            # preserve the original error contract — confirm intent.
            return Response({"error": str(e)}, status=HTTP_500_INTERNAL_SERVER_ERROR)
|
# -*- coding: utf8 -*-
#
# Copyright 2011-2012, Intel Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
'''
Created on 6 sept. 2011
@author: Ronan Le Martret
'''
from PySide.QtCore import QObject
from Utils import popupOnException
from ObsLightGuiObject import ObsLightGuiObject
from PySide.QtGui import QFileSystemModel
class FileManagerModel(QObject, ObsLightGuiObject):
    '''
    Manage the file list widget and file-related buttons of the main window.
    '''

    def __init__(self, gui, manager):
        QObject.__init__(self)
        ObsLightGuiObject.__init__(self, gui)
        self.__treeView = None      # QTreeView showing the package file system
        self.__lineEdit = None      # QLineEdit echoing the clicked file path
        self._project = None
        self._package = None
        self._baseDirPath = "/"     # root directory shown to the user
        self._curentDirPath = "/"   # directory to pre-select/unfold
        self.__systemModel = None   # QFileSystemModel backing the view

    def setTreeView(self, treeView):
        """Attach the QTreeView this model drives."""
        self.__treeView = treeView

    def setLineEdit(self, lineEdit):
        """Attach the QLineEdit used to display the clicked file's path."""
        self.__lineEdit = lineEdit

    def getTreeView(self):
        return self.__treeView

    def _init_connect(self):
        """Wire the tree view's signals to the handlers below."""
        if self.__treeView is not None:
            self.__treeView.doubleClicked.connect(self.on_TreeView_activated)
            self.__treeView.clicked.connect(self.on_TreeView_clicked)
            self.__treeView.expanded.connect(self.on_TreeView_expanded)
            self.__treeView.collapsed.connect(self.on_TreeView_expanded)

    def _isInit(self):
        return True

    def setCurrentProjectAndPackage(self, project, package):
        self._project = project
        self._package = package

    #---------------------------------------------------------------------------------------------------
    @popupOnException
    def on_TreeView_activated(self, index):
        """
        When user double-clicks on an item, open it with default application.
        """
        filePath = index.model().filePath(index)
        self.manager.openFile(filePath)

    @popupOnException
    def on_TreeView_clicked(self, index):
        """
        When user clicks on an item, display the complete path
        of this item under the widget.
        """
        filePath = index.model().filePath(index)
        if self.__lineEdit is not None:
            self.__lineEdit.setText(filePath)

    @popupOnException
    def on_TreeView_expanded(self, _index):
        if self.__treeView is not None:
            self.__treeView.resizeColumnToContents(0)
        # NOTE(review): clearing the paths here relies on refresh()'s
        # _initBaseDir() to restore them — confirm this is intended.
        self._baseDirPath = None
        self._curentDirPath = None

    def _initBaseDir(self):
        pass

    #---------------------------------------------------------------------------------------------------
    def refresh(self):
        """Rebuild the QFileSystemModel and re-bind it to the tree view."""
        # --- view ---------
        self.__systemModel = QFileSystemModel()
        if self._project is not None and self._package is not None and self.__treeView is not None:
            if self._isInit():
                self.__treeView.setEnabled(True)
                # Qt 4.6 do not know "directoryLoaded"
                if hasattr(self.__systemModel, "directoryLoaded"):
                    self.__systemModel.directoryLoaded.connect(self.on_path_loaded)
                self._initBaseDir()
                self.__systemModel.setRootPath(self._baseDirPath)
                if self._baseDirPath != self._curentDirPath:
                    self.__systemModel.setRootPath(self._curentDirPath)
            else:
                self.__treeView.setEnabled(False)
            self.mainWindow.packageTabWidget.setEnabled(True)
        else:
            self.mainWindow.packageTabWidget.setEnabled(False)
        if self.__treeView is not None:
            self.__treeView.setModel(self.__systemModel)
        # Qt 4.6 do not know "directoryLoaded"
        if not hasattr(self.__systemModel, "directoryLoaded"):
            # Fix: these must read the single-underscore attributes set in
            # __init__; the old double-underscore names (`__baseDirPath`,
            # `__curentDirPath`) were never assigned anywhere, so this
            # fallback raised AttributeError on Qt < 4.7.
            self.on_path_loaded(self._baseDirPath)
            self.on_path_loaded(self._curentDirPath)

    def on_path_loaded(self, path):
        """
        Called when the QFileSystem model loads paths.
        """
        if self.__treeView is not None:
            if path == self._baseDirPath:
                # Set the root index of the QTreeView to the root directory of
                # the project file system, so user does not see outside
                if self.__systemModel is not None:
                    self.__treeView.setRootIndex(self.__systemModel.index(path))
            elif path == self._curentDirPath:
                # Set the current index of the QTreeView to the package directory
                # so it appears unfolded
                if self.__systemModel is not None:
                    self.__treeView.setCurrentIndex(self.__systemModel.index(path))
                    self.__treeView.resizeColumnToContents(0)
|
# Return the length of the longest palindromic subsequence given a sequence x[1...n]
def longestPalindrome(x):
    """Length of the longest palindromic subsequence of ``x``.

    Classic O(n^2) interval DP: ``lengths[i][j]`` is the answer for the
    slice ``x[i..j]``, built up by increasing interval size.

    Fix: returns 0 for empty input instead of raising IndexError on
    ``lengths[0][n-1]``.
    """
    n = len(x)
    if n == 0:
        return 0
    lengths = [[0] * n for _ in range(n)]
    for i in range(n):
        lengths[i][i] = 1  # every single character is a palindrome
    for size in range(2, n + 1):          # interval length
        for i in range(n - size + 1):
            j = i + size - 1
            if x[i] == x[j] and size == 2:
                lengths[i][j] = 2
            elif x[i] == x[j]:
                # matching ends wrap the best inner palindrome
                lengths[i][j] = lengths[i + 1][j - 1] + 2
            else:
                lengths[i][j] = max(lengths[i][j - 1], lengths[i + 1][j])
    return lengths[0][n - 1]


if __name__ == "__main__":
    sequence = "acgtctcaaaatcg"
    print(longestPalindrome(sequence))
#!C:/python36/python.exe
#!/usr/bin/env python3
##demo code provided by Steve Cope at www.steves-internet-guide.com
##email steve@steves-internet-guide.com
##Free to use for any purpose
##If you like and use this code you can
##buy me a drink here https://www.paypal.me/StepenCope
import asyncio
import os
import signal
import time
from gmqtt import Client as MQTTClient
from gmqtt.mqtt.constants import MQTTv311
# gmqtt also compatibility with uvloop
#import uvloop
#asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
client_id="testclient"  # NOTE(review): unused — main() hard-codes "client1"
mqttv=MQTTv311          # NOTE(review): unused in this script
messages=[]             # payloads received by on_message, newest last
STOP = asyncio.Event()  # set by ask_exit to request shutdown

def on_publish(client,rc, userdata, mid):
    # fired after a PUBLISH packet is sent
    print("published")

def on_connect(client, flags, rc, properties):
    print('Connected ',flags)
    print('Connected ',properties)

def on_message(client, topic, payload, qos, properties):
    # record every received payload so main() can check the round trip
    print('MSG Properties:', properties)
    msg=str(payload.decode("utf-8"))
    messages.append(msg)
    print('RECV MSG:', msg)

def on_disconnect(client, packet, exc=None):
    print('Disconnected ',packet)

def on_subscribe(client, mid, qos):
    print('SUBSCRIBED')

def ask_exit(*args):
    # signal-handler hook: ask the main loop to stop
    STOP.set()

async def wait(s):
    # helper: sleep s seconds, then report completion
    await asyncio.sleep(s)
    return True
async def main(broker_host):
    """Connect to the broker, subscribe to org/common, publish a test
    message carrying a response topic, then wait for it to arrive and
    report success/failure.

    :param broker_host: hostname or IP of the MQTT broker.
    """
    print("creating client")
    client = MQTTClient("client1")
    client.on_connect = on_connect
    client.on_message = on_message
    client.on_disconnect = on_disconnect
    client.on_subscribe = on_subscribe
    client.on_publish = on_publish
    await client.connect(broker_host)
    # NOTE(review): no_local=True suppresses delivery of our own
    # publications, so this check needs a second client echoing the
    # message — confirm the intended test setup.
    client.subscribe('org/common',no_local=True)
    await asyncio.sleep(5)
    print("Publish response topic")
    msg_out1="test message"
    # Fix: the payload published was the placeholder "aa", so the
    # comparison against msg_out1 below could never succeed.
    client.publish('org/common',msg_out1,response_topic="org/responses/client1")
    await asyncio.sleep(50) #wait to receive message
    if not messages:
        print("test failed")
    else:
        msg=messages.pop()
        if msg==msg_out1:
            print("test succeeded")
    await client.disconnect()
if __name__ == '__main__':
    # get_event_loop() is deprecated in newer Python; fine for this demo
    loop = asyncio.get_event_loop()
    host = '192.168.1.61'  # broker address on the local network
    loop.run_until_complete(main(host))
|
"""
leetcode 148. Sort List
문제 링크 https://leetcode.com/problems/sort-list/
"""
from Linked_list.listnode import ListNode
class Solution:
    def sortList(self, head: ListNode) -> ListNode:
        """Sort a singly linked list ascending via in-place merge sort.

        O(n log n) time, O(log n) recursion stack. Completes the original
        stub, which only split the list — and bound the misspelled name
        `harf`, so single-node lists raised NameError at `half.next`.
        """
        if head is None or head.next is None:
            return head
        # 1. find the middle with a slow/fast runner, then cut the list
        half, slow, fast = None, head, head
        while fast and fast.next:
            half, slow, fast = slow, slow.next, fast.next.next
        half.next = None  # sever the list at the midpoint
        # 2. merge sort: sort each half, then merge without allocating
        left, right = self.sortList(head), self.sortList(slow)
        if left.val <= right.val:
            merged = tail = left
            left = left.next
        else:
            merged = tail = right
            right = right.next
        while left and right:
            if left.val <= right.val:
                tail.next, left = left, left.next
            else:
                tail.next, right = right, right.next
            tail = tail.next
        tail.next = left or right
        return merged
# 2. 병합정렬
"""
Runtime: 160 ms
Memory Usage: 30.2 MB
"""
class Solution2:
    def sortList(self, head: ListNode) -> ListNode:
        """Sort the list by copying values out, sorting the array, and
        writing the sorted values back into the existing nodes.

        O(n log n) time, O(n) extra memory; node objects keep their
        positions — only the values move.
        """
        values = []
        node = head
        while node:
            values.append(node.val)
            node = node.next
        node = head
        for value in sorted(values):
            node.val = value
            node = node.next
        return head
def create_random_array(n, m):
    """Build an n-by-m matrix of uniform random integers in [-100, 100]."""
    import random
    return [[random.randint(-100, 100) for _ in range(m)] for _ in range(n)]
def negative_array_values_arith_mean(n, m):
    """Print the arithmetic mean of the negative entries of a freshly
    generated random n-by-m array, then print the array itself.

    :param n: number of rows.
    :param m: number of columns.
    """
    get_array = create_random_array(n, m)
    total_negative_sum = 0
    negative_count = 0
    for row in get_array:
        for value in row:
            if value < 0:
                total_negative_sum += value
                negative_count += 1
    if negative_count == 0:
        print ('no negative values in array. Arithmetical mean is 0')
    else:
        # Fix: corrected the misspelled user-facing message
        # ("Naegative ... arthmetical").
        print ('Negative values arithmetical mean is ', total_negative_sum / negative_count )
    print (get_array)
if __name__ == '__main__':
    # demo run on a small 2x3 array
    negative_array_values_arith_mean(2,3)
class Solution(object):
    def removeDuplicateLetters(self, s):
        """
        :type s: str
        :rtype: str

        Return the lexicographically smallest subsequence of `s` that
        contains each distinct letter exactly once (LeetCode 316).

        Completes the original stub (whose body was entirely commented
        out, implicitly returning None) with the greedy monotonic-stack
        algorithm the original notes sketched: keep a stack of chosen
        letters; pop a letter when a smaller one arrives and the popped
        letter still occurs later in the string. O(n) time; extra space
        bounded by the alphabet size.
        """
        last_index = {ch: i for i, ch in enumerate(s)}
        chosen = []       # monotonic stack holding the answer so far
        on_stack = set()  # letters currently in `chosen`
        for i, ch in enumerate(s):
            if ch in on_stack:
                continue  # each letter appears once in the answer
            # Popping a bigger letter is safe only if it re-occurs later;
            # choosing `ch` first then yields a smaller result.
            while chosen and chosen[-1] > ch and last_index[chosen[-1]] > i:
                on_stack.discard(chosen.pop())
            chosen.append(ch)
            on_stack.add(ch)
        return "".join(chosen)
# ad-hoc smoke test of the method and of str.join
a=Solution()
print(a.removeDuplicateLetters("abdcd"))
l=['a','b','c','d']
print("".join(l))
|
#Q-2 ) Palindrome Linked List
#Answer:-
# Definition for singly-linked list.
class ListNode(object):
    """Singly-linked list node holding a value and a next pointer."""
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next
class Solution(object):
    def isPalindrome(self, head):
        """Return True iff the linked list reads the same both ways.

        O(n) time, O(1) extra space: locate the middle with a slow/fast
        runner, reverse the back half in place, then walk both halves
        together. Note: the list's back half is left reversed.
        """
        if head is None or head.next is None:
            return True
        # locate the middle (slow lands on the back half's head)
        slow = fast = head
        while fast is not None and fast.next is not None:
            slow, fast = slow.next, fast.next.next
        # reverse the back half in place
        reversed_half = None
        while slow is not None:
            slow.next, reversed_half, slow = reversed_half, slow, slow.next
        # compare the reversed back half against the front
        while reversed_half is not None:
            if reversed_half.val != head.val:
                return False
            reversed_half = reversed_half.next
            head = head.next
        return True
if __name__ == "__main__":
    # build 1 -> 2 -> 2 -> 1 (a palindrome)
    head = ListNode(1)
    head.next = ListNode(2)
    head.next.next = ListNode(2)
    head.next.next.next = ListNode(1)
    # print(head.next.next.val)
    sol = Solution()
    print(sol.isPalindrome(head))
# third-party imports
import os
from flask import Flask, request, jsonify, abort
from sqlalchemy import exc
import json
from flask_cors import CORS
# local imports
from .database.models import db_drop_and_create_all, setup_db, Drink
from .auth.auth import AuthError, requires_auth
app = Flask(__name__)
setup_db(app)
CORS(app)

'''
uncomment the following line to initialize the database
!! NOTE THIS WILL DROP ALL RECORDS AND START YOUR DB FROM SCRATCH
!! NOTE THIS MUST BE UNCOMMENTED ON FIRST RUN
'''
# NOTE(review): despite the note above, this call is active — every
# startup drops and recreates the drinks table. Confirm this is wanted
# outside of first-run/dev environments.
db_drop_and_create_all()
@app.route('/drinks', methods=['GET'])
def get_drinks():
    """
    handles GET requests to retrieve all drinks

    Returns {'success': True, 'drinks': [<short form>...]} or 404.
    """
    try:
        all_drinks = Drink.query.order_by(Drink.id).all()
        payload = {
            'success': True,
            'drinks': [drink.short() for drink in all_drinks],
        }
        return jsonify(payload)
    except:
        # abort 404 if no drinks found
        abort(404)
@app.route('/drinks-detail', methods=['GET'])
# require the 'get:drinks-detail' permission
@requires_auth('get:drinks-detail')
def get_drink_detail(jwt):
    """
    handles GET requests to GET /drinks-detail

    Returns {'success': True, 'drinks': [<long form>...]} or 404.
    """
    try:
        detailed = [drink.long() for drink in Drink.query.all()]
        return jsonify({'success': True, 'drinks': detailed})
    except:
        # abort 404 if no drinks found
        abort(404)
@app.route('/drinks', methods=['POST'])
# require the 'post:drinks' permission
@requires_auth('post:drinks')
def post_drink(jwt):
    """
    handles POST requests to create new drink

    Expects a JSON body with 'title' and 'recipe'; returns
    {'success': True, 'drinks': [<new drink>]} or 422.
    """
    try:
        # get data from front end
        data = request.get_json()
        # Fix: `'title' and 'recipe' not in data` parsed as
        # `('title') and ('recipe' not in data)` — the literal is always
        # truthy, so only 'recipe' was ever checked. Require both keys.
        if 'title' not in data or 'recipe' not in data:
            abort(422)
        title = data['title']
        recipe_json = json.dumps(data['recipe'])
        # create a new row in the drinks table
        drink = Drink(title=title, recipe=recipe_json)
        drink.insert()
        # return success response in json format to view
        return jsonify({
            'success': True,
            'drinks': [drink.long()]  # contain the drink.long() data.
        })
    except:
        # abort unprocessable if exception (the abort above also lands
        # here and is re-raised as 422)
        abort(422)
@app.route('/drinks/<int:id>', methods=['PATCH'])
# require the 'patch:drinks' permission
@requires_auth('patch:drinks')
def update_drink(jwt, id):
    """
    handles PATCH request to update drinks

    Updates title and/or recipe of drink <id>; returns
    {'success': True, 'drinks': [<updated>]} or 404 on any failure.
    """
    try:
        # get the matching drink
        # (get_or_404 raises a 404 HTTPException itself; the bare except
        # below catches it and re-issues abort(404))
        drink = Drink.query.get_or_404(id)
        # 404 error if <id> is not found
        # NOTE(review): dead code — get_or_404 never returns None.
        if drink is None:
            abort(404)
        # get data from front end
        data = request.get_json()
        if 'title' in data:
            drink.title = data['title']
        if 'recipe' in data:
            drink.recipe = json.dumps(data['recipe'])
        # update the corresponding row for <id>
        drink.update()
        # return success response in json format to view
        return jsonify({
            'success': True,
            'drinks': [drink.long()]  # contain the drink.long() data
        })
    except:
        # 404 if no results found (any error, including malformed JSON,
        # is reported as 404 here)
        abort(404)
@app.route('/drinks/<int:id>', methods=['DELETE'])
# require the 'delete:drinks' permission
@requires_auth('delete:drinks')
def delete_drink(jwt, id):
    """Delete drink <id>; returns {'success': True, 'delete': id} or 404."""
    try:
        # get the matching drink
        # (get_or_404 raises a 404 HTTPException itself; the bare except
        # below catches it and re-issues abort(404))
        drink = Drink.query.get_or_404(id)
        # 404 error if <id> is not found
        # NOTE(review): dead code — get_or_404 never returns None.
        if drink is None:
            abort(404)
        # delete the corresponding row for <id>
        drink.delete()
        # return success response in json format to view
        return jsonify({
            'success': True,
            'delete': drink.id
        })
    except:
        # 404 if no results found
        abort(404)
# Error Handling

def _error_response(status, message):
    """Build the standard JSON error body shared by the handlers below."""
    return jsonify({
        "success": False,
        "error": status,
        "message": message
    }), status


@app.errorhandler(422)
def unprocessable(error):
    """
    error handlers for 422 unprocessable entity
    """
    return _error_response(422, "unprocessable")


@app.errorhandler(404)
def resource_not_found(error):
    """
    error handlers for 404
    """
    return _error_response(404, "resource not found")


@app.errorhandler(401)
def unauthorized(error):
    """
    error handlers for 401
    """
    return _error_response(401, "unauthorized")


@app.errorhandler(403)
def forbidden(error):
    """
    error handlers for 403
    """
    return _error_response(403, "Forbidden")


@app.errorhandler(AuthError)
def process_auth_error(error):
    """
    error handlers for AuthError
    """
    response = jsonify(error.error)
    response.status_code = error.status_code
    return response
#!/usr/local/python3/bin/python3
import sys
sys.path.append("..")
sys.path.append("../..")
import tushare as ts
import re
import datetime
import basicdata.basic_mgr as sk
import time
import os
import pandas as pd
from lib.time import (strtime_convert, strtime_delta_n_day)
save_dir='./moneyflow-data/'  # output directory for the per-window CSVs
g_start_date='2014-01-01'     # first day of the first download window
def download(pro):
    """Download HSGT money-flow data in consecutive ~300-day windows,
    starting at ``g_start_date``, writing one CSV per non-empty window to
    ``save_dir``. Stops once data has started appearing and a window
    comes back empty (i.e. we have reached the present).

    :param pro: an authenticated tushare ``pro_api`` client.
    """
    global g_start_date
    temp_start_date=g_start_date
    start_flag=False  # becomes True at the first non-empty window
    while True:
        start_date=temp_start_date
        end_date=strtime_delta_n_day(start_date, 300)
        df=pro.moneyflow_hsgt(start_date=strtime_convert(start_date), end_date=strtime_convert(end_date))
        # idiom fix: test truthiness directly instead of `== True/False`
        if start_flag and df.empty:
            break
        # next window starts the day after this window ends
        temp_start_date=strtime_delta_n_day(start_date, 301)
        if not df.empty:
            start_flag=True
            # NOTE(review): writing only non-empty frames reconstructs the
            # original (whitespace-mangled) indentation — confirm empty
            # windows were indeed meant to be skipped.
            path=save_dir+start_date+".csv"
            df.to_csv(path)
if __name__ =='__main__':
    # NOTE(review): hard-coded API credential — move it to an environment
    # variable or a secrets store before sharing/committing this script.
    pro = ts.pro_api('08aedc1cc54171e54a64bbe834ec1cb45026fa2ab39e9e4cb8208cad')
    download(pro)
|
import requests
from bs4 import BeautifulSoup as bs
import os, sys
import re
from tqdm import tqdm
from datetime import datetime
import time
import pandas as pd
import csv
import pickle
# My functions
import planning_functions as pf
import master_planning_wrapper as mpw
import functions as mf
import json
datafolder = 'data'
# Open file and read in any parameters
with open('{}/params.txt'.format(datafolder), 'rt') as f: params = json.loads(f.read())
# per-borough CSV of postcodes that produced valid searches
valFile = '{}/valid_searches_{}.csv'

if __name__ == '__main__':
    # parse CLI args against the params file; no postcode arg needed here
    args = pf.getArgs(params, sys.argv, ignorePostcode=True)
    # Postcode to search (dataframe, original data)
    postcodes = pf.getPostcodes(args['borough'])
    # Processed postcodes (dataframe)
    procCodes = pf.processPostcodes(postcodes)
    # Nested dictionary
    pCodes2 = pf.processPostcodesD(procCodes)
    ## Init browser object on planning page
    browser = mf.initbrowser(args['urlbase'])
    # Valid searches for postcodes
    valSearches = pf.findHighestPostcode(browser, pCodes2, args['urlbase'])
    # List to dataframe, then dataframe to csv
    df = pd.DataFrame(data=valSearches, columns=['postcodes'])
    df.to_csv(valFile.format(datafolder, args['borough'].lower()), index=False)
|
# Copyright 2014 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import mock
import os
import sys
import unittest2
import uuid
from contextlib import contextmanager
# must be set before importing compass settings so the real config files
# are not required in the test environment
os.environ['COMPASS_IGNORE_SETTING'] = 'true'
from compass.utils import setting_wrapper as setting
# re-import in case settings were already loaded (Python 2 builtin reload)
reload(setting)
from compass.actions import update_progress
from compass.actions import util
from compass.db.api import adapter as adapter_api
from compass.db.api import adapter_holder as adapter
from compass.db.api import cluster
from compass.db.api import database
from compass.db.api import host
from compass.db.api import machine
from compass.db.api import metadata as metadata_api
from compass.db.api import metadata_holder as metadata
from compass.db.api import network
from compass.db.api import switch
from compass.db.api import user as user_api
from compass.db import exception
from compass.log_analyzor.adapter_matcher import AdapterItemMatcher
from compass.log_analyzor.adapter_matcher import OSMatcher
from compass.log_analyzor.adapter_matcher import PackageMatcher
from compass.log_analyzor import file_matcher
from compass.log_analyzor.file_matcher import FileMatcher
from compass.log_analyzor.file_matcher import FileReaderFactory
from compass.log_analyzor.line_matcher import IncrementalProgress
from compass.log_analyzor.line_matcher import LineMatcher
from compass.log_analyzor import progress_calculator
from compass.utils import flags
from compass.utils import logsetting
# fixture identifiers used throughout the progress-calculator test below
ADAPTER_NAME = 'openstack_icehouse'
OS_NAME = 'CentOS-6.5-x86_64'
SWITCH_IP = '172.29.8.40'
MACHINE_MAC = '00:0c:29:bf:eb:1d'
SUBNET = '10.145.88.0/23'
HOST_IP = '10.145.88.0'
class TestProgressCalculator(unittest2.TestCase):
    """End-to-end tests: generate mock installer logs at known check points
    and verify update_progress() reports the expected percentage."""

    def _prepare_database(self):
        """Populate the DB with one adapter/cluster/switch/machine/host
        and move everything into the INSTALLING state."""
        adapter.load_adapters()
        metadata.load_metadatas()
        self.user_object = (
            user_api.get_user_object(
                setting.COMPASS_ADMIN_EMAIL
            )
        )
        self.adapter_id = None
        self.os_id = None
        self.flavor_id = None
        self.cluster_id = None
        # get adapter information
        list_adapters = adapter.list_adapters(user=self.user_object)
        for adptr in list_adapters:
            self.adapter_id = None
            if adptr['name'] != ADAPTER_NAME:
                continue
            self.adapter_id = adptr['id']
            self.os_id = None
            for supported_os in adptr['supported_oses']:
                if supported_os['name'] == OS_NAME:
                    self.os_id = supported_os['os_id']
                    break
            if not self.os_id:
                continue
            if (
                'package_installer' in adptr.keys() and
                adptr['flavors'] != [] and
                adptr['distributed_system_name'] == 'openstack'
            ):
                self.flavor_id = None
                for flavor in adptr['flavors']:
                    if flavor['name'] == 'allinone':
                        self.flavor_id = flavor['id']
                        break
                if not self.flavor_id:
                    continue
            else:
                continue
            if self.adapter_id and self.os_id and self.flavor_id:
                break
        if not self.adapter_id:
            raise Exception('adapter id not found')
        if not self.os_id:
            raise Exception('os id not found')
        if not self.flavor_id:
            raise Exception('flavor id not found')
        # add cluster
        cluster.add_cluster(
            adapter_id=self.adapter_id,
            os_id=self.os_id,
            flavor_id=self.flavor_id,
            name='test_cluster',
            user=self.user_object,
        )
        list_clusters = cluster.list_clusters(user=self.user_object)
        for list_cluster in list_clusters:
            if list_cluster['name'] == 'test_cluster':
                self.cluster_id = list_cluster['id']
                break
        # BUGFIX: removed a leftover second loop that re-assigned
        # self.cluster_id to the *last* cluster's id, silently discarding
        # the 'test_cluster' lookup above.
        # add switch
        switch.add_switch(
            ip=SWITCH_IP,
            user=self.user_object,
        )
        list_switches = switch.list_switches(user=self.user_object)
        for list_switch in list_switches:
            self.switch_id = list_switch['id']
        switch.add_switch_machine(
            self.switch_id,
            user=self.user_object,
            mac=MACHINE_MAC,
            port='1'
        )
        # get machine information
        list_machines = machine.list_machines(user=self.user_object)
        for list_machine in list_machines:
            self.machine_id = list_machine['id']
        # add cluster host
        cluster.add_cluster_host(
            self.cluster_id,
            user=self.user_object,
            machine_id=self.machine_id,
            name='test_clusterhost'
        )
        list_clusterhosts = cluster.list_clusterhosts(user=self.user_object)
        for list_clusterhost in list_clusterhosts:
            self.host_id = list_clusterhost['host_id']
            self.clusterhost_id = list_clusterhost['clusterhost_id']
        # add subnet
        network.add_subnet(
            subnet=SUBNET,
            user=self.user_object,
        )
        list_subnets = network.list_subnets(
            user=self.user_object
        )
        for list_subnet in list_subnets:
            self.subnet_id = list_subnet['id']
        # add host network
        host.add_host_network(
            self.host_id,
            user=self.user_object,
            interface='eth0',
            ip=HOST_IP,
            subnet_id=self.subnet_id,
            is_mgmt=True
        )
        # get clusterhost
        list_clusterhosts = cluster.list_clusterhosts(
            user=self.user_object
        )
        for list_clusterhost in list_clusterhosts:
            # NOTE(review): this re-reads the plain 'id' key where the loop
            # above used 'clusterhost_id' — presumably both are exposed by
            # the API; confirm they refer to the same record.
            self.clusterhost_id = list_clusterhost['id']
        # update host state
        self.list_hosts = host.list_hosts(user=self.user_object)
        for list_host in self.list_hosts:
            self.host_id = list_host['id']
        self.host_state = host.update_host_state(
            self.host_id,
            user=self.user_object,
            state='INSTALLING'
        )
        # update cluster state
        cluster.update_cluster_state(
            self.cluster_id,
            user=self.user_object,
            state='INSTALLING'
        )
        # update clusterhost state
        cluster.update_clusterhost_state(
            self.clusterhost_id,
            user=self.user_object,
            state='INSTALLING'
        )

    def _file_generator(self, check_point):
        """Write mock cobbler/chef log files containing all lines up to the
        line associated with `check_point` (see self.check_points)."""
        file_line_mapping = {
            'sys.log': {
                1: 'NOTICE kernel: Phoenix BIOS detected:'
                   'BIOS may corrupt low RAM, working around it.'
            },
            'anaconda.log': {
                1: 'setting up kickstart',
                2: 'starting STEP_STAGE2',
                3: 'Running anaconda scripti /usr/bin/anaconda',
                4: 'Running kickstart %%pre script(s)',
                5: 'All kickstart %%pre script(s) have been run',
                6: 'moving (1) to step enablefilesystems',
                7: 'leaving (1) step enablefilesystems',
                8: 'moving (1) to step reposetup',
                9: 'leaving (1) step reposetup',
                10: 'moving (1) to step postselection',
                11: 'leaving (1) step postselection',
                12: 'moving (1) to step installpackages',
                13: 'leaving (1) step installpackages',
                14: 'moving (1) to step instbootloader',
                15: 'leaving (1) step instbootloader',
            },
            'install.log': {
                1: 'Installing libgcc-4.4.7-4.el6.x86_64',
                2: 'FINISHED INSTALLING PACKAGES'
            },
            'chef-client.log': {
                1: 'Processing service[quantum-server] action',
                2: 'Processing directory[/var/cache/quantum] action',
                3: 'Chef Run complete in 1449.433415826 seconds'
            }
        }
        # Expected progress percentage for each check point, plus the last
        # log line that must appear for the calculator to reach it.
        self.check_points = {
            'check_point_1': {
                'percentage': 0.095,
                'position': {
                    'file': 'anaconda.log',
                    'line': 'setting up kickstart'
                }
            },
            'check_point_2': {
                'percentage': 0.280594,
                'position': {
                    'file': 'install.log',
                    'line': 'Installing libgcc-4.4.7-4.el6.x86_64'
                }
            },
            'check_point_3': {
                'percentage': 0.41,
                'position': {
                    'file': 'anaconda.log',
                    'line': 'leaving (1) step installpackages'
                }
            },
            'check_point_4': {
                'percentage': 0.55405,
                'position': {
                    'file': 'chef-client.log',
                    'line': 'Processing directory[/var/cache/quantum] action'
                }
            },
            'check_point_5': {
                'percentage': 1.0,
                'position': {
                    'file': 'chef-client.log',
                    'line': 'Chef Run complete in 1449.433415826 seconds'
                }
            }
        }
        # Order in which the installer writes its log files.
        file_order = {
            1: 'sys.log',
            2: 'anaconda.log',
            3: 'install.log',
            4: 'chef-client.log'
        }

        class _AddToFile:
            """Builds {filename: [lines]} describing log content that would
            exist at the requested check point."""

            def __init__(in_self, line, file, check_point):
                in_self.line = line
                in_self.file = file
                in_self.check_point = check_point

            def _get_content(in_self):
                files_to_use = []
                result = {}
                if in_self.check_point == 'check_point_2':
                    # Interleaved state: anaconda paused at installpackages
                    # while install.log has its first entry.
                    file_lines_sys = []
                    file_lines_anaconda = []
                    file_lines_install = []
                    for index, log_line in (
                        file_line_mapping['sys.log'].items()
                    ):
                        file_lines_sys.append(log_line)
                    result['sys.log'] = file_lines_sys
                    for index, log_line in (
                        file_line_mapping['anaconda.log'].items()
                    ):
                        file_lines_anaconda.append(log_line)
                        if index == 12:
                            break
                    result['anaconda.log'] = file_lines_anaconda
                    for index, log_line in (
                        file_line_mapping['install.log'].items()
                    ):
                        file_lines_install.append(log_line)
                        if index == 1:
                            break
                    result['install.log'] = file_lines_install
                    return result
                elif in_self.check_point == 'check_point_3':
                    # anaconda just left installpackages; install.log done.
                    file_lines_sys = []
                    file_lines_anaconda = []
                    file_lines_install = []
                    for index, log_line in (
                        file_line_mapping['sys.log'].items()
                    ):
                        file_lines_sys.append(log_line)
                    result['sys.log'] = file_lines_sys
                    for index, log_line in (
                        file_line_mapping['anaconda.log'].items()
                    ):
                        file_lines_anaconda.append(log_line)
                        if index == 13:
                            break
                    result['anaconda.log'] = file_lines_anaconda
                    for index, log_line in (
                        file_line_mapping['install.log'].items()
                    ):
                        file_lines_install.append(log_line)
                    result['install.log'] = file_lines_install
                    return result
                else:
                    # Generic case: all files before the target are complete;
                    # the target file stops at the check-point line.
                    for index, value in file_order.items():
                        files_to_use.append(value)
                        if value == in_self.file:
                            break
                    for file_to_use in files_to_use:
                        file_lines = []
                        for index, log_line in (
                            file_line_mapping[file_to_use].items()
                        ):
                            file_lines.append(log_line)
                        result[file_to_use] = file_lines
                    current_file_lines = []
                    for index, log_line in (
                        file_line_mapping[in_self.file].items()
                    ):
                        current_file_lines.append(log_line)
                        if log_line == in_self.line:
                            break
                    result[in_self.file] = current_file_lines
                    return result

        tmp_logdir = os.path.join(self.tmp_logpath, 'test_clusterhost')
        tmp_logdir_chef = os.path.join(
            self.tmp_logpath,
            'test_clusterhost.test_cluster'
        )
        if not os.path.exists(tmp_logdir):
            os.makedirs(tmp_logdir)
        if not os.path.exists(tmp_logdir_chef):
            os.makedirs(tmp_logdir_chef)
        line = self.check_points[check_point]['position']['line']
        file = self.check_points[check_point]['position']['file']
        add_to_file = _AddToFile(line, file, check_point)
        raw_files = add_to_file._get_content()
        for filename, raw_file in raw_files.items():
            if filename == 'chef-client.log':
                target_log = os.path.join(tmp_logdir_chef, filename)
            else:
                target_log = os.path.join(tmp_logdir, filename)
            with open(target_log, 'w') as f:
                for single_line in raw_file:
                    f.write(single_line + '\n')
                # BUGFIX: removed a bare `f.close` (missing parentheses made
                # it a no-op attribute access); the `with` block closes f.

    def _mock_lock(self):
        """Replace util.lock with a no-op context manager for the test."""
        @contextmanager
        def _lock(lock_name, blocking=True, timeout=10):
            try:
                yield lock_name
            finally:
                pass
        self.lock_backup_ = util.lock
        util.lock = mock.Mock(side_effect=_lock)

    def _unmock_lock(self):
        """Restore the real util.lock saved by _mock_lock()."""
        util.lock = self.lock_backup_

    def setUp(self):
        """Point settings at a temp log dir and an in-memory sqlite DB."""
        super(TestProgressCalculator, self).setUp()
        parent_path = os.path.abspath(os.path.join(
            os.path.dirname(__file__), "../../../.."
        ))
        setting.CONFIG_DIR = os.path.join(parent_path, 'conf')
        logsetting.init()
        self._mock_lock()
        database.init('sqlite://')
        database.create_db()
        self.backup_cobbler_installation_dir = (
            setting.COBBLER_INSTALLATION_LOGDIR
        )
        self.backup_chef_installation_dir = setting.CHEF_INSTALLATION_LOGDIR
        self.backup_installation_dir = setting.INSTALLATION_LOGDIR
        self.tmp_logpath = os.path.join('/tmp/mocklogs', str(uuid.uuid4()))
        setting.COBBLER_INSTALLATION_LOGDIR = self.tmp_logpath
        setting.CHEF_INSTALLATION_LOGDIR = self.tmp_logpath
        setting.INSTALLATION_LOGDIR = {
            'CobblerInstaller': setting.COBBLER_INSTALLATION_LOGDIR,
            'ChefInstaller': setting.CHEF_INSTALLATION_LOGDIR
        }
        # progress_calculator caches settings at import time, so reload it
        # after the setting overrides above.
        reload(progress_calculator)

    def tearDown(self):
        """Undo the setting overrides and drop the test database."""
        super(TestProgressCalculator, self).tearDown()
        self._unmock_lock()
        setting.COBBLER_INSTALLATION_LOGDIR = (
            self.backup_cobbler_installation_dir
        )
        setting.CHEF_INSTALLATION_LOGDIR = self.backup_chef_installation_dir
        setting.INSTALLATION_LOGDIR = self.backup_installation_dir
        database.drop_db()

    def test_update_progress_checkpoint1(self):
        """Progress at check point 1 matches the expected percentage."""
        self._prepare_database()
        self._file_generator('check_point_1')
        update_progress.update_progress()
        clusterhost_state = cluster.get_clusterhost_state(
            self.clusterhost_id,
            user=self.user_object,
        )
        self.assertAlmostEqual(
            clusterhost_state['percentage'],
            self.check_points['check_point_1']['percentage']
        )

    def test_update_progress_checkpoint2(self):
        """Progress at check point 2 matches the expected percentage."""
        self._prepare_database()
        self._file_generator('check_point_2')
        update_progress.update_progress()
        clusterhost_state = cluster.get_clusterhost_state(
            self.clusterhost_id,
            user=self.user_object,
        )
        self.assertAlmostEqual(
            clusterhost_state['percentage'],
            self.check_points['check_point_2']['percentage']
        )

    def test_update_progress_checkpoint3(self):
        """Progress at check point 3 matches the expected percentage."""
        self._prepare_database()
        self._file_generator('check_point_3')
        update_progress.update_progress()
        clusterhost_state = cluster.get_clusterhost_state(
            self.clusterhost_id,
            user=self.user_object,
        )
        self.assertAlmostEqual(
            clusterhost_state['percentage'],
            self.check_points['check_point_3']['percentage']
        )

    def test_update_progress_checkpoint4(self):
        """Progress at check point 4 matches the expected percentage."""
        self._prepare_database()
        self._file_generator('check_point_4')
        update_progress.update_progress()
        clusterhost_state = cluster.get_clusterhost_state(
            self.clusterhost_id,
            user=self.user_object,
        )
        self.assertAlmostEqual(
            clusterhost_state['percentage'],
            self.check_points['check_point_4']['percentage']
        )

    def test_update_progress_checkpoint5(self):
        """Completed install reports exactly 100%."""
        self._prepare_database()
        self._file_generator('check_point_5')
        update_progress.update_progress()
        clusterhost_state = cluster.get_clusterhost_state(
            self.clusterhost_id,
            user=self.user_object,
        )
        self.assertEqual(
            clusterhost_state['percentage'],
            self.check_points['check_point_5']['percentage']
        )
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    flags.init()
    logsetting.init()
    unittest2.main()
|
import pandas as pd
import numpy as np
import pickle
import matplotlib.pyplot as plt
from collections import Counter
import pprint
#from sklearn.covariance import EllipticEnvelope#调用离群点检测算法
#from sklearn.ensemble import IsolationForest#孤立森林算法异常检测
#from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors import LocalOutlierFactor
from sklearn.cross_validation import train_test_split #异常检测算法中对数据集进行拆分
from sklearn.externals import joblib
# from scipy.stats import poisson#导入现成的泊松分布函数
def gathering(picklename):
    """Aggregate the capture CSV files into one matrix and pickle it.

    Each CSV is read with its first row skipped, NaNs filled with 0, and all
    rows stacked vertically.  Returns `picklename`.
    """
    files = ["下载.csv", "游戏.csv", "游戏2.csv", "码代码.csv", "码代码2.csv"]
    frames = []
    for filename in files:
        data = pd.read_csv(filename, skiprows=1, engine='python')
        data.fillna(value=0, inplace=True)  # replace missing cells with 0
        frames.append(np.array(data))
    # Stack once at the end instead of np.vstack inside the loop: avoids the
    # O(n^2) copying and the dummy all-zero first row the old code stripped.
    datas = np.vstack(frames)
    with open(picklename, "wb") as fh:  # was left unclosed before
        pickle.dump(datas, fh)
    return picklename
def length_analyse():
    """Print summary statistics of packet lengths and plot them."""
    try:
        # Reuse the cached length column when it exists.
        with open("lengthdata", "rb") as fh:
            length = pickle.load(fh)
    except Exception:
        # Cache miss: extract column 5 from the aggregated matrix.
        # (Files are now closed via `with`; the old code leaked the handles.)
        with open("datas", "rb") as fh:
            datas = pickle.load(fh)
        print(datas.shape)
        length = list(datas[:, 5])
        with open("lengthdata", "wb") as fh:
            pickle.dump(length, fh)
    print("最大包长:", max(length))
    print("最小包长:", min(length))
    print("平均包长:", np.mean(length))
    print("包长方差:", np.var(length))
    draw_picture(length, "length analyse")
def srcport_analyse():
    """Print source-port extrema and plot the source-port distribution."""
    try:
        with open("srcportdata", "rb") as fh:
            srcport = pickle.load(fh)
    except Exception:
        # Cache miss: extract column 6 and persist it.  Files are closed via
        # `with`; the old code leaked every handle it opened.
        with open("datas", "rb") as fh:
            datas = pickle.load(fh)
        srcport = list(datas[:, 6])
        print("最大端口:", max(srcport))
        print("最小端口:", min(srcport))
        with open("srcportdata", "wb") as fh:
            pickle.dump(srcport, fh)
    draw_picture(srcport, "srcport analyse")
def dstport_analyse():
    """Print destination-port extrema and plot the distribution."""
    try:
        with open("dstportdata", "rb") as fh:
            dstport = pickle.load(fh)
    except Exception:
        # Cache miss: extract column 7 and persist it.  Files are closed via
        # `with`; the old code leaked every handle it opened.
        with open("datas", "rb") as fh:
            datas = pickle.load(fh)
        dstport = list(datas[:, 7])
        print("最大端口:", max(dstport))
        print("最小端口:", min(dstport))
        with open("dstportdata", "wb") as fh:
            pickle.dump(dstport, fh)
    draw_picture(dstport, "dstport analyse")
def protocol_analyse():
    """Analyse packet protocols through a persisted protocol->int mapping."""
    try:
        with open("protodata", "rb") as fh:
            protocol = pickle.load(fh)
    except Exception:
        with open("datas", "rb") as fh:
            datas = pickle.load(fh)
        protocols = datas[:, 4]
        Proto_set = set(protocols)
        # Persist the mapping: set iteration order differs between runs, so
        # without the pickle each run would encode protocols differently.
        pro_dic = {}
        for index, pro_name in enumerate(Proto_set, 1):
            pro_dic[pro_name] = index
        with open("protocol_dict", "wb") as fh:
            pickle.dump(pro_dic, fh)
        protocol = [pro_dic[i] for i in protocols]
        with open("protodata", "wb") as fh:
            pickle.dump(protocol, fh)
    # Show the forward mapping and a reverse lookup table.
    with open("protocol_dict", "rb") as fh:
        pro_dic = pickle.load(fh)
    print("转换字典\n", pro_dic, "\n\n\n")
    search_dic = dict(zip(pro_dic.values(), pro_dic.keys()))
    print("查询字典\n", search_dic, "\n\n\n")
    draw_picture(protocol, "protocol analyse")
def srcip():
    """Analyse source IPs through a persisted ip->int mapping."""
    try:
        with open("srcipdata", "rb") as fh:
            srcips = pickle.load(fh)
    except Exception:
        with open("datas", "rb") as fh:
            datas = pickle.load(fh)
        srcipdatas = datas[:, 2]
        # Persist the mapping so encodings stay stable across runs.
        srcip_dic = {}
        for index, one_ip in enumerate(set(srcipdatas), 1):
            srcip_dic[one_ip] = index
        with open("srcip_dict", "wb") as fh:
            pickle.dump(srcip_dic, fh)
        srcip_changed_data = [srcip_dic[i] for i in srcipdatas]
        with open("srcipdata", "wb") as fh:
            pickle.dump(srcip_changed_data, fh)
        # BUGFIX: on this cache-miss path `srcips` was never assigned, so
        # the draw_picture() call below raised NameError.
        srcips = srcip_changed_data
    with open("srcip_dict", "rb") as fh:
        srcip_dic = pickle.load(fh)
    print("转换字典\n", srcip_dic, "\n\n\n")
    search_dic = dict(zip(srcip_dic.values(), srcip_dic.keys()))
    print("查询字典\n", search_dic, "\n\n\n")
    draw_picture(srcips, "srcip analyse")
def dstip():
    """Analyse destination IPs.

    NOTE(review): unimplemented stub — mirror srcip() (column 3 of the
    aggregated matrix, presumably) when this is filled in.
    """
def draw_picture(dataset, item):
    """Show a histogram and then a box plot of `dataset`, titled `item`."""
    # Histogram (100 bins).
    plt.title(item)
    plt.hist(dataset, 100, alpha=0.5)
    plt.grid(True)
    plt.show()
    # Box plot with custom colours for box, outliers, mean and median.
    plt.title(item)
    plt.boxplot(x=dataset,
                patch_artist=True,   # fill the box with a custom colour
                showmeans=True,      # mark the mean as a point
                boxprops={'color': 'blue', 'facecolor': '#9999ff'},
                flierprops={'marker': 'o', 'markerfacecolor': 'red', 'color': 'red'},
                meanprops={'marker': 'D', 'markerfacecolor': 'indianred'},
                medianprops={'linestyle': '--', 'color': 'orange'})
    plt.show()
    # BUGFIX: removed a dead triple-quoted "old code" string that annotated
    # outliers on a variable `p` this function never defined.
def Anomaly_Detection():
    """Run LocalOutlierFactor over the five pickled feature columns and
    pickle the rows flagged as outliers."""
    pickle_datalist = ["srcipdata", "protodata", "srcportdata", "lengthdata", "dstportdata"]
    columns = []
    for name in pickle_datalist:
        with open(name, "rb") as fh:
            columns.append(pickle.load(fh))
    # Build the feature matrix from the data itself instead of the old
    # hard-coded np.zeros((657254, 5)) shape, which broke on any other
    # capture size.
    X = np.column_stack(columns)
    X_train, X_test = train_test_split(X, random_state=10)  # train/test split
    clf = LocalOutlierFactor()
    clf.fit(X_train)
    # fit_predict refits on X and labels each row: -1 = outlier, 1 = inlier.
    y_pred_test = clf.fit_predict(X)
    # Select outlier rows with a boolean mask.  The old row-by-row vstack
    # loop was O(n^2) and left a spurious all-zero first row in the output.
    predicdata = X[np.asarray(y_pred_test) == -1]
    with open("predicdata", "wb") as fh:
        pickle.dump(predicdata, fh)
    with open("predicdata", "rb") as fh:
        datas = pickle.load(fh)
    print(len(datas))
    print(datas[0:5, :])
if __name__ == "__main__":
#gathering("datas")
#length_analyse()
srcport_analyse()
#dstport_analyse()
#protocol_analyse()
#srcip()
#Anomaly_Detection()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 20/1/6 14:41
# @Author : Chaos
# @File : tasks.py
import json
import datetime
import re
import sys
import os
import socket
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from plugin.BBScan.bbscan import check_white_list, Web, check_black_list
from lib.DBPool import redis_conn
from lib.log import log
from lib.common import get_hostname_port, http_request, decode_text, get_host_ip, check_http_status, http_detect
from lib.mongo import db
from lib.checkCDN import is_cdn
from celery import Celery
# Celery app: task messages go through redis db 6, results are kept in db 7.
app = Celery('tasks', broker="redis://127.0.0.1:6379/6", backend='redis://127.0.0.1:6379/7')
@app.task(name="tasks.before_scan")
def scan_init(task_name, task_id, target, tag_name):
"""
3个模块的初始化判断
1. 去除多余的空格
2. 校验是不是IP或者域名
3. 域名是否可以解析IP, IP是否可以连通
4. 如果可以连通,然后判断是否开启了端口扫描,可以的话进入到端口扫描流程
5. 判断运行的service
6. 根据运行的service,如果是http的话,进入到bbscan
7. 获取运行的service,进入到poc扫描,因为POC扫描只有两种service可识别,所以只需要判断是否是http服务就可以了。
:return:
"""
target = target.strip()
scheme, host, port = get_hostname_port(target)
target = '%s://%s:%s' % (scheme, host, port)
ips, hostname = get_host_ip(target) #获取hostname和ip地址
if ips:
ip = ips[0]
else:
return
iscdn = is_cdn(ip)
if len(ips) > 1:
log.info("Multi ip: %s", ips)
if iscdn:
db.portInfo.update_one({"task_id":task_id, "task_name":task_name},{"$set": {"url": hostname,"cdn": true}})
log.info("CDN Check True: %s", target)
ports = list(db.task.find({"task_id":task_id},{"_id":0,"ports":1}))[0]['ports']
if ports and not iscdn:
_ = {
"task_name": task_name,
"task_id": task_id,
"tag_name": tag_name,
"ip": ip,
"hostname": hostname,
"ports": ports
}
log.info("Task Port Scan Begin %s", hostname)
redis_conn.lpush("Task_Port_Scan", json.dumps(_))
# service = None
# alive = None
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(5.0)
try:
if s.connect_ex((host, int(port))) == 0:
log.info("Port Open %s:%s", host, port)
if port == 443:
target = 'https://{}'.format(hostname)
service = 'https'
elif port == 80:
target = 'http://{}'.format(hostname)
service = 'http'
else:
service, content = http_detect(host, port) # 端口开放的时候,判断运行的是什么服务,用来丢给POC扫描流程
if service == 'http':
if 'The plain HTTP request was sent to HTTPS port' in content:
service = 'https'
status_code, headers, content = http_request(target)
if status_code in (301, 302) and headers['Location']:
_ = {
"task_name": task_name,
"task_id": task_id,
"target": headers['Location'],
"tag_name": tag_name
}
log.info("Http %s redirect to %s", target, headers['Location'])
redis_conn.lpush("before_scan", json.dumps(_))
alive = True
# content = content.eocode() if content is isinstance(content, str) else content
if check_white_list(content): # 白名单储存到数据库
log.error("White List Check True")
db.bbscan.update_one({"task_id":task_id,"task_name":task_name,"tag_name":tag_name},
{"$set":{"vul_Type": "white list", "url": target}})
# return
# if service in ('http', 'https'): # http端口保存到资产数据库
# m = re.search('<title>(.*?)</title>', content)
# title = m.group(1) if m else ''
# db.portInfo.update_one({"task_id": task_id, "task_name": task_name,"tag_name":tag_name, "ip":ip, "port":port},
# {"$set":{"server": service,"banner": content,"title":title,"hostname":hostname,"url":target}},upsert=True)
else:
log.info("Port Closed %s:%s", host, port)
service = False
alive = False
except Exception:
log.error('[Warning] Fail to connect to %s:%s' % (host, port), exc_info=True)
return
finally:
s.close()
pocs = list(db.task.find({"task_id":task_id},{"_id":0,"pocs":1}))[0]['pocs']
if alive: # 主机是否存活
log.info("host %s is alive, Check Scan options", target)
if pocs and service: # service表示是否开启运行了http还是unknown
log.info("Begin POC Scan %s", target)
_ = {
"task_name": task_name,
"task_id": task_id,
"hostname": host,
"port": port,
"pocs": pocs,
"tag_name": tag_name,
"service": service
}
redis_conn.lpush("Task_Poc_Scan", json.dumps(_))
elif list(db.task.find({"task_id":task_id},{"_id":0,"BBScan_flag":1}))[0]['BBScan_flag'] and service in ('http', 'https'):
_ = {
"task_name": task_name,
"task_id": task_id,
"target": target,
"tag_name": tag_name
}
redis_conn.lpush("BBScan_init", json.dumps(_))
else:
log.info("[Warning]: No POC Selected: %s", target)
pass
@app.task(name="tasks.spider_init")
def spider_init(task_name, task_id, target, tag_name):
try:
scheme, host, port = get_hostname_port(target)
# if check_port_open(host, port): # 检测端口连通性
# log.info("Check port is Open: %s:%s", host, port)
check_http_status(scheme, host, port, task_name, task_id, tag_name)
# else:
# log.info("Check port is Close: %s:%s", host, port)
except:
log.error("Spider_init Exception", exc_info=True)
# check_http_status(scheme, host, port,task_name, task_id) # 检查http服务的状态
@app.task(name='tasks.spider_first')
def bbscan_parse_uri(scheme, ip, port, title, content, status_code, header, task_name, task_id, tag_name):
    """Whitelist-check the page, then parse second-level paths from
    `content` and enqueue the combined URLs for scanning.

    The whitelist check runs first so whitelisted pages are recorded and
    never crawled further.
    """
    try:
        if check_white_list(content):
            data = {"ip": ip, "port": port, "vul_title": title}
            redis_conn.lpush("VulScan", json.dumps(data))  # TODO persist BBScan whitelist hits to the DB
            # return
        else:
            log.info("starting parse uri for %s://%s:%s", scheme, ip, port)
            web = Web(scheme, ip, port, title, content, status_code, header, task_name, task_id, tag_name)
            web.init_run()
    except Exception:
        # BUGFIX: narrowed from a bare `except:`.
        log.error("celery task parse_uri error for %s://%s:%s", scheme, ip, port, exc_info=True)
@app.task(name='tasks.spider_second')
def bbscan(url, tag, status_to_match, content_type, content_type_no, vul_type, status_404, len_404_content, task_name, task_id, tag_name):
    """Fetch `url` and decide whether the response matches a BBScan rule.

    The early returns below are ordered filters: cheap content checks first,
    then the rule's status/tag matching, then a soft-404 similarity check.
    A hit is stored in db.bbscan, deduplicated per (task_id, tag_name, url).
    """
    status_to_match = int(status_to_match)
    status_404 = int(status_404)
    try:
        status_code, headers, content = http_request(url)
        cur_content_type = headers['Content-Type']
        status = status_code
        content = decode_text(content)
        cur_content_length = len(content)
        if check_black_list(content):  # blacklisted page content
            return
        if 0 <= int(cur_content_length) <= 10:  # text too short
            return
        if cur_content_type.find('image/') >= 0:  # exclude image
            return
        if content_type != 'application/json' and cur_content_type.find('application/json') >= 0 and \
                not url.endswith('.json'):  # invalid json
            return
        if content_type and cur_content_type.find(content_type) < 0 \
                or content_type_no and cur_content_type.find(content_type_no) >= 0:
            return  # content type mismatch
        if tag and content.find(tag) < 0:
            return  # tag mismatch
        if check_white_list(content):
            valid_item = True
        else:
            if status_to_match == 206 and status != 206:
                return
            if status_to_match in (200, 206) and status in (200, 206):
                valid_item = True
            elif status_to_match and status != status_to_match:
                return
            elif status in (403, 404) and status != status_to_match:
                return
            else:
                valid_item = True
        if status == status_404 and url != '/':
            # Soft-404 heuristic: pages roughly the same size as the known
            # 404 page are treated as 404s and dropped.
            len_doc = len(content)
            len_sum = int(len_404_content) + len_doc
            if len_sum == 0 or (0.4 <= float(len_doc) / len_sum <= 0.6):
                return
        if valid_item:
            vul_type = vul_type.replace('_', ' ')
            m = re.search('<title>(.*?)</title>', content)
            title = m.group(1) if m else ''
            scheme, host, port = get_hostname_port(url)
            vul_url = "%s://%s:%s" % (scheme, host, port)
            first_find_date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            if db.bbscan.find_one({"task_id": task_id, "tag_name": tag_name, "vul_url": url}):
                # Already recorded for this task: refresh last_find_date only.
                log.info("Get Vul Repeat %s", {"task_id": task_id, "url": url})
                db.bbscan.update({"task_id": task_id, "url": url}, {"$set": {"last_find_date": first_find_date}})
            else:
                log.info("Get Vul Success %s", {"task_id": task_id, "url": url})
                result = {"task_name": task_name, "task_id": task_id, "tag_name": tag_name, "vul_url": url, "url": vul_url, "vul_Type": vul_type, "status": status, "title": title,
                          "first_find_date": first_find_date, "last_find_date": first_find_date}
                db.bbscan.insert(result)
    except TypeError:
        pass  # http_request yielded unusable fields; treat as no match
    except KeyError:
        pass  # e.g. response without Content-Type; treat as no match
    except Exception:
        # BUGFIX: narrowed from a bare `except:`.
        log.error("BBScan::process_request error %s", url, exc_info=True)
|
from geopy.geocoders import Nominatim
def get_location(uni):
    """Geocode a university name, retrying with common name variants.

    Tries the raw name first, then "University of <name>", then
    "<name> University".  Returns the first successful geopy Location,
    or None when every variant fails.
    """
    geolocator = Nominatim(user_agent="my_map")
    # Evaluate candidates lazily so we stop at the first hit instead of
    # always issuing all three network lookups.  BUGFIX: the old code glued
    # the prefix/suffix on without a space ("University ofX" / "XUniversity"),
    # which could never match.
    for query in (uni, "University of " + uni, uni + " University"):
        location = geolocator.geocode(query)
        if location is not None:
            return location
    return None
import pprint

lat_long_dict = {}
# Geocode every player's university and collect {player: {latitude, longitude}}.
for player, university in players_uni_dict.items():
    try:
        location = get_location(university)
        lat_long = {
            "latitude": location.latitude,
            "longitude": location.longitude,
        }
        lat_long_dict[player] = lat_long
    except Exception:
        # Geocoding failures (timeouts, unknown names, None results) just
        # skip that player instead of aborting the whole run.
        continue
print(lat_long_dict)
# BUGFIX: was `pprint.PrettifyPrinter` (no such class — PrettyPrinter) and
# the garbled call `pp.pprint·lat_long_dict)`; pprint was also never imported.
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(lat_long_dict)
import csv

import pandas as pd  # BUGFIX: pd was used below but never imported
import plotly
import plotly.plotly as py

# NOTE(review): credentials are committed in source — move them to env vars.
plotly.tools.set_credentials_file(username='<YungBuckUpNext>',
                                  api_key='<vHbW9x9M2nMMMj3Zbrhn>')

# BUGFIX: removed the stray quote that made this line a syntax error.
player_lat_long = pd.read_csv('player_file.csv')

# Scatter-geo trace of player locations (missing commas fixed).
data = [dict(
    type='scattergeo',
    locationmode='USA-states',
    lon=player_lat_long['Longitude'],
    lat=player_lat_long['Latitude'],
    text=player_lat_long['Player'] + ' ' + player_lat_long['Location'],
)]

layout = dict(
    title='49ers',
    geo=dict(
        scope='usa',
        projection=dict(type='albers usa'),
        showland=True,  # BUGFIX: missing comma after this entry
        landcolor="rgb(250, 250, 250)",
        subunitcolor="rgb(217, 217, 217)",
        countrycolor="rgb(217, 217, 217)",
        countrywidth=0.5,
        subunitwidth=0.5
    )
)
|
# Generated by Django 2.2.6 on 2020-01-25 17:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Widen the four History char fields to max_length=250.

    NOTE(review): amount and time are stored as CharField — consider
    Decimal/DateTime fields in a follow-up migration.
    """

    dependencies = [
        ('history', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='history',
            name='agent',
            field=models.CharField(max_length=250),
        ),
        migrations.AlterField(
            model_name='history',
            name='amount',
            field=models.CharField(max_length=250),
        ),
        migrations.AlterField(
            model_name='history',
            name='time',
            field=models.CharField(max_length=250),
        ),
        migrations.AlterField(
            model_name='history',
            name='trans_id',
            field=models.CharField(max_length=250),
        ),
    ]
|
#!/usr/bin/env pybricks-micropython
import sys
import os
from pybricks import ev3brick as brick
from pybricks.ev3devices import (Motor, TouchSensor, ColorSensor,
InfraredSensor, UltrasonicSensor, GyroSensor)
from pybricks.parameters import Port, Stop, Direction, Button, Color
from pybricks.tools import wait, StopWatch
from pybricks.robotics import DriveBase
from pybricks.ev3devices import Motor
sys.path.append('../shared')
import robot_setup
from robot_setup import left_motor
from robot_setup import right_motor
from robot_setup import robot
from robot_setup import rack_motor
from robot_setup import crane_motor
from robot_setup import gyro
from robot_setup import touch_sensor
from robot_setup import color_sensor_back
from robot_setup import color_sensor_right
from robot_setup import color_sensor_center
from robot_setup import touch_sensor
from robot_setup import ultrasound
from robot_setup import SOUND_VOLUME
from robot_setup import WHEEL_DIAMETER_MM
from robot_setup import AXLE_TRACK_MM
from robot_setup import SENSOR_TO_AXLE
from robot_setup import WHEEL_CIRCUM_MM
from robot_setup import DEGREES_PER_MM
import shared_all
##### Do not change above this line ##########################################
#(OLD RUN, USE FUNCTION 'run')#########
def runold():
    """Legacy run routine — superseded by run() below."""
    shared_all.move_rack_to_floor()
    shared_all.move_straight(distance_mm=60, speed_mm_s=120)
    # Back off a little for as long as the rack motor keeps stalling.
    while shared_all.did_motor_stall(motor=rack_motor, max_degrees=50, speed=320):
        shared_all.move_reverse(max_distance=6, speed_mm_s=20)
    shared_all.move_crane_to_angle(motor=rack_motor, target_angle=90)
    shared_all.drive_raising_crane(
        duration_ms=900,
        robot_distance_mm=80,
        robot_turn_angle=0,
        motor=rack_motor,
        crane_angle=-20,
    )
    shared_all.move_crane_to_angle(motor=rack_motor, target_angle=120)
    shared_all.move_reverse(max_distance=20, speed_mm_s=100)
def alignold():
    """Legacy alignment: raise the crane fully, then drive forward."""
    shared_all.move_crane_to_top(crane_motor)
    shared_all.move_straight(distance_mm=550, speed_mm_s=160)
def runold2():
    """Legacy second run: drive out, drop the crane, hook, then back out."""
    shared_all.move_straight(distance_mm=550, speed_mm_s=160)
    shared_all.move_crane_to_floor(motor=rack_motor)
    shared_all.drive_raising_crane(
        duration_ms=50,
        robot_distance_mm=-37,
        robot_turn_angle=0,
        motor=rack_motor,
        crane_angle=80,
    )
    shared_all.move_straight(distance_mm=550, speed_mm_s=-160)
#(NEW RUN, DO NOT USE FUNCTION 'runold') ####
def align():
    """Line the robot up on the mat (gyro-guided, stop on green) for run()."""
    shared_all.calibrate_gyro(-45)
    shared_all.move_straight_target_direction(
        gyro=gyro, distance_mm=480, speed_mm_s=170, target_angle=-45)
    shared_all.move_to_color(
        color_sensor=color_sensor_center,
        stop_on_color=Color.GREEN, max_distance_mm=40)
    shared_all.move_straight(distance_mm=30, speed_mm_s=40)
def run():
    """Main run: hook the load, drag it backwards while raising the crane."""
    shared_all.move_crane_down(rack_motor, 70)
    shared_all.move_hook_to_floor()
    shared_all.move_straight(distance_mm=20, speed_mm_s=-50)
    shared_all.drive_raising_crane(
        duration_ms=800, robot_distance_mm=-20, robot_turn_angle=0,
        motor=rack_motor, crane_angle=20,
    )
    shared_all.drive_raising_crane(
        duration_ms=1900, robot_distance_mm=-70, robot_turn_angle=0,
        motor=rack_motor, crane_angle=30,
    )
    shared_all.move_straight(distance_mm=40, speed_mm_s=-130)
    shared_all.move_crane_up(rack_motor, 30)
    # Reverse back to base.
    shared_all.move_straight(distance_mm=600, speed_mm_s=-200)
    # shared_all.move_rack_to_top()
## Below lines only for testing
## Comment out when done testing. Do not upload to Git hub without commenting.
# shared_all.calibrate_gyro(-90)
# align()
# runold()
|
import os
import pytest
from . import db_setup
@pytest.fixture(scope='session', autouse=True)
def session_fixture():
    """Session-wide setup: point the app at localstack and seed the local DB."""
    os.environ['ENV'] = 'local'
    os.environ['ENDPOINT'] = 'http://localhost:4566'
    os.environ['CORS'] = 'http://localhost:3001'
    db_setup.for_local()
    yield
    # no session-wide teardown needed
@pytest.fixture(scope='module', autouse=True)
def module_fixture():
    """Per-module setup/teardown hook (currently a no-op placeholder)."""
    yield
@pytest.fixture(scope='class', autouse=True)
def class_fixture():
    """Per-class setup/teardown hook (currently a no-op placeholder)."""
    yield
@pytest.fixture(scope='function', autouse=True)
def function_fixture():
    """Per-test setup/teardown hook (currently a no-op placeholder)."""
    yield
@pytest.fixture(scope='module', autouse=True)
def init():
    """Print the active ENV at module start.

    NOTE(review): a second module-scoped autouse fixture overlaps with
    module_fixture above — consider merging them.
    """
    print(os.environ['ENV'])
    # print(os.environ['ENDPOINT'])
|
import argparse
import logging
def add(x, y):
    """Return the sum of x and y."""
    total = x + y
    return total
def subtract(x, y):
    """Return x minus y."""
    difference = x - y
    return difference
def multiply(x, y):
    """Return the product of x and y."""
    product = x * y
    return product
def devide(x, y):
    """Return x divided by y (true division; raises ZeroDivisionError on y == 0).

    NOTE(review): the name is a typo for "divide"; kept for caller compatibility.
    """
    quotient = x / y
    return quotient
def main():
    """Parse two integers from the command line and print all four operations."""
    parser = argparse.ArgumentParser()
    parser.add_argument("number1", type=int, help="add first number")
    # BUGFIX: help text said "add first number" for both arguments.
    parser.add_argument("number2", type=int, help="add second number")
    args = parser.parse_args()
    n1 = args.number1
    n2 = args.number2
    add_result = add(n1, n2)
    print("Add: {} + {} = {}".format(n1, n2, add_result))
    subtract_result = subtract(n1, n2)
    print("subtract: {} - {} = {}".format(n1, n2, subtract_result))
    multiply_result = multiply(n1, n2)
    print("multiply: {} * {} = {}".format(n1, n2, multiply_result))
    devide_result = devide(n1, n2)
    print("devide: {} / {} = {}".format(n1, n2, devide_result))
if __name__ == "__main__":
main()
|
# import math
#
#
# def solution(progresses, speeds):
# answer = []
# days = []
# for i in range(len(progresses)):
# days.append(math.ceil((100 - progresses[i]) / speeds[i]))
#
# stack = []
# for day in days:
# if len(stack) == 0:
# stack.append(day)
# else:
# count = 0
# while len(stack) != 0 and stack[-1] < day and stack[0] < day:
# stack.pop()
# count += 1
#
# stack.append(day)
#
# if count != 0:
# answer.append(count)
#
# answer.append(len(stack))
# return answer
#
#
# print(solution([93, 30, 55], [1, 30, 5]))
import math
def solution(progresses, speeds):
    """Given each feature's progress (%) and daily speed, return how many
    features are deployed together on each deployment day (features can
    only be deployed in queue order)."""
    remaining = [math.ceil((100 - p) / s) for p, s in zip(progresses, speeds)]
    releases = []
    pending = []
    for days in remaining:
        if not pending:
            pending.append(days)
            continue
        shipped = 0
        # Everything queued before a slower feature ships together with it.
        while pending and pending[0] < days:
            pending.pop()
            shipped += 1
        pending.append(days)
        if shipped:
            releases.append(shipped)
    if pending:
        releases.append(len(pending))
    return releases
# Demo runs against the known examples (expected [2, 1] and [1, 3, 2]).
print(solution([93, 30, 55], [1, 30, 5]))
print(solution([95, 90, 99, 99, 80, 99], [1, 1, 1, 1, 1, 1]))
|
# Naive algorithm which is O(n^2)
def integers_sum(S, x):
    """Return True iff two elements of S at distinct indices sum to x.

    Naive O(n^2) pairwise check.
    """
    n = len(S)
    return any(S[i] + S[j] == x
               for i in range(n)
               for j in range(i + 1, n))
# Binary search. if not found return -1
def binary_search(A, p, q, v):
    """Recursive binary search for v in sorted A[p..q]; return an index of v
    or -1 if absent.

    BUG FIX: the midpoint used `(p + q) / 2`, which is float division under
    Python 3 and raises TypeError when used as an index; `//` is correct on
    both Python 2 and 3.
    """
    if p > q:
        return -1
    mid = (p + q) // 2
    if A[mid] < v:
        return binary_search(A, mid+1, q, v)
    elif A[mid] > v:
        return binary_search(A, p, mid-1, v)
    else:
        return mid
# A better algorithm
# 1. Sort S. O(n*lgn)
# 2. For each item i in S, use binary_search to find (x-i) in S. O(n*lgn)
def better_algo(S, x):
    """Return True iff two elements of S at distinct indices sum to x.

    Sorts S in place (O(n log n)), then binary-searches each element's
    complement (O(n log n) total).

    BUG FIXES vs. the original:
    - the loop returned unconditionally on its first iteration, so only the
      first element's complement was ever checked;
    - an element could match itself (e.g. S=[2], x=4); requiring the found
      index to differ from the current one fixes that, and duplicates still
      match because binary_search returns one fixed index per value.
    """
    S.sort()
    for i in range(len(S)):
        j = binary_search(S, 0, len(S)-1, x - S[i])
        if j != -1 and j != i:
            return True
    return False
# Demo (Python 2: bare `print` statements).
S = [1,2,3,4,5,6,7]
print integers_sum(S, 5)
print integers_sum(S, 8)
print better_algo(S, 5)
print better_algo(S, 50)
|
import socket
from hashlib import sha1
from random import randint
from struct import unpack
from socket import inet_ntoa
from threading import Timer, Thread
from time import sleep
from bencode import bencode, bdecode
# Well-known bootstrap nodes of the mainline BitTorrent DHT.
address=[
    ("router.bittorrent.com", 6881),
    ("dht.transmissionbt.com", 6881),
    ("router.utorrent.com", 6881)
]
# Length in bytes of a KRPC transaction id.
TID_LENGTH = 4
# Single UDP socket shared by every sender below.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
def entropy(length):
    """Return `length` random bytes as a str (Python 2: xrange/chr)."""
    return ''.join(chr(randint(0, 255)) for _ in xrange(length))
def random_id():
    """Return a random 20-byte DHT node id (SHA-1 digest of random bytes)."""
    return sha1(entropy(20)).digest()
def send_krpc(msg, address):
    """Bencode `msg` and send it to `address` over the shared UDP socket.

    Best-effort: any send error is silently ignored.
    """
    global s
    try:
        s.sendto(bencode(msg), address)
    except Exception:
        pass
def send_find_node(address, nid=None):
    """Send a KRPC find_node query (with a random target id) to `address`."""
    tid = entropy(TID_LENGTH)
    msg = dict(
        t=tid,
        y="q",
        q="find_node",
        a=dict(id=nid, target=random_id())
    )
    print msg
    send_krpc(msg, address)
def send_ping(address, nid=None):
    """Send a KRPC ping query to `address`, identifying ourselves as `nid`."""
    tid = entropy(TID_LENGTH)
    msg = dict(
        t=tid,
        y="q",
        q="ping",
        a=dict(id=nid)
    )
    print msg
    print address
    send_krpc(msg, address)
if __name__ == "__main__":
    # Ping each bootstrap node with a freshly generated node id.
    nid = random_id()
    for a in address:
        #send_find_node(a, nid)
        send_ping(a, nid)
|
# coding: utf-8
from dext.common.meta_relations import logic as meta_relations_logic
from the_tale.forum.models import Category, SubCategory
from .. import conf
from .. import prototypes
from .. import meta_relations
def prepair_forum():
    """Create the forum category and subcategory fixtures used by the tests.

    (The misspelled name "prepair" is kept — callers depend on it.)
    """
    forum_category = Category.objects.create(caption='category-1', slug='category-1')
    SubCategory.objects.create(caption=conf.settings.FORUM_CATEGORY_UID + '-caption',
                               uid=conf.settings.FORUM_CATEGORY_UID,
                               category=forum_category)
def create_post_for_meta_object(author, caption, text, meta_object, vote_by=None):
    """Create a forum post linked to `meta_object` via an IsAbout relation.

    If `vote_by` is given, also record that account's vote for the post.
    """
    post = prototypes.PostPrototype.create(author, caption, text)
    meta_relations_logic.create_relations_for_objects(meta_relations.IsAbout,
                                                     meta_relations.Post.create_from_object(post),
                                                     [meta_object])
    if vote_by:
        prototypes.VotePrototype.create(post, voter=vote_by)
|
# Add-on metadata read by Blender's add-on manager.
bl_info = {
    "name": "Rename outputs",
    "author": "Tal Hershkovich ",
    "version": (0, 1),
    "blender": (2, 72, 0),
    "location": "View3D > Tool Shelf > Render > Rename Outputs",
    "description": "replace strings of outputs in render output and compositing output nodes",
    "warning": "",
    "wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/Scripts/Render/Rename_Outputs",
    "category": "Render"}
import bpy
def replace_outputs(self, context):
    """Replace the scene's `old_string` with `new_string` in every file-output
    compositing node path and in each scene's render output filepath."""
    search = bpy.context.scene.old_string
    replacement = bpy.context.scene.new_string
    for scn in bpy.data.scenes:
        if scn.use_nodes:
            for nd in scn.node_tree.nodes:
                if nd.type == 'OUTPUT_FILE':
                    nd.base_path = nd.base_path.replace(search, replacement)
                    nd.file_slots[0].path = nd.file_slots[0].path.replace(search, replacement)
        scn.render.filepath = scn.render.filepath.replace(search, replacement)
class Rename_Output(bpy.types.Operator):
    """Rename a string in all your render and compositing outputs"""
    bl_label = "Rename outputs"
    bl_idname = "rename.outputs"
    bl_options = {'REGISTER', 'UNDO'}
    # Scene-level properties backing the panel's two text fields.
    bpy.types.Scene.old_string = bpy.props.StringProperty(name="Old String", description="The string that you want to be replaced")
    # BUG FIX: description was a copy-paste of old_string's.
    bpy.types.Scene.new_string = bpy.props.StringProperty(name="New String", description="The string that will replace the old one")

    def execute(self, context):
        replace_outputs(self, context)
        return {'FINISHED'}
class Rename_Output_Panel(bpy.types.Panel):
    """Tool-shelf panel exposing the rename operator and its two string fields.

    (The previous docstring, "Add random value to selected keyframes", was
    copy-pasted from an unrelated add-on.)
    """
    bl_label = "Rename Outputs"
    bl_idname = "renameoutputs.panel"
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'TOOLS'
    bl_category = "Render"

    def draw(self, context):
        layout = self.layout
        layout.label(text="Rename Outputs")
        layout.operator("rename.outputs")
        layout.prop(context.scene, 'old_string')
        layout.prop(context.scene, 'new_string')
def register():
    """Register the add-on's operator and panel classes with Blender."""
    for cls in (Rename_Output, Rename_Output_Panel):
        bpy.utils.register_class(cls)
def unregister():
    """Unregister the add-on's operator and panel classes."""
    for cls in (Rename_Output, Rename_Output_Panel):
        bpy.utils.unregister_class(cls)
# Allow running the add-on directly from Blender's text editor.
if __name__ == "__main__":
    register()
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-05-08 00:53
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: drop CallEmail.schema and relax the report_type FK
    (nullable, CASCADE delete, related_name 'call_schema')."""

    dependencies = [
        ('wildlifecompliance', '0187_callemail_number'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='callemail',
            name='schema',
        ),
        migrations.AlterField(
            model_name='callemail',
            name='report_type',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='call_schema', to='wildlifecompliance.ReportType'),
        ),
    ]
|
import mysql.connector
import sys
import os
#import pdb;pdb.set_trace
class createdb:
    """Create the CevaShipmentDetails database on a local MySQL server if it
    does not already exist."""

    def __init__(self):
        # Connection credentials and target database name.
        self.username = 'root'
        self.passwd = "root@123"
        self.IP_addr = 'localhost'
        self.database_name = 'CevaShipmentDetails'
        self.db_list = []       # names of databases already on the server
        self.connection = ''
        self.cursor = ''

    def user_input(self):
        """Copy the configured credentials into the attributes used to connect."""
        self.username = self.username
        self.password = self.passwd

    def set_up_db_connection(self):
        """Open the MySQL connection and create a cursor on it."""
        self.connection = mysql.connector.connect(
            host=self.IP_addr, user=self.username, passwd=self.password)
        self.cursor = self.connection.cursor()

    def create_require_db(self):
        """Create the target database unless the server already has it."""
        self.user_input()
        self.set_up_db_connection()
        try:
            self.cursor.execute("show databases;")
            for db in self.cursor:
                name = db[0]
                # BUG FIX: the original stored db[0].encode() (bytes), so the
                # str membership test below never matched and CREATE DATABASE
                # was attempted even when the database existed.
                if isinstance(name, bytes):
                    name = name.decode()
                self.db_list.append(name)
            if self.database_name not in self.db_list:
                self.cursor.execute("create database %s;" % self.database_name)
            else:
                print("%s db is already exist" % self.database_name)
        finally:
            # Close the connection even if a query above fails.
            self.connection.close()
# Run immediately when the script is executed.
createdb().create_require_db()
|
'''
Write a function that takes an unsigned integer and return the number of '1' bits
it has (also known as the Hamming weight).
Example 1:
Input: 00000000000000000000000000001011
Output: 3
Explanation: The input binary string 00000000000000000000000000001011 has a total of three '1' bits.
Example 2:
Input: 00000000000000000000000010000000
Output: 1
Explanation: The input binary string 00000000000000000000000010000000 has a total of one '1' bit.
Example 3:
Input: 11111111111111111111111111111101
Output: 31
Explanation: The input binary string 11111111111111111111111111111101 has a total of thirty one '1' bits.
'''
def hammingWeight(n):
    """
    :type n: int
    :rtype: int

    Return the number of set bits in the unsigned integer n.
    """
    # bin() renders the unsigned value; counting '1' digits gives the
    # Hamming weight directly.
    return bin(n).count('1')
|
# Activate virtualenv (WSGI entry point).
import sys
import settings

# Path to the venv's activate_this.py, or None when not configured.
activate_this = getattr(settings, 'VENV', None)
if (sys.version_info > (3, 0)):
    # Python 3
    # BUG FIX: guard against a missing VENV setting — the Python 2 branch
    # already skipped activation, but this branch called open(None).
    if activate_this:
        with open(activate_this) as file_:
            exec(file_.read(), dict(__file__=activate_this))
else:
    # Python 2
    if activate_this:
        execfile(activate_this, dict(__file__=activate_this))
from server import app as application
if __name__ == "__main__":
    # application is ran standalone
    application.run(debug=settings.DEBUG)
|
# Import numpy library
import numpy as np
# Tutorial script: each operation is followed by a string literal showing
# the printed RESULT.
# Reorganising arrays
before = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
after_1 = before.reshape(2, 2, 2)
print("after_1: ", after_1)
""" RESULT
after_1: [[[1 2]
[3 4]]
[[5 6]
[7 8]]] """
after_2 = before.reshape(4, 2)
print("after_2: ", after_2)
""" RESULT
after_2: [[1 2]
[3 4]
[5 6]
[7 8]] """
# Set (5 x 5) matrix
data1 = np.arange(1, 26)
print("data1:", data1)
# -1 lets numpy infer the second dimension (25 / 5 = 5 columns).
data2 = data1.reshape(5, -1)
print("data2:", data2)
""" RESULT
data1: [ 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25]
data2: [[ 1 2 3 4 5]
[ 6 7 8 9 10]
[11 12 13 14 15]
[16 17 18 19 20]
[21 22 23 24 25]] """
# Vertical stacking of vectors
v1 = np.array([1, 2, 3, 4])
v2 = np.array([5, 6, 7, 8])
v_stack = np.vstack([v1, v2])
print("Ver_stack:", v_stack)
""" RESULT
Ver_stack: [[1 2 3 4]
[5 6 7 8]] """
# Obtain multiple copies
v_stack = np.vstack([v1, v1, v2, v2])
print("Ver_stack:", v_stack)
""" RESULT
Ver_stack: [[1 2 3 4]
[1 2 3 4]
[5 6 7 8]
[5 6 7 8]] """
# Horizontal stacking of vectors
h1 = np.array([1, 2, 3, 4])
h2 = np.array([5, 6, 7, 8])
horz_stack = np.hstack([h1, h2])
print("Horz_stack1:", horz_stack)
""" RESULT
Hor_stack: [1 2 3 4 5 6 7 8] """
h1 = np.ones([2, 4])
h2 = np.zeros([2, 2])
horz_stack = np.hstack([h1, h2])
print("Horz_stack2:", horz_stack)
""" RESULT
Hor_stack: [[1. 1. 1. 1. 0. 0.]
[1. 1. 1. 1. 0. 0.]] """
# Splitting the array horizontally
h = np.array([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]])
h = np.hsplit(h, 2)
print("h:", h)
""" RESULT
h: [array([[ 0, 1],
[ 4, 5],
[ 8, 9],
[12, 13]]), array([[ 2, 3],
[ 6, 7],
[10, 11],
[14, 15]])] """
# Splitting the array vertically
v = np.array([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]])
v = np.vsplit(v, 2)
print("v:", v)
""" RESULT
v: [array([[0, 1, 2, 3],
[4, 5, 6, 7]]), array([[ 8, 9, 10, 11],
[12, 13, 14, 15]])"""
# Aggregation functions on array
stats = np.array([[1, 2, 3], [4, 5, 6]])
print("stats:", stats)
""" RESULT
stats: [[1 2 3]
[4 5 6]] """
Min_1 = np.min(stats)
print("Min_1:", Min_1)
""" RESULT
Min_1: 1 """
# axis=1 reduces along each row.
Min_2 = np.min(stats, axis=1)
print("Min_2:", Min_2)
""" RESULT
Min_2: [1 4] """
Max_1 = np.max(stats)
print("Max_1:", Max_1)
""" RESULT
Max_1: 6 """
Sum_1 = np.sum(stats)
print("Sum_1:", Sum_1)
""" RESULT
Sum_1: 21 """
# axis=0 reduces along each column.
Sum_2 = np.sum(stats, axis=0)
print("Sum_2:", Sum_2)
""" RESULT
Sum_2: [5 7 9] """
|
import copy

# Advent of Code day 1 part 2: find three distinct entries summing to 2020
# and print their product.
results = []
with open('part1Input') as inputfile:
    for line in inputfile:
        results.append(int(line.strip()))
helpResults1 = copy.deepcopy(results)
helpResults2 = copy.deepcopy(results)
product = 0
for x in results:
    # Drop x from both helper lists so it cannot be paired with itself.
    helpResults1.remove(x)
    helpResults2.remove(x)
    for y in helpResults1:
        for z in helpResults2:
            sum = x + y + z  # NOTE(review): shadows the builtin `sum`
            if sum == 2020:
                print("x: {0}, y: {1}, z: {2}".format(x,y,z))
                product = x*y*z
                break
        if product != 0:
            break
    if product != 0:
        break
print("Result: {0}".format(product))
|
from emitted import Client
from Tkinter import *
import sys
sys.path.append("C:/Users/perceptual/Waldo")
from waldo.lib import Waldo
HOSTNAME = '127.0.0.1'
PORT = 8195
games = ["bang","lucky","ducks"]
full = ["Bang!", "Kill Dr. Lucky", "Sitting Ducks"]
count = [0,0,0]
EXITSTRING = "exit"
QUITSTRING = "quit"
REFRESH = 100
myGame = -1
ready = False
retrieve = 0
def switch(msg):
    """Map a chat command string to its numeric code.

    Game names map to their index in `games`; the exit/quit commands map to
    -2/-3; anything unrecognised maps to -1.

    BUG FIX: the original guarded the lookup with `if msg in games`, which
    made the EXITSTRING/QUITSTRING dict entries unreachable (so callback's
    result == -3 shutdown path could never trigger).
    """
    return {
        "bang": 0,
        "lucky": 1,
        "ducks": 2,
        EXITSTRING: -2,
        QUITSTRING: -3,
    }.get(msg, -1)
def getMSG(endpoint, msg):
    """Waldo callback: append an incoming chat message to the listbox and
    scroll it into view."""
    listbox.insert(END, msg)
    listbox.yview(END)
    #print msg
#print msg
# Interactive setup (Python 2: raw_input).
name = raw_input('What is your name? ')
#gui stuff
root = Tk()
root.geometry("500x220")
scrollbar = Scrollbar(root)
scrollbar.pack(side=RIGHT, fill=Y)
listbox = Listbox(root, yscrollcommand=scrollbar.set)
listbox.pack(side=TOP, fill=BOTH)
scrollbar.config(command=listbox.yview)
e = Entry(root,width=100)
e.pack()
e.focus_set()
#client stuff
# Connect to the Waldo chat server; getMSG receives incoming messages.
client = Waldo.tcp_connect(Client, HOSTNAME, PORT, getMSG, name)
def cb(event):
    """<Return> key binding: delegate to the Enter-button callback."""
    callback()
def callback():
    """Handle the Enter button: send a chat line, or process a /command.

    NOTE(review): myGame/ready are assigned locally here (no `global`
    statement), so the module-level variables of the same names are never
    updated by this function.
    """
    myGame = int(client.getGame())
    msg_to_send = e.get()
    # send_cmd(-3, ...) appears to query readiness without changing game
    # selection — TODO confirm against the Waldo Client implementation.
    ready = client.send_cmd(-3, False)
    #if ready:
    #print full[myGame] + " is ready to play!"
    if(msg_to_send != ''):
        e.delete(0,END)
        listbox.yview(END)
        if(msg_to_send[0] != '/'):
            # Plain chat line: broadcast it prefixed with our name.
            client.send_msg(name + ": " + msg_to_send)
        else:
            #print msg_to_send[1:]
            result = switch(msg_to_send[1:])
            #print "My game is " + str(myGame)
            if(result != myGame):
                if(result == -3):
                    Waldo.stop(); #does this even work? i don't think so!
                else:
                    # Withdraw from the current game, then join the new one.
                    client.send_cmd(myGame, False)
                    if(myGame >= 0):
                        client.send_msg(name + " no longer wishes to play " + full[myGame] + ". Sissy.")
                    if(result >= 0):
                        client.send_cmd(result, True)
                        client.send_msg(name + " would like to play " + full[result] + "!")
                    #client.setGame(result)
                client.setGame(result)
                myGame = result;
# Enter button and key binding both route through callback().
b = Button(root, text="Enter", width=10, command=callback)
b.pack(side=BOTTOM)
e.bind('<Return>',cb)
#print 'liftoff!\n'
client.send_msg(name + ' has connected.')
def task(ready):
    """Periodic poll (every REFRESH ms): pump messages and announce
    readiness changes for the currently selected game."""
    client.send_msg('')
    client.retrieveMSG()
    myGame = client.getGame()
    temp = ready
    ready = client.send_cmd(-3, False)
    # Only report when the readiness count changed and a game is selected.
    if temp != ready and myGame >= 0:
        print str(temp) + " " + str(ready) + " "
        if ready >= 2:
            listbox.insert(END, full[myGame] + " is ready to play! Currently, " + str(int(ready)) + " people want to play.")
        else:
            listbox.insert(END, "We need one more person to play " + full[myGame] + "!")
        listbox.yview(END)
    thing = client.service_signal()
    # Re-schedule ourselves, carrying the latest readiness value.
    root.after(REFRESH,lambda:task(ready))
# Kick off the polling loop, then hand control to Tk.
root.after(REFRESH,lambda:task(ready))
root.mainloop()
|
import CoreFoundation
from PyObjCTools.TestSupport import TestCase, min_os_level
import objc
class TestData(TestCase):
    """PyObjC bridge tests for the CoreFoundation CFData / CFMutableData API."""

    def testTypes(self):
        # The concrete bridged class name differs across macOS releases.
        try:
            NSCFData = objc.lookUpClass("__NSCFData")
        except objc.error:
            NSCFData = objc.lookUpClass("NSCFData")
        self.assertIs(CoreFoundation.CFDataRef, NSCFData)

    def testTypeID(self):
        v = CoreFoundation.CFDataGetTypeID()
        # NOTE(review): assertTrue's second argument is a message, not a
        # type — this probably intended assertIsInstance(v, int).
        self.assertTrue(v, int)

    def testCreation(self):
        # Byte-buffer arguments are passed as "in" pointers whose size comes
        # from the following length argument.
        self.assertArgHasType(CoreFoundation.CFDataCreate, 1, b"n^v")
        self.assertArgSizeInArg(CoreFoundation.CFDataCreate, 1, 2)
        data = CoreFoundation.CFDataCreate(None, b"hello", 5)
        self.assertIsInstance(data, CoreFoundation.CFDataRef)
        bytes_data = b"hello world"
        self.assertArgHasType(CoreFoundation.CFDataCreateWithBytesNoCopy, 1, b"n^v")
        self.assertArgSizeInArg(CoreFoundation.CFDataCreateWithBytesNoCopy, 1, 2)
        data = CoreFoundation.CFDataCreateWithBytesNoCopy(
            None, bytes_data, 5, CoreFoundation.kCFAllocatorNull
        )
        self.assertIsInstance(data, CoreFoundation.CFDataRef)
        del data
        data = CoreFoundation.CFDataCreate(None, b"hello", 5)
        self.assertIsInstance(data, CoreFoundation.CFDataRef)
        cpy = CoreFoundation.CFDataCreateCopy(None, data)
        self.assertIsInstance(cpy, CoreFoundation.CFDataRef)
        cpy2 = CoreFoundation.CFDataCreateMutableCopy(None, 0, data)
        self.assertIsInstance(cpy2, CoreFoundation.CFDataRef)
        mut = CoreFoundation.CFDataCreateMutable(None, 0)
        self.assertIsInstance(mut, CoreFoundation.CFDataRef)

    def testInspection(self):
        data = CoreFoundation.CFDataCreate(None, b"hello", 5)
        self.assertIsInstance(data, CoreFoundation.CFDataRef)
        mutableData = CoreFoundation.CFDataCreateMutableCopy(None, 0, data)
        self.assertIsInstance(mutableData, CoreFoundation.CFDataRef)
        self.assertEqual(CoreFoundation.CFDataGetLength(data), 5)
        self.assertEqual(CoreFoundation.CFDataGetLength(mutableData), 5)
        v = CoreFoundation.CFDataGetBytePtr(data)
        self.assertEqual(CoreFoundation.CFDataGetBytePtr(data)[0], b"h")
        # Writing through the mutable byte pointer must be visible on reads.
        v = CoreFoundation.CFDataGetMutableBytePtr(mutableData)
        self.assertEqual(v[0], b"h")
        v[0] = b"p"
        v = CoreFoundation.CFDataGetBytePtr(mutableData)
        self.assertEqual(v[0], b"p")
        self.assertArgHasType(CoreFoundation.CFDataGetBytes, 2, b"o^v")
        self.assertArgSizeInArg(CoreFoundation.CFDataGetBytes, 2, 1)
        bytes_data = CoreFoundation.CFDataGetBytes(data, (1, 3), None)
        self.assertEqual(bytes_data, b"hello"[1:4])
        CoreFoundation.CFDataSetLength(mutableData, 3)
        self.assertEqual(CoreFoundation.CFDataGetLength(mutableData), 3)
        CoreFoundation.CFDataIncreaseLength(mutableData, 17)
        self.assertEqual(CoreFoundation.CFDataGetLength(mutableData), 20)
        CoreFoundation.CFDataSetLength(mutableData, 3)
        self.assertArgHasType(CoreFoundation.CFDataAppendBytes, 1, b"n^v")
        self.assertArgSizeInArg(CoreFoundation.CFDataAppendBytes, 1, 2)
        CoreFoundation.CFDataAppendBytes(mutableData, b" world", 6)
        self.assertEqual(CoreFoundation.CFDataGetLength(mutableData), 9)
        self.assertEqual(
            CoreFoundation.CFDataGetBytes(mutableData, (0, 9), None), b"pel world"
        )
        self.assertArgHasType(CoreFoundation.CFDataReplaceBytes, 2, b"n^v")
        self.assertArgSizeInArg(CoreFoundation.CFDataReplaceBytes, 2, 3)
        CoreFoundation.CFDataReplaceBytes(mutableData, (0, 3), b"hello", 5)
        self.assertEqual(
            CoreFoundation.CFDataGetBytes(mutableData, (0, 9), None), b"hello world"[:9]
        )
        CoreFoundation.CFDataDeleteBytes(mutableData, (0, 6))
        self.assertEqual(
            CoreFoundation.CFDataGetBytes(mutableData, (0, 5), None), b"world"
        )

    @min_os_level("10.6")
    def testConstants10_6(self):
        self.assertEqual(CoreFoundation.kCFDataSearchBackwards, 1 << 0)
        self.assertEqual(CoreFoundation.kCFDataSearchAnchored, 1 << 1)

    @min_os_level("10.6")
    def testFunctions10_6(self):
        data = CoreFoundation.CFDataCreate(None, b"hello world", 11)
        self.assertIsInstance(data, CoreFoundation.CFDataRef)
        src = CoreFoundation.CFDataCreate(None, b"wor", 3)
        self.assertIsInstance(src, CoreFoundation.CFDataRef)
        self.assertResultHasType(
            CoreFoundation.CFDataFind, CoreFoundation.CFRange.__typestr__
        )
        self.assertArgHasType(
            CoreFoundation.CFDataFind, 2, CoreFoundation.CFRange.__typestr__
        )
        # "wor" occurs at offset 6 with length 3 in "hello world".
        v = CoreFoundation.CFDataFind(data, src, (0, 11), 0)
        self.assertIsInstance(v, CoreFoundation.CFRange)
        self.assertEqual(v, (6, 3))
|
#!/usr/bin/python
"""
Copyright (c) 2018 Ian Shatwell
The above copyright notice and the LICENSE file shall be included with
all distributions of this software
"""
import sys
import signal
import time
import os
import psutil
import RPi.GPIO as GPIO
def signal_handler(signal, frame):
    """SIGINT/SIGTERM handler: release GPIO pins and exit cleanly."""
    GPIO.cleanup()
    sys.exit(0)
# CPU load (%) above which the warning pin is driven high.
CPUWARNING = 80
# BCM pin numbers: shutdown button (active low) and high-CPU indicator.
PINSHUTDOWN = 17
PINHIGHCPU = 18
GPIO.setmode(GPIO.BCM)
GPIO.setup(PINSHUTDOWN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(PINHIGHCPU, GPIO.OUT)
# Register signal handler
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
# First sample (interval=None compares against the previous call).
cpuload = max(psutil.cpu_percent(interval=None, percpu=True))
countdown = 13
# Main watch loop (Python 2 print statements): poll every 0.25 s.
while True:
    # Check cpu usage
    cpuload = max(psutil.cpu_percent(interval=None, percpu=True))
    if cpuload > CPUWARNING:
        GPIO.output(PINHIGHCPU, 1)
    else:
        GPIO.output(PINHIGHCPU, 0)
    # Look for shutdown signal
    # Active low
    if GPIO.input(PINSHUTDOWN):
        countdown = 13
    else:
        # Button held: count down; ~13 * 0.25 s of holding triggers shutdown.
        countdown = countdown - 1
        if countdown == 0:
            print "BOOM!"
        else:
            print "Shutdown in {}".format(countdown)
    if countdown == 0:
        print "Shutdown command triggered"
        os.system("sync")
        time.sleep(3)
        os.system("sync")
        os.system("sudo shutdown -P now")
    time.sleep(0.25)
|
import unittest
from datetime import datetime, timedelta
from project import get_last_value_date
# Reference value: yesterday's date in ISO format, computed at import time.
today = datetime.now()
yesterday = (today - timedelta(1)).strftime('%Y-%m-%d')
class LastValueDateTestCase(unittest.TestCase):
    """get_last_value_date() should report yesterday's date (YYYY-MM-DD)."""

    def test_value(self):
        result = get_last_value_date()
        self.assertEqual(yesterday, result)
|
from math import fabs,sqrt
import pygame
import time
import constants as c
import collision
# Debug rendering: visited grid cells are blitted as the letter "A".
screen = pygame.display.set_mode((c.gamew, c.gameh))
pygame.font.init()
font = pygame.font.SysFont("monospace",32)
letter = font.render("A",1,(255,255,255))
class Node:
    """Search node: a grid position (`start`), its goal, and path cost."""

    def __init__(self,cost,start,goal,parent = 0):
        self.cost = cost        # path cost accumulated so far
        self.openList = []      # NOTE(review): per-node lists appear unused
        self.closeList = []
        self.start = start      # this node's (x, y) position
        self.goal = goal
        if parent:              # only set when truthy — the root has no parent
            self.parent = parent
def sortList(obj):
    """sort() key: order nodes by their path cost."""
    return obj.cost
def adjacentPos(i,objpos):
    """Return the neighbour of grid cell `objpos` in direction `i`.

    Directions: 0 = up (y-1), 1 = right (x+1), 2 = down (y+1), 3 = left (x-1).
    Raises ValueError for any other direction (callers use range(4)).
    """
    if i == 0:
        checkNode = (objpos[0],objpos[1]-1)
    elif i == 1:
        checkNode = (objpos[0]+1, objpos[1])
    elif i == 2:
        checkNode = (objpos[0], objpos[1]+1)
    elif i == 3:
        # BUG FIX: was objpos[0-1], i.e. objpos[-1] — the y coordinate.
        # The left neighbour is x-1 at the same y.
        checkNode = (objpos[0]-1, objpos[1])
    else:
        raise ValueError("direction must be 0..3, got %r" % (i,))
    return checkNode
def checkCloseList(closeList,Node):
    """Return True if no node in closeList shares Node's position.

    NOTE(review): the parameter name `Node` shadows the Node class, and the
    two prints look like leftover debugging output.
    """
    notInList = True
    for i in closeList:
        print(len(closeList))
        #print(Node.start,i.start)
        if Node.start == i.start: notInList = False
    print(notInList)
    return notInList
def pathfind(curRoom,start,goal):
    """Breadth-first-style search from `start` to `goal`, avoiding
    curRoom.obstacleList, drawing each visited cell on screen.

    NOTE(review): curNode is re-created from `start` on every iteration of
    the while loop, so the frontier never advances past the first expansion
    — this function looks unfinished/buggy.
    """
    curNode = Node(0,start,goal)
    openList = [curNode]
    closeList = []
    loop = 0
    notFound = True
    while notFound:
        curNode = Node(0,(start),goal)
        openList += [curNode]
        openList.sort(key=sortList)
        closeList += [curNode]
        openList.remove(curNode)
        curCost = 0
        for i in range(4):
            nextNodepos = adjacentPos(i,curNode.start)
            nextNode = Node(curNode.cost + 1,nextNodepos,goal,curNode)
            if (nextNodepos) not in curRoom.obstacleList and checkCloseList(closeList,nextNode):
                #print(nextNodepos)
                # Visualise the visited cell (32-pixel grid).
                screen.blit(letter,(nextNodepos[0]*32,nextNodepos[1]*32))
                pygame.display.update()
                if nextNodepos == goal:
                    notFound = False
                    break
                openList += [nextNode]
                openList.sort(key=sortList)
                curNode = openList[0]
                curCost = curNode.cost
            else:
                closeList += [nextNode]
        loop += 1
        time.sleep(30)
'''
class status:
def __init__(self,value, parent, start = 0, goal = 0):
self.children = []
self.parent = parent
self.value = value
self.dist = 0
if parent:
self.path = parent.path[:]
self.path.append(value)
self.start = parent.start
self.goal = parent.goal
else:
self.path = [value]
self.start = start
self.goal = goal
def getDist(self):
pass
def children(self):
pass
class stateStr:
def __init__(self,value, parent, start = 0, goal = 0):
super(stateStr, self).__init__(value, parent, start, goal)
self.dist = self.getDist()
def getDist(self):
dist = 0
if self.value == self.goal:
return 0
for i in range (len(self.goal)):
letter = self.goal[i]
dist += abs(i - self.value.index(letter))
return dist
def children(self):
if not self.children():
for i in range((len.goal)-1):
val = self.value
val = val[:1] + val[i+1] + val[i] +val[i+2:]
child = stateStr (val,self)
self.children().append(child)
class solver:
def __init__(self,start,goal):
self.path = []
self.visitedQ = []
self.priorityQ = PriorityQueue()
self.start = start
self.goal = goal
def solver(self):
startState = stateStr(self.start,0,self.start,self.goal)
count = 0
self.priorityQ.put((0,count,startState))
while(not self.path and self.priorityQ.qsize()):
closestChild = self.priorityQ.qsize()[2]
closestChild.children()
self.visitedQ.append(closestChild.value)
for child in closestChild.children:
if child.value not in self.visitedQ:
count += 1
if not child.dist:
self.path = child.path
break
self.priorityQ.put((child.dist,count,child))
print(self.path)
return self.path
def __init__(self, tile, parent, gCost, hCost):
self.tile = tile
self.parent = parent
self.gCost = gCost
self.hCost = hCost
self.fCost = gCost + hCost
def pathfinder(playerpos,enemypos,obstaclelist):
x = fabs(enemypos[0]-playerpos[0])
y = fabs(enemypos[1]-playerpos[1])
heuristic = x+y
moveCost = sqrt(x**2+y**2)
print(heuristic,moveCost)
''' |
#!/bin/python3
import math
import os
import random
import re
import sys
import bisect
import math
# Complete the minTime function below.
def minTime(machines, goal):
    """Return the minimum number of days needed for `machines` (machine i
    produces one item every machines[i] days) to produce `goal` items total.

    Binary-search on the day count: total production is monotone in days
    and each candidate is evaluated in O(n), so the whole search is
    O(n log n).  (An earlier day-by-day simulation, O(goal * n), timed out
    on large inputs and has been removed.)
    """
    # Bounds: with n machines, at least ceil(goal/n) items come from some
    # machine, so the fastest/slowest machine periods bound the answer.
    min_day = math.ceil(goal / len(machines)) * min(machines)
    max_day = math.ceil(goal / len(machines)) * max(machines)
    # Classic lower-bound binary search: shrink towards the first day on
    # which production reaches the goal.
    while min_day < max_day:
        mid_day = (min_day + max_day) // 2
        if sum(mid_day // m for m in machines) >= goal:
            max_day = mid_day
        else:
            min_day = mid_day + 1
    return min_day
if __name__ == '__main__':
    # HackerRank harness: read input from stdin, write answer to OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    nGoal = input().split()
    n = int(nGoal[0])    # declared machine count (unused; len(machines) is used)
    goal = int(nGoal[1])
    machines = list(map(int, input().rstrip().split()))
    ans = minTime(machines, goal)
    fptr.write(str(ans) + '\n')
    fptr.close()
|
from django.conf.urls import url,include
#from django.contrib import admin
#
# Route /myadmin/ to the `myadmin` app; everything else falls through to `web`.
urlpatterns = [
    url(r'^myadmin/', include('myadmin.urls')),
    url(r'^', include('web.urls')),
]
|
# Imports the monkeyrunner modules used by this program
from com.android.monkeyrunner import MonkeyRunner, MonkeyDevice
import time
import sys
PKG_NAME = 'com.twitter.'
ACTIVITY = '.LoginActivity'
# NOTE(review): PKG_NAME + '/' + ACTIVITY yields 'com.twitter./.LoginActivity'
# — the trailing dot in PKG_NAME looks unintended; verify the component name.
DEV_PRE_SCRIPT = '/data/data/adafs/dev_pre.sh facebook'
DEV_CLEAR_SCRIPT = '/data/data/adafs/dev_clear.sh facebook'
# Connects to the current device, returning a MonkeyDevice object
device = MonkeyRunner.waitForConnection()
# retrives basic properties
width = int(device.getProperty("display.width"))
height = int(device.getProperty("display.height"))
# Wrapper function for touching a point down and up
def touchDownUp(x_rate, y_rate, interval = 0.2):
    """Sleep `interval` seconds, then tap the screen at the point given as
    fractions of the display width/height."""
    MonkeyRunner.sleep(interval)
    device.touch(int(width * x_rate), int(height * y_rate),
                 MonkeyDevice.DOWN_AND_UP)
    return
def dragVertically(y1_rate, y2_rate, duration = 0.1, steps = 5):
    """Drag along the horizontal centre of the screen from y1_rate to
    y2_rate (fractions of the display height)."""
    mid_x = int(width * 0.5)
    start = (mid_x, int(height * y1_rate))
    end = (mid_x, int(height * y2_rate))
    device.drag(start, end, duration, steps)
    return
def takeSnapshot(x_rate, y_rate, w_rate, h_rate):
    """Return a snapshot of the screen region given as fractions of the
    display size (x, y, width, height)."""
    region = (int(width * x_rate), int(height * y_rate),
              int(width * w_rate), int(height * h_rate))
    return device.takeSnapshot().getSubImage(region)
def waitToFinish(x_rate, y_rate, w_rate, h_rate, interval = 0.2):
    """Poll a screen region until two consecutive snapshots match (i.e. the
    region has stopped changing); return the final snapshot."""
    target = takeSnapshot(x_rate, y_rate, w_rate, h_rate)
    MonkeyRunner.sleep(interval)
    state = takeSnapshot(x_rate, y_rate, w_rate, h_rate)
    while (not target.sameAs(state)):
        target = state
        MonkeyRunner.sleep(interval)
        state = takeSnapshot(x_rate, y_rate, w_rate, h_rate)
    return state
def operateFB():
    """Launch the app and run 5 timed rounds of 5 vertical scrolls each,
    printing per-round and total elapsed times (Python 2 prints)."""
    # Launches the app
    runComponent = PKG_NAME + '/' + ACTIVITY
    device.startActivity(component=runComponent)
    MonkeyRunner.sleep(8)
    waitToFinish(0.4, 0.4, 0.2, 0.2, 4)
    begin = time.time()
    for t in range(5):
        for i in range(5):
            dragVertically(0.9, 0.23)
            waitToFinish(0.3, 0.89, 0.4, 0.1, 0.2)
        print "[Sestet] \t%d \t%f" % (t + 1, time.time() - begin)
    end = time.time()
    print "[Sestet] \t%f \t%f" % (begin - g_begin, end - g_begin)
    MonkeyRunner.sleep(5)
    return
# Main
print "Main begins."
g_begin = time.time()
for i in range(2):
print "Trial %d:" % (i + 1)
if i % 2 == 0:
device.shell('su -c ' + DEV_PRE_SCRIPT + ' ext4')
operateFB()
device.shell('su -c ' + DEV_CLEAR_SCRIPT + ' ext4')
else:
device.shell('su -c ' + DEV_PRE_SCRIPT + ' eafs')
operateFB()
device.shell('su -c ' + DEV_CLEAR_SCRIPT + ' eafs')
|
from datasets.facial_yaml import FacialYaml
import numpy as np
# Collect and report the samples whose first attribute row is all zeros.
facial_yaml = FacialYaml('facial_feature.yaml')
facial_dict = facial_yaml.return_facial_attr_info_dict()
selected_facial_fea = facial_dict['facial_fea']
selected_facial_fea_len = facial_dict['facial_fea_len']
selected_attrs = facial_dict['facial_fea_attr']
selected_attrs_dataset = facial_dict['attr_dataset']
list_arr_0 = []
list_arr_1 = []  # NOTE(review): never populated
for i in selected_attrs_dataset:
    # presumably i == (sample id, attribute rows) — verify against FacialYaml
    if sum(np.array(i[1][0]).astype(int)) == 0:
        print("precessing {}".format(i[0]))
        print(np.array(i[1][0]).astype(int))
        list_arr_0.append(i[0])
print("------------")
print(list_arr_0)
print(len(list_arr_0))
# print(list_arr_0)
# print(len(list_arr_1))
# print(list_arr_1)
|
from django import forms
# ================================================= ФОРМА СВЯЗИ ПРОЕКТОВ ===============================================
class Linked_Projects_Form(forms.Form):
    """Form linking a Redmine project URL to a GitHub repository URL."""
    url_rm = forms.CharField(label="url_rm", help_text="Enter Redmine project url")
    url_gh = forms.CharField(label="url_gh", help_text="Enter Github repository url")
    field_order = ["url_rm", "url_gh"]

    def get(self):
        """Return the validated form data ({'url_rm': ..., 'url_gh': ...}).

        BUG FIX: the original indexed cleaned_data['linked_projects_form'],
        but cleaned_data is keyed by field name, so it always raised KeyError.
        """
        data = self.cleaned_data
        return data
# -*- coding: utf-8 -*-
import numpy as np
import os
from PIL import Image
import math
import json
import scipy.io as scio
import sqlite3
# path_save = r'D:\for_locate_point\user5-8_final_label_0703'
# image_list = []
# file_list = ['D:/for_locate_point/user5_1', 'D:/for_locate_point/user6_1', 'D:/for_locate_point/user7_1', 'D:/for_locate_point/user8_1']
#
# for i in range(4):
# for root, dirs, files in os.walk(file_list[i]):
# for file in files:
# if file not in image_list:
# image_list.append(file)
#
# for i in range(len(image_list)):
# left_point_x = []
# left_point_y = []
# right_point_x = []
# right_point_y = []
# if os.path.exists(os.path.join(file_list[0], image_list[i])):
# a = scio.loadmat(os.path.join(file_list[0], image_list[i]))
# if a['leftpoint'][0][0] != 0:
# left_point_x.append(a['leftpoint'][0][0])
# left_point_y.append(a['leftpoint'][0][1])
# if a['rightpoint'][0][0] != 0:
# right_point_x.append(a['rightpoint'][0][0])
# right_point_y.append(a['rightpoint'][0][1])
# if os.path.exists(os.path.join(file_list[1], image_list[i])):
# a = scio.loadmat(os.path.join(file_list[1], image_list[i]))
# if a['leftpoint'][0][0] != 0:
# left_point_x.append(a['leftpoint'][0][0])
# left_point_y.append(a['leftpoint'][0][1])
# if a['rightpoint'][0][0] != 0:
# right_point_x.append(a['rightpoint'][0][0])
# right_point_y.append(a['rightpoint'][0][1])
# if os.path.exists(os.path.join(file_list[2], image_list[i])):
# a = scio.loadmat(os.path.join(file_list[2], image_list[i]))
# if a['leftpoint'][0][0] != 0:
# left_point_x.append(a['leftpoint'][0][0])
# left_point_y.append(a['leftpoint'][0][1])
# if a['rightpoint'][0][0] != 0:
# right_point_x.append(a['rightpoint'][0][0])
# right_point_y.append(a['rightpoint'][0][1])
# if os.path.exists(os.path.join(file_list[3], image_list[i])):
# a = scio.loadmat(os.path.join(file_list[3], image_list[i]))
# if a['leftpoint'][0][0] != 0:
# left_point_x.append(a['leftpoint'][0][0])
# left_point_y.append(a['leftpoint'][0][1])
# if a['rightpoint'][0][0] != 0:
# right_point_x.append(a['rightpoint'][0][0])
# right_point_y.append(a['rightpoint'][0][1])
#
# if len(left_point_x) == 0 and len(right_point_x) == 0:
# scio.savemat(os.path.join(path_save, image_list[i]), {'leftpoint': [0, 0], 'rightpoint': [0, 0]})
# elif len(left_point_x) == 0:
# rightpoint = [np.mean(right_point_x), np.mean(right_point_y)]
# scio.savemat(os.path.join(path_save, image_list[i]), {'leftpoint': [0, 0], 'rightpoint': rightpoint})
# elif len(right_point_x) == 0:
# leftpoint = [np.mean(left_point_x), np.mean(left_point_y)]
# scio.savemat(os.path.join(path_save, image_list[i]), {'leftpoint': leftpoint, 'rightpoint': [0, 0]})
# else:
# leftpoint = [np.mean(left_point_x), np.mean(left_point_y)]
# rightpoint = [np.mean(right_point_x), np.mean(right_point_y)]
# scio.savemat(os.path.join(path_save, image_list[i]), {'leftpoint': leftpoint, 'rightpoint': rightpoint})
# Export eye-corner point labels and left/right status labels from the
# labelling SQLite database into per-image .mat files.
path_save = r'C:\Users\cvter\Desktop\AS-OCT\relabel50_0726\point'
class_savepath = r'C:\Users\cvter\Desktop\AS-OCT\relabel50_0726\angle'


def _box_center(entry, radio):
    """Return the centre [x, y] of one labelled box, rescaled by `radio`."""
    box = json.loads(entry['data'])
    x = (box['left'] + 0.5 * box['width']) / radio
    y = (box['top'] + 0.5 * box['height']) / radio
    return [x, y]


def _export_row(row):
    """Write the status .mat and the left/right point .mat for one DB row.

    row[3] is the image file name, row[4] a slice index, row[7] the display
    scale, and row[9] the JSON label payload.  A radio value of 4 appears to
    mean "no point on that side" — [0, 0] is stored as the placeholder.
    """
    label_name = row[3][:-4] + '_' + str(row[4]) + '.mat'
    label = json.loads(row[9])
    left_flag = int(label['left_radio_value'])
    right_flag = int(label['right_radio_value'])
    scio.savemat(os.path.join(class_savepath, label_name),
                 {'leftstatus': left_flag, 'rightstatus': right_flag})
    radio = float(row[7])
    if left_flag == 4 and right_flag == 4:
        leftpoint, rightpoint = [0, 0], [0, 0]
    elif left_flag == 4:
        leftpoint = [0, 0]
        rightpoint = _box_center(label['label_data'][0], radio)
    elif right_flag == 4:
        leftpoint = _box_center(label['label_data'][0], radio)
        rightpoint = [0, 0]
    else:
        # Two boxes: the one with the smaller x is the left corner.
        p1 = _box_center(label['label_data'][0], radio)
        p2 = _box_center(label['label_data'][1], radio)
        leftpoint, rightpoint = (p1, p2) if p1[0] < p2[0] else (p2, p1)
    scio.savemat(os.path.join(path_save, label_name),
                 {'leftpoint': leftpoint, 'rightpoint': rightpoint})


conn = sqlite3.connect(r'D:\skeptical point\Data\label.db')
ret = conn.execute("select * from asoct_label")          # all original labels
ret2 = conn.execute("select * from asoct_update_label")  # all re-labels
conn.commit()
rows1 = ret.fetchall()
rows2 = ret2.fetchall()
# The two tables share a schema, so one code path handles both.
# (The original duplicated this whole export block for rows1 and rows2.)
for row in rows1:
    _export_row(row)
for row in rows2:
    _export_row(row)
conn.close()
|
from django.urls import path
from . import views
# Django URL configuration for the dashboard app.
# NOTE(review): most data endpoints below reuse name='data'; Django keeps only
# the last registration for reverse()/{% url %}, so 'dashboard:data' resolves
# solely to the final clientsSubtaskHighData route.  Give each route a unique
# name if any of them must be reversible.
app_name= 'dashboard'
urlpatterns = [
path('', views.index, name='index'),
path('clientDash/', views.clientDash, name='clientDash'),
path('allClientData/', views.allClientData, name="data"),
path('clientsData/', views.ClientData, name='data'),
path('subtaskData/', views.subtaskData, name='data'),
path('tasksData/', views.taskData, name='data'),
path('projectData/', views.projectData, name='data'),
# low&mid&high projects
path('clientsProjectLowData/', views.clientsLowProjectData, name='data' ),
path('clientsProjectMidData/', views.clientsMidProjectData, name='data' ),
path('clientsProjectHighData/', views.clientsHighProjectData, name='data' ),
# low&mid&high tasks
path('clientsTaskLowData/', views.clientsLowTaskData, name='data' ),
path('clientsTaskMidData/', views.clientsMidTaskData, name='data' ),
path('clientsTaskHighData/', views.clientsHighTaskData, name='data' ),
# low&mid&high subtask
path('clientsSubtaskLowData/', views.clientsLowSubtaskData, name='data' ),
path('clientsSubtaskMidData/', views.clientsMidSubtaskData, name='data' ),
path('clientsSubtaskHighData/', views.clientsHighSubtaskData, name='data' ),
] |
from bs4 import BeautifulSoup as bs
import re
import os
import urllib.error
from urllib.request import urlopen, urlretrieve, HTTPCookieProcessor, build_opener, install_opener, Request
from urllib.parse import urlencode
from http.cookiejar import CookieJar
import urllib.request
def getHTML(posturl):
    """Fetch `posturl` and return the response body decoded as UTF-8.

    Returns None (after printing the failure reason) on URL errors.
    """
    header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64;rv:14.0) Gecko/20100101 Firefox/14.0.1'}
    request = Request(posturl, headers=header)
    try:
        # Context manager closes the response even if read()/decode() fails
        # (the original never closed it).
        with urlopen(request) as page:
            return page.read().decode('UTF-8')
    except urllib.error.URLError as e:
        print(e.reason)
        return None  # was implicit; make the failure value explicit
def getInfor(html):
    """Append the second <h1> and second <p> of `html` to text.txt."""
    parsed = bs(html, 'lxml')
    with open('text.txt', 'a+') as out:
        print("爬取中......")
        # Story content: title is the second <h1>, body the second <p>.
        heading = parsed.find_all("h1")[1].text
        paragraph = parsed.find_all("p")[1].text
        out.write(heading + "\n")
        out.write(paragraph + "\n\n\n")
        print("爬取完毕")
# Script entry: fetch the joke index page and dump the parsed soup to stdout.
url = 'http://www.laifudao.com/wangwen/'
url0 = 'http://www.laifudao.com'
# First chapter of the novel (alternative start page, kept for reference)
# url = 'http://read.qidian.com/chapter/WVfRBg3f0Rqt-wSl2uB4dQ2/POrenFOevQj6ItTi_ILQ7A2'
html = getHTML(url)
# i = 1
# NOTE(review): getHTML returns None on network failure; bs(None, ...) would
# then misbehave -- consider guarding before parsing.
soup = bs(html, 'lxml')
print(soup)
# hreflist = soup.find_all("h1")
# for href in hreflist[1:-3]:
#     # print(url0 + href.a["href"])
#     html = getHTML(url0 + href.a["href"])
#     getInfor(html)
# for tag in href[20:130]:
#     url = "http:" + tag["href"]
#     # print(url)
#     html = getHTML(url)
#     getInfor(html)
#     # i += 1
#     # print(html)
|
# String-method demo: print several transformations of one message.
message = 'Hello world'
for transformed in (
    message.lower(),
    message.upper(),
    message.swapcase(),
    message.find("world"),
    message.count("o"),
    message.capitalize(),
):
    print(transformed)
print(message.replace("Hello", "Hi")) |
def find_anagrams(word, candidates):
    """Return the candidates that are case-insensitive anagrams of `word`,
    excluding candidates equal to the word itself."""
    target = word.lower()
    signature = sorted(target)
    return [
        candidate
        for candidate in candidates
        if len(candidate) == len(target)          # O(1) length pre-filter
        and candidate.lower() != target           # skip the word itself
        and sorted(candidate.lower()) == signature
    ]
|
import os
import sys
import unittest
import datetime
from unittest.mock import patch
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from seriesbr import bcb # noqa: E402
def mocked_json_to_df(url, *args):
    """Instead of parsing the JSON, just return the URL"""
    # Test double: lets the suite below assert on the URL string directly.
    return url
def mocked_today_date():
    """Use this date as if it were today"""
    # Pinned so date-dependent URLs are deterministic (02/12/2019).
    return datetime.datetime(2019, 12, 2)
BASEURL = "https://api.bcb.gov.br/dados/serie/bcdata.sgs.11/dados?format=json"


@patch("seriesbr.bcb.bcb_json_to_df", mocked_json_to_df)
@patch("seriesbr.helpers.dates.today_date", mocked_today_date)
class BCBtest(unittest.TestCase):
    """URL-building tests for bcb.get_serie.

    bcb_json_to_df is patched to echo the request URL and today's date is
    pinned to 2019-12-02, so each test just compares URL strings.
    """

    def test_url_no_dates(self):
        test = bcb.get_serie(11)
        expected = BASEURL + "&dataInicial=01/01/1900&dataFinal=02/12/2019"
        self.assertEqual(test, expected)

    # Testing start dates argument
    def test_url_start_date_year_only(self):
        test = bcb.get_serie(11, start="2013")
        expected = BASEURL + "&dataInicial=01/01/2013&dataFinal=02/12/2019"
        self.assertEqual(test, expected)

    def test_url_start_date_month_and_year(self):
        test = bcb.get_serie(11, start="07-2013")
        expected = BASEURL + "&dataInicial=01/07/2013&dataFinal=02/12/2019"
        self.assertEqual(test, expected)

    # Testing end dates argument
    def test_url_end_date_year_only(self):
        test = bcb.get_serie(11, end="1990")
        expected = BASEURL + "&dataInicial=01/01/1900&dataFinal=31/12/1990"
        self.assertEqual(test, expected)

    def test_url_end_date_month_and_year(self):
        test = bcb.get_serie(11, end="06-1990")
        expected = BASEURL + "&dataInicial=01/01/1900&dataFinal=30/06/1990"
        self.assertEqual(test, expected)

    def test_url_end_date_full(self):
        test = bcb.get_serie(11, end="05032016")
        expected = BASEURL + "&dataInicial=01/01/1900&dataFinal=05/03/2016"
        self.assertEqual(test, expected)

    # Testing start and end dates arguments
    def test_url_start_and_end_date_year_only(self):
        test = bcb.get_serie(11, start="2013", end="09/2014")
        expected = BASEURL + "&dataInicial=01/01/2013&dataFinal=30/09/2014"
        self.assertEqual(test, expected)

    def test_url_start_and_end_date_month_and_year(self):
        test = bcb.get_serie(11, start="07-2013", end="09-2014")
        expected = BASEURL + "&dataInicial=01/07/2013&dataFinal=30/09/2014"
        self.assertEqual(test, expected)

    def test_url_start_and_end_date_full_dates(self):
        test = bcb.get_serie(11, start="05032016", end="25102017")
        expected = BASEURL + "&dataInicial=05/03/2016&dataFinal=25/10/2017"
        self.assertEqual(test, expected)

    # Testing last_n argument
    def test_url_last_n(self):
        test = bcb.get_serie(11, last_n=30)
        expected = "https://api.bcb.gov.br/dados/serie/bcdata.sgs.11/dados/ultimos/30?formato=json"
        self.assertEqual(test, expected)

    # Testing invalid inputs
    def test_crazy_date(self):
        # Bug fix: with all three calls inside one assertRaises block,
        # execution stopped at the first raise and the later inputs were
        # never exercised.  Each input now gets its own context.
        with self.assertRaises(ValueError):
            bcb.get_serie(11, start="asfhajksfsa")
        with self.assertRaises(ValueError):
            bcb.get_serie(11, start="002562345645")
        with self.assertRaises(ValueError):
            bcb.get_serie(11, start="###$%#RG")
# Allow running this test module directly: python test_bcb.py
if __name__ == "__main__":
    unittest.main()
# vi: nowrap
|
import string  # kept as-is (unused here)

# HackerRank "Lists": apply list commands read from stdin to `arr`.
# Commands look like "insert 0 5", "remove 5", "append 9", "sort",
# "pop", "reverse", or "print".
arr = []
for _ in range(int(input())):
    tokens = input().split()
    cmd, args = tokens[0], tokens[1:]
    if cmd != "print":
        # Security/robustness fix: dispatch with getattr instead of eval(),
        # so the input line can never execute arbitrary code.
        getattr(arr, cmd)(*(int(a) for a in args))
    else:
        print(arr)
|
# A collection of functions that spawn openstack clients
import keystoneclient.v2_0.client as ksclient
from keystoneclient import session
from keystoneclient.auth.identity import v2
import glanceclient.v2.client as glclient
from novaclient import client
from swiftclient import Connection
def create_keystone_client(credentials):
    # Build a keystone v2 client from the credential mapping and force
    # authentication eagerly; returns None when authentication fails.
    keystone = ksclient.Client(auth_url=credentials["OS_AUTH_URL"],
                               username=credentials["OS_USERNAME"],
                               password=credentials["OS_PASSWORD"],
                               tenant_name=credentials["OS_TENANT_NAME"],
                               region_name=credentials["OS_REGION_NAME"])
    try:
        # Touching auth_token triggers the actual authentication call.
        keystone.auth_token
        return keystone
    except Exception:
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit.  Parenthesised print works on
        # Python 2 and 3.
        print("KEYSTONE AUTHENTICATION FAILURE. Check config file.")
        return None
def create_nova_client(credentials):
    """Return a nova client authenticated through a keystone v2 session."""
    password_auth = v2.Password(auth_url=credentials["OS_AUTH_URL"],
                                username=credentials["OS_USERNAME"],
                                password=credentials["OS_PASSWORD"],
                                tenant_name=credentials["OS_TENANT_NAME"])
    keystone_session = session.Session(auth=password_auth)
    return client.Client("2", session=keystone_session)
# Note: A swift endpoint is required for creating a swift client
def create_swift_client(credentials):
    """Return a swift Connection using v2 password authentication."""
    connection_kwargs = {
        'user': credentials["OS_USERNAME"],
        'key': credentials["OS_PASSWORD"],
        'authurl': credentials["OS_AUTH_URL"],
        'tenant_name': credentials["OS_TENANT_NAME"],
        'auth_version': "2.0",
    }
    return Connection(**connection_kwargs)
def create_glance_client(keystone_client):
    """Return a glance client pointed at the image endpoint from the
    keystone service catalog, reusing the keystone token."""
    endpoint = keystone_client.service_catalog.url_for(service_type='image')
    return glclient.Client(endpoint=endpoint,
                           token=keystone_client.auth_token)
|
class ClaseDecoradora:
    """Class-based decorator: announces each call, then delegates to the
    wrapped function and returns its result."""

    def __init__(self, fnc):
        self.fnc = fnc

    def __call__(self, *args, **kwargs):
        print('se llama a la clase decorradora')
        # Bug fix: propagate the wrapped function's return value; previously
        # every decorated function silently returned None.
        return self.fnc(*args, **kwargs)
# Equivalent to applying @ClaseDecoradora with decorator syntax.
def hablar(mensaje):
    """Print the given message."""
    print(mensaje)


hablar = ClaseDecoradora(hablar)

hablar('Hola')
|
from tokenizer import Tokenizer
from tokenizer import Token
from token_type import TokenType
from number import Number
class ExprParser(object):
    '''
    Recursive-descent parser for the grammar in 'language_definition.txt'.
    See: https://en.wikipedia.org/wiki/Recursive_descent_parser
    Note: Need paranthesis to enforce precedence for ~ and -
    Example: ~(-b11011011) or -(~b11011011)

    Precedence, loosest to tightest: | , ^ , & , << >> , + - , * % // ,
    unary - ~ , number literals / parenthesised expressions.

    Cleanup: the re-check of currentToken.type right after a successful
    _accept() was unreachable (accept only succeeds when the types match),
    so those dead raise branches were removed.
    '''

    def parse(self, expr):
        """Tokenize `expr` and return the evaluated Number result."""
        self.tokens = Tokenizer(expr).tokenize()
        self.currentToken = None
        self.nextToken = None
        self._nextToken()  # prime the one-token lookahead
        return self.startExpr()

    def _nextToken(self):
        # Slide the lookahead window: current <- next, next <- stream.
        self.currentToken, self.nextToken = self.nextToken, next(self.tokens, None)

    def _accept(self, tokenType):
        # Consume the lookahead token iff it has the expected type.
        if self.nextToken and self.nextToken.type == tokenType:
            self._nextToken()
            return True
        else:
            return False

    def _expect(self, tokenType):
        # Like _accept, but a mismatch is a syntax error.
        if self._accept(tokenType):
            return True
        raise SyntaxError(f'Expected {tokenType.name}')

    def startExpr(self):
        '''
        L -> R
        start_expr
            := xor_expr { '|' xor_expr }
        '''
        result = self.xorExpr()
        while self._accept(TokenType.OR):
            result |= self.xorExpr()
        return result

    def xorExpr(self):
        '''
        L -> R
        xor_expr
            := and_expr { '^' and_expr }
        '''
        result = self.andExpr()
        while self._accept(TokenType.XOR):
            result ^= self.andExpr()
        return result

    def andExpr(self):
        '''
        L -> R
        and_expr
            := shift_expr { '&' shift_expr }
        '''
        result = self.shiftExpr()
        while self._accept(TokenType.AND):
            result &= self.shiftExpr()
        return result

    def shiftExpr(self):
        '''
        L -> R
        shift_expr
            := arith_expr { ( '<<' | '>>' ) arith_expr }
        '''
        result = self.arithExpr()
        while self._accept(TokenType.LSHIFT) or self._accept(TokenType.RSHIFT):
            # currentToken records which operator was just consumed.
            if self.currentToken.type == TokenType.LSHIFT:
                result = result << self.arithExpr()
            else:
                result = result >> self.arithExpr()
        return result

    def arithExpr(self):
        '''
        L -> R
        arith_expr
            := term { ( '+' | '-' ) term }
        '''
        result = self.term()
        while self._accept(TokenType.ADD) or self._accept(TokenType.MINUS):
            if self.currentToken.type == TokenType.ADD:
                result += self.term()
            else:
                result -= self.term()
        return result

    def term(self):
        '''
        L -> R
        term
            := factor { ( '*' | '%' | '//' ) factor }
        '''
        result = self.factor()
        while (self._accept(TokenType.MULTIPLY)
               or self._accept(TokenType.REMAINDER)
               or self._accept(TokenType.DIVIDE)):
            if self.currentToken.type == TokenType.MULTIPLY:
                result *= self.factor()
            elif self.currentToken.type == TokenType.REMAINDER:
                result %= self.factor()
            else:
                result //= self.factor()
        return result

    def factor(self):
        '''
        L -> R
        factor
            := ('-'|'~' ) primary | primary
        '''
        if self._accept(TokenType.MINUS):
            # Unary minus is implemented as 0 - primary.
            return Number().fromNumber(0) - self.primary()
        elif self._accept(TokenType.INVERT):
            return ~self.primary()
        return self.primary()

    def primary(self):
        '''
        primary
            := hex_number | binary_number | octal_number | decimal_number | '(' start_expr ')'
        '''
        if (self._accept(TokenType.DECIMAL) or self._accept(TokenType.HEX)
                or self._accept(TokenType.BINARY) or self._accept(TokenType.OCTAL)):
            return Number().fromString(self.currentToken.value)
        elif self._accept(TokenType.LPAREN):
            result = self.startExpr()
            self._expect(TokenType.RPAREN)
            return result
        else:
            # Bug fix: at end of input nextToken is None, so reading .value
            # raised AttributeError instead of a SyntaxError.
            bad = self.nextToken.value if self.nextToken else 'end of input'
            raise SyntaxError(f'Got unexpected token {bad}')
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 04 19:57:11 2013
@author: drewhill
"""
#from AbilListClass.py import AbilListClass
import os
class ability: #for both skills and interests
    """File-backed record for a skill or interest and the employees holding it.

    State is persisted under <cwd>/<type>/<id>.skl (skills) or .int
    (interests) via inFile/outFile.

    Fix: dict.has_key() was removed in Python 3; membership tests now use
    `in`, which is valid on Python 2 and 3.
    """

    def __init__(self, newID, val, kind):  # constructor
        self.id = newID
        self.name = val
        self.type = kind          # "Skill" or "Int"
        self.empList = dict()     # empID -> skill level (1-3)
        self.notes = ""

    def rename(self, val):
        # Reload from disk first so concurrent edits are not lost.
        self.inFile()
        self.name = val
        self.outFile()

    def addEmp(self, empID, lvl=1):
        self.inFile()  # get most updated
        if empID not in self.empList:
            self.empList[empID] = lvl
            self.outFile()

    def chgLvl(self, empID, lvl):
        self.inFile()
        if empID in self.empList:
            self.empList[empID] = lvl
            self.outFile()

    def remEmp(self, empID):
        self.inFile()  # get most updated
        if empID in self.empList:
            self.empList.pop(empID)
            self.outFile()

    def retList(self):
        # (empID, level) pairs for every employee with this ability.
        return self.empList.items()

    def _path(self):
        # Full path of this record's backing file.
        ext = ".skl" if self.type == "Skill" else ".int"
        return os.path.join(os.getcwd(), str(self.type), str(self.id) + ext)

    def inFile(self):
        """Load this record from its file; returns -1 if the file is missing.

        File layout: header/value line pairs for ID, Name, Type and Notes
        (Type and Notes values may be absent), then an "Employee List"
        header followed by one "empID:level" line per employee.
        """
        if not os.path.isfile(self._path()):
            return -1
        with open(self._path(), 'r') as f:
            f.readline()                                   # "ID" header
            self.id = int(f.readline().rstrip("\n"))
            f.readline()                                   # "Name" header
            self.name = f.readline().rstrip("\n")
            f.readline()                                   # "Type" header
            line = f.readline().rstrip("\n")
            if not line == "Notes":
                self.type = line
                line = f.readline().rstrip("\n")           # "Notes" header
            else:
                # Type value absent; we are already on the "Notes" header.
                self.type = ""
            line = f.readline().rstrip("\n")               # notes value or next header
            if not line == "Employee List":
                self.notes = line
                f.readline()                               # "Employee List" header
            else:
                self.notes = ""
            self.empList = dict()  # reset before re-reading
            for line in f:
                empID, empLvl = line.rstrip("\n").split(":")
                self.empList[int(empID)] = int(empLvl)

    def outFile(self):
        """Write this record to its file, overwriting previous contents."""
        with open(self._path(), 'w+') as f:
            f.write("ID" + "\n")
            f.write(str(self.id) + "\n")
            f.write("Name" + "\n")
            f.write(str(self.name) + "\n")
            f.write("Type" + "\n")
            f.write(str(self.type) + "\n")
            f.write("Notes" + "\n")
            if not self.notes == "":
                f.write(str(self.notes) + "\n")
            #TODO: Sanitize output for colons and newlines
            f.write("Employee List" + "\n")
            for empID, empLvl in self.empList.items():
                f.write(str(empID) + ":" + str(empLvl) + "\n")
import argparse
import subprocess
import unittest.mock
from argparse import Namespace
from get_ip_addresses import IpAddresses
class TestStringMethods(unittest.TestCase):
    """Tests for IpAddresses: each test patches parse_arguments and os.system,
    then asserts the exact shell command (or delegation) produced."""

    def setUp(self):
        # NOTE(review): these run the *real* shell commands before any mocks
        # are installed, and the stored results are never used by the tests --
        # consider removing for speed and hermeticity.
        self.ip_addresses_all = IpAddresses().ip_addresses_all()
        self.ip_addresses_with_prefix = IpAddresses().ip_addresses_with_prefix()
        self.ip_addresses_overlapping = IpAddresses().ip_addresses_overlapping()
        self.maxDiff = None

    @unittest.mock.patch('os.system')
    @unittest.mock.patch('get_ip_addresses.IpAddresses.parse_arguments')
    def test_get_ip_addresses_all(self,
                                  mock_parse,
                                  mock_bash_command):
        # Decorators apply bottom-up: parse_arguments -> first arg, os.system -> second.
        mock_parse.return_value = Namespace(overlapping=False, with_prefix=False)
        IpAddresses().ip_addresses_all()
        # NOTE(review): '\.' in a non-raw string is an invalid escape
        # (DeprecationWarning); the command text itself is unchanged by it.
        bash_command = "ifconfig | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*'"
        mock_bash_command.assert_called_once_with(bash_command)

    @unittest.mock.patch('os.system')
    @unittest.mock.patch('get_ip_addresses.IpAddresses.parse_arguments')
    def test_ip_addresses_with_prefix(self,
                                      mock_parse,
                                      mock_bash_command):
        mock_parse.return_value = Namespace(overlapping=False, with_prefix=True)
        IpAddresses().ip_addresses_with_prefix()
        bash_command = "ip -o -f inet addr show | awk '/scope global/ {print $4}'"
        mock_bash_command.assert_called_once_with(bash_command)

    @unittest.mock.patch('os.system')
    @unittest.mock.patch('get_ip_addresses.IpAddresses.parse_arguments')
    def test_ip_addresses_overlapping(self,
                                      mock_parse,
                                      mock_bash_command):
        mock_parse.return_value = Namespace(overlapping=True, with_prefix=False)
        # Runs the real `ip` command to reproduce the expected ipconflict args.
        ips_with_prefix = subprocess.check_output("ip -o -f inet addr show | awk '/scope global/ {print $4}'",
                                                  shell=True)
        command_arguments = ' '.join(ips_with_prefix.decode("utf-8").split('\n'))
        IpAddresses().ip_addresses_overlapping()
        bash_command = f"ipconflict -o {command_arguments}"
        mock_bash_command.assert_called_once_with(bash_command)

    @unittest.mock.patch('get_ip_addresses.IpAddresses.ip_addresses_all')
    @unittest.mock.patch('get_ip_addresses.IpAddresses.parse_arguments')
    def test_no_parameters_called(self,
                                  mock_parse,
                                  mock_all):
        # Default flags delegate to ip_addresses_all().
        mock_parse.return_value = Namespace(overlapping=False, with_prefix=False)
        mock_all.return_value = "0.0.0.0"
        self.assertEqual("0.0.0.0", IpAddresses().get_ip_addresses())

    @unittest.mock.patch('get_ip_addresses.IpAddresses.ip_addresses_with_prefix')
    @unittest.mock.patch('get_ip_addresses.IpAddresses.parse_arguments')
    def test_with_prefix_called(self,
                                mock_parse,
                                mock_prefix):
        mock_parse.return_value = Namespace(overlapping=False, with_prefix=True)
        mock_prefix.return_value = "0.0.0.0"
        self.assertEqual("0.0.0.0", IpAddresses().get_ip_addresses())

    @unittest.mock.patch('get_ip_addresses.IpAddresses.ip_addresses_overlapping')
    @unittest.mock.patch('get_ip_addresses.IpAddresses.parse_arguments')
    def test_overlapping_called(self,
                                mock_parse,
                                mock_prefix):
        mock_parse.return_value = Namespace(overlapping=True, with_prefix=False)
        mock_prefix.return_value = "0.0.0.0"
        self.assertEqual("0.0.0.0", IpAddresses().get_ip_addresses())

    @unittest.mock.patch('get_ip_addresses.IpAddresses.ip_addresses_all')
    @unittest.mock.patch('get_ip_addresses.IpAddresses.parse_arguments')
    def test_raise_runtime_error(self,
                                 mock_parse,
                                 mock_all):
        # A CalledProcessError from the command layer surfaces as RuntimeError.
        mock_parse.return_value = Namespace(overlapping=False, with_prefix=False)
        mock_all.side_effect = subprocess.CalledProcessError(returncode=2, cmd=["bad"])
        self.assertRaises(RuntimeError, IpAddresses().get_ip_addresses)

    def test_parser(self):
        parser = IpAddresses().parse_arguments(args=["--with-prefix", "--overlapping"])
        self.assertTrue(parser)

    def tearDown(self):
        # NOTE(review): self.get_ip_addresses was never set in setUp; this
        # assignment just creates a new attribute.
        self.ip_addresses_all = None
        self.ip_addresses_with_prefix = None
        self.ip_addresses_overlapping = None
        self.get_ip_addresses = None
if __name__ == '__main__':
unittest.main()
|
# Time complexity: O(N)
def moveElementToEnd(array, element):
    """In-place two-pointer partition: push every `element` to the back of
    `array` (relative order of the other items is not preserved).
    Returns the same list for convenience."""
    lo, hi = 0, len(array) - 1
    while lo < hi:
        # Park `hi` on the rightmost slot not already holding `element`.
        while lo < hi and array[hi] == element:
            hi -= 1
        if array[lo] == element:
            array[lo], array[hi] = array[hi], array[lo]
        lo += 1
    return array
print(moveElementToEnd([2, 1, 2, 2, 2, 3, 4, 2], 2)) |
'''
Myanna Harris
9-11-16
asgn2.py
Normalize and check Zipf's Law on Jane Austen's novel, Emma.
To run:
python asgn2.py "path/to/austen-emma.txt"
'''
import sys
import re
import numpy as np
import matplotlib.pylab as plt
# normalize(filePath)
def normalize(file):
    """Tokenize `file` into alphanumeric words, write one per line to
    list.txt, and return them as a list.  A bare possessive "s" is dropped.

    Bug fix: the old pattern '(^|[^a-zA-Z])[a-zA-Z0-9]+([^a-zA-Z]|$)'
    consumed the delimiter after each word, so non-overlapping finditer
    skipped every word separated by a single delimiter (e.g. "a b c" lost
    "b").  Matching only the word characters avoids that, and the files are
    now closed via context managers even on error.
    """
    with open(file, 'r') as f:
        text = f.read()
    wordList = []
    with open("list.txt", "w") as fOut:
        for match in re.finditer(r'[a-zA-Z0-9]+', text):
            s = match.group(0)
            if s != "s":  # drop the trailing s of possessives like "Emma's"
                fOut.write(s + "\n")
                wordList.append(s)
    return wordList
def makeDictionary(list):
    """Return a word -> occurrence-count dict for the given word list.

    (The parameter name shadows the builtin `list`; kept for interface
    compatibility.)
    """
    freqDict = {}
    for word in list:
        # dict.has_key() was removed in Python 3; `in` works on 2 and 3.
        if word in freqDict:
            freqDict[word] += 1
        else:
            freqDict[word] = 1
    return freqDict
def plotZipfsLaw():
    """Overlay the ideal Zipf curve y = 1/x on a log-log plot."""
    ranks = np.linspace(1, 100, 1000)
    plt.loglog(ranks, 1 / ranks)
def plotFreq(dict):
    """Log-log plot of word frequencies in descending rank order."""
    ranked = sorted(dict.items(), key=lambda item: item[1], reverse=True)
    frequencies = [count for _, count in ranked]
    ranks = np.linspace(1, len(frequencies) + 1, len(frequencies))
    plt.loglog(ranks, frequencies)
def main(argv):
    """Entry point: argv[0] is the path to austen-emma.txt.

    Normalizes the text, builds the frequency dict, and plots the observed
    rank/frequency curve against the ideal Zipf curve.
    """
    if len(argv) < 1:
        # Fix: `print "..."` is Python-2-only syntax; the parenthesised
        # form behaves identically on Python 2 and 3.
        print("Need file path")
        return 0
    wordList = normalize(argv[0])
    wordDict = makeDictionary(wordList)
    plotZipfsLaw()
    plotFreq(wordDict)
    plt.title("Zipf's Law with Emma's word frequency vs rank")
    plt.xlabel("r/rank")
    plt.ylabel("(1/r)/frequency")
    plt.show()
# Run only when executed as a script; sys.argv[0] (the script name) is dropped.
if __name__ == '__main__':
    main(sys.argv[1:])
|
# Copyright (c) Alibaba, Inc. and its affiliates.
from collections.abc import Mapping
import torch
from torch import distributed as dist
from modelscope.metainfo import Trainers
from modelscope.trainers.builder import TRAINERS
from modelscope.trainers.optimizer.builder import build_optimizer
from modelscope.trainers.trainer import EpochBasedTrainer
from modelscope.utils.constant import ModeKeys
from modelscope.utils.logger import get_logger
@TRAINERS.register_module(module_name=Trainers.image_portrait_enhancement)
class ImagePortraitEnhancementTrainer(EpochBasedTrainer):
    """GAN trainer for image portrait enhancement: alternates discriminator
    and generator updates with lazy R1 / path-length regularization."""

    def train_step(self, model, inputs):
        """ Perform a training step on a batch of inputs.

        Runs one discriminator update (plus R1 regularization every
        `d_reg_every` iterations), one generator update (plus path-length
        regularization every `g_reg_every` iterations), then an EMA update
        via model.accumulate().  Losses are stored in self.train_outputs and
        pushed to the log buffer; the method returns None.

        Args:
            model (`TorchModel`): The model to train.
            inputs (`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.
                The dictionary will be unpacked before being fed to the model.
        """
        # EvaluationHook will do evaluate and change mode to val, return to train mode
        # TODO: find more pretty way to change mode
        self.d_reg_every = self.cfg.train.get('d_reg_every', 16)
        self.g_reg_every = self.cfg.train.get('g_reg_every', 4)
        self.path_regularize = self.cfg.train.get('path_regularize', 2)
        self.r1 = self.cfg.train.get('r1', 10)

        train_outputs = dict()
        self._mode = ModeKeys.TRAIN

        # call model forward but not __call__ to skip postprocess
        if isinstance(inputs, Mapping):
            d_loss = model._train_forward_d(**inputs)
        else:
            d_loss = model._train_forward_d(inputs)
        train_outputs['d_loss'] = d_loss

        model.discriminator.zero_grad()
        d_loss.backward()
        self.optimizer_d.step()

        # Lazy R1 regularization of the discriminator.
        if self._iter % self.d_reg_every == 0:
            if isinstance(inputs, Mapping):
                r1_loss = model._train_forward_d_r1(**inputs)
            else:
                r1_loss = model._train_forward_d_r1(inputs)
            train_outputs['r1_loss'] = r1_loss

            model.discriminator.zero_grad()
            # Scaled by d_reg_every to compensate for the lazy schedule.
            (self.r1 / 2 * r1_loss * self.d_reg_every).backward()
            self.optimizer_d.step()

        if isinstance(inputs, Mapping):
            g_loss = model._train_forward_g(**inputs)
        else:
            g_loss = model._train_forward_g(inputs)
        train_outputs['g_loss'] = g_loss

        model.generator.zero_grad()
        g_loss.backward()
        self.optimizer.step()

        path_loss = 0
        # Lazy path-length regularization of the generator.
        if self._iter % self.g_reg_every == 0:
            if isinstance(inputs, Mapping):
                path_loss = model._train_forward_g_path(**inputs)
            else:
                path_loss = model._train_forward_g_path(inputs)
            train_outputs['path_loss'] = path_loss

            model.generator.zero_grad()
            weighted_path_loss = self.path_regularize * self.g_reg_every * path_loss
            weighted_path_loss.backward()
            self.optimizer.step()

        # EMA update of the generator weights.
        model.accumulate()

        if not isinstance(train_outputs, dict):
            raise TypeError('"model.forward()" must return a dict')

        # add model output info to log
        if 'log_vars' not in train_outputs:
            default_keys_pattern = ['loss']
            match_keys = set([])
            for key_p in default_keys_pattern:
                match_keys.update(
                    [key for key in train_outputs.keys() if key_p in key])

            log_vars = {}
            for key in match_keys:
                value = train_outputs.get(key, None)
                if value is not None:
                    if dist.is_available() and dist.is_initialized():
                        # Average the loss across workers for logging only.
                        value = value.data.clone()
                        dist.all_reduce(value.div_(dist.get_world_size()))
                    log_vars.update({key: value.item()})
            self.log_buffer.update(log_vars)
        else:
            self.log_buffer.update(train_outputs['log_vars'])
        self.train_outputs = train_outputs

    def create_optimizer_and_scheduler(self):
        """ Create optimizer and lr scheduler

        We provide a default implementation, if you want to customize your own optimizer
        and lr scheduler, you can either pass a tuple through trainer init function or
        subclass this class and override this method.
        """
        optimizer, lr_scheduler = self.optimizers
        if optimizer is None:
            optimizer_cfg = self.cfg.train.get('optimizer', None)
        else:
            optimizer_cfg = None
        optimizer_d_cfg = self.cfg.train.get('optimizer_d', None)

        optim_options = {}
        if optimizer_cfg is not None:
            optim_options = optimizer_cfg.pop('options', {})
            optimizer = build_optimizer(
                self.model.generator, cfg=optimizer_cfg)

        # Bug fix: when no 'optimizer_d' config was present, optimizer_d was
        # referenced below without ever being bound, raising NameError.
        optimizer_d = None
        if optimizer_d_cfg is not None:
            optimizer_d = build_optimizer(
                self.model.discriminator, cfg=optimizer_d_cfg)

        lr_options = {}  # no lr-scheduler options are configured here yet

        self.optimizer = optimizer
        self.lr_scheduler = lr_scheduler
        self.optimizer_d = optimizer_d
        return self.optimizer, self.lr_scheduler, optim_options, lr_options
|
# The number, 1406357289, is a 0 to 9 pandigital number because it is made up of each of the digits 0 to 9
# in some order, but it also has a rather interesting sub-string divisibility property.
#
# Let d1 be the 1st digit, d2 be the 2nd digit, and so on. In this way, we note the following:
#
# d2d3d4=406 is divisible by 2
# d3d4d5=063 is divisible by 3
# d4d5d6=635 is divisible by 5
# d5d6d7=357 is divisible by 7
# d6d7d8=572 is divisible by 11
# d7d8d9=728 is divisible by 13
# d8d9d10=289 is divisible by 17
#
# Find the sum of all 0 to 9 pandigital numbers with this property.
import itertools
import progressbar
def generate_pandigitals():
    """Yield every permutation of the digits 0-9 as a 10-character string,
    in lexicographic order."""
    for digits in itertools.permutations("0123456789"):
        yield "".join(digits)
def solve():
    """Project Euler 43: sum all 0-9 pandigital numbers whose overlapping
    3-digit substrings d2d3d4 ... d8d9d10 are divisible by 2,3,5,7,11,13,17.

    Fix: `print " "` was Python-2-only syntax; the parenthesised single-arg
    form behaves identically on Python 2 and 3.
    """
    divisors = (2, 3, 5, 7, 11, 13, 17)
    solutions = []
    pandigitals = list(generate_pandigitals())
    bar = progressbar.ProgressBar(max_value=len(pandigitals) + 1)
    for counter, p in enumerate(pandigitals, start=1):
        bar.update(counter)
        if p[0] == '0':
            # A leading zero would make it a 9-digit number, not pandigital.
            continue
        # p[i+1:i+4] is the 3-digit window starting at digit i+2.
        if all(int(p[i + 1:i + 4]) % d == 0 for i, d in enumerate(divisors)):
            solutions.append(int(p))
    print(" ")
    return "The solution is %d." % sum(solutions)
# Fix: parenthesised call form works under both Python 2 and 3.
print(solve())
# SOLVED
|
from Robinhood import Robinhood
rb = Robinhood()
# Interactive login: prompts for credentials on stdin.
rb.login_prompt()
watchlist = rb.watchlist()
# Space-separated ticker symbols extracted from the watchlist entries.
symbols = ' '.join([instrument['symbol'] for instrument in watchlist])
print(symbols)
with open("watchlist.txt", "w") as text_file:
    text_file.write("{0}".format(symbols))
|
# Basic control-flow demos: string comparison, for/range, while with break.
message = "Hello India"
if message == "Hello India":
    # Fix: corrected the "vales" typo in the printed message.
    print("values are Equal")
else:
    print("values are not Equal")

values = [1, 10, 11, 3, 4, 5]
# for i in values:
#     print(i)

# Renamed from `sum`, which shadowed the builtin sum().
total = 0
for i in range(1, 6):
    total = total + i
    print(total)  # cumulative sums 1, 3, 6, 10, 15

print('***************************')

# Odd numbers from 1 to 9.
for j in range(1, 10, 2):
    print(j)

print('***************************')

# Counts 10 down to 4, then breaks before printing 3.
val = 10
while val > 1:
    if val == 3:
        break
    print(val)
    val = val - 1
|
from enum import Enum
from os import path
from typing import List
import typer
from colorama import init
from ..utils import STATUS_ARROW
from .dashboard import create
from .data import aggregate_results, parse_simulation_report
from .plotters import metric_corr, plot_miss_freq, plot_num_miss_after_del
class PlotType(str, Enum):
    # str mixin makes members compare/serialize as plain strings, which is
    # what the CLI option parsing relies on.
    afterdelete = "AFTERDELETE"
    missfreq = "MISSFREQ"
app = typer.Typer(name="probe.results", add_completion=False)
@app.command()
def dashboard(folders: List[str], dash_ip: str = "localhost"):
    """Aggregate the result folders and serve them in the dashboard UI."""
    init()  # colorama: enable ANSI colors on Windows
    print(f"{STATUS_ARROW}Aggregate results...")
    results = aggregate_results(folders)
    print(f"{STATUS_ARROW}Start dashboard...")
    create(results, dash_ip)
@app.command()
def plot(
    folders: List[str],
    p_type: PlotType = PlotType.afterdelete,
    output_filename: str = "",
):
    """Aggregate the result folders and render the plot selected by p_type."""
    init()  # colorama: enable ANSI colors on Windows
    print(f"{STATUS_ARROW}Aggregate results...")
    results = aggregate_results(folders)
    # Both plot types consume the same parsed report; only the renderer
    # differs, so dispatch through a table instead of duplicated branches.
    plotters = {
        "AFTERDELETE": plot_num_miss_after_del,
        "MISSFREQ": plot_miss_freq,
    }
    plotter = plotters.get(p_type.value)
    if plotter is not None:
        plotter(
            parse_simulation_report(
                results.get_all(),
                path.commonprefix(results.files),
                generator=True,
                target=p_type.value,
            ),
            output_filename=output_filename,
        )
@app.command()
def plot_corr(
    folders: List[str],
):
    """Aggregate the result folders and plot the metric correlation matrix."""
    print(f"{STATUS_ARROW}Aggregate results...")
    results = aggregate_results(folders)
    print(f"{STATUS_ARROW}Calculate and plot correlation matrix...")
    metric_corr(results.get_all_df())
# Typer CLI entry point when run as a script.
if __name__ == "__main__":
    app(prog_name="probe.results")
|
import itertools, utils, re
from collections import defaultdict
from defense.models import Player, Country, Team, \
Stadium, City, Region, StadiumTeam, TournamentTeam, \
GameTeam, Goal, GamePlayer, PlayerTeam
from difflib import SequenceMatcher
from django.core.exceptions import ObjectDoesNotExist
# Matches every non-digit character (for stripping text down to numbers).
num_pattern = re.compile('[^0-9]')
# Default nationality assumed when a person record carries no country.
DEF_COUNTRY = 'paraguay'
# Last names forming a compound with the preceding word (e.g. "da silva").
potential_compound_last_names = [{'last_name': 'silva', 'prefix': 'da'}]
def disambiguate_objs_by_name(objs, ref_name):
    """Pick the object whose normalized name shares the most same-position
    characters with ref_name.  Ties resolve to the first maximum."""
    similarity = []
    for obj in objs:
        nor_name = utils.normalize_text(obj.name)
        # NOTE(review): itertools.izip_longest exists only on Python 2; on
        # Python 3 this must become itertools.zip_longest.
        names = itertools.izip_longest(nor_name, ref_name)
        # Count positions where both strings carry the identical character.
        similarity.append(len([c1 for c1, c2 in names if c1 == c2]))
    idx_max_sim = similarity.index(max(similarity))
    return objs[idx_max_sim]
def search_most_similar_strings(model, name):
    """Return the model objects whose names are most similar to `name`
    (difflib ratio), provided the best ratio exceeds 0.50; otherwise []."""
    threshold = 0.50
    model_objs = model.objects.all()
    ratios = [SequenceMatcher(None, name, obj.name).ratio() for obj in model_objs]
    if not ratios:
        return []
    best = max(ratios)
    if best <= threshold:
        return []
    # All objects tied at the best ratio are returned, in original order.
    return [model_objs[i] for i, ratio in enumerate(ratios) if ratio == best]
def search_obj_by_name(model, name):
    """Name lookup with fuzzy fallback.

    Tries a case-insensitive substring match first; if nothing matches,
    falls back to difflib similarity.  Whenever several candidates remain,
    they are reduced to the single best via positional disambiguation.
    Always returns a list/queryset with at most one disambiguated match.
    """
    nor_name = utils.normalize_text(name).strip()
    matches = model.objects.filter(name__icontains=nor_name)
    if len(matches) == 1:
        return matches
    if len(matches) == 0:
        matches = search_most_similar_strings(model, nor_name)
    if len(matches) > 1:
        matches = [disambiguate_objs_by_name(matches, nor_name)]
    return matches
###
# Person (Player, Coach)
###
def extract_firstname_lastname_from_string(name_str):
    """Split a full name into {'first_name', 'last_name'}.

    The last word becomes the last name (joined with its prefix for known
    compound names such as 'da silva'); everything before it becomes the
    first name, formatted for DB storage.
    """
    vec_name = name_str.split(' ')
    name_length = len(vec_name)
    if name_length > 2:
        print('Found the name {0} having more than two words, took the last '
              'word as the last name'.format(utils.normalize_text(name_str)))
    # assume that the last part of the name contains the last name
    last_name = vec_name[-1].strip().lower()
    # check whether the last name can be a compound last name
    # NOTE(review): a single-word name equal to a compound last name (e.g.
    # just "silva") would read vec_name[-2] and raise IndexError.
    limit_name = name_length-1
    for compound_last_name in potential_compound_last_names:
        if last_name.lower() == compound_last_name['last_name'] and \
                vec_name[-2].lower() == compound_last_name['prefix']:
            last_name = vec_name[-2] + ' ' + last_name
            limit_name -= 1  # the prefix word is no longer part of the first name
            break
    # extract name
    if name_length == 1:
        first_name = ''
    else:
        first_name = utils.format_text_to_save_db(' '.join(vec_name[:limit_name]))
    return {'first_name': first_name, 'last_name': last_name}
def search_person_by_name(name):
    """Return all Player rows whose last name matches the one parsed from `name`."""
    dict_name = extract_firstname_lastname_from_string(name)
    return Player.objects.filter(last_name__iexact=dict_name['last_name'])
def create_new_person(person_dict):
    """Create and return a Player from a dict with 'name' and optional
    'wikipage' / 'country' keys."""
    dict_name = extract_firstname_lastname_from_string(person_dict['name'])
    person_attrs = {'first_name': dict_name['first_name'],
                    'last_name': dict_name['last_name']}
    if 'wikipage' in person_dict.keys() and person_dict['wikipage']:
        person_attrs['wikipage'] = person_dict['wikipage']
    if 'country' in person_dict.keys():
        # Bug fix: get_or_create returns an (object, created) tuple -- the
        # tuple itself was being assigned to nationality.  Also supply
        # `defaults` so a miss on the case-insensitive lookup can actually
        # create the row (a bare __iexact lookup cannot populate the field).
        country, _ = Country.objects.get_or_create(
            name__iexact=person_dict['country'],
            defaults={'name': person_dict['country']})
        person_attrs['nationality'] = country
    return Player.objects.create(**person_attrs)
def update_person(person_obj, person_dict):
    """Refresh a Player's first name, nationality and wikipage from `person_dict`."""
    dict_name = extract_firstname_lastname_from_string(person_dict['name'])
    person_obj.first_name = dict_name['first_name']
    if 'country' in person_dict.keys():
        # NOTE(review): assigns the raw country string, whereas
        # create_new_person resolves a Country object -- confirm which type
        # the nationality field expects.
        person_obj.nationality= person_dict['country']
    if 'wikipage' in person_dict.keys():
        person_obj.wikipage = person_dict['wikipage']
    person_obj.save()
def disambiguate_player(player_objs, tournament_obj):
    """Among same-named players, return the one whose team plays *tournament_obj*.

    Raises Exception when no candidate belongs to a team of the tournament.
    """
    tournament_teams = tournament_obj.teams.all()
    players_str = ''
    for player_obj in player_objs:
        players_str += player_obj.name + ' '
        # Fix: query the teams of the current candidate (player_obj) inside
        # the loop; the original read player_objs.team_set, i.e. asked the
        # whole queryset, which has no team_set attribute.
        for team in player_obj.team_set.all():
            if team in tournament_teams:
                return player_obj
    raise Exception('Couldnt disambiguate the players ', players_str)
def get_or_create_player(tournament_obj, player_dict):
    """Return the player described by *player_dict*, creating it if needed."""
    normalized = utils.normalize_text(player_dict['name'])
    matches = search_person_by_name(normalized)
    if not matches:
        # no such player yet: create one from the scraped data
        player_obj = create_new_person(player_dict)
    elif len(matches) > 1:
        # several candidates share the last name: resolve via the tournament
        player_obj = disambiguate_player(matches, tournament_obj)
    else:
        player_obj = matches[0]
        update_person(player_obj, player_dict)
    return player_obj
###
# Team
###
def create_new_team(team_dict, stadium_obj):
    """Create and return a Team located in the city of *stadium_obj*."""
    attrs = {'name': utils.format_text_to_save_db(team_dict['name']),
             'city': stadium_obj.city}
    if 'wikipage' in team_dict:
        attrs['wikipage'] = team_dict['wikipage']
    return Team.objects.create(**attrs)
def disambiguate_team(team_objs, tournament_obj):
    # Resolve an ambiguous name match: among the candidate teams, return
    # the one registered for *tournament_obj*; raise when none is.
    teams_str = ''
    for team_obj in team_objs:
        teams_str += team_obj.name + ' '
        try:
            # membership in the tournament decides which team was meant
            team_tournament_obj = TournamentTeam.objects.get(tournament=tournament_obj,
                                                             team=team_obj)
            return team_tournament_obj.team
        except ObjectDoesNotExist:
            # this candidate does not play the tournament; try the next one
            continue
    raise Exception('Couldnt disambiguate the teams ', teams_str)
def update_team(team_obj, team_dict):
    """Update *team_obj* from the scraped *team_dict* and save it."""
    # keep the longest known variant of the name; the word 'club' is
    # removed only for the length comparison
    stripped = team_dict['name'].replace('club', '').strip()
    if len(stripped) > len(team_obj.name):
        team_obj.name = utils.format_text_to_save_db(team_dict['name'])
    if 'foundation' in team_dict:
        team_obj.foundation = team_dict['foundation']
    if 'wikipage' in team_dict:
        team_obj.wikipage = team_dict['wikipage']
    team_obj.save()
def get_or_create_team(tournament_obj, team, source):
    # Return the Team described by *team*, creating it (together with its
    # stadium and city) when it does not exist yet.
    team_name = utils.normalize_text(team['name'])
    team_name = team_name.replace('club', '').strip()  # delete word 'club'
    ret_obj = search_obj_by_name(Team, team_name)
    stadium = None
    if not ret_obj:
        # the team doesn't exist yet
        if 'stadium' in team.keys() and 'city' in team.keys():
            stadium = get_or_create_stadium(team['stadium'], team['city'])
            team_obj = create_new_team(team, stadium)
        else:
            raise Exception('The team {0} doesnt exist and a new one cannot be created because there are not '
                            'information about city and stadium'.format(team_name))
    elif len(ret_obj) > 1:
        # several name matches: pick the one registered in this tournament
        team_obj = disambiguate_team(ret_obj, tournament_obj)
    else:
        team_obj = ret_obj[0]
    # associate the team with its stadium in case the association
    # doesn't exists
    # NOTE(review): stadium is only non-None when the team was just created,
    # so existing teams never get a stadium association here -- confirm intended.
    if stadium and not team_obj.stadium.all():
        StadiumTeam.objects.create(stadium=stadium, team=team_obj, source=source)
    update_team(team_obj, team)
    return team_obj
###
# Stadium
###
def create_new_stadium(stadium_dict, city):
    """Create and return a Stadium in *city*; capacity defaults to -1."""
    attrs = {'name': utils.format_text_to_save_db(stadium_dict['name']),
             'city': city}
    if 'capacity' in stadium_dict:
        # strip non-numeric characters (e.g. thousand separators)
        attrs['capacity'] = int(num_pattern.sub('', stadium_dict['capacity']))
    else:
        attrs['capacity'] = -1
    if 'wikipage' in stadium_dict:
        attrs['wikipage'] = stadium_dict['wikipage']
    return Stadium.objects.create(**attrs)
def update_stadium(stadium_obj, stadium_dict):
    """Update *stadium_obj* from *stadium_dict* and save it.

    The stadium keeps the longest known variant of its name.
    """
    # Fix: the original compared against and assigned to stadium_dict.name --
    # stadium_dict is a plain dict with no .name attribute (AttributeError) --
    # and then unconditionally overwrote the name, defeating the guard.
    if len(stadium_dict['name']) > len(stadium_obj.name):
        stadium_obj.name = stadium_dict['name']
    if 'capacity' in stadium_dict.keys():
        stadium_obj.capacity = stadium_dict['capacity']
    if 'wikipage' in stadium_dict.keys():
        stadium_obj.wikipage = stadium_dict['wikipage']
    stadium_obj.save()
def get_or_create_stadium(stadium_dict, city_dict):
    """Return the stadium described by *stadium_dict*, creating it if needed.

    Raises Exception when the name matches several existing stadiums.
    """
    stadium_name = utils.normalize_text(stadium_dict['name'])
    # delete word 'estadio'
    stadium_name = stadium_name.replace('estadio', '').strip()
    ret_obj = search_obj_by_name(Stadium, stadium_name)
    if not ret_obj:
        # the stadium doesn't exist yet
        city = get_or_create_city(city_dict)
        stadium_obj = create_new_stadium(stadium_dict, city)
    elif len(ret_obj) > 1:
        # Fix: the original tested len(ret_obj) > 0, which also raised for
        # the normal single-match case and made the else branch unreachable.
        raise Exception('Got more than one stadium')
    else:
        stadium_obj = ret_obj[0]
    update_stadium(stadium_obj, stadium_dict)
    return stadium_obj
###
# City
###
def create_new_city(city_dict, region):
    """Create and return a City, optionally linked to *region*."""
    attrs = {'name': utils.format_text_to_save_db(city_dict['name'])}
    if 'wikipage' in city_dict:
        attrs['wikipage'] = city_dict['wikipage']
    if region:
        attrs['region'] = region
    return City.objects.create(**attrs)
def update_city(city_obj, city_dict):
    """Update *city_obj* from *city_dict* and save it.

    The city keeps the longest known variant of its name.
    """
    # Fix: an unconditional overwrite used to follow this guard, which made
    # the longest-name check pointless.
    if len(city_dict['name']) > len(city_obj.name):
        city_obj.name = city_dict['name']
    if 'wikipage' in city_dict.keys():
        city_obj.wikipage = city_dict['wikipage']
    if 'region' in city_dict.keys():
        region = get_or_create_region(city_dict['region'])
        # Fix: model instances have no .add(); assign the relation the same
        # way create_new_city populates it.
        city_obj.region = region
    city_obj.save()
def get_or_create_city(city_dict):
    """Return the city described by *city_dict*, creating it if needed."""
    city_name = utils.normalize_text(city_dict['name'])
    matches = search_obj_by_name(City, city_name)
    if not matches:
        # the city doesn't exist, the default country for
        # all cities will be paraguay
        country = Country.objects.get(name__iexact=DEF_COUNTRY)
        region = get_or_create_region(city_dict['region']) if 'region' in city_dict else None
        city_obj = create_new_city(city_dict, region)
    elif len(matches) > 1:
        raise Exception('Got more than one city')
    else:
        city_obj = matches[0]
    update_city(city_obj, city_dict)
    return city_obj
###
# Region
###
def create_new_region(region_dict):
    """Create and return a Region built from *region_dict*."""
    attrs = {'name': utils.format_text_to_save_db(region_dict['name'])}
    if 'wikipage' in region_dict:
        attrs['wikipage'] = region_dict['wikipage']
    return Region.objects.create(**attrs)
def get_or_create_region(region_dict):
    """Return the region described by *region_dict*, creating it if needed."""
    region_name = utils.normalize_text(region_dict['name'])
    matches = search_obj_by_name(Region, region_name)
    if not matches:
        # the region doesn't exist, the default country for
        # all regions will be paraguay
        country = Country.objects.get(name__iexact=DEF_COUNTRY)
        region_obj = create_new_region(region_dict)
    elif len(matches) > 1:
        raise Exception('Got more than one region')
    else:
        region_obj = matches[0]
    return region_obj
def update_team_info_in_tournament(tournament_obj, team_obj, source_obj,
                                   game_team_obj, team_dict, rival_dict):
    # Update the team's aggregated tournament statistics (games played,
    # wins/draws/losses, goals for/against, points) after one game and
    # return the result for this team: 'won', 'drew' or 'lost'.
    try:
        tournament_team_obj = TournamentTeam.objects.get(tournament=tournament_obj,
                                                         team=team_obj)
    except ObjectDoesNotExist:
        # first game of this team in the tournament: create its record
        tournament_team_obj = TournamentTeam.objects.create(tournament=tournament_obj,
                                                            team=team_obj,
                                                            source=source_obj)
    tournament_team_obj.games += 1
    # classify the result by comparing this team's goals to the rival's score
    if game_team_obj.goals > int(rival_dict['score']):
        team_result = 'won'
        tournament_team_obj.wins += 1
    elif game_team_obj.goals == int(rival_dict['score']):
        team_result = 'drew'
        tournament_team_obj.draws += 1
    else:
        team_result = 'lost'
        tournament_team_obj.losses += 1
    tournament_team_obj.goals += len(team_dict['goals_info'])
    tournament_team_obj.goals_conceded += len(rival_dict['goals_info'])
    # standard football scoring: 3 points per win, 1 per draw
    tournament_team_obj.points = (3 * tournament_team_obj.wins) + tournament_team_obj.draws
    tournament_team_obj.save()
    return team_result
def add_team_game(tournament_obj, game_obj, source_obj, team_dict,
                  rival_dict, home=True):
    # Register one team's side of a game: create the GameTeam row, update
    # the tournament standings, and record every goal together with the
    # scorer's per-game (GamePlayer) and per-team (PlayerTeam) statistics.
    # Returns a dict mapping player id -> list of goal dicts for this game.
    team_obj = get_or_create_team(tournament_obj, team_dict, source_obj)
    game_team_attrs = {
        'game': game_obj,
        'team': team_obj,
        'home': home,
        'goals': int(team_dict['score'])
    }
    game_team_obj = GameTeam.objects.create(**game_team_attrs)
    # update team info in tournament
    team_result = update_team_info_in_tournament(tournament_obj, team_obj, source_obj,
                                                 game_team_obj, team_dict, rival_dict)
    # create goal objects
    game_players = defaultdict(list)
    for goal in team_dict['goals_info']:
        player_obj = get_or_create_player(tournament_obj, {'name': goal['author']})
        goal_attrs = {
            'author': player_obj,
            'minute': int(goal['minute']),
            'game': game_obj
        }
        if goal['type']:
            if goal['type'] == 'penalty':
                goal_attrs['type'] = 'penalty'
            if goal['type'] == 'own goal':
                goal_attrs['own'] = True
        goal_obj = Goal.objects.create(**goal_attrs)
        goal_obj.source.add(source_obj)
        # create/update game player models
        try:
            game_player_obj = GamePlayer.objects.get(game=game_obj, player=player_obj)
            game_player_obj.goals += 1
            game_player_obj.save()
        except ObjectDoesNotExist:
            game_player_attrs = {
                'game': game_obj,
                'player': player_obj,
                'goals': 1
            }
            game_player_obj = GamePlayer.objects.create(**game_player_attrs)
            game_player_obj.source.add(source_obj)
        # create/update team player models
        try:
            # NOTE(review): looked up by player only, ignoring the team --
            # confirm a player cannot have PlayerTeam rows for several teams.
            team_player_obj = PlayerTeam.objects.get(player=player_obj)
            # count the game only once per player (on the first goal seen)
            if player_obj.id not in game_players.keys():
                team_player_obj.games += 1
                if team_result == 'won':
                    team_player_obj.wins += 1
                elif team_result == 'drew':
                    team_player_obj.draws += 1
                else:
                    team_player_obj.losses += 1
            team_player_obj.goals += 1
            team_player_obj.save()
        except ObjectDoesNotExist:
            team_player_attrs = {
                'player': player_obj,
                'team': team_obj,
                'games': 1,
                'goals': 1
            }
            if team_result == 'won':
                team_player_attrs['wins'] = 1
            elif team_result == 'drew':
                team_player_attrs['draws'] = 1
            else:
                team_player_attrs['losses'] = 1
            player_team_obj = PlayerTeam.objects.create(**team_player_attrs)
            player_team_obj.source.add(source_obj)
        game_players[player_obj.id].append(goal)
    return game_players
def add_players_to_tournament_list(tournament_players, players):
    """Merge per-game goal lists in *players* into *tournament_players*.

    Both arguments map player id -> list of goal dicts. The merged mapping
    is returned (and *tournament_players* is also updated in place).
    """
    for player_id, goals in players.items():
        if player_id in tournament_players.keys():
            tournament_players[player_id].extend(goals)
        else:
            # Fix: new players used .append(goals), which nested the whole
            # list as a single element instead of storing the goals flat
            # like the extend() branch does for existing players.
            tournament_players[player_id] = list(goals)
    return tournament_players
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-10-22 03:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rename Link.receiver to 'recipient' and add Link.short_link."""

    dependencies = [
        ('main', '0002_userinfo_type'),
    ]

    operations = [
        migrations.RenameField(
            model_name='link',
            old_name='receiver',
            new_name='recipient',
        ),
        migrations.AddField(
            model_name='link',
            name='short_link',
            # empty-string default lets existing rows migrate cleanly
            field=models.CharField(default='', max_length=30),
        ),
    ]
|
import pygame
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from Objects.chessBoard import ChessBoard
from Objects.player import Player
from Objects.ai import AI
from Framework.sceneManager import Scene, SceneManager
from Framework.simpleImage import SimpleImage
class AIGameScene(Scene) :
    """Game scene where the human plays white and the AI plays black."""
    def __init__(self, screen, clock) :
        self.screen = screen
        self.clock = clock
        self.load_resources()
        # white (the human) always moves first
        self.isWhiteTurn = True
        # True while the human has a piece selected and is choosing a target
        self.isPlayerSelected = False
        self.selectedPiece = None
    def update(self) :
        # One frame: draw the board, check game end, then handle either the
        # human's mouse input (white) or the AI's move (black).
        self.chessBoard.update(self.screen)
        # isGameEnd() returns a pair: (game is over, white won)
        isGameEnd = self.chessBoard.isGameEnd()
        if isGameEnd[0] :
            if isGameEnd[1] :
                print("white win!")
            else :
                print("black win!")
            SceneManager.getInstance().changeScene("MainScene")
        for event in pygame.event.get() :
            if event.type == pygame.QUIT :
                SceneManager.getInstance().isQuit = True
                return
            if self.isWhiteTurn :
                if event.type == pygame.MOUSEBUTTONDOWN :
                    # board squares are 90x90 pixels, so integer-divide the
                    # mouse position to get board coordinates
                    mousePos = pygame.mouse.get_pos()
                    selectedPiece = self.chessBoard.getPiece(mousePos[0] // 90, mousePos[1] // 90)
                    if not self.isPlayerSelected :
                        # first click: select one of the player's own pieces
                        if (self.isWhiteTurn and self.whitePlayer.inputCheck(selectedPiece)) :
                            self.selectedPiece = selectedPiece
                            self.isPlayerSelected = True
                            self.chessBoard.setPieceNotation(selectedPiece)
                    else :
                        if self.selectedPiece == selectedPiece :
                            # clicking the selected piece again deselects it
                            self.selectedPiece = None
                            self.isPlayerSelected = False
                            self.chessBoard.setIsSelected(False)
                        else :
                            # second click: attempt to move; on success the
                            # turn passes to black
                            if self.chessBoard.movePiece(mousePos[0] // 90, mousePos[1] // 90) :
                                self.isWhiteTurn = not self.isWhiteTurn
                                self.selectedPiece = None
                                self.isPlayerSelected = False
                                self.chessBoard.setIsSelected(False)
            else :
                # black's turn: minimax search (depth 3) picks the AI move
                move = self.blackPlayer.minimaxRoot(3, self.chessBoard.getChessBoard(), self.chessBoard.getPieces(), True)
                print(self.chessBoard.movePieceWithAI(move[0], move[1][0], move[1][1]))
                self.isWhiteTurn = not self.isWhiteTurn
    def load_resources(self) :
        # Build the board and the two players (human white, AI black).
        self.chessBoard = ChessBoard()
        self.whitePlayer = Player(True)
        self.blackPlayer = AI(False)
        self.whitePlayer.makePieces()
        self.blackPlayer.makePieces()
        self.chessBoard.makeBoard(self.whitePlayer.getPieces(), self.blackPlayer.getPieces())
|
print("Advent of Code - Day 3")
# Read the two wires' comma-separated move lists (e.g. 'R8', 'U5', ...).
# Fix: use context managers so the file handles are closed; the original
# left both handles open.
with open("input3-1", "r") as f:
    wire1_moves = f.read().split(',')
with open("input3-2", "r") as f:
    wire2_moves = f.read().split(',')
from math import*
def manhattan_distance(x, y):
    """Return the Manhattan (L1) distance between points *x* and *y*."""
    total = 0
    for a, b in zip(x, y):
        total += abs(a - b)
    return total
# Test the manhattan distance function
assert manhattan_distance([15, 20], [10, 30]) == 15
def step_right(current_coord):
    """Return the coordinate one unit to the right (+x)."""
    x, y = current_coord
    return (x + 1, y)
# Test step_right
assert step_right((0, 0)) == (1, 0)
def step_left(current_coord):
    """Return the coordinate one unit to the left (-x)."""
    x, y = current_coord
    return (x - 1, y)
# Test step_left
assert step_left((0, 0)) == (-1, 0)
def step_up(current_coord):
    """Return the coordinate one unit up (+y)."""
    x, y = current_coord
    return (x, y + 1)
# Test step_up
assert step_up((0, 0)) == (0, 1)
def step_down(current_coord):
    """Return the coordinate one unit down (-y)."""
    x, y = current_coord
    return (x, y - 1)
# Test step_down
assert step_down((0, 0)) == (0, -1)
def generate_segments(wire_moves):
    """Convert moves like 'R8' into (start, end) segments from the origin."""
    deltas = {'R': (1, 0), 'L': (-1, 0), 'U': (0, 1), 'D': (0, -1)}
    segments = []
    position = (0, 0)
    for move in wire_moves:
        direction = move[0]
        length = int(move[1:])
        if direction in deltas:
            dx, dy = deltas[direction]
            end = (position[0] + dx * length, position[1] + dy * length)
            segments.append((position, end))
            position = end
    return segments
# def move_line(wire_moves):
# line_coords = []
# current_coord = (0,0)
# for move in wire_moves:
# direction = move[0]
# length = int(move[1:])
# if direction == 'R':
# for x in range(length):
# current_coord = step_right(current_coord)
# line_coords.append(current_coord)
# elif direction == 'L':
# for x in range(length):
# current_coord = step_left(current_coord)
# line_coords.append(current_coord)
# elif direction == 'U':
# for x in range(length):
# current_coord = step_up(current_coord)
# line_coords.append(current_coord)
# elif direction == 'D':
# for x in range(length):
# current_coord = step_down(current_coord)
# line_coords.append(current_coord)
# return line_coords
# def overlapping_coords(coords1, coords2):
# overlapping = []
# for x in coords1:
# for y in coords2:
# if x == y:
# overlapping.append(x)
# return overlapping
# def overlapping_segments(segments1, segments2):
# overlaps = []
# for seg1 in segments1:
# for seg2 in segments2:
# if (seg2[0][0] >= seg1[0][0]) && (seg2[0][0] <= seg1[1][0]):
# return None
def intersection(line1, line2):
    """Return the integer intersection point of two lines, or None if parallel.

    Each line is a pair of points ((x1, y1), (x2, y2)). Note: the lines are
    treated as infinite lines, not bounded segments.
    """
    (x1, y1), (x2, y2) = line1
    (x3, y3), (x4, y4) = line2
    xdiff = (x1 - x2, x3 - x4)
    ydiff = (y1 - y2, y3 - y4)

    def cross(a, b):
        # 2D cross product / determinant of two vectors
        return a[0] * b[1] - a[1] * b[0]

    denom = cross(xdiff, ydiff)
    if denom == 0:
        # parallel (or coincident) lines: no single intersection point
        return None
    d = (cross(*line1), cross(*line2))
    return int(cross(d, xdiff) / denom), int(cross(d, ydiff) / denom)
TEST_MOVES_1 = ['R8', 'U5', 'L5', 'D3']
TEST_MOVES_2 = ['U7', 'R6', 'D4', 'L4']
segments1 = generate_segments(TEST_MOVES_1)
segments2 = generate_segments(TEST_MOVES_2)
# collect every non-origin crossing between the two wires
intersections = []
for seg1 in segments1:
    for seg2 in segments2:
        intersect = intersection(seg1, seg2)
        if intersect is not None and intersect != (0, 0):
            intersections.append(intersect)
# Fix: removed a leftover `import pdb; pdb.set_trace()` debugging breakpoint
# that stopped the script here on every run.
distances = []
for coords in intersections:
    distances.append(manhattan_distance((0, 0), coords))
distances.sort()
print("Smallest distance is:", *distances[:1])
|
import os
import sys
import glob
usage = """
Cython .pyx files as well as the created .cpp files are included in the source
distribution. The following information is useful for developers working on the
Cython source code.
Install in development mode. Will cythonize .pyx files first if needed.
python setup.py cythonize develop
Rebuild .cpp files from .pyx, and then stop:
python setup.py cythonize
Build extensions from existing .cpp
python setup.py build_ext
Source distribution:
python setup.py clean cythonize sdist
"""
# Cython is optional at install time: pre-generated .cpp files ship in sdists.
try:
    from Cython.Build import build_ext as cython_build_ext
    from Cython.Build import cythonize
    HAVE_CYTHON = True
except ImportError:
    HAVE_CYTHON = False
# command-line handling: '--usage' prints the help text above and exits;
# the 'cythonize' pseudo-command forces regeneration from .pyx sources
if '--usage' in sys.argv:
    print(usage)
    sys.exit(0)
elif 'cythonize' in sys.argv:
    USE_CYTHON = True
else:
    USE_CYTHON = False
if USE_CYTHON and not HAVE_CYTHON:
    raise ValueError(
        '''
        Cython could not be found. Please install Cython and try again.
        ''')
# Try bootstrapping setuptools if it doesn't exist. This is for using the
# `develop` command, which is very useful for in-place development work.
try:
    import pkg_resources
    try:
        pkg_resources.require("setuptools>=0.6c5")
    except pkg_resources.VersionConflict:
        from ez_setup import use_setuptools
        use_setuptools(version="0.6c5")
    from setuptools import setup, Command
except ImportError:
    sys.exit(
        'pybedtools uses setuptools '
        '(https://packaging.python.org/installing/) '
        'for installation but setuptools was not found')
curdir = os.path.abspath(os.path.dirname(__file__))
# These imports need to be here; setuptools needs to be imported first.
from distutils.extension import Extension  # noqa: E402
from distutils.command.build import build  # noqa: E402
from distutils.command.build_ext import build_ext  # noqa: E402
from distutils.command.sdist import sdist  # noqa: E402
import distutils.log
# version components: MAJ.MIN.REV
MAJ = 0
MIN = 9
REV = 1
VERSION = '%d.%d.%d' % (MAJ, MIN, REV)
class CleanCommand(Command):
    """
    Custom distutils command to clean the various files created by cython.
    E.g.,
    pybedtools/featurefuncs.cpp
    pybedtools/cbedtools.cpp
    pybedtools/cbedtools.cpython-36m-x86_64-linux-gnu.so
    pybedtools/featurefuncs.cpython-36m-x86_64-linux-gnu.so
    """
    user_options = []
    def initialize_options(self):
        # collect the files and directories to delete in run()
        self._clean_me = []
        self._clean_trees = []
        # Add files to be protected here
        self._clean_exclude = ['bedFile.cpp', 'fileType.cpp', 'gzstream.cpp']
        for root, dirs, files in list(os.walk('pybedtools')):
            for f in files:
                if f in self._clean_exclude:
                    continue
                # only compiled / cython-generated artifacts are removed
                if os.path.splitext(f)[-1] in ('.pyc', '.so', '.o', '.pyo',
                                               '.pyd', '.c', '.cpp', '.cxx',
                                               '.orig'):
                    self._clean_me.append(os.path.join(root, f))
            for d in dirs:
                if d == '__pycache__':
                    self._clean_trees.append(os.path.join(root, d))
        for d in ('build',):
            if os.path.exists(d):
                self._clean_trees.append(d)
    def finalize_options(self):
        pass
    def run(self):
        # best-effort removal: silently skip files that are already gone
        for clean_me in self._clean_me:
            try:
                print('removing', clean_me)
                os.unlink(clean_me)
            except Exception:
                pass
        for clean_tree in self._clean_trees:
            try:
                import shutil
                print('removing directory', clean_tree)
                shutil.rmtree(clean_tree)
            except Exception:
                pass
class CythonBuildExt(build_ext):
    """
    Subclass build_ext to get clearer report if Cython is necessary.
    """
    def build_extensions(self):
        # regenerate C++ from .pyx before delegating to the standard build
        for ext in self.extensions:
            cythonize(ext)
        build_ext.build_extensions(self)
class Cythonize(Command):
    """
    Generate .cpp files and then stop
    """
    user_options = []
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def run(self):
        # translate all .pyx sources of the module-level extensions list
        cythonize(extensions)
def find_missing_src(ext):
    """
    Check whether sources needed to build extension *ext* are present.
    Will return a list of missing source files that might be obtained by running cythonize.
    Will raise a FileNotFoundError if some missing .cpp source files do not have a corresponding .pyx.
    """
    missing_src = []
    for src in ext.sources:
        if not os.path.exists(src):
            # (removed a stale commented-out raise block that predated the
            # FileNotFoundError handling below)
            (root, extn) = os.path.splitext(src)
            if extn == ".cpp":
                # a missing .cpp is recoverable only if its .pyx exists
                alt_src = root + ".pyx"
                if not os.path.exists(alt_src):
                    raise FileNotFoundError(
                        "Files %s and %s not found." % (src, alt_src))
                missing_src.append(src)
            else:
                raise FileNotFoundError(
                    "File %s not found." % src)
    return missing_src
class InformativeBuildExt(build_ext):
    # build_ext used when 'cythonize' was not requested: regenerates any
    # missing .cpp files from their .pyx sources, with an informative error
    # when that is impossible.
    def build_extensions(self):
        for ext in self.extensions:
            missing_src = find_missing_src(ext)
            if missing_src:
                if not HAVE_CYTHON:
                    raise ValueError(
                        '''
                        Cython could not be found.
                        Please install Cython and try again.
                        ''')
                self.announce(
                    "Trying to generate the following missing files:\n%s" % "\n".join(missing_src),
                    level=distutils.log.INFO)
                for src in missing_src:
                    assert src in ext.sources
                    (root, extn) = os.path.splitext(src)
                    # find_missing_src guarantees only .cpp files get here
                    assert extn == ".cpp"
                    cythonize(root + ".pyx")
                # verify the regeneration actually produced the files
                still_missing = find_missing_src(ext)
                if still_missing:
                    raise ValueError(
                        '''
                        Some source files are missing to build an extension.
                        %s''' % "\n".join(still_missing))
        build_ext.build_extensions(self)
class SDist(sdist):
    """sdist that regenerates .cpp files from .pyx before packaging."""
    def run(self):
        # always regenerate so the sdist ships up-to-date .cpp sources
        cythonize(extensions)
        for ext in extensions:
            for src in ext.sources:
                if not os.path.exists(src):
                    # Fix: the format string has a single placeholder; the
                    # stray second argument (usage) was dropped.
                    raise Exception(
                        "Cython-generated file '{0}' not found. "
                        "Run 'python setup.py --usage' for details.".format(src))
        sdist.run(self)
# Build from .pyx when cythonizing, otherwise from the shipped .cpp files.
EXT = '.pyx' if USE_CYTHON else '.cpp'
extensions = [
    Extension(
        'pybedtools.cbedtools',
        depends=glob.glob('pybedtools/include/*h'),
        libraries=['stdc++', 'z'],
        include_dirs=['pybedtools/include/'],
        sources=['pybedtools/cbedtools' + EXT] + sorted(glob.glob('pybedtools/include/*.cpp')),
        language='c++'),
    Extension(
        'pybedtools.featurefuncs',
        depends=glob.glob('pybedtools/include/*h'),
        libraries=['stdc++', 'z'],
        include_dirs=['pybedtools/include/'],
        sources=['pybedtools/featurefuncs' + EXT] + sorted(glob.glob('pybedtools/include/*.cpp')),
        language='c++'),
]
# map distutils command names to the custom command classes defined above
cmdclass = {
    'clean': CleanCommand,
    'build': build,
    'sdist': SDist,
}
if USE_CYTHON:
    cmdclass['build_ext'] = cython_build_ext
    cmdclass['cythonize'] = Cythonize
else:
    cmdclass['build_ext'] = InformativeBuildExt
if __name__ == "__main__":
    # generate pybedtools/version.py so the package can report its version
    with open(os.path.join(curdir, 'pybedtools/version.py'), 'w') as fout:
        fout.write(
            "\n".join(["",
                       "# THIS FILE IS GENERATED FROM SETUP.PY",
                       "version = '{version}'",
                       "__version__ = version"]).format(version=VERSION)
        )
    README = open(os.path.join(curdir, "README.rst")).read()
    setup(
        name='pybedtools',
        maintainer='Ryan Dale',
        version=VERSION,
        ext_modules=extensions,
        maintainer_email='ryan.dale@nih.gov',
        description='Wrapper around BEDTools for bioinformatics work',
        license='MIT',
        url='https://github.com/daler/pybedtools',
        download_url='',
        long_description=README,
        zip_safe=False,
        setup_requires=[],
        install_requires=['six', 'pysam', 'numpy'],
        classifiers=[
            'Development Status :: 5 - Production/Stable',
            'Intended Audience :: Science/Research',
            'License :: OSI Approved :: MIT License',
            'Topic :: Scientific/Engineering :: Bio-Informatics',
            'Programming Language :: Python',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.6',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
            'Topic :: Software Development :: Libraries :: Python Modules',
        ],
        cmdclass=cmdclass,
        packages=['pybedtools',
                  'pybedtools.test',
                  'pybedtools.contrib',
                  'pybedtools.test.data'],
        package_data={'pybedtools': ["test/data/*",
                                     "*.pyx",
                                     "*.pxi",
                                     "*.pxd",
                                     "*.cxx",
                                     "*.c",
                                     "*.cpp",
                                     "*.h"],
                      'src': ['src/*'],
                      },
        include_package_data=True,
        # NOTE(review): language_level is not a setup() parameter (it is a
        # cythonize() directive); setuptools ignores it here -- confirm intent.
        language_level=2,
    )
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2017-08-24 20:22
from __future__ import unicode_literals
import blogger.apps.principal.models
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the Slider model, remove InstitucionSlider.titulo and change its
    foto upload path to a callable."""

    dependencies = [
        ('principal', '0006_institucionslider'),
    ]

    operations = [
        migrations.CreateModel(
            name='Slider',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('foto', models.ImageField(upload_to='img/')),
                ('titulo', models.CharField(max_length=50)),
            ],
        ),
        migrations.RemoveField(
            model_name='institucionslider',
            name='titulo',
        ),
        migrations.AlterField(
            model_name='institucionslider',
            name='foto',
            # upload path is now computed per-instance by this callable
            field=models.ImageField(upload_to=blogger.apps.principal.models.image_directory_path),
        ),
    ]
|
# tests/__init__.py
import os
import time
import tempfile
import pytest
from caten_music import CreateApp
@pytest.fixture
def client():
    # Yield a test client for the app's test configuration; the commented
    # lines are the (currently unused) temp-database setup/teardown.
    app = CreateApp.Test()
    # db_fd, app.config["SQLALCHEMY_DATABASE_URI"] = tempfile.mkstemp()
    client = app.test_client()
    yield client
    # os.close(db_fd)
    # os.unlink(app.config["SQLALCHEMY_DATABASE_URI"])
def dropAll():
    # Drop every table of the test database. The app is instantiated so the
    # SQLAlchemy engine is configured before db.drop_all() runs.
    app = CreateApp.Test()
    from caten_music.models.base import db
    db.drop_all()
def createAll():
    # Create every table of the test database (counterpart of dropAll).
    app = CreateApp.Test()
    from caten_music.models.base import db
    db.create_all()
def create_test_user():
    # Seed the test database with one activated and one not-yet-activated user.
    app = CreateApp.Test()
    from caten_music import models
    models.db.create_all()
    # create an account that is already activated
    test_user = models.User(username="test", email="testmail@test.mail.commm", displayname="testDisplay", password="testpassword", is_authenticated=True, is_active=True)
    test_user.save()
    # create an account that has not been activated yet
    test_user_not_activated = models.User(username="test_not_act", email="testmailnotact@test.mail.commm", displayname="testDisplay", password="testpasswordnotact")
    test_user_not_activated.save()
# def create_init_db():
# from caten_music.db import db
# yield db
|
# -*- coding: utf-8 -*-
__author__ = 'liupeiyu'
from watchdog.utils import watchdog_info
CLOUD_USER_SESSION_KEY = 'clouduid'


def get_request_cloud_user(request):
    """Return the cloud user attached to *request*, or None when absent."""
    # CloudSessionMiddleware is assumed to have set request.cloud_user
    return getattr(request, 'cloud_user', None)


def get_session_cloud_user_name():
    """Return the cookie name that stores the cloud user id."""
    return CLOUD_USER_SESSION_KEY


def get_cloud_user_from_cookie(request):
    """Read the cloud user id straight from the request cookies."""
    cookie_name = get_session_cloud_user_name()
    return request.COOKIES.get(cookie_name)


def save_session_cloud_user(response, user_id):
    """Persist *user_id* as the cloud-user cookie on *response*."""
    # NOTE(review): max_age=3600*24*24*24 (~1.6 years) looks like a typo for
    # something like 3600*24*30 -- confirm the intended lifetime.
    cookie_value = '%s' % user_id
    response.set_cookie(get_session_cloud_user_name(), cookie_value, max_age=3600*24*24*24)


def delete_session_cloud_user(response):
    """Remove the cloud-user cookie from *response*."""
    response.delete_cookie(get_session_cloud_user_name())


def logout_cloud_user(response):
    """Log the cloud user out by deleting the session cookie."""
    delete_session_cloud_user(response)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@Author : yanyongyu
@Date : 2021-03-23 00:20:51
@LastEditors : yanyongyu
@LastEditTime : 2021-03-23 00:21:18
@Description : None
@GitHub : https://github.com/yanyongyu
"""
__author__ = "yanyongyu"
from typing import Optional
from . import redis
USER_TOKEN_FORMAT = "github_token_{user_id}"


def set_user_token(user_id: str, token: str) -> Optional[bool]:
    """Store the GitHub *token* for *user_id* in redis."""
    key = USER_TOKEN_FORMAT.format(user_id=user_id)
    return redis.set(key, token)


def delete_user_token(user_id: str) -> int:
    """Delete the stored token for *user_id*; returns the redis delete count."""
    key = USER_TOKEN_FORMAT.format(user_id=user_id)
    return redis.delete(key)


def exists_user_token(user_id: str) -> int:
    """Return the number of matching keys (1 when a token is stored, else 0)."""
    key = USER_TOKEN_FORMAT.format(user_id=user_id)
    return redis.exists(key)


def get_user_token(user_id: str) -> Optional[str]:
    """Return the decoded token for *user_id*, or None when none is stored."""
    raw = redis.get(USER_TOKEN_FORMAT.format(user_id=user_id))
    return None if raw is None else raw.decode()
|
#Seth Jones
#11/07/2019
#Period 1/2
import random
from time import sleep
divideLines = "------------------------------------------------"
#--------------------------Options----------------------#
def options():
    """Top-level menu: choose a game mode and dispatch to it."""
    while True:
        print(divideLines)
        sleep(0.5)
        menu_text = """
Welcome to the almalgamation of Tic Tac Toe and the Number Game.
Please choose your game mode.
1: You Guess Computer's Number
2: Computer Guesses Your Number
3: Play Tic Tac Toe
4: Quit
"""
        print(menu_text)
        while True:
            op = input("Enter option (1-4): ")
            if op == "1":
                sleep(0.5)
                menu()
                break
            elif op == "2":
                sleep(0.5)
                comset()
                # Fix: option 2 had no break, so control fell through to the
                # invalid-option message after comset() returned.
                break
            elif op == "3":
                # TicTacToe runs its game loop at import time
                import TicTacToe
                sleep(1)
                input("\nPress enter to go back.")
                print(divideLines)
                break
            elif op == "4":
                # Fix: this was a separate `if` after the elif chain, so any
                # non-"4" option also hit its else branch.
                print(divideLines)
                quit()
            else:
                sleep(0.5)
                print("Please choose a valid option.\n")
                continue
#----------------------Menu---------------------------#
def menu():
    """Difficulty menu for the 'you guess' game; launches game_loop."""
    # Fix: gamesWon/gamesLost were referenced but never defined anywhere in
    # the module, raising NameError as soon as a difficulty was chosen.
    gamesWon = 0
    gamesLost = 0
    while True:
        print(divideLines)
        menu_text = """
Play Alone
Choose your difficulty.
1: Easy
2: Medium
3: Hard
4: Custom
5: Quit
"""
        print(menu_text)
        dif = input("Enter difficulty (1-5): ")
        if dif == "1":
            sleep(0.5)
            print("\nYou picked Easy.")
            game_loop(0, 10, 3, gamesWon, gamesLost)
            break
        elif dif == "2":
            sleep(0.5)
            print("\nYou picked Medium.")
            game_loop(0, 100, 5, gamesWon, gamesLost)
            break
        elif dif == "3":
            sleep(0.5)
            print("\nYou picked Hard.")
            game_loop(0, 1000, 10, gamesWon, gamesLost)
            break
        elif dif == "4":
            while True:
                sleep(0.5)
                minIn = input("\nPlease choose your minimum: ")
                if minIn.isdigit():
                    rmin = int(minIn)
                    break
                print("Please choose a valid number.")
            while True:
                maxIn = input("Please choose your maximum: ")
                if not maxIn.isdigit():
                    print("Please choose a valid number.")
                    continue
                rmax = int(maxIn)
                # Fix: the ordering check used to run after this loop and
                # simply broke out; re-prompt until the range is valid.
                if rmin > rmax:
                    print("Please choose a maximum that is more than the minimum.")
                    continue
                break
            while True:
                tryIn = input("Please choose your try count: ")
                if tryIn.isdigit():
                    game_loop(rmin, rmax, int(tryIn), gamesWon, gamesLost)
                    break
                print("Please choose a valid number.")
        elif dif == "5":
            quit()
        else:
            # Fix: the message said "1 to 4" although there are 5 options.
            print("\nPlease choose a number from 1 to 5.")
#-------------------------Assign Number------------------------#
def assign_number(rmin,rmax):
    # Prompt until the user enters an integer within [rmin, rmax]; return it.
    while True:
        sleep(0.5)
        userNum = input(str.format("\nPick a number between {0} and {1}: ",rmin,rmax))
        if userNum.isdigit():
            userNum = int(userNum)
            if userNum >= rmin and userNum <= rmax:
                return userNum
            else:
                # numeric but outside the range: re-prompt (continue skips
                # the invalid-input message below)
                sleep(0.5)
                print("Please choose a number in the range.")
                continue
        # reached only for non-numeric input
        sleep(0.5)
        print("Invalid choice, try again.")
#-------------------------------Game Loop--------------------------#
def game_loop(rmin,rmax,maxTries,gamesWon,gamesLost):
    # One round of the guessing game: the player has maxTries attempts to
    # find a random number in [rmin, rmax], then is offered a replay.
    # NOTE(review): gamesWon/gamesLost are incremented locally only, so the
    # totals never persist across rounds -- confirm whether that is intended.
    tries = 0
    randnum = random.randint(rmin,rmax)
    x = assign_number(rmin,rmax)
    tries += 1
    while tries != maxTries and x != randnum:
        # hint the direction and show remaining attempts, then re-prompt
        if x > randnum:
            print("Guess lower.")
            if (maxTries - tries) == 1:
                print("You have 1 try left.")
            else:
                print(str.format("You have {} tries left.",(maxTries - tries)))
        else:
            print("Guess higher.")
            if (maxTries - tries) == 1:
                print("You have 1 try left.")
            else:
                print(str.format("You have {} tries left.",maxTries - tries))
        x = assign_number(rmin,rmax)
        tries += 1
    if x == randnum:
        # win: offer replay (returns to the mode menu via options())
        sleep(0.5)
        print("\nYou have won the game!")
        gamesWon += 1
        sleep(1)
        while True:
            quitChoice = input("Would you like to try again? (y/n): ")
            if quitChoice == "n":
                quit()
            elif quitChoice == "y":
                sleep(1)
                options()
            elif quitChoice == "'y' or 'n'":
                # easter egg for users who type the prompt literally
                sleep(0.5)
                print("Cheeky.\n")
                continue
            else:
                sleep(0.5)
                print("Please type 'y' or 'n'.\n")
                continue
    else:
        # out of tries: reveal the number and offer replay
        print("You lose!")
        print(str.format("The number was {}.",randnum))
        gamesLost += 1
        sleep(1)
        while True:
            quitChoice = input("Would you like to try again? (y/n): ")
            if quitChoice == "y":
                game_loop(rmin,rmax,maxTries,gamesWon,gamesLost)
            elif quitChoice == "n":
                sleep(1)
                options()
            elif quitChoice == "'y' or 'n'":
                sleep(0.5)
                print("Cheeky.\n")
                continue
            else:
                sleep(0.5)
                print("Please type 'y' or 'n'.\n")
                continue
def comset():
    """Collect the range and guess budget, then start the computer-guess game."""
    print(divideLines)
    sleep(1)
    print("""Welcome to the computer number guessing game! You will think
of a number, then the computer will try to guess it based on
your inputs. You will enter 'h' for higher, 'l' for lower,
and 'e' for equal.\n""")
    while True:
        sleep(0.5)
        minIn = input("Please choose your minimum: ")
        if minIn.isdigit():
            rmin = int(minIn)
            break
        else:
            sleep(0.5)
            print("Please choose a valid number.\n")
            continue
    while True:
        sleep(0.5)
        maxIn = input("Please choose your maximum: ")
        # Fix: the original checked rmin >= rmax only when the input was NOT
        # a digit, at which point rmax was never assigned (NameError); the
        # ordering is now validated after a successful conversion.
        if not maxIn.isdigit():
            sleep(0.5)
            print("Please choose a valid number.\n")
            continue
        rmax = int(maxIn)
        if rmin >= rmax:
            sleep(0.5)
            print("Please choose a maximum that is more than your minimum.\n")
            continue
        break
    while True:
        sleep(0.5)
        tryIn = input("Please choose the amount of guesses the computer has: ")
        # Fix: isdigit was referenced without calling it (always truthy) and
        # maxTries stayed a string, breaking the numeric compares in comloop.
        if tryIn.isdigit():
            comloop(rmin, rmax, int(tryIn))
            break
        else:
            sleep(0.5)
            print("Please choose a valid number.\n")
def comloop(rmin, rmax, maxTries):
    # Computer-guesses mode: repeatedly bisect the remaining [rmin, rmax]
    # range and ask the player whether the secret number is higher ('h'),
    # lower ('l') or equal ('e'), until the computer guesses right or the
    # try limit is hit.
    #
    # NOTE(review): maxTries must be an int — `maxTries != tries` compares it
    # against an int counter, so a string limit (input() returns str) can
    # never be reached and the loops run forever. Confirm the caller converts.
    tries = 0
    while maxTries != tries:
        # First guess: midpoint of the full range.
        inCom = round((rmax - rmin + 1)/2)
        print(divideLines)
        print(str.format("I'm thinking your number is {}.",inCom))
        sleep(0.5)
        tries += 1
        while maxTries != tries:
            print(tries)
            print(maxTries)
            sleep(0.5)
            hl = input("Is this number higher, lower, or equal('h', 'l', or 'e')? ")
            if hl == "l":
                # Player's number is larger: raise the lower bound and guess
                # the midpoint of the upper half.
                sleep(0.5)
                rmin = inCom
                inCom = round(((rmax - rmin + 1)/2) + inCom)
                print(divideLines)
                print(str.format("I'm thinking your number is {}.",inCom))
                tries += 1
            elif hl == "h":
                # Player's number is smaller: lower the upper bound and guess
                # the midpoint of the lower half.
                sleep(0.5)
                rmax = inCom
                inCom = round(inCom - ((rmax - rmin + 1)/2))
                print(divideLines)
                print(str.format("I'm thinking your number is {}.",inCom))
                tries += 1
            elif hl == "e":
                # Correct guess: leave the inner loop via break (skips its
                # else clause) and fall through to the "play again" prompt.
                sleep(0.5)
                print("Nice. We did it.")
                break
            else:
                # NOTE(review): "ir" is a typo for "or" in this message.
                print("Please type 'h', 'l', ir 'e'.")
                continue
        else:
            # Inner while exhausted the try limit without an 'e' break.
            sleep(0.5)
            print("I've run out of tries. I'm sorry.")
            break
        # Reached only after the computer guessed correctly ('e' break above).
        while True:
            quitChoice = input("Would you like to try again? (y/n): ")
            if quitChoice == "n":
                options()
            elif quitChoice == "y":
                sleep(1)
                comset()
            elif quitChoice == "'y' or 'n'":
                sleep(0.5)
                print("Cheeky.\n")
                continue
            else:
                sleep(0.5)
                print("Please type 'y' or 'n'.\n")
                continue
    else:
        # Outer while ended without a break: the computer is out of guesses
        # before it could even start a round.
        sleep(1)
        print("The computer has run out of guesses.")
        while True:
            quitChoice = input("Would you like to try again? (y/n): ")
            if quitChoice == "n":
                options()
            elif quitChoice == "y":
                sleep(1)
                comset()
            elif quitChoice == "'y' or 'n'":
                sleep(0.5)
                print("Cheeky.\n")
                continue
            else:
                sleep(0.5)
                print("Please type 'y' or 'n'.\n")
                continue
    sleep(0.5)
    options()
|
from openpyxl import Workbook
from openpyxl import load_workbook

if __name__ == '__main__':
    # Load the existing workbook. (The original first bound excel_data to a
    # fresh Workbook() that this line immediately overwrote — dead code.)
    excel_data = load_workbook("C:/Users/Owner/Documents/GitHub/TATADataChallenge2017/NBTC_Tata_Challenge.01.xlsx")
    # Fixed: get_sheet_names was referenced without calling it (printing the
    # bound method object) and is deprecated; `sheetnames` is the supported
    # property and returns the list of sheet titles.
    print(excel_data.sheetnames)
|
#!/usr/bin/python3
from typing import List
import json
from bplib.butil import TreeNode, arr2TreeNode, btreeconnect, aprint
class Solution:
    def findTheDifference(self, s: str, t: str) -> str:
        """Return the one lowercase letter present in t but not in s.

        Counts occurrences of each letter in s, subtracts the counts seen
        in t, and reports the letter left with a deficit of one. Returns
        the empty string if no such letter exists.
        """
        counts = [0] * 26
        for ch in s:
            counts[ord(ch) - ord('a')] += 1
        for ch in t:
            counts[ord(ch) - ord('a')] -= 1
        for idx in range(26):
            if counts[idx] == -1:
                return chr(idx + ord('a'))
        return ""
# Read the two strings (JSON-encoded, one per line) and print the extra letter.
first = json.loads(input())
second = json.loads(input())
print(Solution().findTheDifference(first, second))
|
from django.test import TestCase, Client
from django.db.models import Max
from .models import Category, Item, CartItem, OrderItem, Order, User
# pylint: disable=no-member
# Create your tests here.
class OrdersTestCase(TestCase):
    """Database- and view-level tests for categories, items, cart items,
    order items and orders."""

    def setUp(self):
        """Create two users, four items (Item_3 deliberately has a negative
        price), three cart items, three order items and one valid order."""
        # Create users.
        user_1 = User.objects.create(username='User_1', password='passw0rd')
        user_2 = User.objects.create(username='User_2', password='passw0rd')
        # Create items.
        pizza = Category.objects.create(name='Pizza')
        topping = Category.objects.create(name='Topping')
        item_1 = Item.objects.create(category=pizza, name='Item_1', price=10000)
        item_2 = Item.objects.create(category=pizza, name='Item_2', price=0)
        item_3 = Item.objects.create(category=pizza, name='Item_3', price=-10000)
        Item.objects.create(category=topping, name='topping_1')
        # Create cartitems.
        CartItem.objects.create(user=user_1, item=item_1, quantity=1, price=item_1.price)
        CartItem.objects.create(user=user_1, item=item_2, quantity=1, price=item_2.price)
        CartItem.objects.create(user=user_1, item=item_3, quantity=1, price=item_3.price)
        # Create orderitems.
        orderitem_1 = OrderItem.objects.create(user=user_2, item=item_1, quantity=1, price=item_1.price)
        orderitem_2 = OrderItem.objects.create(user=user_2, item=item_2, quantity=1, price=item_2.price)
        OrderItem.objects.create(user=user_2, item=item_3, quantity=1, price=item_3.price)
        # Create a valid order holding the first two order items.
        order_1 = Order.objects.create(
            user = user_2,
            contact = '010',  # fixed: was '0l0' (letter l), inconsistent with the other fixture
            billing_address = 'somewhere',
            shipping_address = 'anywhere',
            subtotal = orderitem_1.price + orderitem_2.price
        )
        order_1.items.add(orderitem_1)
        order_1.items.add(orderitem_2)

    ### Database-side tests

    # Test item.
    def test_valid_item(self):
        """An item with a positive price is valid."""
        item_1 = Item.objects.get(name='Item_1')
        self.assertTrue(item_1.is_valid_item())

    def test_invalid_item(self):
        """An item with a negative price is invalid."""
        item_3 = Item.objects.get(name='Item_3')
        self.assertFalse(item_3.is_valid_item())

    # Test cart_item.
    def test_valid_cartitem(self):
        """A cart item wrapping a valid item is valid."""
        item_1 = Item.objects.get(name='Item_1')
        cartitem_1 = CartItem.objects.get(item=item_1)
        self.assertTrue(cartitem_1.is_valid_cartitem())

    def test_invalid_cartitem(self):
        """A cart item wrapping a zero-priced item is invalid."""
        item_2 = Item.objects.get(name='Item_2')
        cartitem_2 = CartItem.objects.get(item=item_2)
        self.assertFalse(cartitem_2.is_valid_cartitem())

    def test_cartitem_count(self):
        """User_1's cart holds exactly the three cart items from setUp."""
        user_1 = User.objects.get(username='User_1')
        self.assertEqual(user_1.cart.count(), 3)

    # Test order_item.
    def test_valid_orderitem(self):
        item_1 = Item.objects.get(name='Item_1')
        orderitem_1 = OrderItem.objects.get(item=item_1)
        self.assertTrue(orderitem_1.is_valid_orderitem())

    def test_invalid_orderitem(self):
        item_2 = Item.objects.get(name='Item_2')
        orderitem_2 = OrderItem.objects.get(item=item_2)
        self.assertFalse(orderitem_2.is_valid_orderitem())

    # Test order.
    def test_valid_order(self):
        user_2 = User.objects.get(username='User_2')
        order = Order.objects.get(user=user_2)
        self.assertTrue(order.is_valid_order())

    def test_invalid_order(self):
        """An order whose subtotal is negative must be rejected."""
        user_2 = User.objects.get(username='User_2')
        orderitem_2 = OrderItem.objects.get(price=0)
        orderitem_3 = OrderItem.objects.get(price=-10000)
        order = Order.objects.create(
            user = user_2,
            contact = '010',
            billing_address = 'somewhere',
            shipping_address = 'anywhere',
            # Fixed: the original summed orderitem_2.price twice, which gives
            # 0; the assertion comment below says the subtotal should be
            # -10000, i.e. orderitem_2 + orderitem_3.
            subtotal = orderitem_2.price + orderitem_3.price
        )
        order.items.add(orderitem_2)
        order.items.add(orderitem_3)
        self.assertFalse(order.is_valid_order()) # subtotal is -10000

    def test_order_subtotal(self):
        user_2 = User.objects.get(username='User_2')
        item_1 = Item.objects.get(name='Item_1')
        item_2 = Item.objects.get(name='Item_2')
        order = Order.objects.get(user=user_2)
        self.assertEqual(order.subtotal, item_1.price + item_2.price)

    def test_order_count(self):
        user_2 = User.objects.get(username='User_2')
        order = Order.objects.get(user=user_2)
        self.assertEqual(order.items.count(), 2)

    ### View-side tests

    def test_index(self):
        """The index redirects (e.g. to login) for an anonymous client."""
        c = Client()
        response = c.get("/")
        self.assertEqual(response.status_code, 302)

    def test_valid_item_page(self):
        item_1 = Item.objects.get(name='Item_1')
        c = Client()
        response = c.get(f"/item/{item_1.id}")
        self.assertEqual(response.status_code, 200)

    def test_invalid_item_page(self):
        """Requesting an id past the highest existing one should 404."""
        max_id = Item.objects.all().aggregate(Max("id"))["id__max"]
        c = Client()
        # Fixed: mirror test_valid_item_page's route. The original requested
        # "/{id}", which 404s for the wrong reason (no such URL pattern),
        # not because the item does not exist.
        response = c.get(f"/item/{max_id + 1}")
        self.assertEqual(response.status_code, 404)
def choosepivot_first(A):
    """Pivot-selection strategy: always pick the first element's index."""
    return 0
def choosepivot_last(A):
    """Pivot-selection strategy: always pick the last element's index."""
    return len(A) - 1
def choosepivot_median(A):
    """Pivot-selection strategy: median of the first, last and middle values.

    Returns the index (0, len-1, or the middle index) holding the median of
    those three elements. For even-length A the middle index rounds *down*.
    """
    first = A[0]
    last = A[len(A) - 1]
    middle = A[(len(A) - 1) // 2]
    if first <= last <= middle or middle <= last <= first:
        return len(A) - 1
    if last <= first <= middle or middle <= first <= last:
        return 0
    return (len(A) - 1) // 2
def partition(B, p):
    """Partition B in place around the element at index p.

    Afterwards B reads [elements < pivot] + [pivot] + [elements >= pivot].
    Returns the array together with the pivot's final index.
    """
    # Stash the pivot at the front so the scan can start at index 1.
    B[0], B[p] = B[p], B[0]
    boundary = 1  # first index of the ">= pivot" region
    for scan in range(1, len(B)):
        if B[scan] < B[0]:
            B[boundary], B[scan] = B[scan], B[boundary]
            boundary += 1
    # Drop the pivot between the lesser and greater regions.
    B[0], B[boundary - 1] = B[boundary - 1], B[0]
    return B, boundary - 1
def quicksort(A, comp=0):
    """Quicksort A, counting the primitive comparisons made while partitioning.

    Returns a (sorted_list, comparison_count) tuple; `comp` carries the
    running count through the recursion.

    >>> quicksort([3, 8, 2, 5, 1, 4, 7, 6])
    ([1, 2, 3, 4, 5, 6, 7, 8], 15)
    """
    if len(A) <= 1:
        return (A, comp)
    # Partitioning an n-element array costs n - 1 comparisons.
    comp += len(A) - 1
    pivot_idx = choosepivot_first(A)
    A, pivot_idx = partition(A, pivot_idx)
    # Recurse on each side of the pivot, threading the comparison count.
    left, comp = quicksort(A[:pivot_idx], comp=comp)
    right, comp = quicksort(A[pivot_idx + 1:], comp=comp)
    return (left + [A[pivot_idx]] + right, comp)
def test_quicksort():
    """Sort the three course test files and print the comparison counts.

    Expected counts per pivot strategy:
    size    first   last    median
    10      25      29      21
    100     615     587     518
    1000    10297   10184   8921
    """
    import mergesort
    for n in [10, 100, 1000]:
        # Fixed: the file handle was never closed; use a context manager.
        with open("quicksort_tests/" + str(n) + ".txt", "r") as f:
            A = [int(i) for i in f]
        A, comp = quicksort(A)
        assert mergesort.mergesort(A) == A
        print(comp)  # fixed: was a Python 2 print statement (syntax error in Py3)
def exercise_quicksort():
    """Print the comparison count for the week-2 QuickSort.txt assignment.

    Edit quicksort() to use the appropriate choosepivot_* function for each
    sub-question of the programming problem.
    """
    import mergesort
    # Fixed: the file handle was never closed; use a context manager.
    with open("quicksort_tests/QuickSort.txt", "r") as f:
        A = [int(i) for i in f]
    A, comp = quicksort(A)
    assert mergesort.mergesort(A) == A
    print(comp)  # fixed: was a Python 2 print statement (syntax error in Py3)
if __name__ == "__main__":
import doctest
doctest.testmod()
|
#!/usr/bin/env python
from vector import vector
# Single-letter movement instructions the ship understands ('F' = forward).
DIRECTIONS = ('E', 'W', 'N', 'S', 'F')
# Rotation instructions.
TURNINGS = ('R', 'L')

# Parse "<letter><amount>" lines into (letter, amount) pairs.
# Fixed: the original iterated open('input.txt') inline and never closed
# the file handle; a context manager guarantees it is closed.
with open('input.txt') as infile:
    nav_ins = [(ins[0], int(ins[1:])) for ins in infile]

ship = vector(0, 0, 'E')
print(ship.get_pos())
for ins in nav_ins:
    if ins[0] in DIRECTIONS:
        ship.change_position(ins)
    elif ins[0] in TURNINGS:
        ship.change_direction(ins)
    print(ship.get_pos())
print(f"Manhattan distance: {ship.manhattan_distance()}")
|
import secrets

# Die faces kept as strings, matching the original script's output.
faces = ['1', '2', '3', '4', '5', '6']

# Keep rolling for as long as the player answers "yes" or "y".
answer = "yes"
while answer in ("yes", "y"):
    print("Rolling the dices...")
    print("The values are....")
    # secrets gives a cryptographically strong choice (overkill for dice,
    # but preserved from the original).
    print(secrets.choice(faces))
    answer = input("Roll the dices again?")
'''
Python tem duas funções muito interessantes: ANY e ALL.
-> A função 'ANY' recebe uma lista (ou outro objeto iterável) e retorna
'True' se algum dos elementos for avaliado como 'True'.
-> Já 'ALL' só retorna 'True' se todos os elementos forem avaliados como 'True' ou se ainda se o iterável está vazio. Veja:
>>> everybody=[1,2,3,4]
>>> anybody=[0,1,0,2]
>>> nobody=[0,0,0,0]
>>> any(everybody)
True
>>> any(nobody)
False
>>> any(anybody)
True
>>> all(everybody)
True
>>> all(nobody) - Aqui retorno false porque o 0 significa como FALSO e o numero 1 como VERDADEIRO, pois isso no ALL dá como FALSE.
False
>>> all(anybody)
False
Sem segredos, certo? Mas essas duas funções junto com os generators permite uma sintaxe muito interessante:
>>> v=[10,12,25,14]
>>> any(n>20 for n in v)
True
>>> all(n>20 for n in v)
False
Veja um exemplo disso num código real:
if all(v < 100 for v in values):
    msg = ('Para usar seu cupom de desconto, pelo menos '
           'um dos produtos deve custar mais de R$ 100,00.')
E numa classe real:
class Form:
# ...
def validates(self):
return not any(field.error for field in self.fields)
'''
# all() is False here: 'Daniel' does not start with 'C'.
names = ['Carlos', 'Camila', 'Carla', 'Cassiano', 'Cristina', 'Daniel']
print(all([name[0] == 'C' for name in names]))

# Also False: 'camila' starts with a lowercase 'c', not 'C'.
names1 = ['Carlos', 'camila', 'Carla', 'Cassiano', 'Cristina']
print(all([name[0] == 'C' for name in names1]))

# True: 'b' is not a vowel, so the filtered list is empty — and all() of an
# empty iterable is True (even though an empty list is falsy as a boolean).
print(all([letter for letter in 'b' if letter in 'aeiou']))

# True: the list contains one truthy element (the 1).
print(any([0, 0, 0, 0, 0, 1]))
# False: every element is falsy.
print(any([0, 0, 0, 0, 0]))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.